From 35c4b678d10c5353b80de460426cf90883a72e1d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 17 Nov 2025 15:12:28 +0000
Subject: [PATCH] vendor: Bump github.com/operator-framework/operator-sdk

Bumps the github-dependencies group in /tools with 1 update: [github.com/operator-framework/operator-sdk](https://github.com/operator-framework/operator-sdk).

Updates `github.com/operator-framework/operator-sdk` from 1.39.2 to 1.40.0
- [Release notes](https://github.com/operator-framework/operator-sdk/releases)
- [Changelog](https://github.com/operator-framework/operator-sdk/blob/master/.goreleaser.yml)
- [Commits](https://github.com/operator-framework/operator-sdk/compare/v1.39.2...v1.40.0)

---
updated-dependencies:
- dependency-name: github.com/operator-framework/operator-sdk
  dependency-version: 1.40.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: github-dependencies
...

Signed-off-by: dependabot[bot]
---
 tools/go.mod | 272 +-
 tools/go.sum | 785 +-
 tools/vendor/cel.dev/expr/.bazelversion | 2 +-
 tools/vendor/cel.dev/expr/MODULE.bazel | 34 +-
 tools/vendor/cel.dev/expr/README.md | 2 -
 tools/vendor/cel.dev/expr/cloudbuild.yaml | 2 +-
 tools/vendor/cel.dev/expr/eval.pb.go | 361 +-
 tools/vendor/dario.cat/mergo/FUNDING.json | 7 +
 tools/vendor/dario.cat/mergo/README.md | 5 -
 tools/vendor/dario.cat/mergo/SECURITY.md | 4 +-
 .../AdaLogics/go-fuzz-headers/consumer.go | 48 +-
 .../Azure/go-ansiterm/osc_string_state.go | 18 +-
 .../github.com/BurntSushi/toml/README.md | 2 +-
 .../github.com/BurntSushi/toml/decode.go | 31 +-
 .../github.com/BurntSushi/toml/encode.go | 46 +-
 .../github.com/BurntSushi/toml/error.go | 65 +-
 .../vendor/github.com/BurntSushi/toml/lex.go | 33 +-
 .../vendor/github.com/BurntSushi/toml/meta.go | 3 -
 .../github.com/BurntSushi/toml/parse.go | 17 +-
 .../Masterminds/semver/v3/CHANGELOG.md | 28 +-
 .../Masterminds/semver/v3/README.md | 18 +-
 .../Masterminds/semver/v3/constraints.go | 127 +-
 .../Masterminds/semver/v3/version.go | 173 +-
 .../Microsoft/hcsshim/.clang-format | 12 +
 .../Microsoft/hcsshim/.golangci.yml | 43 +-
 .../github.com/Microsoft/hcsshim/Makefile | 116 +-
 .../Microsoft/hcsshim/Makefile.bootfiles | 197 +
 .../github.com/Microsoft/hcsshim/README.md | 2 +-
 .../Microsoft/hcsshim/internal/hcs/process.go | 9 +-
 .../hcsshim/internal/hcs/schema2/chipset.go | 2 +
 .../hcs/schema2/{cim_mount.go => cimfs.go} | 8 -
 .../hcsshim/internal/hcs/schema2/firmware.go | 8 +
 .../hcsshim/internal/hcs/schema2/memory_2.go | 49 -
 .../hcs/schema2/memory_backing_type.go | 21 +
 .../hcsshim/internal/hcs/schema2/numa.go | 19 +
 .../hcsshim/internal/hcs/schema2/numa_node.go | 17 +
 .../internal/hcs/schema2/numa_node_memory.go | 19 +
 .../hcs/schema2/numa_node_processor.go | 17 +
 .../internal/hcs/schema2/numa_processors.go | 21 +
 .../internal/hcs/schema2/numa_setting.go | 21 +
 .../internal/hcs/schema2/processor_2.go | 23 -
 .../internal/hcs/schema2/properties.go | 2 +
 .../internal/hcs/schema2/property_type.go | 1 +
 .../hcsshim/internal/hcs/schema2/topology.go | 12 +-
 .../internal/hcs/schema2/virtual_machine.go | 39 +-
 .../hcs/schema2/virtual_machine_memory.go | 33 +
 .../hcs/schema2/virtual_machine_processor.go | 21 +
 .../hcs/schema2/virtual_pci_device.go | 3 +-
 .../internal/hcs/schema2/virtual_slit_type.go | 23 +
 .../hcs/schema2/windows_crash_reporting.go | 2 +
 .../Microsoft/hcsshim/internal/hcs/system.go | 21 +-
 .../hcsshim/internal/hns/hnsaccelnet.go | 4 +-
 .../hcsshim/internal/jobobject/jobobject.go | 20 +-
 .../hcsshim/internal/jobobject/limits.go | 1 +
 .../Microsoft/hcsshim/internal/log/context.go | 28 +-
 .../Microsoft/hcsshim/internal/log/format.go | 4 +-
 .../Microsoft/hcsshim/internal/log/scrub.go | 15 +-
 .../hcsshim/internal/vmcompute/vmcompute.go | 2 +-
 .../hcsshim/internal/wclayer/legacy.go | 1 +
 .../hcsshim/internal/winapi/cimfs.go | 11 +
 .../internal/winapi/zsyscall_windows.go | 146 +
 .../osversion/platform_compat_windows.go | 22 +-
 .../hcsshim/osversion/windowsbuilds.go | 7 +
 .../hcsshim/pkg/ociwclayer/import.go | 9 +-
 .../github.com/VividCortex/ewma/.gitignore | 3 +
 .../github.com/VividCortex/ewma/.whitesource | 3 +
 .../github.com/VividCortex/ewma/LICENSE | 21 +
 .../github.com/VividCortex/ewma/README.md | 145 +
 .../github.com/VividCortex/ewma/codecov.yml | 6 +
 .../github.com/VividCortex/ewma/ewma.go | 126 +
 .../github.com/acarl005/stripansi/LICENSE | 21 +
 .../github.com/acarl005/stripansi/README.md | 30 +
 .../acarl005/stripansi/stripansi.go | 13 +
 .../github.com/antlr4-go/antlr/v4/antlrdoc.go | 8 +-
 .../github.com/antlr4-go/antlr/v4/atn.go | 8 +-
 .../antlr4-go/antlr/v4/atn_config.go | 3 -
 .../antlr4-go/antlr/v4/input_stream.go | 2 +-
 .../github.com/antlr4-go/antlr/v4/jcollect.go | 5 +-
 .../github.com/antlr4-go/antlr/v4/lexer.go | 2 +-
 .../antlr4-go/antlr/v4/ll1_analyzer.go | 1 +
 .../github.com/antlr4-go/antlr/v4/mutex.go | 41 +
 .../antlr4-go/antlr/v4/mutex_nomutex.go | 32 +
 .../antlr/v4/parser_atn_simulator.go | 4 +-
 .../antlr4-go/antlr/v4/prediction_context.go | 60 +-
 .../antlr4-go/antlr/v4/recognizer.go | 2 +-
 .../antlr4-go/antlr/v4/statistics.go | 3 +-
 .../github.com/antlr4-go/antlr/v4/token.go | 82 +-
 .../github.com/antlr4-go/antlr/v4/utils.go | 53 +
 .../github.com/cenkalti/backoff/v4/context.go | 62 -
 .../cenkalti/backoff/v4/exponential.go | 216 -
 .../github.com/cenkalti/backoff/v4/retry.go | 146 -
 .../github.com/cenkalti/backoff/v4/tries.go | 38 -
 .../cenkalti/backoff/{v4 => v5}/.gitignore | 0
 .../cenkalti/backoff/v5/CHANGELOG.md | 29 +
 .../cenkalti/backoff/{v4 => v5}/LICENSE | 0
 .../cenkalti/backoff/{v4 => v5}/README.md | 15 +-
 .../cenkalti/backoff/{v4 => v5}/backoff.go | 14 +-
 .../github.com/cenkalti/backoff/v5/error.go | 46 +
 .../cenkalti/backoff/v5/exponential.go | 125 +
 .../github.com/cenkalti/backoff/v5/retry.go | 139 +
 .../cenkalti/backoff/{v4 => v5}/ticker.go | 18 +-
 .../cenkalti/backoff/{v4 => v5}/timer.go | 2 +-
 .../containerd/api/events/content.pb.go | 98 +-
 .../containerd/api/events/content.proto | 5 +
 .../api/events/content_fieldpath.pb.go | 14 +
 .../archive/compression/compression.go | 4 +
 .../containerd/log/context_deprecated.go | 149 -
 .../containerd/containerd/pkg/epoch/epoch.go | 25 +-
 .../containerd/remotes/docker/auth/fetch.go | 20 +-
 .../containerd/remotes/docker/authorizer.go | 27 +-
 .../containerd/containerd/version/version.go | 2 +-
 .../continuity/fs/stat_darwinbsd.go | 26 +
 .../containerd/continuity/fs/stat_unix.go | 4 +-
 .../stargz-snapshotter/estargz/build.go | 8 +-
 .../stargz-snapshotter/estargz/gzip.go | 6 +-
 .../stargz-snapshotter/estargz/testutil.go | 13 +-
 .../containerd/typeurl/v2/README.md | 6 +
 .../github.com/containerd/typeurl/v2/types.go | 89 +-
 .../containerd/typeurl/v2/types_gogo.go | 68 +
 .../github.com/containers/ocicrypt/.gitignore | 1 +
 .../containers/ocicrypt/.golangci.yml | 35 +
 .../containers/ocicrypt/ADOPTERS.md | 10 +
 .../containers/ocicrypt/CODE-OF-CONDUCT.md | 3 +
 .../containers/ocicrypt/MAINTAINERS | 6 +
 .../github.com/containers/ocicrypt/Makefile | 35 +
 .../github.com/containers/ocicrypt/README.md | 50 +
 .../containers/ocicrypt/SECURITY.md | 3 +
 .../ocicrypt/blockcipher/blockcipher.go | 160 +
 .../blockcipher/blockcipher_aes_ctr.go | 193 +
 .../containers/ocicrypt/config/config.go | 114 +
 .../ocicrypt/config/constructors.go | 246 +
 .../config/keyprovider-config/config.go | 80 +
 .../ocicrypt/crypto/pkcs11/common.go | 134 +
 .../ocicrypt/crypto/pkcs11/pkcs11helpers.go | 485 +
 .../crypto/pkcs11/pkcs11helpers_nocgo.go | 30 +
 .../ocicrypt/crypto/pkcs11/utils.go | 115 +
 .../containers/ocicrypt/encryption.go | 356 +
 .../github.com/containers/ocicrypt/gpg.go | 431 +
 .../containers/ocicrypt/gpgvault.go | 100 +
 .../ocicrypt/keywrap/jwe/keywrapper_jwe.go | 156 +
 .../keywrap/keyprovider/keyprovider.go | 242 +
 .../containers/ocicrypt/keywrap/keywrap.go | 48 +
 .../ocicrypt/keywrap/pgp/keywrapper_gpg.go | 272 +
 .../keywrap/pkcs11/keywrapper_pkcs11.go | 152 +
 .../keywrap/pkcs7/keywrapper_pkcs7.go | 137 +
 .../github.com/containers/ocicrypt/reader.go | 40 +
 .../ocicrypt/utils/delayedreader.go | 109 +
 .../containers/ocicrypt/utils/ioutils.go | 58 +
 .../utils/keyprovider/keyprovider.pb.go | 243 +
 .../utils/keyprovider/keyprovider.proto | 17 +
 .../containers/ocicrypt/utils/testing.go | 174 +
 .../containers/ocicrypt/utils/utils.go | 249 +
 .../cyberphone/json-canonicalization/LICENSE | 13 +
 .../webpki.org/jsoncanonicalizer/es6numfmt.go | 71 +
 .../jsoncanonicalizer/jsoncanonicalizer.go | 378 +
 .../cyphar/filepath-securejoin/CHANGELOG.md | 49 +-
 .../cyphar/filepath-securejoin/VERSION | 2 +-
 .../cyphar/filepath-securejoin/join.go | 49 +-
 .../cyphar/filepath-securejoin/mkdir_linux.go | 49 +-
 tools/vendor/github.com/docker/cli/AUTHORS | 31 +-
 .../docker/cli/cli/config/config.go | 9 +-
 .../docker/cli/cli/config/configfile/file.go | 102 +-
 .../cli/cli/config/credentials/file_store.go | 50 +-
 .../cli/cli/config/memorystore/store.go | 126 +
 .../docker/cli/cli/config/types/authconfig.go | 4 +-
 .../docker/distribution/.dockerignore | 1 -
 .../docker/distribution/.golangci.yml | 33 -
 .../github.com/docker/distribution/.mailmap | 54 -
 .../docker/distribution/BUILDING.md | 117 -
 .../docker/distribution/CONTRIBUTING.md | 148 -
 .../github.com/docker/distribution/Dockerfile | 60 -
 .../docker/distribution/MAINTAINERS | 243 -
 .../github.com/docker/distribution/Makefile | 102 -
 .../github.com/docker/distribution/README.md | 130 -
 .../github.com/docker/distribution/ROADMAP.md | 267 -
 .../github.com/docker/distribution/blobs.go | 265 -
 .../github.com/docker/distribution/doc.go | 7 -
 .../docker/distribution/docker-bake.hcl | 56 -
 .../github.com/docker/distribution/errors.go | 119 -
 .../docker/distribution/manifests.go | 125 -
 .../docker/distribution/metrics/prometheus.go | 13 -
 .../docker/distribution/registry.go | 118 -
 .../registry/client/auth/api_version.go | 58 -
 .../registry/client/auth/session.go | 530 -
 .../registry/client/blob_writer.go | 164 -
 .../distribution/registry/client/errors.go | 160 -
 .../registry/client/repository.go | 870 -
 .../registry/client/transport/http_reader.go | 249 -
 .../registry/client/transport/transport.go | 147 -
 .../registry/storage/cache/cache.go | 35 -
 .../cache/cachedblobdescriptorstore.go | 129 -
 .../registry/storage/cache/memory/memory.go | 179 -
 .../github.com/docker/distribution/tags.go | 27 -
 .../docker/distribution/vendor.conf | 52 -
 .../client/command.go | 25 +-
 tools/vendor/github.com/docker/docker/AUTHORS | 44 +-
 .../docker/docker/api/types/filters/errors.go | 37 -
 .../docker/docker/api/types/filters/parse.go | 346 -
 .../docker/api/types/registry/authconfig.go | 97 -
 .../docker/api/types/registry/authenticate.go | 21 -
 .../docker/api/types/registry/registry.go | 96 -
 .../docker/api/types/registry/search.go | 47 -
 .../docker/api/types/versions/compare.go | 2 +-
 .../github.com/docker/docker/errdefs/defs.go | 69 -
 .../github.com/docker/docker/errdefs/doc.go | 8 -
 .../docker/docker/errdefs/helpers.go | 279 -
 .../docker/docker/errdefs/http_helpers.go | 46 -
 .../github.com/docker/docker/errdefs/is.go | 123 -
 .../docker/docker/pkg/ioutils/buffer.go | 51 -
 .../docker/docker/pkg/ioutils/bytespipe.go | 187 -
 .../docker/docker/pkg/ioutils/fswriters.go | 163 -
 .../docker/docker/pkg/ioutils/readers.go | 172 -
 .../docker/docker/pkg/ioutils/writeflusher.go | 92 -
 .../docker/docker/pkg/ioutils/writers.go | 74 -
 .../github.com/docker/docker/registry/auth.go | 201 -
 .../docker/docker/registry/config.go | 437 -
 .../docker/docker/registry/config_unix.go | 16 -
 .../docker/docker/registry/config_windows.go | 20 -
 .../docker/docker/registry/errors.go | 36 -
 .../docker/docker/registry/registry.go | 136 -
 .../docker/docker/registry/search.go | 162 -
 .../docker/registry/search_endpoint_v1.go | 200 -
 .../docker/docker/registry/search_session.go | 218 -
 .../docker/docker/registry/service.go | 145 -
 .../docker/docker/registry/service_v2.go | 80 -
 .../docker/docker/registry/types.go | 41 -
 .../docker/go-connections/tlsconfig/config.go | 100 +-
 .../tlsconfig/config_client_ciphers.go | 14 -
 .../docker/go-metrics/CONTRIBUTING.md | 55 -
 .../github.com/docker/go-metrics/LICENSE.docs | 425 -
 .../github.com/docker/go-metrics/NOTICE | 16 -
 .../github.com/docker/go-metrics/README.md | 91 -
 .../github.com/docker/go-metrics/counter.go | 52 -
 .../github.com/docker/go-metrics/docs.go | 3 -
 .../github.com/docker/go-metrics/gauge.go | 72 -
 .../github.com/docker/go-metrics/handler.go | 74 -
 .../github.com/docker/go-metrics/helpers.go | 10 -
 .../github.com/docker/go-metrics/namespace.go | 315 -
 .../github.com/docker/go-metrics/register.go | 15 -
 .../github.com/docker/go-metrics/timer.go | 85 -
 .../github.com/docker/go-metrics/unit.go | 12 -
 .../emicklei/go-restful/v3/CHANGES.md | 21 +
 .../emicklei/go-restful/v3/README.md | 3 +-
 .../emicklei/go-restful/v3/compress.go | 10 +
 .../emicklei/go-restful/v3/curly.go | 48 +-
 .../emicklei/go-restful/v3/jsr311.go | 19 +-
 .../emicklei/go-restful/v3/route.go | 2 +
 .../github.com/evanphx/json-patch/README.md | 4 +-
 .../github.com/evanphx/json-patch/patch.go | 101 +-
 .../github.com/evanphx/json-patch/v5/merge.go | 26 +-
 .../github.com/evanphx/json-patch/v5/patch.go | 120 +-
 .../github.com/fsnotify/fsnotify/.cirrus.yml | 2 +-
 .../github.com/fsnotify/fsnotify/CHANGELOG.md | 35 +-
 .../fsnotify/fsnotify/CONTRIBUTING.md | 1 +
 .../github.com/fsnotify/fsnotify/README.md | 2 -
 .../fsnotify/fsnotify/backend_fen.go | 107 +-
 .../fsnotify/fsnotify/backend_inotify.go | 421 +-
 .../fsnotify/fsnotify/backend_kqueue.go | 112 +-
 .../fsnotify/fsnotify/backend_other.go | 5 +-
 .../fsnotify/fsnotify/backend_windows.go | 24 +-
 .../github.com/fsnotify/fsnotify/fsnotify.go | 10 +-
 .../fsnotify/fsnotify/internal/darwin.go | 6 +-
 .../fsnotify/fsnotify/internal/freebsd.go | 4 +-
 .../fsnotify/fsnotify/internal/unix.go | 6 +-
 .../fsnotify/fsnotify/internal/windows.go | 4 +-
 .../github.com/fsnotify/fsnotify/shared.go | 64 +
 .../fsnotify/fsnotify/staticcheck.conf | 3 +
 .../github.com/fxamacker/cbor/v2/README.md | 408 +-
 .../fxamacker/cbor/v2/bytestring.go | 27 +
 .../github.com/fxamacker/cbor/v2/cache.go | 22 +-
 .../github.com/fxamacker/cbor/v2/decode.go | 102 +-
 .../github.com/fxamacker/cbor/v2/doc.go | 51 +-
 .../github.com/fxamacker/cbor/v2/encode.go | 191 +-
 .../fxamacker/cbor/v2/encode_map.go | 10 +-
 .../fxamacker/cbor/v2/encode_map_go117.go | 60 -
 .../fxamacker/cbor/v2/omitzero_go124.go | 8 +
 .../fxamacker/cbor/v2/omitzero_pre_go124.go | 8 +
 .../fxamacker/cbor/v2/simplevalue.go | 29 +
 .../github.com/fxamacker/cbor/v2/stream.go | 4 +-
 .../fxamacker/cbor/v2/structfields.go | 15 +-
 .../github.com/fxamacker/cbor/v2/tag.go | 35 +-
 .../github.com/go-jose/go-jose/v4/.gitignore | 2 +
 .../go-jose/go-jose/v4/.golangci.yml | 53 +
 .../github.com/go-jose/go-jose/v4/.travis.yml | 33 +
 .../go-jose/go-jose/v4/CHANGELOG.md | 101 +
 .../go-jose/go-jose/v4/CONTRIBUTING.md | 9 +
 .../gofuzz => go-jose/go-jose/v4}/LICENSE | 0
 .../github.com/go-jose/go-jose/v4/README.md | 106 +
 .../github.com/go-jose/go-jose/v4/SECURITY.md | 13 +
 .../go-jose/go-jose/v4/asymmetric.go | 595 +
 .../go-jose/go-jose/v4/cipher/cbc_hmac.go | 196 +
 .../go-jose/go-jose/v4/cipher/concat_kdf.go | 75 +
 .../go-jose/go-jose/v4/cipher/ecdh_es.go | 86 +
 .../go-jose/go-jose/v4/cipher/key_wrap.go | 109 +
 .../github.com/go-jose/go-jose/v4/crypter.go | 599 +
 .../github.com/go-jose/go-jose/v4/doc.go | 25 +
 .../github.com/go-jose/go-jose/v4/encoding.go | 228 +
 .../go-jose/go-jose/v4/json/LICENSE | 27 +
 .../go-jose/go-jose/v4/json/README.md | 13 +
 .../go-jose/go-jose/v4/json/decode.go | 1216 +
 .../go-jose/go-jose/v4/json/encode.go | 1197 +
 .../go-jose/go-jose/v4/json/indent.go | 141 +
 .../go-jose/go-jose/v4/json/scanner.go | 623 +
 .../go-jose/go-jose/v4/json/stream.go | 484 +
 .../go-jose/go-jose/v4/json/tags.go | 44 +
 .../github.com/go-jose/go-jose/v4/jwe.go | 391 +
 .../github.com/go-jose/go-jose/v4/jwk.go | 823 +
 .../github.com/go-jose/go-jose/v4/jws.go | 470 +
 .../github.com/go-jose/go-jose/v4/opaque.go | 147 +
 .../github.com/go-jose/go-jose/v4/shared.go | 531 +
 .../github.com/go-jose/go-jose/v4/signing.go | 505 +
 .../go-jose/go-jose/v4/symmetric.go | 521 +
 .../go-jose/go-jose/v4/symmetric_go124.go} | 22 +-
 .../go-jose/go-jose/v4/symmetric_legacy.go | 29 +
 .../github.com/go-logr/logr/.golangci.yaml | 16 +-
 .../github.com/go-logr/logr/funcr/funcr.go | 8 +-
 .../go-openapi/jsonpointer/.golangci.yml | 31 +-
 .../go-openapi/jsonpointer/errors.go | 18 +
 .../go-openapi/jsonpointer/pointer.go | 49 +-
 .../github.com/go-openapi/swag/.golangci.yml | 34 +-
 .../github.com/go-openapi/swag/errors.go | 15 +
 .../vendor/github.com/go-openapi/swag/json.go | 3 +-
 .../github.com/go-openapi/swag/loading.go | 2 +-
 .../vendor/github.com/go-openapi/swag/yaml.go | 32 +-
 .../go-viper/mapstructure/v2/.editorconfig | 3 +
 .../go-viper/mapstructure/v2/.golangci.yaml | 55 +-
 .../go-viper/mapstructure/v2/README.md | 7 +-
 .../go-viper/mapstructure/v2/decode_hooks.go | 234 +-
 .../go-viper/mapstructure/v2/errors.go | 244 +
 .../go-viper/mapstructure/v2/flake.lock | 390 +-
 .../go-viper/mapstructure/v2/flake.nix | 45 +-
 .../go-viper/mapstructure/v2/mapstructure.go | 308 +-
 .../migrate/v4/source/iofs/iofs.go | 1 -
 tools/vendor/github.com/google/btree/btree.go | 2 +-
 .../github.com/google/cel-go/cel/BUILD.bazel | 13 +-
 .../github.com/google/cel-go/cel/decls.go | 70 +-
 .../github.com/google/cel-go/cel/env.go | 243 +-
 .../github.com/google/cel-go/cel/folding.go | 60 +-
 .../github.com/google/cel-go/cel/inlining.go | 2 +-
 .../vendor/github.com/google/cel-go/cel/io.go | 69 +-
 .../github.com/google/cel-go/cel/library.go | 477 +-
 .../github.com/google/cel-go/cel/macro.go | 30 +-
 .../github.com/google/cel-go/cel/optimizer.go | 6 +-
 .../github.com/google/cel-go/cel/options.go | 245 +-
 .../github.com/google/cel-go/cel/program.go | 277 +-
 .../github.com/google/cel-go/cel/prompt.go | 155 +
 .../cel-go/cel/templates/authoring.tmpl | 56 +
 .../github.com/google/cel-go/cel/validator.go | 70 +-
 .../google/cel-go/checker/checker.go | 17 +
 .../github.com/google/cel-go/checker/cost.go | 659 +-
 .../google/cel-go/checker/decls/decls.go | 37 +-
 .../google/cel-go/checker/errors.go | 4 +
 .../google/cel-go/common/BUILD.bazel | 2 +
 .../google/cel-go/common/ast/ast.go | 78 +
 .../google/cel-go/common/ast/factory.go | 29 +-
 .../google/cel-go/common/ast/navigable.go | 7 +-
 .../cel-go/common/containers/container.go | 14 +-
 .../google/cel-go/common/debug/debug.go | 2 +-
 .../google/cel-go/common/decls/BUILD.bazel | 2 +
 .../google/cel-go/common/decls/decls.go | 297 +-
 .../github.com/google/cel-go/common/doc.go | 154 +
 .../google/cel-go/common/env/BUILD.bazel | 50 +
 .../google/cel-go/common/env/env.go | 887 +
 .../github.com/google/cel-go/common/errors.go | 11 +-
 .../google/cel-go/common/stdlib/BUILD.bazel | 1 +
 .../google/cel-go/common/stdlib/standard.go | 702 +-
 .../google/cel-go/common/types/BUILD.bazel | 1 +
 .../google/cel-go/common/types/bool.go | 9 +
 .../google/cel-go/common/types/bytes.go | 15 +
 .../google/cel-go/common/types/double.go | 22 +
 .../google/cel-go/common/types/duration.go | 5 +
 .../google/cel-go/common/types/err.go | 6 +
 .../google/cel-go/common/types/format.go | 42 +
 .../google/cel-go/common/types/int.go | 5 +
 .../google/cel-go/common/types/list.go | 20 +-
 .../google/cel-go/common/types/map.go | 36 +
 .../google/cel-go/common/types/null.go | 5 +
 .../google/cel-go/common/types/object.go | 31 +-
 .../google/cel-go/common/types/optional.go | 11 +
 .../google/cel-go/common/types/string.go | 4 +
 .../google/cel-go/common/types/timestamp.go | 4 +
 .../google/cel-go/common/types/types.go | 24 +-
 .../google/cel-go/common/types/uint.go | 6 +
 .../github.com/google/cel-go/ext/BUILD.bazel | 18 +-
 .../github.com/google/cel-go/ext/README.md | 64 +-
 .../github.com/google/cel-go/ext/bindings.go | 10 +-
 .../google/cel-go/ext/comprehensions.go | 58 +-
 .../github.com/google/cel-go/ext/encoders.go | 30 +-
 .../cel-go/ext/extension_option_factory.go | 75 +
 .../google/cel-go/ext/formatting.go | 27 +-
 .../google/cel-go/ext/formatting_v2.go | 788 +
 .../github.com/google/cel-go/ext/guards.go | 8 +-
 .../github.com/google/cel-go/ext/lists.go | 305 +-
 .../github.com/google/cel-go/ext/math.go | 47 +
 .../github.com/google/cel-go/ext/native.go | 22 +-
 .../github.com/google/cel-go/ext/protos.go | 25 +-
 .../github.com/google/cel-go/ext/regex.go | 332 +
 .../github.com/google/cel-go/ext/sets.go | 37 +-
 .../github.com/google/cel-go/ext/strings.go | 63 +-
 .../google/cel-go/interpreter/activation.go | 24 +
 .../cel-go/interpreter/attribute_patterns.go | 13 +-
 .../cel-go/interpreter/interpretable.go | 122 +-
 .../google/cel-go/interpreter/interpreter.go | 188 +-
 .../google/cel-go/interpreter/planner.go | 58 +-
 .../google/cel-go/interpreter/prune.go | 63 +-
 .../google/cel-go/interpreter/runtimecost.go | 253 +-
 .../github.com/google/cel-go/parser/errors.go | 6 +-
 .../google/cel-go/parser/gen/CEL.g4 | 13 +-
 .../google/cel-go/parser/gen/CEL.interp | 5 +-
 .../google/cel-go/parser/gen/CEL.tokens | 1 +
 .../google/cel-go/parser/gen/CELLexer.interp | 5 +-
 .../google/cel-go/parser/gen/CELLexer.tokens | 1 +
 .../cel-go/parser/gen/cel_base_listener.go | 28 +-
 .../cel-go/parser/gen/cel_base_visitor.go | 17 +-
 .../google/cel-go/parser/gen/cel_lexer.go | 595 +-
 .../google/cel-go/parser/gen/cel_listener.go | 29 +-
 .../google/cel-go/parser/gen/cel_parser.go | 2383 +-
 .../google/cel-go/parser/gen/cel_visitor.go | 19 +-
 .../github.com/google/cel-go/parser/helper.go | 5 +
 .../github.com/google/cel-go/parser/macro.go | 211 +-
 .../google/cel-go/parser/options.go | 23 +
 .../github.com/google/cel-go/parser/parser.go | 85 +-
 .../google/cel-go/parser/unescape.go | 42 +-
 .../google/cel-go/parser/unparser.go | 42 +-
 .../internal/redact/redact.go | 6 +-
 .../pkg/authn/keychain.go | 4 +-
 .../go-containerregistry/pkg/crane/append.go | 19 +-
 .../go-containerregistry/pkg/crane/copy.go | 2 +-
 .../go-containerregistry/pkg/crane/pull.go | 2 +-
 .../go-containerregistry/pkg/name/digest.go | 21 +-
 .../go-containerregistry/pkg/name/ref.go | 2 +-
 .../go-containerregistry/pkg/name/registry.go | 37 +
 .../pkg/name/repository.go | 37 +
 .../go-containerregistry/pkg/name/tag.go | 40 +-
 .../go-containerregistry/pkg/v1/hash.go | 23 +-
 .../pkg/v1/layout/write.go | 6 +-
 .../pkg/v1/mutate/index.go | 28 +-
 .../pkg/v1/mutate/mutate.go | 21 +-
 .../pkg/v1/remote/pusher.go | 2 +-
 .../pkg/v1/remote/referrers.go | 2 +-
 .../pkg/v1/remote/transport/bearer.go | 20 +-
 .../pkg/v1/remote/write.go | 4 +-
 .../github.com/google/gofuzz/.travis.yml | 10 -
 .../github.com/google/gofuzz/CONTRIBUTING.md | 67 -
 tools/vendor/github.com/google/gofuzz/fuzz.go | 605 -
 .../github.com/gorilla/websocket/README.md | 17 +-
 .../github.com/gorilla/websocket/client.go | 245 +-
 .../gorilla/websocket/compression.go | 6 +-
 .../github.com/gorilla/websocket/conn.go | 112 +-
 .../github.com/gorilla/websocket/proxy.go | 53 +-
 .../github.com/gorilla/websocket/server.go | 122 +-
 .../gorilla/websocket/tls_handshake.go | 21 -
 .../gorilla/websocket/tls_handshake_116.go | 21 -
 .../github.com/gorilla/websocket/util.go | 15 +
 .../gorilla/websocket/x_net_proxy.go | 473 -
 .../grpc-gateway/v2/runtime/errors.go | 32 +-
 .../grpc-gateway/v2/runtime/handler.go | 22 +-
 .../grpc-gateway/v2/runtime/marshaler.go | 8 +
 .../grpc-gateway/v2/runtime/mux.go | 8 +
 .../grpc-gateway/v2/runtime/query.go | 22 +-
 .../github.com/klauspost/compress/README.md | 140 +-
 .../klauspost/compress/flate/deflate.go | 1017 +
 .../klauspost/compress/flate/dict_decoder.go | 184 +
 .../klauspost/compress/flate/fast_encoder.go | 232 +
 .../compress/flate/huffman_bit_writer.go | 1183 +
 .../klauspost/compress/flate/huffman_code.go | 417 +
 .../compress/flate/huffman_sortByFreq.go | 159 +
 .../compress/flate/huffman_sortByLiteral.go | 201 +
 .../klauspost/compress/flate/inflate.go | 865 +
 .../klauspost/compress/flate/inflate_gen.go | 1283 +
 .../klauspost/compress/flate/level1.go | 215 +
 .../klauspost/compress/flate/level2.go | 214 +
 .../klauspost/compress/flate/level3.go | 241 +
 .../klauspost/compress/flate/level4.go | 221 +
 .../klauspost/compress/flate/level5.go | 708 +
 .../klauspost/compress/flate/level6.go | 325 +
 .../compress/flate/matchlen_generic.go | 34 +
 .../klauspost/compress/flate/regmask_amd64.go | 37 +
 .../klauspost/compress/flate/regmask_other.go | 40 +
 .../klauspost/compress/flate/stateless.go | 313 +
 .../klauspost/compress/flate/token.go | 379 +
 .../klauspost/compress/huff0/bitreader.go | 25 +-
 .../klauspost/compress/internal/le/le.go | 5 +
 .../compress/internal/le/unsafe_disabled.go | 42 +
 .../compress/internal/le/unsafe_enabled.go | 55 +
 .../github.com/klauspost/compress/s2sx.mod | 3 +-
 .../klauspost/compress/zstd/README.md | 2 +-
 .../klauspost/compress/zstd/bitreader.go | 37 +-
 .../klauspost/compress/zstd/blockdec.go | 19 -
 .../klauspost/compress/zstd/blockenc.go | 27 +-
 .../klauspost/compress/zstd/decoder.go | 3 +-
 .../klauspost/compress/zstd/enc_base.go | 2 +-
 .../compress/zstd/matchlen_generic.go | 11 +-
 .../klauspost/compress/zstd/seqdec.go | 2 +-
 .../klauspost/compress/zstd/seqdec_amd64.s | 64 +-
 .../klauspost/compress/zstd/seqdec_generic.go | 2 +-
 .../klauspost/compress/zstd/seqenc.go | 2 -
 .../klauspost/compress/zstd/snappy.go | 4 +-
 .../klauspost/compress/zstd/zstd.go | 7 +-
 .../github.com/klauspost/pgzip/.gitignore | 24 +
 .../github.com/klauspost/pgzip/.travis.yml | 28 +
 .../github.com/klauspost/pgzip/GO_LICENSE | 27 +
 .../vendor/github.com/klauspost/pgzip/LICENSE | 22 +
 .../github.com/klauspost/pgzip/README.md | 134 +
 .../github.com/klauspost/pgzip/gunzip.go | 597 +
 .../vendor/github.com/klauspost/pgzip/gzip.go | 519 +
 .../letsencrypt/boulder/LICENSE.txt | 375 +
 .../letsencrypt/boulder/core/challenges.go | 41 +
 .../letsencrypt/boulder/core/interfaces.go | 14 +
 .../letsencrypt/boulder/core/objects.go | 504 +
 .../letsencrypt/boulder/core/proto/core.pb.go | 1164 +
 .../letsencrypt/boulder/core/proto/core.proto | 139 +
 .../letsencrypt/boulder/core/util.go | 400 +
 .../letsencrypt/boulder/goodkey/good_key.go | 425 +
 .../boulder/identifier/identifier.go | 214 +
 .../letsencrypt/boulder/probs/probs.go | 366 +
 .../letsencrypt/boulder/revocation/reasons.go | 72 +
 .../mailru/easyjson/jlexer/bytestostr.go | 5 +-
 .../mailru/easyjson/jlexer/lexer.go | 113 +-
 .../mailru/easyjson/jwriter/writer.go | 12 +
 .../github.com/mattn/go-sqlite3/README.md | 11 +-
 .../github.com/mattn/go-sqlite3/callback.go | 3 +-
 .../mattn/go-sqlite3/sqlite3-binding.c | 18225 +++++--
 .../mattn/go-sqlite3/sqlite3-binding.h | 800 +-
 .../github.com/mattn/go-sqlite3/sqlite3.go | 80 +-
 .../go-sqlite3/sqlite3_opt_unlock_notify.c | 4 +
 .../go-sqlite3/sqlite3_opt_unlock_notify.go | 4 +
 .../mattn/go-sqlite3/sqlite3_opt_userauth.go | 153 +-
 .../github.com/mattn/go-sqlite3/sqlite3ext.h | 4 +
 .../vendor/github.com/miekg/pkcs11/.gitignore | 3 +
 .../gocapability => miekg/pkcs11}/LICENSE | 17 +-
 .../github.com/miekg/pkcs11/Makefile.release | 57 +
 .../vendor/github.com/miekg/pkcs11/README.md | 68 +
 tools/vendor/github.com/miekg/pkcs11/error.go | 98 +
 tools/vendor/github.com/miekg/pkcs11/hsm.db | Bin 0 -> 10240 bytes
 .../vendor/github.com/miekg/pkcs11/params.go | 190 +
 .../vendor/github.com/miekg/pkcs11/pkcs11.go | 1609 +
 tools/vendor/github.com/miekg/pkcs11/pkcs11.h | 265 +
 .../vendor/github.com/miekg/pkcs11/pkcs11f.h | 939 +
 .../vendor/github.com/miekg/pkcs11/pkcs11go.h | 33 +
 .../vendor/github.com/miekg/pkcs11/pkcs11t.h | 2047 +
 .../vendor/github.com/miekg/pkcs11/release.go | 18 +
 .../github.com/miekg/pkcs11/softhsm.conf | 1 +
 .../github.com/miekg/pkcs11/softhsm2.conf | 4 +
 tools/vendor/github.com/miekg/pkcs11/types.go | 315 +
 .../vendor/github.com/miekg/pkcs11/vendor.go | 127 +
 .../vendor/github.com/miekg/pkcs11/zconst.go | 766 +
 .../moby/sys/sequential/sequential_unix.go | 27 +-
 .../moby/sys/sequential/sequential_windows.go | 89 +-
 .../github.com/moby/sys/user/idtools.go | 141 +
 .../github.com/moby/sys/user/idtools_unix.go | 143 +
 .../moby/sys/user/idtools_windows.go | 13 +
 .../vendor/github.com/moby/term/term_unix.go | 2 +-
 .../github.com/onsi/gomega/types/types.go | 49 +-
 .../image-spec/specs-go/version.go | 2 +-
 .../runtime-spec/specs-go/config.go | 68 +-
 .../runtime-spec/specs-go/version.go | 2 +-
 .../internal/version/version.go | 2 +-
 .../scaffolds/internal/templates/makefile.go | 8 +-
 .../pkg/plugins/util/cleanup.go | 313 +-
 .../api/pkg/manifests/bundleloader.go | 2 +-
 .../operators/v1alpha1/catalogsource_types.go | 6 +-
 .../operators/v1alpha1/installplan_types.go | 2 +
 .../operators/v1alpha1/subscription_types.go | 2 +
 .../api/pkg/validation/internal/multiarch.go | 2 +-
 .../operator-registry/alpha/action/render.go | 39 +-
 .../alpha/declcfg/declcfg.go | 3 +-
 .../alpha/declcfg/declcfg_to_model.go | 2 +-
 .../operator-registry/alpha/declcfg/load.go | 23 +-
 .../alpha/declcfg/model_to_declcfg.go | 2 +
 .../operator-registry/alpha/declcfg/write.go | 157 +-
 .../operator-registry/alpha/model/error.go | 7 +-
 .../operator-registry/alpha/model/model.go | 5 +
 .../alpha/property/property.go | 4 +-
 .../operator-registry/pkg/api/api_to_model.go | 1 +
 .../operator-registry/pkg/api/model_to_api.go | 21 +-
 .../pkg/containertools/containertool.go | 15 +-
 .../pkg/containertools/dockerfilegenerator.go | 8 +-
 .../pkg/containertools/labelreader.go | 1 +
 .../pkg/containertools/runner.go | 19 +-
 .../pkg/image/containerdregistry/options.go | 21 +-
 .../pkg/image/containerdregistry/registry.go | 40 +-
 .../pkg/image/containerdregistry/resolver.go | 6 +-
 .../image/containersimageregistry/registry.go | 294 +
 .../pkg/image/execregistry/registry.go | 2 +-
 .../operator-registry/pkg/image/mock.go | 2 +-
 .../operator-registry/pkg/lib/log/null.go | 13 -
 .../pkg/lib/log/writerhook.go | 76 -
 .../pkg/lib/semver/semver.go | 2 +
 .../pkg/lib/validation/bundle.go | 10 +-
 .../prettyunmarshaler/prettyunmarshaler.go | 15 +-
 .../operator-registry/pkg/registry/bundle.go | 21 +-
 .../pkg/registry/bundlegraphloader.go | 5 +-
 .../pkg/registry/channelupdateoptions.go | 1 +
 .../operator-registry/pkg/registry/csv.go | 60 +-
 .../operator-registry/pkg/registry/decode.go | 22 +-
 .../pkg/registry/directoryGraphLoader.go | 6 +-
 .../operator-registry/pkg/registry/empty.go | 13 +-
 .../operator-registry/pkg/registry/graph.go | 2 +-
 .../pkg/registry/imageinput.go | 3 +-
 .../operator-registry/pkg/registry/parse.go | 6 +-
 .../pkg/registry/populator.go | 4 +
 .../pkg/registry/registry_to_model.go | 3 +-
 .../operator-registry/pkg/registry/types.go | 9 +
 .../operator-registry/pkg/sqlite/configmap.go | 16 +-
 .../pkg/sqlite/conversion.go | 4 +-
 .../pkg/sqlite/db_options.go | 2 +
 .../operator-registry/pkg/sqlite/deprecate.go | 1 +
 .../pkg/sqlite/deprecationmessage.go | 2 +-
 .../operator-registry/pkg/sqlite/directory.go | 9 +-
 .../operator-registry/pkg/sqlite/load.go | 61 +-
 .../operator-registry/pkg/sqlite/loadprocs.go | 4 +-
 .../sqlite/migrations/001_related_images.go | 14 +-
 .../sqlite/migrations/003_required_apis.go | 40 +-
 .../migrations/005_version_skiprange.go | 4 +-
 .../006_associate_apis_with_bundle.go | 4 +-
 .../sqlite/migrations/007_replaces_skips.go | 14 +-
 .../pkg/sqlite/migrations/009_properties.go | 5 +-
 .../010_set_bundlepath_pkg_property.go | 8 +-
 .../pkg/sqlite/migrations/012_deprecated.go | 1 +
 .../pkg/sqlite/migrations/migrations.go | 1 +
 .../operator-registry/pkg/sqlite/migrator.go | 16 +-
 .../operator-registry/pkg/sqlite/query.go | 91 +-
 .../internal/plugins/helm/v1/api.go | 3 +
 .../plugins/helm/v1/chartutil/chart.go | 16 +
 .../scaffolds/internal/templates/makefile.go | 15 +-
 .../internal/plugins/manifests/v2/init.go | 29 +-
 .../internal/plugins/util/cleanup.go | 313 +-
 .../vendor/github.com/otiai10/copy/README.md | 6 +
 tools/vendor/github.com/otiai10/copy/copy.go | 61 +-
 .../otiai10/copy/copy_namedpipes.go | 1 -
 .../otiai10/copy/copy_namedpipes_x.go | 1 -
 .../otiai10/copy/fileinfo_go1.15.go | 17 -
 .../otiai10/copy/fileinfo_go1.16.go | 17 -
 .../vendor/github.com/otiai10/copy/options.go | 3 +
 .../otiai10/copy/permission_control.go | 9 +-
 .../otiai10/copy/preserve_ltimes.go | 1 -
 .../otiai10/copy/preserve_ltimes_x.go | 1 -
 .../github.com/otiai10/copy/preserve_owner.go | 4 +-
 .../otiai10/copy/preserve_owner_x.go | 5 +-
 .../github.com/otiai10/copy/stat_times.go | 1 -
 .../otiai10/copy/stat_times_darwin.go | 1 -
 .../otiai10/copy/stat_times_freebsd.go | 1 -
 .../github.com/otiai10/copy/stat_times_js.go | 1 -
 .../otiai10/copy/stat_times_windows.go | 1 -
 .../github.com/otiai10/copy/stat_times_x.go | 1 -
 .../github.com/otiai10/copy/symlink_test_x.go | 45 +
 .../github.com/otiai10/copy/test_setup.go | 20 -
 .../github.com/otiai10/copy/test_setup_x.go | 17 -
 .../vendor/github.com/otiai10/mint/.gitignore | 2 +
 tools/vendor/github.com/otiai10/mint/LICENSE | 7 +
 .../vendor/github.com/otiai10/mint/README.md | 62 +
 .../vendor/github.com/otiai10/mint/because.go | 15 +
 .../github.com/otiai10/mint/comparer.go | 53 +
 tools/vendor/github.com/otiai10/mint/exit.go | 41 +
 .../github.com/otiai10/mint/exit_freebsd.go | 10 +
 tools/vendor/github.com/otiai10/mint/log.go | 15 +
 tools/vendor/github.com/otiai10/mint/mint.go | 86 +
 tools/vendor/github.com/otiai10/mint/mocks.go | 30 +
 .../github.com/otiai10/mint/mquery/README.md | 31 +
 .../github.com/otiai10/mint/mquery/mquery.go | 72 +
 .../vendor/github.com/otiai10/mint/result.go | 23 +
 .../vendor/github.com/otiai10/mint/testee.go | 145 +
 .../pelletier/go-toml/v2/.goreleaser.yaml | 2 +-
 .../pelletier/go-toml/v2/unmarshaler.go | 2 +-
 .../github.com/proglottis/gpgme/.gitignore | 3 +
 .../github.com/proglottis/gpgme/LICENSE | 12 +
 .../github.com/proglottis/gpgme/README.md | 13 +
 .../github.com/proglottis/gpgme/data.go | 226 +
 .../github.com/proglottis/gpgme/go_gpgme.c | 103 +
 .../github.com/proglottis/gpgme/go_gpgme.h | 42 +
 .../github.com/proglottis/gpgme/gpgme.go | 982 +
 .../proglottis/gpgme/unset_agent_info.go | 19 +
 .../gpgme/unset_agent_info_windows.go | 14 +
 .../client_golang/prometheus/collectorfunc.go | 30 +
 .../client_golang/prometheus/desc.go | 3 +-
 .../prometheus/internal/difflib.go | 4 +-
 .../prometheus/internal/go_runtime_metrics.go | 2 +-
 .../client_golang/prometheus/labels.go | 3 +-
 .../client_golang/prometheus/metric.go | 25 +-
 .../prometheus/process_collector_darwin.go | 6 +-
 .../process_collector_mem_nocgo_darwin.go | 2 +-
 .../process_collector_procfsenabled.go | 8 +-
 .../client_golang/prometheus/promhttp/http.go | 26 +-
 .../prometheus/promhttp/instrument_server.go | 2 +-
 .../promhttp/internal/compression.go | 21 +
 .../client_golang/prometheus/vec.go | 10 +-
 .../client_golang/prometheus/wrap.go | 36 +-
 .../prometheus/common/expfmt/decode.go | 39 +-
 .../prometheus/common/expfmt/encode.go | 10 +-
 .../prometheus/common/expfmt/expfmt.go | 12 +-
 .../prometheus/common/expfmt/fuzz.go | 9 +-
 .../common/expfmt/openmetrics_create.go | 11 +-
 .../prometheus/common/expfmt/text_create.go | 8 +-
 .../prometheus/common/expfmt/text_parse.go | 48 +-
 .../prometheus/common/model/alert.go | 2 +-
 .../prometheus/common/model/labels.go | 36 +-
 .../prometheus/common/model/labelset.go | 10 +-
 .../prometheus/common/model/metric.go | 224 +-
 .../prometheus/common/model/time.go | 37 +-
 .../prometheus/common/model/value.go | 15 +-
 .../common/model/value_histogram.go | 10 +-
 .../prometheus/common/model/value_type.go | 4 +-
 .../prometheus/procfs/.golangci.yml | 63 +-
 .../prometheus/procfs/Makefile.common | 10 +-
 .../github.com/prometheus/procfs/README.md | 6 +-
 .../github.com/prometheus/procfs/arp.go | 4 +-
 .../vendor/github.com/prometheus/procfs/fs.go | 10 +-
 .../prometheus/procfs/fs_statfs_notype.go | 4 +-
 .../github.com/prometheus/procfs/fscache.go | 6 +-
 .../prometheus/procfs/internal/fs/fs.go | 3 +
 .../prometheus/procfs/internal/util/parse.go | 14 +
 .../procfs/internal/util/sysreadfile.go | 20 +
 .../prometheus/procfs/mountstats.go | 27 +-
 .../prometheus/procfs/net_dev_snmp6.go | 96 +
 .../prometheus/procfs/net_ip_socket.go | 8 +-
 .../prometheus/procfs/net_protocols.go | 21 +-
 .../github.com/prometheus/procfs/net_tcp.go | 4 +
 .../github.com/prometheus/procfs/net_unix.go | 8 +-
 .../github.com/prometheus/procfs/proc.go | 8 +-
 .../prometheus/procfs/proc_cgroup.go | 2 +-
 .../github.com/prometheus/procfs/proc_io.go | 2 +-
 .../prometheus/procfs/proc_netstat.go | 224 +-
 .../prometheus/procfs/proc_smaps.go | 4 +-
 .../github.com/prometheus/procfs/proc_snmp.go | 120 +-
 .../prometheus/procfs/proc_snmp6.go | 150 +-
 .../prometheus/procfs/proc_status.go | 18 +-
 .../github.com/prometheus/procfs/proc_sys.go | 2 +-
 .../github.com/prometheus/procfs/softirqs.go | 22 +-
 .../rubenv/sql-migrate/.golangci.yaml | 204 +-
 .../github.com/rubenv/sql-migrate/migrate.go | 12 +-
 .../github.com/sagikazarmark/locafero/.envrc | 4 +-
 .../sagikazarmark/locafero/.golangci.yaml | 48 +-
 .../sagikazarmark/locafero/README.md | 4 +-
 .../sagikazarmark/locafero/file_type.go | 12 +-
 .../sagikazarmark/locafero/finder.go | 102 +-
 .../sagikazarmark/locafero/flake.lock | 401 +-
 .../sagikazarmark/locafero/flake.nix | 66 +-
 .../sagikazarmark/locafero/justfile | 7 +-
 .../santhosh-tekuri/jsonschema/v6/.gitmodules | 4 +
 .../jsonschema/v6/.golangci.yml | 7 +
 .../jsonschema/v6/.pre-commit-hooks.yaml | 7 +
 .../santhosh-tekuri/jsonschema/v6/LICENSE | 175 +
 .../santhosh-tekuri/jsonschema/v6/README.md | 88 +
 .../santhosh-tekuri/jsonschema/v6/compiler.go | 332 +
 .../santhosh-tekuri/jsonschema/v6/content.go | 51 +
 .../santhosh-tekuri/jsonschema/v6/draft.go | 360 +
 .../santhosh-tekuri/jsonschema/v6/format.go | 708 +
 .../santhosh-tekuri/jsonschema/v6/go.work | 8 +
 .../santhosh-tekuri/jsonschema/v6/go.work.sum | 4 +
 .../jsonschema/v6/kind/kind.go | 651 +
 .../santhosh-tekuri/jsonschema/v6/loader.go | 266 +
 .../jsonschema/v6/metaschemas/draft-04/schema | 151 +
 .../jsonschema/v6/metaschemas/draft-06/schema | 150 +
 .../jsonschema/v6/metaschemas/draft-07/schema | 172 +
 .../metaschemas/draft/2019-09/meta/applicator | 55 +
 .../v6/metaschemas/draft/2019-09/meta/content | 15 +
 .../v6/metaschemas/draft/2019-09/meta/core | 56 +
 .../v6/metaschemas/draft/2019-09/meta/format | 13 +
 .../metaschemas/draft/2019-09/meta/meta-data | 35 +
 .../metaschemas/draft/2019-09/meta/validation | 97 +
 .../v6/metaschemas/draft/2019-09/schema | 41 +
 .../metaschemas/draft/2020-12/meta/applicator | 47 +
 .../v6/metaschemas/draft/2020-12/meta/content | 15 +
 .../v6/metaschemas/draft/2020-12/meta/core | 50 +
 .../draft/2020-12/meta/format-annotation | 13 +
 .../draft/2020-12/meta/format-assertion | 13 +
 .../metaschemas/draft/2020-12/meta/meta-data | 35 +
 .../draft/2020-12/meta/unevaluated | 14 +
 .../metaschemas/draft/2020-12/meta/validation | 97 +
 .../v6/metaschemas/draft/2020-12/schema | 57 +
 .../jsonschema/v6/objcompiler.go | 549 +
 .../santhosh-tekuri/jsonschema/v6/output.go | 216 +
 .../santhosh-tekuri/jsonschema/v6/position.go | 142 +
 .../santhosh-tekuri/jsonschema/v6/root.go | 202 +
 .../santhosh-tekuri/jsonschema/v6/roots.go | 286 +
 .../santhosh-tekuri/jsonschema/v6/schema.go | 254 +
 .../santhosh-tekuri/jsonschema/v6/util.go | 464 +
 .../jsonschema/v6/validator.go | 975 +
 .../santhosh-tekuri/jsonschema/v6/vocab.go | 111 +
 .../go-securesystemslib/LICENSE | 21 +
 .../encrypted/encrypted.go | 290 +
 .../sergi/go-diff/diffmatchpatch/diff.go | 39 +-
 .../sergi/go-diff/diffmatchpatch/index.go | 32 +
 .../go-diff/diffmatchpatch/stringutil.go | 4 +-
 .../doc.go => sigstore/fulcio/COPYRIGHT.txt} | 8 +-
 .../fulcio/LICENSE} | 3 +-
 .../sigstore/fulcio/pkg/certificate/doc.go | 17 +
 .../fulcio/pkg/certificate/extensions.go | 439 +
 .../sigstore/protobuf-specs/COPYRIGHT.txt | 14 +
 .../protobuf-specs/LICENSE} | 2 +-
 .../gen/pb-go/common/v1/sigstore_common.pb.go | 1299 +
 .../sigstore/sigstore/COPYRIGHT.txt | 14 +
 .../github.com/sigstore/sigstore/LICENSE | 202 +
 .../sigstore/pkg/cryptoutils/certificate.go | 173 +
 .../sigstore/sigstore/pkg/cryptoutils/doc.go | 17 +
 .../sigstore/pkg/cryptoutils/generic.go | 31 +
 .../sigstore/pkg/cryptoutils/password.go | 94 +
 .../sigstore/pkg/cryptoutils/privatekey.go | 152 +
 .../sigstore/pkg/cryptoutils/publickey.go | 186 +
 .../sigstore/sigstore/pkg/cryptoutils/sans.go | 149 +
 .../pkg/signature/algorithm_registry.go | 314 +
 .../sigstore/sigstore/pkg/signature/doc.go | 17 +
 .../sigstore/sigstore/pkg/signature/ecdsa.go | 270 +
 .../sigstore/pkg/signature/ed25519.go | 197 +
 .../sigstore/pkg/signature/ed25519ph.go | 211 +
 .../sigstore/pkg/signature/message.go | 111 +
 .../sigstore/pkg/signature/options.go | 65 +
 .../sigstore/pkg/signature/options/context.go | 37 +
 .../sigstore/pkg/signature/options/digest.go | 35 +
 .../sigstore/pkg/signature/options/doc.go | 17 +
 .../pkg/signature/options/keyversion.go | 50 +
 .../pkg/signature/options/loadoptions.go | 76 +
 .../sigstore/pkg/signature/options/noop.go | 59 +
 .../sigstore/pkg/signature/options/rand.go | 41 +
 .../signature/options/remoteverification.go | 32 +
 .../sigstore/pkg/signature/options/rpcauth.go | 58 +
 .../pkg/signature/options/signeropts.go | 40 +
 .../sigstore/pkg/signature/payload/doc.go | 17 +
 .../sigstore/pkg/signature/payload/payload.go | 122 +
 .../sigstore/pkg/signature/publickey.go | 25 +
 .../sigstore/pkg/signature/rsapkcs1v15.go | 225 +
 .../sigstore/sigstore/pkg/signature/rsapss.go | 260 +
 .../sigstore/sigstore/pkg/signature/signer.go | 147 +
 .../sigstore/pkg/signature/signerverifier.go | 127 +
 .../sigstore/sigstore/pkg/signature/util.go | 74 +
 .../sigstore/pkg/signature/verifier.go | 156 +
 .../pkcs7}/.gitignore | 16 +-
 .../vendor/github.com/smallstep/pkcs7/LICENSE | 22 +
 .../github.com/smallstep/pkcs7/Makefile | 20 +
 .../github.com/smallstep/pkcs7/README.md | 63 +
 .../vendor/github.com/smallstep/pkcs7/ber.go | 266 +
 .../github.com/smallstep/pkcs7/decrypt.go | 233 +
 .../github.com/smallstep/pkcs7/encrypt.go | 475 +
 .../pkcs7/internal/legacy/x509/debug.go | 14 +
 .../pkcs7/internal/legacy/x509/doc.go | 14 +
 .../pkcs7/internal/legacy/x509/oid.go | 377 +
 .../pkcs7/internal/legacy/x509/parser.go | 1027 +
 .../pkcs7/internal/legacy/x509/pkcs1.go | 15 +
 .../pkcs7/internal/legacy/x509/verify.go | 193 +
 .../pkcs7/internal/legacy/x509/x509.go | 488 +
 .../github.com/smallstep/pkcs7/pkcs7.go | 353 +
 .../vendor/github.com/smallstep/pkcs7/sign.go | 474 +
 .../github.com/smallstep/pkcs7/verify.go | 385 +
 .../github.com/sourcegraph/conc/Makefile | 24 +
 .../internal/multierror/multierror_go119.go | 10 -
 .../internal/multierror/multierror_go120.go | 10 -
 .../github.com/sourcegraph/conc/iter/iter.go | 85 -
 .../github.com/sourcegraph/conc/iter/map.go | 65 -
 .../sourcegraph/conc/pool/context_pool.go | 104 +
 .../sourcegraph/conc/pool/error_pool.go | 100 +
 .../github.com/sourcegraph/conc/pool/pool.go | 174 +
 .../conc/pool/result_context_pool.go | 85 +
 .../conc/pool/result_error_pool.go | 80 +
 .../sourcegraph/conc/pool/result_pool.go | 142 +
 .../github.com/spf13/afero/.editorconfig | 3 +
 .../github.com/spf13/afero/.golangci.yaml | 62 +-
 tools/vendor/github.com/spf13/afero/README.md | 679 +-
 .../github.com/spf13/afero/copyOnWriteFs.go | 9 +-
 tools/vendor/github.com/spf13/afero/iofs.go | 9 +-
 .../vendor/github.com/spf13/afero/lstater.go | 4 +-
 .../vendor/github.com/spf13/afero/mem/file.go | 22 +-
 .../github.com/spf13/afero/unionFile.go | 5 +-
 tools/vendor/github.com/spf13/afero/util.go | 4 +-
 .../github.com/spf13/cast/.editorconfig | 15 +
 .../github.com/spf13/cast/.golangci.yaml | 39 +
 tools/vendor/github.com/spf13/cast/README.md | 12 +-
 tools/vendor/github.com/spf13/cast/alias.go | 69 +
 tools/vendor/github.com/spf13/cast/basic.go | 131 +
 tools/vendor/github.com/spf13/cast/cast.go | 232 +-
 tools/vendor/github.com/spf13/cast/caste.go | 1510 -
 .../vendor/github.com/spf13/cast/indirect.go | 37 +
 .../github.com/spf13/cast/internal/time.go | 79 +
 .../cast/internal/timeformattype_string.go | 27 +
 tools/vendor/github.com/spf13/cast/map.go | 212 +
 tools/vendor/github.com/spf13/cast/number.go | 549 +
 tools/vendor/github.com/spf13/cast/slice.go | 106 +
 tools/vendor/github.com/spf13/cast/time.go | 116 +
 .../spf13/cast/timeformattype_string.go | 27 -
 .../github.com/spf13/cast/zz_generated.go | 261 +
 .../github.com/spf13/cobra/.golangci.yml | 28 +-
 tools/vendor/github.com/spf13/cobra/README.md | 24 +-
 .../vendor/github.com/spf13/cobra/SECURITY.md | 105 +
 .../vendor/github.com/spf13/cobra/command.go | 11 +-
 .../github.com/spf13/cobra/completions.go | 19 +-
 tools/vendor/github.com/spf13/pflag/README.md | 27 +
 .../github.com/spf13/pflag/bool_func.go | 40 +
 tools/vendor/github.com/spf13/pflag/count.go | 2 +-
 tools/vendor/github.com/spf13/pflag/errors.go | 149 +
 tools/vendor/github.com/spf13/pflag/flag.go | 121 +-
 tools/vendor/github.com/spf13/pflag/func.go | 37 +
 .../github.com/spf13/pflag/golangflag.go | 56 +
 .../github.com/spf13/pflag/ipnet_slice.go | 2 +-
 .../spf13/pflag/string_to_string.go | 10 +-
 tools/vendor/github.com/spf13/pflag/text.go | 81 +
 tools/vendor/github.com/spf13/pflag/time.go | 124 +
 .../github.com/spf13/viper/.editorconfig | 3 +
 .../github.com/spf13/viper/.golangci.yaml | 209 +-
 tools/vendor/github.com/spf13/viper/README.md | 4 +-
 .../spf13/viper/{UPDATES.md => UPGRADE.md} | 21 +
 .../vendor/github.com/spf13/viper/flake.lock | 401 +-
 tools/vendor/github.com/spf13/viper/flake.nix | 76 +-
 .../viper/internal/encoding/yaml/codec.go | 2 +-
 tools/vendor/github.com/spf13/viper/remote.go | 5 +-
 tools/vendor/github.com/spf13/viper/util.go | 5 +-
 tools/vendor/github.com/spf13/viper/viper.go | 74 +-
 .../stefanberger/go-pkcs11uri/.gitignore | 2 +
 .../stefanberger/go-pkcs11uri/.travis.yml | 25 +
 .../go-pkcs11uri}/LICENSE | 14 -
 .../stefanberger/go-pkcs11uri/Makefile | 28 +
 .../stefanberger/go-pkcs11uri/README.md | 102 +
 .../stefanberger/go-pkcs11uri/pkcs11uri.go | 484 +
 .../stoewer/go-strcase/.golangci.yml | 37 +-
 .../github.com/stoewer/go-strcase/camel.go | 3 +
 .../github.com/stoewer/go-strcase/helper.go | 6 +
 .../testify/assert/assertion_compare.go | 22 +-
 .../testify/assert/assertion_format.go | 51 +-
 .../testify/assert/assertion_forward.go | 102 +-
 .../testify/assert/assertion_order.go | 2 +-
 .../stretchr/testify/assert/assertions.go | 367 +-
 .../github.com/stretchr/testify/assert/doc.go | 4 +
 .../testify/assert/http_assertions.go | 4 +-
 .../testify/assert/yaml/yaml_custom.go | 1 -
 .../testify/assert/yaml/yaml_default.go | 1 -
 .../stretchr/testify/assert/yaml/yaml_fail.go | 1 -
 .../stretchr/testify/require/doc.go | 2 +
 .../stretchr/testify/require/require.go | 108 +-
 .../testify/require/require_forward.go | 102 +-
 .../gocapability/capability/capability.go | 133 -
 .../capability/capability_linux.go | 642 -
 .../capability/capability_noop.go | 19 -
 .../syndtr/gocapability/capability/enum.go | 309 -
 .../gocapability/capability/enum_gen.go | 138 -
 .../gocapability/capability/syscall_linux.go | 154 -
 .../github.com/titanous/rocacheck/LICENSE | 22 +
 .../github.com/titanous/rocacheck/README.md | 7 +
 .../titanous/rocacheck/rocacheck.go | 52 +
 .../vendor/github.com/ulikunitz/xz/.gitignore | 28 +
 tools/vendor/github.com/ulikunitz/xz/LICENSE | 26 +
 .../vendor/github.com/ulikunitz/xz/README.md | 88 +
 .../github.com/ulikunitz/xz/SECURITY.md | 19 +
 tools/vendor/github.com/ulikunitz/xz/TODO.md | 386 +
 tools/vendor/github.com/ulikunitz/xz/bits.go | 79 +
 tools/vendor/github.com/ulikunitz/xz/crc.go | 54 +
 .../vendor/github.com/ulikunitz/xz/format.go | 721 +
 .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 0 -> 96 bytes
 tools/vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes
 .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 +
 .../ulikunitz/xz/internal/hash/doc.go | 14 +
 .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 +
 .../ulikunitz/xz/internal/hash/roller.go | 29 +
 .../ulikunitz/xz/internal/xlog/xlog.go | 456 +
 .../github.com/ulikunitz/xz/lzma/bintree.go | 522 +
 .../github.com/ulikunitz/xz/lzma/bitops.go | 47 +
 .../github.com/ulikunitz/xz/lzma/breader.go | 39 +
 .../github.com/ulikunitz/xz/lzma/buffer.go | 171 +
 .../ulikunitz/xz/lzma/bytewriter.go | 37 +
 .../github.com/ulikunitz/xz/lzma/decoder.go | 277 +
 .../ulikunitz/xz/lzma/decoderdict.go | 128 +
 .../ulikunitz/xz/lzma/directcodec.go | 38 +
 .../github.com/ulikunitz/xz/lzma/distcodec.go | 140 +
 .../github.com/ulikunitz/xz/lzma/encoder.go | 268 +
 .../ulikunitz/xz/lzma/encoderdict.go | 149 +
 .../github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes
 .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 +
 .../github.com/ulikunitz/xz/lzma/header.go | 170 +
 .../github.com/ulikunitz/xz/lzma/header2.go | 398 +
 .../ulikunitz/xz/lzma/lengthcodec.go | 115 +
 .../ulikunitz/xz/lzma/literalcodec.go | 125 +
 .../ulikunitz/xz/lzma/matchalgorithm.go | 52 +
 .../github.com/ulikunitz/xz/lzma/operation.go | 55 +
 .../github.com/ulikunitz/xz/lzma/prob.go | 53 +
 .../ulikunitz/xz/lzma/properties.go | 69 +
 .../ulikunitz/xz/lzma/rangecodec.go | 222 +
 .../github.com/ulikunitz/xz/lzma/reader.go | 193 +
 .../github.com/ulikunitz/xz/lzma/reader2.go | 231 +
 .../github.com/ulikunitz/xz/lzma/state.go | 145 +
 .../ulikunitz/xz/lzma/treecodecs.go | 133 +
 .../github.com/ulikunitz/xz/lzma/writer.go | 209 +
 .../github.com/ulikunitz/xz/lzma/writer2.go | 305 +
 .../github.com/ulikunitz/xz/lzmafilter.go | 117 +
 .../vendor/github.com/ulikunitz/xz/make-docs | 5 +
 .../github.com/ulikunitz/xz/none-check.go | 23 +
 .../vendor/github.com/ulikunitz/xz/reader.go | 359 +
 .../vendor/github.com/ulikunitz/xz/writer.go | 399 +
 .../vbatts/tar-split/archive/tar/writer.go | 3 +
 .../vbatts/tar-split/tar/asm/README.md | 44 +
 .../vbatts/tar-split/tar/asm/assemble.go | 132 +
 .../vbatts/tar-split/tar/asm/disassemble.go | 156 +
 .../vbatts/tar-split/tar/asm/doc.go | 9 +
 .../vbatts/tar-split/tar/asm/iterate.go | 57 +
 .../vbatts/tar-split/tar/storage/doc.go | 12 +
 .../vbatts/tar-split/tar/storage/entry.go | 78 +
 .../vbatts/tar-split/tar/storage/getter.go | 105 +
 .../vbatts/tar-split/tar/storage/packer.go | 110 +
 .../github.com/vbauerster/mpb/v8/.gitignore | 5 +
 .../github.com/vbauerster/mpb/v8/CONTRIBUTING | 15 +
 .../github.com/vbauerster/mpb/v8/README.md | 117 +
 .../github.com/vbauerster/mpb/v8/UNLICENSE | 24 +
 .../github.com/vbauerster/mpb/v8/bar.go | 582 +
 .../vbauerster/mpb/v8/bar_filler.go | 31 +
 .../vbauerster/mpb/v8/bar_filler_bar.go | 274 +
 .../vbauerster/mpb/v8/bar_filler_nop.go | 24 +
 .../vbauerster/mpb/v8/bar_filler_spinner.go | 103 +
 .../vbauerster/mpb/v8/bar_option.go | 221 +
 .../vbauerster/mpb/v8/container_option.go | 146 +
 .../vbauerster/mpb/v8/cwriter/doc.go | 2 +
 .../vbauerster/mpb/v8/cwriter/util_bsd.go | 7 +
 .../vbauerster/mpb/v8/cwriter/util_linux.go | 7 +
 .../vbauerster/mpb/v8/cwriter/util_solaris.go | 7 +
 .../vbauerster/mpb/v8/cwriter/util_zos.go | 7 +
 .../vbauerster/mpb/v8/cwriter/writer.go | 59 +
 .../vbauerster/mpb/v8/cwriter/writer_posix.go | 48 +
 .../mpb/v8/cwriter/writer_windows.go | 101 +
 .../github.com/vbauerster/mpb/v8/decor/any.go | 21 +
 .../vbauerster/mpb/v8/decor/counters.go | 253 +
 .../vbauerster/mpb/v8/decor/decorator.go | 183 +
 .../github.com/vbauerster/mpb/v8/decor/doc.go | 19 +
 .../vbauerster/mpb/v8/decor/elapsed.go | 33 +
 .../github.com/vbauerster/mpb/v8/decor/eta.go | 226 +
 .../vbauerster/mpb/v8/decor/meta.go | 34 +
 .../vbauerster/mpb/v8/decor/moving_average.go | 74 +
 .../vbauerster/mpb/v8/decor/name.go | 11 +
 .../vbauerster/mpb/v8/decor/on_abort.go | 68 +
 .../mpb/v8/decor/on_compete_or_on_abort.go | 21 +
 .../vbauerster/mpb/v8/decor/on_complete.go | 67 +
 .../vbauerster/mpb/v8/decor/on_condition.go | 51 +
 .../vbauerster/mpb/v8/decor/percentage.go | 68 +
 .../vbauerster/mpb/v8/decor/size_type.go | 120 +
 .../mpb/v8/decor/sizeb1000_string.go | 41 +
 .../mpb/v8/decor/sizeb1024_string.go | 41 +
 .../vbauerster/mpb/v8/decor/speed.go | 185 +
 .../vbauerster/mpb/v8/decor/spinner.go | 21 +
 .../github.com/vbauerster/mpb/v8/doc.go | 2 +
 .../vbauerster/mpb/v8/heap_manager.go | 177 +
 .../vbauerster/mpb/v8/internal/percentage.go | 22 +
 .../vbauerster/mpb/v8/internal/width.go | 10 +
 .../vbauerster/mpb/v8/priority_queue.go | 37 +
 .../github.com/vbauerster/mpb/v8/progress.go | 463 +
 .../vbauerster/mpb/v8/proxyreader.go | 73 +
 .../vbauerster/mpb/v8/proxywriter.go | 96 +
 .../xeipuuv/gojsonpointer/README.md | 41 -
 .../xeipuuv/gojsonpointer/pointer.go | 211 -
 .../xeipuuv/gojsonreference/README.md | 10 -
 .../xeipuuv/gojsonreference/reference.go | 147 -
 .../xeipuuv/gojsonschema/.gitignore | 3 -
 .../xeipuuv/gojsonschema/.travis.yml | 9 -
 .../github.com/xeipuuv/gojsonschema/README.md | 466 -
 .../github.com/xeipuuv/gojsonschema/draft.go | 125 -
 .../github.com/xeipuuv/gojsonschema/errors.go | 364 -
 .../xeipuuv/gojsonschema/format_checkers.go | 368 -
 .../xeipuuv/gojsonschema/glide.yaml | 13 -
 .../xeipuuv/gojsonschema/internalLog.go | 37 -
 .../xeipuuv/gojsonschema/jsonContext.go | 73 -
 .../xeipuuv/gojsonschema/jsonLoader.go | 386 -
 .../xeipuuv/gojsonschema/locales.go | 472 -
 .../github.com/xeipuuv/gojsonschema/result.go | 220 -
 .../github.com/xeipuuv/gojsonschema/schema.go | 1087 -
 .../xeipuuv/gojsonschema/schemaLoader.go | 206 -
 .../xeipuuv/gojsonschema/schemaPool.go | 215 -
 .../gojsonschema/schemaReferencePool.go | 68 -
 .../xeipuuv/gojsonschema/schemaType.go | 83 -
 .../xeipuuv/gojsonschema/subSchema.go | 149 -
 .../github.com/xeipuuv/gojsonschema/types.go | 62 -
 .../github.com/xeipuuv/gojsonschema/utils.go | 197 -
 .../xeipuuv/gojsonschema/validation.go | 858 -
 tools/vendor/go.etcd.io/bbolt/.gitignore | 2 +
 tools/vendor/go.etcd.io/bbolt/.go-version | 2 +-
 tools/vendor/go.etcd.io/bbolt/Makefile | 62 +-
 tools/vendor/go.etcd.io/bbolt/OWNERS | 10 +
 tools/vendor/go.etcd.io/bbolt/README.md | 68 +-
 tools/vendor/go.etcd.io/bbolt/bolt_386.go | 7 -
 .../{bolt_unix_solaris.go => bolt_aix.go} | 6 +-
 tools/vendor/go.etcd.io/bbolt/bolt_amd64.go | 7 -
 tools/vendor/go.etcd.io/bbolt/bolt_android.go | 92 +
 tools/vendor/go.etcd.io/bbolt/bolt_arm.go | 7 -
 tools/vendor/go.etcd.io/bbolt/bolt_arm64.go | 10 -
 tools/vendor/go.etcd.io/bbolt/bolt_loong64.go | 10 -
 tools/vendor/go.etcd.io/bbolt/bolt_mips64x.go | 10 -
 tools/vendor/go.etcd.io/bbolt/bolt_mipsx.go | 10 -
 tools/vendor/go.etcd.io/bbolt/bolt_ppc.go | 10 -
 tools/vendor/go.etcd.io/bbolt/bolt_ppc64.go | 10 -
 tools/vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 10 -
 tools/vendor/go.etcd.io/bbolt/bolt_riscv64.go | 10 -
 tools/vendor/go.etcd.io/bbolt/bolt_s390x.go | 10 -
 .../{bolt_unix_aix.go => bolt_solaris.go} | 7 +-
 tools/vendor/go.etcd.io/bbolt/bolt_unix.go | 10 +-
 tools/vendor/go.etcd.io/bbolt/bolt_windows.go | 9 +-
 .../vendor/go.etcd.io/bbolt/boltsync_unix.go | 1 -
 tools/vendor/go.etcd.io/bbolt/bucket.go | 484 +-
 tools/vendor/go.etcd.io/bbolt/cursor.go | 99 +-
 tools/vendor/go.etcd.io/bbolt/db.go | 465 +-
 tools/vendor/go.etcd.io/bbolt/errors.go | 76 +-
 .../vendor/go.etcd.io/bbolt/errors/errors.go | 84 +
 tools/vendor/go.etcd.io/bbolt/freelist.go | 410 -
 .../vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 -
 .../bbolt/internal/common/bolt_386.go | 7 +
 .../bbolt/internal/common/bolt_amd64.go | 7 +
 .../bbolt/internal/common/bolt_arm.go | 7 +
 .../bbolt/internal/common/bolt_arm64.go | 9 +
 .../bbolt/internal/common/bolt_loong64.go | 9 +
 .../bbolt/internal/common/bolt_mips64x.go | 9 +
 .../bbolt/internal/common/bolt_mipsx.go | 9 +
 .../bbolt/internal/common/bolt_ppc.go | 9 +
 .../bbolt/internal/common/bolt_ppc64.go | 9 +
 .../bbolt/internal/common/bolt_ppc64le.go | 9 +
 .../bbolt/internal/common/bolt_riscv64.go | 9 +
 .../bbolt/internal/common/bolt_s390x.go | 9 +
 .../bbolt/internal/common/bucket.go | 54 +
 .../go.etcd.io/bbolt/internal/common/inode.go | 115 +
 .../go.etcd.io/bbolt/internal/common/meta.go | 161 +
 .../go.etcd.io/bbolt/internal/common/page.go | 391 +
 .../go.etcd.io/bbolt/internal/common/types.go | 37 +
 .../bbolt/{ => internal/common}/unsafe.go | 10 +-
 .../go.etcd.io/bbolt/internal/common/utils.go | 64 +
 .../bbolt/internal/common/verify.go | 67 +
 .../bbolt/internal/freelist/array.go | 108 +
 .../bbolt/internal/freelist/freelist.go | 82 +
 .../bbolt/internal/freelist/hashmap.go | 292 +
 .../bbolt/internal/freelist/shared.go | 310 +
 tools/vendor/go.etcd.io/bbolt/logger.go | 113 +
 tools/vendor/go.etcd.io/bbolt/mlock_unix.go | 1 -
 tools/vendor/go.etcd.io/bbolt/node.go | 252 +-
 tools/vendor/go.etcd.io/bbolt/page.go | 212 -
 tools/vendor/go.etcd.io/bbolt/tx.go | 309 +-
 tools/vendor/go.etcd.io/bbolt/tx_check.go | 206 +-
 .../auto/sdk/CONTRIBUTING.md | 27 +
 .../go.opentelemetry.io/auto/sdk/LICENSE | 201 +
 .../auto/sdk/VERSIONING.md | 15 +
 .../go.opentelemetry.io/auto/sdk/doc.go | 14 +
 .../auto/sdk/internal/telemetry/attr.go | 58 +
 .../auto/sdk/internal/telemetry/doc.go | 8 +
 .../auto/sdk/internal/telemetry/id.go | 103 +
 .../auto/sdk/internal/telemetry/number.go | 67 +
 .../auto/sdk/internal/telemetry/resource.go | 66 +
 .../auto/sdk/internal/telemetry/scope.go | 67 +
 .../auto/sdk/internal/telemetry/span.go | 456 +
 .../auto/sdk/internal/telemetry/status.go | 40 +
 .../auto/sdk/internal/telemetry/traces.go | 189 +
 .../auto/sdk/internal/telemetry/value.go | 452 +
 .../go.opentelemetry.io/auto/sdk/limit.go | 94 +
 .../go.opentelemetry.io/auto/sdk/span.go | 432 +
 .../go.opentelemetry.io/auto/sdk/tracer.go | 124 +
 .../auto/sdk/tracer_provider.go | 33 +
 .../net/http/otelhttp/client.go | 6 +-
 .../net/http/otelhttp/config.go | 4 +
 .../net/http/otelhttp/handler.go | 54 +-
 .../otelhttp/internal/request/body_wrapper.go | 7 +-
 .../net/http/otelhttp/internal/request/gen.go | 10 +
 .../internal/request/resp_writer_wrapper.go | 5 +-
 .../net/http/otelhttp/internal/semconv/env.go | 188 +-
 .../net/http/otelhttp/internal/semconv/gen.go | 14 +
 .../otelhttp/internal/semconv/httpconv.go | 269 +-
 .../http/otelhttp/internal/semconv/util.go | 43 +-
 .../http/otelhttp/internal/semconv/v1.20.0.go | 63 +-
 .../otelhttp/internal/semconvutil/httpconv.go | 53 +-
 .../otelhttp/internal/semconvutil/netconv.go | 13 +-
 .../net/http/otelhttp/labeler.go | 6 +-
 .../net/http/otelhttp/transport.go | 2 +-
 .../net/http/otelhttp/version.go | 9 +-
 .../go.opentelemetry.io/otel/.clomonitor.yml | 3 +
 .../go.opentelemetry.io/otel/.gitignore | 1 +
 .../go.opentelemetry.io/otel/.golangci.yml | 517 +-
 .../go.opentelemetry.io/otel/CHANGELOG.md | 197 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 30 +-
 .../vendor/go.opentelemetry.io/otel/Makefile | 74 +-
 .../vendor/go.opentelemetry.io/otel/README.md | 22 +-
 .../go.opentelemetry.io/otel/RELEASING.md | 58 +-
 .../go.opentelemetry.io/otel/VERSIONING.md | 2 +-
 .../otel/attribute/filter.go | 4 +-
 .../internal}/attribute.go | 46 +-
 .../otel/attribute/rawhelpers.go | 37 +
 .../otel/attribute/value.go | 15 +-
 .../otel/baggage/baggage.go | 4 +-
 .../go.opentelemetry.io/otel/codes/codes.go | 3 +-
 .../otel/dependencies.Dockerfile | 4 +
 .../internal/tracetransform/attribute.go | 2 +
 .../otlp/otlptrace/otlptracegrpc/client.go | 2 +-
 .../internal/envconfig/envconfig.go | 4 +-
 .../otlptrace/otlptracegrpc/internal/gen.go | 1 +
 .../internal/otlpconfig/envconfig.go | 14 +-
 .../internal/otlpconfig/options.go | 16 +-
 .../internal/otlpconfig/optiontypes.go | 2 +-
 .../otlptracegrpc/internal/otlpconfig/tls.go | 2 +-
 .../otlptracegrpc/internal/partialsuccess.go | 2 +-
 .../otlptracegrpc/internal/retry/retry.go | 28 +-
 .../otlp/otlptrace/otlptracegrpc/options.go | 5 +-
 .../otel/exporters/otlp/otlptrace/version.go | 2 +-
 .../go.opentelemetry.io/otel/get_main_pkgs.sh | 30 -
 .../go.opentelemetry.io/otel/internal/gen.go | 18 -
 .../otel/internal/global/handler.go | 1 +
 .../otel/internal/global/meter.go | 45 +-
 .../otel/internal/global/trace.go | 36 +
 .../otel/internal/rawhelpers.go | 48 -
 .../otel/metric/asyncfloat64.go | 12 +-
 .../otel/metric/asyncint64.go | 8 +-
 .../otel/metric/instrument.go | 16 +-
 .../go.opentelemetry.io/otel/metric/meter.go | 10 +-
 .../otel/metric/noop/noop.go | 25 +-
 .../otel/propagation/baggage.go | 36 +-
 .../otel/propagation/propagation.go | 30 +-
 .../go.opentelemetry.io/otel/renovate.json | 13 +-
 .../go.opentelemetry.io/otel/requirements.txt | 2 +-
 .../otel/sdk/internal/env/env.go | 2 +
 .../otel/sdk/resource/builtin.go | 2 +-
 .../otel/sdk/resource/container.go | 2 +-
 .../otel/sdk/resource/env.go | 2 +-
 .../otel/sdk/resource/host_id.go | 2 +-
 .../otel/sdk/resource/os.go | 2 +-
 .../otel/sdk/resource/os_release_darwin.go | 3 +-
 .../otel/sdk/resource/process.go | 2 +-
 .../otel/sdk/resource/resource.go | 25 +-
 .../otel/sdk/trace/batch_span_processor.go | 6 +-
 .../otel/sdk/trace/id_generator.go | 26 +-
 .../otel/sdk/trace/provider.go | 12 +-
 .../otel/sdk/trace/sampler_env.go | 5 +-
 .../otel/sdk/trace/sampling.go | 8 +-
 .../otel/sdk/trace/simple_span_processor.go | 2 +-
 .../otel/sdk/trace/span.go | 103 +-
 .../otel/sdk/trace/tracer.go | 13 +-
 .../go.opentelemetry.io/otel/sdk/version.go | 3 +-
 .../otel/semconv/v1.34.0/MIGRATION.md | 4 +
 .../otel/semconv/v1.34.0/README.md | 3 +
 .../otel/semconv/v1.34.0/attribute_group.go | 13851 ++++++
 .../otel/semconv/v1.34.0/doc.go | 9 +
 .../otel/semconv/v1.34.0/exception.go | 9 +
 .../otel/semconv/v1.34.0/schema.go | 9 +
 .../go.opentelemetry.io/otel/trace/auto.go | 662 +
 .../go.opentelemetry.io/otel/trace/config.go | 2 +-
 .../otel/trace/internal/telemetry/attr.go | 58 +
 .../otel/trace/internal/telemetry/doc.go | 8 +
 .../otel/trace/internal/telemetry/id.go | 103 +
 .../otel/trace/internal/telemetry/number.go | 67 +
 .../otel/trace/internal/telemetry/resource.go | 66 +
 .../otel/trace/internal/telemetry/scope.go | 67 +
 .../otel/trace/internal/telemetry/span.go | 472 +
 .../otel/trace/internal/telemetry/status.go | 42 +
 .../otel/trace/internal/telemetry/traces.go | 189 +
 .../otel/trace/internal/telemetry/value.go | 453 +
 .../go.opentelemetry.io/otel/trace/noop.go | 22 +-
 .../otel/verify_readmes.sh | 21 -
 .../go.opentelemetry.io/otel/version.go | 2 +-
 .../go.opentelemetry.io/otel/versions.yaml | 12 +-
 .../trace/v1/trace_service_grpc.pb.go | 4 -
 .../proto/otlp/common/v1/common.pb.go | 138 +-
 .../proto/otlp/resource/v1/resource.pb.go | 56 +-
 .../proto/otlp/trace/v1/trace.pb.go | 6 +-
 .../common/LICENSE | 0
 .../common/pkg/auth/auth.go | 24 +-
 .../common/pkg/auth/cli.go | 14 +-
 .../common/pkg/capabilities/capabilities.go | 117 +-
 .../common/pkg/completion/completion.go | 18 +-
 .../common/pkg/password/password_supported.go | 0
 .../common/pkg/password/password_windows.go | 0
 .../image/v5/LICENSE | 0
 .../vendor/go.podman.io/image/v5/copy/blob.go | 187 +
 .../go.podman.io/image/v5/copy/compression.go | 434 +
 .../vendor/go.podman.io/image/v5/copy/copy.go | 417 +
 .../image/v5/copy/digesting_reader.go | 62 +
 .../go.podman.io/image/v5/copy/encryption.go | 138 +
 .../go.podman.io/image/v5/copy/manifest.go | 253 +
 .../go.podman.io/image/v5/copy/multiple.go | 354 +
 .../image/v5/copy/progress_bars.go | 177 +
 .../image/v5/copy/progress_channel.go | 79 +
 .../vendor/go.podman.io/image/v5/copy/sign.go | 115 +
 .../go.podman.io/image/v5/copy/single.go | 1004 +
 .../v5/directory/explicitfilepath/path.go | 57 +
 .../image/v5/docker/body_reader.go | 16 +-
 .../image/v5/docker/cache.go | 4 +-
 .../image/v5/docker/distribution_error.go | 31 +-
 .../image/v5/docker/docker_client.go | 117 +-
 .../image/v5/docker/docker_image.go | 14 +-
 .../image/v5/docker/docker_image_dest.go | 52 +-
 .../image/v5/docker/docker_image_src.go | 101 +-
 .../image/v5/docker/docker_transport.go | 8 +-
 .../image/v5/docker/errors.go | 7 +-
 .../image/v5/docker/paths_common.go | 1 -
 .../image/v5/docker/paths_freebsd.go | 1 -
 .../v5/docker/policyconfiguration/naming.go | 2 +-
 .../image/v5/docker/reference/README.md | 0
 .../image/v5/docker/reference/helpers.go | 0
 .../image/v5/docker/reference/normalize.go | 0
 .../image/v5/docker/reference/reference.go | 0
 .../v5/docker/reference/regexp-additions.go | 0
 .../image/v5/docker/reference/regexp.go | 2 +-
 .../image/v5/docker/registries_d.go | 18 +-
 .../image/v5/docker/wwwauthenticate.go | 17 +-
 .../image/v5/image/docker_schema2.go | 14 +
 .../go.podman.io/image/v5/image/sourced.go | 37 +
 .../go.podman.io/image/v5/image/unparsed.go | 47 +
 .../internal/blobinfocache/blobinfocache.go | 2 +-
 .../image/v5/internal/blobinfocache/types.go | 4 +-
 .../image/v5/internal/image/docker_list.go | 4 +-
 .../image/v5/internal/image/docker_schema1.go | 10 +-
 .../image/v5/internal/image/docker_schema2.go | 10 +-
 .../image/v5/internal/image/manifest.go | 6 +-
 .../image/v5/internal/image/memory.go | 2 +-
 .../image/v5/internal/image/oci.go | 12 +-
 .../image/v5/internal/image/oci_index.go | 4 +-
 .../image/v5/internal/image/sourced.go | 2 +-
 .../image/v5/internal/image/unparsed.go | 18 +-
 .../internal/imagedestination/impl/compat.go | 21 +-
 .../internal/imagedestination/impl/helpers.go | 4 +-
 .../imagedestination/impl/properties.go | 2 +-
 .../stubs/original_oci_config.go | 16 +
 .../stubs/put_blob_partial.go | 9 +-
 .../imagedestination/stubs/signatures.go | 2 +-
 .../internal/imagedestination/stubs/stubs.go | 0
 .../v5/internal/imagedestination/wrapper.go | 108 +
 .../v5/internal/imagesource/impl/compat.go | 4 +-
 .../internal/imagesource/impl/layer_infos.go | 2 +-
 .../internal/imagesource/impl/properties.go | 0
 .../internal/imagesource/impl/signatures.go | 2 +-
 .../internal/imagesource/stubs/get_blob_at.go | 4 +-
 .../v5/internal/imagesource/stubs/stubs.go | 0
 .../image/v5/internal/imagesource/wrapper.go | 8 +-
 .../image/v5/internal/iolimits/iolimits.go | 0
 .../image/v5/internal/manifest/common.go | 0
 .../v5/internal/manifest/docker_schema2.go | 0
 .../internal/manifest/docker_schema2_list.go | 43 +-
 .../image/v5/internal/manifest/errors.go | 0
 .../image/v5/internal/manifest/list.go | 4 +-
 .../image/v5/internal/manifest/manifest.go | 2 +-
 .../image/v5/internal/manifest/oci_index.go | 21 +-
 .../image/v5/internal/multierr/multierr.go | 0
 .../internal/pkg/platform/platform_matcher.go | 6 +-
 .../image/v5/internal/private/private.go | 68 +-
 .../internal/putblobdigest/put_blob_digest.go | 2 +-
 .../image/v5/internal/rootless/rootless.go | 0
 .../image/v5/internal/set/set.go | 11 +-
 .../image/v5/internal/signature/signature.go | 0
 .../image/v5/internal/signature/sigstore.go | 0
 .../image/v5/internal/signature/simple.go | 0
 .../image/v5/internal/signer/signer.go | 47 +
 .../v5/internal/streamdigest/stream_digest.go | 6 +-
 .../image/v5/internal/tmpdir/tmpdir.go | 4 +-
 .../v5/internal/unparsedimage/wrapper.go | 38 +
 .../v5/internal/uploadreader/upload_reader.go | 0
 .../image/v5/internal/useragent/useragent.go | 2 +-
 .../image/v5/manifest/common.go | 4 +-
 .../image/v5/manifest/docker_schema1.go | 30 +-
 .../image/v5/manifest/docker_schema2.go | 8 +-
 .../image/v5/manifest/docker_schema2_list.go | 2 +-
 .../image/v5/manifest/list.go | 2 +-
 .../image/v5/manifest/manifest.go | 4 +-
 .../image/v5/manifest/oci.go | 13 +-
 .../image/v5/manifest/oci_index.go | 2 +-
 .../image/v5/oci/internal/oci_util.go | 150 +
 .../image/v5/oci/layout/oci_delete.go | 189 +
 .../image/v5/oci/layout/oci_dest.go | 414 +
 .../image/v5/oci/layout/oci_src.go | 248 +
 .../image/v5/oci/layout/oci_transport.go | 304 +
 .../image/v5/oci/layout/reader.go | 52 +
 .../image/v5/pkg/blobinfocache/default.go | 88 +
 .../internal/prioritize/prioritize.go | 244 +
 .../v5/pkg/blobinfocache/memory/memory.go | 255 +
 .../image/v5/pkg/blobinfocache/none/none.go | 4 +-
 .../v5/pkg/blobinfocache/sqlite/sqlite.go | 682 +
 .../image/v5/pkg/compression/compression.go | 175 +
 .../v5/pkg/compression/internal/types.go | 9 +
 .../image/v5/pkg/compression/types/types.go | 2 +-
 .../image/v5/pkg/compression/zstd.go | 59 +
 .../image/v5/pkg/docker/config/config.go | 68 +-
 .../image/v5/pkg/strslice/README.md | 0
 .../image/v5/pkg/strslice/strslice.go | 0
 .../v5/pkg/sysregistriesv2/paths_common.go | 1 -
 .../v5/pkg/sysregistriesv2/paths_freebsd.go | 1 -
 .../v5/pkg/sysregistriesv2/shortnames.go | 24 +-
 .../sysregistriesv2/system_registries_v2.go | 36 +-
 .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 10 +-
 .../go.podman.io/image/v5/signature/docker.go | 94 +
 .../image/v5/signature/fulcio_cert.go | 210 +
 .../image/v5/signature/internal/errors.go | 24 +
 .../image/v5/signature/internal/json.go | 90 +
 .../v5/signature/internal/rekor_api_types.go | 95 +
 .../image/v5/signature/internal/rekor_set.go | 224 +
 .../v5/signature/internal/sequoia/gosequoia.c | 200 +
 .../v5/signature/internal/sequoia/gosequoia.h | 54 +
 .../internal/sequoia/gosequoiafuncs.h | 21 +
 .../v5/signature/internal/sequoia/sequoia.go | 223 +
 .../v5/signature/internal/sequoia/sequoia.h | 85 +
 .../v5/signature/internal/sigstore_payload.go | 239 +
 .../image/v5/signature/mechanism.go | 110 +
 .../image/v5/signature/mechanism_gpgme.go | 179 +
 .../v5/signature/mechanism_gpgme_only.go | 64 +
 .../image/v5/signature/mechanism_openpgp.go | 182 +
 .../image/v5/signature/mechanism_sequoia.go | 84 +
 .../image/v5/signature/pki_cert.go | 74 +
 .../image/v5/signature/policy_config.go | 811 +
 .../v5/signature/policy_config_sigstore.go | 630 +
 .../image/v5/signature/policy_eval.go | 293 +
 .../v5/signature/policy_eval_baselayer.go | 20 +
 .../v5/signature/policy_eval_signedby.go | 116 +
 .../v5/signature/policy_eval_sigstore.go | 435 +
 .../image/v5/signature/policy_eval_simple.go | 29 +
 .../image/v5/signature/policy_paths_common.go | 7 +
 .../v5/signature/policy_paths_freebsd.go | 7 +
 .../v5/signature/policy_reference_match.go | 154 +
 .../image/v5/signature/policy_types.go | 253 +
 .../image/v5/signature/signer/signer.go | 9 +
 .../image/v5/signature/sigstore/copied.go | 103 +
 .../image/v5/signature/sigstore/generate.go | 35 +
 .../v5/signature/sigstore/internal/signer.go | 95 +
 .../image/v5/signature/sigstore/signer.go | 60 +
 .../go.podman.io/image/v5/signature/simple.go | 297 +
 .../v5/signature/simplesigning/signer.go | 105 +
 .../image/v5/transports/stub.go | 2 +-
 .../image/v5/transports/transports.go | 6 +-
 .../image/v5/types/types.go | 18 +-
 .../image/v5/version/version.go | 4 +-
 .../storage/AUTHORS | 0
 .../storage/LICENSE | 0
 .../storage/NOTICE | 0
 .../internal/rawfilelock/rawfilelock.go | 64 +
 .../internal/rawfilelock/rawfilelock_unix.go | 49 +
 .../rawfilelock/rawfilelock_windows.go | 48 +
 .../storage/pkg/archive/README.md | 1 +
 .../storage/pkg/archive/archive.go | 1675 +
 .../storage/pkg/archive/archive_110.go | 22 +
 .../storage/pkg/archive/archive_19.go | 13 +
 .../storage/pkg/archive/archive_bsd.go | 18 +
 .../storage/pkg/archive/archive_linux.go | 208 +
 .../storage/pkg/archive/archive_other.go | 11 +
 .../storage/pkg/archive/archive_unix.go | 136 +
 .../storage/pkg/archive/archive_windows.go | 83 +
 .../storage/pkg/archive/archive_zstd.go | 41 +
 .../storage/pkg/archive/changes.go | 505 +
 .../storage/pkg/archive/changes_linux.go | 404 +
 .../storage/pkg/archive/changes_other.go | 112 +
 .../storage/pkg/archive/changes_unix.go | 51 +
 .../storage/pkg/archive/changes_windows.go | 29 +
 .../go.podman.io/storage/pkg/archive/copy.go | 459 +
 .../storage/pkg/archive/copy_unix.go | 11 +
 .../storage/pkg/archive/copy_windows.go | 9 +
 .../go.podman.io/storage/pkg/archive/diff.go | 274 +
 .../storage/pkg/archive/fflags_bsd.go | 166 +
 .../storage/pkg/archive/fflags_unsupported.go | 20 +
 .../storage/pkg/archive/filter.go | 73 +
 .../storage/pkg/archive/time_linux.go | 16 +
 .../storage/pkg/archive/time_unsupported.go | 16 +
 .../storage/pkg/archive/whiteouts.go | 23 +
 .../go.podman.io/storage/pkg/archive/wrap.go | 59 +
 .../pkg/chunked/compressor/compressor.go | 524 +
 .../storage/pkg/chunked/compressor/rollsum.go | 85 +
 .../chunked/internal/minimal/compression.go | 333 +
 .../storage/pkg/chunked/toc/toc.go | 41 +
 .../storage/pkg/fileutils/exists_freebsd.go | 0
 .../storage/pkg/fileutils/exists_unix.go | 4 +-
 .../storage/pkg/fileutils/exists_windows.go | 0
 .../storage/pkg/fileutils/fileutils.go | 0
 .../storage/pkg/fileutils/fileutils_darwin.go | 0
 .../pkg/fileutils/fileutils_solaris.go | 0
 .../storage/pkg/fileutils/fileutils_unix.go | 0
 .../pkg/fileutils/fileutils_windows.go | 0
 .../storage/pkg/fileutils/reflink_linux.go | 20 +
 .../pkg/fileutils/reflink_unsupported.go | 15 +
 .../storage/pkg/homedir/homedir.go | 0
 .../storage/pkg/homedir/homedir_unix.go | 2 +-
 .../storage/pkg/homedir/homedir_windows.go | 0
 .../storage/pkg/idtools/idtools.go | 20 +-
 .../storage/pkg/idtools/idtools_supported.go | 13 +-
 .../storage/pkg/idtools/idtools_unix.go | 4 +-
 .../pkg/idtools/idtools_unsupported.go | 0
 .../storage/pkg/idtools/idtools_windows.go | 0
 .../storage/pkg/idtools/parser.go | 0
 .../storage/pkg/idtools/usergroupadd_linux.go | 2 +-
 .../pkg/idtools/usergroupadd_unsupported.go | 0
 .../storage/pkg/idtools/utils_unix.go | 0
 .../storage/pkg/ioutils/buffer.go | 0
 .../storage/pkg/ioutils/bytespipe.go | 7 +-
 .../storage/pkg/ioutils/fswriters.go | 0
 .../storage/pkg/ioutils/fswriters_linux.go | 0
 .../storage/pkg/ioutils/fswriters_other.go | 0
 .../storage/pkg/ioutils/readers.go | 0
 .../storage/pkg/ioutils/temp_unix.go | 0
 .../storage/pkg/ioutils/temp_windows.go | 2 +-
 .../storage/pkg/ioutils/writeflusher.go | 0
 .../storage/pkg/ioutils/writers.go | 0
 .../storage/pkg/lockfile/lastwrite.go | 0
 .../storage/pkg/lockfile/lockfile.go | 97 +-
 .../storage/pkg/lockfile/lockfile_unix.go | 42 +-
 .../storage/pkg/lockfile/lockfile_windows.go | 36 -
 .../storage/pkg/longpath/longpath.go | 0
 .../storage/pkg/mount/flags.go | 0
 .../storage/pkg/mount/flags_freebsd.go | 0
 .../storage/pkg/mount/flags_linux.go | 0
 .../storage/pkg/mount/flags_unsupported.go | 0
 .../storage/pkg/mount/mount.go | 0
 .../storage/pkg/mount/mounter_freebsd.go | 0
 .../storage/pkg/mount/mounter_linux.go | 0
 .../storage/pkg/mount/mounter_unsupported.go | 0
 .../storage/pkg/mount/mountinfo.go | 0
 .../storage/pkg/mount/mountinfo_linux.go | 0
 .../storage/pkg/mount/sharedsubtree_linux.go | 0
 .../storage/pkg/mount/unmount_unix.go | 0
 .../storage/pkg/mount/unmount_unsupported.go | 0
 .../go.podman.io/storage/pkg/pools/pools.go | 119 +
 .../storage/pkg/promise/promise.go | 11 +
 .../storage/pkg/reexec/README.md | 0
 .../storage/pkg/reexec/command_freebsd.go | 0
 .../storage/pkg/reexec/command_linux.go | 0
 .../storage/pkg/reexec/command_unix.go | 0
 .../storage/pkg/reexec/command_unsupported.go | 0
 .../storage/pkg/reexec/command_windows.go | 0
 .../storage/pkg/reexec/reexec.go | 2 +-
 .../storage/pkg/regexp/regexp.go | 0
 .../pkg/regexp/regexp_dontprecompile.go | 0
 .../storage/pkg/regexp/regexp_precompile.go | 0
 .../storage/pkg/system/chmod.go | 0
 .../storage/pkg/system/chtimes.go | 0
 .../storage/pkg/system/chtimes_unix.go | 0
 .../storage/pkg/system/chtimes_windows.go | 0
 .../storage/pkg/system/errors.go | 0
 .../storage/pkg/system/exitcode.go | 0
 .../storage/pkg/system/extattr_freebsd.go | 0
 .../storage/pkg/system/extattr_unsupported.go | 0
 .../storage/pkg/system/init.go | 0
 .../storage/pkg/system/init_windows.go | 0
 .../storage/pkg/system/lchflags_bsd.go | 0
 .../storage/pkg/system/lchown.go | 0
 .../storage/pkg/system/lcow_unix.go | 0
 .../storage/pkg/system/lcow_windows.go | 0
 .../storage/pkg/system/lstat_unix.go | 0
 .../storage/pkg/system/lstat_windows.go | 0
 .../storage/pkg/system/meminfo.go | 0
 .../storage/pkg/system/meminfo_freebsd.go | 0
 .../storage/pkg/system/meminfo_linux.go | 0
 .../storage/pkg/system/meminfo_solaris.go | 0
 .../storage/pkg/system/meminfo_unsupported.go | 0
 .../storage/pkg/system/meminfo_windows.go | 0
 .../storage/pkg/system/mknod.go | 0
 .../storage/pkg/system/mknod_freebsd.go | 0
 .../storage/pkg/system/mknod_windows.go | 0
 .../storage/pkg/system/path.go | 0
 .../storage/pkg/system/path_unix.go | 0
 .../storage/pkg/system/path_windows.go | 0
 .../storage/pkg/system/process_unix.go | 0
 .../storage/pkg/system/rm.go | 2 +-
 .../storage/pkg/system/rm_common.go | 0
 .../storage/pkg/system/rm_freebsd.go | 0
 .../storage/pkg/system/stat_common.go | 0
 .../storage/pkg/system/stat_darwin.go | 0
 .../storage/pkg/system/stat_freebsd.go | 0
 .../storage/pkg/system/stat_linux.go | 4 +-
 .../storage/pkg/system/stat_netbsd.go | 0
 .../storage/pkg/system/stat_openbsd.go | 0
 .../storage/pkg/system/stat_solaris.go | 0
 .../storage/pkg/system/stat_unix.go | 0
 .../storage/pkg/system/stat_windows.go | 0
 .../storage/pkg/system/syscall_unix.go | 0
 .../storage/pkg/system/syscall_windows.go | 0
 .../storage/pkg/system/umask.go | 0
 .../storage/pkg/system/umask_windows.go | 0
 .../storage/pkg/system/utimes_freebsd.go | 0
 .../storage/pkg/system/utimes_linux.go | 0
 .../storage/pkg/system/utimes_unsupported.go | 0
 .../storage/pkg/system/xattrs_darwin.go | 0
 .../storage/pkg/system/xattrs_freebsd.go | 0
 .../storage/pkg/system/xattrs_linux.go | 0
 .../storage/pkg/system/xattrs_unsupported.go | 0
 .../storage/pkg/unshare/getenv_linux_cgo.go | 0
 .../storage/pkg/unshare/getenv_linux_nocgo.go | 0
 .../storage/pkg/unshare/unshare.c | 0
 .../storage/pkg/unshare/unshare.go | 0
 .../storage/pkg/unshare/unshare_cgo.go | 0
 .../storage/pkg/unshare/unshare_darwin.go | 2 +-
 .../storage/pkg/unshare/unshare_freebsd.c | 0
 .../storage/pkg/unshare/unshare_freebsd.go | 2 +-
 .../storage/pkg/unshare/unshare_gccgo.go | 0
 .../storage/pkg/unshare/unshare_linux.go | 29 +-
 .../pkg/unshare/unshare_unsupported.go | 2 +-
 .../pkg/unshare/unshare_unsupported_cgo.go | 0
 tools/vendor/go.yaml.in/yaml/v2/.travis.yml | 17 +
 .../goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE | 0
 .../yaml/v2}/LICENSE.libyaml | 0
 .../goyaml.v2 => go.yaml.in/yaml/v2}/NOTICE | 0
 tools/vendor/go.yaml.in/yaml/v2/README.md | 131 +
 .../goyaml.v2 => go.yaml.in/yaml/v2}/apic.go | 0
 .../yaml/v2}/decode.go | 0
 .../yaml/v2}/emitterc.go | 0
 .../yaml/v2}/encode.go | 0
 .../yaml/v2}/parserc.go | 0
 .../yaml/v2}/readerc.go | 0
 .../yaml/v2}/resolve.go | 0
 .../yaml/v2}/scannerc.go | 0
 .../yaml/v2}/sorter.go | 0
 .../yaml/v2}/writerc.go | 0
 .../goyaml.v2 => go.yaml.in/yaml/v2}/yaml.go | 2 +-
 .../goyaml.v2 => go.yaml.in/yaml/v2}/yamlh.go | 0
 .../yaml/v2}/yamlprivateh.go | 0
 .../goyaml.v3 => go.yaml.in/yaml/v3}/LICENSE | 0
 .../goyaml.v3 => go.yaml.in/yaml/v3}/NOTICE | 0
 tools/vendor/go.yaml.in/yaml/v3/README.md | 171 +
 .../goyaml.v3 => go.yaml.in/yaml/v3}/apic.go | 8 +-
 .../yaml/v3}/decode.go | 24 +-
 .../yaml/v3}/emitterc.go | 19 +-
 .../yaml/v3}/encode.go | 0
 .../yaml/v3}/parserc.go | 140 +-
 .../yaml/v3}/readerc.go | 8 +-
 .../yaml/v3}/resolve.go | 0
 .../yaml/v3}/scannerc.go | 42 +-
 .../yaml/v3}/sorter.go | 0
 .../yaml/v3}/writerc.go | 8 +-
 .../goyaml.v3 => go.yaml.in/yaml/v3}/yaml.go | 85 +-
 .../goyaml.v3 => go.yaml.in/yaml/v3}/yamlh.go | 10 +-
 .../yaml/v3}/yamlprivateh.go | 20 +-
 .../golang.org/x/crypto/bcrypt/bcrypt.go | 2 +-
 .../golang.org/x/crypto/cryptobyte/asn1.go | 825 +
 .../x/crypto/cryptobyte/asn1/asn1.go | 46 +
 .../golang.org/x/crypto/cryptobyte/builder.go | 350 +
 .../golang.org/x/crypto/cryptobyte/string.go | 183 +
 .../x/crypto/internal/alias/alias.go | 31 +
 .../x/crypto/internal/alias/alias_purego.go | 34 +
 .../x/crypto/internal/poly1305/mac_noasm.go | 9 +
 .../x/crypto/internal/poly1305/poly1305.go | 99 +
 .../x/crypto/internal/poly1305/sum_amd64.s | 93 +
 .../x/crypto/internal/poly1305/sum_asm.go | 47 +
 .../x/crypto/internal/poly1305/sum_generic.go | 312 +
 .../x/crypto/internal/poly1305/sum_loong64.s | 123 +
 .../x/crypto/internal/poly1305/sum_ppc64x.s | 187 +
 .../x/crypto/internal/poly1305/sum_s390x.go | 76 +
 .../x/crypto/internal/poly1305/sum_s390x.s | 503 +
 .../x/crypto/nacl/secretbox/secretbox.go | 173 +
 tools/vendor/golang.org/x/crypto/ocsp/ocsp.go | 793 +
 .../x/crypto/salsa20/salsa/hsalsa20.go | 150 +
 .../x/crypto/salsa20/salsa/salsa208.go | 201 +
 .../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 +
 .../x/crypto/salsa20/salsa/salsa20_amd64.s | 880 +
 .../x/crypto/salsa20/salsa/salsa20_noasm.go | 14 +
 .../x/crypto/salsa20/salsa/salsa20_ref.go | 233 +
 tools/vendor/golang.org/x/crypto/sha3/doc.go | 66 +
 .../vendor/golang.org/x/crypto/sha3/hashes.go | 128 +
 .../golang.org/x/crypto/sha3/hashes_noasm.go | 23 +
 .../golang.org/x/crypto/sha3/keccakf.go | 414 +
 .../golang.org/x/crypto/sha3/keccakf_amd64.go | 13 +
 .../golang.org/x/crypto/sha3/keccakf_amd64.s | 5419 +++
 tools/vendor/golang.org/x/crypto/sha3/sha3.go | 244 +
 .../golang.org/x/crypto/sha3/sha3_s390x.go | 303 +
 .../golang.org/x/crypto/sha3/sha3_s390x.s | 33 +
 .../vendor/golang.org/x/crypto/sha3/shake.go | 193 +
 .../golang.org/x/crypto/sha3/shake_noasm.go | 15 +
 tools/vendor/golang.org/x/exp/maps/maps.go | 30 +-
 .../vendor/golang.org/x/exp/slices/slices.go | 41 +-
 tools/vendor/golang.org/x/exp/slices/sort.go | 25 +-
 tools/vendor/golang.org/x/mod/modfile/rule.go | 126 +-
 tools/vendor/golang.org/x/mod/modfile/work.go | 8 +-
 .../vendor/golang.org/x/mod/module/module.go | 19 +-
 .../vendor/golang.org/x/mod/semver/semver.go | 30 +-
 .../golang.org/x/net/context/context.go | 35 +-
 tools/vendor/golang.org/x/net/html/escape.go | 2 +-
 tools/vendor/golang.org/x/net/html/parse.go | 57 +-
 tools/vendor/golang.org/x/net/html/render.go | 2 +-
 tools/vendor/golang.org/x/net/http2/config.go | 63 +-
 .../golang.org/x/net/http2/config_go124.go | 61 -
 .../golang.org/x/net/http2/config_go125.go | 15 +
 .../golang.org/x/net/http2/config_go126.go | 15 +
 .../x/net/http2/config_pre_go124.go | 16 -
 tools/vendor/golang.org/x/net/http2/frame.go | 41 +-
 .../vendor/golang.org/x/net/http2/gotrack.go | 17 +-
 tools/vendor/golang.org/x/net/http2/http2.go | 37 +-
 tools/vendor/golang.org/x/net/http2/server.go | 143 +-
 tools/vendor/golang.org/x/net/http2/timer.go | 20 -
 .../golang.org/x/net/http2/transport.go | 100 +-
 .../golang.org/x/net/http2/writesched.go | 2 +
 ...rity.go => writesched_priority_rfc7540.go} | 104 +-
 .../net/http2/writesched_priority_rfc9128.go | 209 +
 .../x/net/http2/writesched_roundrobin.go | 2 +-
 .../x/net/internal/httpcommon/request.go | 4 +-
 .../golang.org/x/net/internal/socks/socks.go | 2 +-
 tools/vendor/golang.org/x/net/trace/events.go | 2 +-
 .../golang.org/x/oauth2/internal/doc.go | 2 +-
 .../golang.org/x/oauth2/internal/oauth2.go | 2 +-
 .../golang.org/x/oauth2/internal/token.go | 50 +-
 .../golang.org/x/oauth2/internal/transport.go | 4 +-
 tools/vendor/golang.org/x/oauth2/oauth2.go | 55 +-
 tools/vendor/golang.org/x/oauth2/pkce.go | 15 +-
 tools/vendor/golang.org/x/oauth2/token.go | 15 +-
 tools/vendor/golang.org/x/oauth2/transport.go | 24 +-
 .../golang.org/x/sync/errgroup/errgroup.go | 20 +-
 .../golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 +
 .../golang.org/x/sys/cpu/asm_darwin_x86_gc.s | 17 +
 .../vendor/golang.org/x/sys/cpu/byteorder.go | 66 +
 tools/vendor/golang.org/x/sys/cpu/cpu.go | 338 +
 tools/vendor/golang.org/x/sys/cpu/cpu_aix.go | 33 +
 tools/vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 +
 .../vendor/golang.org/x/sys/cpu/cpu_arm64.go | 194 +
 tools/vendor/golang.org/x/sys/cpu/cpu_arm64.s | 39 +
 .../golang.org/x/sys/cpu/cpu_darwin_x86.go | 61 +
 .../golang.org/x/sys/cpu/cpu_gc_arm64.go | 12 +
 .../golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 +
 .../vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 15 +
 .../vendor/golang.org/x/sys/cpu/cpu_gc_x86.s | 26 +
 .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 +
 .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 +
 .../golang.org/x/sys/cpu/cpu_gccgo_x86.c | 37 +
 .../golang.org/x/sys/cpu/cpu_gccgo_x86.go | 25 +
 .../vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 +
 .../golang.org/x/sys/cpu/cpu_linux_arm.go | 39 +
 .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 120 +
 .../golang.org/x/sys/cpu/cpu_linux_loong64.go | 22 +
 .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 22 +
 .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 +
 .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 30 +
 .../golang.org/x/sys/cpu/cpu_linux_riscv64.go | 160 +
 .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 +
 .../golang.org/x/sys/cpu/cpu_loong64.go | 50 +
 .../vendor/golang.org/x/sys/cpu/cpu_loong64.s | 13 +
 .../golang.org/x/sys/cpu/cpu_mips64x.go | 15 +
 .../vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 +
 .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 +
 .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 +
 .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 +
 .../golang.org/x/sys/cpu/cpu_other_arm.go | 9 +
 .../golang.org/x/sys/cpu/cpu_other_arm64.go | 9 +
 .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 11 +
 .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 12 +
 .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 11 +
 .../golang.org/x/sys/cpu/cpu_other_x86.go | 11 +
 .../vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 +
 .../golang.org/x/sys/cpu/cpu_riscv64.go | 32 +
 .../vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 +
 tools/vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 +
 tools/vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 +
 tools/vendor/golang.org/x/sys/cpu/cpu_x86.go | 162 +
 tools/vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 +
 .../golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 +
 .../vendor/golang.org/x/sys/cpu/endian_big.go | 10 +
 .../golang.org/x/sys/cpu/endian_little.go | 10 +
 .../golang.org/x/sys/cpu/hwcap_linux.go | 71 +
 tools/vendor/golang.org/x/sys/cpu/parse.go | 43 +
 .../x/sys/cpu/proc_cpuinfo_linux.go | 53 +
 .../golang.org/x/sys/cpu/runtime_auxv.go | 16 +
 .../x/sys/cpu/runtime_auxv_go121.go | 18 +
 .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 26 +
 .../x/sys/cpu/syscall_aix_ppc64_gc.go | 35 +
 .../x/sys/cpu/syscall_darwin_x86_gc.go | 98 +
 .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 -
 .../golang.org/x/sys/plan9/pwd_plan9.go | 14 +-
 .../golang.org/x/sys/unix/affinity_linux.go | 9 +-
 tools/vendor/golang.org/x/sys/unix/fdset.go | 4 +-
 .../golang.org/x/sys/unix/ifreq_linux.go | 4 +-
 tools/vendor/golang.org/x/sys/unix/mkall.sh | 1 +
 .../vendor/golang.org/x/sys/unix/mkerrors.sh | 3 +
 .../golang.org/x/sys/unix/syscall_darwin.go | 56 +-
 .../golang.org/x/sys/unix/syscall_linux.go | 4 +-
 .../golang.org/x/sys/unix/syscall_netbsd.go | 17 +
 .../golang.org/x/sys/unix/syscall_solaris.go | 2 +-
 .../golang.org/x/sys/unix/zerrors_linux.go | 63 +-
 .../x/sys/unix/zerrors_linux_386.go | 3 +
 .../x/sys/unix/zerrors_linux_amd64.go | 3 +
 .../x/sys/unix/zerrors_linux_arm.go | 3 +
 .../x/sys/unix/zerrors_linux_arm64.go | 3 +
 .../x/sys/unix/zerrors_linux_loong64.go | 3 +
 .../x/sys/unix/zerrors_linux_mips.go | 3 +
 .../x/sys/unix/zerrors_linux_mips64.go | 3 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 3 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 3 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 3 +
 .../x/sys/unix/zerrors_linux_s390x.go | 3 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 3 +
 .../x/sys/unix/zsyscall_solaris_amd64.go | 8 +-
 .../x/sys/unix/zsysnum_linux_386.go | 1 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 1 +
 .../x/sys/unix/zsysnum_linux_arm.go | 1 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 1 +
 .../x/sys/unix/zsysnum_linux_loong64.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips64le.go | 1 +
 .../x/sys/unix/zsysnum_linux_mipsle.go | 1 +
 .../x/sys/unix/zsysnum_linux_ppc.go | 1 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 1 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 +
 .../x/sys/unix/zsysnum_linux_riscv64.go | 1 +
 .../x/sys/unix/zsysnum_linux_s390x.go | 1 +
 .../x/sys/unix/zsysnum_linux_sparc64.go | 1 +
 .../golang.org/x/sys/unix/ztypes_linux.go | 180 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 18 +-
 .../x/sys/unix/ztypes_linux_amd64.go | 16 +
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 20 +-
 .../x/sys/unix/ztypes_linux_arm64.go | 16 +
 .../x/sys/unix/ztypes_linux_loong64.go | 16 +
 .../x/sys/unix/ztypes_linux_mips.go | 18 +-
 .../x/sys/unix/ztypes_linux_mips64.go | 16 +
 .../x/sys/unix/ztypes_linux_mips64le.go | 16 +
 .../x/sys/unix/ztypes_linux_mipsle.go | 18 +-
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 20 +-
 .../x/sys/unix/ztypes_linux_ppc64.go | 16 +
 .../x/sys/unix/ztypes_linux_ppc64le.go | 16 +
 .../x/sys/unix/ztypes_linux_riscv64.go | 16 +
 .../x/sys/unix/ztypes_linux_s390x.go | 16 +
 .../x/sys/unix/ztypes_linux_sparc64.go | 16 +
 .../sys/windows/registry/zsyscall_windows.go | 16 +-
 .../x/sys/windows/security_windows.go | 49 +-
 .../x/sys/windows/syscall_windows.go | 8 +-
 .../golang.org/x/sys/windows/types_windows.go | 234 +
 .../x/sys/windows/zsyscall_windows.go | 991 +-
 .../vendor/golang.org/x/term/term_windows.go | 4 +-
 tools/vendor/golang.org/x/term/terminal.go | 77 +-
 .../golang.org/x/text/unicode/bidi/core.go | 11 +-
 tools/vendor/golang.org/x/time/rate/rate.go | 28 +-
 .../golang.org/x/time/rate/sometimes.go | 4 +-
 .../x/tools/go/ast/astutil/enclosing.go | 23 +-
 .../x/tools/go/ast/astutil/imports.go | 3 +-
 .../x/tools/go/ast/astutil/rewrite.go | 4 +
 .../x/tools/go/gcexportdata/gcexportdata.go | 5 +-
 .../golang.org/x/tools/go/packages/doc.go | 2 +
 .../x/tools/go/packages/external.go | 2 +-
 .../golang.org/x/tools/go/packages/golist.go | 20 +-
 .../x/tools/go/packages/golist_overlay.go | 2 +-
 .../x/tools/go/packages/packages.go | 14 -
 .../golang.org/x/tools/go/packages/visit.go | 85 +-
 .../x/tools/go/types/objectpath/objectpath.go | 7 +-
 .../x/tools/go/types/typeutil/callee.go | 83 +-
 .../x/tools/go/types/typeutil/map.go | 19 +-
 .../x/tools/internal/event/label/label.go | 7 +-
 .../x/tools/internal/gcimporter/iexport.go | 34 +-
 .../x/tools/internal/gcimporter/iimport.go | 3 +-
 .../internal/gcimporter/iimport_go122.go | 53 -
 .../x/tools/internal/gocommand/invoke.go | 2 +-
 .../x/tools/internal/gopathwalk/walk.go | 7 +-
 .../x/tools/internal/imports/fix.go | 18 +-
 .../x/tools/internal/imports/imports.go | 2 +-
 .../x/tools/internal/imports/mod.go | 5 +-
 .../x/tools/internal/imports/mod_cache.go | 4 +-
 .../x/tools/internal/imports/sortimports.go | 5 +-
 .../tools/internal/imports/source_modindex.go | 47 +-
 .../x/tools/internal/modindex/directories.go | 148 +-
 .../x/tools/internal/modindex/index.go | 233 +-
 .../x/tools/internal/modindex/lookup.go | 22 +-
 .../x/tools/internal/modindex/modindex.go | 205 +-
 .../x/tools/internal/modindex/symbols.go | 73 +-
 .../x/tools/internal/modindex/types.go | 25 -
 .../internal/packagesinternal/packages.go | 9 +-
 .../x/tools/internal/pkgbits/decoder.go | 2 +-
 .../x/tools/internal/stdlib/deps.go | 602 +-
 .../x/tools/internal/stdlib/manifest.go | 34676 ++++++++--------
 .../x/tools/internal/stdlib/stdlib.go | 8 +
 .../x/tools/internal/typeparams/free.go | 2 +-
 .../x/tools/internal/typeparams/termlist.go | 12 +-
 .../x/tools/internal/typeparams/typeterm.go | 3 +
 .../internal/typesinternal/classify_call.go | 137 +
 .../x/tools/internal/typesinternal/types.go | 76 +-
 .../api/annotations/annotations.pb.go | 119 +
 .../googleapis/api/annotations/client.pb.go | 2103 +
 .../api/annotations/field_behavior.pb.go | 266 +
 .../api/annotations/field_info.pb.go | 392 +
 .../googleapis/api/annotations/http.pb.go | 774 +
 .../googleapis/api/annotations/resource.pb.go | 659 +
 .../googleapis/api/annotations/routing.pb.go | 693 +
 .../api/expr/v1alpha1/checked.pb.go | 2 +-
 .../googleapis/api/expr/v1alpha1/eval.pb.go | 2 +-
 .../api/expr/v1alpha1/explain.pb.go | 2 +-
 .../googleapis/api/expr/v1alpha1/syntax.pb.go | 2 +-
 .../googleapis/api/expr/v1alpha1/value.pb.go | 2 +-
 .../googleapis/api/httpbody/httpbody.pb.go | 8 +-
 .../googleapis/api/launch_stage.pb.go | 203 +
 .../rpc/errdetails/error_details.pb.go | 339 +-
 .../googleapis/rpc/status/status.pb.go | 2 +-
 .../google.golang.org/grpc/CONTRIBUTING.md | 129 +-
 .../google.golang.org/grpc/MAINTAINERS.md | 8 +-
 tools/vendor/google.golang.org/grpc/README.md | 1 +
 .../grpc/balancer/balancer.go | 123 +-
 .../grpc/balancer/base/balancer.go | 12 +-
 .../endpointsharding/endpointsharding.go | 389 +
 .../balancer/pickfirst/internal/internal.go | 17 +-
 .../grpc/balancer/pickfirst/pickfirst.go | 2 +-
 .../pickfirst/pickfirstleaf/pickfirstleaf.go | 497 +-
 .../grpc/balancer/roundrobin/roundrobin.go | 65 +-
 .../grpc/balancer/subconn.go | 134 +
 .../grpc/balancer_wrapper.go | 151 +-
 .../grpc_binarylog_v1/binarylog.pb.go | 481 +-
 .../google.golang.org/grpc/clientconn.go | 97 +-
 tools/vendor/google.golang.org/grpc/codec.go | 2 +-
 .../grpc/credentials/credentials.go | 59 +-
 .../grpc/credentials/insecure/insecure.go | 8 +-
 .../google.golang.org/grpc/credentials/tls.go | 52 +-
 .../google.golang.org/grpc/dialoptions.go | 80 +-
 .../grpc/experimental/stats/metricregistry.go | 27 +-
 .../grpc/experimental/stats/metrics.go | 78 +-
 .../grpc/grpclog/internal/loggerv2.go | 107 +-
 .../grpc/health/grpc_health_v1/health.pb.go | 240 +-
 .../health/grpc_health_v1/health_grpc.pb.go | 68 +-
 .../grpc/internal/backoff/backoff.go | 2 +-
 .../balancer/gracefulswitch/gracefulswitch.go | 20 +-
 .../grpc/internal/credentials/credentials.go | 14 -
 .../grpc/internal/envconfig/envconfig.go | 44 +-
 .../grpc/internal/envconfig/xds.go | 15 +
 .../grpc/internal/grpcsync/event.go | 19 +-
 .../grpc/internal/internal.go | 86 +-
 .../grpc/internal/metadata/metadata.go | 26 +-
 .../proxyattributes/proxyattributes.go | 54 +
 .../delegatingresolver/delegatingresolver.go | 427 +
 .../internal/resolver/dns/dns_resolver.go | 61 +-
 .../grpc/internal/status/status.go | 8 +
 .../grpc/internal/transport/client_stream.go | 144 +
 .../grpc/internal/transport/controlbuf.go | 68 +-
 .../grpc/internal/transport/flowcontrol.go | 9 +-
 .../grpc/internal/transport/handler_server.go | 40 +-
 .../grpc/internal/transport/http2_client.go | 152 +-
 .../grpc/internal/transport/http2_server.go | 143 +-
 .../grpc/internal/transport/http_util.go | 8 +-
 .../grpc/internal/transport/proxy.go | 62 +-
 .../grpc/internal/transport/server_stream.go | 180 +
 .../grpc/internal/transport/transport.go | 331 +-
 .../grpc/mem/buffer_slice.go | 70 +-
 .../google.golang.org/grpc/picker_wrapper.go | 38 +-
 .../google.golang.org/grpc/preloader.go | 4 +-
 .../google.golang.org/grpc/resolver/map.go | 174 +-
 .../grpc/resolver/resolver.go | 30 +-
 .../grpc/resolver_wrapper.go | 36 +-
 .../vendor/google.golang.org/grpc/rpc_util.go | 150 +-
 tools/vendor/google.golang.org/grpc/server.go | 145 +-
 .../google.golang.org/grpc/service_config.go | 22 +-
 .../google.golang.org/grpc/stats/handlers.go | 9 +
 .../google.golang.org/grpc/stats/metrics.go | 81 +
 .../google.golang.org/grpc/stats/stats.go | 129 +-
 tools/vendor/google.golang.org/grpc/stream.go | 429 +-
 .../vendor/google.golang.org/grpc/version.go | 2 +-
 .../helm/v3/internal/version/version.go | 2 +-
 .../helm.sh/helm/v3/pkg/action/action.go | 12 +-
 .../helm.sh/helm/v3/pkg/action/dependency.go | 15 +-
 .../helm/v3/pkg/action/get_metadata.go | 55 +-
 .../helm.sh/helm/v3/pkg/action/hooks.go | 80 +-
 .../helm.sh/helm/v3/pkg/action/install.go | 7 +-
 .../helm.sh/helm/v3/pkg/action/package.go | 11 +-
 .../vendor/helm.sh/helm/v3/pkg/action/push.go | 2 +-
 .../helm/v3/pkg/action/registry_login.go | 25 +-
 .../helm.sh/helm/v3/pkg/chart/dependency.go | 16 +-
 .../helm/v3/pkg/chart/loader/archive.go | 32 +-
 .../helm/v3/pkg/chart/loader/directory.go | 4 +
 .../helm.sh/helm/v3/pkg/chart/loader/load.go | 4 +-
 .../helm.sh/helm/v3/pkg/chartutil/coalesce.go | 12 +
 .../helm.sh/helm/v3/pkg/chartutil/create.go | 32 +-
 .../helm/v3/pkg/chartutil/dependencies.go | 5 +-
 .../helm.sh/helm/v3/pkg/chartutil/expand.go | 3 +
 .../helm/v3/pkg/chartutil/jsonschema.go | 93 +-
 .../helm.sh/helm/v3/pkg/cli/environment.go | 3 +-
 .../v3/pkg/downloader/chart_downloader.go | 38 +-
 .../helm.sh/helm/v3/pkg/downloader/manager.go | 14 +
 .../helm.sh/helm/v3/pkg/engine/engine.go | 4 +-
 .../helm.sh/helm/v3/pkg/engine/funcs.go | 31 +
 .../helm.sh/helm/v3/pkg/getter/getter.go | 8 +
 .../helm.sh/helm/v3/pkg/getter/httpgetter.go | 4 +
 .../helm.sh/helm/v3/pkg/getter/ocigetter.go | 4 +
 .../vendor/helm.sh/helm/v3/pkg/kube/client.go | 155 +-
 .../helm.sh/helm/v3/pkg/kube/fake/fake.go | 12 +
 .../helm.sh/helm/v3/pkg/kube/fake/printer.go | 15 +-
 .../helm.sh/helm/v3/pkg/kube/interface.go | 24 +-
 .../helm.sh/helm/v3/pkg/kube/resource.go | 2 +-
 .../helm/v3/pkg/{cli => kube}/roundtripper.go | 12 +-
 tools/vendor/helm.sh/helm/v3/pkg/kube/wait.go | 14 +-
 .../helm/v3/pkg/lint/rules/chartfile.go | 3 +
 .../helm.sh/helm/v3/pkg/plugin/hooks.go | 3 +
 .../helm.sh/helm/v3/pkg/plugin/plugin.go | 181 +-
 .../helm.sh/helm/v3/pkg/pusher/ocipusher.go | 5 +-
 .../helm.sh/helm/v3/pkg/registry/client.go | 654 +-
 .../helm.sh/helm/v3/pkg/registry/fallback.go | 60 +
 .../helm.sh/helm/v3/pkg/registry/reference.go | 78 +
 .../helm.sh/helm/v3/pkg/registry/transport.go | 187 +
 .../helm.sh/helm/v3/pkg/registry/util.go | 51 +-
 .../helm.sh/helm/v3/pkg/release/hook.go | 16 +
 .../helm.sh/helm/v3/pkg/release/mock.go | 18 +
 .../v3/pkg/releaseutil/manifest_sorter.go | 27 +-
 .../vendor/helm.sh/helm/v3/pkg/repo/index.go | 1 +
 .../helm/v3/pkg/storage/driver/cfgmaps.go | 5 +-
 .../helm/v3/pkg/storage/driver/secrets.go | 5 +-
 .../helm.sh/helm/v3/pkg/time/ctime/ctime.go | 6 +-
 .../helm/v3/pkg/time/ctime/ctime_linux.go | 4 +-
 .../helm/v3/pkg/time/ctime/ctime_other.go | 2 +-
 tools/vendor/k8s.io/api/admission/v1/doc.go | 2 +-
 .../k8s.io/api/admission/v1beta1/doc.go | 2 +-
 .../api/admissionregistration/v1/doc.go | 2 +-
 .../api/admissionregistration/v1alpha1/doc.go | 2 +-
 .../v1alpha1/generated.proto | 13 +-
 .../admissionregistration/v1alpha1/types.go | 23 +-
 .../v1alpha1/types_swagger_doc_generated.go | 8 +-
 .../api/admissionregistration/v1beta1/doc.go | 2 +-
 .../vendor/k8s.io/api/apidiscovery/v2/doc.go | 2 +-
 .../k8s.io/api/apidiscovery/v2beta1/doc.go | 2 +-
 .../api/apiserverinternal/v1alpha1/doc.go | 2 +-
 tools/vendor/k8s.io/api/apps/v1/doc.go | 2 +-
 .../vendor/k8s.io/api/apps/v1/generated.pb.go | 336 +-
 .../vendor/k8s.io/api/apps/v1/generated.proto | 41 +-
 tools/vendor/k8s.io/api/apps/v1/types.go | 41 +-
 .../apps/v1/types_swagger_doc_generated.go | 24 +-
 .../api/apps/v1/zz_generated.deepcopy.go | 10 +
 tools/vendor/k8s.io/api/apps/v1beta1/doc.go | 2 +-
 .../k8s.io/api/apps/v1beta1/generated.pb.go | 286 +-
 .../k8s.io/api/apps/v1beta1/generated.proto | 22 +-
 tools/vendor/k8s.io/api/apps/v1beta1/types.go | 22 +-
 .../v1beta1/types_swagger_doc_generated.go | 15 +-
 .../api/apps/v1beta1/zz_generated.deepcopy.go | 5 +
 tools/vendor/k8s.io/api/apps/v1beta2/doc.go | 2 +-
 .../k8s.io/api/apps/v1beta2/generated.pb.go | 352 +-
 .../k8s.io/api/apps/v1beta2/generated.proto | 41 +-
 tools/vendor/k8s.io/api/apps/v1beta2/types.go | 41 +-
 .../v1beta2/types_swagger_doc_generated.go | 24 +-
 .../api/apps/v1beta2/zz_generated.deepcopy.go | 10 +
 .../k8s.io/api/authentication/v1/doc.go | 2 +-
 .../k8s.io/api/authentication/v1alpha1/doc.go | 2 +-
 .../k8s.io/api/authentication/v1beta1/doc.go | 2 +-
 .../vendor/k8s.io/api/authorization/v1/doc.go | 2 +-
 .../k8s.io/api/authorization/v1beta1/doc.go | 2 +-
 tools/vendor/k8s.io/api/autoscaling/v1/doc.go | 2 +-
 tools/vendor/k8s.io/api/autoscaling/v2/doc.go | 2 +-
 .../k8s.io/api/autoscaling/v2/generated.pb.go | 272 +-
 .../k8s.io/api/autoscaling/v2/generated.proto | 30 +-
 .../vendor/k8s.io/api/autoscaling/v2/types.go | 30 +-
 .../v2/types_swagger_doc_generated.go | 5 +-
 .../autoscaling/v2/zz_generated.deepcopy.go | 5 +
 .../k8s.io/api/autoscaling/v2beta1/doc.go | 2 +-
 .../k8s.io/api/autoscaling/v2beta2/doc.go | 2 +-
 tools/vendor/k8s.io/api/batch/v1/doc.go | 2 +-
 .../k8s.io/api/batch/v1/generated.proto | 10 -
 tools/vendor/k8s.io/api/batch/v1/types.go | 15 -
 .../batch/v1/types_swagger_doc_generated.go | 10 +-
 tools/vendor/k8s.io/api/batch/v1beta1/doc.go | 2 +-
 .../vendor/k8s.io/api/certificates/v1/doc.go | 2 +-
 .../k8s.io/api/certificates/v1alpha1/doc.go | 2 +-
 .../k8s.io/api/certificates/v1beta1/doc.go | 2 +-
 .../api/certificates/v1beta1/generated.pb.go | 761 +-
 .../api/certificates/v1beta1/generated.proto | 73 +
 .../api/certificates/v1beta1/register.go | 2 +
 .../k8s.io/api/certificates/v1beta1/types.go | 85 +
 .../v1beta1/types_swagger_doc_generated.go | 30 +
 .../v1beta1/zz_generated.deepcopy.go | 76 +
 .../zz_generated.prerelease-lifecycle.go | 36 +
 .../vendor/k8s.io/api/coordination/v1/doc.go | 2 +-
 .../k8s.io/api/coordination/v1alpha2/doc.go | 2 +-
 .../api/coordination/v1alpha2/generated.proto | 2 -
 .../k8s.io/api/coordination/v1alpha2/types.go | 2 -
 .../v1alpha2/types_swagger_doc_generated.go | 2 +-
 .../k8s.io/api/coordination/v1beta1/doc.go | 2 +-
 .../api/coordination/v1beta1/generated.pb.go | 915 +-
 .../api/coordination/v1beta1/generated.proto | 69 +
 .../api/coordination/v1beta1/register.go | 2 +
 .../k8s.io/api/coordination/v1beta1/types.go | 73 +
 .../v1beta1/types_swagger_doc_generated.go | 34 +
 .../v1beta1/zz_generated.deepcopy.go | 84 +
 .../zz_generated.prerelease-lifecycle.go | 36 +
 tools/vendor/k8s.io/api/core/v1/doc.go | 2 +-
 .../vendor/k8s.io/api/core/v1/generated.pb.go | 2767 +-
 .../vendor/k8s.io/api/core/v1/generated.proto | 77 +-
 tools/vendor/k8s.io/api/core/v1/lifecycle.go | 24 +
 tools/vendor/k8s.io/api/core/v1/types.go | 201 +-
 .../core/v1/types_swagger_doc_generated.go | 50 +-
 .../api/core/v1/zz_generated.deepcopy.go | 38 +-
 tools/vendor/k8s.io/api/discovery/v1/doc.go | 2 +-
 .../k8s.io/api/discovery/v1/generated.pb.go | 336 +-
 .../k8s.io/api/discovery/v1/generated.proto | 78 +-
 tools/vendor/k8s.io/api/discovery/v1/types.go | 78 +-
 .../v1/types_swagger_doc_generated.go | 28 +-
 .../api/discovery/v1/zz_generated.deepcopy.go | 21 +
 .../k8s.io/api/discovery/v1beta1/doc.go | 2 +-
 .../api/discovery/v1beta1/generated.pb.go | 329 +-
 .../api/discovery/v1beta1/generated.proto | 13 +
 .../k8s.io/api/discovery/v1beta1/types.go | 13 +
 .../v1beta1/types_swagger_doc_generated.go | 10 +
 .../v1beta1/zz_generated.deepcopy.go | 21 +
 tools/vendor/k8s.io/api/events/v1/doc.go | 2 +-
 tools/vendor/k8s.io/api/events/v1beta1/doc.go | 2 +-
 .../k8s.io/api/extensions/v1beta1/doc.go | 2 +-
 .../api/extensions/v1beta1/generated.pb.go | 418 +-
 .../api/extensions/v1beta1/generated.proto | 40 +-
 .../k8s.io/api/extensions/v1beta1/types.go | 40 +-
 .../v1beta1/types_swagger_doc_generated.go | 24 +-
 .../v1beta1/zz_generated.deepcopy.go | 10 +
 tools/vendor/k8s.io/api/flowcontrol/v1/doc.go | 2 +-
 .../k8s.io/api/flowcontrol/v1beta1/doc.go | 2 +-
 .../k8s.io/api/flowcontrol/v1beta2/doc.go | 2 +-
 .../k8s.io/api/flowcontrol/v1beta3/doc.go | 2 +-
 .../k8s.io/api/imagepolicy/v1alpha1/doc.go | 2 +-
 tools/vendor/k8s.io/api/networking/v1/doc.go | 2 +-
 .../k8s.io/api/networking/v1/generated.pb.go | 3729 +-
 .../k8s.io/api/networking/v1/generated.proto | 109 +
 .../k8s.io/api/networking/v1/register.go | 4 +
 .../vendor/k8s.io/api/networking/v1/types.go | 130 +
 .../v1/types_swagger_doc_generated.go | 80 +
 .../api/networking/v1/well_known_labels.go | 33 +
 .../networking/v1/zz_generated.deepcopy.go | 202 +
 .../v1/zz_generated.prerelease-lifecycle.go | 24 +
 .../k8s.io/api/networking/v1alpha1/doc.go | 2 +-
 .../k8s.io/api/networking/v1beta1/doc.go | 2 +-
 tools/vendor/k8s.io/api/node/v1/doc.go | 2 +-
 tools/vendor/k8s.io/api/node/v1alpha1/doc.go | 2 +-
 tools/vendor/k8s.io/api/node/v1beta1/doc.go | 2 +-
 tools/vendor/k8s.io/api/policy/v1/doc.go | 2 +-
 .../k8s.io/api/policy/v1/generated.proto | 3 -
 tools/vendor/k8s.io/api/policy/v1/types.go | 3 -
 .../policy/v1/types_swagger_doc_generated.go | 2 +-
 tools/vendor/k8s.io/api/policy/v1beta1/doc.go | 2 +-
 .../k8s.io/api/policy/v1beta1/generated.proto | 3 -
 .../vendor/k8s.io/api/policy/v1beta1/types.go | 3 -
 .../v1beta1/types_swagger_doc_generated.go | 2 +-
 tools/vendor/k8s.io/api/rbac/v1/doc.go | 2 +-
 tools/vendor/k8s.io/api/rbac/v1alpha1/doc.go | 2 +-
 tools/vendor/k8s.io/api/rbac/v1beta1/doc.go | 2 +-
 .../k8s.io/api/resource/v1alpha3/doc.go | 2 +-
 .../api/resource/v1alpha3/generated.pb.go | 6403 ++-
 .../api/resource/v1alpha3/generated.proto | 514 +-
 .../k8s.io/api/resource/v1alpha3/register.go | 2 +
 .../k8s.io/api/resource/v1alpha3/types.go | 613 +-
 .../v1alpha3/types_swagger_doc_generated.go | 165 +-
 .../v1alpha3/zz_generated.deepcopy.go | 327 +-
 .../zz_generated.prerelease-lifecycle.go | 36 +
 .../api/resource/v1beta1/devicetaint.go | 35 +
 .../vendor/k8s.io/api/resource/v1beta1/doc.go | 2 +-
 .../api/resource/v1beta1/generated.pb.go | 3890 +-
 .../api/resource/v1beta1/generated.proto | 428 +-
 .../k8s.io/api/resource/v1beta1/types.go | 510 +-
 .../v1beta1/types_swagger_doc_generated.go | 124 +-
 .../resource/v1beta1/zz_generated.deepcopy.go | 202 +-
 .../api/resource/v1beta2/devicetaint.go | 35 +
 .../vendor/k8s.io/api/resource/v1beta2/doc.go | 24 +
 .../api/resource/v1beta2/generated.pb.go | 11047 +++++
 .../api/resource/v1beta2/generated.proto | 1278 +
 .../k8s.io/api/resource/v1beta2/register.go | 60 +
 .../k8s.io/api/resource/v1beta2/types.go | 1552 +
 .../v1beta2/types_swagger_doc_generated.go | 464 +
 .../resource/v1beta2/zz_generated.deepcopy.go | 1092 +
 .../zz_generated.prerelease-lifecycle.go | 166 +
 tools/vendor/k8s.io/api/scheduling/v1/doc.go | 2 +-
 .../k8s.io/api/scheduling/v1alpha1/doc.go | 2 +-
 .../k8s.io/api/scheduling/v1beta1/doc.go | 2 +-
 tools/vendor/k8s.io/api/storage/v1/doc.go | 2 +-
 .../k8s.io/api/storage/v1/generated.pb.go | 271 +-
 .../k8s.io/api/storage/v1/generated.proto | 22 +
 tools/vendor/k8s.io/api/storage/v1/types.go | 22 +
 .../storage/v1/types_swagger_doc_generated.go | 26 +-
 .../api/storage/v1/zz_generated.deepcopy.go | 10 +
 .../vendor/k8s.io/api/storage/v1alpha1/doc.go | 2 +-
 .../api/storage/v1alpha1/generated.pb.go | 160 +-
 .../api/storage/v1alpha1/generated.proto | 8 +
 .../k8s.io/api/storage/v1alpha1/types.go | 8 +
 .../v1alpha1/types_swagger_doc_generated.go | 7 +-
 .../storage/v1alpha1/zz_generated.deepcopy.go | 5 +
 .../vendor/k8s.io/api/storage/v1beta1/doc.go | 2 +-
 .../api/storage/v1beta1/generated.pb.go | 280 +-
 .../api/storage/v1beta1/generated.proto | 22 +
 .../k8s.io/api/storage/v1beta1/types.go | 22 +
 .../v1beta1/types_swagger_doc_generated.go | 26 +-
 .../storage/v1beta1/zz_generated.deepcopy.go | 10 +
 .../api/storagemigration/v1alpha1/doc.go | 2 +-
 .../pkg/apis/apiextensions/doc.go | 2 +-
 .../pkg/apis/apiextensions/v1/doc.go | 2 +-
 .../pkg/apis/apiextensions/v1beta1/doc.go | 2 +-
 .../apiextensions/validation/validation.go | 3 +-
 .../pkg/apiserver/validation/formats.go | 133 +-
 .../pkg/apiserver/validation/validation.go | 4 +-
 .../pkg/features/kube_features.go | 1 +
 .../k8s.io/apimachinery/pkg/api/errors/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/api/meta/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 +
 .../pkg/api/operation/operation.go | 56 +
 .../apimachinery/pkg/api/validation/doc.go | 2 +-
 .../pkg/api/validation/generic.go | 2 +-
 .../pkg/apis/meta/internalversion/doc.go | 2 +-
 .../apis/meta/internalversion/scheme/doc.go | 2 +-
 .../pkg/apis/meta/internalversion/types.go | 2 -
 .../apimachinery/pkg/apis/meta/v1/doc.go | 2 +-
 .../pkg/apis/meta/v1/micro_time_fuzz.go | 13 +-
 .../pkg/apis/meta/v1/time_fuzz.go | 13 +-
 .../pkg/apis/meta/v1/unstructured/helpers.go | 31 +-
 .../apis/meta/v1/unstructured/unstructured.go | 4 +-
 .../pkg/apis/meta/v1/validation/validation.go | 2 +-
 .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/conversion/doc.go | 2 +-
 .../pkg/conversion/queryparams/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/fields/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/labels/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/runtime/doc.go | 2 +-
 .../apimachinery/pkg/runtime/interfaces.go | 1 +
 .../k8s.io/apimachinery/pkg/runtime/scheme.go | 39 +
 .../serializer/cbor/internal/modes/custom.go | 4 +-
 .../pkg/runtime/serializer/codec_factory.go | 23 +-
 .../runtime/serializer/json/collections.go | 230 +
 .../pkg/runtime/serializer/json/json.go | 16 +-
 .../serializer/protobuf/collections.go | 174 +
 .../pkg/runtime/serializer/protobuf/doc.go | 2 +-
 .../runtime/serializer/protobuf/protobuf.go | 87 +-
 .../apimachinery/pkg/runtime/types_proto.go | 127 +-
 .../k8s.io/apimachinery/pkg/types/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/util/diff/diff.go | 2 +-
 .../apimachinery/pkg/util/errors/doc.go | 2 +-
 .../apimachinery/pkg/util/framer/framer.go | 6 +-
 .../apimachinery/pkg/util/httpstream/doc.go | 2 +-
 .../pkg/util/httpstream/wsstream/doc.go | 2 +-
 .../pkg/util/intstr/instr_fuzz.go | 14 +-
 .../pkg/util/jsonmergepatch/patch.go | 160 +
 .../k8s.io/apimachinery/pkg/util/proxy/doc.go | 2 +-
 .../apimachinery/pkg/util/runtime/runtime.go | 46 +-
 .../k8s.io/apimachinery/pkg/util/sets/doc.go | 2 +-
 .../util/validation/field/error_matcher.go | 212 +
 .../pkg/util/validation/field/errors.go | 132 +-
 .../apimachinery/pkg/util/validation/ip.go | 278 +
 .../pkg/util/validation/validation.go | 40 -
 .../apimachinery/pkg/util/version/doc.go | 2 +-
 .../apimachinery/pkg/util/version/version.go | 18 +-
 .../apimachinery/pkg/util/wait/backoff.go | 50 +-
 .../k8s.io/apimachinery/pkg/util/wait/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/util/wait/loop.go | 4 +-
 .../k8s.io/apimachinery/pkg/util/wait/wait.go | 9 +-
 .../apimachinery/pkg/util/yaml/decoder.go | 170 +-
 .../pkg/util/yaml/stream_reader.go | 130 +
 .../k8s.io/apimachinery/pkg/version/doc.go | 4 +-
 .../k8s.io/apimachinery/pkg/version/types.go | 28 +-
 .../k8s.io/apimachinery/pkg/watch/doc.go | 2 +-
 .../apimachinery/pkg/watch/streamwatcher.go | 15 +-
 .../k8s.io/apimachinery/pkg/watch/watch.go | 35 +-
 .../apiserver/pkg/apis/apiserver/doc.go | 2 +-
 .../apiserver/pkg/apis/apiserver/v1/doc.go | 2 +-
 .../pkg/apis/apiserver/v1alpha1/doc.go | 2 +-
 .../pkg/apis/apiserver/v1alpha1/types.go | 4 +-
 .../pkg/apis/apiserver/v1beta1/doc.go | 2 +-
 .../pkg/apis/apiserver/v1beta1/types.go | 4 +-
 .../k8s.io/apiserver/pkg/apis/audit/doc.go | 2 +-
 .../k8s.io/apiserver/pkg/apis/audit/v1/doc.go | 2 +-
 .../apiserver/pkg/authentication/user/doc.go | 2 +-
 .../apiserver/pkg/cel/environment/base.go | 17 +-
 .../k8s.io/apiserver/pkg/cel/library/cidr.go | 7 +-
 .../k8s.io/apiserver/pkg/cel/library/cost.go | 11 +-
 .../apiserver/pkg/cel/library/format.go | 12 +-
 .../k8s.io/apiserver/pkg/cel/library/ip.go | 4 +-
 .../apiserver/pkg/cel/library/semverlib.go | 115 +-
 .../apiserver/pkg/endpoints/request/doc.go | 2 +-
 .../pkg/endpoints/request/webhook_duration.go | 32 +
 .../apiserver/pkg/features/kube_features.go | 117 +-
 .../pkg/util/compatibility/registry.go | 53 +
 .../pkg/util/compatibility/version.go | 65 +
 .../cli-runtime/pkg/genericclioptions/doc.go | 2 +-
 .../k8s.io/cli-runtime/pkg/printers/doc.go | 2 +-
 .../cli-runtime/pkg/resource/builder.go | 2 +-
 .../k8s.io/cli-runtime/pkg/resource/doc.go | 2 +-
 .../pkg/resource/query_param_verifier.go | 2 +-
 .../k8s.io/cli-runtime/pkg/resource/result.go | 6 +-
 .../apps/v1/deploymentstatus.go | 9 +
 .../apps/v1/replicasetstatus.go | 9 +
 .../apps/v1beta1/deploymentstatus.go | 9 +
 .../apps/v1beta2/deploymentstatus.go | 9 +
 .../apps/v1beta2/replicasetstatus.go | 9 +
 .../autoscaling/v2/hpascalingrules.go | 10 +
 .../v1beta1/clustertrustbundle.go | 253 +
 .../v1beta1/clustertrustbundlespec.go | 48 +
 .../coordination/v1beta1/leasecandidate.go | 255 +
 .../v1beta1/leasecandidatespec.go | 89 +
 .../core/v1/containerstatus.go | 9 +
 .../applyconfigurations/core/v1/lifecycle.go | 17 +-
 .../core/v1/nodeswapstatus.go | 39 +
 .../core/v1/nodesysteminfo.go | 29 +-
 .../core/v1/podcondition.go | 9 +
 .../applyconfigurations/core/v1/podstatus.go | 9 +
 .../discovery/v1/endpointhints.go | 14 +
 .../discovery/v1/fornode.go | 39 +
 .../discovery/v1beta1/endpointhints.go | 14 +
 .../discovery/v1beta1/fornode.go | 39 +
 .../extensions/v1beta1/deploymentstatus.go | 9 +
 .../extensions/v1beta1/replicasetstatus.go | 9 +
 .../applyconfigurations/internal/internal.go | 1344 +-
 .../networking/v1/ipaddress.go | 253 +
 .../networking/v1/ipaddressspec.go | 39 +
 .../networking/v1/parentreference.go | 66 +
 .../networking/v1/servicecidr.go | 262 +
 .../networking/v1/servicecidrspec.go | 41 +
 .../networking/v1/servicecidrstatus.go | 48 +
 .../resource/v1alpha3/basicdevice.go | 60 +-
 .../resource/v1alpha3/counter.go | 43 +
 .../resource/v1alpha3/counterset.go | 54 +
 .../v1alpha3/devicecounterconsumption.go | 54 +
 .../resource/v1alpha3/devicerequest.go | 28 +
 .../v1alpha3/devicerequestallocationresult.go | 24 +-
 .../resource/v1alpha3/devicesubrequest.go | 98 +
 .../resource/v1alpha3/devicetaint.go | 71 +
 .../resource/v1alpha3/devicetaintrule.go | 253 +
 .../resource/v1alpha3/devicetaintrulespec.go | 48 +
 .../resource/v1alpha3/devicetaintselector.go | 80 +
 .../resource/v1alpha3/devicetoleration.go | 79 +
 .../resource/v1alpha3/resourceslicespec.go | 35 +-
 .../resource/v1beta1/basicdevice.go | 60 +-
 .../resource/v1beta1/counter.go | 43 +
 .../resource/v1beta1/counterset.go | 54 +
 .../v1beta1/devicecounterconsumption.go | 54 +
 .../resource/v1beta1/devicerequest.go | 28 +
 .../v1beta1/devicerequestallocationresult.go | 24 +-
 .../resource/v1beta1/devicesubrequest.go | 98 +
 .../resource/v1beta1/devicetaint.go | 71 +
 .../resource/v1beta1/devicetoleration.go | 79 +
 .../resource/v1beta1/resourceslicespec.go | 35 +-
 .../resource/v1beta2/allocateddevicestatus.go | 94 +
 .../resource/v1beta2/allocationresult.go | 52 +
 .../resource/v1beta2/celdeviceselector.go | 39 +
 .../resource/v1beta2/counter.go | 43 +
 .../resource/v1beta2/counterset.go | 54 +
 .../resource/v1beta2/device.go | 129 +
 .../v1beta2/deviceallocationconfiguration.go | 63 +
 .../v1beta2/deviceallocationresult.go | 58 +
 .../resource/v1beta2/deviceattribute.go | 66 +
 .../resource/v1beta2/devicecapacity.go | 43 +
 .../resource/v1beta2/deviceclaim.go | 72 +
 .../v1beta2/deviceclaimconfiguration.go | 50 +
 .../resource/v1beta2/deviceclass.go | 253 +
 .../v1beta2/deviceclassconfiguration.go | 39 +
 .../resource/v1beta2/deviceclassspec.go | 58 +
 .../resource/v1beta2/deviceconfiguration.go | 39 +
 .../resource/v1beta2/deviceconstraint.go | 54 +
 .../v1beta2/devicecounterconsumption.go | 54 +
 .../resource/v1beta2/devicerequest.go | 62 +
 .../v1beta2/devicerequestallocationresult.go | 89 +
 .../resource/v1beta2/deviceselector.go | 39 +
 .../resource/v1beta2/devicesubrequest.go | 98 +
 .../resource/v1beta2/devicetaint.go | 71 +
 .../resource/v1beta2/devicetoleration.go | 79 +
 .../resource/v1beta2/exactdevicerequest.go | 98 +
 .../resource/v1beta2/networkdevicedata.go | 59 +
 .../v1beta2/opaquedeviceconfiguration.go | 52 +
 .../resource/v1beta2/resourceclaim.go | 264 +
 .../v1beta2/resourceclaimconsumerreference.go | 70 +
 .../resource/v1beta2/resourceclaimspec.go | 39 +
 .../resource/v1beta2/resourceclaimstatus.go | 67 +
 .../resource/v1beta2/resourceclaimtemplate.go | 255 +
 .../v1beta2/resourceclaimtemplatespec.go | 194 +
 .../resource/v1beta2/resourcepool.go | 57 +
 .../resource/v1beta2/resourceslice.go | 253 +
 .../resource/v1beta2/resourceslicespec.go | 116 +
 .../storage/v1/csidriverspec.go | 25 +-
 .../storage/v1/volumeerror.go | 13 +-
 .../storage/v1alpha1/volumeerror.go | 13 +-
 .../storage/v1beta1/csidriverspec.go | 25 +-
 .../storage/v1beta1/volumeerror.go | 13 +-
 .../discovery/aggregated_discovery.go | 2 +-
 .../client-go/discovery/discovery_client.go | 3 +-
 .../vendor/k8s.io/client-go/discovery/doc.go | 2 +-
 .../client-go/features/known_features.go | 7 +
 tools/vendor/k8s.io/client-go/gentype/fake.go | 1 +
 .../k8s.io/client-go/kubernetes/clientset.go | 13 +
 .../vendor/k8s.io/client-go/kubernetes/doc.go | 2 +-
 .../k8s.io/client-go/kubernetes/import.go | 2 +-
 .../client-go/kubernetes/scheme/register.go | 2 +
 .../v1/admissionregistration_client.go | 12 +-
 .../v1alpha1/admissionregistration_client.go | 12 +-
 .../v1beta1/admissionregistration_client.go | 12 +-
 .../v1alpha1/apiserverinternal_client.go | 12 +-
 .../kubernetes/typed/apps/v1/apps_client.go | 12 +-
 .../typed/apps/v1beta1/apps_client.go | 12 +-
 .../typed/apps/v1beta2/apps_client.go | 12 +-
 .../v1/authentication_client.go | 12 +-
 .../v1alpha1/authentication_client.go | 12 +-
 .../v1beta1/authentication_client.go | 12 +-
 .../authorization/v1/authorization_client.go | 12 +-
 .../v1beta1/authorization_client.go | 12 +-
 .../autoscaling/v1/autoscaling_client.go | 12 +-
 .../autoscaling/v2/autoscaling_client.go | 12 +-
 .../autoscaling/v2beta1/autoscaling_client.go | 12 +-
 .../autoscaling/v2beta2/autoscaling_client.go | 12 +-
 .../kubernetes/typed/batch/v1/batch_client.go | 12 +-
 .../typed/batch/v1beta1/batch_client.go | 12 +-
 .../certificates/v1/certificates_client.go | 12 +-
 .../v1alpha1/certificates_client.go | 12 +-
 .../v1beta1/certificates_client.go | 17 +-
 .../v1beta1/clustertrustbundle.go | 73 +
 .../v1beta1/generated_expansion.go | 2 +
 .../coordination/v1/coordination_client.go | 12 +-
 .../v1alpha2/coordination_client.go | 12 +-
 .../v1beta1/coordination_client.go | 17 +-
 .../v1beta1/generated_expansion.go | 2 +
 .../coordination/v1beta1/leasecandidate.go | 71 +
 .../kubernetes/typed/core/v1/core_client.go | 12 +-
 .../typed/core/v1/event_expansion.go | 65 +-
 .../typed/discovery/v1/discovery_client.go | 12 +-
 .../discovery/v1beta1/discovery_client.go | 12 +-
 .../typed/events/v1/events_client.go | 12 +-
 .../typed/events/v1beta1/event_expansion.go | 6 +-
 .../typed/events/v1beta1/events_client.go | 12 +-
 .../extensions/v1beta1/extensions_client.go | 12 +-
 .../flowcontrol/v1/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta1/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta2/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta3/flowcontrol_client.go | 12 +-
 .../networking/v1/generated_expansion.go | 4 +
 .../typed/networking/v1/ipaddress.go | 71 +
 .../typed/networking/v1/networking_client.go | 22 +-
 .../typed/networking/v1/servicecidr.go | 75 +
 .../networking/v1alpha1/networking_client.go | 12 +-
 .../networking/v1beta1/networking_client.go | 12 +-
 .../kubernetes/typed/node/v1/node_client.go | 12 +-
 .../typed/node/v1alpha1/node_client.go | 12 +-
 .../typed/node/v1beta1/node_client.go | 12 +-
 .../typed/policy/v1/policy_client.go | 12 +-
 .../typed/policy/v1beta1/policy_client.go | 12 +-
 .../kubernetes/typed/rbac/v1/rbac_client.go | 12 +-
 .../typed/rbac/v1alpha1/rbac_client.go | 12 +-
 .../typed/rbac/v1beta1/rbac_client.go | 12 +-
 .../resource/v1alpha3/devicetaintrule.go | 71 +
 .../resource/v1alpha3/generated_expansion.go | 2 +
 .../resource/v1alpha3/resource_client.go | 17 +-
 .../typed/resource/v1beta1/resource_client.go | 12 +-
 .../typed/resource/v1beta2/deviceclass.go | 71 +
 .../kubernetes/typed/resource/v1beta2/doc.go | 20 +
 .../resource/v1beta2/generated_expansion.go | 27 +
 .../typed/resource/v1beta2/resource_client.go | 116 +
 .../typed/resource/v1beta2/resourceclaim.go | 75 +
 .../resource/v1beta2/resourceclaimtemplate.go | 71 +
 .../typed/resource/v1beta2/resourceslice.go | 71 +
 .../typed/scheduling/v1/scheduling_client.go | 12 +-
 .../scheduling/v1alpha1/scheduling_client.go | 12 +-
 .../scheduling/v1beta1/scheduling_client.go | 12 +-
 .../typed/storage/v1/storage_client.go | 12 +-
 .../typed/storage/v1alpha1/storage_client.go | 12 +-
 .../typed/storage/v1beta1/storage_client.go | 12 +-
 .../v1alpha1/storagemigration_client.go | 12 +-
 .../pkg/apis/clientauthentication/doc.go | 2 +-
 .../pkg/apis/clientauthentication/v1/doc.go | 2 +-
 .../apis/clientauthentication/v1beta1/doc.go | 2 +-
 .../k8s.io/client-go/pkg/version/doc.go | 2 +-
 .../k8s.io/client-go/rest/.mockery.yaml | 10 +
 tools/vendor/k8s.io/client-go/rest/client.go | 6 +-
 tools/vendor/k8s.io/client-go/rest/config.go | 85 +-
 tools/vendor/k8s.io/client-go/rest/plugin.go | 7 +-
 tools/vendor/k8s.io/client-go/rest/request.go | 138 +-
 .../k8s.io/client-go/rest/urlbackoff.go | 101 +-
 .../vendor/k8s.io/client-go/rest/warnings.go | 57 +-
 .../k8s.io/client-go/rest/with_retry.go | 12 +-
 tools/vendor/k8s.io/client-go/scale/doc.go | 2 +-
 .../client-go/scale/scheme/appsint/doc.go | 2 +-
 .../client-go/scale/scheme/appsv1beta1/doc.go | 2 +-
 .../client-go/scale/scheme/appsv1beta2/doc.go | 2 +-
 .../scale/scheme/autoscalingv1/doc.go | 2 +-
 .../k8s.io/client-go/scale/scheme/doc.go | 2 +-
 .../scale/scheme/extensionsint/doc.go | 2 +-
 .../scale/scheme/extensionsv1beta1/doc.go | 2 +-
 .../client-go/tools/cache/controller.go | 84 +-
 .../client-go/tools/cache/delta_fifo.go | 125 +-
 .../k8s.io/client-go/tools/cache/doc.go | 2 +-
 .../k8s.io/client-go/tools/cache/fifo.go | 97 +-
 .../k8s.io/client-go/tools/cache/listers.go | 7 +-
 .../k8s.io/client-go/tools/cache/listwatch.go | 174 +-
 .../client-go/tools/cache/mutation_cache.go | 8 +-
 .../tools/cache/mutation_detector.go | 1 +
 .../k8s.io/client-go/tools/cache/reflector.go | 238 +-
 .../client-go/tools/cache/shared_informer.go | 194 +-
 .../client-go/tools/cache/the_real_fifo.go | 407 +
 .../client-go/tools/clientcmd/api/doc.go | 2 +-
 .../client-go/tools/clientcmd/api/v1/doc.go | 2 +-
 .../k8s.io/client-go/tools/clientcmd/doc.go | 2 +-
 .../k8s.io/client-go/tools/record/doc.go | 2 +-
 .../k8s.io/client-go/tools/record/event.go | 2 +-
 .../client-go/tools/record/util/util.go | 17 +
 .../client-go/tools/remotecommand/doc.go | 2 +-
 .../tools/remotecommand/errorstream.go | 2 +-
 .../tools/remotecommand/websocket.go | 19 +-
 .../client-go/tools/watch/informerwatcher.go | 18 +-
 .../client-go/tools/watch/retrywatcher.go | 103 +-
 .../k8s.io/client-go/tools/watch/until.go | 6 +-
 .../k8s.io/client-go/transport/cache.go | 8 +-
 .../client-go/transport/cert_rotation.go | 17 +-
 .../client-go/transport/round_trippers.go | 192 +-
 .../client-go/transport/token_source.go | 5 +-
 .../k8s.io/client-go/transport/transport.go | 2 +-
 .../vendor/k8s.io/client-go/util/cert/cert.go | 48 +-
 .../data_consistency_detector.go | 2 +-
 .../client-go/util/flowcontrol/backoff.go | 5 +-
 .../k8s.io/client-go/util/jsonpath/doc.go | 2 +-
 .../util/workqueue/delaying_queue.go | 19 +-
 .../k8s.io/client-go/util/workqueue/doc.go | 2 +-
 .../client-go/util/workqueue/parallelizer.go | 2 +-
 tools/vendor/k8s.io/code-generator/LICENSE | 202 +
 .../cmd/applyconfiguration-gen/args/args.go | 88 +
 .../args/externaltypes.go | 122 +
 .../generators/applyconfiguration.go | 479 +
 .../generators/internal.go | 98 +
 .../generators/jsontagutil.go | 99 +
.../generators/openapi.go | 192 +
.../generators/refgraph.go | 175 +
.../generators/targets.go | 314 +
.../generators/types.go | 36 +
.../applyconfiguration-gen/generators/util.go | 176 +
.../client-gen/generators/util/gvpackages.go} | 23 +-
.../cmd/client-gen/generators/util/tags.go | 344 +
.../cmd/client-gen/types/helpers.go | 121 +
.../cmd/client-gen/types/types.go | 109 +
.../component-base/cli/flag/tracker_flag.go | 82 +
.../component-base/compatibility/OWNERS | 13 +
.../registry.go | 96 +-
.../component-base/compatibility/version.go | 239 +
.../featuregate/feature_gate.go | 41 +-
.../component-base/tracing/api/v1/doc.go | 2 +-
.../k8s.io/component-base/version/base.go | 2 +-
.../k8s.io/component-base/version/version.go | 159 +-
.../gengo/v2/LICENSE} | 2 +-
tools/vendor/k8s.io/gengo/v2/Makefile | 14 +
tools/vendor/k8s.io/gengo/v2/README.md | 53 +
tools/vendor/k8s.io/gengo/v2/comments.go | 290 +
tools/vendor/k8s.io/gengo/v2/execute.go | 98 +
tools/vendor/k8s.io/gengo/v2/generator/doc.go | 31 +
.../gengo/v2/generator/error_tracker.go | 50 +
.../k8s.io/gengo/v2/generator/execute.go | 273 +
.../k8s.io/gengo/v2/generator/generator.go | 214 +
.../k8s.io/gengo/v2/generator/go_generator.go | 61 +
.../gengo/v2/generator/import_tracker.go | 96 +
.../gengo/v2/generator/simple_target.go | 77 +
.../gengo/v2/generator/snippet_writer.go | 188 +
tools/vendor/k8s.io/gengo/v2/namer/doc.go | 31 +
.../k8s.io/gengo/v2/namer/import_tracker.go | 121 +
tools/vendor/k8s.io/gengo/v2/namer/namer.go | 399 +
tools/vendor/k8s.io/gengo/v2/namer/order.go | 72 +
.../k8s.io/gengo/v2/namer/plural_namer.go | 120 +
tools/vendor/k8s.io/gengo/v2/parser/doc.go | 19 +
tools/vendor/k8s.io/gengo/v2/parser/parse.go | 888 +
.../k8s.io/gengo/v2/parser/parse_122.go | 33 +
.../k8s.io/gengo/v2/parser/parse_pre_122.go | 30 +
tools/vendor/k8s.io/gengo/v2/types/doc.go | 19 +
tools/vendor/k8s.io/gengo/v2/types/types.go | 575 +
.../k8s.io/kube-openapi/pkg/spec3/fuzz.go | 146 +-
.../pkg/validation/strfmt/format.go | 24 +
.../strfmt/kubernetes-extensions.go | 143 +
.../k8s.io/kubectl/pkg/cmd/util/helpers.go | 20 +-
.../pkg/util/i18n/translations/README.md | 2 +-
.../k8s.io/kubectl/pkg/util/openapi/doc.go | 2 +-
.../k8s.io/kubectl/pkg/util/slice/slice.go | 15 +
.../k8s.io/utils/buffer/ring_growing.go | 122 +-
.../k8s.io/utils/clock/testing/fake_clock.go | 361 -
.../clock/testing/simple_interval_clock.go | 44 -
tools/vendor/k8s.io/utils/lru/lru.go | 2 +
tools/vendor/k8s.io/utils/net/multi_listen.go | 6 +-
tools/vendor/modules.txt | 769 +-
.../oras.land/oras-go/pkg/auth/client.go | 45 -
.../oras.land/oras-go/pkg/auth/client_opts.go | 123 -
.../oras-go/pkg/auth/docker/client.go | 123 -
.../oras-go/pkg/auth/docker/login.go | 103 -
.../oras-go/pkg/auth/docker/login_tls.go | 220 -
.../oras-go/pkg/auth/docker/logout.go | 42 -
.../oras-go/pkg/auth/docker/resolver.go | 86 -
.../oras.land/oras-go/pkg/content/consts.go | 57 -
.../oras-go/pkg/content/decompress.go | 151 -
.../oras.land/oras-go/pkg/content/file.go | 534 -
.../oras.land/oras-go/pkg/content/gunzip.go | 72 -
.../oras.land/oras-go/pkg/content/iowriter.go | 112 -
.../oras.land/oras-go/pkg/content/manifest.go | 95 -
.../oras.land/oras-go/pkg/content/memory.go | 284 -
.../oras-go/pkg/content/multireader.go | 56 -
.../oras-go/pkg/content/multiwriter.go | 42 -
.../oras.land/oras-go/pkg/content/oci.go | 335 -
.../oras.land/oras-go/pkg/content/opts.go | 112 -
.../oras-go/pkg/content/passthrough.go | 286 -
.../oras.land/oras-go/pkg/content/readerat.go | 68 -
.../oras.land/oras-go/pkg/content/registry.go | 84 -
.../oras.land/oras-go/pkg/content/untar.go | 157 -
.../oras.land/oras-go/pkg/content/utils.go | 223 -
.../oras.land/oras-go/pkg/context/logger.go | 50 -
.../vendor/oras.land/oras-go/pkg/oras/copy.go | 213 -
.../oras.land/oras-go/pkg/oras/errors.go | 42 -
.../vendor/oras.land/oras-go/pkg/oras/opts.go | 254 -
.../oras.land/oras-go/pkg/oras/provider.go | 79 -
.../oras.land/oras-go/pkg/oras/store.go | 213 -
.../oras-go/pkg/registry/reference.go | 177 -
.../remote/internal/errutil/errors.go | 83 -
.../oras-go/pkg/registry/remote/repository.go | 171 -
.../oras-go/pkg/registry/remote/url.go | 42 -
.../oras-go/pkg/registry/repository.go | 57 -
tools/vendor/oras.land/oras-go/v2/.gitignore | 41 +
tools/vendor/oras.land/oras-go/v2/CODEOWNERS | 2 +
.../oras.land/oras-go/v2/CODE_OF_CONDUCT.md | 3 +
.../vendor/oras.land/oras-go/{ => v2}/LICENSE | 0
.../oras.land/oras-go/v2/MIGRATION_GUIDE.md | 61 +
tools/vendor/oras.land/oras-go/v2/Makefile | 38 +
tools/vendor/oras.land/oras-go/v2/OWNERS.md | 11 +
tools/vendor/oras.land/oras-go/v2/README.md | 66 +
tools/vendor/oras.land/oras-go/v2/SECURITY.md | 3 +
tools/vendor/oras.land/oras-go/v2/content.go | 411 +
.../oras-go/v2/content/descriptor.go | 40 +
.../oras.land/oras-go/v2/content/graph.go | 122 +
.../oras-go/v2/content/limitedstorage.go | 50 +
.../oras-go/v2/content/memory/memory.go | 96 +
.../oras.land/oras-go/v2/content/oci/oci.go | 636 +
.../oras-go/v2/content/oci/readonlyoci.go | 259 +
.../oras-go/v2/content/oci/readonlystorage.go | 99 +
.../oras-go/v2/content/oci/storage.go | 172 +
.../oras.land/oras-go/v2/content/reader.go | 149 +
.../oras.land/oras-go/v2/content/resolver.go | 47 +
.../oras.land/oras-go/v2/content/storage.go | 80 +
tools/vendor/oras.land/oras-go/v2/copy.go | 533 +
.../vendor/oras.land/oras-go/v2/copyerror.go | 78 +
.../oras.land/oras-go/v2/errdef/errors.go | 31 +
.../oras.land/oras-go/v2/extendedcopy.go | 404 +
.../oras-go/v2/internal/cas/memory.go | 88 +
.../oras-go/v2/internal/cas/proxy.go | 125 +
.../internal/container/set/set.go} | 43 +-
.../oras-go/v2/internal/copyutil/stack.go | 55 +
.../v2/internal/descriptor/descriptor.go | 89 +
.../oras-go/v2/internal/docker/mediatype.go | 24 +
.../oras-go/v2/internal/fs/tarfs/tarfs.go | 167 +
.../oras-go/v2/internal/graph/memory.go | 201 +
.../oras-go/v2/internal/httputil/seek.go | 116 +
.../internal/interfaces/registry.go} | 14 +-
.../oras-go/v2/internal/ioutil/io.go | 66 +
.../v2/internal/manifestutil/parser.go | 84 +
.../oras-go/v2/internal/platform/platform.go | 145 +
.../oras-go/v2/internal/registryutil/proxy.go | 102 +
.../oras-go/v2/internal/resolver/memory.go | 105 +
.../oras-go/v2/internal/spec/artifact.go | 57 +
.../oras-go/v2/internal/status/tracker.go | 43 +
.../oras-go/v2/internal/syncutil/limit.go | 107 +
.../v2/internal/syncutil/limitgroup.go | 67 +
.../oras-go/v2/internal/syncutil/merge.go | 140 +
.../remote => v2}/internal/syncutil/once.go | 37 +-
.../oras-go/v2/internal/syncutil/pool.go | 64 +
tools/vendor/oras.land/oras-go/v2/pack.go | 448 +
.../oras-go/v2/registry/reference.go | 276 +
.../oras.land/oras-go/v2/registry/registry.go | 52 +
.../{pkg => v2}/registry/remote/auth/cache.go | 78 +-
.../registry/remote/auth/challenge.go | 3 +-
.../registry/remote/auth/client.go | 125 +-
.../registry/remote/auth/credential.go | 5 +-
.../{pkg => v2}/registry/remote/auth/scope.go | 114 +-
.../registry/remote/credentials/file_store.go | 97 +
.../credentials/internal/config/config.go | 332 +
.../credentials/internal/executer/executer.go | 80 +
.../credentials/internal/ioutil/ioutil.go | 49 +
.../remote/credentials/memory_store.go | 81 +
.../remote/credentials/native_store.go | 139 +
.../credentials/native_store_darwin.go} | 13 +-
.../credentials/native_store_generic.go} | 17 +-
.../remote/credentials/native_store_linux.go | 29 +
.../credentials/native_store_windows.go} | 13 +-
.../registry/remote/credentials/registry.go | 102 +
.../v2/registry/remote/credentials/store.go | 262 +
.../remote/credentials/trace/trace.go | 94 +
.../v2/registry/remote/errcode/errors.go | 128 +
.../remote/internal/errutil/errutil.go | 54 +
.../oras-go/v2/registry/remote/manifest.go | 59 +
.../oras-go/v2/registry/remote/referrers.go | 225 +
.../oras-go/v2/registry/remote/registry.go | 190 +
.../oras-go/v2/registry/remote/repository.go | 1681 +
.../v2/registry/remote/retry/client.go | 114 +
.../v2/registry/remote/retry/policy.go | 154 +
.../oras-go/v2/registry/remote/url.go | 119 +
.../{pkg => v2}/registry/remote/utils.go | 40 +-
.../oras-go/v2/registry/remote/warning.go | 100 +
.../oras-go/v2/registry/repository.go | 226 +
tools/vendor/oras.land/oras-go/v2/target.go | 43 +
.../proto/client/client.pb.go | 1 -
.../pkg/client/apiutil/restmapper.go | 137 +-
.../controller-runtime/pkg/client/client.go | 13 +-
.../pkg/client/config/config.go | 16 +-
.../pkg/client/interfaces.go | 14 +-
.../controllerutil/controllerutil.go | 76 +-
.../pkg/log/warning_handler.go | 27 +-
.../cmd/controller-gen/main.go | 21 +-
.../pkg/applyconfiguration/doc.go | 18 +
.../pkg/applyconfiguration/gen.go | 236 +
.../zz_generated.markerhelp.go | 41 +
.../pkg/crd/markers/validation.go | 27 +
.../crd/markers/zz_generated.markerhelp.go | 16 +
.../controller-tools/pkg/genall/output.go | 6 +
.../controller-tools/pkg/rbac/parser.go | 10 +-
.../pkg/rbac/zz_generated.markerhelp.go | 4 +
.../kubebuilder/v4/pkg/cli/alpha.go | 16 +-
.../kubebuilder/v4/pkg/cli/alpha/command.go | 65 +
.../kubebuilder/v4/pkg/cli/alpha/generate.go | 50 -
.../v4/pkg/cli/alpha/internal/generate.go | 490 +
.../sigs.k8s.io/kubebuilder/v4/pkg/cli/api.go | 2 +-
.../sigs.k8s.io/kubebuilder/v4/pkg/cli/cli.go | 28 +-
.../kubebuilder/v4/pkg/cli/cmd_helpers.go | 53 +-
.../kubebuilder/v4/pkg/cli/edit.go | 2 +-
.../kubebuilder/v4/pkg/cli/options.go | 66 +-
.../kubebuilder/v4/pkg/cli/resource.go | 8 +-
.../kubebuilder/v4/pkg/cli/root.go | 4 +-
.../kubebuilder/v4/pkg/cli/webhook.go | 2 +-
.../kubebuilder/v4/pkg/config/interface.go | 10 +-
.../kubebuilder/v4/pkg/config/registry.go | 4 +-
.../v4/pkg/config/store/yaml/store.go | 16 +-
.../kubebuilder/v4/pkg/config/v3/config.go | 43 +-
.../kubebuilder/v4/pkg/config/version.go | 25 +-
.../v4/pkg/internal/validation/dns.go | 6 +-
.../kubebuilder/v4/pkg/machinery/scaffold.go | 67 +-
.../kubebuilder/v4/pkg/model/resource/gvk.go | 16 +-
.../v4/pkg/model/resource/resource.go | 13 +-
.../v4/pkg/model/resource/utils.go | 20 +-
.../v4/pkg/model/resource/webhooks.go | 58 +-
.../kubebuilder/v4/pkg/plugin/bundle.go | 6 +-
.../kubebuilder/v4/pkg/plugin/helpers.go | 10 +-
.../kubebuilder/v4/pkg/plugin/util/exec.go | 8 +-
.../kubebuilder/v4/pkg/plugin/util/util.go | 159 +-
.../kubebuilder/v4/pkg/plugin/version.go | 14 +-
.../v4/pkg/plugins/common/kustomize/v2/api.go | 8 +-
.../pkg/plugins/common/kustomize/v2/create.go | 4 +-
.../pkg/plugins/common/kustomize/v2/init.go | 21 +-
.../pkg/plugins/common/kustomize/v2/plugin.go | 3 +-
.../common/kustomize/v2/scaffolds/api.go | 32 +-
.../common/kustomize/v2/scaffolds/init.go | 20 +-
.../config/certmanager/certificate_metrics.go | 68 +
...{certificate.go => certificate_webhook.go} | 25 +-
.../templates/config/certmanager/issuer.go | 60 +
.../config/certmanager/kustomization.go | 6 +-
.../config/certmanager/kustomizeconfig.go | 3 +-
.../templates/config/crd/kustomization.go | 38 +-
.../templates/config/crd/kustomizeconfig.go | 2 +-
.../crd/patches/enablecainjection_patch.go | 3 +-
.../config/crd/patches/enablewebhook_patch.go | 3 +-
.../kdefault/cert_metrics_manager_patch.go | 84 +
.../config/kdefault/enablecainection_patch.go | 73 -
.../config/kdefault/kustomization.go | 289 +-
.../kustomization_conversion_updater.go | 100 +
.../config/kdefault/manager_metrics_patch.go | 2 +-
.../config/kdefault/metrics_service.go | 3 +-
.../config/kdefault/webhook_manager_patch.go | 60 +-
.../templates/config/manager/config.go | 19 +-
.../templates/config/manager/kustomization.go | 2 +-
.../network-policy/allow-metrics-traffic.go | 15 +-
.../network-policy/allow-webhook-traffic.go | 13 +-
.../config/network-policy/kustomization.go | 4 +-
.../config/prometheus/kustomization.go | 11 +-
.../templates/config/prometheus/monitor.go | 16 +-
.../config/prometheus/monitor_tls_patch.go | 64 +
.../templates/config/rbac/crd_admin_role.go | 94 +
.../templates/config/rbac/crd_editor_role.go | 11 +-
.../templates/config/rbac/crd_viewer_role.go | 11 +-
.../templates/config/rbac/kustomization.go | 2 +-
.../config/rbac/leader_election_role.go | 2 +-
.../rbac/leader_election_role_binding.go | 2 +-
.../config/rbac/metrics_auth_role.go | 2 +-
.../config/rbac/metrics_auth_role_binding.go | 2 +-
.../config/rbac/metrics_reader_role.go | 2 +-
.../internal/templates/config/rbac/role.go | 2 +-
.../templates/config/rbac/role_binding.go | 2 +-
.../templates/config/rbac/service_account.go | 2 +-
.../templates/config/samples/crd_sample.go | 2 +-
.../templates/config/samples/kustomization.go | 1 -
.../templates/config/webhook/kustomization.go | 2 +-
.../config/webhook/kustomizeconfig.go | 2 +-
.../templates/config/webhook/service.go | 3 +-
.../common/kustomize/v2/scaffolds/webhook.go | 155 +-
.../plugins/common/kustomize/v2/webhook.go | 8 +-
.../v4/pkg/plugins/external/helpers.go | 55 +-
.../v4/pkg/plugins/external/plugin.go | 1 +
.../golang/deploy-image/v1alpha1/api.go | 83 +-
.../golang/deploy-image/v1alpha1/plugin.go | 3 +
.../deploy-image/v1alpha1/scaffolds/api.go | 81 +-
.../scaffolds/internal/templates/api/types.go | 5 +-
.../templates/config/samples/crd_sample.go | 9 +-
.../templates/controllers/controller-test.go | 92 +-
.../templates/controllers/controller.go | 107 +-
.../v4/pkg/plugins/golang/go_version.go | 28 +-
.../v4/pkg/plugins/golang/options.go | 84 +-
.../v4/pkg/plugins/golang/repository.go | 13 +-
.../v4/pkg/plugins/golang/v4/api.go | 32 +-
.../v4/pkg/plugins/golang/v4/edit.go | 6 +-
.../v4/pkg/plugins/golang/v4/init.go | 52 +-
.../v4/pkg/plugins/golang/v4/plugin.go | 1 +
.../v4/pkg/plugins/golang/v4/scaffolds/api.go | 21 +-
.../pkg/plugins/golang/v4/scaffolds/edit.go | 17 +-
.../pkg/plugins/golang/v4/scaffolds/init.go | 49 +-
.../scaffolds/internal/templates/api/group.go | 8 +-
.../scaffolds/internal/templates/api/hub.go | 73 +
.../scaffolds/internal/templates/api/spoke.go | 96 +
.../scaffolds/internal/templates/api/types.go | 13 +-
.../internal/templates/api/webhook.go | 158 -
.../templates/api/webhook_suitetest.go | 263 -
.../templates/api/webhook_test_template.go | 120 -
.../internal/templates/{ => cmd}/main.go | 155 +-
.../templates/controllers/controller.go | 14 +-
.../controllers/controller_suitetest.go | 76 +-
.../controllers/controller_test_template.go | 5 +-
.../internal/templates/devcontainer.go | 110 +
.../internal/templates/dockerfile.go | 6 +-
.../internal/templates/dockerignore.go | 2 +-
.../internal/templates/github/lint.go | 72 +
.../internal/templates/github/test-e2e.go | 79 +
.../internal/templates/github/test.go | 69 +
.../scaffolds/internal/templates/gitignore.go | 2 +-
.../scaffolds/internal/templates/golangci.go | 62 +-
.../v4/scaffolds/internal/templates/gomod.go | 4 +-
.../internal/templates/hack/boilerplate.go | 19 +-
.../scaffolds/internal/templates/makefile.go | 56 +-
.../v4/scaffolds/internal/templates/readme.go | 38 +-
.../internal/templates/test/e2e/suite.go | 67 +-
.../internal/templates/test/e2e/test.go | 465 +-
.../internal/templates/test/utils/utils.go | 155 +-
.../internal/templates/webhooks/webhook.go | 267 +
.../templates/webhooks/webhook_suitetest.go | 457 +
.../webhooks/webhook_test_template.go | 220 +
.../plugins/golang/v4/scaffolds/webhook.go | 92 +-
.../v4/pkg/plugins/golang/v4/webhook.go | 95 +-
.../optional/grafana/v1alpha/commons.go | 8 +-
.../optional/grafana/v1alpha/constants.go | 4 +-
.../plugins/optional/grafana/v1alpha/edit.go | 15 +-
.../plugins/optional/grafana/v1alpha/init.go | 15 +-
.../optional/grafana/v1alpha/plugin.go | 5 +-
.../grafana/v1alpha/scaffolds/edit.go | 27 +-
.../grafana/v1alpha/scaffolds/init.go | 10 +-
.../scaffolds/internal/templates/custom.go | 5 +-
.../internal/templates/custom_metrics.go | 7 +-
.../scaffolds/internal/templates/resources.go | 5 +-
.../scaffolds/internal/templates/runtime.go | 6 +-
.../plugins/optional/helm/v1alpha/commons.go | 38 +
.../pkg/plugins/optional/helm/v1alpha/edit.go | 86 +
.../pkg/plugins/optional/helm/v1alpha/init.go | 59 +
.../plugins/optional/helm/v1alpha/plugin.go | 66 +
.../optional/helm/v1alpha/scaffolds/init.go | 565 +
.../cert-manager/certificate.go | 110 +
.../templates/chart-templates/helpers_tpl.go | 104 +
.../chart-templates/manager/manager.go | 159 +
.../metrics/metrics_service.go | 63 +
.../chart-templates/prometheus/monitor.go | 85 +
.../chart-templates/webhook/service.go | 64 +
.../chart-templates/webhook/webhook.go | 166 +
.../scaffolds/internal/templates/chart.go | 53 +
.../internal/templates/github/test_chart.go | 137 +
.../internal/templates/helmignore.go | 70 +
.../scaffolds/internal/templates/values.go | 144 +
.../kubebuilder/v4/pkg/rescaffold/migrate.go | 355 -
.../sigs.k8s.io/randfill/CONTRIBUTING.md | 43 +
tools/vendor/sigs.k8s.io/randfill/LICENSE | 202 +
tools/vendor/sigs.k8s.io/randfill/NOTICE | 24 +
tools/vendor/sigs.k8s.io/randfill/OWNERS | 8 +
.../sigs.k8s.io/randfill/OWNERS_ALIASES | 14 +
.../gofuzz => sigs.k8s.io/randfill}/README.md | 45 +-
.../sigs.k8s.io/randfill/SECURITY_CONTACTS | 16 +
.../randfill}/bytesource/bytesource.go | 0
.../sigs.k8s.io/randfill/code-of-conduct.md | 3 +
tools/vendor/sigs.k8s.io/randfill/randfill.go | 682 +
.../structured-merge-diff/v4/merge/update.go | 50 +-
.../structured-merge-diff/v4/typed/typed.go | 47 +-
.../v4/typed/validate.go | 4 +-
.../v4/value/jsontagutil.go | 63 +-
.../v4/value/reflectcache.go | 14 +-
.../structured-merge-diff/v4/value/scalar.go | 2 +-
tools/vendor/sigs.k8s.io/yaml/.travis.yml | 12 -
.../vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS | 24 -
.../sigs.k8s.io/yaml/goyaml.v2/README.md | 200 +-
.../yaml/goyaml.v2/yaml_aliases.go | 85 +
.../vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS | 24 -
.../sigs.k8s.io/yaml/goyaml.v3/README.md | 210 +-
.../sigs.k8s.io/yaml/goyaml.v3/patch.go | 39 -
.../yaml/goyaml.v3/yaml_aliases.go | 130 +
tools/vendor/sigs.k8s.io/yaml/yaml.go | 11 +-
2839 files changed, 268342 insertions(+), 78875 deletions(-)
create mode 100644 tools/vendor/dario.cat/mergo/FUNDING.json
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/.clang-format
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles
rename tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/{cim_mount.go => cimfs.go} (70%)
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go
delete mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go
delete mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go
create mode 100644 tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go
create mode 100644 tools/vendor/github.com/VividCortex/ewma/.gitignore
create mode 100644 tools/vendor/github.com/VividCortex/ewma/.whitesource
create mode 100644 tools/vendor/github.com/VividCortex/ewma/LICENSE
create mode 100644 tools/vendor/github.com/VividCortex/ewma/README.md
create mode 100644 tools/vendor/github.com/VividCortex/ewma/codecov.yml
create mode 100644 tools/vendor/github.com/VividCortex/ewma/ewma.go
create mode 100644 tools/vendor/github.com/acarl005/stripansi/LICENSE
create mode 100644 tools/vendor/github.com/acarl005/stripansi/README.md
create mode 100644 tools/vendor/github.com/acarl005/stripansi/stripansi.go
create mode 100644 tools/vendor/github.com/antlr4-go/antlr/v4/mutex.go
create mode 100644 tools/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go
delete mode 100644 tools/vendor/github.com/cenkalti/backoff/v4/context.go
delete mode 100644 tools/vendor/github.com/cenkalti/backoff/v4/exponential.go
delete mode 100644 tools/vendor/github.com/cenkalti/backoff/v4/retry.go
delete mode 100644 tools/vendor/github.com/cenkalti/backoff/v4/tries.go
rename tools/vendor/github.com/cenkalti/backoff/{v4 => v5}/.gitignore (100%)
create mode 100644 tools/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
rename tools/vendor/github.com/cenkalti/backoff/{v4 => v5}/LICENSE (100%)
rename tools/vendor/github.com/cenkalti/backoff/{v4 => v5}/README.md (64%)
rename tools/vendor/github.com/cenkalti/backoff/{v4 => v5}/backoff.go (87%)
create mode 100644 tools/vendor/github.com/cenkalti/backoff/v5/error.go
create mode 100644 tools/vendor/github.com/cenkalti/backoff/v5/exponential.go
create mode 100644 tools/vendor/github.com/cenkalti/backoff/v5/retry.go
rename tools/vendor/github.com/cenkalti/backoff/{v4 => v5}/ticker.go (80%)
rename tools/vendor/github.com/cenkalti/backoff/{v4 => v5}/timer.go (96%)
delete mode 100644 tools/vendor/github.com/containerd/containerd/log/context_deprecated.go
create mode 100644 tools/vendor/github.com/containerd/typeurl/v2/types_gogo.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/.gitignore
create mode 100644 tools/vendor/github.com/containers/ocicrypt/.golangci.yml
create mode 100644 tools/vendor/github.com/containers/ocicrypt/ADOPTERS.md
create mode 100644 tools/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
create mode 100644 tools/vendor/github.com/containers/ocicrypt/MAINTAINERS
create mode 100644 tools/vendor/github.com/containers/ocicrypt/Makefile
create mode 100644 tools/vendor/github.com/containers/ocicrypt/README.md
create mode 100644 tools/vendor/github.com/containers/ocicrypt/SECURITY.md
create mode 100644 tools/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/config/config.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/config/constructors.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/encryption.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/gpg.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/gpgvault.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/reader.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/utils/ioutils.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto
create mode 100644 tools/vendor/github.com/containers/ocicrypt/utils/testing.go
create mode 100644 tools/vendor/github.com/containers/ocicrypt/utils/utils.go
create mode 100644 tools/vendor/github.com/cyberphone/json-canonicalization/LICENSE
create mode 100644 tools/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go
create mode 100644 tools/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go
create mode 100644 tools/vendor/github.com/docker/cli/cli/config/memorystore/store.go
delete mode 100644 tools/vendor/github.com/docker/distribution/.dockerignore
delete mode 100644 tools/vendor/github.com/docker/distribution/.golangci.yml
delete mode 100644 tools/vendor/github.com/docker/distribution/.mailmap
delete mode 100644 tools/vendor/github.com/docker/distribution/BUILDING.md
delete mode 100644 tools/vendor/github.com/docker/distribution/CONTRIBUTING.md
delete mode 100644 tools/vendor/github.com/docker/distribution/Dockerfile
delete mode 100644 tools/vendor/github.com/docker/distribution/MAINTAINERS
delete mode 100644 tools/vendor/github.com/docker/distribution/Makefile
delete mode 100644 tools/vendor/github.com/docker/distribution/README.md
delete mode 100644 tools/vendor/github.com/docker/distribution/ROADMAP.md
delete mode 100644 tools/vendor/github.com/docker/distribution/blobs.go
delete mode 100644 tools/vendor/github.com/docker/distribution/doc.go
delete mode 100644 tools/vendor/github.com/docker/distribution/docker-bake.hcl
delete mode 100644 tools/vendor/github.com/docker/distribution/errors.go
delete mode 100644 tools/vendor/github.com/docker/distribution/manifests.go
delete mode 100644 tools/vendor/github.com/docker/distribution/metrics/prometheus.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/client/auth/session.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/client/blob_writer.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/client/errors.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/client/repository.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/client/transport/transport.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
delete mode 100644 tools/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
delete mode 100644 tools/vendor/github.com/docker/distribution/tags.go
delete mode 100644 tools/vendor/github.com/docker/distribution/vendor.conf
delete mode 100644 tools/vendor/github.com/docker/docker/api/types/filters/errors.go
delete mode 100644 tools/vendor/github.com/docker/docker/api/types/filters/parse.go
delete mode 100644 tools/vendor/github.com/docker/docker/api/types/registry/authconfig.go
delete mode 100644 tools/vendor/github.com/docker/docker/api/types/registry/authenticate.go
delete mode 100644 tools/vendor/github.com/docker/docker/api/types/registry/registry.go
delete mode 100644 tools/vendor/github.com/docker/docker/api/types/registry/search.go
delete mode 100644 tools/vendor/github.com/docker/docker/errdefs/defs.go
delete mode 100644 tools/vendor/github.com/docker/docker/errdefs/doc.go
delete mode 100644 tools/vendor/github.com/docker/docker/errdefs/helpers.go
delete mode 100644 tools/vendor/github.com/docker/docker/errdefs/http_helpers.go
delete mode 100644 tools/vendor/github.com/docker/docker/errdefs/is.go
delete mode 100644 tools/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
delete mode 100644 tools/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
delete mode 100644 tools/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
delete mode 100644 tools/vendor/github.com/docker/docker/pkg/ioutils/readers.go
delete mode 100644 tools/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
delete mode 100644 tools/vendor/github.com/docker/docker/pkg/ioutils/writers.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/auth.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/config.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/config_unix.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/config_windows.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/errors.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/registry.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/search.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/search_endpoint_v1.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/search_session.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/service.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/service_v2.go
delete mode 100644 tools/vendor/github.com/docker/docker/registry/types.go
delete mode 100644 tools/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
delete mode 100644 tools/vendor/github.com/docker/go-metrics/LICENSE.docs
delete mode 100644 tools/vendor/github.com/docker/go-metrics/NOTICE
delete mode 100644 tools/vendor/github.com/docker/go-metrics/README.md
delete mode 100644 tools/vendor/github.com/docker/go-metrics/counter.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/docs.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/gauge.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/handler.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/helpers.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/namespace.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/register.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/timer.go
delete mode 100644 tools/vendor/github.com/docker/go-metrics/unit.go
create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/shared.go
create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/staticcheck.conf
delete mode 100644 tools/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
create mode 100644 tools/vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
create mode 100644 tools/vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/.gitignore
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/.golangci.yml
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/.travis.yml
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md
rename tools/vendor/github.com/{google/gofuzz => go-jose/go-jose/v4}/LICENSE (100%)
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/README.md
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/crypter.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/doc.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/encoding.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/json/LICENSE
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/json/README.md
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/json/decode.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/json/encode.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/json/indent.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/json/scanner.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/json/stream.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/json/tags.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/jwe.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/jwk.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/jws.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/opaque.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/shared.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/signing.go
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/symmetric.go
rename tools/vendor/{google.golang.org/grpc/internal/grpcsync/oncefunc.go => github.com/go-jose/go-jose/v4/symmetric_go124.go} (66%)
create mode 100644 tools/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go
create mode 100644 tools/vendor/github.com/go-openapi/jsonpointer/errors.go
create mode 100644 tools/vendor/github.com/go-openapi/swag/errors.go
create mode 100644 tools/vendor/github.com/go-viper/mapstructure/v2/errors.go
create mode 100644 tools/vendor/github.com/google/cel-go/cel/prompt.go
create mode 100644 tools/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl
create mode 100644 tools/vendor/github.com/google/cel-go/common/env/BUILD.bazel
create mode 100644 tools/vendor/github.com/google/cel-go/common/env/env.go
create mode 100644 tools/vendor/github.com/google/cel-go/common/types/format.go
create mode 100644 tools/vendor/github.com/google/cel-go/ext/extension_option_factory.go
create mode 100644 tools/vendor/github.com/google/cel-go/ext/formatting_v2.go
create mode 100644 tools/vendor/github.com/google/cel-go/ext/regex.go
delete mode 100644 tools/vendor/github.com/google/gofuzz/.travis.yml
delete mode 100644 tools/vendor/github.com/google/gofuzz/CONTRIBUTING.md
delete mode 100644 tools/vendor/github.com/google/gofuzz/fuzz.go
delete mode 100644 tools/vendor/github.com/gorilla/websocket/tls_handshake.go
delete mode 100644 tools/vendor/github.com/gorilla/websocket/tls_handshake_116.go
delete mode 100644 tools/vendor/github.com/gorilla/websocket/x_net_proxy.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/deflate.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/dict_decoder.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/fast_encoder.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/huffman_code.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/inflate.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/inflate_gen.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/level1.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/level2.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/level3.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/level4.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/level5.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/level6.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/regmask_other.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/stateless.go
create mode 100644 tools/vendor/github.com/klauspost/compress/flate/token.go
create mode 100644 tools/vendor/github.com/klauspost/compress/internal/le/le.go
create mode 100644 tools/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
create mode 100644 tools/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
create mode 100644 tools/vendor/github.com/klauspost/pgzip/.gitignore
create mode 100644 tools/vendor/github.com/klauspost/pgzip/.travis.yml
create mode 100644 tools/vendor/github.com/klauspost/pgzip/GO_LICENSE
create mode 100644 tools/vendor/github.com/klauspost/pgzip/LICENSE
create mode 100644 tools/vendor/github.com/klauspost/pgzip/README.md
create mode 100644 tools/vendor/github.com/klauspost/pgzip/gunzip.go
create mode 100644 tools/vendor/github.com/klauspost/pgzip/gzip.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/LICENSE.txt
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/core/challenges.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/core/interfaces.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/core/objects.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/core/proto/core.proto
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/core/util.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/identifier/identifier.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/probs/probs.go
create mode 100644 tools/vendor/github.com/letsencrypt/boulder/revocation/reasons.go
create mode 100644 tools/vendor/github.com/miekg/pkcs11/.gitignore
rename tools/vendor/github.com/{syndtr/gocapability => miekg/pkcs11}/LICENSE (60%)
create mode 100644 tools/vendor/github.com/miekg/pkcs11/Makefile.release
create mode 100644 tools/vendor/github.com/miekg/pkcs11/README.md
create mode 100644 tools/vendor/github.com/miekg/pkcs11/error.go
create mode 100644 tools/vendor/github.com/miekg/pkcs11/hsm.db
create mode 100644 tools/vendor/github.com/miekg/pkcs11/params.go
create mode 100644 tools/vendor/github.com/miekg/pkcs11/pkcs11.go
create mode 100644 tools/vendor/github.com/miekg/pkcs11/pkcs11.h
create mode 100644 tools/vendor/github.com/miekg/pkcs11/pkcs11f.h
create mode 100644 tools/vendor/github.com/miekg/pkcs11/pkcs11go.h
create mode 100644 tools/vendor/github.com/miekg/pkcs11/pkcs11t.h
create mode 100644 tools/vendor/github.com/miekg/pkcs11/release.go
create mode 100644 tools/vendor/github.com/miekg/pkcs11/softhsm.conf
create mode 100644 tools/vendor/github.com/miekg/pkcs11/softhsm2.conf
create mode 100644 tools/vendor/github.com/miekg/pkcs11/types.go
create mode 100644 tools/vendor/github.com/miekg/pkcs11/vendor.go
create mode 100644 tools/vendor/github.com/miekg/pkcs11/zconst.go
create mode 100644 tools/vendor/github.com/moby/sys/user/idtools.go
create mode 100644 tools/vendor/github.com/moby/sys/user/idtools_unix.go
create mode 100644 tools/vendor/github.com/moby/sys/user/idtools_windows.go
create mode 100644 tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go
delete mode 100644 tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/log/null.go
delete mode 100644 tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/log/writerhook.go
delete mode 100644 tools/vendor/github.com/otiai10/copy/fileinfo_go1.15.go
delete mode 100644 tools/vendor/github.com/otiai10/copy/fileinfo_go1.16.go
create mode 100644 tools/vendor/github.com/otiai10/copy/symlink_test_x.go
delete mode 100644 tools/vendor/github.com/otiai10/copy/test_setup.go
delete mode 100644 tools/vendor/github.com/otiai10/copy/test_setup_x.go
create mode 100644 tools/vendor/github.com/otiai10/mint/.gitignore
create mode 100644 tools/vendor/github.com/otiai10/mint/LICENSE
create mode 100644 tools/vendor/github.com/otiai10/mint/README.md
create mode 100644 tools/vendor/github.com/otiai10/mint/because.go
create mode 100644 tools/vendor/github.com/otiai10/mint/comparer.go
create mode 100644 tools/vendor/github.com/otiai10/mint/exit.go
create mode 100644 tools/vendor/github.com/otiai10/mint/exit_freebsd.go
create mode 100644 tools/vendor/github.com/otiai10/mint/log.go
create mode 100644 tools/vendor/github.com/otiai10/mint/mint.go
create mode 100644 tools/vendor/github.com/otiai10/mint/mocks.go
create mode 100644 tools/vendor/github.com/otiai10/mint/mquery/README.md
create mode 100644 tools/vendor/github.com/otiai10/mint/mquery/mquery.go
create mode 100644 tools/vendor/github.com/otiai10/mint/result.go
create mode 100644 tools/vendor/github.com/otiai10/mint/testee.go
create mode 100644 tools/vendor/github.com/proglottis/gpgme/.gitignore
create mode 100644 tools/vendor/github.com/proglottis/gpgme/LICENSE
create mode 100644 tools/vendor/github.com/proglottis/gpgme/README.md
create mode 100644 tools/vendor/github.com/proglottis/gpgme/data.go
create mode 100644 tools/vendor/github.com/proglottis/gpgme/go_gpgme.c
create mode 100644 tools/vendor/github.com/proglottis/gpgme/go_gpgme.h
create mode 100644 tools/vendor/github.com/proglottis/gpgme/gpgme.go
create mode 100644 tools/vendor/github.com/proglottis/gpgme/unset_agent_info.go
create mode 100644 tools/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go
create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go
create mode 100644 tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work.sum
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go
create mode 100644 tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go
create mode 100644 tools/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE
create mode 100644 tools/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go
create mode 100644 tools/vendor/github.com/sergi/go-diff/diffmatchpatch/index.go
rename tools/vendor/github.com/{google/gofuzz/doc.go => sigstore/fulcio/COPYRIGHT.txt} (78%)
rename tools/vendor/github.com/{xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt => sigstore/fulcio/LICENSE} (99%)
create mode 100644 tools/vendor/github.com/sigstore/fulcio/pkg/certificate/doc.go
create mode 100644 tools/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
create mode 100644 tools/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt
rename tools/vendor/github.com/{xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt => sigstore/protobuf-specs/LICENSE} (99%)
create mode 100644 tools/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt
create mode 100644 tools/vendor/github.com/sigstore/sigstore/LICENSE
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/util.go
create mode 100644 tools/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
rename tools/vendor/github.com/{docker/distribution => smallstep/pkcs7}/.gitignore (60%)
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/LICENSE
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/Makefile
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/README.md
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/ber.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/decrypt.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/encrypt.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/pkcs7.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/sign.go
create mode 100644 tools/vendor/github.com/smallstep/pkcs7/verify.go
create mode 100644 tools/vendor/github.com/sourcegraph/conc/Makefile
delete mode 100644 tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go
delete mode 100644 tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go
delete mode 100644 tools/vendor/github.com/sourcegraph/conc/iter/iter.go
delete mode 100644 tools/vendor/github.com/sourcegraph/conc/iter/map.go
create mode 100644 tools/vendor/github.com/sourcegraph/conc/pool/context_pool.go
create mode 100644 tools/vendor/github.com/sourcegraph/conc/pool/error_pool.go
create mode 100644 tools/vendor/github.com/sourcegraph/conc/pool/pool.go
create mode 100644 tools/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go
create mode 100644 tools/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go
create mode 100644 tools/vendor/github.com/sourcegraph/conc/pool/result_pool.go
create mode 100644 tools/vendor/github.com/spf13/cast/.editorconfig
create mode 100644 tools/vendor/github.com/spf13/cast/.golangci.yaml
create mode 100644 tools/vendor/github.com/spf13/cast/alias.go
create mode 100644 tools/vendor/github.com/spf13/cast/basic.go
delete mode 100644 tools/vendor/github.com/spf13/cast/caste.go
create mode 100644 tools/vendor/github.com/spf13/cast/indirect.go
create mode 100644 tools/vendor/github.com/spf13/cast/internal/time.go
create mode 100644 tools/vendor/github.com/spf13/cast/internal/timeformattype_string.go
create mode 100644 tools/vendor/github.com/spf13/cast/map.go
create mode 100644 tools/vendor/github.com/spf13/cast/number.go
create mode 100644 tools/vendor/github.com/spf13/cast/slice.go
create mode 100644 tools/vendor/github.com/spf13/cast/time.go
delete mode 100644 tools/vendor/github.com/spf13/cast/timeformattype_string.go
create mode 100644 tools/vendor/github.com/spf13/cast/zz_generated.go
create mode 100644 tools/vendor/github.com/spf13/cobra/SECURITY.md
create mode 100644 tools/vendor/github.com/spf13/pflag/bool_func.go
create mode 100644 tools/vendor/github.com/spf13/pflag/errors.go
create mode 100644 tools/vendor/github.com/spf13/pflag/func.go
create mode 100644 tools/vendor/github.com/spf13/pflag/text.go
create mode 100644 tools/vendor/github.com/spf13/pflag/time.go
rename tools/vendor/github.com/spf13/viper/{UPDATES.md => UPGRADE.md} (79%)
create mode 100644 tools/vendor/github.com/stefanberger/go-pkcs11uri/.gitignore
create mode 100644 tools/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml
rename tools/vendor/github.com/{docker/go-metrics => stefanberger/go-pkcs11uri}/LICENSE (94%)
create mode 100644 tools/vendor/github.com/stefanberger/go-pkcs11uri/Makefile
create mode 100644 tools/vendor/github.com/stefanberger/go-pkcs11uri/README.md
create mode 100644 tools/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go
delete mode 100644 tools/vendor/github.com/syndtr/gocapability/capability/capability.go
delete mode 100644 tools/vendor/github.com/syndtr/gocapability/capability/capability_linux.go
delete mode 100644 tools/vendor/github.com/syndtr/gocapability/capability/capability_noop.go
delete mode 100644 tools/vendor/github.com/syndtr/gocapability/capability/enum.go
delete mode 100644 tools/vendor/github.com/syndtr/gocapability/capability/enum_gen.go
delete mode 100644 tools/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go
create mode 100644 tools/vendor/github.com/titanous/rocacheck/LICENSE
create mode 100644 tools/vendor/github.com/titanous/rocacheck/README.md
create mode 100644 tools/vendor/github.com/titanous/rocacheck/rocacheck.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/.gitignore
create mode 100644 tools/vendor/github.com/ulikunitz/xz/LICENSE
create mode 100644 tools/vendor/github.com/ulikunitz/xz/README.md
create mode 100644 tools/vendor/github.com/ulikunitz/xz/SECURITY.md
create mode 100644 tools/vendor/github.com/ulikunitz/xz/TODO.md
create mode 100644 tools/vendor/github.com/ulikunitz/xz/bits.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/crc.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/format.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/fox-check-none.xz
create mode 100644 tools/vendor/github.com/ulikunitz/xz/fox.xz
create mode 100644 tools/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/bintree.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/bitops.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/breader.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/buffer.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/decoder.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/encoder.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/hashtable.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/header.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/header2.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/operation.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/prob.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/properties.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/reader.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/reader2.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/state.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/writer.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzma/writer2.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/lzmafilter.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/make-docs
create mode 100644 tools/vendor/github.com/ulikunitz/xz/none-check.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/reader.go
create mode 100644 tools/vendor/github.com/ulikunitz/xz/writer.go
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/asm/README.md
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/asm/iterate.go
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
create mode 100644 tools/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/.gitignore
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/README.md
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/UNLICENSE
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/bar.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/bar_filler.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/bar_option.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/container_option.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/cwriter/doc.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/any.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/counters.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/doc.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/eta.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/meta.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/name.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1000_string.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1024_string.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/speed.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/doc.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/heap_manager.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/internal/width.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/priority_queue.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/progress.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/proxyreader.go
create mode 100644 tools/vendor/github.com/vbauerster/mpb/v8/proxywriter.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonpointer/README.md
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonreference/README.md
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonreference/reference.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/.gitignore
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/.travis.yml
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/README.md
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/draft.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/errors.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/glide.yaml
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/internalLog.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/locales.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/result.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/schema.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/schemaType.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/subSchema.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/types.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/utils.go
delete mode 100644 tools/vendor/github.com/xeipuuv/gojsonschema/validation.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/OWNERS
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_386.go
rename tools/vendor/go.etcd.io/bbolt/{bolt_unix_solaris.go => bolt_aix.go} (94%)
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_amd64.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_android.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_arm.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_arm64.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_loong64.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_mips64x.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_mipsx.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_ppc.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_ppc64.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_riscv64.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/bolt_s390x.go
rename tools/vendor/go.etcd.io/bbolt/{bolt_unix_aix.go => bolt_solaris.go} (95%)
create mode 100644 tools/vendor/go.etcd.io/bbolt/errors/errors.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/freelist.go
delete mode 100644 tools/vendor/go.etcd.io/bbolt/freelist_hmap.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/bucket.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/inode.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/meta.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/page.go
create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/types.go
rename
tools/vendor/go.etcd.io/bbolt/{ => internal/common}/unsafe.go (75%) create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/utils.go create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/common/verify.go create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/freelist/array.go create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go create mode 100644 tools/vendor/go.etcd.io/bbolt/internal/freelist/shared.go create mode 100644 tools/vendor/go.etcd.io/bbolt/logger.go delete mode 100644 tools/vendor/go.etcd.io/bbolt/page.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/LICENSE create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/doc.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/limit.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/span.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/tracer.go create mode 100644 tools/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go create mode 100644 tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go create mode 100644 tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/.clomonitor.yml rename tools/vendor/go.opentelemetry.io/otel/{internal/attribute => attribute/internal}/attribute.go (68%) create mode 100644 tools/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile delete mode 100644 tools/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh delete mode 100644 tools/vendor/go.opentelemetry.io/otel/internal/gen.go delete mode 100644 tools/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md create mode 100644 tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md create mode 100644 tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/auto.go create mode 100644 
tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go create mode 100644 tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go delete mode 100644 tools/vendor/go.opentelemetry.io/otel/verify_readmes.sh rename tools/vendor/{github.com/containers => go.podman.io}/common/LICENSE (100%) rename tools/vendor/{github.com/containers => go.podman.io}/common/pkg/auth/auth.go (95%) rename tools/vendor/{github.com/containers => go.podman.io}/common/pkg/auth/cli.go (96%) rename tools/vendor/{github.com/containers => go.podman.io}/common/pkg/capabilities/capabilities.go (70%) rename tools/vendor/{github.com/containers => go.podman.io}/common/pkg/completion/completion.go (94%) rename tools/vendor/{github.com/containers => go.podman.io}/common/pkg/password/password_supported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/common/pkg/password/password_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/LICENSE (100%) create mode 100644 tools/vendor/go.podman.io/image/v5/copy/blob.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/compression.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/copy.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/digesting_reader.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/encryption.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/manifest.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/multiple.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/progress_bars.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/progress_channel.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/sign.go create mode 100644 tools/vendor/go.podman.io/image/v5/copy/single.go create mode 100644 tools/vendor/go.podman.io/image/v5/directory/explicitfilepath/path.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/body_reader.go (95%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/cache.go (89%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/distribution_error.go (82%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_client.go (93%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_image.go (92%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_image_dest.go (95%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_image_src.go (90%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_transport.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/errors.go (94%) rename tools/vendor/{github.com/containers => 
go.podman.io}/image/v5/docker/paths_common.go (75%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/paths_freebsd.go (79%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/policyconfiguration/naming.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/README.md (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/helpers.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/normalize.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/reference.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/regexp-additions.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/regexp.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/registries_d.go (96%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/docker/wwwauthenticate.go (92%) create mode 100644 tools/vendor/go.podman.io/image/v5/image/docker_schema2.go create mode 100644 tools/vendor/go.podman.io/image/v5/image/sourced.go create mode 100644 tools/vendor/go.podman.io/image/v5/image/unparsed.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/blobinfocache/blobinfocache.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/blobinfocache/types.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/docker_list.go (91%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/docker_schema1.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/docker_schema2.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/manifest.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/memory.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/oci.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/oci_index.go (91%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/sourced.go (99%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/unparsed.go (87%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/impl/compat.go (81%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/impl/helpers.go (82%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/impl/properties.go (98%) create mode 100644 tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/original_oci_config.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/stubs/put_blob_partial.go (88%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/stubs/signatures.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/stubs/stubs.go (100%) create mode 100644 tools/vendor/go.podman.io/image/v5/internal/imagedestination/wrapper.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/impl/compat.go (94%) rename tools/vendor/{github.com/containers => 
go.podman.io}/image/v5/internal/imagesource/impl/layer_infos.go (96%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/impl/properties.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/impl/signatures.go (93%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/stubs/get_blob_at.go (95%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/stubs/stubs.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/wrapper.go (90%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/iolimits/iolimits.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/common.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/docker_schema2.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/docker_schema2_list.go (89%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/errors.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/list.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/manifest.go (99%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/oci_index.go (96%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/multierr/multierr.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/pkg/platform/platform_matcher.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/private/private.go (75%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/putblobdigest/put_blob_digest.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/rootless/rootless.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/set/set.go (84%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/signature/signature.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/signature/sigstore.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/signature/simple.go (100%) create mode 100644 tools/vendor/go.podman.io/image/v5/internal/signer/signer.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/streamdigest/stream_digest.go (89%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/tmpdir/tmpdir.go (91%) create mode 100644 tools/vendor/go.podman.io/image/v5/internal/unparsedimage/wrapper.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/uploadreader/upload_reader.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/internal/useragent/useragent.go (83%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/manifest/common.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/manifest/docker_schema1.go (94%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/manifest/docker_schema2.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/manifest/docker_schema2_list.go (95%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/manifest/list.go 
(95%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/manifest/manifest.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/manifest/oci.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/manifest/oci_index.go (94%) create mode 100644 tools/vendor/go.podman.io/image/v5/oci/internal/oci_util.go create mode 100644 tools/vendor/go.podman.io/image/v5/oci/layout/oci_delete.go create mode 100644 tools/vendor/go.podman.io/image/v5/oci/layout/oci_dest.go create mode 100644 tools/vendor/go.podman.io/image/v5/oci/layout/oci_src.go create mode 100644 tools/vendor/go.podman.io/image/v5/oci/layout/oci_transport.go create mode 100644 tools/vendor/go.podman.io/image/v5/oci/layout/reader.go create mode 100644 tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/default.go create mode 100644 tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go create mode 100644 tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/memory/memory.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/blobinfocache/none/none.go (97%) create mode 100644 tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/sqlite/sqlite.go create mode 100644 tools/vendor/go.podman.io/image/v5/pkg/compression/compression.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/compression/internal/types.go (84%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/compression/types/types.go (97%) create mode 100644 tools/vendor/go.podman.io/image/v5/pkg/compression/zstd.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/docker/config/config.go (96%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/strslice/README.md (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/strslice/strslice.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/sysregistriesv2/paths_common.go (96%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/sysregistriesv2/paths_freebsd.go (96%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/sysregistriesv2/shortnames.go (95%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/sysregistriesv2/system_registries_v2.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/pkg/tlsclientconfig/tlsclientconfig.go (91%) create mode 100644 tools/vendor/go.podman.io/image/v5/signature/docker.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/fulcio_cert.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/errors.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/json.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/rekor_api_types.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/rekor_set.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.c create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.h create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoiafuncs.h create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.h create mode 100644 tools/vendor/go.podman.io/image/v5/signature/internal/sigstore_payload.go create mode 100644 
tools/vendor/go.podman.io/image/v5/signature/mechanism.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/mechanism_gpgme.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/mechanism_gpgme_only.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/mechanism_openpgp.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/mechanism_sequoia.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/pki_cert.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_config.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_config_sigstore.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_eval.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_eval_baselayer.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_eval_signedby.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_eval_sigstore.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_eval_simple.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_paths_common.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_paths_freebsd.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_reference_match.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/policy_types.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/signer/signer.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/sigstore/copied.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/sigstore/generate.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/sigstore/internal/signer.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/sigstore/signer.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/simple.go create mode 100644 tools/vendor/go.podman.io/image/v5/signature/simplesigning/signer.go rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/transports/stub.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/transports/transports.go (93%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/types/types.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/image/v5/version/version.go (93%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/AUTHORS (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/LICENSE (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/NOTICE (100%) create mode 100644 tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go create mode 100644 tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go create mode 100644 tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/README.md create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive_110.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive_19.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive_bsd.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive_linux.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive_other.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive_unix.go create 
mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive_windows.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/archive_zstd.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/changes.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/changes_linux.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/changes_other.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/changes_unix.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/changes_windows.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/copy.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/copy_unix.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/copy_windows.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/diff.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/fflags_bsd.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/fflags_unsupported.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/filter.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/time_linux.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/time_unsupported.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/whiteouts.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/archive/wrap.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/chunked/compressor/rollsum.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/chunked/internal/minimal/compression.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/chunked/toc/toc.go rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/exists_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/exists_unix.go (89%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/exists_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils_darwin.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils_solaris.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils_windows.go (100%) create mode 100644 tools/vendor/go.podman.io/storage/pkg/fileutils/reflink_linux.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/fileutils/reflink_unsupported.go rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/homedir/homedir.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/homedir/homedir_unix.go (99%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/homedir/homedir_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools.go (96%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools_supported.go (91%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools_unix.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools_unsupported.go (100%) rename 
tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/parser.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/usergroupadd_linux.go (99%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/usergroupadd_unsupported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/utils_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/buffer.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/bytespipe.go (95%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/fswriters.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/fswriters_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/fswriters_other.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/readers.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/temp_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/temp_windows.go (87%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/writeflusher.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/writers.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/lockfile/lastwrite.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/lockfile/lockfile.go (92%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/lockfile/lockfile_unix.go (70%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/lockfile/lockfile_windows.go (71%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/longpath/longpath.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/flags.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/flags_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/flags_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/flags_unsupported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mount.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mounter_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mounter_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mounter_unsupported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mountinfo.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mountinfo_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/sharedsubtree_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/unmount_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/unmount_unsupported.go (100%) create mode 100644 tools/vendor/go.podman.io/storage/pkg/pools/pools.go create mode 100644 tools/vendor/go.podman.io/storage/pkg/promise/promise.go rename 
tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/README.md (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_unsupported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/reexec.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/regexp/regexp.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/regexp/regexp_dontprecompile.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/regexp/regexp_precompile.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/chmod.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/chtimes.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/chtimes_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/chtimes_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/errors.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/exitcode.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/extattr_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/extattr_unsupported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/init.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/init_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lchflags_bsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lchown.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lcow_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lcow_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lstat_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lstat_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_solaris.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_unsupported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/mknod.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/mknod_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/mknod_windows.go (100%) 
rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/path.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/path_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/path_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/process_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/rm.go (98%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/rm_common.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/rm_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_common.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_darwin.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_linux.go (84%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_netbsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_openbsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_solaris.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/syscall_unix.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/syscall_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/umask.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/umask_windows.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/utimes_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/utimes_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/utimes_unsupported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/xattrs_darwin.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/xattrs_freebsd.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/xattrs_linux.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/system/xattrs_unsupported.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/getenv_linux_cgo.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/getenv_linux_nocgo.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare.c (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_cgo.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_darwin.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_freebsd.c (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_freebsd.go 
(98%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_gccgo.go (100%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_linux.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_unsupported.go (97%) rename tools/vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_unsupported_cgo.go (100%) create mode 100644 tools/vendor/go.yaml.in/yaml/v2/.travis.yml rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE.libyaml (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/NOTICE (100%) create mode 100644 tools/vendor/go.yaml.in/yaml/v2/README.md rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/apic.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/decode.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/emitterc.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/encode.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/parserc.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/readerc.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/resolve.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/scannerc.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/sorter.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/writerc.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yaml.go (99%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yamlh.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yamlprivateh.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/LICENSE (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/NOTICE (100%) create mode 100644 tools/vendor/go.yaml.in/yaml/v3/README.md rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/apic.go (99%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/decode.go (97%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/emitterc.go (98%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/encode.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/parserc.go (93%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/readerc.go (99%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/resolve.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/scannerc.go (99%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/sorter.go (100%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/writerc.go (99%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/yaml.go (91%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/yamlh.go (99%) rename tools/vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/yamlprivateh.go (97%) create mode 100644 tools/vendor/golang.org/x/crypto/cryptobyte/asn1.go create mode 100644 tools/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go create mode 100644 
create mode 100644 tools/vendor/golang.org/x/crypto/cryptobyte/builder.go
create mode 100644 tools/vendor/golang.org/x/crypto/cryptobyte/string.go
create mode 100644 tools/vendor/golang.org/x/crypto/internal/alias/alias.go
create mode 100644 tools/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
create mode 100644 tools/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
create mode 100644 tools/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
create mode 100644 tools/vendor/golang.org/x/crypto/ocsp/ocsp.go
create mode 100644 tools/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
create mode 100644 tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
create mode 100644 tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
create mode 100644 tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
create mode 100644 tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
create mode 100644 tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/doc.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/hashes.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/hashes_noasm.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/keccakf.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/sha3.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/shake.go
create mode 100644 tools/vendor/golang.org/x/crypto/sha3/shake_noasm.go
delete mode 100644 tools/vendor/golang.org/x/net/http2/config_go124.go
create mode 100644 tools/vendor/golang.org/x/net/http2/config_go125.go
create mode 100644 tools/vendor/golang.org/x/net/http2/config_go126.go
delete mode 100644 tools/vendor/golang.org/x/net/http2/config_pre_go124.go
delete mode 100644 tools/vendor/golang.org/x/net/http2/timer.go
rename tools/vendor/golang.org/x/net/http2/{writesched_priority.go => writesched_priority_rfc7540.go} (78%)
create mode 100644 tools/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
create mode 100644 tools/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s
create mode 100644 tools/vendor/golang.org/x/sys/cpu/byteorder.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_aix.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_arm.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_arm64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_arm64.s
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_loong64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_loong64.s
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_other_x86.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_s390x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_s390x.s
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_wasm.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_x86.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_zos.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/endian_big.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/endian_little.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/hwcap_linux.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/parse.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/runtime_auxv.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
create mode 100644 tools/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go
delete mode 100644 tools/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
delete mode 100644 tools/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
delete mode 100644 tools/vendor/golang.org/x/tools/internal/modindex/types.go
create mode 100644 tools/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
create mode 100644 tools/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
create mode 100644 tools/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
create mode 100644 tools/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
create mode 100644 tools/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
create mode 100644 tools/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
create mode 100644 tools/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
create mode 100644 tools/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
create mode 100644 tools/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
create mode 100644 tools/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
create mode 100644 tools/vendor/google.golang.org/grpc/balancer/subconn.go
create mode 100644 tools/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
create mode 100644 tools/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
create mode 100644 tools/vendor/google.golang.org/grpc/internal/transport/client_stream.go
create mode 100644 tools/vendor/google.golang.org/grpc/internal/transport/server_stream.go
create mode 100644 tools/vendor/google.golang.org/grpc/stats/metrics.go
rename tools/vendor/helm.sh/helm/v3/pkg/{cli => kube}/roundtripper.go (87%)
create mode 100644 tools/vendor/helm.sh/helm/v3/pkg/registry/fallback.go
create mode 100644 tools/vendor/helm.sh/helm/v3/pkg/registry/reference.go
create mode 100644 tools/vendor/helm.sh/helm/v3/pkg/registry/transport.go
create mode 100644 tools/vendor/k8s.io/api/networking/v1/well_known_labels.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta1/devicetaint.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/devicetaint.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/doc.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/generated.pb.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/generated.proto
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/register.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/types.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/types_swagger_doc_generated.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/zz_generated.deepcopy.go
create mode 100644 tools/vendor/k8s.io/api/resource/v1beta2/zz_generated.prerelease-lifecycle.go
create mode 100644 tools/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go
create mode 100644 tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go
create mode 100644 tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
create mode 100644 tools/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go
create mode 100644 tools/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
create mode 100644 tools/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go
create mode 100644 tools/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go
create mode 100644 tools/vendor/k8s.io/apiserver/pkg/util/compatibility/registry.go
create mode 100644 tools/vendor/k8s.io/apiserver/pkg/util/compatibility/version.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundle.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundlespec.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidate.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidatespec.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeswapstatus.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/fornode.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/fornode.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddress.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddressspec.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/networking/v1/parentreference.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidr.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrspec.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrstatus.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/counter.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/counterset.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicecounterconsumption.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicesubrequest.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaint.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrule.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulespec.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintselector.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetoleration.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counter.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counterset.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecounterconsumption.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicesubrequest.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetaint.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetoleration.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocateddevicestatus.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocationresult.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/celdeviceselector.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counter.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counterset.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/device.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationconfiguration.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationresult.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceattribute.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecapacity.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaim.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaimconfiguration.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclass.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassconfiguration.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassspec.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconfiguration.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconstraint.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecounterconsumption.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequest.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequestallocationresult.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceselector.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicesubrequest.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetaint.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetoleration.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/exactdevicerequest.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/networkdevicedata.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/opaquedeviceconfiguration.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaim.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimconsumerreference.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimspec.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimstatus.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplate.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplatespec.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourcepool.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslice.go
create mode 100644 tools/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslicespec.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/clustertrustbundle.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/leasecandidate.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ipaddress.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/servicecidr.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/devicetaintrule.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/deviceclass.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/doc.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/generated_expansion.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resource_client.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceclaim.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceclaimtemplate.go
create mode 100644 tools/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceslice.go
create mode 100644 tools/vendor/k8s.io/client-go/rest/.mockery.yaml
create mode 100644 tools/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
create mode 100644 tools/vendor/k8s.io/code-generator/LICENSE
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/args.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/externaltypes.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/jsontagutil.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/openapi.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/refgraph.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/targets.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/util.go
rename tools/vendor/{sigs.k8s.io/yaml/yaml_go110.go => k8s.io/code-generator/cmd/client-gen/generators/util/gvpackages.go} (59%)
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go
create mode 100644 tools/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go
create mode 100644 tools/vendor/k8s.io/component-base/cli/flag/tracker_flag.go
create mode 100644 tools/vendor/k8s.io/component-base/compatibility/OWNERS
rename tools/vendor/k8s.io/component-base/{featuregate => compatibility}/registry.go (78%)
create mode 100644 tools/vendor/k8s.io/component-base/compatibility/version.go
rename tools/vendor/{github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt => k8s.io/gengo/v2/LICENSE} (99%)
create mode 100644 tools/vendor/k8s.io/gengo/v2/Makefile
create mode 100644 tools/vendor/k8s.io/gengo/v2/README.md
create mode 100644 tools/vendor/k8s.io/gengo/v2/comments.go
create mode 100644 tools/vendor/k8s.io/gengo/v2/execute.go
create mode 100644 tools/vendor/k8s.io/gengo/v2/generator/doc.go
create mode 100644 tools/vendor/k8s.io/gengo/v2/generator/error_tracker.go
create mode 100644 tools/vendor/k8s.io/gengo/v2/generator/execute.go
create mode 100644
tools/vendor/k8s.io/gengo/v2/generator/generator.go create mode 100644 tools/vendor/k8s.io/gengo/v2/generator/go_generator.go create mode 100644 tools/vendor/k8s.io/gengo/v2/generator/import_tracker.go create mode 100644 tools/vendor/k8s.io/gengo/v2/generator/simple_target.go create mode 100644 tools/vendor/k8s.io/gengo/v2/generator/snippet_writer.go create mode 100644 tools/vendor/k8s.io/gengo/v2/namer/doc.go create mode 100644 tools/vendor/k8s.io/gengo/v2/namer/import_tracker.go create mode 100644 tools/vendor/k8s.io/gengo/v2/namer/namer.go create mode 100644 tools/vendor/k8s.io/gengo/v2/namer/order.go create mode 100644 tools/vendor/k8s.io/gengo/v2/namer/plural_namer.go create mode 100644 tools/vendor/k8s.io/gengo/v2/parser/doc.go create mode 100644 tools/vendor/k8s.io/gengo/v2/parser/parse.go create mode 100644 tools/vendor/k8s.io/gengo/v2/parser/parse_122.go create mode 100644 tools/vendor/k8s.io/gengo/v2/parser/parse_pre_122.go create mode 100644 tools/vendor/k8s.io/gengo/v2/types/doc.go create mode 100644 tools/vendor/k8s.io/gengo/v2/types/types.go create mode 100644 tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/kubernetes-extensions.go delete mode 100644 tools/vendor/k8s.io/utils/clock/testing/fake_clock.go delete mode 100644 tools/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/auth/client.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/auth/client_opts.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/auth/docker/client.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/auth/docker/login.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/auth/docker/logout.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/auth/docker/resolver.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/consts.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/decompress.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/file.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/gunzip.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/iowriter.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/manifest.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/memory.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/multireader.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/multiwriter.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/oci.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/opts.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/passthrough.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/readerat.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/registry.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/untar.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/content/utils.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/context/logger.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/oras/copy.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/oras/errors.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/oras/opts.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/oras/provider.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/oras/store.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/registry/reference.go delete mode 100644 
tools/vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/registry/remote/repository.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/registry/remote/url.go delete mode 100644 tools/vendor/oras.land/oras-go/pkg/registry/repository.go create mode 100644 tools/vendor/oras.land/oras-go/v2/.gitignore create mode 100644 tools/vendor/oras.land/oras-go/v2/CODEOWNERS create mode 100644 tools/vendor/oras.land/oras-go/v2/CODE_OF_CONDUCT.md rename tools/vendor/oras.land/oras-go/{ => v2}/LICENSE (100%) create mode 100644 tools/vendor/oras.land/oras-go/v2/MIGRATION_GUIDE.md create mode 100644 tools/vendor/oras.land/oras-go/v2/Makefile create mode 100644 tools/vendor/oras.land/oras-go/v2/OWNERS.md create mode 100644 tools/vendor/oras.land/oras-go/v2/README.md create mode 100644 tools/vendor/oras.land/oras-go/v2/SECURITY.md create mode 100644 tools/vendor/oras.land/oras-go/v2/content.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/descriptor.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/graph.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/limitedstorage.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/memory/memory.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/oci/oci.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/oci/readonlyoci.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/oci/readonlystorage.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/oci/storage.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/reader.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/resolver.go create mode 100644 tools/vendor/oras.land/oras-go/v2/content/storage.go create mode 100644 tools/vendor/oras.land/oras-go/v2/copy.go create mode 100644 tools/vendor/oras.land/oras-go/v2/copyerror.go create mode 100644 tools/vendor/oras.land/oras-go/v2/errdef/errors.go create mode 100644 tools/vendor/oras.land/oras-go/v2/extendedcopy.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/cas/memory.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/cas/proxy.go rename tools/vendor/oras.land/oras-go/{pkg/content/errors.go => v2/internal/container/set/set.go} (53%) create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/copyutil/stack.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/descriptor/descriptor.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/docker/mediatype.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/fs/tarfs/tarfs.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/graph/memory.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/httputil/seek.go rename tools/vendor/oras.land/oras-go/{pkg/content/interface.go => v2/internal/interfaces/registry.go} (67%) create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/ioutil/io.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/manifestutil/parser.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/platform/platform.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/registryutil/proxy.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/resolver/memory.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/spec/artifact.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/status/tracker.go create mode 100644 
tools/vendor/oras.land/oras-go/v2/internal/syncutil/limit.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/syncutil/limitgroup.go create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/syncutil/merge.go rename tools/vendor/oras.land/oras-go/{pkg/registry/remote => v2}/internal/syncutil/once.go (74%) create mode 100644 tools/vendor/oras.land/oras-go/v2/internal/syncutil/pool.go create mode 100644 tools/vendor/oras.land/oras-go/v2/pack.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/reference.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/registry.go rename tools/vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/cache.go (67%) rename tools/vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/challenge.go (98%) rename tools/vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/client.go (70%) rename tools/vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/credential.go (89%) rename tools/vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/scope.go (58%) create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/file_store.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/config/config.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/executer/executer.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil/ioutil.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/memory_store.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store.go rename tools/vendor/oras.land/oras-go/{pkg/artifact/consts.go => v2/registry/remote/credentials/native_store_darwin.go} (66%) rename tools/vendor/oras.land/oras-go/{pkg/target/target.go => v2/registry/remote/credentials/native_store_generic.go} (64%) create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_linux.go rename tools/vendor/oras.land/oras-go/{pkg/context/context.go => v2/registry/remote/credentials/native_store_windows.go} (67%) create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/registry.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/store.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/credentials/trace/trace.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/internal/errutil/errutil.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/manifest.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/referrers.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/registry.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/repository.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/retry/client.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/retry/policy.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/url.go rename tools/vendor/oras.land/oras-go/{pkg => v2}/registry/remote/utils.go (65%) create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/remote/warning.go create mode 100644 tools/vendor/oras.land/oras-go/v2/registry/repository.go create mode 100644 tools/vendor/oras.land/oras-go/v2/target.go create mode 100644 
tools/vendor/sigs.k8s.io/controller-tools/pkg/applyconfiguration/doc.go create mode 100644 tools/vendor/sigs.k8s.io/controller-tools/pkg/applyconfiguration/gen.go create mode 100644 tools/vendor/sigs.k8s.io/controller-tools/pkg/applyconfiguration/zz_generated.markerhelp.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/cli/alpha/command.go delete mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/cli/alpha/generate.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/cli/alpha/internal/generate.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/certmanager/certificate_metrics.go rename tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/certmanager/{certificate.go => certificate_webhook.go} (70%) create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/certmanager/issuer.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault/cert_metrics_manager_patch.go delete mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault/enablecainection_patch.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/kdefault/kustomization_conversion_updater.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/prometheus/monitor_tls_patch.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/common/kustomize/v2/scaffolds/internal/templates/config/rbac/crd_admin_role.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/hub.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/spoke.go delete mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/webhook.go delete mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/webhook_suitetest.go delete mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/api/webhook_test_template.go rename tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/{ => cmd}/main.go (62%) create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/devcontainer.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/github/lint.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/github/test-e2e.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/github/test.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/webhooks/webhook.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/webhooks/webhook_suitetest.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/golang/v4/scaffolds/internal/templates/webhooks/webhook_test_template.go create mode 100644 
tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/commons.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/edit.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/init.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/plugin.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/init.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/chart-templates/cert-manager/certificate.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/chart-templates/helpers_tpl.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/chart-templates/manager/manager.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/chart-templates/metrics/metrics_service.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/chart-templates/prometheus/monitor.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/chart-templates/webhook/service.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/chart-templates/webhook/webhook.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/chart.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/github/test_chart.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/helmignore.go create mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/plugins/optional/helm/v1alpha/scaffolds/internal/templates/values.go delete mode 100644 tools/vendor/sigs.k8s.io/kubebuilder/v4/pkg/rescaffold/migrate.go create mode 100644 tools/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md create mode 100644 tools/vendor/sigs.k8s.io/randfill/LICENSE create mode 100644 tools/vendor/sigs.k8s.io/randfill/NOTICE create mode 100644 tools/vendor/sigs.k8s.io/randfill/OWNERS create mode 100644 tools/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES rename tools/vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/README.md (53%) create mode 100644 tools/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS rename tools/vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/bytesource/bytesource.go (100%) create mode 100644 tools/vendor/sigs.k8s.io/randfill/code-of-conduct.md create mode 100644 tools/vendor/sigs.k8s.io/randfill/randfill.go delete mode 100644 tools/vendor/sigs.k8s.io/yaml/.travis.yml delete mode 100644 tools/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS create mode 100644 tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go delete mode 100644 tools/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS delete mode 100644 tools/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go create mode 100644 tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml_aliases.go diff --git a/tools/go.mod b/tools/go.mod index f040e6b0f..812488c3c 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -1,105 +1,102 @@ module 
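Note on the go.mod hunks that follow: the tools module collapses its separate toolchain directive into a single `go 1.24.6` directive and bumps its direct requirements (operator-sdk, controller-tools) along with the transitive vendor set. A dedicated tools module like this one conventionally anchors its build-time binaries with a blank-import file so that `go mod tidy` and `go mod vendor` keep the vendored sources consistent with go.mod. A minimal sketch of that pattern follows, assuming the conventional file name tools.go; the import set mirrors the direct requirements in this go.mod but is illustrative, not taken from this patch:

    //go:build tools

    // Package tools pins build-time tool dependencies. The build tag is never
    // satisfied, so these blank imports only influence module resolution and
    // vendoring, never a real build.
    package tools

    import (
    	_ "github.com/operator-framework/operator-sdk/cmd/operator-sdk"
    	_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
    	_ "sigs.k8s.io/controller-runtime/tools/setup-envtest"
    	_ "sigs.k8s.io/controller-tools/cmd/controller-gen"
    	_ "sigs.k8s.io/kustomize/kustomize/v5"
    )

Since the go directive here is 1.24, the newer `tool` directive in go.mod would be an alternative to the blank-import file; whether this repository uses it is an assumption this patch does not show. Regenerating the vendor tree after an edit like this is normally `go mod tidy && go mod vendor` run inside tools/.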
diff --git a/tools/go.mod b/tools/go.mod
index f040e6b0f..812488c3c 100644
--- a/tools/go.mod
+++ b/tools/go.mod
@@ -1,105 +1,102 @@
 module github.com/csi-addons/kubernetes-csi-addons/tools
 
-go 1.23.4
-
-toolchain go1.24.1
+go 1.24.6
 
 require (
-	github.com/operator-framework/operator-sdk v1.39.2
+	github.com/operator-framework/operator-sdk v1.42.0
 	google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
 	google.golang.org/protobuf v1.36.10
 	sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240102165319-7f316f1309b1
-	sigs.k8s.io/controller-tools v0.17.3
+	sigs.k8s.io/controller-tools v0.18.0
 	sigs.k8s.io/kustomize/kustomize/v5 v5.6.0
 )
 
 require (
-	cel.dev/expr v0.18.0 // indirect
-	dario.cat/mergo v1.0.1 // indirect
-	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
-	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
-	github.com/BurntSushi/toml v1.4.0 // indirect
+	cel.dev/expr v0.24.0 // indirect
+	dario.cat/mergo v1.0.2 // indirect
+	github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+	github.com/BurntSushi/toml v1.5.0 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
-	github.com/Masterminds/semver/v3 v3.3.0 // indirect
+	github.com/Masterminds/semver/v3 v3.4.0 // indirect
 	github.com/Masterminds/sprig/v3 v3.3.0 // indirect
 	github.com/Masterminds/squirrel v1.5.4 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
-	github.com/Microsoft/hcsshim v0.12.9 // indirect
-	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+	github.com/Microsoft/hcsshim v0.13.0 // indirect
+	github.com/VividCortex/ewma v1.2.0 // indirect
+	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
-	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/chai2010/gettext-go v1.0.2 // indirect
-	github.com/containerd/cgroups/v3 v3.0.3 // indirect
-	github.com/containerd/containerd v1.7.27 // indirect
-	github.com/containerd/containerd/api v1.8.0 // indirect
-	github.com/containerd/continuity v0.4.4 // indirect
-	github.com/containerd/errdefs v0.3.0 // indirect
+	github.com/containerd/cgroups/v3 v3.0.5 // indirect
+	github.com/containerd/containerd v1.7.29 // indirect
+	github.com/containerd/containerd/api v1.9.0 // indirect
+	github.com/containerd/continuity v0.4.5 // indirect
+	github.com/containerd/errdefs v1.0.0 // indirect
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/platforms v0.2.1 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.17.0 // indirect
 	github.com/containerd/ttrpc v1.2.7 // indirect
-	github.com/containerd/typeurl/v2 v2.2.0 // indirect
-	github.com/containers/common v0.60.4 // indirect
-	github.com/containers/image/v5 v5.32.2 // indirect
+	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/ocicrypt v1.2.0 // indirect
-	github.com/containers/storage v1.57.2 // indirect
-	github.com/cyphar/filepath-securejoin v0.3.6 // indirect
+	github.com/containers/ocicrypt v1.2.1 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
+	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/cli v27.3.1+incompatible // indirect
+	github.com/docker/cli v28.4.0+incompatible // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker v27.2.0+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.8.2 // indirect
-	github.com/docker/go-connections v0.5.0 // indirect
-	github.com/docker/go-metrics v0.0.1 // indirect
+	github.com/docker/docker v28.3.3+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.9.3 // indirect
+	github.com/docker/go-connections v0.6.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.11.2 // indirect
-	github.com/evanphx/json-patch v5.9.0+incompatible // indirect
-	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/evanphx/json-patch v5.9.11+incompatible // indirect
+	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
 	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
 	github.com/fatih/color v1.18.0 // indirect
 	github.com/fatih/structtag v1.2.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fsnotify/fsnotify v1.8.0 // indirect
-	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
 	github.com/go-errors/errors v1.4.2 // indirect
 	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
-	github.com/go-git/go-billy/v5 v5.6.1 // indirect
-	github.com/go-git/go-git/v5 v5.13.1 // indirect
+	github.com/go-git/go-billy/v5 v5.6.2 // indirect
+	github.com/go-git/go-git/v5 v5.16.2 // indirect
 	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-jose/go-jose/v4 v4.1.1 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.3.0 // indirect
-	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.1 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
-	github.com/go-openapi/swag v0.23.0 // indirect
-	github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+	github.com/go-openapi/swag v0.23.1 // indirect
+	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
 	github.com/gobuffalo/envy v1.6.5 // indirect
 	github.com/gobuffalo/flect v1.0.3 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-migrate/migrate/v4 v4.18.1 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang-migrate/migrate/v4 v4.19.0 // indirect
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/btree v1.1.2 // indirect
-	github.com/google/cel-go v0.22.0 // indirect
+	github.com/google/btree v1.1.3 // indirect
+	github.com/google/cel-go v0.26.0 // indirect
 	github.com/google/gnostic-models v0.6.9 // indirect
 	github.com/google/go-cmp v0.7.0 // indirect
-	github.com/google/go-containerregistry v0.20.1 // indirect
-	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/go-containerregistry v0.20.6 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
 	github.com/gosuri/uitable v0.0.4 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect
 	github.com/h2non/filetype v1.1.3 // indirect
 	github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -113,18 +110,21 @@ require (
 	github.com/joho/godotenv v1.3.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20250624003606-5ddd5acf990d // indirect
 	github.com/lib/pq v1.10.9 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
-	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mailru/easyjson v0.9.0 // indirect
 	github.com/markbates/inflect v1.0.4 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
-	github.com/mattn/go-sqlite3 v1.14.24 // indirect
+	github.com/mattn/go-sqlite3 v1.14.32 // indirect
+	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
@@ -133,108 +133,126 @@ require (
 	github.com/moby/spdystream v0.5.0 // indirect
 	github.com/moby/sys/capability v0.4.0 // indirect
 	github.com/moby/sys/mountinfo v0.7.2 // indirect
-	github.com/moby/sys/sequential v0.5.0 // indirect
-	github.com/moby/sys/user v0.3.0 // indirect
+	github.com/moby/sys/sequential v0.6.0 // indirect
+	github.com/moby/sys/user v0.4.0 // indirect
 	github.com/moby/sys/userns v0.1.0 // indirect
-	github.com/moby/term v0.5.0 // indirect
+	github.com/moby/term v0.5.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
-	github.com/onsi/gomega v1.36.2 // indirect
+	github.com/onsi/gomega v1.38.2 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0 // indirect
-	github.com/opencontainers/runtime-spec v1.2.0 // indirect
-	github.com/operator-framework/ansible-operator-plugins v1.37.2 // indirect
-	github.com/operator-framework/api v0.27.0 // indirect
-	github.com/operator-framework/operator-manifest-tools v0.8.0 // indirect
-	github.com/operator-framework/operator-registry v1.49.0 // indirect
-	github.com/otiai10/copy v1.14.0 // indirect
-	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
+	github.com/opencontainers/image-spec v1.1.1 // indirect
+	github.com/opencontainers/runtime-spec v1.2.1 // indirect
+	github.com/operator-framework/ansible-operator-plugins v1.42.0 // indirect
+	github.com/operator-framework/api v0.34.0 // indirect
+	github.com/operator-framework/operator-manifest-tools v0.10.0 // indirect
+	github.com/operator-framework/operator-registry v1.59.0 // indirect
+	github.com/otiai10/copy v1.14.1 // indirect
+	github.com/otiai10/mint v1.6.3 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.21.1 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/proglottis/gpgme v0.1.5 // indirect
+	github.com/prometheus/client_golang v1.23.2 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.66.1 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
-	github.com/rubenv/sql-migrate v1.7.0 // indirect
+	github.com/rubenv/sql-migrate v1.8.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/sagikazarmark/locafero v0.7.0 // indirect
-	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
+	github.com/sagikazarmark/locafero v0.11.0 // indirect
+	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
+	github.com/sergi/go-diff v1.4.0 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
+	github.com/sigstore/fulcio v1.7.1 // indirect
+	github.com/sigstore/protobuf-specs v0.4.3 // indirect
+	github.com/sigstore/sigstore v1.9.5 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
-	github.com/sourcegraph/conc v0.3.0 // indirect
-	github.com/spf13/afero v1.14.0 // indirect
-	github.com/spf13/cast v1.7.1 // indirect
-	github.com/spf13/cobra v1.9.1 // indirect
-	github.com/spf13/pflag v1.0.6 // indirect
-	github.com/spf13/viper v1.20.0 // indirect
-	github.com/stoewer/go-strcase v1.3.0 // indirect
-	github.com/stretchr/testify v1.10.0 // indirect
+	github.com/smallstep/pkcs7 v0.2.1 // indirect
+	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
+	github.com/spf13/afero v1.15.0 // indirect
+	github.com/spf13/cast v1.10.0 // indirect
+	github.com/spf13/cobra v1.10.1 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
+	github.com/spf13/viper v1.21.0 // indirect
+	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
+	github.com/stoewer/go-strcase v1.3.1 // indirect
+	github.com/stretchr/testify v1.11.1 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/thoas/go-funk v0.9.3 // indirect
-	github.com/vbatts/tar-split v0.11.7 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/ulikunitz/xz v0.5.15 // indirect
+	github.com/vbatts/tar-split v0.12.1 // indirect
+	github.com/vbauerster/mpb/v8 v8.10.2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
-	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
-	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	github.com/xlab/treeprint v1.2.0 // indirect
-	go.etcd.io/bbolt v1.3.11 // indirect
+	go.etcd.io/bbolt v1.4.3 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
-	go.opentelemetry.io/otel v1.32.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
-	go.opentelemetry.io/otel/metric v1.32.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.32.0 // indirect
-	go.opentelemetry.io/otel/trace v1.32.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
+	go.opentelemetry.io/otel v1.37.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect
+	go.opentelemetry.io/otel/metric v1.37.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.37.0 // indirect
+	go.opentelemetry.io/otel/trace v1.37.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
+	go.podman.io/common v0.65.0 // indirect
+	go.podman.io/image/v5 v5.37.0 // indirect
+	go.podman.io/storage v1.60.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.37.0 // indirect
-	golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect
-	golang.org/x/mod v0.24.0 // indirect
-	golang.org/x/net v0.39.0 // indirect
-	golang.org/x/oauth2 v0.29.0 // indirect
-	golang.org/x/sync v0.13.0 // indirect
-	golang.org/x/sys v0.32.0 // indirect
-	golang.org/x/term v0.31.0 // indirect
-	golang.org/x/text v0.24.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
-	golang.org/x/tools v0.31.0 // indirect
-	google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
-	google.golang.org/grpc v1.68.1 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
+	golang.org/x/crypto v0.43.0 // indirect
+	golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
+	golang.org/x/mod v0.29.0 // indirect
+	golang.org/x/net v0.46.0 // indirect
+	golang.org/x/oauth2 v0.30.0 // indirect
+	golang.org/x/sync v0.17.0 // indirect
+	golang.org/x/sys v0.37.0 // indirect
+	golang.org/x/term v0.36.0 // indirect
+	golang.org/x/text v0.30.0 // indirect
+	golang.org/x/time v0.12.0 // indirect
+	golang.org/x/tools v0.37.0 // indirect
+	golang.org/x/tools/go/expect v0.1.1-deprecated // indirect
+	google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect
+	google.golang.org/grpc v1.75.1 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	helm.sh/helm/v3 v3.16.3 // indirect
-	k8s.io/api v0.32.2 // indirect
-	k8s.io/apiextensions-apiserver v0.32.2 // indirect
-	k8s.io/apimachinery v0.32.2 // indirect
-	k8s.io/apiserver v0.32.2 // indirect
-	k8s.io/cli-runtime v0.31.7 // indirect
-	k8s.io/client-go v0.32.2 // indirect
-	k8s.io/component-base v0.32.2 // indirect
+	helm.sh/helm/v3 v3.18.6 // indirect
+	k8s.io/api v0.33.5 // indirect
+	k8s.io/apiextensions-apiserver v0.33.5 // indirect
+	k8s.io/apimachinery v0.33.5 // indirect
+	k8s.io/apiserver v0.33.5 // indirect
+	k8s.io/cli-runtime v0.33.5 // indirect
+	k8s.io/client-go v0.33.5 // indirect
+	k8s.io/code-generator v0.33.5 // indirect
+	k8s.io/component-base v0.33.5 // indirect
+	k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect
-	k8s.io/kubectl v0.31.7 // indirect
-	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
-	oras.land/oras-go v1.2.5 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
-	sigs.k8s.io/controller-runtime v0.19.7 // indirect
-	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
-	sigs.k8s.io/kubebuilder/v4 v4.2.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a // indirect
+	k8s.io/kubectl v0.33.5 // indirect
+	k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
+	oras.land/oras-go/v2 v2.6.0 // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect
+	sigs.k8s.io/controller-runtime v0.21.0 // indirect
+	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+	sigs.k8s.io/kubebuilder/v4 v4.6.0 // indirect
 	sigs.k8s.io/kustomize/api v0.19.0 // indirect
 	sigs.k8s.io/kustomize/cmd/config v0.19.0 // indirect
 	sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
-	sigs.k8s.io/yaml v1.4.0 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
+	sigs.k8s.io/yaml v1.6.0 // indirect
 )
diff --git a/tools/go.sum b/tools/go.sum
index 67c64da63..b3b7679b0 100644
--- a/tools/go.sum
+++ b/tools/go.sum
@@ -1,51 +1,51 @@
-cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
-cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
-dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
-github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
 github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
 github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
 github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
 github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
-github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
 github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
 github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
 github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
-github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
-github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
+github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA=
+github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
+github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
 github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
+github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -53,86 +53,82 @@ github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNS
 github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
-github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
-github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII=
-github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0=
-github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0=
-github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
-github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII=
-github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
-github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
-github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
+github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
+github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE=
+github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs=
+github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0=
+github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI=
+github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
+github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
 github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
 github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
 github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
-github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
-github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
+github.com/containerd/stargz-snapshotter/estargz v0.17.0 h1:+TyQIsR/zSFI1Rm31EQBwpAA1ovYgIKHy7kctL3sLcE=
+github.com/containerd/stargz-snapshotter/estargz v0.17.0/go.mod h1:s06tWAiJcXQo9/8AReBCIo/QxcXFZ2n4qfsRnpl71SM=
 github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ=
 github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
-github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
-github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
-github.com/containers/common v0.60.4 h1:H5+LAMHPZEqX6vVNOQ+IguVsaFl8kbO/SZ/VPXjxhy0=
-github.com/containers/common v0.60.4/go.mod h1:I0upBi1qJX3QmzGbUOBN1LVP6RvkKhd3qQpZbQT+Q54=
-github.com/containers/image/v5 v5.32.2 h1:SzNE2Y6sf9b1GJoC8qjCuMBXwQrACFp4p0RK15+4gmQ=
-github.com/containers/image/v5 v5.32.2/go.mod h1:v1l73VeMugfj/QtKI+jhYbwnwFCFnNGckvbST3rQ5Hk=
+github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
+github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
-github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
-github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U=
-github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM=
+github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
+github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
 github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
+github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM=
-github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
+github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg=
-github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE=
+github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
+github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
-github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY=
+github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4=
-github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
-github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
-github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
-github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
+github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
+github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
+github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
+github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 h1:EHZfspsnLAz8Hzccd67D5abwLiqoqym2jz/jOS39mCk=
+github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
-github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU=
-github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
-github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
+github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
 github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
@@ -145,64 +141,63 @@ github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7Dlme
 github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
-github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
+github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
 github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
 github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
-github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA=
-github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE=
-github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M=
-github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc=
+github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
+github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
+github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
+github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
 github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
 github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtSb/iI= +github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= -github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobuffalo/envy v1.6.5 h1:X3is06x7v0nW2xiy2yFbbIjwHz57CD6z6MkvqULTCm8= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= -github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= 
+github.com/golang-migrate/migrate/v4 v4.19.0 h1:RcjOnCGz3Or6HQYEJ/EEVLfWnmw9KnoigPSjzhCuaSE= +github.com/golang-migrate/migrate/v4 v4.19.0/go.mod h1:9dyEcu+hO+G9hPSw8AIg50yg622pXJsoHItQnDGZkI0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -213,10 +208,10 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -226,15 +221,14 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 
-github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0= -github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -244,16 +238,16 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= 
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= @@ -263,11 +257,10 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= -github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= -github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= -github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= @@ -276,6 +269,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgnQk= @@ -284,19 +279,14 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -310,12 +300,14 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/letsencrypt/boulder v0.0.0-20250624003606-5ddd5acf990d h1:fCRb9hXR4QQJpwc7xnGugnva0DD5ollTGkys0n8aXT4= +github.com/letsencrypt/boulder v0.0.0-20250624003606-5ddd5acf990d/go.mod h1:BVoSL2Ed8oCncct0meeBqoTY7b1Mzx7WqEOZ8EisFmY= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -326,11 +318,12 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= -github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= +github.com/mattn/go-sqlite3 v1.14.32 
h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= +github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= +github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -347,63 +340,59 @@ github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCnd github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.2 h1:LYLd7Wz401p0N7xR8y7WL6D2QZwKpbirDg0EVIvzvMM= -github.com/onsi/ginkgo/v2 v2.23.2/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/ansible-operator-plugins v1.37.2 h1:dvzvx5DjFWyAX3H+0dJz2pg6Gf4n7HnnJxyx3u8nm8U= -github.com/operator-framework/ansible-operator-plugins v1.37.2/go.mod h1:rr1ornLcBtaPN806AS/G6maIvmawM3n3dRqqJDa1Bcc= -github.com/operator-framework/api v0.27.0 h1:OrVaGKZJvbZo58HTv2guz7aURkhVKYhFqZ/6VpifiXI= -github.com/operator-framework/api v0.27.0/go.mod h1:lg2Xx+S8NQWGYlEOvFwQvH46E5EK5IrAIL7HWfAhciM= -github.com/operator-framework/operator-manifest-tools v0.8.0 h1:2zVVPs7IHrH8wgFInjF2QHJjEz9ih0qUqusMqrd4Qgg= -github.com/operator-framework/operator-manifest-tools v0.8.0/go.mod h1:oxVwdj0c7bqFBb1/bljVfImPwThORrwSn/mFn2mR4s8= -github.com/operator-framework/operator-registry v1.49.0 h1:7oasuzReEBt3Fh6g5DTE5nzY6wO8bFp5F/LSmYnNWD0= -github.com/operator-framework/operator-registry v1.49.0/go.mod h1:mqHeBS3DtQBToyF9tVlMOE1BfsJB76nLAujckmgC1gQ= -github.com/operator-framework/operator-sdk v1.39.2 h1:+Vu+JFJSYJ3QNuY2G/Db5QMmmqkRGLlD++CCaD9v7bg= -github.com/operator-framework/operator-sdk v1.39.2/go.mod h1:MYgCLo6D1TRKeF/RW+j+cNjs+TJ2utH+jZngEzzal50= -github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= -github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= -github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= -github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/pelletier/go-toml/v2 v2.2.3 
h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= -github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/operator-framework/ansible-operator-plugins v1.42.0 h1:ahupKUXl7sYKILEUp1tiQNW9WiFxpGGyN1UQ/EfsNGY= +github.com/operator-framework/ansible-operator-plugins v1.42.0/go.mod h1:gGyNgCrNU1opGioTWbYdnbRTcJkJrFPS8Ysu/hKybnE= +github.com/operator-framework/api v0.34.0 h1:REiEaYhG1CWmDoajdcAdZqtgoljWG+ixMY59vUX5pFI= +github.com/operator-framework/api v0.34.0/go.mod h1:eGncUNIYvWtfGCCKmLzGXvoi3P0TDf3Yd/Z0Sn9E6SQ= +github.com/operator-framework/operator-manifest-tools v0.10.0 h1:+vtIElvGQ5e43gCD6fF65a0HNH3AD3LGnukUhpl9kjc= +github.com/operator-framework/operator-manifest-tools v0.10.0/go.mod h1:eB/wnr0BOhMLNXPeceE+0p3vudP16zDNWP60Hvn3KaM= +github.com/operator-framework/operator-registry v1.59.0 h1:SQhT0qMTYJXqStNhBOYXmLAMpS3eszzbcXAg5NLgJu8= +github.com/operator-framework/operator-registry v1.59.0/go.mod h1:QE1RRQGe+iau8sfY10DbP3+eoahH0G0l+coYrnEzJgI= +github.com/operator-framework/operator-sdk v1.42.0 h1:ng0eWo1GInKQ12ycwzMVK0Eq/T3m0N1c6f1h1exskRk= +github.com/operator-framework/operator-sdk v1.42.0/go.mod h1:6XuqltQbJb7H0QRBzJjaU6a77TUsodvLOf3ArHkxUGQ= +github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= +github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= +github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= +github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -411,65 +400,70 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod 
h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= -github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/proglottis/gpgme v0.1.5 h1:KCGyOw8sQ+SI96j6G8D8YkOGn+1TwbQTT9/zQXoVlz0= +github.com/proglottis/gpgme v0.1.5/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= -github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= -github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= -github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= -github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= -github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0 h1:uTiEyEyfLhkw678n6EulHVto8AkcXVr8zUcBJNZ0ark= +github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0/go.mod h1:eFYL/99JvdLP4T9/3FZ5t2pClnv7mMskc+WstTcyVr4= +github.com/redis/go-redis/extra/redisotel/v9 v9.10.0 h1:4z7/hCJ9Jft8EBb2tDmK38p2WjyIEJ1ShhhwAhjOCps= +github.com/redis/go-redis/extra/redisotel/v9 v9.10.0/go.mod 
h1:B0thqLh4hB8MvvcUKSwyP5YiIcCCp8UrQ0cA9gEqyjk= +github.com/redis/go-redis/v9 v9.10.0 h1:FxwK3eV8p/CQa0Ch276C7u2d0eNC9kCmAYQ7mCXCzVs= +github.com/redis/go-redis/v9 v9.10.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= -github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= -github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= -github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= -github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= +github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ= +github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8= +github.com/sigstore/protobuf-specs v0.4.3 h1:kRgJ+ciznipH9xhrkAbAEHuuxD3GhYnGC873gZpjJT4= +github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= +github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= -github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= -github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= -github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= -github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA= +github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= +github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= +github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -482,156 +476,204 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify 
v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= -github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= -github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U= -github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= +github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= +go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= +go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= +go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= +go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= +go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= +go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= +go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= 
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= -go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= -go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= -go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 
h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M= +go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= +go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= +go.opentelemetry.io/contrib/exporters/autoexport v0.61.0/go.mod h1:N6otC+qXTD5bAnbK2O1f/1SXq3cX+3KYSWrkBUqG0cw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2/go.mod h1:QTnxBwT/1rBIgAG1goq6xMydfYOBKU6KTiYF4fp5zL8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwdo1gS2eH26Rg+CoqVQpEK1h8gvt5qyU5Kk5Bixvow= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= +go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= +go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0= 
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY= +go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= +go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= +go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= +go.podman.io/common v0.65.0 h1:8JNl25U4VpKDkFHSymSPm4te7ZQHJbfAB/l2FqtmYEg= +go.podman.io/common v0.65.0/go.mod h1:+lJu8KHeoDQsD9HDdiFaMaOUiqPLQnK406WuLnqM7Z0= +go.podman.io/image/v5 v5.37.0 h1:yzgQybwuWIIeK63hu+mQqna/wOh96XD5cpVc6j8Dg5M= +go.podman.io/image/v5 v5.37.0/go.mod h1:+s2Sx5dia/jVeT8tI3r2NAPrARMiDdbEq3QPIQogx3I= +go.podman.io/storage v1.60.0 h1:bWNSrR58nxg39VNFDSx3m0AswbvyzPGOo5XsUfomTao= +go.podman.io/storage v1.60.0/go.mod h1:NK+rsWJVuQeCM7ifv7cxD3abegWxwtW/3OkuSUJJoE4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod 
h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= -golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= -golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -640,30 +682,40 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto 
v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -677,7 +729,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -690,56 +741,58 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -helm.sh/helm/v3 v3.16.3 h1:kb8bSxMeRJ+knsK/ovvlaVPfdis0X3/ZhYCSFRP+YmY= -helm.sh/helm/v3 v3.16.3/go.mod h1:zeVWGDR4JJgiRbT3AnNsjYaX8OTJlIE9zC+Q7F7iUSU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +helm.sh/helm/v3 v3.18.6 h1:S/2CqcYnNfLckkHLI0VgQbxgcDaU3N4A/46E3n9wSNY= +helm.sh/helm/v3 v3.18.6/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw= -k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y= -k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= -k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= -k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ= -k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.2 h1:WzyxAu4mvLkQxwD9hGa4ZfExo3yZZaYzoYvvVDlM6vw= -k8s.io/apiserver v0.32.2/go.mod h1:PEwREHiHNU2oFdte7BjzA1ZyjWjuckORLIK/wLV5goM= -k8s.io/cli-runtime v0.31.7 h1:k8IkChvyUkemf7ssvgLKtq5JJH6sXZet4c3iyY0Tn74= -k8s.io/cli-runtime v0.31.7/go.mod h1:4QmnnX+OB8L8IT5qdWTfrT9NlCcqvxLkFn3lNzO8N84= -k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA= -k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94= -k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU= -k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0= +k8s.io/api v0.33.5 h1:YR+uhYj05jdRpcksv8kjSliW+v9hwXxn6Cv10aR8Juw= +k8s.io/api v0.33.5/go.mod h1:2gzShdwXKT5yPGiqrTrn/U/nLZ7ZyT4WuAj3XGDVgVs= +k8s.io/apiextensions-apiserver v0.33.5 h1:93NZh6rmrcamX/tfv/dZrTsMiQX69ufANmDcKPEgSeA= +k8s.io/apiextensions-apiserver v0.33.5/go.mod h1:JIbyQnNlu6nQa7b1vgFi51pmlXOk8mdn0WJwUJnz/7U= +k8s.io/apimachinery v0.33.5 h1:NiT64hln4TQXeYR18/ES39OrNsjGz8NguxsBgp+6QIo= +k8s.io/apimachinery v0.33.5/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.5 h1:X1Gy33r4YkRLRqTjGjofk7X1/EjSLEVSJ/A+1qjoj60= +k8s.io/apiserver v0.33.5/go.mod h1:Q+b5Btbc8x0PqOCeh/xBTesKk+cXQRN+PF2wdrTKDeg= +k8s.io/cli-runtime v0.33.5 h1:wM7DoglOkrJDmddla864mVpueaEDX7/XGAkHGMWQkpc= +k8s.io/cli-runtime v0.33.5/go.mod h1:ZmUR+ybq97SqxSkkqGQdIhzCfk/+ETUhwKQq5EguaCw= +k8s.io/client-go v0.33.5 h1:I8BdmQGxInpkMEnJvV6iG7dqzP3JRlpZZlib3OMFc3o= +k8s.io/client-go v0.33.5/go.mod 
h1:W8PQP4MxbM4ypgagVE65mUUqK1/ByQkSALF9tzuQ6u0= +k8s.io/code-generator v0.33.5 h1:KwkOvhwAaorjSwF2MQhhdhL3i8bBmAal/TWhX67kdHw= +k8s.io/code-generator v0.33.5/go.mod h1:Ra+sdZquRakeTGcEnQAPw6BmlZ92IvxwQQTX/XOvOIE= +k8s.io/component-base v0.33.5 h1:4D3kxjEx1pJRy3WHAZsmX3+LCpmd4ftE+2J4v6naTnQ= +k8s.io/component-base v0.33.5/go.mod h1:Zma1YjBVuuGxIbspj1vGR3/5blzo2ARf1v0QTtog1to= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= -k8s.io/kubectl v0.31.7 h1:zFJlTxSd+o/TlvcquAZGT3z01ifAvVfuFtxBeRe7euE= -k8s.io/kubectl v0.31.7/go.mod h1:mD2vgBEXN4AbSKb0V2NXN2pGiNu3IzKsRo9NRgCKUJ0= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= -oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.19.7 h1:DLABZfMr20A+AwCZOHhcbcu+TqBXnJZaVBri9K3EO48= -sigs.k8s.io/controller-runtime v0.19.7/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a h1:ZV3Zr+/7s7aVbjNGICQt+ppKWsF1tehxggNfbM7XnG8= +k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/kubectl v0.33.5 h1:/wj5EjXXrVeSd8+FcZ2sIIP1PlQkq8HWsR9T1Nsl32c= +k8s.io/kubectl v0.33.5/go.mod h1:YrBGE7U+nz7+UatG+aNDocIQtdTyqN528dwFCv6+Kuw= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240102165319-7f316f1309b1 h1:1/GQWB9rabeYd3oANeTQH7OHrtShvVgH0FmqHWBpR6I= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240102165319-7f316f1309b1/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg= -sigs.k8s.io/controller-tools v0.17.3 h1:lwFPLicpBKLgIepah+c8ikRBubFW5kOQyT88r3EwfNw= -sigs.k8s.io/controller-tools v0.17.3/go.mod h1:1ii+oXcYZkxcBXzwv3YZBlzjt1fvkrCGjVF73blosJI= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 
h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kubebuilder/v4 v4.2.0 h1:vl5WgaYKR6e6YDK02Mizf7d1RxFNk1pOSnh6uRnHm6s= -sigs.k8s.io/kubebuilder/v4 v4.2.0/go.mod h1:Jq0Qrlrtn3YKdCFSW6CBbmGuwsw6xO6a7beFiVQf/bI= +sigs.k8s.io/controller-tools v0.18.0 h1:rGxGZCZTV2wJreeRgqVoWab/mfcumTMmSwKzoM9xrsE= +sigs.k8s.io/controller-tools v0.18.0/go.mod h1:gLKoiGBriyNh+x1rWtUQnakUYEujErjXs9pf+x/8n1U= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kubebuilder/v4 v4.6.0 h1:SBc37jghs3L2UaEL91A1t5K5dANrEviUDuNic9hMQSw= +sigs.k8s.io/kubebuilder/v4 v4.6.0/go.mod h1:zlXrnLiJPDPpK4hKCUrlgzzLOusfA8Sd8tpYGIrvD00= sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= sigs.k8s.io/kustomize/cmd/config v0.19.0 h1:D3uASwjHWHmNiEHu3pPJBJMBIsb+auFvHrHql3HAarU= @@ -748,7 +801,11 @@ sigs.k8s.io/kustomize/kustomize/v5 v5.6.0 h1:MWtRRDWCwQEeW2rnJTqJMuV6Agy56P53Skb sigs.k8s.io/kustomize/kustomize/v5 v5.6.0/go.mod h1:XuuZiQF7WdcvZzEYyNww9A0p3LazCKeJmCjeycN8e1I= sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/tools/vendor/cel.dev/expr/.bazelversion b/tools/vendor/cel.dev/expr/.bazelversion index 26bc914a3..13c50892b 100644 --- a/tools/vendor/cel.dev/expr/.bazelversion +++ b/tools/vendor/cel.dev/expr/.bazelversion @@ -1,2 +1,2 @@ -7.0.1 +7.3.2 # Keep this pinned version in parity with cel-go diff --git a/tools/vendor/cel.dev/expr/MODULE.bazel b/tools/vendor/cel.dev/expr/MODULE.bazel index 9794266f5..85ac9ff61 100644 --- a/tools/vendor/cel.dev/expr/MODULE.bazel +++ b/tools/vendor/cel.dev/expr/MODULE.bazel @@ -8,26 +8,38 @@ bazel_dep( ) bazel_dep( name = "gazelle", - version = "0.36.0", + version = "0.39.1", repo_name = "bazel_gazelle", ) bazel_dep( name = "googleapis", - version = "0.0.0-20240819-fe8ba054a", + version = "0.0.0-20241220-5e258e33.bcr.1", repo_name = "com_google_googleapis", ) +bazel_dep( + name = "googleapis-cc", + version = "1.0.0", +) +bazel_dep( + name = "googleapis-java", + version = "1.0.0", +) +bazel_dep( + name = "googleapis-go", + version = "1.0.0", +) bazel_dep( name = "protobuf", - version = "26.0", + version = "27.0", 
repo_name = "com_google_protobuf", ) bazel_dep( name = "rules_cc", - version = "0.0.9", + version = "0.0.17", ) bazel_dep( name = "rules_go", - version = "0.49.0", + version = "0.53.0", repo_name = "io_bazel_rules_go", ) bazel_dep( @@ -36,7 +48,7 @@ bazel_dep( ) bazel_dep( name = "rules_proto", - version = "6.0.0", + version = "7.0.2", ) bazel_dep( name = "rules_python", @@ -50,16 +62,8 @@ python.toolchain( python_version = "3.11", ) -switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules") -switched_rules.use_languages( - cc = True, - go = True, - java = True, -) -use_repo(switched_rules, "com_google_googleapis_imports") - go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") -go_sdk.download(version = "1.21.1") +go_sdk.download(version = "1.22.0") go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps") go_deps.from_file(go_mod = "//:go.mod") diff --git a/tools/vendor/cel.dev/expr/README.md b/tools/vendor/cel.dev/expr/README.md index 7930c0b75..42d67f87c 100644 --- a/tools/vendor/cel.dev/expr/README.md +++ b/tools/vendor/cel.dev/expr/README.md @@ -69,5 +69,3 @@ For more detail, see: * [Language Definition](doc/langdef.md) Released under the [Apache License](LICENSE). - -Disclaimer: This is not an official Google product. diff --git a/tools/vendor/cel.dev/expr/cloudbuild.yaml b/tools/vendor/cel.dev/expr/cloudbuild.yaml index c40881f12..e3e533a04 100644 --- a/tools/vendor/cel.dev/expr/cloudbuild.yaml +++ b/tools/vendor/cel.dev/expr/cloudbuild.yaml @@ -1,5 +1,5 @@ steps: -- name: 'gcr.io/cloud-builders/bazel:7.0.1' +- name: 'gcr.io/cloud-builders/bazel:7.3.2' entrypoint: bazel args: ['build', '...'] id: bazel-build diff --git a/tools/vendor/cel.dev/expr/eval.pb.go b/tools/vendor/cel.dev/expr/eval.pb.go index 8f651f9cc..a7aae0900 100644 --- a/tools/vendor/cel.dev/expr/eval.pb.go +++ b/tools/vendor/cel.dev/expr/eval.pb.go @@ -1,15 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.3 +// protoc v5.27.1 // source: cel/expr/eval.proto package expr import ( - status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -22,21 +22,18 @@ const ( ) type EvalState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` unknownFields protoimpl.UnknownFields - - Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvalState) Reset() { *x = EvalState{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EvalState) String() string { @@ -47,7 +44,7 @@ func (*EvalState) ProtoMessage() {} func (x *EvalState) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -77,25 +74,22 @@ func (x *EvalState) GetResults() []*EvalState_Result { } type ExprValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Kind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Kind: // // *ExprValue_Value // *ExprValue_Error // *ExprValue_Unknown - Kind isExprValue_Kind `protobuf_oneof:"kind"` + Kind isExprValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExprValue) Reset() { *x = ExprValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExprValue) String() string { @@ -106,7 +100,7 @@ func (*ExprValue) ProtoMessage() {} func (x *ExprValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -121,30 +115,36 @@ func (*ExprValue) Descriptor() ([]byte, []int) { return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} } -func (m *ExprValue) GetKind() isExprValue_Kind { - if m != nil { - return m.Kind +func (x *ExprValue) GetKind() isExprValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *ExprValue) GetValue() *Value { - if x, ok := x.GetKind().(*ExprValue_Value); ok { - return x.Value + if x != nil { + if x, ok := x.Kind.(*ExprValue_Value); ok { + return x.Value + } } return 
nil } func (x *ExprValue) GetError() *ErrorSet { - if x, ok := x.GetKind().(*ExprValue_Error); ok { - return x.Error + if x != nil { + if x, ok := x.Kind.(*ExprValue_Error); ok { + return x.Error + } } return nil } func (x *ExprValue) GetUnknown() *UnknownSet { - if x, ok := x.GetKind().(*ExprValue_Unknown); ok { - return x.Unknown + if x != nil { + if x, ok := x.Kind.(*ExprValue_Unknown); ok { + return x.Unknown + } } return nil } @@ -172,20 +172,17 @@ func (*ExprValue_Error) isExprValue_Kind() {} func (*ExprValue_Unknown) isExprValue_Kind() {} type ErrorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` unknownFields protoimpl.UnknownFields - - Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ErrorSet) Reset() { *x = ErrorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorSet) String() string { @@ -196,7 +193,7 @@ func (*ErrorSet) ProtoMessage() {} func (x *ErrorSet) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -211,28 +208,85 @@ func (*ErrorSet) Descriptor() ([]byte, []int) { return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} } -func (x *ErrorSet) GetErrors() []*status.Status { +func (x *ErrorSet) GetErrors() []*Status { if x != nil { return x.Errors } return nil } -type UnknownSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Status struct { + state protoimpl.MessageState `protogen:"open.v1"` + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +func (x *Status) Reset() { + *x = Status{} + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UnknownSet) Reset() { - *x = UnknownSet{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[3] +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState `protogen:"open.v1"` + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UnknownSet) String() string { @@ -242,8 +296,8 @@ func (x *UnknownSet) String() string { func (*UnknownSet) ProtoMessage() {} func (x *UnknownSet) ProtoReflect() protoreflect.Message { - mi := &file_cel_expr_eval_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -255,7 +309,7 @@ func (x *UnknownSet) ProtoReflect() protoreflect.Message { // Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. func (*UnknownSet) Descriptor() ([]byte, []int) { - return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} + return file_cel_expr_eval_proto_rawDescGZIP(), []int{4} } func (x *UnknownSet) GetExprs() []int64 { @@ -266,21 +320,18 @@ func (x *UnknownSet) GetExprs() []int64 { } type EvalState_Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvalState_Result) Reset() { *x = EvalState_Result{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EvalState_Result) String() string { @@ -290,8 +341,8 @@ func (x *EvalState_Result) String() string { func (*EvalState_Result) ProtoMessage() {} func (x *EvalState_Result) ProtoReflect() protoreflect.Message { - mi := &file_cel_expr_eval_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cel_expr_eval_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -325,39 +376,45 @@ var File_cel_expr_eval_proto protoreflect.FileDescriptor var file_cel_expr_eval_proto_rawDesc = []byte{ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, - 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, - 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, - 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, - 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, - 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, - 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, + 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, + 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 
0x70, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28, + 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, + 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, + 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -372,28 +429,30 @@ func file_cel_expr_eval_proto_rawDescGZIP() []byte { return file_cel_expr_eval_proto_rawDescData } -var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_cel_expr_eval_proto_goTypes = []interface{}{ +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6) 
+var file_cel_expr_eval_proto_goTypes = []any{ (*EvalState)(nil), // 0: cel.expr.EvalState (*ExprValue)(nil), // 1: cel.expr.ExprValue (*ErrorSet)(nil), // 2: cel.expr.ErrorSet - (*UnknownSet)(nil), // 3: cel.expr.UnknownSet - (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result - (*Value)(nil), // 5: cel.expr.Value - (*status.Status)(nil), // 6: google.rpc.Status + (*Status)(nil), // 3: cel.expr.Status + (*UnknownSet)(nil), // 4: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result + (*Value)(nil), // 6: cel.expr.Value + (*anypb.Any)(nil), // 7: google.protobuf.Any } var file_cel_expr_eval_proto_depIdxs = []int32{ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue - 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result - 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet - 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet - 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status + 7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_cel_expr_eval_proto_init() } @@ -402,69 +461,7 @@ func file_cel_expr_eval_proto_init() { return } file_cel_expr_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExprValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UnknownSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState_Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{ (*ExprValue_Value)(nil), 
(*ExprValue_Error)(nil), (*ExprValue_Unknown)(nil), @@ -475,7 +472,7 @@ func file_cel_expr_eval_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_cel_expr_eval_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/tools/vendor/dario.cat/mergo/FUNDING.json b/tools/vendor/dario.cat/mergo/FUNDING.json new file mode 100644 index 000000000..0585e1fe1 --- /dev/null +++ b/tools/vendor/dario.cat/mergo/FUNDING.json @@ -0,0 +1,7 @@ +{ + "drips": { + "ethereum": { + "ownedBy": "0x6160020e7102237aC41bdb156e94401692D76930" + } + } +} diff --git a/tools/vendor/dario.cat/mergo/README.md b/tools/vendor/dario.cat/mergo/README.md index 0b3c48889..0e4a59afd 100644 --- a/tools/vendor/dario.cat/mergo/README.md +++ b/tools/vendor/dario.cat/mergo/README.md @@ -85,7 +85,6 @@ Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/depend * [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) * [go-micro/go-micro](https://github.com/go-micro/go-micro) * [grafana/loki](https://github.com/grafana/loki) -* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) * [masterminds/sprig](github.com/Masterminds/sprig) * [moby/moby](https://github.com/moby/moby) * [slackhq/nebula](https://github.com/slackhq/nebula) @@ -191,10 +190,6 @@ func main() { } ``` -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - ### Transformers Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? 
diff --git a/tools/vendor/dario.cat/mergo/SECURITY.md b/tools/vendor/dario.cat/mergo/SECURITY.md index a5de61f77..3788fcc1c 100644 --- a/tools/vendor/dario.cat/mergo/SECURITY.md +++ b/tools/vendor/dario.cat/mergo/SECURITY.md @@ -4,8 +4,8 @@ | Version | Supported | | ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | +| 1.x.x | :white_check_mark: | +| < 1.0 | :x: | ## Security contact information diff --git a/tools/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go b/tools/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go index adfeedf5e..361c9ac69 100644 --- a/tools/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go +++ b/tools/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go @@ -48,6 +48,7 @@ type ConsumeFuzzer struct { NumberOfCalls int position uint32 fuzzUnexportedFields bool + forceUTF8Strings bool curDepth int Funcs map[reflect.Type]reflect.Value } @@ -104,6 +105,14 @@ func (f *ConsumeFuzzer) DisallowUnexportedFields() { f.fuzzUnexportedFields = false } +func (f *ConsumeFuzzer) AllowNonUTF8Strings() { + f.forceUTF8Strings = false +} + +func (f *ConsumeFuzzer) DisallowNonUTF8Strings() { + f.forceUTF8Strings = true +} + func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error { e := reflect.ValueOf(targetStruct).Elem() return f.fuzzStruct(e, false) @@ -224,6 +233,14 @@ func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error if e.CanSet() { e.Set(uu) } + case reflect.Uint: + newInt, err := f.GetUint() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } case reflect.Uint16: newInt, err := f.GetUint16() if err != nil { @@ -309,6 +326,14 @@ func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error if e.CanSet() { e.SetUint(uint64(b)) } + case reflect.Bool: + b, err := f.GetBool() + if err != nil { + return err + } + if e.CanSet() { + e.SetBool(b) + } } return nil } @@ -410,6 +435,23 @@ func (f *ConsumeFuzzer) GetUint64() (uint64, error) { return binary.BigEndian.Uint64(u64), nil } +func (f *ConsumeFuzzer) GetUint() (uint, error) { + var zero uint + size := int(unsafe.Sizeof(zero)) + if size == 8 { + u64, err := f.GetUint64() + if err != nil { + return 0, err + } + return uint(u64), nil + } + u32, err := f.GetUint32() + if err != nil { + return 0, err + } + return uint(u32), nil +} + func (f *ConsumeFuzzer) GetBytes() ([]byte, error) { var length uint32 var err error @@ -461,7 +503,11 @@ func (f *ConsumeFuzzer) GetString() (string, error) { return "nil", errors.New("numbers overflow") } f.position = byteBegin + length - return string(f.data[byteBegin:f.position]), nil + s := string(f.data[byteBegin:f.position]) + if f.forceUTF8Strings { + s = strings.ToValidUTF8(s, "") + } + return s, nil } func (f *ConsumeFuzzer) GetBool() (bool, error) { diff --git a/tools/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/tools/vendor/github.com/Azure/go-ansiterm/osc_string_state.go index 593b10ab6..194d5e9c9 100644 --- a/tools/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ b/tools/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -11,21 +11,13 @@ func (oscState oscStringState) Handle(b byte) (s state, e error) { return nextState, err } - switch { - case isOscStringTerminator(b): + // There are several control characters and sequences which can + // terminate an OSC string. Most of them are handled by the baseState + // handler. The ANSI_BEL character is a special case which behaves as a + // terminator only for an OSC string. 
+ if b == ANSI_BEL { return oscState.parser.ground, nil } return oscState, nil } - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/tools/vendor/github.com/BurntSushi/toml/README.md b/tools/vendor/github.com/BurntSushi/toml/README.md index 639e6c399..235496eeb 100644 --- a/tools/vendor/github.com/BurntSushi/toml/README.md +++ b/tools/vendor/github.com/BurntSushi/toml/README.md @@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). -Documentation: https://godocs.io/github.com/BurntSushi/toml +Documentation: https://pkg.go.dev/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show diff --git a/tools/vendor/github.com/BurntSushi/toml/decode.go b/tools/vendor/github.com/BurntSushi/toml/decode.go index 7aaf462c9..3fa516caa 100644 --- a/tools/vendor/github.com/BurntSushi/toml/decode.go +++ b/tools/vendor/github.com/BurntSushi/toml/decode.go @@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { return md.unify(primValue.undecoded, rvalue(v)) } +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + // unify performs a sort of type unification based on the structure of `rv`, // which is the client representation. // @@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error { if err != nil { return md.parseErr(err) } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. + if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { @@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error { func (md *MetaData) parseErr(err error) error { k := md.context.String() + d := string(md.data) return ParseError{ + Message: err.Error(), + err: err, LastKey: k, - Position: md.keyInfo[k].pos, + Position: md.keyInfo[k].pos.withCol(d), Line: md.keyInfo[k].pos.Line, - err: err, - input: string(md.data), + input: d, } } diff --git a/tools/vendor/github.com/BurntSushi/toml/encode.go b/tools/vendor/github.com/BurntSushi/toml/encode.go index 73366c0d9..ac196e7df 100644 --- a/tools/vendor/github.com/BurntSushi/toml/encode.go +++ b/tools/vendor/github.com/BurntSushi/toml/encode.go @@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { // Sort keys so that we have deterministic output. And write keys directly // underneath this key first, before writing sub-structs or sub-maps. 
- var mapKeysDirect, mapKeysSub []string + var mapKeysDirect, mapKeysSub []reflect.Value for _, mapKey := range rv.MapKeys() { - k := mapKey.String() if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) + mapKeysSub = append(mapKeysSub, mapKey) } else { - mapKeysDirect = append(mapKeysDirect, k) + mapKeysDirect = append(mapKeysDirect, mapKey) } } - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + val := eindirect(rv.MapIndex(mapKey)) if isNil(val) { continue } if inline { - enc.writeKeyValue(Key{mapKey}, val, true) + enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { enc.wf(", ") } } else { - enc.encode(key.add(mapKey), val) + enc.encode(key.add(mapKey.String()), val) } } } @@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } } -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - func pointerTo(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr { return pointerTo(t.Elem()) @@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) - if is32Bit { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - copyStart := make([]int, len(start)) - copy(copyStart, start) - start = copyStart - } + // Need to make a copy because ... ehm, I don't know why... I guess + // allocating a new array can cause it to fail(?) + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. @@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } addFields(rt, rv, nil) - writeFields := func(fields [][]int) { + writeFields := func(fields [][]int, totalFields int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex) @@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { + if fieldIndex[0] != totalFields-1 { enc.wf(", ") } } else { @@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.wf("{") } - writeFields(fieldsDirect) - writeFields(fieldsSub) + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) if inline { enc.wf("}") } diff --git a/tools/vendor/github.com/BurntSushi/toml/error.go b/tools/vendor/github.com/BurntSushi/toml/error.go index b45a3f45f..b7077d3ae 100644 --- a/tools/vendor/github.com/BurntSushi/toml/error.go +++ b/tools/vendor/github.com/BurntSushi/toml/error.go @@ -67,21 +67,36 @@ type ParseError struct { // Position of an error. type Position struct { Line int // Line number, starting at 1. + Col int // Error column, starting at 1. 
Start int // Start of error, as byte offset starting at 0. - Len int // Lenght in bytes. + Len int // Length of the error in bytes. } -func (pe ParseError) Error() string { - msg := pe.Message - if msg == "" { // Error from errorf() - msg = pe.err.Error() +func (p Position) withCol(tomlFile string) Position { + var ( + pos int + lines = strings.Split(tomlFile, "\n") + ) + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= p.Start { + p.Col = p.Start - pos + 1 + if p.Col < 1 { // Should never happen, but just in case. + p.Col = 1 + } + break + } + pos += ll } + return p +} +func (pe ParseError) Error() string { if pe.LastKey == "" { - return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) } return fmt.Sprintf("toml: line %d (last key %q): %s", - pe.Position.Line, pe.LastKey, msg) + pe.Position.Line, pe.LastKey, pe.Message) } // ErrorWithPosition returns the error with detailed location context. @@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string { return pe.Error() } + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + var ( lines = strings.Split(pe.input, "\n") - col = pe.column(lines) b = new(strings.Builder) ) - - msg := pe.Message - if msg == "" { - msg = pe.err.Error() - } - - // TODO: don't show control characters as literals? This may not show up - // well everywhere. - if pe.Position.Len == 1 { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", - msg, pe.Position.Line, col+1) + pe.Message, pe.Position.Line, pe.Position.Col) } else { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", - msg, pe.Position.Line, col, col+pe.Position.Len) + pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) } if pe.Position.Line > 2 { fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) @@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string { diff := len(expanded) - len(lines[pe.Position.Line-1]) fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } @@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string { return m } -func (pe ParseError) column(lines []string) int { - var pos, col int - for i := range lines { - ll := len(lines[i]) + 1 // +1 for the removed newline - if pos+ll >= pe.Position.Start { - col = pe.Position.Start - pos - if col < 0 { // Should never happen, but just in case. 
- col = 0 - } - break - } - pos += ll - } - - return col -} - func expandTab(s string) string { var ( b strings.Builder diff --git a/tools/vendor/github.com/BurntSushi/toml/lex.go b/tools/vendor/github.com/BurntSushi/toml/lex.go index a1016d98a..1c3b47702 100644 --- a/tools/vendor/github.com/BurntSushi/toml/lex.go +++ b/tools/vendor/github.com/BurntSushi/toml/lex.go @@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() - pos.Line-- + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } pos.Len = 1 pos.Start = lx.pos - 1 lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} @@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn { lx.emit(itemKeyEnd) return lexSkip(lx, lexValue) default: + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r)) + } return lx.errorf("expected '.' or '=', but got %q instead", r) } } @@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn { if r == eof { return lx.errorf("unexpected EOF; expected value") } + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r)) + } return lx.errorf("expected value but found %q instead", r) } @@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { case 'x': r = lx.peek() if !isHex(r) { - lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) } return lexHexInteger } @@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { - if tomlNext { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' || - r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || - (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || - (r >= 0x037f && r <= 0x1fff) || - (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || - (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || - (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || - (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || - (r >= 0x10000 && r <= 0xeffff) - } - - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/tools/vendor/github.com/BurntSushi/toml/meta.go b/tools/vendor/github.com/BurntSushi/toml/meta.go index e61453730..0d337026c 100644 --- a/tools/vendor/github.com/BurntSushi/toml/meta.go +++ b/tools/vendor/github.com/BurntSushi/toml/meta.go @@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string { // Like append(), but only increase the cap by 1. 
func (k Key) add(piece string) Key { - if cap(k) > len(k) { - return append(k, piece) - } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece diff --git a/tools/vendor/github.com/BurntSushi/toml/parse.go b/tools/vendor/github.com/BurntSushi/toml/parse.go index 11ac3108b..e3ea8a9a2 100644 --- a/tools/vendor/github.com/BurntSushi/toml/parse.go +++ b/tools/vendor/github.com/BurntSushi/toml/parse.go @@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) { // it anyway. if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] - //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) { if i := strings.IndexRune(data[:ex], 0); i > -1 { return nil, ParseError{ Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", - Position: Position{Line: 1, Start: i, Len: 1}, + Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, Line: 1, input: data, } @@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) { func (p *parser) panicErr(it item, err error) { panic(ParseError{ + Message: err.Error(), err: err, - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) { func (p *parser) panicItemf(it item, format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) { func (p *parser) panicf(format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: p.pos, + Position: p.pos.withCol(p.lx.input), Line: p.pos.Line, LastKey: p.current(), }) @@ -123,10 +123,11 @@ func (p *parser) next() item { if it.typ == itemError { if it.err != nil { panic(ParseError{ - Position: it.pos, + Message: it.err.Error(), + err: it.err, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Line, LastKey: p.current(), - err: it.err, }) } @@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool { } } - // isHexis a superset of all the permissable characters surrounding an + // isHex is a superset of all the permissible characters surrounding an // underscore. accept = isHex(r) } diff --git a/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md index f95a504fe..fabe5e43d 100644 --- a/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ b/tools/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## 3.4.0 (2025-06-27) + +### Added + +- #268: Added property to Constraints to include prereleases for Check and Validate + +### Changed + +- #263: Updated Go testing for 1.24, 1.23, and 1.22 +- #269: Updated the error message handling for message case and wrapping errors +- #266: Restore the ability to have leading 0's when parsing with NewVersion. + Opt-out of this by setting CoerceNewVersion to false. + +### Fixed + +- #257: Fixed the CodeQL link (thanks @dmitris) +- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out + of this by setting DetailedNewVersionErrors to false for faster performance. 
+- #267: Handle pre-releases for an "and" group if one constraint includes them
+
+## 3.3.1 (2024-11-19)
+
+### Fixed
+
+- #253: Fix for allowing some versions that were invalid
+
 ## 3.3.0 (2024-08-27)
 
 ### Added
@@ -137,7 +163,7 @@ functions. These are described in the added and changed sections below.
 - #78: Fix unchecked error in example code (thanks @ravron)
 - #70: Fix the handling of pre-releases and the 0.0.0 release edge case
 - #97: Fixed copyright file for proper display on GitHub
-- #107: Fix handling prerelease when sorting alphanum and num 
+- #107: Fix handling prerelease when sorting alphanum and num
 - #109: Fixed where Validate sometimes returns wrong message on error
 
 ## 1.4.2 (2018-04-10)
diff --git a/tools/vendor/github.com/Masterminds/semver/v3/README.md b/tools/vendor/github.com/Masterminds/semver/v3/README.md
index ed5693608..2f56c676a 100644
--- a/tools/vendor/github.com/Masterminds/semver/v3/README.md
+++ b/tools/vendor/github.com/Masterminds/semver/v3/README.md
@@ -50,6 +50,18 @@ other versions, convert the version back into a string, and get the original
 string. Getting the original string is useful if the semantic version was
 coerced into a valid form.
 
+There are package level variables that affect how `NewVersion` handles parsing.
+
+- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant
+  versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch
+  part. This enables the use of CalVer in versions even when not compliant with SemVer.
+  When set to `false`, less coercion work is done.
+- `DetailedNewVersionErrors` provides more detailed errors. It only has an effect when
+  `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true`
+  it can provide some more insight into why a version is invalid. Setting
+  `DetailedNewVersionErrors` to `false` improves performance but provides less
+  detailed error messages if a version fails to parse.
+
 ## Sorting Semantic Versions
 
 A set of versions can be sorted using the `sort` package from the standard
 library.
@@ -160,6 +172,10 @@ means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
 sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
 the spec specifies.
 
+The `Constraints` instance returned from `semver.NewConstraint()` has a property
+`IncludePrerelease` that, when set to true, will consider prerelease versions when
+`Check()` and `Validate()` are called.
+
 ### Hyphen Range Comparisons
 
 There are multiple methods to handle ranges and the first is hyphen ranges.
@@ -250,7 +266,7 @@ or [create a pull request](https://github.com/Masterminds/semver/pulls).
 Security is an important consideration for this project. The project currently
 uses the following tools to help discover security issues:
 
-* [CodeQL](https://github.com/Masterminds/semver)
+* [CodeQL](https://codeql.github.com)
 * [gosec](https://github.com/securego/gosec)
 * Daily Fuzz testing
 
diff --git a/tools/vendor/github.com/Masterminds/semver/v3/constraints.go b/tools/vendor/github.com/Masterminds/semver/v3/constraints.go
index 8461c7ed9..8b7a10f83 100644
--- a/tools/vendor/github.com/Masterminds/semver/v3/constraints.go
+++ b/tools/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -12,6 +12,13 @@ import (
 // checked against.
 type Constraints struct {
 	constraints [][]*constraint
+	containsPre []bool
+
+	// IncludePrerelease specifies if pre-releases should be included in
+	// the results. Note, if a constraint range has a prerelease then
+	// prereleases will be included for that AND group even if this is
+	// set to false.
+	IncludePrerelease bool
 }
 
 // NewConstraint returns a Constraints instance that a Version instance can
@@ -22,11 +29,10 @@ func NewConstraint(c string) (*Constraints, error) {
 	c = rewriteRange(c)
 
 	ors := strings.Split(c, "||")
-	or := make([][]*constraint, len(ors))
+	lenors := len(ors)
+	or := make([][]*constraint, lenors)
+	hasPre := make([]bool, lenors)
 	for k, v := range ors {
-
-		// TODO: Find a way to validate and fetch all the constraints in a simpler form
-
 		// Validate the segment
 		if !validConstraintRegex.MatchString(v) {
 			return nil, fmt.Errorf("improper constraint: %s", v)
@@ -43,12 +49,22 @@ func NewConstraint(c string) (*Constraints, error) {
 				return nil, err
 			}
 
+			// If one of the constraints has a prerelease, record this.
+			// This information is used when checking all in an "and"
+			// group to ensure they all check for prereleases.
+			if pc.con.pre != "" {
+				hasPre[k] = true
+			}
+
 			result[i] = pc
 		}
 		or[k] = result
 	}
 
-	o := &Constraints{constraints: or}
+	o := &Constraints{
+		constraints: or,
+		containsPre: hasPre,
+	}
 	return o, nil
 }
 
@@ -57,10 +73,10 @@ func (cs Constraints) Check(v *Version) bool {
 	// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
 	// functions as the underlying functions make that possible now.
 	// loop over the ORs and check the inner ANDs
-	for _, o := range cs.constraints {
+	for i, o := range cs.constraints {
 		joy := true
 		for _, c := range o {
-			if check, _ := c.check(v); !check {
+			if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check {
 				joy = false
 				break
 			}
@@ -83,12 +99,12 @@ func (cs Constraints) Validate(v *Version) (bool, []error) {
 	// Capture the prerelease message only once. When it happens the first time
 	// this var is marked
 	var prerelesase bool
-	for _, o := range cs.constraints {
+	for i, o := range cs.constraints {
 		joy := true
 		for _, c := range o {
 			// Before running the check handle the case where the version is
 			// a prerelease and the check is not searching for prereleases.
-			if c.con.pre == "" && v.pre != "" {
+			if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" {
 				if !prerelesase {
 					em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
 					e = append(e, em)
@@ -98,7 +114,7 @@ func (cs Constraints) Validate(v *Version) (bool, []error) {
 
 			} else {
 
-				if _, err := c.check(v); err != nil {
+				if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil {
 					e = append(e, err)
 					joy = false
 				}
@@ -227,8 +243,8 @@ type constraint struct {
 }
 
 // Check if a version meets the constraint
-func (c *constraint) check(v *Version) (bool, error) {
-	return constraintOps[c.origfunc](v, c)
+func (c *constraint) check(v *Version, includePre bool) (bool, error) {
+	return constraintOps[c.origfunc](v, c, includePre)
 }
 
 // String prints an individual constraint into a string
@@ -236,7 +252,7 @@ func (c *constraint) string() string {
 	return c.origfunc + c.orig
 }
 
-type cfunc func(v *Version, c *constraint) (bool, error)
+type cfunc func(v *Version, c *constraint, includePre bool) (bool, error)
 
 func parseConstraint(c string) (*constraint, error) {
 	if len(c) > 0 {
@@ -272,7 +288,7 @@ func parseConstraint(c string) (*constraint, error) {
 
 		// The constraintRegex should catch any regex parsing errors. So,
 		// we should never get here.
- return nil, errors.New("constraint Parser Error") + return nil, errors.New("constraint parser error") } cs.con = con @@ -290,7 +306,7 @@ func parseConstraint(c string) (*constraint, error) { // The constraintRegex should catch any regex parsing errors. So, // we should never get here. - return nil, errors.New("constraint Parser Error") + return nil, errors.New("constraint parser error") } cs := &constraint{ @@ -305,16 +321,14 @@ func parseConstraint(c string) (*constraint, error) { } // Constraint functions -func constraintNotEqual(v *Version, c *constraint) (bool, error) { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } +func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + if c.dirty { if c.con.Major() != v.Major() { return true, nil } @@ -345,12 +359,11 @@ func constraintNotEqual(v *Version, c *constraint) (bool, error) { return true, nil } -func constraintGreaterThan(v *Version, c *constraint) (bool, error) { +func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -391,11 +404,10 @@ func constraintGreaterThan(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) } -func constraintLessThan(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -406,12 +418,11 @@ func constraintLessThan(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) } -func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { +func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -422,11 +433,10 @@ func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is less than %s", v, c.orig) } -func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -455,11 +465,10 @@ func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { // ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 // ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 // ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -func constraintTilde(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -487,16 +496,15 @@ func constraintTilde(v *Version, c *constraint) (bool, error) { // When there is a .x (dirty) status it automatically opts in to ~. Otherwise // it's a straight = -func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+	if v.Prerelease() != "" && !includePre {
 		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
 	}
 
 	if c.dirty {
-		return constraintTilde(v, c)
+		return constraintTilde(v, c, includePre)
 	}
 
 	eq := v.Equal(c.con)
@@ -516,11 +524,10 @@
 // ^0.0.3 --> >=0.0.3 <0.0.4
 // ^0.0 --> >=0.0.0 <0.1.0
 // ^0 --> >=0.0.0 <1.0.0
-func constraintCaret(v *Version, c *constraint) (bool, error) {
-	// If there is a pre-release on the version but the constraint isn't looking
-	// for them assume that pre-releases are not compatible. See issue 21 for
-	// more details.
-	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
 		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
 	}
 
diff --git a/tools/vendor/github.com/Masterminds/semver/v3/version.go b/tools/vendor/github.com/Masterminds/semver/v3/version.go
index ff499fb66..7a3ba7388 100644
--- a/tools/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/tools/vendor/github.com/Masterminds/semver/v3/version.go
@@ -14,32 +14,52 @@ import (
 // The compiled version of the regex created at init() is cached here so it
 // only needs to be created once.
 var versionRegex *regexp.Regexp
+var looseVersionRegex *regexp.Regexp
+
+// CoerceNewVersion sets if leading 0's are allowed in the version part. Leading 0's are
+// not allowed in a valid semantic version. When set to true, NewVersion will coerce
+// leading 0's into a valid version.
+var CoerceNewVersion = true
+
+// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
+// function. This is used when CoerceNewVersion is set to false. If set to false,
+// ErrInvalidSemVer is returned for an invalid version. This does not apply to
+// StrictNewVersion. Setting this variable to false returns errors more quickly.
+var DetailedNewVersionErrors = true
 
 var (
 	// ErrInvalidSemVer is returned when a version is found to be invalid
 	// while being parsed.
-	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+	ErrInvalidSemVer = errors.New("invalid semantic version")
 
 	// ErrEmptyString is returned when an empty string is passed in for parsing.
-	ErrEmptyString = errors.New("Version string empty")
+	ErrEmptyString = errors.New("version string empty")
 
 	// ErrInvalidCharacters is returned when invalid characters are found as
 	// part of a version
-	ErrInvalidCharacters = errors.New("Invalid characters in version")
+	ErrInvalidCharacters = errors.New("invalid characters in version")
 
 	// ErrSegmentStartsZero is returned when a version segment starts with 0.
 	// This is invalid in SemVer.
- ErrSegmentStartsZero = errors.New("Version segment starts with 0") + ErrSegmentStartsZero = errors.New("version segment starts with 0") // ErrInvalidMetadata is returned when the metadata is an invalid format - ErrInvalidMetadata = errors.New("Invalid Metadata string") + ErrInvalidMetadata = errors.New("invalid metadata string") // ErrInvalidPrerelease is returned when the pre-release is an invalid format - ErrInvalidPrerelease = errors.New("Invalid Prerelease string") + ErrInvalidPrerelease = errors.New("invalid prerelease string") ) // semVerRegex is the regular expression used to parse a semantic version. -const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + +// This is not the official regex from the semver spec. It has been modified to allow for loose handling +// where versions like 2.1 are detected. +const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` + + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` + +// looseSemVerRegex is a regular expression that lets invalid semver expressions through +// with enough detail that certain errors can be checked for. +const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` @@ -53,6 +73,7 @@ type Version struct { func init() { versionRegex = regexp.MustCompile("^" + semVerRegex + "$") + looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$") } const ( @@ -140,7 +161,80 @@ func StrictNewVersion(v string) (*Version, error) { // attempts to convert it to SemVer. If you want to validate it was a strict // semantic version at parse time see StrictNewVersion(). func NewVersion(v string) (*Version, error) { + if CoerceNewVersion { + return coerceNewVersion(v) + } m := versionRegex.FindStringSubmatch(v) + if m == nil { + + // Disabling detailed errors is first so that it is in the fast path. + if !DetailedNewVersionErrors { + return nil, ErrInvalidSemVer + } + + // Check for specific errors with the semver string and return a more detailed + // error. + m = looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + err := validateVersion(m) + if err != nil { + return nil, err + } + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[5], + pre: m[4], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. 
+ + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +func coerceNewVersion(v string) (*Version, error) { + m := looseVersionRegex.FindStringSubmatch(v) if m == nil { return nil, ErrInvalidSemVer } @@ -154,13 +248,13 @@ func NewVersion(v string) (*Version, error) { var err error sv.major, err = strconv.ParseUint(m[1], 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } if m[2] != "" { sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } } else { sv.minor = 0 @@ -169,7 +263,7 @@ func NewVersion(v string) (*Version, error) { if m[3] != "" { sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } } else { sv.patch = 0 @@ -612,7 +706,9 @@ func containsOnly(s string, comp string) bool { func validatePrerelease(p string) error { eparts := strings.Split(p, ".") for _, p := range eparts { - if containsOnly(p, num) { + if p == "" { + return ErrInvalidPrerelease + } else if containsOnly(p, num) { if len(p) > 1 && p[0] == '0' { return ErrSegmentStartsZero } @@ -631,9 +727,62 @@ func validatePrerelease(p string) error { func validateMetadata(m string) error { eparts := strings.Split(m, ".") for _, p := range eparts { - if !containsOnly(p, allowed) { + if p == "" { return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} + +// validateVersion checks for common validation issues but may not catch all errors +func validateVersion(m []string) error { + var err error + var v string + if m[1] != "" { + if len(m[1]) > 1 && m[1][0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) } } + + if m[2] != "" { + v = strings.TrimPrefix(m[2], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[3] != "" { + v = strings.TrimPrefix(m[3], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[5] != "" { + if err = validatePrerelease(m[5]); err != nil { + return err + } + } + + if m[8] != "" { + if err = validateMetadata(m[8]); err != nil { + return err + } + } + return nil } diff --git a/tools/vendor/github.com/Microsoft/hcsshim/.clang-format b/tools/vendor/github.com/Microsoft/hcsshim/.clang-format new file mode 100644 index 000000000..fd843ce39 --- /dev/null +++ b/tools/vendor/github.com/Microsoft/hcsshim/.clang-format @@ -0,0 +1,12 @@ +Language: Cpp +BasedOnStyle: Microsoft +BreakBeforeBraces: Attach +PointerAlignment: Left +AllowShortFunctionsOnASingleLine: All +# match Go style +IndentCaseLabels: false +# don't break comments over line limit (needed for CodeQL exceptions) +ReflowComments: false +InsertNewlineAtEOF: true 
+KeepEmptyLines: + AtEndOfFile: true diff --git a/tools/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/tools/vendor/github.com/Microsoft/hcsshim/.golangci.yml index 7d38a2fb9..113e6f07a 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ b/tools/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -5,9 +5,6 @@ run: - admin - functional - integration - skip-dirs: - # paths are relative to module root - - cri-containerd/test-images linters: enable: @@ -34,13 +31,15 @@ linters-settings: # struct order is often for Win32 compat # also, ignore pointer bytes/GC issues for now until performance becomes an issue - fieldalignment - check-shadowing: true stylecheck: # https://staticcheck.io/docs/checks checks: ["all"] issues: + exclude-dirs: + # paths are relative to module root + - cri-containerd/test-images exclude-rules: # err is very often shadowed in nested scopes - linters: @@ -70,22 +69,22 @@ issues: - path: layer.go linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: hcsshim.go linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: cmd\\ncproxy\\nodenetsvc\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: cmd\\ncproxy_mock\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcs\\schema2\\ linters: @@ -95,67 +94,67 @@ issues: - path: internal\\wclayer\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: hcn\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcs\\schema1\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hns\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: ext4\\internal\\compactext4\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: ext4\\internal\\format\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\guestrequest\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\guest\\prot\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\windevice\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\winapi\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\vmcompute\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\regstate\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcserror\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" # v0 APIs are deprecated, but still retained for backwards compatability - path: cmd\\ncproxy\\ @@ -171,4 +170,4 @@ issues: - path: internal\\vhdx\\info linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" diff --git a/tools/vendor/github.com/Microsoft/hcsshim/Makefile b/tools/vendor/github.com/Microsoft/hcsshim/Makefile index de6435894..9a9f5b401 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/tools/vendor/github.com/Microsoft/hcsshim/Makefile @@ -1,13 +1,20 @@ -BASE:=base.tar.gz -DEV_BUILD:=0 +include Makefile.bootfiles GO:=go GO_FLAGS:=-ldflags "-s -w" # strip Go binaries CGO_ENABLED:=0 GOMODVENDOR:= +KMOD:=0 CFLAGS:=-O2 -Wall -LDFLAGS:=-static -s # strip C binaries +LDFLAGS:=-static -s #strip C binaries +LDLIBS:= +PREPROCESSORFLAGS:= +ifeq "$(KMOD)" "1" +LDFLAGS:= -s +LDLIBS:= -lkmod +PREPROCESSORFLAGS:=-DMODULES=1 +endif GO_FLAGS_EXTRA:= ifeq "$(GOMODVENDOR)" "1" @@ -23,108 +30,14 @@ SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) # additional directories to search for rule prerequisites and targets VPATH=$(SRCROOT) 
-DELTA_TARGET=out/delta.tar.gz - -ifeq "$(DEV_BUILD)" "1" -DELTA_TARGET=out/delta-dev.tar.gz -endif - -ifeq "$(SNP_BUILD)" "1" -DELTA_TARGET=out/delta-snp.tar.gz -endif - # The link aliases for gcstools GCS_TOOLS=\ generichook \ install-drivers -# Common path prefix. -PATH_PREFIX:= -# These have PATH_PREFIX prepended to obtain the full path in recipies e.g. $(PATH_PREFIX)/$(VMGS_TOOL) -VMGS_TOOL:= -IGVM_TOOL:= -KERNEL_PATH:= - -.PHONY: all always rootfs test snp simple - -.DEFAULT_GOAL := all - -all: out/initrd.img out/rootfs.tar.gz - -clean: - find -name '*.o' -print0 | xargs -0 -r rm - rm -rf bin deps rootfs out - test: cd $(SRCROOT) && $(GO) test -v ./internal/guest/... -rootfs: out/rootfs.vhd - -snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs - -simple: out/simple.vmgs snp - -%.vmgs: %.bin - rm -f $@ - # du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes - $(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc` - $(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8 - -# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk. -out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0 - -ROOTFS_DEVICE:=/dev/sda -VERITY_DEVICE:=/dev/sdb -# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.) -out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0 - -# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line. 
-out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0 - -# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash. -%.vhd: % bin/cmd/tar2ext4 - ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ - -# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4. -%.vhd: %.ext4 bin/cmd/tar2ext4 - ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ - -%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt - veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info - # Retrieve info required by dm-verity at boot time - # Get the blocksize of rootfs - cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest - cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt - cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize - cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize - cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks - echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors - -out/rootfs.hash.salt: - hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@ - -out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4 - gzip -f -d ./out/rootfs.tar.gz - ./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@ - -out/rootfs.tar.gz: out/initrd.img - rm -rf rootfs-conv - mkdir rootfs-conv - gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) - tar -zcf $@ -C rootfs-conv . - rm -rf rootfs-conv - -out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh - $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed - gzip -c out/initrd.img.uncompressed > $@ - rm out/initrd.img.uncompressed - # This target includes utilities which may be useful for testing purposes. out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report rm -rf rootfs-dev @@ -168,10 +81,7 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho tar -zcf $@ -C rootfs . 
 	rm -rf rootfs
 
-out/containerd-shim-runhcs-v1.exe:
-	GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
-
-bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
 	@mkdir -p $(dir $@)
 	GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
@@ -181,8 +91,8 @@ bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
 
 bin/init: init/init.o vsockexec/vsock.o
 	@mkdir -p bin
-	$(CC) $(LDFLAGS) -o $@ $^
+	$(CC) $(LDFLAGS) -o $@ $^ $(LDLIBS)
 
 %.o: %.c
 	@mkdir -p $(dir $@)
-	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
\ No newline at end of file
+	$(CC) $(PREPROCESSORFLAGS) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles b/tools/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles
new file mode 100644
index 000000000..e6f06d491
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles
@@ -0,0 +1,197 @@
+BASE:=base.tar.gz
+DEV_BUILD:=0
+
+DELTA_TARGET=out/delta.tar.gz
+
+ifeq "$(DEV_BUILD)" "1"
+DELTA_TARGET=out/delta-dev.tar.gz
+endif
+
+ifeq "$(SNP_BUILD)" "1"
+DELTA_TARGET=out/delta-snp.tar.gz
+endif
+
+SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
+
+PATH_PREFIX:=
+# These have PATH_PREFIX prepended to obtain the full path in recipes e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
+VMGS_TOOL:=
+IGVM_TOOL:=
+KERNEL_PATH:=
+TAR2EXT4_TOOL:=bin/cmd/tar2ext4
+
+ROOTFS_DEVICE:=/dev/sda
+HASH_DEVICE:=/dev/sdb
+
+.PHONY: all always rootfs test snp simple
+
+.DEFAULT_GOAL := all
+
+all: out/initrd.img out/rootfs.tar.gz
+
+clean:
+	find -name '*.o' -print0 | xargs -0 -r rm
+	rm -rf bin rootfs out
+
+rootfs: out/rootfs.vhd
+
+snp: out/kernel.vmgs out/rootfs-verity.vhd out/v2056.vmgs out/v2056combined.vmgs
+
+simple: out/simple.vmgs snp
+
+%.vmgs: %.bin
+	rm -f $@
+	# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
+	$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
+	$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
+
+# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
+out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
+		-o $@ \
+		-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
+		-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" \
+		-rdinit out/initrd.img \
+		-vtl 0
+
+# The boot performance is optimized by supplying rootfs as a SCSI attachment. In this case the kernel boots with
+# dm-verity to ensure the integrity. Similar to layer VHDs the verity Merkle tree is appended to ext4 filesystem.
+# It transpires that the /dev/sd* order is not deterministic wrt the scsi device order. Thus build a single userland
+# fs + merkle tree device and boot that.
+#
+# From https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-init.html
+#
+# dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
+#
+# where:
+# <name>        ::= The device name.
+# <uuid>        ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | ""
+# <minor>       ::= The device minor number | ""
+# <flags>       ::= "ro" | "rw"
+# <table>       ::= <start_sector> <num_sectors> <target_type> <target_args>
+# <target_type> ::= "verity" | "linear" | ... (see list below)
+#
+# From https://docs.kernel.org/admin-guide/device-mapper/verity.html
+#
+# <version> <dev> <hash_dev> <data_block_size> <hash_block_size>
+# <num_data_blocks> <hash_start_block> <algorithm> <digest> <salt>
+# [<#opt_params> <opt_params>]
+#
+# typical igvm tool line once all the macros are expanded
+# python3 /home/user/igvmfile.py -o out/v2056.bin -kernel /home/user/bzImage -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
+#
+# so a kernel command line of:
+# 8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh
+#
+# and a dm-mod.create of:
+# dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
+#
+# which breaks down to:
+#
+# name = "dmverity"
+# uuid = ""
+# minor = ""
+# flags = "ro"
+# table = 0 196744 verity "args"
+# start_sector = 0
+# num_sectors = 196744
+# target_type = verity
+# target_args = 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
+# args:
+# version 1
+# dev /dev/sda
+# hash_dev /dev/sdb
+# data_block_size 4096
+# hash_block_size 4096
+# num_data_blocks 24593
+# hash_start_block 0
+# algorithm sha256
+# digest 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66
+# salt b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba
+# opt_params
+# count = 1
+# ignore_corruption
+#
+# combined typical (note the bigger count of sectors for the whole device)
+# dmverity,,,ro,0 199672 verity 1 /dev/sda /dev/sda 4096 4096 24959 24959 sha256 4aa6e79866ee946ddbd9cddd6554bc6449272942fcc65934326817785a3bd374 adc4956274489c936395bab046a2d476f21ef436e571ba53da2fdf3aee59bf0a
+#
+# A few notes:
+# - num_sectors is the size of the final (aka target) verity device, i.e. the size of our rootfs excluding the Merkle
+#   tree.
+# - We don't add verity superblock, so the <hash_start_block> will be exactly at the end of ext4 filesystem and equal
+#   to its size. In the case when verity superblock is present an extra block should be added to the offset value,
+#   i.e. 24959 becomes 24960.
+
+
+# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+# Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
+out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(HASH_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \ + -vtl 0 + +out/v2056combined.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh + rm -f $@ + echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \ + -vtl 0 + +# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line. +out/kernel.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup.sh + rm -f $@ + echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" \ + -vtl 0 + +# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash. 
+
+# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
+%.vhd: % $(TAR2EXT4_TOOL)
+	$(TAR2EXT4_TOOL) -only-vhd -i $< -o $@
+
+# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
+%.vhd: %.ext4 $(TAR2EXT4_TOOL)
+	$(TAR2EXT4_TOOL) -only-vhd -i $< -o $@
+
+%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %.hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
+	veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
+	# Retrieve the dm-verity parameters required at boot time from the veritysetup output
+	cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
+	cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
+	cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
+	cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
+	cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
+	echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
+
+out/rootfs.hash.salt:
+	hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
+
+out/rootfs.ext4: out/rootfs.tar.gz $(TAR2EXT4_TOOL)
+	gzip -f -d ./out/rootfs.tar.gz
+	$(TAR2EXT4_TOOL) -i ./out/rootfs.tar -o $@
+
+out/rootfs-verity.ext4: out/rootfs.ext4 out/rootfs.hash
+	cp out/rootfs.ext4 $@
+	cat out/rootfs.hash >> $@
+
+out/rootfs.tar.gz: out/initrd.img
+	rm -rf rootfs-conv
+	mkdir rootfs-conv
+	gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
+	tar -zcf $@ -C rootfs-conv .
+	rm -rf rootfs-conv
+
+out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh
+	$(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed
+	gzip -c out/initrd.img.uncompressed > $@
+	rm out/initrd.img.uncompressed
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/README.md b/tools/vendor/github.com/Microsoft/hcsshim/README.md
index 320438048..ae6668263 100644
--- a/tools/vendor/github.com/Microsoft/hcsshim/README.md
+++ b/tools/vendor/github.com/Microsoft/hcsshim/README.md
@@ -44,7 +44,7 @@ delta.tar.gz initrd.img rootfs.tar.gz
 
 ### Containerd Shim
 
-For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
+For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/main/core/runtime/v2/README.md).
 
 Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
index 8ef611d6a..fef2bf546 100644
--- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -63,10 +63,10 @@ func (process *Process) SystemID() string {
 }
 
 func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
-	switch err { //nolint:errorlint
-	case nil:
+	if err == nil {
 		return true, nil
-	case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
+	}
+	if errors.Is(err, ErrVmcomputeOperationInvalidState) || errors.Is(err, ErrComputeSystemDoesNotExist) || errors.Is(err, ErrElementNotFound) {
 		if !process.stopped() {
 			// The process should be gone, but we have not received the notification.
			// After a second, force unblock the process wait to work around a possible
@@ -82,9 +82,8 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo
 			}()
 		}
 		return false, nil
-	default:
-		return false, err
 	}
+	return false, err
 }
 
 // Signal signals the process with `options`.
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go
index ca75277a3..93857da69 100644
--- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go
@@ -24,4 +24,6 @@ type Chipset struct {
 
 	// LinuxKernelDirect - Added in v2.2 Builds >=181117
 	LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"`
+
+	FirmwareFile *FirmwareFile `json:"FirmwareFile,omitempty"`
 }
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go
similarity index 70%
rename from tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go
rename to tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go
index 81865e7ea..52fb62a82 100644
--- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go
@@ -9,14 +9,6 @@
 
 package hcsschema
 
-const (
-	CimMountFlagNone uint32 = 0x0
-	CimMountFlagChildOnly uint32 = 0x1
-	CimMountFlagEnableDax uint32 = 0x2
-	CimMountFlagCacheFiles uint32 = 0x4
-	CimMountFlagCacheRegions uint32 = 0x8
-)
-
 type CimMount struct {
 	ImagePath string `json:"ImagePath,omitempty"`
 	FileSystemName string `json:"FileSystemName,omitempty"`
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go
new file mode 100644
index 000000000..c27a13200
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go
@@ -0,0 +1,8 @@
+package hcsschema
+
+type FirmwareFile struct {
+	// Parameters is an experimental/pre-release field. The field itself or its
+	// behavior can change in future iterations of the schema. Avoid taking a hard
+	// dependency on this field.
+	Parameters []byte `json:"Parameters,omitempty"`
+}
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go
deleted file mode 100644
index 71224c75b..000000000
--- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * HCS API
- *
- * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
- *
- * API version: 2.1
- * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
- */
-
-package hcsschema
-
-type Memory2 struct {
-	SizeInMB uint64 `json:"SizeInMB,omitempty"`
-
-	AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
-
-	EnableHotHint bool `json:"EnableHotHint,omitempty"`
-
-	EnableColdHint bool `json:"EnableColdHint,omitempty"`
-
-	EnableEpf bool `json:"EnableEpf,omitempty"`
-
-	// EnableDeferredCommit is private in the schema. If regenerated need to add back.
-	EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
-
-	// EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed
-	// to the VM, allowing it to trim non-zeroed pages from the working set (if supported by
-	// the guest operating system).
-	EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
-
-	// LowMmioGapInMB is the low MMIO region allocated below 4GB.
-	//
-	// TODO: This is pre-release support in schema 2.3. Need to add build number
-	// docs when a public build with this is out.
-	LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
-
-	// HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and
-	// size).
-	//
-	// TODO: This is pre-release support in schema 2.3. Need to add build number
-	// docs when a public build with this is out.
-	HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
-
-	// HighMmioGapInMB is the high MMIO region.
-	//
-	// TODO: This is pre-release support in schema 2.3. Need to add build number
-	// docs when a public build with this is out.
-	HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
-}
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go
new file mode 100644
index 000000000..41837416c
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type MemoryBackingType string
+
+// List of MemoryBackingType
+const (
+	MemoryBackingType_PHYSICAL MemoryBackingType = "Physical"
+	MemoryBackingType_VIRTUAL MemoryBackingType = "Virtual"
+	MemoryBackingType_HYBRID MemoryBackingType = "Hybrid"
+)
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go
new file mode 100644
index 000000000..70a139519
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go
@@ -0,0 +1,19 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type Numa struct {
+	VirtualNodeCount uint8 `json:"VirtualNodeCount,omitempty"`
+	PreferredPhysicalNodes []int64 `json:"PreferredPhysicalNodes,omitempty"`
+	Settings []NumaSetting `json:"Settings,omitempty"`
+	MaxSizePerNode uint64 `json:"MaxSizePerNode,omitempty"`
+}
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go
new file mode 100644
index 000000000..5984bdecd
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go
@@ -0,0 +1,17 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNode struct {
+	VirtualNodeIndex uint32 `json:"VirtualNodeIndex,omitempty"`
+	PhysicalNodeIndex uint32 `json:"PhysicalNodeIndex,omitempty"`
+}
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go
new file mode 100644
index 000000000..88567f0f6
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go
@@ -0,0 +1,19 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNodeMemory struct {
+	// Total physical memory on this physical NUMA node that is consumable by the VMs.
+	TotalConsumableMemoryInPages uint64 `json:"TotalConsumableMemoryInPages,omitempty"`
+	// Currently available physical memory on this physical NUMA node for the VMs.
+	AvailableMemoryInPages uint64 `json:"AvailableMemoryInPages,omitempty"`
+}
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go
new file mode 100644
index 000000000..4b6795bb9
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go
@@ -0,0 +1,17 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNodeProcessor struct {
+	TotalAssignedProcessors uint32 `json:"TotalAssignedProcessors,omitempty"`
+	TotalAvailableProcessors uint32 `json:"TotalAvailableProcessors,omitempty"`
+}
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go
new file mode 100644
index 000000000..bc3fba37a
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaProcessors struct {
+	CountPerNode Range `json:"count_per_node,omitempty"`
+	NodePerSocket uint32 `json:"node_per_socket,omitempty"`
+}
+
+type Range struct {
+	Max uint32 `json:"max,omitempty"`
+}
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go
new file mode 100644
index 000000000..3f27b2ca0
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+ +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NumaSetting struct { + VirtualNodeNumber uint32 `json:"VirtualNodeNumber,omitempty"` + PhysicalNodeNumber uint32 `json:"PhysicalNodeNumber,omitempty"` + VirtualSocketNumber uint32 `json:"VirtualSocketNumber,omitempty"` + CountOfProcessors uint32 `json:"CountOfProcessors,omitempty"` + CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"` + MemoryBackingType MemoryBackingType `json:"MemoryBackingType,omitempty"` +} diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go deleted file mode 100644 index c64f335ec..000000000 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.5 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Processor2 struct { - Count int32 `json:"Count,omitempty"` - - Limit int32 `json:"Limit,omitempty"` - - Weight int32 `json:"Weight,omitempty"` - - ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` - - // An optional object that configures the CPU Group to which a Virtual Machine is going to bind to. - CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` -} diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go index 0c7efe8d4..d4cb95bdd 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go @@ -26,6 +26,8 @@ type Properties struct { RuntimeId string `json:"RuntimeId,omitempty"` + SystemGUID string `json:"SystemGUID,omitempty"` + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` State string `json:"State,omitempty"` diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go index 98f2c96ed..934f777fc 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go @@ -23,4 +23,5 @@ const ( PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" PTProcessorTopology PropertyType = "ProcessorTopology" PTCPUGroup PropertyType = "CpuGroup" + PTSystemGUID PropertyType = "SystemGUID" ) diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go index 834869940..9cca85171 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go @@ -1,16 +1,18 @@ +// Autogenerated code; DO NOT EDIT. 
+ /* - * HCS API + * Schema Open API * * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * - * API version: 2.1 + * API version: 2.4 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package hcsschema type Topology struct { - Memory *Memory2 `json:"Memory,omitempty"` - - Processor *Processor2 `json:"Processor,omitempty"` + Memory *VirtualMachineMemory `json:"Memory,omitempty"` + Processor *VirtualMachineProcessor `json:"Processor,omitempty"` + Numa *Numa `json:"Numa,omitempty"` } diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go index 1e0fab289..3f750466f 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go @@ -1,36 +1,29 @@ +// Autogenerated code; DO NOT EDIT. + /* - * HCS API + * Schema Open API * * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * - * API version: 2.1 + * API version: 2.4 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package hcsschema +// Configuration of a virtual machine, used during its creation to set up and/or use resources. type VirtualMachine struct { - - // StopOnReset is private in the schema. If regenerated need to put back. - StopOnReset bool `json:"StopOnReset,omitempty"` - - Chipset *Chipset `json:"Chipset,omitempty"` - - ComputeTopology *Topology `json:"ComputeTopology,omitempty"` - - Devices *Devices `json:"Devices,omitempty"` - - GuestState *GuestState `json:"GuestState,omitempty"` - - RestoreState *RestoreState `json:"RestoreState,omitempty"` - + Version *Version `json:"Version,omitempty"` + // When set to true, the virtual machine will treat a reset as a stop, releasing resources and cleaning up state. + StopOnReset bool `json:"StopOnReset,omitempty"` + Chipset *Chipset `json:"Chipset,omitempty"` + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + Devices *Devices `json:"Devices,omitempty"` + GuestState *GuestState `json:"GuestState,omitempty"` + RestoreState *RestoreState `json:"RestoreState,omitempty"` RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` - - StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` - + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` - - SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` - - DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` + SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` } diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go new file mode 100644 index 000000000..17573c92a --- /dev/null +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go @@ -0,0 +1,33 @@ +// Autogenerated code; DO NOT EDIT. 
+ +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualMachineMemory struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` + Backing *MemoryBackingType `json:"Backing,omitempty"` + // If enabled, then the VM's memory is backed by the Windows pagefile rather than physically backed, statically allocated memory. + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + // If enabled, then the memory hot hint feature is exposed to the VM, allowing it to prefetch pages into its working set. (if supported by the guest operating system). + EnableHotHint bool `json:"EnableHotHint,omitempty"` + // If enabled, then the memory cold hint feature is exposed to the VM, allowing it to trim zeroed pages from its working set (if supported by the guest operating system). + EnableColdHint bool `json:"EnableColdHint,omitempty"` + // If enabled, then the memory cold discard hint feature is exposed to the VM, allowing it to trim non-zeroed pages from the working set (if supported by the guest operating system). + EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` + // If enabled, then commit is not charged for each backing page until first access. + EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` + // Low MMIO region allocated below 4GB + LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` + // High MMIO region allocated above 4GB (base and size) + HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` + HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` + SlitType *VirtualSlitType `json:"SlitType,omitempty"` +} diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go new file mode 100644 index 000000000..619cd8340 --- /dev/null +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go @@ -0,0 +1,21 @@ +// Autogenerated code; DO NOT EDIT. + +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualMachineProcessor struct { + Count uint32 `json:"Count,omitempty"` + Limit uint64 `json:"Limit,omitempty"` + Weight uint64 `json:"Weight,omitempty"` + Reservation uint64 `json:"Reservation,omitempty"` + CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` + NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"` +} diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go index f5e05903c..a4a62da16 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go @@ -9,8 +9,9 @@ package hcsschema -// TODO: This is pre-release support in schema 2.3. Need to add build number +// TODO: PropagateNumaAffinity is pre-release/experimental field in schema 2.11. Need to add build number // docs when a public build with this is out. 
 type VirtualPciDevice struct {
 	Functions []VirtualPciFunction `json:",omitempty"`
+	PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"`
 }
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go
new file mode 100644
index 000000000..dfad62313
--- /dev/null
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go
@@ -0,0 +1,23 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+// VirtualSlitType : Indicates if a virtual SLIT should be enabled for a VM and the type of virtual SLIT to be enabled.
+type VirtualSlitType string
+
+// List of VirtualSlitType
+const (
+	VirtualSlitType_NONE VirtualSlitType = "None"
+	VirtualSlitType_FIRMWARE VirtualSlitType = "Firmware"
+	VirtualSlitType_MEASURED VirtualSlitType = "Measured"
+	VirtualSlitType_FIRMWARE_FALLBACK_MEASURED VirtualSlitType = "FirmwareFallbackMeasured"
+)
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go
index 8ed7e566d..ee85c43b3 100644
--- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go
@@ -13,4 +13,6 @@ type WindowsCrashReporting struct {
 	DumpFileName string `json:"DumpFileName,omitempty"`
 
 	MaxDumpSize int64 `json:"MaxDumpSize,omitempty"`
+
+	DumpType string `json:"DumpType,omitempty"`
 }
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 81d60ed43..b1597466f 100644
--- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -238,9 +238,10 @@ func (computeSystem *System) Shutdown(ctx context.Context) error {
 	resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
 	events := processHcsResult(ctx, resultJSON)
-	switch err { //nolint:errorlint
-	case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
-	default:
+	if err != nil &&
+		!errors.Is(err, ErrVmcomputeAlreadyStopped) &&
+		!errors.Is(err, ErrComputeSystemDoesNotExist) &&
+		!errors.Is(err, ErrVmcomputeOperationPending) {
 		return makeSystemError(computeSystem, operation, err, events)
 	}
 	return nil
@@ -259,9 +260,10 @@ func (computeSystem *System) Terminate(ctx context.Context) error {
 	resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
 	events := processHcsResult(ctx, resultJSON)
-	switch err { //nolint:errorlint
-	case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
-	default:
+	if err != nil &&
+		!errors.Is(err, ErrVmcomputeAlreadyStopped) &&
+		!errors.Is(err, ErrComputeSystemDoesNotExist) &&
+		!errors.Is(err, ErrVmcomputeOperationPending) {
 		return makeSystemError(computeSystem, operation, err, events)
 	}
 	return nil
@@ -279,14 +281,13 @@ func (computeSystem *System) waitBackground() {
 	span.AddAttributes(trace.StringAttribute("cid",
computeSystem.id)) err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - switch err { //nolint:errorlint - case nil: + if err == nil { log.G(ctx).Debug("system exited") - case ErrVmcomputeUnexpectedExit: + } else if errors.Is(err, ErrVmcomputeUnexpectedExit) { log.G(ctx).Debug("unexpected system exit") computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) err = nil - default: + } else { err = makeSystemError(computeSystem, operation, err, nil) } computeSystem.closedWaitOnce.Do(func() { diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go index 82ca5baef..4b1e51cb7 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go @@ -47,7 +47,7 @@ func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMac func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { operation := "Get" title := "hcsshim::nnvManagementMacList::" + operation - logrus.Debugf(title) + logrus.Debug(title) return HNSNnvManagementMacRequest("GET", "", "") } @@ -55,6 +55,6 @@ func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { operation := "Delete" title := "hcsshim::nnvManagementMacList::" + operation - logrus.Debugf(title) + logrus.Debug(title) return HNSNnvManagementMacRequest("DELETE", "", "") } diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go index b505731c3..3afa240aa 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -22,9 +22,8 @@ import ( // of the job and a mutex for synchronized handle access. type JobObject struct { handle windows.Handle - // All accesses to this MUST be done atomically except in `Open` as the object - // is being created in the function. 1 signifies that this job is currently a silo. - silo uint32 + // silo signifies that this job is currently a silo. + silo atomic.Bool mq *queue.MessageQueue handleLock sync.RWMutex } @@ -204,9 +203,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { handle: jobHandle, } - if isJobSilo(jobHandle) { - job.silo = 1 - } + job.silo.Store(isJobSilo(jobHandle)) // If the IOCP we'll be using to receive messages for all jobs hasn't been // created, create it and start polling. @@ -479,7 +476,7 @@ func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error return ErrAlreadyClosed } - if !job.isSilo() { + if !job.silo.Load() { return ErrNotSilo } @@ -546,7 +543,7 @@ func (job *JobObject) PromoteToSilo() error { return ErrAlreadyClosed } - if job.isSilo() { + if job.silo.Load() { return nil } @@ -569,15 +566,10 @@ func (job *JobObject) PromoteToSilo() error { return fmt.Errorf("failed to promote job to silo: %w", err) } - atomic.StoreUint32(&job.silo, 1) + job.silo.Store(true) return nil } -// isSilo returns if the job object is a silo. -func (job *JobObject) isSilo() bool { - return atomic.LoadUint32(&job.silo) == 1 -} - // QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the // private working set for every process running in the job. 
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) { diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go index e3b1a1edc..fedf8add6 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -150,6 +150,7 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr) } + // CodeQL [SM03681] checked against max value above (there is no math.MaxUintPtr ...) info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) return job.setExtendedInformation(info) } diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/log/context.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/log/context.go index d17d909d9..4399cec6f 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/log/context.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/log/context.go @@ -4,7 +4,6 @@ import ( "context" "github.com/sirupsen/logrus" - "go.opencensus.io/trace" ) type entryContextKeyType int @@ -20,13 +19,13 @@ var ( // Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`. L = logrus.NewEntry(logrus.StandardLogger()) - // G is an alias for GetEntry + // G is an alias for GetEntry. G = GetEntry - // S is an alias for SetEntry + // S is an alias for SetEntry. S = SetEntry - // U is an alias for UpdateContext + // U is an alias for UpdateContext. U = UpdateContext ) @@ -83,7 +82,7 @@ func UpdateContext(ctx context.Context) context.Context { // WithContext returns a context that contains the provided log entry. // The entry can be extracted with `GetEntry` (`G`) // -// The entry in the context is a copy of `entry` (generated by `entry.WithContext`) +// The entry in the context is a copy of `entry` (generated by `entry.WithContext`). func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) { // regardless of the order, entry.Context != GetEntry(ctx) // here, the returned entry will reference the supplied context @@ -93,25 +92,6 @@ func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *lo return ctx, entry } -// Copy extracts the tracing Span and logging entry from the src Context, if they -// exist, and adds them to the dst Context. -// -// This is useful to share tracing and logging between contexts, but not the -// cancellation. For example, if the src Context has been cancelled but cleanup -// operations triggered by the cancellation require a non-cancelled context to -// execute. 
-func Copy(dst context.Context, src context.Context) context.Context { - if s := trace.FromContext(src); s != nil { - dst = trace.NewContext(dst, s) - } - - if e := fromContext(src); e != nil { - dst, _ = WithContext(dst, e) - } - - return dst -} - func fromContext(ctx context.Context) *logrus.Entry { e, _ := ctx.Value(_entryContextKey).(*logrus.Entry) return e diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/log/format.go index 1ceb26bad..f26316fab 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/log/format.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -103,9 +103,7 @@ func encode(v interface{}) (_ []byte, err error) { if jErr := enc.Encode(v); jErr != nil { if err != nil { - // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...) - //nolint:errorlint // non-wrapping format verb for fmt.Errorf - return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr) + return nil, fmt.Errorf("protojson encoding: %w; json encoding: %w", err, jErr) } return nil, fmt.Errorf("json encoding: %w", jErr) } diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go index 5a960e0d3..5346f9b7c 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -22,23 +22,14 @@ var ( // case sensitive keywords, so "env" is not a substring on "Environment" _scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")} - _scrub int32 + _scrub atomic.Bool ) // SetScrubbing enables scrubbing -func SetScrubbing(enable bool) { - v := int32(0) // cant convert from bool to int32 directly - if enable { - v = 1 - } - atomic.StoreInt32(&_scrub, v) -} +func SetScrubbing(enable bool) { _scrub.Store(enable) } // IsScrubbingEnabled checks if scrubbing is enabled -func IsScrubbingEnabled() bool { - v := atomic.LoadInt32(&_scrub) - return v != 0 -} +func IsScrubbingEnabled() bool { return _scrub.Load() } // ScrubProcessParameters scrubs HCS Create Process requests with config parameters of // type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters) diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go index 67ca897cf..965086a58 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error }() select { case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint + if ctx.Err() == gcontext.DeadlineExceeded { log.G(ctx).WithField(logfields.Timeout, trueTimeout). Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. 
" + "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " + diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index fc12eeba4..627060cee 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -34,6 +34,7 @@ const ( UtilityVMPath = `UtilityVM` UtilityVMFilesPath = `UtilityVM\Files` RegFilesPath = `Files\Windows\System32\config` + BootDirRelativePath = `\EFI\Microsoft\Boot` BcdFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\BCD` BootMgrFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi` ContainerBaseVhd = `blank-base.vhdx` diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go index 21664577b..6c026d982 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go @@ -32,10 +32,16 @@ type CimFsFileMetadata struct { EACount uint32 } +type CimFsImagePath struct { + ImageDir *uint16 + ImageName *uint16 +} + //sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage? //sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage? //sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage? +//sys CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage2? //sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage? //sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage? @@ -45,3 +51,8 @@ type CimFsFileMetadata struct { //sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath? //sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink? //sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream? +//sys CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimAddFsToMergedImage? +//sys CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) = cimfs.CimAddFsToMergedImage2? +//sys CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) = cimfs.CimMergeMountImage? +//sys CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimTombstoneFile? +//sys CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateMergeLink? 
diff --git a/tools/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/tools/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index ecdded312..2abdc2e07 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -53,6 +53,8 @@ var ( procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCimAddFsToMergedImage = modcimfs.NewProc("CimAddFsToMergedImage") + procCimAddFsToMergedImage2 = modcimfs.NewProc("CimAddFsToMergedImage2") procCimCloseImage = modcimfs.NewProc("CimCloseImage") procCimCloseStream = modcimfs.NewProc("CimCloseStream") procCimCommitImage = modcimfs.NewProc("CimCommitImage") @@ -60,9 +62,13 @@ var ( procCimCreateFile = modcimfs.NewProc("CimCreateFile") procCimCreateHardLink = modcimfs.NewProc("CimCreateHardLink") procCimCreateImage = modcimfs.NewProc("CimCreateImage") + procCimCreateImage2 = modcimfs.NewProc("CimCreateImage2") + procCimCreateMergeLink = modcimfs.NewProc("CimCreateMergeLink") procCimDeletePath = modcimfs.NewProc("CimDeletePath") procCimDismountImage = modcimfs.NewProc("CimDismountImage") + procCimMergeMountImage = modcimfs.NewProc("CimMergeMountImage") procCimMountImage = modcimfs.NewProc("CimMountImage") + procCimTombstoneFile = modcimfs.NewProc("CimTombstoneFile") procCimWriteStream = modcimfs.NewProc("CimWriteStream") procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") @@ -181,6 +187,54 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr return } +func CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimAddFsToMergedImage(cimFSHandle, _p0) +} + +func _CimAddFsToMergedImage(cimFSHandle FsHandle, path *uint16) (hr error) { + hr = procCimAddFsToMergedImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimAddFsToMergedImage2(cimFSHandle, _p0, flags) +} + +func _CimAddFsToMergedImage2(cimFSHandle FsHandle, path *uint16, flags uint32) (hr error) { + hr = procCimAddFsToMergedImage2.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage2.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(flags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimCloseImage(cimFSHandle FsHandle) (err error) { err = procCimCloseImage.Find() if err != nil { @@ -321,6 +375,59 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci return } +func CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(imagePath) + if hr != nil { 
+ return + } + return _CimCreateImage2(_p0, flags, oldFSName, newFSName, cimFSHandle) +} + +func _CimCreateImage2(imagePath *uint16, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + hr = procCimCreateImage2.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimCreateImage2.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(flags), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(newPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(oldPath) + if hr != nil { + return + } + return _CimCreateMergeLink(cimFSHandle, _p0, _p1) +} + +func _CimCreateMergeLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) { + hr = procCimCreateMergeLink.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimCreateMergeLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(path) @@ -360,6 +467,21 @@ func CimDismountImage(volumeID *g) (hr error) { return } +func CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) { + hr = procCimMergeMountImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimMergeMountImage.Addr(), uintptr(numCimPaths), uintptr(unsafe.Pointer(backingImagePaths)), uintptr(flags), uintptr(unsafe.Pointer(volumeID))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(imagePath) @@ -389,6 +511,30 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g return } +func CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimTombstoneFile(cimFSHandle, _p0) +} + +func _CimTombstoneFile(cimFSHandle FsHandle, path *uint16) (hr error) { + hr = procCimTombstoneFile.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimTombstoneFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) { hr = procCimWriteStream.Find() if hr != nil { diff --git a/tools/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/tools/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go index f8d411ad7..a7860895c 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go @@ -3,7 +3,8 @@ package osversion // List of stable ABI compliant ltsc releases // Note: List must 
be sorted in ascending order var compatLTSCReleases = []uint16{ - V21H2Server, + LTSC2022, + LTSC2025, } // CheckHostAndContainerCompat checks if given host and container @@ -20,16 +21,25 @@ func CheckHostAndContainerCompat(host, ctr OSVersion) bool { } // If host is < WS 2022, exact version match is required - if host.Build < V21H2Server { + if host.Build < LTSC2022 { return host.Build == ctr.Build } - var supportedLtscRelease uint16 + // Find the latest LTSC version that is earlier than the host version. + // This is the earliest version of container that the host can run. + // + // If the host version is an LTSC, then it supports compatibility with + // everything from the previous LTSC up to itself, so we want supportedLTSCRelease + // to be the previous entry. + // + // If no match is found, then we know that the host is LTSC2022 exactly, + // since we already checked that it's not less than LTSC2022. + var supportedLTSCRelease uint16 = LTSC2022 for i := len(compatLTSCReleases) - 1; i >= 0; i-- { - if host.Build >= compatLTSCReleases[i] { - supportedLtscRelease = compatLTSCReleases[i] + if host.Build > compatLTSCReleases[i] { + supportedLTSCRelease = compatLTSCReleases[i] break } } - return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build + return supportedLTSCRelease <= ctr.Build && ctr.Build <= host.Build } diff --git a/tools/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/tools/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go index 446369591..5392a4cea 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -81,4 +81,11 @@ const ( // V22H2Win11 corresponds to Windows 11 (2022 Update). V22H2Win11 = 22621 + + // V23H2 is the 23H2 release in the Windows Server annual channel. + V23H2 = 25398 + + // Windows Server 2025 build 26100 + V25H1Server = 26100 + LTSC2025 = V25H1Server ) diff --git a/tools/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go b/tools/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go index 4ebfbbc2f..17247f0c5 100644 --- a/tools/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go +++ b/tools/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go @@ -61,8 +61,7 @@ func ImportLayerFromTar(ctx context.Context, r io.Reader, path string, parentLay func writeLayerFromTar(ctx context.Context, r io.Reader, w wclayer.LayerWriter, root string) (int64, error) { t := tar.NewReader(r) - // CodeQL [SM03409] False positive, `internal/safefile` package ensures tar extractions are always - // bound to the layer root directory. + // CodeQL [SM03409] `internal\wclayer` uses `internal/safefile` to bind tar extraction to the layer's root directory hdr, err := t.Next() totalSize := int64(0) buf := bufio.NewWriter(nil) @@ -80,16 +79,14 @@ func writeLayerFromTar(ctx context.Context, r io.Reader, w wclayer.LayerWriter, if err != nil { return 0, err } - // CodeQL [SM03409] False positive, `internal/safefile` package ensures tar extractions are always - // bound to the layer root directory. 
+ // CodeQL [SM03409] `internal\wclayer` uses `internal/safefile` to bind tar extraction to the layer's root directory hdr, err = t.Next() } else if hdr.Typeflag == tar.TypeLink { err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) if err != nil { return 0, err } - // CodeQL [SM03409] False positive, `internal/safefile` package ensures tar extractions are always - // bound to the layer root directory. + // CodeQL [SM03409] `internal\wclayer` uses `internal/safefile` to bind tar extraction to the layer's root directory hdr, err = t.Next() } else { var ( diff --git a/tools/vendor/github.com/VividCortex/ewma/.gitignore b/tools/vendor/github.com/VividCortex/ewma/.gitignore new file mode 100644 index 000000000..c66769f6c --- /dev/null +++ b/tools/vendor/github.com/VividCortex/ewma/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +.*.sw? +/coverage.txt \ No newline at end of file diff --git a/tools/vendor/github.com/VividCortex/ewma/.whitesource b/tools/vendor/github.com/VividCortex/ewma/.whitesource new file mode 100644 index 000000000..d7eebc0cf --- /dev/null +++ b/tools/vendor/github.com/VividCortex/ewma/.whitesource @@ -0,0 +1,3 @@ +{ + "settingsInheritedFrom": "VividCortex/whitesource-config@master" +} \ No newline at end of file diff --git a/tools/vendor/github.com/VividCortex/ewma/LICENSE b/tools/vendor/github.com/VividCortex/ewma/LICENSE new file mode 100644 index 000000000..a78d643ed --- /dev/null +++ b/tools/vendor/github.com/VividCortex/ewma/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2013 VividCortex + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/tools/vendor/github.com/VividCortex/ewma/README.md b/tools/vendor/github.com/VividCortex/ewma/README.md new file mode 100644 index 000000000..87b4a3c7e --- /dev/null +++ b/tools/vendor/github.com/VividCortex/ewma/README.md @@ -0,0 +1,145 @@ +# EWMA + +[![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) +![build](https://github.com/VividCortex/ewma/workflows/build/badge.svg) +[![codecov](https://codecov.io/gh/VividCortex/ewma/branch/master/graph/badge.svg)](https://codecov.io/gh/VividCortex/ewma) + +This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our +Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/). 
+
+### Exponentially Weighted Moving Average
+
+An exponentially weighted moving average is a way to continuously compute a type of
+average for a series of numbers, as the numbers arrive. After a value in the series is
+added to the average, its weight in the average decreases exponentially over time. This
+biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
+their inexpensive computational and memory cost, as well as the fact that they represent
+the recent central tendency of the series of values.
+
+The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
+is biased towards recent history. The alpha must be between 0 and 1, and is typically
+a fairly small number, such as 0.04. We will discuss the choice of alpha later.
+
+The algorithm works thus, in pseudocode:
+
+1. Multiply the next number in the series by alpha.
+2. Multiply the current value of the average by 1 minus alpha.
+3. Add the result of steps 1 and 2, and store it as the new current value of the average.
+4. Repeat for each number in the series.
+
+There are special-case behaviors for how to initialize the current value, and these vary
+between implementations. One approach is to start with the first value in the series;
+another is to average the first 10 or so values in the series using an arithmetic average,
+and then begin the incremental updating of the average. Each method has pros and cons.
+
+It may help to look at it pictorially. Suppose the series has five numbers, and we choose
+alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.
+
+![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png)
+
+Now let's take the moving average of those numbers. First we set the average to the value
+of the first number.
+
+![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png)
+
+Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
+them to generate a new value.
+
+![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png)
+
+This continues until we are done.
+
+![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png)
+
+Notice how each of the values in the series decays by half each time a new value
+is added, and the top of the bars in the lower portion of the image represents the
+size of the moving average. It is a smoothed, or low-pass, average of the original
+series.
+
+For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.
+
+### Choosing Alpha
+
+Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
+that averages over the previous N samples. What is the average age of each sample? It is N/2.
+
+Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula
+to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
+"Production and Operations Analysis" by Steven Nahmias.
+
+So, for example, if you have a time-series with samples once per second, and you want to get the
+moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
+is the constant alpha used for this repository's SimpleEWMA.
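To make the relationship above concrete, here is a short Go sketch (an editorial illustration mirroring the formulas quoted in the README, not part of this package's API):

```go
package main

import "fmt"

func main() {
	// alpha = 2/(N+1) gives an EWMA whose samples have the same average
	// age (N/2) as an N-sample sliding window.
	const n = 60.0           // one-minute window at one sample per second
	alpha := 2.0 / (n + 1.0) // ≈ 0.032786885, the value quoted above

	// One update step: blend the next sample into the running average.
	avg := 300.0    // current value of the average
	sample := 310.0 // next value in the series
	avg = sample*alpha + avg*(1-alpha)

	fmt.Printf("alpha=%.9f new average=%.3f\n", alpha, avg)
}
```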
+ +### Implementations + +This repository contains two implementations of the EWMA algorithm, with different properties. + +The implementations all conform to the MovingAverage interface, and the constructor returns +that type. + +Current implementations assume an implicit time interval of 1.0 between every sample added. +That is, the passage of time is treated as though it's the same as the arrival of samples. +If you need time-based decay when samples are not arriving precisely at set intervals, then +this package will not support your needs at present. + +#### SimpleEWMA + +A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA +for multiple reasons. It has no warm-up period and it uses a constant +decay. These properties let it use less memory. It will also behave +differently when it's equal to zero, which is assumed to mean +uninitialized, so if a value is likely to actually become zero over time, +then any non-zero value will cause a sharp jump instead of a small change. + +#### VariableEWMA + +Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory. +It also has a "warmup" time when you start adding values to it. It will report a value of 0.0 +until you have added the required number of samples to it. It uses some memory to store the +number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA. + +## Usage + +### API Documentation + +View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma). + +```go +package main + +import "github.com/VividCortex/ewma" + +func main() { + samples := [100]float64{ + 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, + } + + e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params + a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1) + + for _, f := range samples { + e.Add(f) + a.Add(f) + } + + e.Value() //=> 13.577404704631077 + a.Value() //=> 1.5806140565521463e-12 +} +``` + +## Contributing + +We only accept pull requests for minor fixes or improvements. This includes: + +* Small bug fixes +* Typos +* Documentation or comments + +Please open issues to discuss new features. Pull requests for new features will be rejected, +so we recommend forking the repository and making changes in your fork for your use case. + +## License + +This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved. +It is licensed under the MIT license. Please see the LICENSE file for applicable license terms. diff --git a/tools/vendor/github.com/VividCortex/ewma/codecov.yml b/tools/vendor/github.com/VividCortex/ewma/codecov.yml new file mode 100644 index 000000000..0d36d903f --- /dev/null +++ b/tools/vendor/github.com/VividCortex/ewma/codecov.yml @@ -0,0 +1,6 @@ +coverage: + status: + project: + default: + threshold: 15% + patch: off diff --git a/tools/vendor/github.com/VividCortex/ewma/ewma.go b/tools/vendor/github.com/VividCortex/ewma/ewma.go new file mode 100644 index 000000000..44d5d53e3 --- /dev/null +++ b/tools/vendor/github.com/VividCortex/ewma/ewma.go @@ -0,0 +1,126 @@ +// Package ewma implements exponentially weighted moving averages. +package ewma + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. +// Please see the LICENSE file for applicable license terms. 
+
+const (
+	// By default, we average over a one-minute period, which means the average
+	// age of the metrics in the period is 30 seconds.
+	AVG_METRIC_AGE float64 = 30.0
+
+	// The formula for computing the decay factor from the average age comes
+	// from "Production and Operations Analysis" by Steven Nahmias.
+	DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)
+
+	// For best results, the moving average should not be initialized to the
+	// samples it sees immediately. The book "Production and Operations
+	// Analysis" by Steven Nahmias suggests initializing the moving average to
+	// the mean of the first 10 samples. Until the VariableEWMA has seen this
+	// many samples, it is not "ready" to be queried for the value of the
+	// moving average. This adds some memory cost.
+	WARMUP_SAMPLES uint8 = 10
+)
+
+// MovingAverage is the interface that computes a moving average over a time-
+// series stream of numbers. The average may be over a window or exponentially
+// decaying.
+type MovingAverage interface {
+	Add(float64)
+	Value() float64
+	Set(float64)
+}
+
+// NewMovingAverage constructs a MovingAverage that computes an average with the
+// desired characteristics in the moving window or exponential decay. If no
+// age is given, it constructs a default exponentially weighted implementation
+// that consumes minimal memory. The age is related to the decay factor alpha
+// by the formula given for the DECAY constant. It signifies the average age
+// of the samples as time goes to infinity.
+func NewMovingAverage(age ...float64) MovingAverage {
+	if len(age) == 0 || age[0] == AVG_METRIC_AGE {
+		return new(SimpleEWMA)
+	}
+	return &VariableEWMA{
+		decay: 2 / (age[0] + 1),
+	}
+}
+
+// A SimpleEWMA represents the exponentially weighted moving average of a
+// series of numbers. It WILL have different behavior than the VariableEWMA
+// for multiple reasons. It has no warm-up period and it uses a constant
+// decay. These properties let it use less memory. It will also behave
+// differently when it's equal to zero, which is assumed to mean
+// uninitialized, so if a value is likely to actually become zero over time,
+// then any non-zero value will cause a sharp jump instead of a small change.
+// However, note that this takes a long time, and the value may just
+// decay to a stable value that's close to zero, but which won't be mistaken
+// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
+type SimpleEWMA struct {
+	// The current value of the average. After adding with Add(), this is
+	// updated to reflect the average of all values seen thus far.
+	value float64
+}
+
+// Add adds a value to the series and updates the moving average.
+func (e *SimpleEWMA) Add(value float64) {
+	if e.value == 0 { // this is a proxy for "uninitialized"
+		e.value = value
+	} else {
+		e.value = (value * DECAY) + (e.value * (1 - DECAY))
+	}
+}
+
+// Value returns the current value of the moving average.
+func (e *SimpleEWMA) Value() float64 {
+	return e.value
+}
+
+// Set sets the EWMA's value.
+func (e *SimpleEWMA) Set(value float64) {
+	e.value = value
+}
+
+// VariableEWMA represents the exponentially weighted moving average of a series of
+// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
+type VariableEWMA struct {
+	// The multiplier factor by which the previous samples decay.
+	decay float64
+	// The current value of the average.
+	value float64
+	// The number of samples added to this instance.
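+	// It saturates at WARMUP_SAMPLES + 1 once the warm-up period completes.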
+ count uint8 +} + +// Add adds a value to the series and updates the moving average. +func (e *VariableEWMA) Add(value float64) { + switch { + case e.count < WARMUP_SAMPLES: + e.count++ + e.value += value + case e.count == WARMUP_SAMPLES: + e.count++ + e.value = e.value / float64(WARMUP_SAMPLES) + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + default: + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + } +} + +// Value returns the current value of the average, or 0.0 if the series hasn't +// warmed up yet. +func (e *VariableEWMA) Value() float64 { + if e.count <= WARMUP_SAMPLES { + return 0.0 + } + + return e.value +} + +// Set sets the EWMA's value. +func (e *VariableEWMA) Set(value float64) { + e.value = value + if e.count <= WARMUP_SAMPLES { + e.count = WARMUP_SAMPLES + 1 + } +} diff --git a/tools/vendor/github.com/acarl005/stripansi/LICENSE b/tools/vendor/github.com/acarl005/stripansi/LICENSE new file mode 100644 index 000000000..00abe0dbf --- /dev/null +++ b/tools/vendor/github.com/acarl005/stripansi/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Andrew Carlson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/tools/vendor/github.com/acarl005/stripansi/README.md b/tools/vendor/github.com/acarl005/stripansi/README.md new file mode 100644 index 000000000..8bdb1f505 --- /dev/null +++ b/tools/vendor/github.com/acarl005/stripansi/README.md @@ -0,0 +1,30 @@ +Strip ANSI +========== + +This Go package removes ANSI escape codes from strings. + +Ideally, we would prevent these from appearing in any text we want to process. +However, sometimes this can't be helped, and we need to be able to deal with that noise. +This will use a regexp to remove those unwanted escape codes. 
+ + +## Install + +```sh +$ go get -u github.com/acarl005/stripansi +``` + +## Usage + +```go +import ( + "fmt" + "github.com/acarl005/stripansi" +) + +func main() { + msg := "\x1b[38;5;140m foo\x1b[0m bar" + cleanMsg := stripansi.Strip(msg) + fmt.Println(cleanMsg) // " foo bar" +} +``` diff --git a/tools/vendor/github.com/acarl005/stripansi/stripansi.go b/tools/vendor/github.com/acarl005/stripansi/stripansi.go new file mode 100644 index 000000000..235732a78 --- /dev/null +++ b/tools/vendor/github.com/acarl005/stripansi/stripansi.go @@ -0,0 +1,13 @@ +package stripansi + +import ( + "regexp" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var re = regexp.MustCompile(ansi) + +func Strip(str string) string { + return re.ReplaceAllString(str, "") +} diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go index 3bb4fd7c4..48bd362bf 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go @@ -17,9 +17,9 @@ ANTLR4 that it is compatible with (I.E. uses the /v4 path). However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code. This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not -list the release tag such as @4.12.0 - this was confusing, to say the least. +list the release tag such as @4.13.1 - this was confusing, to say the least. -As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` +As of 4.13.0, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` (the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information, which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs]. @@ -49,7 +49,7 @@ Here is a general/recommended template for an ANTLR based recognizer in Go: . ├── parser │ ├── mygrammar.g4 - │ ├── antlr-4.12.1-complete.jar + │ ├── antlr-4.13.1-complete.jar │ ├── generate.go │ └── generate.sh ├── parsing - generated code goes here @@ -71,7 +71,7 @@ And the generate.sh file will look similar to this: #!/bin/sh - alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + alias antlr4='java -Xmx500M -cp "./antlr4-4.13.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4 depending on whether you want visitors or listeners or any other ANTLR options. 
Not that another option here diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go b/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go index cdeefed24..e749ebd0c 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go @@ -4,8 +4,6 @@ package antlr -import "sync" - // ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or // which is invalid for a particular struct such as [*antlr.BaseRuleContext] var ATNInvalidAltNumber int @@ -56,9 +54,9 @@ type ATN struct { // states []ATNState - mu sync.Mutex - stateMu sync.RWMutex - edgeMu sync.RWMutex + mu Mutex + stateMu RWMutex + edgeMu RWMutex } // NewATN returns a new ATN struct representing the given grammarType and is used diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go index a83f25d34..267308bb3 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -73,9 +73,6 @@ func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *AT // NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors' // are just wrappers around this one. func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed - } b := &ATNConfig{} b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext) b.cType = parserConfig diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go index b737fe85f..ab4e96be5 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -148,7 +148,7 @@ func (is *InputStream) GetTextFromInterval(i Interval) string { } func (*InputStream) GetSourceName() string { - return "" + return "Obtained from string" } // String returns the entire input stream as a string diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go index ceccd96d2..6d668f798 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go @@ -8,7 +8,6 @@ import ( "container/list" "runtime/debug" "sort" - "sync" ) // Collectable is an interface that a struct should implement if it is to be @@ -587,12 +586,12 @@ type VisitRecord struct { type VisitList struct { cache *list.List - lock sync.RWMutex + lock RWMutex } var visitListPool = VisitList{ cache: list.New(), - lock: sync.RWMutex{}, + lock: RWMutex{}, } // NewVisitRecord returns a new VisitRecord instance from the pool if available. 
diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go index 3c7896a91..e5594b216 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go @@ -207,7 +207,7 @@ func (b *BaseLexer) NextToken() Token { for { b.thetype = TokenInvalidType - ttype := b.safeMatch() + ttype := b.safeMatch() // Defaults to LexerSkip if b.input.LA(1) == TokenEOF { b.hitEOF = true diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go index 4955ac876..dfdff000b 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -40,6 +40,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { for alt := 0; alt < count; alt++ { look[alt] = NewIntervalSet() + // TODO: This is one of the reasons that ATNConfigs are allocated and freed all the time - fix this tomorrow jim! lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/mutex.go b/tools/vendor/github.com/antlr4-go/antlr/v4/mutex.go new file mode 100644 index 000000000..2b0cda474 --- /dev/null +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/mutex.go @@ -0,0 +1,41 @@ +//go:build !antlr.nomutex +// +build !antlr.nomutex + +package antlr + +import "sync" + +// Mutex is a simple mutex implementation which just delegates to sync.Mutex, it +// is used to provide a mutex implementation for the antlr package, which users +// can turn off with the build tag -tags antlr.nomutex +type Mutex struct { + mu sync.Mutex +} + +func (m *Mutex) Lock() { + m.mu.Lock() +} + +func (m *Mutex) Unlock() { + m.mu.Unlock() +} + +type RWMutex struct { + mu sync.RWMutex +} + +func (m *RWMutex) Lock() { + m.mu.Lock() +} + +func (m *RWMutex) Unlock() { + m.mu.Unlock() +} + +func (m *RWMutex) RLock() { + m.mu.RLock() +} + +func (m *RWMutex) RUnlock() { + m.mu.RUnlock() +} diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go b/tools/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go new file mode 100644 index 000000000..35ce4353e --- /dev/null +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go @@ -0,0 +1,32 @@ +//go:build antlr.nomutex +// +build antlr.nomutex + +package antlr + +type Mutex struct{} + +func (m *Mutex) Lock() { + // No-op +} + +func (m *Mutex) Unlock() { + // No-op +} + +type RWMutex struct{} + +func (m *RWMutex) Lock() { + // No-op +} + +func (m *RWMutex) Unlock() { + // No-op +} + +func (m *RWMutex) RLock() { + // No-op +} + +func (m *RWMutex) RUnlock() { + // No-op +} diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go index ae2869692..724fa17a1 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go @@ -10,8 +10,6 @@ import ( "strings" ) -var () - // ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over // a standard JStore so that we can use Lazy instantiation of the JStore, mostly // to avoid polluting the stats module with a ton of JStore instances with nothing in 
them. @@ -883,7 +881,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre // the ERROR state was reached, outerContext as the initial parser context from the paper // or the parser stack at the instant before prediction commences. // -// Teh func returns the value to return from [AdaptivePredict], or +// The func returns the value to return from [AdaptivePredict], or // [ATNInvalidAltNumber] if a suitable alternative was not // identified and [AdaptivePredict] should report an error instead. func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go index c1b80cc1f..a1d5186b8 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go @@ -6,7 +6,6 @@ package antlr import ( "fmt" - "golang.org/x/exp/slices" "strconv" ) @@ -101,7 +100,7 @@ func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) hash = murmurUpdate(hash, returnState) } hash = murmurFinish(hash, len(parents)<<1) - + nec := &PredictionContext{} nec.cachedHash = hash nec.pcType = PredictionContextArray @@ -115,6 +114,9 @@ func (p *PredictionContext) Hash() int { } func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + if p == other { + return true + } switch p.pcType { case PredictionContextEmpty: otherP := other.(*PredictionContext) @@ -138,13 +140,11 @@ func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool if p.cachedHash != other.Hash() { return false // can't be same if hash is different } - + // Must compare the actual array elements and not just the array address // - return slices.Equal(p.returnStates, other.returnStates) && - slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { - return x.Equals(y) - }) + return intSlicesEqual(p.returnStates, other.returnStates) && + pcSliceEqual(p.parents, other.parents) } func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { @@ -152,23 +152,23 @@ func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext return false } otherP := other.(*PredictionContext) - if otherP == nil { + if otherP == nil || otherP.pcType != PredictionContextSingleton { return false } - + if p.cachedHash != otherP.Hash() { return false // Can't be same if hash is different } - + if p.returnState != otherP.getReturnState(0) { return false } - + // Both parents must be nil if one is if p.parentCtx == nil { return otherP.parentCtx == nil } - + return p.parentCtx.Equals(otherP.parentCtx) } @@ -225,27 +225,27 @@ func (p *PredictionContext) String() string { return "$" case PredictionContextSingleton: var up string - + if p.parentCtx == nil { up = "" } else { up = p.parentCtx.String() } - + if len(up) == 0 { if p.returnState == BasePredictionContextEmptyReturnState { return "$" } - + return strconv.Itoa(p.returnState) } - + return strconv.Itoa(p.returnState) + " " + up case PredictionContextArray: if p.isEmpty() { return "[]" } - + s := "[" for i := 0; i < len(p.returnStates); i++ { if i > 0 { @@ -263,7 +263,7 @@ func (p *PredictionContext) String() string { } } return s + "]" - + default: return "unknown" } @@ -309,18 +309,18 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *Predict 
parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) state := a.states[outerContext.GetInvokingState()] transition := state.GetTransitions()[0] - + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) } func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { - + // Share same graph if both same // if a == b || a.Equals(b) { return a } - + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { return mergeSingletons(a, b, rootIsWildcard, mergeCache) } @@ -334,7 +334,7 @@ func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *Pr return b } } - + // Convert either Singleton or Empty to arrays, so that we can merge them // ara := convertToArray(a) @@ -395,7 +395,7 @@ func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *J return previous } } - + rootMerge := mergeRoot(a, b, rootIsWildcard) if rootMerge != nil { if mergeCache != nil { @@ -564,7 +564,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa i := 0 // walks a j := 0 // walks b k := 0 // walks target M array - + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) // walk and merge to yield mergedParents, mergedReturnStates @@ -626,9 +626,9 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa mergedParents = mergedParents[0:k] mergedReturnStates = mergedReturnStates[0:k] } - + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - + // if we created same array as a or b, return that instead // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation if M.Equals(a) { @@ -650,7 +650,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa return b } combineCommonParents(&mergedParents) - + if mergeCache != nil { mergeCache.Put(a, b, M) } @@ -666,7 +666,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa //goland:noinspection GoUnusedFunction func combineCommonParents(parents *[]*PredictionContext) { uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext") - + for p := 0; p < len(*parents); p++ { parent := (*parents)[p] _, _ = uniqueParents.Put(parent) @@ -685,7 +685,7 @@ func getCachedBasePredictionContext(context *PredictionContext, contextCache *Pr if present { return existing } - + existing, present = contextCache.Get(context) if present { visited.Put(context, existing) @@ -722,6 +722,6 @@ func getCachedBasePredictionContext(context *PredictionContext, contextCache *Pr contextCache.add(updated) visited.Put(updated, updated) visited.Put(context, updated) - + return updated } diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go index 2e0b504fb..dcb8548cd 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -56,7 +56,7 @@ var tokenTypeMapCache = make(map[string]int) var ruleIndexMapCache = make(map[string]int) func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.12.0" + runtimeVersion := "4.13.1" if runtimeVersion != toolVersion { 
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) } diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go index 70c0673a0..8cb5f3ed6 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -9,7 +9,6 @@ import ( "path/filepath" "sort" "strconv" - "sync" ) // This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default @@ -30,7 +29,7 @@ type goRunStats struct { // within this package. // jStats []*JStatRec - jStatsLock sync.RWMutex + jStatsLock RWMutex topN int topNByMax []*JStatRec topNByUsed []*JStatRec diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/token.go b/tools/vendor/github.com/antlr4-go/antlr/v4/token.go index 9670efb82..f5bc34229 100644 --- a/tools/vendor/github.com/antlr4-go/antlr/v4/token.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -104,6 +104,25 @@ func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { return b.source } +func (b *BaseToken) GetText() string { + if b.text != "" { + return b.text + } + input := b.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if b.GetStart() < n && b.GetStop() < n { + return input.GetTextFromInterval(NewInterval(b.GetStart(), b.GetStop())) + } + return "" +} + +func (b *BaseToken) SetText(text string) { + b.text = text +} + func (b *BaseToken) GetTokenIndex() int { return b.tokenIndex } @@ -120,6 +139,28 @@ func (b *BaseToken) GetInputStream() CharStream { return b.source.charStream } +func (b *BaseToken) String() string { + txt := b.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "" + } + + var ch string + if b.GetChannel() > 0 { + ch = ",channel=" + strconv.Itoa(b.GetChannel()) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(b.GetTokenIndex()) + "," + strconv.Itoa(b.GetStart()) + ":" + strconv.Itoa(b.GetStop()) + "='" + + txt + "',<" + strconv.Itoa(b.GetTokenType()) + ">" + + ch + "," + strconv.Itoa(b.GetLine()) + ":" + strconv.Itoa(b.GetColumn()) + "]" +} + type CommonToken struct { BaseToken } @@ -170,44 +211,3 @@ func (c *CommonToken) clone() *CommonToken { t.text = c.GetText() return t } - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "" -} - -func (c *CommonToken) SetText(text string) { - c.text = text -} - -func (c *CommonToken) String() string { - txt := c.GetText() - if txt != "" { - txt = strings.Replace(txt, "\n", "\\n", -1) - txt = strings.Replace(txt, "\r", "\\r", -1) - txt = strings.Replace(txt, "\t", "\\t", -1) - } else { - txt = "" - } - - var ch string - if c.channel > 0 { - ch = ",channel=" + strconv.Itoa(c.channel) - } else { - ch = "" - } - - return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + - txt + "',<" + strconv.Itoa(c.tokenType) + ">" + - ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" -} diff --git a/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go b/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go index 733d7df9d..36a37f247 100644 --- 
a/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go +++ b/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ -326,3 +326,56 @@ func isDirectory(dir string) (bool, error) { } return fileInfo.IsDir(), err } + +// intSlicesEqual returns true if the two slices of ints are equal, and is a little +// faster than slices.Equal. +func intSlicesEqual(s1, s2 []int) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if v != s2[i] { + return false + } + } + return true +} + +func pcSliceEqual(s1, s2 []*PredictionContext) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if !v.Equals(s2[i]) { + return false + } + } + return true +} diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/context.go b/tools/vendor/github.com/cenkalti/backoff/v4/context.go deleted file mode 100644 index 48482330e..000000000 --- a/tools/vendor/github.com/cenkalti/backoff/v4/context.go +++ /dev/null @@ -1,62 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. -type BackOffContext interface { // nolint: golint - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func getContext(b BackOff) context.Context { - if cb, ok := b.(BackOffContext); ok { - return cb.Context() - } - if tb, ok := b.(*backOffTries); ok { - return getContext(tb.delegate) - } - return context.Background() -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - return b.BackOff.NextBackOff() - } -} diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go b/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go deleted file mode 100644 index aac99f196..000000000 --- a/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ /dev/null @@ -1,216 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. 
- -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff returns Stop. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Stop time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. -type ExponentialBackOffOpts func(*ExponentialBackOff) - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Stop: Stop, - Clock: SystemClock, - } - for _, fn := range opts { - fn(b) - } - b.Reset() - return b -} - -// WithInitialInterval sets the initial interval between retries. -func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.InitialInterval = duration - } -} - -// WithRandomizationFactor sets the randomization factor to add jitter to intervals. -func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.RandomizationFactor = randomizationFactor - } -} - -// WithMultiplier sets the multiplier for increasing the interval after each retry. -func WithMultiplier(multiplier float64) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Multiplier = multiplier - } -} - -// WithMaxInterval sets the maximum interval between retries. -func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.MaxInterval = duration - } -} - -// WithMaxElapsedTime sets the maximum total time for retries. 
-func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.MaxElapsedTime = duration - } -} - -// WithRetryStopDuration sets the duration after which retries should stop. -func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Stop = duration - } -} - -// WithClockProvider sets the clock used to measure time. -func WithClockProvider(clock Clock) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Clock = clock - } -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -// Reset must be called before using b. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval * (1 ± RandomizationFactor) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - elapsed := b.GetElapsedTime() - next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) - b.incrementCurrentInterval() - if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { - return b.Stop - } - return next -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - if randomizationFactor == 0 { - return currentInterval // make sure no randomness is used when randomizationFactor is 0. - } - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. 
- return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/retry.go b/tools/vendor/github.com/cenkalti/backoff/v4/retry.go deleted file mode 100644 index b9c0c51cd..000000000 --- a/tools/vendor/github.com/cenkalti/backoff/v4/retry.go +++ /dev/null @@ -1,146 +0,0 @@ -package backoff - -import ( - "errors" - "time" -) - -// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). -// The operation will be retried using a backoff policy if it returns an error. -type OperationWithData[T any] func() (T, error) - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -func (o Operation) withEmptyData() OperationWithData[struct{}] { - return func() (struct{}, error) { - return struct{}{}, o() - } -} - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { - return RetryNotify(o, b, nil) -} - -// RetryWithData is like Retry but returns data in the response too. -func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { - return RetryNotifyWithData(o, b, nil) -} - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - return RetryNotifyWithTimer(operation, b, notify, nil) -} - -// RetryNotifyWithData is like RetryNotify but returns data in the response too. -func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { - return doRetryNotify(operation, b, notify, nil) -} - -// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer -// for each failed attempt before sleep. -// A default timer that uses system timer is used when nil is passed. -func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) - return err -} - -// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. 
-func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - return doRetryNotify(operation, b, notify, t) -} - -func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - var ( - err error - next time.Duration - res T - ) - if t == nil { - t = &defaultTimer{} - } - - defer func() { - t.Stop() - }() - - ctx := getContext(b) - - b.Reset() - for { - res, err = operation() - if err == nil { - return res, nil - } - - var permanent *PermanentError - if errors.As(err, &permanent) { - return res, permanent.Err - } - - if next = b.NextBackOff(); next == Stop { - if cerr := ctx.Err(); cerr != nil { - return res, cerr - } - - return res, err - } - - if notify != nil { - notify(err, next) - } - - t.Start(next) - - select { - case <-ctx.Done(): - return res, ctx.Err() - case <-t.C(): - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -func (e *PermanentError) Unwrap() error { - return e.Err -} - -func (e *PermanentError) Is(target error) bool { - _, ok := target.(*PermanentError) - return ok -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) error { - if err == nil { - return nil - } - return &PermanentError{ - Err: err, - } -} diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/tries.go b/tools/vendor/github.com/cenkalti/backoff/v4/tries.go deleted file mode 100644 index 28d58ca37..000000000 --- a/tools/vendor/github.com/cenkalti/backoff/v4/tries.go +++ /dev/null @@ -1,38 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. -*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries == 0 { - return Stop - } - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/.gitignore b/tools/vendor/github.com/cenkalti/backoff/v5/.gitignore similarity index 100% rename from tools/vendor/github.com/cenkalti/backoff/v4/.gitignore rename to tools/vendor/github.com/cenkalti/backoff/v5/.gitignore diff --git a/tools/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/tools/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md new file mode 100644 index 000000000..658c37436 --- /dev/null +++ b/tools/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [5.0.0] - 2024-12-19 + +### Added + +- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry. + +### Changed + +- Retry function now accepts additional options for specifying max number of tries and max elapsed time. 
+- Retry function now accepts a context.Context. +- Operation function signature changed to return result (any type) and error. + +### Removed + +- RetryNotify* and RetryWithData functions. Only single Retry function remains. +- Optional arguments from ExponentialBackoff constructor. +- Clock and Timer interfaces. + +### Fixed + +- The original error is returned from Retry if there's a PermanentError. (#144) +- The Retry function respects the wrapped PermanentError. (#140) diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/LICENSE b/tools/vendor/github.com/cenkalti/backoff/v5/LICENSE similarity index 100% rename from tools/vendor/github.com/cenkalti/backoff/v4/LICENSE rename to tools/vendor/github.com/cenkalti/backoff/v5/LICENSE diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/README.md b/tools/vendor/github.com/cenkalti/backoff/v5/README.md similarity index 64% rename from tools/vendor/github.com/cenkalti/backoff/v4/README.md rename to tools/vendor/github.com/cenkalti/backoff/v5/README.md index 9433004a2..4611b1d17 100644 --- a/tools/vendor/github.com/cenkalti/backoff/v4/README.md +++ b/tools/vendor/github.com/cenkalti/backoff/v5/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold ## Usage -Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. +Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end. -Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. +For most cases, use `Retry` function. See [example_test.go][example] for an example. + +If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed. ## Contributing @@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. * Please don't send a PR without opening an issue and discussing it first. * If proposed change is not a common use case, I will probably not accept it. 
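+
+To make the v5 usage described above concrete, here is a minimal sketch. It
+uses only the API introduced in this release (`Retry`, `WithBackOff`,
+`WithMaxTries`, `NewExponentialBackOff`); the failing operation itself is
+invented for illustration:
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/cenkalti/backoff/v5"
+)
+
+func main() {
+	// Per the v5 signature, the operation returns a result and an error.
+	attempts := 0
+	op := func() (string, error) {
+		attempts++
+		if attempts < 3 {
+			return "", errors.New("transient failure") // retried after a backoff delay
+		}
+		return "ok", nil
+	}
+
+	res, err := backoff.Retry(context.Background(), op,
+		backoff.WithBackOff(backoff.NewExponentialBackOff()),
+		backoff.WithMaxTries(5),
+	)
+	fmt.Println(res, err) // prints "ok <nil>" after two retried failures
+}
+```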
-[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff -[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples +[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go +[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/backoff.go b/tools/vendor/github.com/cenkalti/backoff/v5/backoff.go similarity index 87% rename from tools/vendor/github.com/cenkalti/backoff/v4/backoff.go rename to tools/vendor/github.com/cenkalti/backoff/v5/backoff.go index 3676ee405..dd2b24ca7 100644 --- a/tools/vendor/github.com/cenkalti/backoff/v4/backoff.go +++ b/tools/vendor/github.com/cenkalti/backoff/v5/backoff.go @@ -15,16 +15,16 @@ import "time" // BackOff is a backoff policy for retrying an operation. type BackOff interface { // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. + // backoff.Stop to indicate that no more retries should be made. // // Example usage: // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } // NextBackOff() time.Duration diff --git a/tools/vendor/github.com/cenkalti/backoff/v5/error.go b/tools/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 000000000..beb2b38a2 --- /dev/null +++ b/tools/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. 
+func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/tools/vendor/github.com/cenkalti/backoff/v5/exponential.go b/tools/vendor/github.com/cenkalti/backoff/v5/exponential.go new file mode 100644 index 000000000..c1f3e442d --- /dev/null +++ b/tools/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -0,0 +1,125 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + + currentInterval time.Duration +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + return &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + } +} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +// NextBackOff calculates the next backoff interval using the formula: +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval + } + + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next +} + +// Increments the current interval by multiplying it with the multiplier. 
+func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/tools/vendor/github.com/cenkalti/backoff/v5/retry.go b/tools/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 000000000..e43f47fb8 --- /dev/null +++ b/tools/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. +func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of retry attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. 
+func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. + args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, err + } + + // Stop retrying if context is cancelled. + if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/ticker.go b/tools/vendor/github.com/cenkalti/backoff/v5/ticker.go similarity index 80% rename from tools/vendor/github.com/cenkalti/backoff/v4/ticker.go rename to tools/vendor/github.com/cenkalti/backoff/v5/ticker.go index df9d68bce..f0d4b2ae7 100644 --- a/tools/vendor/github.com/cenkalti/backoff/v4/ticker.go +++ b/tools/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -1,7 +1,6 @@ package backoff import ( - "context" "sync" "time" ) @@ -14,8 +13,7 @@ type Ticker struct { C <-chan time.Time c chan time.Time b BackOff - ctx context.Context - timer Timer + timer timer stop chan struct{} stopOnce sync.Once } @@ -27,22 +25,12 @@ type Ticker struct { // provided backoff policy (notably calling NextBackOff or Reset) // while the ticker is running. func NewTicker(b BackOff) *Ticker { - return NewTickerWithTimer(b, &defaultTimer{}) -} - -// NewTickerWithTimer returns a new Ticker with a custom timer. -// A default timer that uses system timer is used when nil is passed. -func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { - if timer == nil { - timer = &defaultTimer{} - } c := make(chan time.Time) t := &Ticker{ C: c, c: c, b: b, - ctx: getContext(b), - timer: timer, + timer: &defaultTimer{}, stop: make(chan struct{}), } t.b.Reset() @@ -73,8 +61,6 @@ func (t *Ticker) run() { case <-t.stop: t.c = nil // Prevent future ticks from being sent to the channel. 
return - case <-t.ctx.Done(): - return } } } diff --git a/tools/vendor/github.com/cenkalti/backoff/v4/timer.go b/tools/vendor/github.com/cenkalti/backoff/v5/timer.go similarity index 96% rename from tools/vendor/github.com/cenkalti/backoff/v4/timer.go rename to tools/vendor/github.com/cenkalti/backoff/v5/timer.go index 8120d0213..a89530974 100644 --- a/tools/vendor/github.com/cenkalti/backoff/v4/timer.go +++ b/tools/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -2,7 +2,7 @@ package backoff import "time" -type Timer interface { +type timer interface { Start(duration time.Duration) Stop() C() <-chan time.Time diff --git a/tools/vendor/github.com/containerd/containerd/api/events/content.pb.go b/tools/vendor/github.com/containerd/containerd/api/events/content.pb.go index 8f183f60c..fdadd7266 100644 --- a/tools/vendor/github.com/containerd/containerd/api/events/content.pb.go +++ b/tools/vendor/github.com/containerd/containerd/api/events/content.pb.go @@ -36,6 +36,61 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type ContentCreate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *ContentCreate) Reset() { + *x = ContentCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContentCreate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContentCreate) ProtoMessage() {} + +func (x *ContentCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContentCreate.ProtoReflect.Descriptor instead. 
+func (*ContentCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP(), []int{0} +} + +func (x *ContentCreate) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} + +func (x *ContentCreate) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + type ContentDelete struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -47,7 +102,7 @@ type ContentDelete struct { func (x *ContentDelete) Reset() { *x = ContentDelete{} if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -60,7 +115,7 @@ func (x *ContentDelete) String() string { func (*ContentDelete) ProtoMessage() {} func (x *ContentDelete) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -73,7 +128,7 @@ func (x *ContentDelete) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentDelete.ProtoReflect.Descriptor instead. func (*ContentDelete) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP(), []int{0} + return file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP(), []int{1} } func (x *ContentDelete) GetDigest() string { @@ -94,14 +149,18 @@ var file_github_com_containerd_containerd_api_events_content_proto_rawDesc = []b 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x27, 0x0a, 0x0d, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3b, 0x0a, 0x0d, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x42, 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x27, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x42, 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 
0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -116,9 +175,10 @@ func file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP( return file_github_com_containerd_containerd_api_events_content_proto_rawDescData } -var file_github_com_containerd_containerd_api_events_content_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_events_content_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_github_com_containerd_containerd_api_events_content_proto_goTypes = []interface{}{ - (*ContentDelete)(nil), // 0: containerd.events.ContentDelete + (*ContentCreate)(nil), // 0: containerd.events.ContentCreate + (*ContentDelete)(nil), // 1: containerd.events.ContentDelete } var file_github_com_containerd_containerd_api_events_content_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -135,6 +195,18 @@ func file_github_com_containerd_containerd_api_events_content_proto_init() { } if !protoimpl.UnsafeEnabled { file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContentCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_content_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ContentDelete); i { case 0: return &v.state @@ -153,7 +225,7 @@ func file_github_com_containerd_containerd_api_events_content_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_containerd_containerd_api_events_content_proto_rawDesc, NumEnums: 0, - NumMessages: 1, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, diff --git a/tools/vendor/github.com/containerd/containerd/api/events/content.proto b/tools/vendor/github.com/containerd/containerd/api/events/content.proto index 6b023d6b6..58bd9155e 100644 --- a/tools/vendor/github.com/containerd/containerd/api/events/content.proto +++ b/tools/vendor/github.com/containerd/containerd/api/events/content.proto @@ -23,6 +23,11 @@ import "github.com/containerd/containerd/api/types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; +message ContentCreate { + string digest = 1; + int64 size = 2; +} + message ContentDelete { string digest = 1; } diff --git a/tools/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go b/tools/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go index 9485b664c..d8d717884 100644 --- a/tools/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go +++ b/tools/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go @@ -2,6 +2,20 @@ // source: github.com/containerd/containerd/api/events/content.proto package events +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *ContentCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: size + case "digest": + return string(m.Digest), len(m.Digest) > 0 + } + return "", false +} + // Field returns the value for the given fieldpath as a string, if defined. // If the value is not defined, the second value will be false. func (m *ContentDelete) Field(fieldpath []string) (string, bool) { diff --git a/tools/vendor/github.com/containerd/containerd/archive/compression/compression.go b/tools/vendor/github.com/containerd/containerd/archive/compression/compression.go index 23ddfab1a..3c152f281 100644 --- a/tools/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/tools/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -45,6 +45,8 @@ const ( Gzip // Zstd is zstd compression algorithm. Zstd + // Unknown is used when a plugin handles the algorithm. + Unknown ) const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" @@ -254,6 +256,8 @@ func (compression *Compression) Extension() string { return "gz" case Zstd: return "zst" + case Unknown: + return "unknown" } return "" } diff --git a/tools/vendor/github.com/containerd/containerd/log/context_deprecated.go b/tools/vendor/github.com/containerd/containerd/log/context_deprecated.go deleted file mode 100644 index 9e9e8b491..000000000 --- a/tools/vendor/github.com/containerd/containerd/log/context_deprecated.go +++ /dev/null @@ -1,149 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - - "github.com/containerd/log" -) - -// G is a shorthand for [GetLogger]. -// -// Deprecated: use [log.G]. -var G = log.G - -// L is an alias for the standard logger. -// -// Deprecated: use [log.L]. -var L = log.L - -// Fields type to pass to "WithFields". -// -// Deprecated: use [log.Fields]. -type Fields = log.Fields - -// Entry is a logging entry. -// -// Deprecated: use [log.Entry]. -type Entry = log.Entry - -// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using -// zeros to ensure the formatted time is always the same number of -// characters. -// -// Deprecated: use [log.RFC3339NanoFixed]. -const RFC3339NanoFixed = log.RFC3339NanoFixed - -// Level is a logging level. -// -// Deprecated: use [log.Level]. -type Level = log.Level - -// Supported log levels. -const ( - // TraceLevel level. - // - // Deprecated: use [log.TraceLevel]. - TraceLevel Level = log.TraceLevel - - // DebugLevel level. - // - // Deprecated: use [log.DebugLevel]. - DebugLevel Level = log.DebugLevel - - // InfoLevel level. - // - // Deprecated: use [log.InfoLevel]. - InfoLevel Level = log.InfoLevel - - // WarnLevel level. - // - // Deprecated: use [log.WarnLevel]. - WarnLevel Level = log.WarnLevel - - // ErrorLevel level - // - // Deprecated: use [log.ErrorLevel]. - ErrorLevel Level = log.ErrorLevel - - // FatalLevel level. - // - // Deprecated: use [log.FatalLevel]. 
- FatalLevel Level = log.FatalLevel - - // PanicLevel level. - // - // Deprecated: use [log.PanicLevel]. - PanicLevel Level = log.PanicLevel -) - -// SetLevel sets log level globally. It returns an error if the given -// level is not supported. -// -// Deprecated: use [log.SetLevel]. -func SetLevel(level string) error { - return log.SetLevel(level) -} - -// GetLevel returns the current log level. -// -// Deprecated: use [log.GetLevel]. -func GetLevel() log.Level { - return log.GetLevel() -} - -// OutputFormat specifies a log output format. -// -// Deprecated: use [log.OutputFormat]. -type OutputFormat = log.OutputFormat - -// Supported log output formats. -const ( - // TextFormat represents the text logging format. - // - // Deprecated: use [log.TextFormat]. - TextFormat log.OutputFormat = "text" - - // JSONFormat represents the JSON logging format. - // - // Deprecated: use [log.JSONFormat]. - JSONFormat log.OutputFormat = "json" -) - -// SetFormat sets the log output format. -// -// Deprecated: use [log.SetFormat]. -func SetFormat(format OutputFormat) error { - return log.SetFormat(format) -} - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -// -// Deprecated: use [log.WithLogger]. -func WithLogger(ctx context.Context, logger *log.Entry) context.Context { - return log.WithLogger(ctx, logger) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -// -// Deprecated: use [log.GetLogger]. -func GetLogger(ctx context.Context) *log.Entry { - return log.GetLogger(ctx) -} diff --git a/tools/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go b/tools/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go index 124e8edb5..1e4e06c20 100644 --- a/tools/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go +++ b/tools/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go @@ -37,12 +37,11 @@ func SourceDateEpoch() (*time.Time, error) { if !ok || v == "" { return nil, nil // not an error } - i64, err := strconv.ParseInt(v, 10, 64) + t, err := ParseSourceDateEpoch(v) if err != nil { - return nil, fmt.Errorf("invalid %s value %q: %w", SourceDateEpochEnv, v, err) + return nil, fmt.Errorf("invalid %s value: %w", SourceDateEpochEnv, err) } - unix := time.Unix(i64, 0).UTC() - return &unix, nil + return t, nil } // SourceDateEpochOrNow returns the SOURCE_DATE_EPOCH time if available, @@ -58,12 +57,26 @@ func SourceDateEpochOrNow() time.Time { return time.Now().UTC() } +// ParseSourceDateEpoch parses the given source date epoch, as *time.Time. +// It returns an error if sourceDateEpoch is empty or not well-formatted. +func ParseSourceDateEpoch(sourceDateEpoch string) (*time.Time, error) { + if sourceDateEpoch == "" { + return nil, fmt.Errorf("value is empty") + } + i64, err := strconv.ParseInt(sourceDateEpoch, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid value: %w", err) + } + unix := time.Unix(i64, 0).UTC() + return &unix, nil +} + // SetSourceDateEpoch sets the SOURCE_DATE_EPOCH env var. func SetSourceDateEpoch(tm time.Time) { - os.Setenv(SourceDateEpochEnv, fmt.Sprintf("%d", tm.Unix())) + _ = os.Setenv(SourceDateEpochEnv, strconv.Itoa(int(tm.Unix()))) } // UnsetSourceDateEpoch unsets the SOURCE_DATE_EPOCH env var. 
func UnsetSourceDateEpoch() { - os.Unsetenv(SourceDateEpochEnv) + _ = os.Unsetenv(SourceDateEpochEnv) } diff --git a/tools/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/tools/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go index 244e03509..c9c224b2a 100644 --- a/tools/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go +++ b/tools/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go @@ -86,11 +86,11 @@ type TokenOptions struct { // OAuthTokenResponse is response from fetching token with a OAuth POST request type OAuthTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresInSeconds int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` } // FetchTokenWithOAuth fetches a token using a POST request @@ -152,11 +152,11 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http. // FetchTokenResponse is response from fetching token with GET request type FetchTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresInSeconds int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` } // FetchToken fetches a token using a GET request diff --git a/tools/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/tools/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index 2bf388e8c..6aabe95a4 100644 --- a/tools/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/tools/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -24,6 +24,7 @@ import ( "net/http" "strings" "sync" + "time" "github.com/containerd/log" @@ -206,9 +207,10 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R // authResult is used to control limit rate. type authResult struct { sync.WaitGroup - token string - refreshToken string - err error + token string + refreshToken string + expirationTime *time.Time + err error } // authHandler is used to handle auth request per registry server. @@ -271,8 +273,12 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st // Docs: https://docs.docker.com/registry/spec/auth/scope scoped := strings.Join(to.Scopes, " ") + // Keep track of the expiration time of cached bearer tokens so they can be + // refreshed when they expire without a server roundtrip. 
+ var expirationTime *time.Time + ah.Lock() - if r, exist := ah.scopedTokens[scoped]; exist { + if r, exist := ah.scopedTokens[scoped]; exist && (r.expirationTime == nil || r.expirationTime.After(time.Now())) { ah.Unlock() r.Wait() return r.token, r.refreshToken, r.err @@ -286,7 +292,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st defer func() { token = fmt.Sprintf("Bearer %s", token) - r.token, r.refreshToken, r.err = token, refreshToken, err + r.token, r.refreshToken, r.err, r.expirationTime = token, refreshToken, err, expirationTime r.Done() }() @@ -312,6 +318,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st if err != nil { return "", "", err } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) return resp.Token, resp.RefreshToken, nil } log.G(ctx).WithFields(log.Fields{ @@ -321,6 +328,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st } return "", "", err } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) return resp.AccessToken, resp.RefreshToken, nil } // do request anonymously @@ -328,9 +336,18 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st if err != nil { return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err) } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) return resp.Token, resp.RefreshToken, nil } +func getExpirationTime(expiresInSeconds int) *time.Time { + if expiresInSeconds <= 0 { + return nil + } + expirationTime := time.Now().Add(time.Duration(expiresInSeconds) * time.Second) + return &expirationTime +} + func invalidAuthorization(ctx context.Context, c auth.Challenge, responses []*http.Response) (retry bool, _ error) { errStr := c.Parameters["error"] if errStr == "" { diff --git a/tools/vendor/github.com/containerd/containerd/version/version.go b/tools/vendor/github.com/containerd/containerd/version/version.go index e806164ca..907553466 100644 --- a/tools/vendor/github.com/containerd/containerd/version/version.go +++ b/tools/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.7.27+unknown" + Version = "1.7.29+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. 
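// ---------------------------------------------------------------------------
// Editor's note: a short sketch (not part of the vendored diff) exercising the
// ParseSourceDateEpoch helper factored out in pkg/epoch above. The epoch value
// "1700000000" is an arbitrary example.
package main

import (
	"fmt"

	"github.com/containerd/containerd/pkg/epoch"
)

func main() {
	// SOURCE_DATE_EPOCH values are Unix timestamps in decimal seconds.
	t, err := epoch.ParseSourceDateEpoch("1700000000")
	if err != nil {
		fmt.Println("invalid SOURCE_DATE_EPOCH:", err)
		return
	}
	fmt.Println(*t) // 2023-11-14 22:13:20 +0000 UTC
}
// ---------------------------------------------------------------------------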
diff --git a/tools/vendor/github.com/containerd/continuity/fs/stat_darwinbsd.go b/tools/vendor/github.com/containerd/continuity/fs/stat_darwinbsd.go index 7a34eda3c..8192465cd 100644 --- a/tools/vendor/github.com/containerd/continuity/fs/stat_darwinbsd.go +++ b/tools/vendor/github.com/containerd/continuity/fs/stat_darwinbsd.go @@ -19,10 +19,36 @@ package fs import ( + "fmt" + "io/fs" "syscall" "time" ) +func Atime(st fs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) + } + return time.Unix(stSys.Atimespec.Unix()), nil +} + +func Ctime(st fs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) + } + return time.Unix(stSys.Ctimespec.Unix()), nil +} + +func Mtime(st fs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) + } + return time.Unix(stSys.Mtimespec.Unix()), nil +} + // StatAtime returns the access time from a stat struct func StatAtime(st *syscall.Stat_t) syscall.Timespec { return st.Atimespec diff --git a/tools/vendor/github.com/containerd/continuity/fs/stat_unix.go b/tools/vendor/github.com/containerd/continuity/fs/stat_unix.go index 0edebdf1d..503d24eec 100644 --- a/tools/vendor/github.com/containerd/continuity/fs/stat_unix.go +++ b/tools/vendor/github.com/containerd/continuity/fs/stat_unix.go @@ -30,7 +30,7 @@ func Atime(st fs.FileInfo) (time.Time, error) { if !ok { return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) } - return StatATimeAsTime(stSys), nil + return time.Unix(stSys.Atim.Unix()), nil } func Ctime(st fs.FileInfo) (time.Time, error) { @@ -38,7 +38,7 @@ func Ctime(st fs.FileInfo) (time.Time, error) { if !ok { return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) } - return time.Unix(stSys.Atim.Unix()), nil + return time.Unix(stSys.Ctim.Unix()), nil } func Mtime(st fs.FileInfo) (time.Time, error) { diff --git a/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 6aba0ef1f..8b804b7dd 100644 --- a/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -408,11 +408,11 @@ func readerFromEntries(entries ...*entry) io.Reader { defer tw.Close() for _, entry := range entries { if err := tw.WriteHeader(entry.header); err != nil { - pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + pw.CloseWithError(fmt.Errorf("failed to write tar header: %v", err)) return } if _, err := io.Copy(tw, entry.payload); err != nil { - pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + pw.CloseWithError(fmt.Errorf("failed to write tar payload: %v", err)) return } } @@ -627,12 +627,12 @@ func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { default: - return 0, fmt.Errorf("Unknown whence: %v", whence) + return 0, fmt.Errorf("unknown whence: %v", whence) case io.SeekStart: case io.SeekCurrent: offset += *cr.cPos case io.SeekEnd: - return 0, fmt.Errorf("Unsupported whence: %v", whence) + return 0, fmt.Errorf("unsupported whence: %v", whence) } if offset < 0 { diff --git 
a/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index f24afe32f..88fa13b19 100644 --- a/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -109,7 +109,7 @@ func gzipFooterBytes(tocOff int64) []byte { header[0], header[1] = 'S', 'G' subfield := fmt.Sprintf("%016xSTARGZ", tocOff) binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 - gz.Header.Extra = append(header, []byte(subfield)...) + gz.Extra = append(header, []byte(subfield)...) gz.Close() if buf.Len() != FooterSize { panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize)) @@ -136,7 +136,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t return 0, 0, 0, err } defer zr.Close() - extra := zr.Header.Extra + extra := zr.Extra si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:] if si1 != 'S' || si2 != 'G' { return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2) @@ -181,7 +181,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) } defer zr.Close() - extra := zr.Header.Extra + extra := zr.Extra if len(extra) != 16+len("STARGZ") { return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size") } diff --git a/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index ba650b4d1..a8dcdb868 100644 --- a/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/tools/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -357,14 +357,15 @@ func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.S buf := new(bytes.Buffer) var w io.WriteCloser var err error - if srcCompression == gzipType { + switch srcCompression { + case gzipType: w = gzip.NewWriter(buf) - } else if srcCompression == zstdType { + case zstdType: w, err = zstd.NewWriter(buf) if err != nil { t.Fatalf("failed to init zstd writer: %v", err) } - } else { + default: return src } src.Seek(0, io.SeekStart) @@ -445,7 +446,7 @@ func contains(t *testing.T, a, b stargzEntry) bool { bbytes, bnext, bok := readOffset(t, bf, nr, b) if !aok && !bok { break - } else if !(aok && bok) || anext != bnext { + } else if !aok || !bok || anext != bnext { t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v", ae.Name, be.Name, nr, aok, bok, anext, bnext) return false @@ -2346,8 +2347,8 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { t.Fatalf("countStreams(gzip), Copy: %v", err) } var extra string - if len(zr.Header.Extra) > 0 { - extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + if len(zr.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Extra) } t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) delete(wants, int64(zoff)) diff --git a/tools/vendor/github.com/containerd/typeurl/v2/README.md b/tools/vendor/github.com/containerd/typeurl/v2/README.md index 8d86600a4..3098526ab 100644 --- a/tools/vendor/github.com/containerd/typeurl/v2/README.md +++ b/tools/vendor/github.com/containerd/typeurl/v2/README.md @@ -18,3 +18,9 @@ As a containerd sub-project, you will find the: * and [Contributing 
guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
 information in our [`containerd/project`](https://github.com/containerd/project) repository.
+
+## Optional
+
+By default, support for gogoproto is available alongside the standard Google
+protobuf types.
+You can choose to leave gogo support out by building with the `no_gogo` build tag.
diff --git a/tools/vendor/github.com/containerd/typeurl/v2/types.go b/tools/vendor/github.com/containerd/typeurl/v2/types.go
index 78817b701..9bf781041 100644
--- a/tools/vendor/github.com/containerd/typeurl/v2/types.go
+++ b/tools/vendor/github.com/containerd/typeurl/v2/types.go
@@ -24,7 +24,6 @@ import (
 	"reflect"
 	"sync"
 
-	gogoproto "github.com/gogo/protobuf/proto"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoregistry"
 	"google.golang.org/protobuf/types/known/anypb"
@@ -33,8 +32,16 @@ import (
 var (
 	mu       sync.RWMutex
 	registry = make(map[reflect.Type]string)
+	handlers []handler
 )
 
+type handler interface {
+	Marshaller(interface{}) func() ([]byte, error)
+	Unmarshaller(interface{}) func([]byte) error
+	TypeURL(interface{}) string
+	GetType(url string) (reflect.Type, bool)
+}
+
 // Definitions of common error types used throughout typeurl.
 //
 // These error types are used with errors.Wrap and errors.Wrapf to add context
@@ -112,9 +119,12 @@ func TypeURL(v interface{}) (string, error) {
 	switch t := v.(type) {
 	case proto.Message:
 		return string(t.ProtoReflect().Descriptor().FullName()), nil
-	case gogoproto.Message:
-		return gogoproto.MessageName(t), nil
 	default:
+		for _, h := range handlers {
+			if u := h.TypeURL(v); u != "" {
+				return u, nil
+			}
+		}
 		return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
 	}
 }
@@ -149,12 +159,19 @@ func MarshalAny(v interface{}) (Any, error) {
 		marshal = func(v interface{}) ([]byte, error) {
 			return proto.Marshal(t)
 		}
-	case gogoproto.Message:
-		marshal = func(v interface{}) ([]byte, error) {
-			return gogoproto.Marshal(t)
-		}
 	default:
-		marshal = json.Marshal
+		for _, h := range handlers {
+			if m := h.Marshaller(v); m != nil {
+				marshal = func(v interface{}) ([]byte, error) {
+					return m()
+				}
+				break
+			}
+		}
+
+		if marshal == nil {
+			marshal = json.Marshal
+		}
 	}
 
 	url, err := TypeURL(v)
@@ -223,13 +240,13 @@ func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
 }
 
 func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
-	t, err := getTypeByUrl(typeURL)
+	t, isProto, err := getTypeByUrl(typeURL)
 	if err != nil {
 		return nil, err
 	}
 
 	if v == nil {
-		v = reflect.New(t.t).Interface()
+		v = reflect.New(t).Interface()
 	} else {
 		// Validate interface type provided by client
 		vURL, err := TypeURL(v)
@@ -241,51 +258,45 @@ func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error)
 		}
 	}
 
-	if t.isProto {
-		switch t := v.(type) {
-		case proto.Message:
-			err = proto.Unmarshal(value, t)
-		case gogoproto.Message:
-			err = gogoproto.Unmarshal(value, t)
+	if isProto {
+		pm, ok := v.(proto.Message)
+		if ok {
+			return v, proto.Unmarshal(value, pm)
 		}
-	} else {
-		err = json.Unmarshal(value, v)
-	}
-	return v, err
-}
+		for _, h := range handlers {
+			if unmarshal := h.Unmarshaller(v); unmarshal != nil {
+				return v, unmarshal(value)
+			}
+		}
+	}
 
-type urlType struct {
-	t       reflect.Type
-	isProto bool
+	// fallback to json unmarshaller
+	return v, json.Unmarshal(value, v)
 }
 
-func getTypeByUrl(url string) (urlType, error) {
+func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) {
 	mu.RLock()
 	for t, u := range registry {
 		if u == url {
mu.RUnlock() - return urlType{ - t: t, - }, nil + return t, false, nil } } mu.RUnlock() - // fallback to proto registry - t := gogoproto.MessageType(url) - if t != nil { - return urlType{ - // get the underlying Elem because proto returns a pointer to the type - t: t.Elem(), - isProto: true, - }, nil - } mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) if err != nil { - return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound) + if errors.Is(err, protoregistry.NotFound) { + for _, h := range handlers { + if t, isProto := h.GetType(url); t != nil { + return t, isProto, nil + } + } + } + return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound) } empty := mt.New().Interface() - return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil + return reflect.TypeOf(empty).Elem(), true, nil } func tryDereference(v interface{}) reflect.Type { diff --git a/tools/vendor/github.com/containerd/typeurl/v2/types_gogo.go b/tools/vendor/github.com/containerd/typeurl/v2/types_gogo.go new file mode 100644 index 000000000..adb892ec6 --- /dev/null +++ b/tools/vendor/github.com/containerd/typeurl/v2/types_gogo.go @@ -0,0 +1,68 @@ +//go:build !no_gogo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package typeurl + +import ( + "reflect" + + gogoproto "github.com/gogo/protobuf/proto" +) + +func init() { + handlers = append(handlers, gogoHandler{}) +} + +type gogoHandler struct{} + +func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) { + pm, ok := v.(gogoproto.Message) + if !ok { + return nil + } + return func() ([]byte, error) { + return gogoproto.Marshal(pm) + } +} + +func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error { + pm, ok := v.(gogoproto.Message) + if !ok { + return nil + } + + return func(dt []byte) error { + return gogoproto.Unmarshal(dt, pm) + } +} + +func (gogoHandler) TypeURL(v interface{}) string { + pm, ok := v.(gogoproto.Message) + if !ok { + return "" + } + return gogoproto.MessageName(pm) +} + +func (gogoHandler) GetType(url string) (reflect.Type, bool) { + t := gogoproto.MessageType(url) + if t == nil { + return nil, false + } + return t.Elem(), true +} diff --git a/tools/vendor/github.com/containers/ocicrypt/.gitignore b/tools/vendor/github.com/containers/ocicrypt/.gitignore new file mode 100644 index 000000000..b25c15b81 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/.gitignore @@ -0,0 +1 @@ +*~ diff --git a/tools/vendor/github.com/containers/ocicrypt/.golangci.yml b/tools/vendor/github.com/containers/ocicrypt/.golangci.yml new file mode 100644 index 000000000..bf39af836 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/.golangci.yml @@ -0,0 +1,35 @@ +linters: + enable: + - depguard + - staticcheck + - unconvert + - gofmt + - goimports + - revive + - ineffassign + - govet + - unused + - misspell + +linters-settings: + depguard: + rules: + main: + files: + - $all + deny: + - pkg: "io/ioutil" + + revive: + severity: error + rules: + - name: 
indent-error-flow
+        severity: warning
+        disabled: false
+
+      - name: error-strings
+        disabled: false
+
+  staticcheck:
+    # Suppress reports of deprecated packages
+    checks: ["-SA1019"]
diff --git a/tools/vendor/github.com/containers/ocicrypt/ADOPTERS.md b/tools/vendor/github.com/containers/ocicrypt/ADOPTERS.md
new file mode 100644
index 000000000..fa4b03bb8
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/ADOPTERS.md
@@ -0,0 +1,10 @@
+Below is a list of adopters of the `ocicrypt` library and of projects that support use of OCI encrypted images:
+- [skopeo](https://github.com/containers/skopeo)
+- [buildah](https://github.com/containers/buildah)
+- [containerd](https://github.com/containerd/imgcrypt)
+- [nerdctl](https://github.com/containerd/nerdctl)
+- [distribution](https://github.com/distribution/distribution)
+
+Below is a list of projects that are in the process of adopting support:
+- [quay](https://github.com/quay/quay)
+- [kata-containers](https://github.com/kata-containers/kata-containers)
diff --git a/tools/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md b/tools/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..d68f8dbda
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The OCIcrypt Library Project Community Code of Conduct
+
+The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/tools/vendor/github.com/containers/ocicrypt/MAINTAINERS b/tools/vendor/github.com/containers/ocicrypt/MAINTAINERS
new file mode 100644
index 000000000..af38d03bf
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/MAINTAINERS
@@ -0,0 +1,6 @@
+# ocicrypt maintainers
+#
+# Github ID, Name, Email Address
+lumjjb, Brandon Lum, lumjjb@gmail.com
+stefanberger, Stefan Berger, stefanb@linux.ibm.com
+arronwy, Arron Wang, arron.wang@intel.com
diff --git a/tools/vendor/github.com/containers/ocicrypt/Makefile b/tools/vendor/github.com/containers/ocicrypt/Makefile
new file mode 100644
index 000000000..97ddeefbb
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/Makefile
@@ -0,0 +1,35 @@
+# Copyright The containerd Authors.

+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at

+# http://www.apache.org/licenses/LICENSE-2.0

+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+.PHONY: check build decoder generate-protobuf

+all: build

+FORCE:

+check:
+	golangci-lint run

+build: vendor
+	go build ./...

+vendor:
+	go mod tidy

+test:
+	go clean -testcache
+	go test ./... -test.v

+generate-protobuf:
+	protoc -I utils/keyprovider/ utils/keyprovider/keyprovider.proto --go_out=plugins=grpc:utils/keyprovider
diff --git a/tools/vendor/github.com/containers/ocicrypt/README.md b/tools/vendor/github.com/containers/ocicrypt/README.md
new file mode 100644
index 000000000..b69d14e3b
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/README.md
@@ -0,0 +1,50 @@
+# OCIcrypt Library
+
+The `ocicrypt` library is the OCI image spec implementation of container image encryption.
More details of the spec can be seen in the [OCI repository](https://github.com/opencontainers/image-spec/pull/775). The purpose of this library is to encode spec structures and consts in code, as well as to provide a consistent implementation of image encryption across container runtimes and build tools.
+
+Consumers of OCIcrypt:
+
+- [containerd/imgcrypt](https://github.com/containerd/imgcrypt)
+- [cri-o](https://github.com/cri-o/cri-o)
+- [skopeo](https://github.com/containers/skopeo)
+
+
+## Usage
+
+There are various levels of usage for this library. The main consumers would be runtime/build tools, and a more specific use is the ability to extend cryptographic functionality.
+
+### Runtime/Build tool usage
+
+The general exposed interface a runtime/build tool would use is to perform encryption or decryption of layers:
+
+```
+package "github.com/containers/ocicrypt"
+func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error)
+func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error)
+```
+
+The settings/parameters to these functions can be specified via creation of an encryption config with the `github.com/containers/ocicrypt/config` package. We note that because setting of annotations and other fields of the layer descriptor is done through various means in different runtimes/build tools, it is the responsibility of the caller to still ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.).
+
+
+### Crypto Agility and Extensibility
+
+The implementations of both symmetric and asymmetric encryption used in this library are behind two main interfaces, which users can extend if need be. These are in the following packages:
+- github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers
+- github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping
+
+We note that adding interfaces here outside the OCI spec is risky and is not recommended, unless for very specialized and confined use cases. Please open an issue or PR if there is a general use case that could be added to the OCI spec.
+
+
+#### Keyprovider interface
+
+As part of the keywrap interface, there is a [keyprovider](https://github.com/containers/ocicrypt/blob/main/docs/keyprovider.md) implementation that allows one to call out to a binary or service.
+
+
+## Security Issues
+
+We consider security issues related to this library critical. Please report any security related issues by emailing the maintainers in the [MAINTAINERS](MAINTAINERS) file.
+
+
+## Ocicrypt Pkcs11 Support
+
+Ocicrypt Pkcs11 support is currently experimental. For more details, please refer to [this document](docs/pkcs11.md).
diff --git a/tools/vendor/github.com/containers/ocicrypt/SECURITY.md b/tools/vendor/github.com/containers/ocicrypt/SECURITY.md
new file mode 100644
index 000000000..ea98cb129
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the OCIcrypt Library Project
+
+The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
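// ---------------------------------------------------------------------------
// Editor's note: a sketch (not part of the vendored diff) of the runtime-facing
// flow the README above describes. The layer bytes, descriptor, and JWE public
// key are placeholders; ocicrypt.EncryptLayer and config.EncryptWithJwe are the
// vendored APIs, and the claim that the finalizer yields descriptor annotations
// is this editor's reading of the README, not a guarantee.
package main

import (
	"bytes"
	"io"

	"github.com/containers/ocicrypt"
	"github.com/containers/ocicrypt/config"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func encryptLayer(layer, jwePubKey []byte, desc ocispec.Descriptor) (io.Reader, error) {
	// Build an encryption config targeting a single JWE recipient.
	cc, err := config.EncryptWithJwe([][]byte{jwePubKey})
	if err != nil {
		return nil, err
	}
	// EncryptLayer returns the ciphertext reader plus a finalizer to invoke
	// once the ciphertext has been fully consumed.
	encReader, finalizer, err := ocicrypt.EncryptLayer(cc.EncryptConfig, bytes.NewReader(layer), desc)
	if err != nil {
		return nil, err
	}
	_ = finalizer // call after reading encReader to completion
	return encReader, nil
}
// ---------------------------------------------------------------------------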
diff --git a/tools/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/tools/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
new file mode 100644
index 000000000..b8436a8d5
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
@@ -0,0 +1,160 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// LayerCipherType is the ciphertype as specified in the layer metadata
+type LayerCipherType string
+
+// TODO: Should be obtained from OCI spec once included
+const (
+	AES256CTR LayerCipherType = "AES_256_CTR_HMAC_SHA256"
+)
+
+// PrivateLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is sensitive and should not be in plaintext
+type PrivateLayerBlockCipherOptions struct {
+	// SymmetricKey represents the symmetric key used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	SymmetricKey []byte `json:"symkey"`
+
+	// Digest is the digest of the original data for verification.
+	// This is NOT populated by Encrypt/Decrypt calls
+	Digest digest.Digest `json:"digest"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// PublicLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is public and can be deduplicated in plaintext across multiple
+// recipients
+type PublicLayerBlockCipherOptions struct {
+	// CipherType denotes the cipher type according to the list of OCI supported
+	// cipher types.
+	CipherType LayerCipherType `json:"cipher"`
+
+	// Hmac contains the hmac string to help verify encryption
+	Hmac []byte `json:"hmac"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// LayerBlockCipherOptions contains the public and private LayerBlockCipherOptions
+// required to encrypt/decrypt an image
+type LayerBlockCipherOptions struct {
+	Public  PublicLayerBlockCipherOptions
+	Private PrivateLayerBlockCipherOptions
+}
+
+// LayerBlockCipher returns a provider for encrypt/decrypt functionality
+// for handling the layer data for a specific algorithm
+type LayerBlockCipher interface {
+	// GenerateKey creates a symmetric key
+	GenerateKey() ([]byte, error)
+	// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+	Encrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error)
+	// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+	Decrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error)
+}
+
+// LayerBlockCipherHandler is the handler for encrypt/decrypt for layers
+type LayerBlockCipherHandler struct {
+	cipherMap map[LayerCipherType]LayerBlockCipher
+}
+
+// Finalizer is called after data blobs are written, and returns the LayerBlockCipherOptions for the encrypted blob
+type Finalizer func() (LayerBlockCipherOptions, error)
+
+// GetOpt returns the value of the cipher option and whether the option exists
+func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
+	if v, ok := lbco.Public.CipherOptions[key]; ok {
+		return v, ok
+	} else if v, ok := lbco.Private.CipherOptions[key]; ok {
+		return v, ok
+	}
+	return nil, false
+}
+
+func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
+	return func() (LayerBlockCipherOptions, error) {
+		lbco, err := fin()
+		if err != nil {
+			return LayerBlockCipherOptions{}, err
+		}
+		lbco.Public.CipherType = typ
+		return lbco, err
+	}
+}
+
+// Encrypt is the handler for the layer encryption routine
+func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCipherType) (io.Reader, Finalizer, error) {
+	if c, ok := h.cipherMap[typ]; ok {
+		sk, err := c.GenerateKey()
+		if err != nil {
+			return nil, nil, err
+		}
+		opt := LayerBlockCipherOptions{
+			Private: PrivateLayerBlockCipherOptions{
+				SymmetricKey: sk,
+			},
+		}
+		encDataReader, fin, err := c.Encrypt(plainDataReader, opt)
+		if err == nil {
+			fin = wrapFinalizerWithType(fin, typ)
+		}
+		return encDataReader, fin, err
+	}
+	return nil, nil, fmt.Errorf("unsupported cipher type: %s", typ)
+}
+
+// Decrypt is the handler for the layer decryption routine
+func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	typ := opt.Public.CipherType
+	if typ == "" {
+		return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
+	}
+	if c, ok := h.cipherMap[typ]; ok {
+		return c.Decrypt(encDataReader, opt)
+	}
+	return nil, LayerBlockCipherOptions{}, fmt.Errorf("unsupported cipher type: %s", typ)
+}
+
+// NewLayerBlockCipherHandler returns a new default handler
+func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) {
+	h := LayerBlockCipherHandler{
+		cipherMap: map[LayerCipherType]LayerBlockCipher{},
+	}
+
+	var err error
+	h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256)
+	if err != nil {
+		return nil, fmt.Errorf("unable to set up Cipher AES-256-CTR: %w", err)
+	}
+
+	return &h, nil
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go b/tools/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
new file mode 100644
index 000000000..7db03e2ec
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
@@ -0,0 +1,193 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/containers/ocicrypt/utils"
+)
+
+// AESCTRLayerBlockCipher implements the AES CTR stream cipher
+type AESCTRLayerBlockCipher struct {
+	keylen         int // in bytes
+	reader         io.Reader
+	encrypt        bool
+	stream         cipher.Stream
+	err            error
+	hmac           hash.Hash
+	expHmac        []byte
+	doneEncrypting bool
+}
+
+type aesctrcryptor struct {
+	bc *AESCTRLayerBlockCipher
+}
+
+// NewAESCTRLayerBlockCipher returns a new AES CTR block cipher; only a key
+// length of 256 bits is currently supported
+func NewAESCTRLayerBlockCipher(bits int) (LayerBlockCipher, error) {
+	if bits != 256 {
+		return nil, errors.New("AES CTR bit count not supported")
+	}
+	return &AESCTRLayerBlockCipher{keylen: bits / 8}, nil
+}
+
+func (r *aesctrcryptor) Read(p []byte) (int, error) {
+	var (
+		o int
+	)
+
+	if r.bc.err != nil {
+		return 0, r.bc.err
+	}
+
+	o, err := utils.FillBuffer(r.bc.reader, p)
+	if err != nil {
+		if err == io.EOF {
+			r.bc.err = err
+		} else {
+			return 0, err
+		}
+	}
+
+	if !r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Before we return EOF we let the HMAC comparison
+			// provide a verdict
+			if !hmac.Equal(r.bc.hmac.Sum(nil), r.bc.expHmac) {
+				r.bc.err = fmt.Errorf("could not properly decrypt byte stream; exp hmac: '%x', actual hmac: '%x'", r.bc.expHmac, r.bc.hmac.Sum(nil))
+				return 0, r.bc.err
+			}
+		}
+	}
+
+	r.bc.stream.XORKeyStream(p[:o], p[:o])
+
+	if r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Final data encrypted; Do the 'then-MAC' part
+			r.bc.doneEncrypting = true
+		}
+	}
+
+	return o, r.bc.err
+}
+
+// init initializes an instance
+func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts LayerBlockCipherOptions) (LayerBlockCipherOptions, error) {
+	var (
+		err error
+	)
+
+	key := opts.Private.SymmetricKey
+	if len(key) != bc.keylen {
+		return LayerBlockCipherOptions{}, fmt.Errorf("invalid key length of %d bytes; need %d bytes", len(key), bc.keylen)
+	}
+
+	nonce, ok := opts.GetOpt("nonce")
+	if !ok {
+		nonce = make([]byte, aes.BlockSize)
+		if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
			return LayerBlockCipherOptions{}, fmt.Errorf("unable to generate random nonce: %w", err)
+		}
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return LayerBlockCipherOptions{}, fmt.Errorf("aes.NewCipher failed: %w", err)
+	}
+
+	bc.reader = reader
+	bc.encrypt = encrypt
+	bc.stream = cipher.NewCTR(block, nonce)
+	bc.err = nil
+	bc.hmac = hmac.New(sha256.New, key)
+	bc.expHmac = opts.Public.Hmac
+	bc.doneEncrypting = false
+
+	if !encrypt && len(bc.expHmac) == 0 {
+		return LayerBlockCipherOptions{}, errors.New("HMAC is not provided for decryption process")
+	}
+
+	lbco := LayerBlockCipherOptions{
+		Private: PrivateLayerBlockCipherOptions{
+			SymmetricKey: key,
+			CipherOptions: map[string][]byte{
+				"nonce": nonce,
+			},
+		},
+	}
+
+	return lbco, nil
+}
+
+// GenerateKey creates a symmetric key
+func (bc *AESCTRLayerBlockCipher) GenerateKey() ([]byte, error) {
+	key := make([]byte, bc.keylen)
+	if _, err := io.ReadFull(rand.Reader, key); err != nil {
+		return nil, err
+	}
+	return key, nil
+}
+
+// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Encrypt(plainDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) {
+	lbco, err := bc.init(true, plainDataReader, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	finalizer := func() (LayerBlockCipherOptions, error) {
+		if !bc.doneEncrypting {
+			return LayerBlockCipherOptions{}, errors.New("Read()ing not complete, unable to finalize")
+		}
+		if lbco.Public.CipherOptions == nil {
+			lbco.Public.CipherOptions = map[string][]byte{}
+		}
+		lbco.Public.Hmac = bc.hmac.Sum(nil)
+		return lbco, nil
+	}
+	return &aesctrcryptor{bc}, finalizer, nil
+}
+
+// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	lbco, err := bc.init(false, encDataReader, opt)
+	if err != nil {
+		return nil, LayerBlockCipherOptions{}, err
+	}
+
+	return utils.NewDelayedReader(&aesctrcryptor{bc}, 1024*10), lbco, nil
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/config/config.go b/tools/vendor/github.com/containers/ocicrypt/config/config.go
new file mode 100644
index 000000000..d960766eb
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/config/config.go
@@ -0,0 +1,114 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+// EncryptConfig is the container image PGP encryption configuration holding
+// the identifiers of those that will be able to decrypt the container and
+// the PGP public keyring file data that contains their public keys.
+type EncryptConfig struct {
+	// map holding 'gpg-recipients', 'gpg-pubkeyringfile', 'pubkeys', 'x509s'
+	Parameters map[string][][]byte
+
+	DecryptConfig DecryptConfig
+}
+
+// DecryptConfig wraps the Parameters map that holds the decryption key
+type DecryptConfig struct {
+	// map holding 'privkeys', 'x509s', 'gpg-privatekeys'
+	Parameters map[string][][]byte
+}
+
+// CryptoConfig is a common wrapper for EncryptConfig and DecryptConfig that can
+// be passed through functions that share much code for encryption and decryption
+type CryptoConfig struct {
+	EncryptConfig *EncryptConfig
+	DecryptConfig *DecryptConfig
+}
+
+// InitDecryption initializes a CryptoConfig object with parameters used for decryption
+func InitDecryption(dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparameters,
+		},
+	}
+}
+
+// InitEncryption initializes a CryptoConfig object with parameters used for encryption
+// It also takes dcparameters that may be needed for decryption when adding a recipient
+// to an already encrypted image
+func InitEncryption(parameters, dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: parameters,
+			DecryptConfig: DecryptConfig{
+				Parameters: dcparameters,
+			},
+		},
+	}
+}
+
+// CombineCryptoConfigs takes a CryptoConfig list and creates a single CryptoConfig
+// containing the crypto configuration of all the key bundles
+func CombineCryptoConfigs(ccs []CryptoConfig) CryptoConfig {
+	ecparam := map[string][][]byte{}
+	ecdcparam := map[string][][]byte{}
+	dcparam := map[string][][]byte{}
+
+	for _, cc := range ccs {
+		if ec := cc.EncryptConfig; ec != nil {
+			addToMap(ecparam, ec.Parameters)
+			addToMap(ecdcparam, ec.DecryptConfig.Parameters)
+		}
+
+		if dc := cc.DecryptConfig; dc != nil {
+			addToMap(dcparam, dc.Parameters)
+		}
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: ecparam,
+			DecryptConfig: DecryptConfig{
+				Parameters: ecdcparam,
+			},
+		},
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparam,
+		},
+	}
+
+}
+
+// AttachDecryptConfig adds DecryptConfig to the field of EncryptConfig so that
+// the decryption parameters can be used to add recipients to an existing image
+// if the user is able to decrypt it.
+func (ec *EncryptConfig) AttachDecryptConfig(dc *DecryptConfig) {
+	if dc != nil {
+		addToMap(ec.DecryptConfig.Parameters, dc.Parameters)
+	}
+}
+
+func addToMap(orig map[string][][]byte, add map[string][][]byte) {
+	for k, v := range add {
+		if ov, ok := orig[k]; ok {
+			orig[k] = append(ov, v...)
+		} else {
+			orig[k] = v
+		}
+	}
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/config/constructors.go b/tools/vendor/github.com/containers/ocicrypt/config/constructors.go
new file mode 100644
index 000000000..f7f29cd8d
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -0,0 +1,246 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package config + +import ( + "errors" + "fmt" + "strings" + + "github.com/containers/ocicrypt/crypto/pkcs11" + "gopkg.in/yaml.v3" +) + +// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys +func EncryptWithJwe(pubKeys [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{ + "pubkeys": pubKeys, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithPkcs7 returns a CryptoConfig to encrypt with pkcs7 x509 certs +func EncryptWithPkcs7(x509s [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + + ep := map[string][][]byte{ + "x509s": x509s, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithGpg returns a CryptoConfig to encrypt with configured gpg parameters +func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{ + "gpg-recipients": gpgRecipients, + "gpg-pubkeyringfile": {gpgPubRingFile}, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithPkcs11 returns a CryptoConfig to encrypt with configured pkcs11 parameters +func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Yamls [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{} + + if len(pkcs11Yamls) > 0 { + if pkcs11Config == nil { + return CryptoConfig{}, errors.New("pkcs11Config must not be nil") + } + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err) + } + + dc = DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-config": {p11confYaml}, + }, + } + ep["pkcs11-yamls"] = pkcs11Yamls + } + if len(pkcs11Pubkeys) > 0 { + ep["pkcs11-pubkeys"] = pkcs11Pubkeys + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithKeyProvider returns a CryptoConfig to encrypt with configured keyprovider parameters +func EncryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := make(map[string][][]byte) + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + ep[keyProvidersStr[:idx]] = append(ep[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + ep[keyProvidersStr] = append(ep[keyProvidersStr], []byte("Enabled")) + } + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithKeyProvider returns a CryptoConfig to decrypt with configured keyprovider parameters +func DecryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dp := make(map[string][][]byte) + ep := map[string][][]byte{} + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + dp[keyProvidersStr[:idx]] = append(dp[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + dp[keyProvidersStr] = append(dp[keyProvidersStr], []byte("Enabled")) + } + } + dc := DecryptConfig{ + Parameters: dp, + } + return CryptoConfig{ + 
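+		// Both halves are filled in (the encryption side with an empty
+		// parameter map) so the resulting CryptoConfig can be passed through
+		// code paths shared between encryption and decryption.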
EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys +func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) { + if len(privKeys) != len(privKeysPasswords) { + return CryptoConfig{}, errors.New("Length of privKeys should match length of privKeysPasswords") + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "privkeys": privKeys, + "privkeys-passwords": privKeysPasswords, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithX509s returns a CryptoConfig to decrypt with configured x509 certs +func DecryptWithX509s(x509s [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "x509s": x509s, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithGpgPrivKeys returns a CryptoConfig to decrypt with configured gpg private keys +func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "gpg-privatekeys": gpgPrivKeys, + "gpg-privatekeys-passwords": gpgPrivKeysPwds, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithPkcs11Yaml returns a CryptoConfig to decrypt with pkcs11 YAML formatted key files +func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) { + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err) + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-yamls": pkcs11Yamls, + "pkcs11-config": {p11confYaml}, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} diff --git a/tools/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go b/tools/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go new file mode 100644 index 000000000..4785a831b --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go @@ -0,0 +1,80 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+// Command describes the structure of command, it consists of path and args, where path defines the location of
+// binary executable and args are passed on to the binary executable
+type Command struct {
+	Path string   `json:"path,omitempty"`
+	Args []string `json:"args,omitempty"`
+}
+
+// KeyProviderAttrs describes the structure of key provider, it defines how the key provider is invoked
+type KeyProviderAttrs struct {
+	Command *Command `json:"cmd,omitempty"`
+	Grpc    string   `json:"grpc,omitempty"`
+}
+
+// OcicryptConfig represents the format of an ocicrypt_provider.conf config file
+type OcicryptConfig struct {
+	KeyProviderConfig map[string]KeyProviderAttrs `json:"key-providers"`
+}
+
+const ENVVARNAME = "OCICRYPT_KEYPROVIDER_CONFIG"
+
+// parseConfigFile parses a configuration file; it is not an error if the configuration file does
+// not exist, so no error is returned.
+func parseConfigFile(filename string) (*OcicryptConfig, error) {
+	// a non-existent config file is not an error
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	ic := &OcicryptConfig{}
+	err = json.Unmarshal(data, ic)
+	return ic, err
+}
+
+// GetConfiguration tries to read the configuration file named by the
+// ${OCICRYPT_KEYPROVIDER_CONFIG} environment variable, e.g. /etc/ocicrypt_keyprovider.yaml.
+// If no configuration file could be found or read, a nil pointer is returned.
+func GetConfiguration() (*OcicryptConfig, error) {
+	var ic *OcicryptConfig
+	var err error
+	filename := os.Getenv(ENVVARNAME)
+	if len(filename) > 0 {
+		ic, err = parseConfigFile(filename)
+		if err != nil {
+			return nil, fmt.Errorf("Error while parsing keyprovider config file: %w", err)
+		}
+	} else {
+		return nil, nil
+	}
+	return ic, nil
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
new file mode 100644
index 000000000..072d7fe18
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -0,0 +1,134 @@
+/*
+   Copyright The ocicrypt Authors.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+   http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"fmt"
+
+	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
+	"gopkg.in/yaml.v3"
+)
+
+// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
+// It also carries pkcs11 module related environment variables that are transferred to the
+// Pkcs11URI object and activated when the pkcs11 module is used.
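+//
+// A sketch of the YAML this struct unmarshals (illustrative; the SoftHSM
+// paths are placeholders chosen for the example, not defaults of this
+// package):
+//
+//	pkcs11:
+//	  uri: pkcs11:token=mytoken;object=mykey?module-path=/usr/lib64/pkcs11/libsofthsm2.so
+//	module:
+//	  env:
+//	    SOFTHSM2_CONF: /home/user/softhsm2.conf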
+type Pkcs11KeyFile struct {
+	Pkcs11 struct {
+		Uri string `yaml:"uri"`
+	} `yaml:"pkcs11"`
+	Module struct {
+		Env map[string]string `yaml:"env,omitempty"`
+	} `yaml:"module"`
+}
+
+// Pkcs11KeyFileObject is a representation of the Pkcs11KeyFile with the pkcs11 URI as an object
+type Pkcs11KeyFileObject struct {
+	Uri *pkcs11uri.Pkcs11URI
+}
+
+// ParsePkcs11Uri parses a pkcs11 URI
+func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
+	p11uri := pkcs11uri.New()
+	err := p11uri.Parse(uri)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse Pkcs11URI from file: %w", err)
+	}
+	return p11uri, err
+}
+
+// ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key.
+// The file has the following yaml format:
+// pkcs11:
+//   - uri: <pkcs11 URI>
+// An error is returned if the pkcs11 URI is malformed
+func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) {
+	p11keyfile := Pkcs11KeyFile{}
+
+	err := yaml.Unmarshal(yamlstr, &p11keyfile)
+	if err != nil {
+		return nil, fmt.Errorf("Could not unmarshal pkcs11 keyfile: %w", err)
+	}
+
+	p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri)
+	if err != nil {
+		return nil, err
+	}
+	p11uri.SetEnvMap(p11keyfile.Module.Env)
+
+	return &Pkcs11KeyFileObject{Uri: p11uri}, err
+}
+
+// IsPkcs11PrivateKey checks whether the given YAML represents a Pkcs11 private key
+func IsPkcs11PrivateKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// IsPkcs11PublicKey checks whether the given YAML represents a Pkcs11 public key
+func IsPkcs11PublicKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// Pkcs11Config describes the layout of a pkcs11 config file
+// The file has the following yaml format:
+// module-directories:
+// - /usr/lib64/pkcs11/
+// allowed-module-paths:
+// - /usr/lib64/pkcs11/libsofthsm2.so
+type Pkcs11Config struct {
+	ModuleDirectories  []string `yaml:"module-directories"`
+	AllowedModulePaths []string `yaml:"allowed-module-paths"`
+}
+
+// GetDefaultModuleDirectories returns module directories covering
+// a variety of Linux distros
+func GetDefaultModuleDirectories() []string {
+	dirs := []string{
+		"/usr/lib64/pkcs11/", // Fedora,RHEL,openSUSE
+		"/usr/lib/pkcs11/",   // Fedora,ArchLinux
+		"/usr/local/lib/pkcs11/",
+		"/usr/lib/softhsm/", // Debian,Ubuntu
+	}
+
+	// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|riscv64|s390x)-linux-gnu/
+	hosttype, ostype, q := getHostAndOsType()
+	if len(hosttype) > 0 {
+		dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)
+		dirs = append(dirs, dir)
+	}
+	return dirs
+}
+
+// GetDefaultModuleDirectoriesYaml returns the default module directories formatted for YAML
+func GetDefaultModuleDirectoriesYaml(indent string) string {
+	res := ""
+
+	for _, dir := range GetDefaultModuleDirectories() {
+		res += indent + "- " + dir + "\n"
+	}
+	return res
+}
+
+// ParsePkcs11ConfigFile parses a pkcs11 config file that influences the module search behavior
+// as well as the set of modules that users are allowed to use
+func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) {
+	p11conf := Pkcs11Config{}
+
+	err := yaml.Unmarshal(yamlstr, &p11conf)
+	if err != nil {
+		return &p11conf, fmt.Errorf("Could not parse Pkcs11Config: %w", err)
+	}
+	return &p11conf, nil
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
new file mode 100644
index
000000000..fe047a1e6 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go @@ -0,0 +1,485 @@ +//go:build cgo +// +build cgo + +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "net/url" + "os" + "strconv" + "strings" + + "github.com/miekg/pkcs11" + pkcs11uri "github.com/stefanberger/go-pkcs11uri" +) + +var ( + // OAEPLabel defines the label we use for OAEP encryption; this cannot be changed + OAEPLabel = []byte("") + + // OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM + OAEPSha1Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA_1, + MGF: pkcs11.CKG_MGF1_SHA1, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } + // OAEPSha256Params describes the OAEP parameters with sha256 hash algorithm + OAEPSha256Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA256, + MGF: pkcs11.CKG_MGF1_SHA256, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } +) + +// rsaPublicEncryptOAEP encrypts the given plaintext with the given *rsa.PublicKey; the +// environment variable OCICRYPT_OAEP_HASHALG can be set to 'sha1' to force usage of sha1 for OAEP (SoftHSM). 
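+// With the default (sha256) settings this function reduces to the following
+// sketch:
+//
+//	ciphertext, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, pubKey, plaintext, OAEPLabel)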
+// This function is needed by clients who are using a public key file for pkcs11 encryption
+func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, string, error) {
+	var (
+		hashfunc hash.Hash
+		hashalg  string
+	)
+
+	oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is sha256 (previously was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		hashfunc = sha1.New()
+		hashalg = "sha1"
+	case "sha256", "":
+		hashfunc = sha256.New()
+		hashalg = "sha256"
+	default:
+		return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+	ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel)
+	if err != nil {
+		return nil, "", fmt.Errorf("rsa.EncryptOAEP failed: %w", err)
+	}
+
+	return ciphertext, hashalg, nil
+}
+
+// pkcs11UriGetLoginParameters gets the parameters necessary for login from the Pkcs11URI
+// PIN and module are mandatory; slot-id is optional and if not found -1 will be returned
+// For a privateKeyOperation a PIN is required and if none is given, this function will return an error
+func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (string, string, int64, error) {
+	var (
+		pin string
+		err error
+	)
+	if privateKeyOperation {
+		if !p11uri.HasPIN() {
+			return "", "", 0, errors.New("Missing PIN for private key operation")
+		}
+	}
+	// some devices require a PIN to find a *public* key object, others don't
+	pin, _ = p11uri.GetPIN()
+
+	module, err := p11uri.GetModule()
+	if err != nil {
+		return "", "", 0, fmt.Errorf("No module available in pkcs11 URI: %w", err)
+	}
+
+	slotid := int64(-1)
+
+	slot, ok := p11uri.GetPathAttribute("slot-id", false)
+	if ok {
+		slotid, err = strconv.ParseInt(slot, 10, 64)
+		if err != nil {
+			return "", "", 0, fmt.Errorf("slot-id is not a valid number: %w", err)
+		}
+		if slotid < 0 {
+			return "", "", 0, fmt.Errorf("slot-id is a negative number")
+		}
+		if uint64(slotid) > 0xffffffff {
+			return "", "", 0, fmt.Errorf("slot-id is larger than 32 bit")
+		}
+	}
+
+	return pin, module, slotid, nil
+}
+
+// pkcs11UriGetKeyIdAndLabel gets the key label by retrieving the value of the 'object' attribute
+func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, error) {
+	keyid, ok2 := p11uri.GetPathAttribute("id", false)
+	label, ok1 := p11uri.GetPathAttribute("object", false)
+	if !ok1 && !ok2 {
+		return "", "", errors.New("Neither 'id' nor 'object' attributes were found in pkcs11 URI")
+	}
+	return keyid, label, nil
+}
+
+// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN
+func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) {
+	session, err = p11ctx.OpenSession(slotid, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+	if err != nil {
+		return 0, fmt.Errorf("OpenSession to slot %d failed: %w", slotid, err)
+	}
+	if len(pin) > 0 {
+		err = p11ctx.Login(session, pkcs11.CKU_USER, pin)
+		if err != nil {
+			_ = p11ctx.CloseSession(session)
+			return 0, fmt.Errorf("Could not login to device: %w", err)
+		}
+	}
+	return session, nil
+}
+
+// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get
+// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise
+// one slot after the other will be attempted and the first one where login succeeds will be used
+func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx
*pkcs11.Ctx, session pkcs11.SessionHandle, err error) { + pin, module, slotid, err := pkcs11UriGetLoginParameters(p11uri, privateKeyOperation) + if err != nil { + return nil, 0, err + } + + p11ctx := pkcs11.New(module) + if p11ctx == nil { + return nil, 0, errors.New("Please check module path, input is: " + module) + } + + err = p11ctx.Initialize() + if err != nil { + p11Err := err.(pkcs11.Error) + if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED { + return nil, 0, fmt.Errorf("Initialize failed: %w", err) + } + } + + if slotid >= 0 { + session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin) + return p11ctx, session, err + } + + slots, err := p11ctx.GetSlotList(true) + if err != nil { + return nil, 0, fmt.Errorf("GetSlotList failed: %w", err) + } + + tokenlabel, ok := p11uri.GetPathAttribute("token", false) + if !ok { + return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given") + } + + for _, slot := range slots { + ti, err := p11ctx.GetTokenInfo(slot) + if err != nil || ti.Label != tokenlabel { + continue + } + + session, err = pkcs11OpenSession(p11ctx, slot, pin) + if err == nil { + return p11ctx, session, err + } + } + if len(pin) > 0 { + return nil, 0, errors.New("Could not create session to any slot and/or log in") + } + return nil, 0, errors.New("Could not create session to any slot") +} + +func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) { + _ = ctx.Logout(session) + _ = ctx.CloseSession(session) + _ = ctx.Finalize() + ctx.Destroy() +} + +// findObject finds an object of the given class with the given keyid and/or label +func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, keyid, label string) (pkcs11.ObjectHandle, error) { + msg := "" + + template := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), + } + if len(label) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label)) + msg = fmt.Sprintf("label '%s'", label) + } + if len(keyid) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, keyid)) + if len(msg) > 0 { + msg += " and " + } + msg += url.PathEscape(keyid) + } + + if err := p11ctx.FindObjectsInit(session, template); err != nil { + return 0, fmt.Errorf("FindObjectsInit failed: %w", err) + } + + obj, _, err := p11ctx.FindObjects(session, 100) + if err != nil { + return 0, fmt.Errorf("FindObjects failed: %w", err) + } + + if err := p11ctx.FindObjectsFinal(session); err != nil { + return 0, fmt.Errorf("FindObjectsFinal failed: %w", err) + } + if len(obj) > 1 { + return 0, fmt.Errorf("There are too many (=%d) keys with %s", len(obj), msg) + } else if len(obj) == 1 { + return obj[0], nil + } + + return 0, fmt.Errorf("Could not find any object with %s", msg) +} + +// publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext +func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, string, error) { + oldenv, err := setEnvVars(pubKey.Uri.GetEnvMap()) + if err != nil { + return nil, "", err + } + defer restoreEnv(oldenv) + + p11ctx, session, err := pkcs11UriLogin(pubKey.Uri, false) + if err != nil { + return nil, "", err + } + defer pkcs11Logout(p11ctx, session) + + keyid, label, err := pkcs11UriGetKeyIdAndLabel(pubKey.Uri) + if err != nil { + return nil, "", err + } + + p11PubKey, err := findObject(p11ctx, session, pkcs11.CKO_PUBLIC_KEY, keyid, label) + if err != nil { + return nil, "", err + } + + var hashalg string + + var oaep *pkcs11.OAEPParams + oaephash := 
os.Getenv("OCICRYPT_OAEP_HASHALG") + // The default is sha256 (previously was sha1) + switch strings.ToLower(oaephash) { + case "sha1": + oaep = OAEPSha1Params + hashalg = "sha1" + case "sha256", "": + oaep = OAEPSha256Params + hashalg = "sha256" + default: + return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash) + } + + err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey) + if err != nil { + return nil, "", fmt.Errorf("EncryptInit error: %w", err) + } + + ciphertext, err := p11ctx.Encrypt(session, plaintext) + if err != nil { + return nil, "", fmt.Errorf("Encrypt failed: %w", err) + } + return ciphertext, hashalg, nil +} + +// privateDecryptOAEP uses a pkcs11 URI describing a private key to OAEP decrypt a ciphertext +func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hashalg string) ([]byte, error) { + oldenv, err := setEnvVars(privKeyObj.Uri.GetEnvMap()) + if err != nil { + return nil, err + } + defer restoreEnv(oldenv) + + p11ctx, session, err := pkcs11UriLogin(privKeyObj.Uri, true) + if err != nil { + return nil, err + } + defer pkcs11Logout(p11ctx, session) + + keyid, label, err := pkcs11UriGetKeyIdAndLabel(privKeyObj.Uri) + if err != nil { + return nil, err + } + + p11PrivKey, err := findObject(p11ctx, session, pkcs11.CKO_PRIVATE_KEY, keyid, label) + if err != nil { + return nil, err + } + + var oaep *pkcs11.OAEPParams + + // An empty string from the Hash in the JSON historically defaults to sha1. + switch hashalg { + case "sha1", "": + oaep = OAEPSha1Params + case "sha256": + oaep = OAEPSha256Params + default: + return nil, fmt.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg) + } + + err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey) + if err != nil { + return nil, fmt.Errorf("DecryptInit failed: %w", err) + } + plaintext, err := p11ctx.Decrypt(session, ciphertext) + if err != nil { + return nil, fmt.Errorf("Decrypt failed: %w", err) + } + return plaintext, err +} + +// +// The following part deals with the JSON formatted message for multiple pkcs11 recipients +// + +// Pkcs11Blob holds the encrypted blobs for all recipients; this is what we will put into the image's annotations +type Pkcs11Blob struct { + Version uint `json:"version"` + Recipients []Pkcs11Recipient `json:"recipients"` +} + +// Pkcs11Recipient holds the b64-encoded and encrypted blob for a particular recipient +type Pkcs11Recipient struct { + Version uint `json:"version"` + Blob string `json:"blob"` + Hash string `json:"hash,omitempty"` +} + +// EncryptMultiple encrypts for one or multiple pkcs11 devices; the public keys passed to this function +// may either be *rsa.PublicKey or *pkcs11uri.Pkcs11URI; the returned byte array is a JSON string of the +// following format: +// { +// recipients: [ // recipient list +// { +// "version": 0, +// "blob": , +// "hash": +// } , +// { +// "version": 0, +// "blob": , +// "hash": +// } , +// [...] 
+// ] +// } +func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { + var ( + ciphertext []byte + err error + pkcs11blob Pkcs11Blob = Pkcs11Blob{Version: 0} + hashalg string + ) + + for _, pubKey := range pubKeys { + switch pkey := pubKey.(type) { + case *rsa.PublicKey: + ciphertext, hashalg, err = rsaPublicEncryptOAEP(pkey, data) + case *Pkcs11KeyFileObject: + ciphertext, hashalg, err = publicEncryptOAEP(pkey, data) + default: + err = fmt.Errorf("Unsupported key object type for pkcs11 public key") + } + if err != nil { + return nil, err + } + + recipient := Pkcs11Recipient{ + Version: 0, + Blob: base64.StdEncoding.EncodeToString(ciphertext), + Hash: hashalg, + } + + pkcs11blob.Recipients = append(pkcs11blob.Recipients, recipient) + } + return json.Marshal(&pkcs11blob) +} + +// Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key. +// The input pkcs11blobstr is a string with the following format: +// { +// recipients: [ // recipient list +// { +// "version": 0, +// "blob": , +// "hash": +// } , +// { +// "version": 0, +// "blob": , +// "hash": +// } , +// [...] +// } +// Note: More recent versions of this code explicitly write 'sha1' +// while older versions left it empty in case of 'sha1'. +func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { + pkcs11blob := Pkcs11Blob{} + err := json.Unmarshal(pkcs11blobstr, &pkcs11blob) + if err != nil { + return nil, fmt.Errorf("Could not parse Pkcs11Blob: %w", err) + } + switch pkcs11blob.Version { + case 0: + // latest supported version + default: + return nil, fmt.Errorf("found Pkcs11Blob with version %d but maximum supported version is 0", pkcs11blob.Version) + } + // since we do trial and error, collect all encountered errors + errs := "" + + for _, recipient := range pkcs11blob.Recipients { + switch recipient.Version { + case 0: + // last supported version + default: + return nil, fmt.Errorf("found Pkcs11Recipient with version %d but maximum supported version is 0", recipient.Version) + } + + ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob) + if err != nil || len(ciphertext) == 0 { + // This should never happen... we skip over decoding issues + errs += fmt.Sprintf("Base64 decoding failed: %s\n", err) + continue + } + // try all keys until one works + for _, privKeyObj := range privKeyObjs { + plaintext, err := privateDecryptOAEP(privKeyObj, ciphertext, recipient.Hash) + if err == nil { + return plaintext, nil + } + if uri, err2 := privKeyObj.Uri.Format(); err2 == nil { + errs += fmt.Sprintf("%s : %s\n", uri, err) + } else { + errs += fmt.Sprintf("%s\n", err) + } + } + } + + return nil, fmt.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs) +} diff --git a/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go new file mode 100644 index 000000000..6cf0aa2a9 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go @@ -0,0 +1,30 @@ +//go:build !cgo +// +build !cgo + +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import "fmt" + +func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { + return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build") +} + +func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { + return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build") +} diff --git a/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go new file mode 100644 index 000000000..231da2317 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go @@ -0,0 +1,115 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "fmt" + "os" + "runtime" + "strings" + "sync" +) + +var ( + envLock sync.Mutex +) + +// setEnvVars sets the environment variables given in the map and locks the environment from +// modification with the same function; if successful, you *must* call restoreEnv with the return +// value from this function +func setEnvVars(env map[string]string) ([]string, error) { + envLock.Lock() + + if len(env) == 0 { + return nil, nil + } + + oldenv := os.Environ() + + for k, v := range env { + err := os.Setenv(k, v) + if err != nil { + restoreEnv(oldenv) + return nil, fmt.Errorf("Could not set environment variable '%s' to '%s': %w", k, v, err) + } + } + + return oldenv, nil +} + +func arrayToMap(elements []string) map[string]string { + o := make(map[string]string) + + for _, element := range elements { + p := strings.SplitN(element, "=", 2) + if len(p) == 2 { + o[p[0]] = p[1] + } + } + + return o +} + +// restoreEnv restores the environment to be exactly as given in the array of strings +// and unlocks the lock +func restoreEnv(envs []string) { + if envs != nil && len(envs) >= 0 { + target := arrayToMap(envs) + curr := arrayToMap(os.Environ()) + + for nc, vc := range curr { + vt, ok := target[nc] + if !ok { + os.Unsetenv(nc) + } else if vc == vt { + delete(target, nc) + } + } + + for nt, vt := range target { + os.Setenv(nt, vt) + } + } + + envLock.Unlock() +} + +func getHostAndOsType() (string, string, string) { + ht := "" + ot := "" + st := "" + switch runtime.GOOS { + case "linux": + ot = "linux" + st = "gnu" + switch runtime.GOARCH { + case "arm": + ht = "arm" + case "arm64": + ht = "aarch64" + case "amd64": + ht = "x86_64" + case "ppc64le": + ht = "powerpc64le" + case "riscv64": + ht = "riscv64" + case "s390x": + ht = "s390x" + } + } + return ht, ot, st +} diff --git a/tools/vendor/github.com/containers/ocicrypt/encryption.go 
b/tools/vendor/github.com/containers/ocicrypt/encryption.go new file mode 100644 index 000000000..b6fa9db40 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/encryption.go @@ -0,0 +1,356 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ocicrypt + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + + "github.com/containers/ocicrypt/blockcipher" + "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/keywrap/jwe" + "github.com/containers/ocicrypt/keywrap/keyprovider" + "github.com/containers/ocicrypt/keywrap/pgp" + "github.com/containers/ocicrypt/keywrap/pkcs11" + "github.com/containers/ocicrypt/keywrap/pkcs7" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + log "github.com/sirupsen/logrus" +) + +// EncryptLayerFinalizer is a finalizer run to return the annotations to set for +// the encrypted layer +type EncryptLayerFinalizer func() (map[string]string, error) + +func init() { + keyWrappers = make(map[string]keywrap.KeyWrapper) + keyWrapperAnnotations = make(map[string]string) + RegisterKeyWrapper("pgp", pgp.NewKeyWrapper()) + RegisterKeyWrapper("jwe", jwe.NewKeyWrapper()) + RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper()) + RegisterKeyWrapper("pkcs11", pkcs11.NewKeyWrapper()) + ic, err := keyproviderconfig.GetConfiguration() + if err != nil { + log.Error(err) + } else if ic != nil { + for provider, attrs := range ic.KeyProviderConfig { + RegisterKeyWrapper("provider."+provider, keyprovider.NewKeyWrapper(provider, attrs)) + } + } +} + +var keyWrappers map[string]keywrap.KeyWrapper +var keyWrapperAnnotations map[string]string + +// RegisterKeyWrapper allows to register key wrappers by their encryption scheme +func RegisterKeyWrapper(scheme string, iface keywrap.KeyWrapper) { + keyWrappers[scheme] = iface + keyWrapperAnnotations[iface.GetAnnotationID()] = scheme +} + +// GetKeyWrapper looks up the encryptor interface given an encryption scheme (gpg, jwe) +func GetKeyWrapper(scheme string) keywrap.KeyWrapper { + return keyWrappers[scheme] +} + +// GetWrappedKeysMap returns a map of wrappedKeys as values in a +// map with the encryption scheme(s) as the key(s) +func GetWrappedKeysMap(desc ocispec.Descriptor) map[string]string { + wrappedKeysMap := make(map[string]string) + + for annotationsID, scheme := range keyWrapperAnnotations { + if annotation, ok := desc.Annotations[annotationsID]; ok { + wrappedKeysMap[scheme] = annotation + } + } + return wrappedKeysMap +} + +// EncryptLayer encrypts the layer by running one encryptor after the other +func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) { + var ( + encLayerReader io.Reader + err error + encrypted bool + bcFin blockcipher.Finalizer + privOptsData []byte + pubOptsData 
[]byte + ) + + if ec == nil { + return nil, nil, errors.New("EncryptConfig must not be nil") + } + + for annotationsID := range keyWrapperAnnotations { + annotation := desc.Annotations[annotationsID] + if annotation != "" { + privOptsData, err = decryptLayerKeyOptsData(&ec.DecryptConfig, desc) + if err != nil { + return nil, nil, err + } + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, nil, err + } + // already encrypted! + encrypted = true + } + } + + if !encrypted { + encLayerReader, bcFin, err = commonEncryptLayer(encOrPlainLayerReader, desc.Digest, blockcipher.AES256CTR) + if err != nil { + return nil, nil, err + } + } + + encLayerFinalizer := func() (map[string]string, error) { + // If layer was already encrypted, bcFin should be nil, use existing optsData + if bcFin != nil { + opts, err := bcFin() + if err != nil { + return nil, err + } + privOptsData, err = json.Marshal(opts.Private) + if err != nil { + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) + } + pubOptsData, err = json.Marshal(opts.Public) + if err != nil { + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) + } + } + + newAnnotations := make(map[string]string) + keysWrapped := false + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotations := desc.Annotations[annotationsID] + keywrapper := GetKeyWrapper(scheme) + b64Annotations, err = preWrapKeys(keywrapper, ec, b64Annotations, privOptsData) + if err != nil { + return nil, err + } + if b64Annotations != "" { + keysWrapped = true + newAnnotations[annotationsID] = b64Annotations + } + } + + if !keysWrapped { + return nil, errors.New("no wrapped keys produced by encryption") + } + newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData) + + if len(newAnnotations) == 0 { + return nil, errors.New("no encryptor found to handle encryption") + } + + return newAnnotations, err + } + + // if nothing was encrypted, we just return encLayer = nil + return encLayerReader, encLayerFinalizer, err + +} + +// preWrapKeys calls WrapKeys and handles the base64 encoding and concatenation of the +// annotation data +func preWrapKeys(keywrapper keywrap.KeyWrapper, ec *config.EncryptConfig, b64Annotations string, optsData []byte) (string, error) { + newAnnotation, err := keywrapper.WrapKeys(ec, optsData) + if err != nil || len(newAnnotation) == 0 { + return b64Annotations, err + } + b64newAnnotation := base64.StdEncoding.EncodeToString(newAnnotation) + if b64Annotations == "" { + return b64newAnnotation, nil + } + return b64Annotations + "," + b64newAnnotation, nil +} + +// DecryptLayer decrypts a layer trying one keywrap.KeyWrapper after the other to see whether it +// can apply the provided private key +// If unwrapOnly is set we will only try to decrypt the layer encryption key and return +func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) { + if dc == nil { + return nil, "", errors.New("DecryptConfig must not be nil") + } + privOptsData, err := decryptLayerKeyOptsData(dc, desc) + if err != nil || unwrapOnly { + return nil, "", err + } + + var pubOptsData []byte + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, "", err + } + + return commonDecryptLayer(encLayerReader, privOptsData, pubOptsData) +} + +func decryptLayerKeyOptsData(dc 
*config.DecryptConfig, desc ocispec.Descriptor) ([]byte, error) { + privKeyGiven := false + errs := "" + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotation := desc.Annotations[annotationsID] + if b64Annotation != "" { + keywrapper := GetKeyWrapper(scheme) + + if keywrapper.NoPossibleKeys(dc.Parameters) { + continue + } + + if len(keywrapper.GetPrivateKeys(dc.Parameters)) > 0 { + privKeyGiven = true + } + optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation) + if err != nil { + // try next keywrap.KeyWrapper + errs += fmt.Sprintf("%s\n", err) + continue + } + if optsData == nil { + // try next keywrap.KeyWrapper + continue + } + return optsData, nil + } + } + if !privKeyGiven { + return nil, fmt.Errorf("missing private key needed for decryption:\n%s", errs) + } + return nil, fmt.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs) +} + +func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) { + pubOptsString := desc.Annotations["org.opencontainers.image.enc.pubopts"] + if pubOptsString == "" { + return json.Marshal(blockcipher.PublicLayerBlockCipherOptions{}) + } + return base64.StdEncoding.DecodeString(pubOptsString) +} + +// preUnwrapKey decodes the comma separated base64 strings and calls the Unwrap function +// of the given keywrapper with it and returns the result in case the Unwrap functions +// does not return an error. If all attempts fail, an error is returned. +func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64Annotations string) ([]byte, error) { + if b64Annotations == "" { + return nil, nil + } + errs := "" + for _, b64Annotation := range strings.Split(b64Annotations, ",") { + annotation, err := base64.StdEncoding.DecodeString(b64Annotation) + if err != nil { + return nil, errors.New("could not base64 decode the annotation") + } + optsData, err := keywrapper.UnwrapKey(dc, annotation) + if err != nil { + errs += fmt.Sprintf("- %s\n", err) + continue + } + return optsData, nil + } + return nil, fmt.Errorf("no suitable key found for decrypting layer key:\n%s", errs) +} + +// commonEncryptLayer is a function to encrypt the plain layer using a new random +// symmetric key and return the LayerBlockCipherHandler's JSON in string form for +// later use during decryption +func commonEncryptLayer(plainLayerReader io.Reader, d digest.Digest, typ blockcipher.LayerCipherType) (io.Reader, blockcipher.Finalizer, error) { + lbch, err := blockcipher.NewLayerBlockCipherHandler() + if err != nil { + return nil, nil, err + } + + encLayerReader, bcFin, err := lbch.Encrypt(plainLayerReader, typ) + if err != nil { + return nil, nil, err + } + + newBcFin := func() (blockcipher.LayerBlockCipherOptions, error) { + lbco, err := bcFin() + if err != nil { + return blockcipher.LayerBlockCipherOptions{}, err + } + lbco.Private.Digest = d + return lbco, nil + } + + return encLayerReader, newBcFin, err +} + +// commonDecryptLayer decrypts an encrypted layer previously encrypted with commonEncryptLayer +// by passing along the optsData +func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsData []byte) (io.Reader, digest.Digest, error) { + privOpts := blockcipher.PrivateLayerBlockCipherOptions{} + err := json.Unmarshal(privOptsData, &privOpts) + if err != nil { + return nil, "", fmt.Errorf("could not JSON unmarshal privOptsData: %w", err) + } + + lbch, err 
:= blockcipher.NewLayerBlockCipherHandler() + if err != nil { + return nil, "", err + } + + pubOpts := blockcipher.PublicLayerBlockCipherOptions{} + if len(pubOptsData) > 0 { + err := json.Unmarshal(pubOptsData, &pubOpts) + if err != nil { + return nil, "", fmt.Errorf("could not JSON unmarshal pubOptsData: %w", err) + } + } + + opts := blockcipher.LayerBlockCipherOptions{ + Private: privOpts, + Public: pubOpts, + } + + plainLayerReader, opts, err := lbch.Decrypt(encLayerReader, opts) + if err != nil { + return nil, "", err + } + + return plainLayerReader, opts.Private.Digest, nil +} + +// FilterOutAnnotations filters out the annotations belonging to the image encryption 'namespace' +// and returns a map with those taken out +func FilterOutAnnotations(annotations map[string]string) map[string]string { + a := make(map[string]string) + if len(annotations) > 0 { + for k, v := range annotations { + if strings.HasPrefix(k, "org.opencontainers.image.enc.") { + continue + } + a[k] = v + } + } + return a +} diff --git a/tools/vendor/github.com/containers/ocicrypt/gpg.go b/tools/vendor/github.com/containers/ocicrypt/gpg.go new file mode 100644 index 000000000..3bba4669b --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/gpg.go @@ -0,0 +1,431 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ocicrypt + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/term" +) + +// GPGVersion enum representing the GPG client version to use. +type GPGVersion int + +const ( + // GPGv2 signifies gpgv2+ + GPGv2 GPGVersion = iota + // GPGv1 signifies gpgv1+ + GPGv1 + // GPGVersionUndetermined signifies gpg client version undetermined + GPGVersionUndetermined +) + +// GPGClient defines an interface for wrapping the gpg command line tools +type GPGClient interface { + // ReadGPGPubRingFile gets the byte sequence of the gpg public keyring + ReadGPGPubRingFile() ([]byte, error) + // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase + GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) + // GetSecretKeyDetails gets the details of a secret key + GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) + // GetKeyDetails gets the details of a public key + GetKeyDetails(keyid uint64) ([]byte, bool, error) + // ResolveRecipients resolves PGP key ids to user names + ResolveRecipients([]string) []string +} + +// gpgClient contains generic gpg client information +type gpgClient struct { + gpgHomeDir string +} + +// gpgv2Client is a gpg2 client +type gpgv2Client struct { + gpgClient +} + +// gpgv1Client is a gpg client +type gpgv1Client struct { + gpgClient +} + +// GuessGPGVersion guesses the version of gpg. Defaults to gpg2 if exists, if +// not defaults to regular gpg. 
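+//
+// Illustrative use (sketch): NewGPGClient falls back to this guess when no
+// explicit version is requested, so
+//
+//	client, err := NewGPGClient("", "")
+//
+// yields a gpg2-backed client on systems where `gpg2 --version` succeeds.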
+func GuessGPGVersion() GPGVersion { + if err := exec.Command("gpg2", "--version").Run(); err == nil { + return GPGv2 + } else if err := exec.Command("gpg", "--version").Run(); err == nil { + return GPGv1 + } + return GPGVersionUndetermined +} + +// NewGPGClient creates a new GPGClient object representing the given version +// and using the given home directory +func NewGPGClient(gpgVersion, gpgHomeDir string) (GPGClient, error) { + v := new(GPGVersion) + switch gpgVersion { + case "v1": + *v = GPGv1 + case "v2": + *v = GPGv2 + default: + v = nil + } + return newGPGClient(v, gpgHomeDir) +} + +func newGPGClient(version *GPGVersion, homedir string) (GPGClient, error) { + var gpgVersion GPGVersion + if version != nil { + gpgVersion = *version + } else { + gpgVersion = GuessGPGVersion() + } + + switch gpgVersion { + case GPGv1: + return &gpgv1Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGv2: + return &gpgv2Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGVersionUndetermined: + return nil, fmt.Errorf("unable to determine GPG version") + default: + return nil, fmt.Errorf("unhandled case: NewGPGClient") + } +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + + rfile, wfile, err := os.Pipe() + if err != nil { + return nil, fmt.Errorf("could not create pipe: %w", err) + } + defer func() { + rfile.Close() + wfile.Close() + }() + // fill pipe in background + go func(passphrase string) { + _, _ = wfile.Write([]byte(passphrase)) + wfile.Close() + }(passphrase) + + args = append(args, []string{"--pinentry-mode", "loopback", "--batch", "--passphrase-fd", fmt.Sprintf("%d", 3), "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg2", args...) + cmd.ExtraFiles = []*os.File{rfile} + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv2Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg2", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg2", args...) + + keydata, err := runGPGGetOutput(cmd) + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-k", keyid) +} + +// ResolveRecipients converts PGP keyids to email addresses, if possible +func (gc *gpgv2Client) ResolveRecipients(recipients []string) []string { + return resolveRecipients(gc, recipients) +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv1Client) GetGPGPrivateKey(keyid uint64, _ string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv1Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg", args...) + + keydata, err := runGPGGetOutput(cmd) + + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists
+func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) {
+	return gc.getKeyDetails("-k", keyid)
+}
+
+// ResolveRecipients converts PGP keyids to email addresses, if possible
+func (gc *gpgv1Client) ResolveRecipients(recipients []string) []string {
+	return resolveRecipients(gc, recipients)
+}
+
+// runGPGGetOutput runs the GPG commandline and returns stdout as byte array
+// and any stderr in the error
+func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) {
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	stdoutstr, err2 := io.ReadAll(stdout)
+	stderrstr, _ := io.ReadAll(stderr)
+
+	if err := cmd.Wait(); err != nil {
+		return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr))
+	}
+
+	return stdoutstr, err2
+}
+
+// resolveRecipients walks the list of recipients and attempts to convert
+// all keyIds to email addresses; if something goes wrong during the
+// conversion of a recipient, the original string is returned for that
+// recipient
+func resolveRecipients(gc GPGClient, recipients []string) []string {
+	var result []string
+
+	for _, recipient := range recipients {
+		keyID, err := strconv.ParseUint(recipient, 0, 64)
+		if err != nil {
+			result = append(result, recipient)
+		} else {
+			details, found, _ := gc.GetKeyDetails(keyID)
+			if !found {
+				result = append(result, recipient)
+			} else {
+				email := extractEmailFromDetails(details)
+				if email == "" {
+					result = append(result, recipient)
+				} else {
+					result = append(result, email)
+				}
+			}
+		}
+	}
+	return result
+}
+
+var (
+	onceRegexp   sync.Once
+	emailPattern *regexp.Regexp
+)
+
+func extractEmailFromDetails(details []byte) string {
+	onceRegexp.Do(func() {
+		emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
+	})
+	loc := emailPattern.FindSubmatchIndex(details)
+	if len(loc) == 0 {
+		return ""
+	}
+	return string(emailPattern.Expand(nil, []byte("$email"), details, loc))
+}
+
+// uint64ToStringArray converts an array of uint64's to an array of strings
+// by applying a format string to each uint64
+func uint64ToStringArray(format string, in []uint64) []string {
+	var ret []string
+
+	for _, v := range in {
+		ret = append(ret, fmt.Sprintf(format, v))
+	}
+	return ret
+}
+
+// GPGGetPrivateKey walks the list of layer descriptors and tries to decrypt the
+// wrapped symmetric keys. For this it determines whether a private key is
+// in the GPGVault or on this system and prompts for the passwords for those
+// that are available. If we do not find a private key on the system for
+// getting to the symmetric key of a layer then an error is generated.
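+//
+// A sketch of a call site (illustrative; descs would be the layer descriptors
+// of an encrypted image, and passing a nil GPGVault selects the local gpg
+// client path):
+//
+//	gpgPrivKeys, gpgPrivKeysPwds, err := GPGGetPrivateKey(descs, gpgClient, nil, true)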
+func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault GPGVault, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) { + // PrivateKeyData describes a private key + type PrivateKeyData struct { + KeyData []byte + KeyDataPassword []byte + } + var pkd PrivateKeyData + keyIDPasswordMap := make(map[uint64]PrivateKeyData) + + for _, desc := range descs { + for scheme, b64pgpPackets := range GetWrappedKeysMap(desc) { + if scheme != "pgp" { + continue + } + keywrapper := GetKeyWrapper(scheme) + if keywrapper == nil { + return nil, nil, fmt.Errorf("could not get KeyWrapper for %s", scheme) + } + keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, nil, err + } + + found := false + for _, keyid := range keyIds { + // do we have this key? -- first check the vault + if gpgVault != nil { + _, keydata := gpgVault.GetGPGPrivateKey(keyid) + if len(keydata) > 0 { + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: nil, // password not supported in this case + } + keyIDPasswordMap[keyid] = pkd + found = true + break + } + } else if gpgClient != nil { + // check the local system's gpg installation + keyinfo, haveKey, _ := gpgClient.GetSecretKeyDetails(keyid) + // this may fail if the key is not here; we ignore the error + if !haveKey { + // key not on this system + continue + } + + _, found = keyIDPasswordMap[keyid] + if !found { + fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo)) + fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid) + + password, err := term.ReadPassword(int(os.Stdin.Fd())) + fmt.Printf("\n") + if err != nil { + return nil, nil, err + } + keydata, err := gpgClient.GetGPGPrivateKey(keyid, string(password)) + if err != nil { + return nil, nil, err + } + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: password, + } + keyIDPasswordMap[keyid] = pkd + found = true + } + break + } else { + return nil, nil, errors.New("no GPGVault or GPGClient passed") + } + } + if !found && len(b64pgpPackets) > 0 && mustFindKey { + ids := uint64ToStringArray("0x%x", keyIds) + + return nil, nil, fmt.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", ")) + } + } + } + + for _, pkd := range keyIDPasswordMap { + gpgPrivKeys = append(gpgPrivKeys, pkd.KeyData) + gpgPrivKeysPwds = append(gpgPrivKeysPwds, pkd.KeyDataPassword) + } + + return gpgPrivKeys, gpgPrivKeysPwds, nil +} diff --git a/tools/vendor/github.com/containers/ocicrypt/gpgvault.go b/tools/vendor/github.com/containers/ocicrypt/gpgvault.go new file mode 100644 index 000000000..f1bd0d989 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/gpgvault.go @@ -0,0 +1,100 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package ocicrypt
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+
+ "golang.org/x/crypto/openpgp"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+// GPGVault defines an interface for wrapping multiple secret key rings
+type GPGVault interface {
+ // AddSecretKeyRingData adds a secret keyring via its raw byte array
+ AddSecretKeyRingData(gpgSecretKeyRingData []byte) error
+ // AddSecretKeyRingDataArray adds secret keyrings via their raw byte arrays
+ AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error
+ // AddSecretKeyRingFiles adds secret keyrings given their filenames
+ AddSecretKeyRingFiles(filenames []string) error
+ // GetGPGPrivateKey gets the decryption keys matching a keyid along with
+ // the raw keyring data they came from
+ GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte)
+}
+
+// gpgVault wraps an array of secret keyrings
+type gpgVault struct {
+ entityLists []openpgp.EntityList
+ keyDataList [][]byte // the raw data originally passed in
+}
+
+// NewGPGVault creates an empty GPGVault
+func NewGPGVault() GPGVault {
+ return &gpgVault{}
+}
+
+// AddSecretKeyRingData adds a secret keyring to the gpgVault; the raw byte
+// array read from the file must be passed and will be parsed by this function
+func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error {
+ // read the private keys
+ r := bytes.NewReader(gpgSecretKeyRingData)
+ entityList, err := openpgp.ReadKeyRing(r)
+ if err != nil {
+ return fmt.Errorf("could not read keyring: %w", err)
+ }
+ g.entityLists = append(g.entityLists, entityList)
+ g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData)
+ return nil
+}
+
+// AddSecretKeyRingDataArray adds secret keyrings to the gpgVault; the raw byte
+// arrays read from files must be passed
+func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error {
+ for _, gpgSecretKeyRingData := range gpgSecretKeyRingDataArray {
+ if err := g.AddSecretKeyRingData(gpgSecretKeyRingData); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddSecretKeyRingFiles adds the secret key rings given their filenames
+func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error {
+ for _, filename := range filenames {
+ gpgSecretKeyRingData, err := os.ReadFile(filename)
+ if err != nil {
+ return err
+ }
+ err = g.AddSecretKeyRingData(gpgSecretKeyRingData)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetGPGPrivateKey returns the decryption keys matching the given keyid and
+// the raw keyring data they came from
+func (g *gpgVault) GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) {
+ for i, el := range g.entityLists {
+ decKeys := el.KeysByIdUsage(keyid, packet.KeyFlagEncryptCommunications)
+ if len(decKeys) > 0 {
+ return decKeys, g.keyDataList[i]
+ }
+ }
+ return nil, nil
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/tools/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
new file mode 100644
index 000000000..c1bdd6fbe
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
@@ -0,0 +1,156 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package jwe
+
+import (
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+
+ "github.com/containers/ocicrypt/config"
+ "github.com/containers/ocicrypt/keywrap"
+ "github.com/containers/ocicrypt/utils"
+ "github.com/go-jose/go-jose/v4"
+)
+
+type jweKeyWrapper struct {
+}
+
+func (kw *jweKeyWrapper) GetAnnotationID() string {
+ return "org.opencontainers.image.enc.keys.jwe"
+}
+
+// NewKeyWrapper returns a new key wrapping interface using jwe
+func NewKeyWrapper() keywrap.KeyWrapper {
+ return &jweKeyWrapper{}
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+ var joseRecipients []jose.Recipient
+
+ err := addPubKeys(&joseRecipients, ec.Parameters["pubkeys"])
+ if err != nil {
+ return nil, err
+ }
+ // no recipients is not an error...
+ if len(joseRecipients) == 0 {
+ return nil, nil
+ }
+
+ encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil)
+ if err != nil {
+ return nil, fmt.Errorf("jose.NewMultiEncrypter failed: %w", err)
+ }
+ jwe, err := encrypter.Encrypt(optsData)
+ if err != nil {
+ return nil, fmt.Errorf("JWE Encrypt failed: %w", err)
+ }
+ return []byte(jwe.FullSerialize()), nil
+}
+
+func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) {
+ // cf. list of algorithms in func addPubKeys() below
+ keyEncryptionAlgorithms := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256, jose.ECDH_ES_A128KW, jose.ECDH_ES_A192KW, jose.ECDH_ES_A256KW}
+ // accept all algorithms defined in RFC 7518, section 5.1
+ contentEncryption := []jose.ContentEncryption{jose.A128CBC_HS256, jose.A192CBC_HS384, jose.A256CBC_HS512, jose.A128GCM, jose.A192GCM, jose.A256GCM}
+ jwe, err := jose.ParseEncrypted(string(jweString), keyEncryptionAlgorithms, contentEncryption)
+ if err != nil {
+ return nil, errors.New("jose.ParseEncrypted failed")
+ }
+
+ privKeys := kw.GetPrivateKeys(dc.Parameters)
+ if len(privKeys) == 0 {
+ return nil, errors.New("No private keys found for JWE decryption")
+ }
+ privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters)
+ if len(privKeysPasswords) != len(privKeys) {
+ return nil, errors.New("Private key password array length must be same as that of private keys")
+ }
+
+ for idx, privKey := range privKeys {
+ key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "JWE")
+ if err != nil {
+ return nil, err
+ }
+ _, _, plain, err := jwe.DecryptMulti(key)
+ if err == nil {
+ return plain, nil
+ }
+ }
+ return nil, errors.New("JWE: No suitable private key found for decryption")
+}
+
+func (kw *jweKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool {
+ return len(kw.GetPrivateKeys(dcparameters)) == 0
+}
+
+func (kw *jweKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["privkeys"]
+}
+
+func (kw *jweKeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["privkeys-passwords"]
+}
+
+func (kw *jweKeyWrapper) GetKeyIdsFromPacket(b64jwes string) ([]uint64, error) {
+ return nil, nil
+}
+
+func (kw *jweKeyWrapper) GetRecipients(b64jwes string) ([]string, error) {
+ return []string{"[jwe]"}, nil
+}
+
+func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error {
+ if len(pubKeys) == 0 {
+ return nil
+ }
+ for 
_, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "JWE") + if err != nil { + return err + } + + alg := jose.RSA_OAEP + switch key := key.(type) { + case *ecdsa.PublicKey: + alg = jose.ECDH_ES_A256KW + case *jose.JSONWebKey: + if key.Algorithm != "" { + alg = jose.KeyAlgorithm(key.Algorithm) + switch alg { + /* accepted algorithms */ + case jose.RSA_OAEP: + case jose.RSA_OAEP_256: + case jose.ECDH_ES_A128KW: + case jose.ECDH_ES_A192KW: + case jose.ECDH_ES_A256KW: + /* all others are rejected */ + default: + return fmt.Errorf("%s is an unsupported JWE key algorithm", alg) + } + } + } + + *joseRecipients = append(*joseRecipients, jose.Recipient{ + Algorithm: alg, + Key: key, + }) + } + return nil +} diff --git a/tools/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/tools/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go new file mode 100644 index 000000000..6ac0fcb95 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go @@ -0,0 +1,242 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package keyprovider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" +) + +type keyProviderKeyWrapper struct { + provider string + attrs keyproviderconfig.KeyProviderAttrs +} + +func (kw *keyProviderKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.provider." + kw.provider +} + +// NewKeyWrapper returns a new key wrapping interface using keyprovider +func NewKeyWrapper(p string, a keyproviderconfig.KeyProviderAttrs) keywrap.KeyWrapper { + return &keyProviderKeyWrapper{provider: p, attrs: a} +} + +type KeyProviderKeyWrapProtocolOperation string + +var ( + OpKeyWrap KeyProviderKeyWrapProtocolOperation = "keywrap" + OpKeyUnwrap KeyProviderKeyWrapProtocolOperation = "keyunwrap" +) + +// KeyProviderKeyWrapProtocolInput defines the input to the key provider binary or grpc method. +type KeyProviderKeyWrapProtocolInput struct { + // Operation is either "keywrap" or "keyunwrap" + Operation KeyProviderKeyWrapProtocolOperation `json:"op"` + // KeyWrapParams encodes the arguments to key wrap if operation is set to wrap + KeyWrapParams KeyWrapParams `json:"keywrapparams,omitempty"` + // KeyUnwrapParams encodes the arguments to key unwrap if operation is set to unwrap + KeyUnwrapParams KeyUnwrapParams `json:"keyunwrapparams,omitempty"` +} + +// KeyProviderKeyWrapProtocolOutput defines the output of the key provider binary or grpc method. 
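
For context, the protocol defined by these structs is a single JSON document handed to the provider: on stdin for a command invocation, or as raw bytes inside the grpc message defined later in this patch. A small sketch of constructing a wrap request with the exported types from this package; the EncryptConfig contents are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/keyprovider"
)

func main() {
	input, err := json.Marshal(keyprovider.KeyProviderKeyWrapProtocolInput{
		Operation: keyprovider.OpKeyWrap,
		KeyWrapParams: keyprovider.KeyWrapParams{
			Ec:       &config.EncryptConfig{Parameters: map[string][][]byte{}},
			OptsData: []byte("opaque layer-key options"),
		},
	})
	if err != nil {
		panic(err)
	}
	// prints a document of the form {"op":"keywrap","keywrapparams":{"ec":{...},"optsdata":"..."},...}
	fmt.Println(string(input))
}
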
+type KeyProviderKeyWrapProtocolOutput struct {
+ // KeyWrapResult encodes the results to key wrap if operation is to wrap
+ KeyWrapResults KeyWrapResults `json:"keywrapresults,omitempty"`
+ // KeyUnwrapResult encodes the result to key unwrap if operation is to unwrap
+ KeyUnwrapResults KeyUnwrapResults `json:"keyunwrapresults,omitempty"`
+}
+
+type KeyWrapParams struct {
+ Ec *config.EncryptConfig `json:"ec"`
+ OptsData []byte `json:"optsdata"`
+}
+
+type KeyUnwrapParams struct {
+ Dc *config.DecryptConfig `json:"dc"`
+ Annotation []byte `json:"annotation"`
+}
+
+type KeyUnwrapResults struct {
+ OptsData []byte `json:"optsdata"`
+}
+
+type KeyWrapResults struct {
+ Annotation []byte `json:"annotation"`
+}
+
+var runner utils.CommandExecuter
+
+func init() {
+ runner = utils.Runner{}
+}
+
+// WrapKeys calls the appropriate binary executable/grpc server for wrapping the session key for recipients and gets encrypted optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+
+ input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{
+ Operation: OpKeyWrap,
+ KeyWrapParams: KeyWrapParams{
+ Ec: ec,
+ OptsData: optsData,
+ },
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ if _, ok := ec.Parameters[kw.provider]; ok {
+ if kw.attrs.Command != nil {
+ protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command)
+ if err != nil {
+ return nil, fmt.Errorf("error while retrieving keyprovider protocol command output: %w", err)
+ }
+ return protocolOuput.KeyWrapResults.Annotation, nil
+ } else if kw.attrs.Grpc != "" {
+ protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap)
+ if err != nil {
+ return nil, fmt.Errorf("error while retrieving keyprovider protocol grpc output: %w", err)
+ }
+
+ return protocolOuput.KeyWrapResults.Annotation, nil
+ }
+ return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
+ }
+
+ return nil, nil
+}
+
+// UnwrapKey calls the appropriate binary executable/grpc server for unwrapping the session key based on the protocol given in annotation for recipients and gets decrypted optsData,
+// which describes the symmetric key used for decrypting the layer
+func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) {
+ input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{
+ Operation: OpKeyUnwrap,
+ KeyUnwrapParams: KeyUnwrapParams{
+ Dc: dc,
+ Annotation: jsonString,
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if kw.attrs.Command != nil {
+ protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command)
+ if err != nil {
+ // the error is returned to the caller; there is no fallback to other keyproviders here
+ return nil, err
+ }
+
+ return protocolOuput.KeyUnwrapResults.OptsData, nil
+ } else if kw.attrs.Grpc != "" {
+ protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyUnwrap)
+ if err != nil {
+ // the error is returned to the caller; there is no fallback to other keyproviders here
+ return nil, err
+ }
+
+ return protocolOuput.KeyUnwrapResults.OptsData, nil
+ }
+ return nil, errors.New("Unsupported keyprovider invocation. 
Supported invocation methods are grpc and cmd") +} + +func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput + cc, err := grpc.Dial(connString, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("error while dialing rpc server: %w", err) + } + defer func() { + derr := cc.Close() + if derr != nil { + log.WithError(derr).Error("Error closing grpc socket") + } + }() + + client := keyproviderpb.NewKeyProviderServiceClient(cc) + req := &keyproviderpb.KeyProviderKeyWrapProtocolInput{ + KeyProviderKeyWrapProtocolInput: input, + } + + if operation == OpKeyWrap { + grpcOutput, err = client.WrapKey(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("Error from grpc method: %w", err) + } + } else if operation == OpKeyUnwrap { + grpcOutput, err = client.UnWrapKey(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("Error from grpc method: %w", err) + } + } else { + return nil, errors.New("Unsupported operation") + } + + respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput() + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, fmt.Errorf("Error while unmarshalling grpc method output: %w", err) + } + + return &protocolOuput, nil +} + +func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + // Convert interface to command structure + respBytes, err := runner.Exec(command.Path, command.Args, input) + if err != nil { + return nil, err + } + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, fmt.Errorf("Error while unmarshalling binary executable command output: %w", err) + } + return &protocolOuput, nil +} + +// Return false as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return false +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetRecipients(_ string) ([]string, error) { + return nil, nil +} diff --git a/tools/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go b/tools/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go new file mode 100644 index 000000000..ed25e7dac --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go @@ -0,0 +1,48 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package keywrap
+
+import (
+ "github.com/containers/ocicrypt/config"
+)
+
+// KeyWrapper is the interface used for wrapping keys using
+// a specific encryption technology (pgp, jwe)
+type KeyWrapper interface {
+ WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error)
+ UnwrapKey(dc *config.DecryptConfig, annotation []byte) ([]byte, error)
+ GetAnnotationID() string
+
+ // NoPossibleKeys returns true if there is no possibility of performing
+ // decryption for parameters provided.
+ NoPossibleKeys(dcparameters map[string][][]byte) bool
+
+ // GetPrivateKeys (optional) gets the array of private keys. It is an optional implementation
+ // as in some key services, a private key may not be exportable (e.g. HSM)
+ // If not implemented, return nil
+ GetPrivateKeys(dcparameters map[string][][]byte) [][]byte
+
+ // GetKeyIdsFromPacket (optional) gets a list of key IDs. This is optional as some encryption
+ // schemes may not have a notion of key IDs
+ // If not implemented, return the nil slice
+ GetKeyIdsFromPacket(packet string) ([]uint64, error)
+
+ // GetRecipients (optional) gets a list of recipients. It is optional since some
+ // encryption schemes may not have a notion of recipients
+ // If not implemented, return the nil slice
+ GetRecipients(packet string) ([]string, error)
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/tools/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
new file mode 100644
index 000000000..4ab9bd978
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
@@ -0,0 +1,272 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. 
+*/
+
+package pgp
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net/mail"
+ "strconv"
+ "strings"
+
+ "github.com/containers/ocicrypt/config"
+ "github.com/containers/ocicrypt/keywrap"
+ "golang.org/x/crypto/openpgp"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+type gpgKeyWrapper struct {
+}
+
+// NewKeyWrapper returns a new key wrapping interface for pgp
+func NewKeyWrapper() keywrap.KeyWrapper {
+ return &gpgKeyWrapper{}
+}
+
+var (
+ // GPGDefaultEncryptConfig is the default configuration for layer encryption/decryption
+ GPGDefaultEncryptConfig = &packet.Config{
+ Rand: rand.Reader,
+ DefaultHash: crypto.SHA256,
+ DefaultCipher: packet.CipherAES256,
+ CompressionConfig: &packet.CompressionConfig{Level: 0}, // No compression
+ RSABits: 2048,
+ }
+)
+
+func (kw *gpgKeyWrapper) GetAnnotationID() string {
+ return "org.opencontainers.image.enc.keys.pgp"
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+ ciphertext := new(bytes.Buffer)
+ el, err := kw.createEntityList(ec)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create entity list: %w", err)
+ }
+ if len(el) == 0 {
+ // nothing to do -- not an error
+ return nil, nil
+ }
+
+ plaintextWriter, err := openpgp.Encrypt(ciphertext,
+ el, /*EntityList*/
+ nil, /* Sign*/
+ nil, /* FileHint */
+ GPGDefaultEncryptConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err = plaintextWriter.Write(optsData); err != nil {
+ return nil, err
+ } else if err = plaintextWriter.Close(); err != nil {
+ return nil, err
+ }
+ return ciphertext.Bytes(), err
+}
+
+// UnwrapKey unwraps the symmetric key with which the layer is encrypted
+// This symmetric key is encrypted in the PGP payload. 
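
Before the unwrap path, a minimal sketch of how this wrapper can be driven end to end. The keyring file names are hypothetical, the recipient must match a uid on the public keyring, and the parameter keys ("gpg-pubkeyringfile", "gpg-recipients", "gpg-privatekeys", "gpg-privatekeys-passwords") are the ones this file reads:

package main

import (
	"fmt"
	"os"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/pgp"
)

func main() {
	// hypothetical keyrings exported via `gpg --export` and `gpg --export-secret-keys`
	pubRing, _ := os.ReadFile("pubring.gpg")
	secRing, _ := os.ReadFile("secring.gpg")

	kw := pgp.NewKeyWrapper()
	wrapped, err := kw.WrapKeys(&config.EncryptConfig{Parameters: map[string][][]byte{
		"gpg-pubkeyringfile": {pubRing},
		"gpg-recipients":     {[]byte("test@example.com")},
	}}, []byte("opaque layer-key options"))
	if err != nil || wrapped == nil {
		panic("wrap failed or no key found for the recipient")
	}

	// an empty password entry suffices for an unencrypted secret key
	optsData, err := kw.UnwrapKey(&config.DecryptConfig{Parameters: map[string][][]byte{
		"gpg-privatekeys":           {secRing},
		"gpg-privatekeys-passwords": {[]byte("")},
	}}, wrapped)
	if err != nil {
		panic(err)
	}
	fmt.Printf("recovered %d bytes of key options\n", len(optsData))
}
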
+func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ([]byte, error) {
+ pgpPrivateKeys, pgpPrivateKeysPwd, err := kw.getKeyParameters(dc.Parameters)
+ if err != nil {
+ return nil, err
+ }
+
+ for idx, pgpPrivateKey := range pgpPrivateKeys {
+ r := bytes.NewBuffer(pgpPrivateKey)
+ entityList, err := openpgp.ReadKeyRing(r)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse private keys: %w", err)
+ }
+
+ var prompt openpgp.PromptFunction
+ if len(pgpPrivateKeysPwd) > idx {
+ responded := false
+ prompt = func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
+ if responded {
+ return nil, fmt.Errorf("don't seem to have the right password")
+ }
+ responded = true
+ for _, key := range keys {
+ if key.PrivateKey != nil {
+ _ = key.PrivateKey.Decrypt(pgpPrivateKeysPwd[idx])
+ }
+ }
+ return pgpPrivateKeysPwd[idx], nil
+ }
+ }
+
+ r = bytes.NewBuffer(pgpPacket)
+ md, err := openpgp.ReadMessage(r, entityList, prompt, GPGDefaultEncryptConfig)
+ if err != nil {
+ continue
+ }
+ // we get the plain key options back
+ optsData, err := io.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ continue
+ }
+ return optsData, nil
+ }
+ return nil, errors.New("PGP: No suitable key found to unwrap key")
+}
+
+// GetKeyIdsFromPacket converts the base64 encoded PGPPacket to uint64 keyIds
+func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, error) {
+
+ var keyids []uint64
+ for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") {
+ pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket)
+ if err != nil {
+ return nil, fmt.Errorf("could not decode base64 encoded PGP packet: %w", err)
+ }
+ newids, err := kw.getKeyIDs(pgpPacket)
+ if err != nil {
+ return nil, err
+ }
+ keyids = append(keyids, newids...) 
+ }
+ return keyids, nil
+}
+
+// getKeyIDs parses a PGPPacket and gets the list of recipients' key IDs
+func (kw *gpgKeyWrapper) getKeyIDs(pgpPacket []byte) ([]uint64, error) {
+ var keyids []uint64
+
+ kbuf := bytes.NewBuffer(pgpPacket)
+ packets := packet.NewReader(kbuf)
+ParsePackets:
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break ParsePackets
+ }
+ if err != nil {
+ return []uint64{}, fmt.Errorf("packets.Next() failed: %w", err)
+ }
+ switch p := p.(type) {
+ case *packet.EncryptedKey:
+ keyids = append(keyids, p.KeyId)
+ case *packet.SymmetricallyEncrypted:
+ break ParsePackets
+ }
+ }
+ return keyids, nil
+}
+
+// GetRecipients converts the wrappedKeys to an array of recipients
+func (kw *gpgKeyWrapper) GetRecipients(b64pgpPackets string) ([]string, error) {
+ keyIds, err := kw.GetKeyIdsFromPacket(b64pgpPackets)
+ if err != nil {
+ return nil, err
+ }
+ var array []string
+ for _, keyid := range keyIds {
+ array = append(array, "0x"+strconv.FormatUint(keyid, 16))
+ }
+ return array, nil
+}
+
+func (kw *gpgKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool {
+ return len(kw.GetPrivateKeys(dcparameters)) == 0
+}
+
+func (kw *gpgKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["gpg-privatekeys"]
+}
+
+func (kw *gpgKeyWrapper) getKeyParameters(dcparameters map[string][][]byte) ([][]byte, [][]byte, error) {
+
+ privKeys := kw.GetPrivateKeys(dcparameters)
+ if len(privKeys) == 0 {
+ return nil, nil, errors.New("GPG: Missing private key parameter")
+ }
+
+ return privKeys, dcparameters["gpg-privatekeys-passwords"], nil
+}
+
+// createEntityList creates the openpgp EntityList by reading the KeyRing
+// first and then filtering out recipients' keys
+func (kw *gpgKeyWrapper) createEntityList(ec *config.EncryptConfig) (openpgp.EntityList, error) {
+ pgpPubringFile := ec.Parameters["gpg-pubkeyringfile"]
+ if len(pgpPubringFile) == 0 {
+ return nil, nil
+ }
+ r := bytes.NewReader(pgpPubringFile[0])
+
+ entityList, err := openpgp.ReadKeyRing(r)
+ if err != nil {
+ return nil, err
+ }
+
+ gpgRecipients := ec.Parameters["gpg-recipients"]
+ if len(gpgRecipients) == 0 {
+ return nil, nil
+ }
+
+ rSet := make(map[string]int)
+ for _, r := range gpgRecipients {
+ rSet[string(r)] = 0
+ }
+
+ var filteredList openpgp.EntityList
+ for _, entity := range entityList {
+ for k := range entity.Identities {
+ addr, err := mail.ParseAddress(k)
+ if err != nil {
+ return nil, err
+ }
+ for _, r := range gpgRecipients {
+ recp := string(r)
+ if strings.Compare(addr.Name, recp) == 0 || strings.Compare(addr.Address, recp) == 0 {
+ filteredList = append(filteredList, entity)
+ rSet[recp] = rSet[recp] + 1
+ }
+ }
+ }
+ }
+
+ // make sure we found keys for all the recipients...
+ var buffer bytes.Buffer
+ notFound := false
+ buffer.WriteString("PGP: No key found for the following recipients: ")
+
+ for k, v := range rSet {
+ if v == 0 {
+ if notFound {
+ buffer.WriteString(", ")
+ }
+ buffer.WriteString(k)
+ notFound = true
+ }
+ }
+
+ if notFound {
+ return nil, errors.New(buffer.String())
+ }
+
+ return filteredList, nil
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/tools/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
new file mode 100644
index 000000000..b9a83c536
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
@@ -0,0 +1,152 @@
+/*
+ Copyright The ocicrypt Authors. 
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package pkcs11
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/containers/ocicrypt/config"
+ "github.com/containers/ocicrypt/crypto/pkcs11"
+ "github.com/containers/ocicrypt/keywrap"
+ "github.com/containers/ocicrypt/utils"
+)
+
+type pkcs11KeyWrapper struct {
+}
+
+func (kw *pkcs11KeyWrapper) GetAnnotationID() string {
+ return "org.opencontainers.image.enc.keys.pkcs11"
+}
+
+// NewKeyWrapper returns a new key wrapping interface using pkcs11
+func NewKeyWrapper() keywrap.KeyWrapper {
+ return &pkcs11KeyWrapper{}
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+ // append({}, ...) allocates a fresh backing array, and that's necessary to guarantee concurrent calls to WrapKeys (as in c/image/copy.Image)
+ // can't race writing to the same backing array.
+ pubKeys := append([][]byte{}, ec.Parameters["pkcs11-pubkeys"]...) // In Go 1.21, slices.Clone(ec.Parameters["pkcs11-pubkeys"])
+ pubKeys = append(pubKeys, ec.Parameters["pkcs11-yamls"]...)
+ pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, pubKeys)
+ if err != nil {
+ return nil, err
+ }
+ // no recipients is not an error... 
+ if len(pkcs11Recipients) == 0 { + return nil, nil + } + + jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData) + if err != nil { + return nil, fmt.Errorf("PKCS11 EncryptMulitple failed: %w", err) + } + return jsonString, nil +} + +func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + var pkcs11PrivKeys []*pkcs11.Pkcs11KeyFileObject + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for PKCS11 decryption") + } + + p11conf, err := p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, nil, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PrivKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PrivKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PrivKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + pkcs11PrivKeys = append(pkcs11PrivKeys, pkcs11PrivKey) + default: + continue + } + } + + plaintext, err := pkcs11.Decrypt(pkcs11PrivKeys, jsonString) + if err == nil { + return plaintext, nil + } + + return nil, fmt.Errorf("PKCS11: No suitable private key found for decryption: %w", err) +} + +func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *pkcs11KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["pkcs11-yamls"] +} + +func (kw *pkcs11KeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +func (kw *pkcs11KeyWrapper) GetRecipients(_ string) ([]string, error) { + return []string{"[pkcs11]"}, nil +} + +func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, error) { + var pkcs11Keys []interface{} + + if len(pubKeys) == 0 { + return pkcs11Keys, nil + } + + p11conf, err := p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PubKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PubKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PubKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + } + pkcs11Keys = append(pkcs11Keys, key) + } + return pkcs11Keys, nil +} + +func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error) { + if _, ok := dcparameters["pkcs11-config"]; ok { + return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0]) + } + return nil, nil +} diff --git a/tools/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/tools/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go new file mode 100644 index 000000000..7ca32fc80 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go @@ -0,0 +1,137 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package pkcs7
+
+import (
+ "crypto"
+ "crypto/x509"
+ "errors"
+ "fmt"
+
+ "github.com/containers/ocicrypt/config"
+ "github.com/containers/ocicrypt/keywrap"
+ "github.com/containers/ocicrypt/utils"
+ "github.com/smallstep/pkcs7"
+)
+
+type pkcs7KeyWrapper struct {
+}
+
+// NewKeyWrapper returns a new key wrapping interface using pkcs7
+func NewKeyWrapper() keywrap.KeyWrapper {
+ return &pkcs7KeyWrapper{}
+}
+
+func (kw *pkcs7KeyWrapper) GetAnnotationID() string {
+ return "org.opencontainers.image.enc.keys.pkcs7"
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *pkcs7KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+ x509Certs, err := collectX509s(ec.Parameters["x509s"])
+ if err != nil {
+ return nil, err
+ }
+ // no recipients is not an error...
+ if len(x509Certs) == 0 {
+ return nil, nil
+ }
+
+ pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM
+ return pkcs7.Encrypt(optsData, x509Certs)
+}
+
+func collectX509s(x509s [][]byte) ([]*x509.Certificate, error) {
+ if len(x509s) == 0 {
+ return nil, nil
+ }
+ var x509Certs []*x509.Certificate
+ for _, x509 := range x509s {
+ x509Cert, err := utils.ParseCertificate(x509, "PKCS7")
+ if err != nil {
+ return nil, err
+ }
+ x509Certs = append(x509Certs, x509Cert)
+ }
+ return x509Certs, nil
+}
+
+func (kw *pkcs7KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool {
+ return len(kw.GetPrivateKeys(dcparameters)) == 0
+}
+
+func (kw *pkcs7KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["privkeys"]
+}
+
+func (kw *pkcs7KeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["privkeys-passwords"]
+}
+
+// UnwrapKey unwraps the symmetric key with which the layer is encrypted
+// This symmetric key is encrypted in the PKCS7 payload. 
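
A sketch of the same round trip for this wrapper. The certificate and key file names are hypothetical, the pair is assumed to be an RSA certificate with an unencrypted PEM key, and the parameter keys ("x509s", "privkeys", "privkeys-passwords") are the ones read in this file:

package main

import (
	"fmt"
	"os"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/pkcs7"
)

func main() {
	// hypothetical PEM files for one recipient
	cert, _ := os.ReadFile("recipient.crt")
	key, _ := os.ReadFile("recipient.key")

	kw := pkcs7.NewKeyWrapper()
	wrapped, err := kw.WrapKeys(&config.EncryptConfig{
		Parameters: map[string][][]byte{"x509s": {cert}},
	}, []byte("opaque layer-key options"))
	if err != nil {
		panic(err)
	}

	// decryption needs the matching certificate and private key;
	// the password entry may be empty for an unencrypted key
	optsData, err := kw.UnwrapKey(&config.DecryptConfig{
		Parameters: map[string][][]byte{
			"x509s":              {cert},
			"privkeys":           {key},
			"privkeys-passwords": {[]byte("")},
		},
	}, wrapped)
	if err != nil {
		panic(err)
	}
	fmt.Printf("recovered %d bytes of key options\n", len(optsData))
}
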
+func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byte) ([]byte, error) {
+ privKeys := kw.GetPrivateKeys(dc.Parameters)
+ if len(privKeys) == 0 {
+ return nil, errors.New("no private keys found for PKCS7 decryption")
+ }
+ privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters)
+ if len(privKeysPasswords) != len(privKeys) {
+ return nil, errors.New("private key password array length must be same as that of private keys")
+ }
+
+ x509Certs, err := collectX509s(dc.Parameters["x509s"])
+ if err != nil {
+ return nil, err
+ }
+ if len(x509Certs) == 0 {
+ return nil, errors.New("no x509 certificates found needed for PKCS7 decryption")
+ }
+
+ p7, err := pkcs7.Parse(pkcs7Packet)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse PKCS7 packet: %w", err)
+ }
+
+ for idx, privKey := range privKeys {
+ key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "PKCS7")
+ if err != nil {
+ return nil, err
+ }
+ for _, x509Cert := range x509Certs {
+ optsData, err := p7.Decrypt(x509Cert, crypto.PrivateKey(key))
+ if err != nil {
+ continue
+ }
+ return optsData, nil
+ }
+ }
+ return nil, errors.New("PKCS7: No suitable private key found for decryption")
+}
+
+// GetKeyIdsFromPacket converts the base64 encoded Packet to uint64 keyIds;
+// We cannot do this with pkcs7
+func (kw *pkcs7KeyWrapper) GetKeyIdsFromPacket(b64pkcs7Packets string) ([]uint64, error) {
+ return nil, nil
+}
+
+// GetRecipients converts the wrappedKeys to an array of recipients
+// We cannot do this with pkcs7
+func (kw *pkcs7KeyWrapper) GetRecipients(b64pkcs7Packets string) ([]string, error) {
+ return []string{"[pkcs7]"}, nil
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/reader.go b/tools/vendor/github.com/containers/ocicrypt/reader.go
new file mode 100644
index 000000000..a93eec8e9
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/reader.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+ "io"
+)
+
+type readerAtReader struct {
+ r io.ReaderAt
+ off int64
+}
+
+// ReaderFromReaderAt takes an io.ReaderAt and returns an io.Reader
+func ReaderFromReaderAt(r io.ReaderAt) io.Reader {
+ return &readerAtReader{
+ r: r,
+ off: 0,
+ }
+}
+
+func (rar *readerAtReader) Read(p []byte) (n int, err error) {
+ n, err = rar.r.ReadAt(p, rar.off)
+ rar.off += int64(n)
+ return n, err
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/utils/delayedreader.go b/tools/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
new file mode 100644
index 000000000..3b939bdea
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
@@ -0,0 +1,109 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "io" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// DelayedReader wraps a io.Reader and allows a client to use the Reader +// interface. The DelayedReader holds back some buffer to the client +// so that it can report any error that occurred on the Reader it wraps +// early to the client while it may still have held some data back. +type DelayedReader struct { + reader io.Reader // Reader to Read() bytes from and delay them + err error // error that occurred on the reader + buffer []byte // delay buffer + bufbytes int // number of bytes in the delay buffer to give to Read(); on '0' we return 'EOF' to caller + bufoff int // offset in the delay buffer to give to Read() +} + +// NewDelayedReader wraps a io.Reader and allocates a delay buffer of bufsize bytes +func NewDelayedReader(reader io.Reader, bufsize uint) io.Reader { + return &DelayedReader{ + reader: reader, + buffer: make([]byte, bufsize), + } +} + +// Read implements the io.Reader interface +func (dr *DelayedReader) Read(p []byte) (int, error) { + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + + // if we are completely drained, return io.EOF + if dr.err == io.EOF && dr.bufbytes == 0 { + return 0, io.EOF + } + + // only at the beginning we fill our delay buffer in an extra step + if dr.bufbytes < len(dr.buffer) && dr.err == nil { + dr.bufbytes, dr.err = FillBuffer(dr.reader, dr.buffer) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + // dr.err != nil means we have EOF and can drain the delay buffer + // otherwise we need to still read from the reader + + var tmpbuf []byte + tmpbufbytes := 0 + if dr.err == nil { + tmpbuf = make([]byte, len(p)) + tmpbufbytes, dr.err = FillBuffer(dr.reader, tmpbuf) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + + // copy out of the delay buffer into 'p' + tocopy1 := min(len(p), dr.bufbytes) + c1 := copy(p[:tocopy1], dr.buffer[dr.bufoff:]) + dr.bufoff += c1 + dr.bufbytes -= c1 + + c2 := 0 + // can p still hold more data? + if c1 < len(p) { + // copy out of the tmpbuf into 'p' + c2 = copy(p[tocopy1:], tmpbuf[:tmpbufbytes]) + } + + // if tmpbuf holds data we need to hold onto, copy them + // into the delay buffer + if tmpbufbytes-c2 > 0 { + // left-shift the delay buffer and append the tmpbuf's remaining data + dr.buffer = dr.buffer[dr.bufoff : dr.bufoff+dr.bufbytes] + dr.buffer = append(dr.buffer, tmpbuf[c2:tmpbufbytes]...) + dr.bufoff = 0 + dr.bufbytes = len(dr.buffer) + } + + var err error + if dr.bufbytes == 0 { + err = io.EOF + } + return c1 + c2, err +} diff --git a/tools/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/tools/vendor/github.com/containers/ocicrypt/utils/ioutils.go new file mode 100644 index 000000000..c6265168a --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/utils/ioutils.go @@ -0,0 +1,58 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "bytes" + "fmt" + "io" + "os/exec" +) + +// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns +// EOF if an EOF was encountered or any other error. +func FillBuffer(reader io.Reader, buffer []byte) (int, error) { + n, err := io.ReadFull(reader, buffer) + if err == io.ErrUnexpectedEOF { + return n, io.EOF + } + return n, err +} + +// first argument is the command, like cat or echo, +// the second is the list of args to pass to it +type CommandExecuter interface { + Exec(string, []string, []byte) ([]byte, error) +} + +type Runner struct{} + +// ExecuteCommand is used to execute a linux command line command and return the output of the command with an error if it exists. +func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) { + var out bytes.Buffer + var stderr bytes.Buffer + stdInputBuffer := bytes.NewBuffer(input) + cmd := exec.Command(cmdName, args...) + cmd.Stdin = stdInputBuffer + cmd.Stdout = &out + cmd.Stderr = &stderr + err := cmd.Run() + if err != nil { + return nil, fmt.Errorf("Error while running command: %s. stderr: %s: %w", cmdName, stderr.String(), err) + } + return out.Bytes(), nil +} diff --git a/tools/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go b/tools/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go new file mode 100644 index 000000000..dc477d3cf --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go @@ -0,0 +1,243 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: keyprovider.proto + +package keyprovider + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type KeyProviderKeyWrapProtocolInput struct { + KeyProviderKeyWrapProtocolInput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolInput,proto3" json:"KeyProviderKeyWrapProtocolInput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolInput) Reset() { *m = KeyProviderKeyWrapProtocolInput{} } +func (m *KeyProviderKeyWrapProtocolInput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolInput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolInput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{0} +} + +func (m *KeyProviderKeyWrapProtocolInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolInput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolInput) GetKeyProviderKeyWrapProtocolInput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolInput + } + return nil +} + +type KeyProviderKeyWrapProtocolOutput struct { + KeyProviderKeyWrapProtocolOutput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolOutput,proto3" json:"KeyProviderKeyWrapProtocolOutput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolOutput) Reset() { *m = KeyProviderKeyWrapProtocolOutput{} } +func (m *KeyProviderKeyWrapProtocolOutput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolOutput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{1} +} + +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolOutput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolOutput) GetKeyProviderKeyWrapProtocolOutput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolOutput + } + return nil +} + +func init() { + 
proto.RegisterType((*KeyProviderKeyWrapProtocolInput)(nil), "keyprovider.keyProviderKeyWrapProtocolInput") + proto.RegisterType((*KeyProviderKeyWrapProtocolOutput)(nil), "keyprovider.keyProviderKeyWrapProtocolOutput") +} + +func init() { + proto.RegisterFile("keyprovider.proto", fileDescriptor_da74c8e785ad390c) +} + +var fileDescriptor_da74c8e785ad390c = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4e, 0xad, 0x2c, + 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x03, 0x32, 0x4a, 0xf2, 0x85, 0xb8, 0x91, 0x84, + 0x94, 0xb2, 0xb9, 0xe4, 0x81, 0xdc, 0x00, 0x28, 0xd7, 0x3b, 0xb5, 0x32, 0xbc, 0x28, 0xb1, 0x20, + 0x00, 0xa4, 0x2e, 0x39, 0x3f, 0xc7, 0x33, 0xaf, 0xa0, 0xb4, 0x44, 0xc8, 0x83, 0x4b, 0xde, 0x1b, + 0xbf, 0x12, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x42, 0xca, 0x94, 0xf2, 0xb8, 0x14, 0x70, + 0x5b, 0xe6, 0x5f, 0x5a, 0x02, 0xb2, 0xcd, 0x8b, 0x4b, 0xc1, 0x9b, 0x80, 0x1a, 0xa8, 0x75, 0x04, + 0xd5, 0x19, 0xbd, 0x62, 0xe4, 0x12, 0x42, 0x52, 0x14, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a, + 0x94, 0xc1, 0xc5, 0x0e, 0x52, 0x0c, 0x94, 0x11, 0xd2, 0xd1, 0x43, 0x0e, 0x1f, 0x02, 0x21, 0x21, + 0xa5, 0x4b, 0xa4, 0x6a, 0x88, 0xf5, 0x4a, 0x0c, 0x42, 0x59, 0x5c, 0x9c, 0xa1, 0x79, 0xf4, 0xb1, + 0xcb, 0x89, 0x37, 0x0a, 0x39, 0x62, 0x93, 0xd8, 0xc0, 0x91, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x9a, 0x10, 0xcb, 0xf9, 0x01, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// KeyProviderServiceClient is the client API for KeyProviderService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyProviderServiceClient interface { + WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) +} + +type keyProviderServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKeyProviderServiceClient(cc grpc.ClientConnInterface) KeyProviderServiceClient { + return &keyProviderServiceClient{cc} +} + +func (c *keyProviderServiceClient) WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/WrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyProviderServiceClient) UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/UnWrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyProviderServiceServer is the server API for KeyProviderService service. 
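
A minimal sketch of a provider implementing the server interface defined just below; the echo behavior and listen address are placeholders for a real key management service, which would unmarshal the embedded JSON protocol input and return a KeyProviderKeyWrapProtocolOutput document:

package main

import (
	"context"
	"net"

	keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider"
	"google.golang.org/grpc"
)

type echoProvider struct{}

// WrapKey: a stand-in that echoes the protocol input bytes back; a real
// provider would wrap the key material and return a JSON output document.
func (echoProvider) WrapKey(ctx context.Context, in *keyproviderpb.KeyProviderKeyWrapProtocolInput) (*keyproviderpb.KeyProviderKeyWrapProtocolOutput, error) {
	return &keyproviderpb.KeyProviderKeyWrapProtocolOutput{
		KeyProviderKeyWrapProtocolOutput: in.GetKeyProviderKeyWrapProtocolInput(),
	}, nil
}

func (echoProvider) UnWrapKey(ctx context.Context, in *keyproviderpb.KeyProviderKeyWrapProtocolInput) (*keyproviderpb.KeyProviderKeyWrapProtocolOutput, error) {
	return &keyproviderpb.KeyProviderKeyWrapProtocolOutput{
		KeyProviderKeyWrapProtocolOutput: in.GetKeyProviderKeyWrapProtocolInput(),
	}, nil
}

func main() {
	// hypothetical listen address; clients pass it as the grpc connString
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	keyproviderpb.RegisterKeyProviderServiceServer(s, echoProvider{})
	_ = s.Serve(lis)
}
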
+type KeyProviderServiceServer interface { + WrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) +} + +// UnimplementedKeyProviderServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyProviderServiceServer struct { +} + +func (*UnimplementedKeyProviderServiceServer) WrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method WrapKey not implemented") +} +func (*UnimplementedKeyProviderServiceServer) UnWrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnWrapKey not implemented") +} + +func RegisterKeyProviderServiceServer(s *grpc.Server, srv KeyProviderServiceServer) { + s.RegisterService(&_KeyProviderService_serviceDesc, srv) +} + +func _KeyProviderService_WrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).WrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/WrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).WrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyProviderService_UnWrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/UnWrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyProviderService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "keyprovider.KeyProviderService", + HandlerType: (*KeyProviderServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "WrapKey", + Handler: _KeyProviderService_WrapKey_Handler, + }, + { + MethodName: "UnWrapKey", + Handler: _KeyProviderService_UnWrapKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "keyprovider.proto", +} diff --git a/tools/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto b/tools/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto new file mode 100644 index 000000000..a71f0a592 --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package keyprovider; +option go_package = "keyprovider"; + +message keyProviderKeyWrapProtocolInput { + bytes KeyProviderKeyWrapProtocolInput = 1; +} + +message keyProviderKeyWrapProtocolOutput { + bytes KeyProviderKeyWrapProtocolOutput = 1; +} + +service KeyProviderService { + rpc 
WrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; + rpc UnWrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; +} \ No newline at end of file diff --git a/tools/vendor/github.com/containers/ocicrypt/utils/testing.go b/tools/vendor/github.com/containers/ocicrypt/utils/testing.go new file mode 100644 index 000000000..050aa885e --- /dev/null +++ b/tools/vendor/github.com/containers/ocicrypt/utils/testing.go @@ -0,0 +1,174 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "time" +) + +// CreateRSAKey creates an RSA key +func CreateRSAKey(bits int) (*rsa.PrivateKey, error) { + key, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, fmt.Errorf("rsa.GenerateKey failed: %w", err) + } + return key, nil +} + +// CreateECDSAKey creates an elliptic curve key for the given curve +func CreateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, fmt.Errorf("ecdsa.GenerateKey failed: %w", err) + } + return key, nil +} + +// CreateRSATestKey creates an RSA key of the given size and returns +// the public and private key in PEM or DER format +func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) { + key, err := CreateRSAKey(bits) + if err != nil { + return nil, nil, err + } + + pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + if err != nil { + return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err) + } + privData := x509.MarshalPKCS1PrivateKey(key) + + // no more encoding needed for DER + if !pemencode { + return pubData, privData, nil + } + + publicKey := pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: pubData, + }) + + var block *pem.Block + + typ := "RSA PRIVATE KEY" + if len(password) > 0 { + block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility + if err != nil { + return nil, nil, fmt.Errorf("x509.EncryptPEMBlock failed: %w", err) + } + } else { + block = &pem.Block{ + Type: typ, + Bytes: privData, + } + } + + privateKey := pem.EncodeToMemory(block) + + return publicKey, privateKey, nil +} + +// CreateECDSATestKey creates and elliptic curve key for the given curve and returns +// the public and private key in DER format +func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) { + key, err := CreateECDSAKey(curve) + if err != nil { + return nil, nil, err + } + + pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + if err != nil { + return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err) + } + + privData, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, 
fmt.Errorf("x509.MarshalECPrivateKey failed: %w", err)
+	}
+
+	return pubData, privData, nil
+}
+
+// CreateTestCA creates a root CA for testing
+func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) {
+	key, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return nil, nil, fmt.Errorf("rsa.GenerateKey failed: %w", err)
+	}
+
+	ca := &x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			CommonName: "test-ca",
+		},
+		NotBefore:             time.Now(),
+		NotAfter:              time.Now().AddDate(1, 0, 0),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+	}
+	caCert, err := certifyKey(&key.PublicKey, ca, key, ca)
+
+	return key, caCert, err
+}
+
+// CertifyKey certifies a public key using the given CA's private key and cert.
+// The certificate template for the public key is optional.
+func CertifyKey(pubbytes []byte, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
+	pubKey, err := ParsePublicKey(pubbytes, "CertifyKey")
+	if err != nil {
+		return nil, err
+	}
+	return certifyKey(pubKey, template, caKey, caCert)
+}
+
+func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
+	if template == nil {
+		template = &x509.Certificate{
+			SerialNumber: big.NewInt(1),
+			Subject: pkix.Name{
+				CommonName: "testkey",
+			},
+			NotBefore:             time.Now(),
+			NotAfter:              time.Now().Add(time.Hour),
+			IsCA:                  false,
+			KeyUsage:              x509.KeyUsageDigitalSignature,
+			BasicConstraintsValid: true,
+		}
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey)
+	if err != nil {
+		return nil, fmt.Errorf("x509.CreateCertificate failed: %w", err)
+	}
+
+	cert, err := x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, fmt.Errorf("x509.ParseCertificate failed: %w", err)
+	}
+
+	return cert, nil
+}
diff --git a/tools/vendor/github.com/containers/ocicrypt/utils/utils.go b/tools/vendor/github.com/containers/ocicrypt/utils/utils.go
new file mode 100644
index 000000000..f653f2efc
--- /dev/null
+++ b/tools/vendor/github.com/containers/ocicrypt/utils/utils.go
@@ -0,0 +1,249 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+	"github.com/go-jose/go-jose/v4"
+	"golang.org/x/crypto/openpgp"
+)
+
+// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key
+func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
+	jwk := jose.JSONWebKey{}
+	err := jwk.UnmarshalJSON(privKey)
+	if err != nil {
+		return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
+	}
+	if jwk.IsPublic() {
+		return nil, fmt.Errorf("%s: JWK is not a private key", prefix)
+	}
+	return &jwk, nil
+}
+
+// parseJWKPublicKey parses the input byte array as a JWK
+func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
+	jwk := jose.JSONWebKey{}
+	err := jwk.UnmarshalJSON(privKey)
+	if err != nil {
+		return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
+	}
+	if !jwk.IsPublic() {
+		return nil, fmt.Errorf("%s: JWK is not a public key", prefix)
+	}
+	return &jwk, nil
+}
+
+// parsePkcs11PrivateKeyYaml parses the input byte array as a pkcs11 key file in YAML format
+func parsePkcs11PrivateKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) {
+	// if the URI does not have enough attributes, we will throw an error when decrypting
+	return pkcs11.ParsePkcs11KeyFile(yaml)
+}
+
+// parsePkcs11PublicKeyYaml parses the input byte array as a pkcs11 key file in YAML format
+func parsePkcs11PublicKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) {
+	// if the URI does not have enough attributes, we will throw an error when decrypting
+	return pkcs11.ParsePkcs11KeyFile(yaml)
+}
+
+// IsPasswordError checks whether an error is related to a missing or wrong
+// password
+func IsPasswordError(err error) bool {
+	if err == nil {
+		return false
+	}
+	msg := strings.ToLower(err.Error())
+
+	return strings.Contains(msg, "password") &&
+		(strings.Contains(msg, "missing") || strings.Contains(msg, "wrong"))
+}
+
+// ParsePrivateKey tries to parse a private key in DER format first and
+// PEM format after, returning an error if the parsing failed
+func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{}, error) {
+	key, err := x509.ParsePKCS8PrivateKey(privKey)
+	if err != nil {
+		key, err = x509.ParsePKCS1PrivateKey(privKey)
+		if err != nil {
+			key, err = x509.ParseECPrivateKey(privKey)
+		}
+	}
+	if err != nil {
+		block, _ := pem.Decode(privKey)
+		if block != nil {
+			var der []byte
+			if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
+				if privKeyPassword == nil {
+					return nil, fmt.Errorf("%s: Missing password for encrypted private key", prefix)
+				}
+				der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
+				if err != nil {
+					return nil, fmt.Errorf("%s: Wrong password: could not decrypt private key", prefix)
+				}
+			} else {
+				der = block.Bytes
+			}
+
+			key, err = x509.ParsePKCS8PrivateKey(der)
+			if err != nil {
+				key, err = x509.ParsePKCS1PrivateKey(der)
+				if err != nil {
+					return nil, fmt.Errorf("%s: Could not parse private key: %w", prefix, err)
+				}
+			}
+		} else {
+			key, err = parseJWKPrivateKey(privKey, prefix)
+			if err != nil {
+				key, err = parsePkcs11PrivateKeyYaml(privKey, prefix)
+			}
+		}
+	}
+	return key, err
+}
+
+// IsPrivateKey returns true in case the given byte array represents a
private key
+// It returns an error if, for example, the password is wrong
+func IsPrivateKey(data []byte, password []byte) (bool, error) {
+	_, err := ParsePrivateKey(data, password, "")
+	return err == nil, err
+}
+
+// IsPkcs11PrivateKey returns true in case the given byte array represents a pkcs11 private key
+func IsPkcs11PrivateKey(data []byte) bool {
+	return pkcs11.IsPkcs11PrivateKey(data)
+}
+
+// ParsePublicKey tries to parse a public key in DER format first and
+// PEM format after, returning an error if the parsing failed
+func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) {
+	key, err := x509.ParsePKIXPublicKey(pubKey)
+	if err != nil {
+		block, _ := pem.Decode(pubKey)
+		if block != nil {
+			key, err = x509.ParsePKIXPublicKey(block.Bytes)
+			if err != nil {
+				return nil, fmt.Errorf("%s: Could not parse public key: %w", prefix, err)
+			}
+		} else {
+			key, err = parseJWKPublicKey(pubKey, prefix)
+			if err != nil {
+				key, err = parsePkcs11PublicKeyYaml(pubKey, prefix)
+			}
+		}
+	}
+	return key, err
+}
+
+// IsPublicKey returns true in case the given byte array represents a public key
+func IsPublicKey(data []byte) bool {
+	_, err := ParsePublicKey(data, "")
+	return err == nil
+}
+
+// IsPkcs11PublicKey returns true in case the given byte array represents a pkcs11 public key
+func IsPkcs11PublicKey(data []byte) bool {
+	return pkcs11.IsPkcs11PublicKey(data)
+}
+
+// ParseCertificate tries to parse a certificate in DER format first and
+// PEM format after, returning an error if the parsing failed
+func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) {
+	x509Cert, err := x509.ParseCertificate(certBytes)
+	if err != nil {
+		block, _ := pem.Decode(certBytes)
+		if block == nil {
+			return nil, fmt.Errorf("%s: Could not PEM decode x509 certificate", prefix)
+		}
+		x509Cert, err = x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return nil, fmt.Errorf("%s: Could not parse x509 certificate: %w", prefix, err)
+		}
+	}
+	return x509Cert, err
+}
+
+// IsCertificate returns true in case the given byte array represents an x.509 certificate
+func IsCertificate(data []byte) bool {
+	_, err := ParseCertificate(data, "")
+	return err == nil
+}
+
+// IsGPGPrivateKeyRing returns true in case the given byte array represents a GPG private key ring file
+func IsGPGPrivateKeyRing(data []byte) bool {
+	r := bytes.NewBuffer(data)
+	_, err := openpgp.ReadKeyRing(r)
+	return err == nil
+}
+
+// SortDecryptionKeys parses a list of comma-separated base64 entries and sorts the data into
+// a map.
Each entry in the list may be either a GPG private key ring, private key, or x.509 +// certificate +func SortDecryptionKeys(b64ItemList string) (map[string][][]byte, error) { + dcparameters := make(map[string][][]byte) + + for _, b64Item := range strings.Split(b64ItemList, ",") { + var password []byte + b64Data := strings.Split(b64Item, ":") + keyData, err := base64.StdEncoding.DecodeString(b64Data[0]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key") + } + if len(b64Data) == 2 { + password, err = base64.StdEncoding.DecodeString(b64Data[1]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key password") + } + } + var key string + isPrivKey, err := IsPrivateKey(keyData, password) + if IsPasswordError(err) { + return nil, err + } + if isPrivKey { + key = "privkeys" + if _, ok := dcparameters["privkeys-passwords"]; !ok { + dcparameters["privkeys-passwords"] = [][]byte{password} + } else { + dcparameters["privkeys-passwords"] = append(dcparameters["privkeys-passwords"], password) + } + } else if IsCertificate(keyData) { + key = "x509s" + } else if IsGPGPrivateKeyRing(keyData) { + key = "gpg-privatekeys" + } + if key != "" { + values := dcparameters[key] + if values == nil { + dcparameters[key] = [][]byte{keyData} + } else { + dcparameters[key] = append(dcparameters[key], keyData) + } + } else { + return nil, errors.New("Unknown decryption key type") + } + } + + return dcparameters, nil +} diff --git a/tools/vendor/github.com/cyberphone/json-canonicalization/LICENSE b/tools/vendor/github.com/cyberphone/json-canonicalization/LICENSE new file mode 100644 index 000000000..591211595 --- /dev/null +++ b/tools/vendor/github.com/cyberphone/json-canonicalization/LICENSE @@ -0,0 +1,13 @@ + Copyright 2018 Anders Rundgren + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/tools/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go b/tools/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go new file mode 100644 index 000000000..92574a3f4 --- /dev/null +++ b/tools/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go @@ -0,0 +1,71 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This package converts numbers in IEEE-754 double precision into the +// format specified for JSON in EcmaScript Version 6 and forward. 
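+// For example, ES6 serializes 1.0 as "1", 5e-7 as "5e-7", and 1e21 as "1e+21".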
+// The core application for this is canonicalization:
+// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02
+
+package jsoncanonicalizer
+
+import (
+	"errors"
+	"math"
+	"strconv"
+	"strings"
+)
+
+const invalidPattern uint64 = 0x7ff0000000000000
+
+func NumberToJSON(ieeeF64 float64) (res string, err error) {
+	ieeeU64 := math.Float64bits(ieeeF64)
+
+	// Special case: NaN and Infinity are invalid in JSON
+	if (ieeeU64 & invalidPattern) == invalidPattern {
+		return "null", errors.New("Invalid JSON number: " + strconv.FormatUint(ieeeU64, 16))
+	}
+
+	// Special case: eliminate "-0" as mandated by the ES6-JSON/JCS specifications
+	if ieeeF64 == 0 { // Right, this line takes both -0 and 0
+		return "0", nil
+	}
+
+	// Deal with the sign separately
+	var sign string = ""
+	if ieeeF64 < 0 {
+		ieeeF64 = -ieeeF64
+		sign = "-"
+	}
+
+	// ES6 picks the notation by magnitude: fixed ('f') in [1e-6, 1e+21), exponential ('e') otherwise
+	var format byte = 'e'
+	if ieeeF64 < 1e+21 && ieeeF64 >= 1e-6 {
+		format = 'f'
+	}
+
+	// FormatFloat with precision -1 yields the shortest digits that round-trip:
+	es6Formatted := strconv.FormatFloat(ieeeF64, format, -1, 64)
+
+	// Minor cleanup
+	exponent := strings.IndexByte(es6Formatted, 'e')
+	if exponent > 0 {
+		// Go outputs "1e+09" which must be rewritten as "1e+9"
+		if es6Formatted[exponent+2] == '0' {
+			es6Formatted = es6Formatted[:exponent+2] + es6Formatted[exponent+3:]
+		}
+	}
+	return sign + es6Formatted, nil
+}
diff --git a/tools/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go b/tools/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go
new file mode 100644
index 000000000..661f41055
--- /dev/null
+++ b/tools/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go
@@ -0,0 +1,378 @@
+//
+// Copyright 2006-2019 WebPKI.org (http://webpki.org).
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// This package transforms JSON data in UTF-8 according to:
+// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02
+
+package jsoncanonicalizer
+
+import (
+	"container/list"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf16"
+)
+
+type nameValueType struct {
+	name    string
+	sortKey []uint16
+	value   string
+}
+
+// JSON standard escapes (modulo \u)
+var asciiEscapes = []byte{'\\', '"', 'b', 'f', 'n', 'r', 't'}
+var binaryEscapes = []byte{'\\', '"', '\b', '\f', '\n', '\r', '\t'}
+
+// JSON literals
+var literals = []string{"true", "false", "null"}
+
+func Transform(jsonData []byte) (result []byte, e error) {
+
+	// JSON data MUST be UTF-8 encoded
+	var jsonDataLength int = len(jsonData)
+
+	// Current pointer in jsonData
+	var index int = 0
+
+	// "Forward" declarations are needed for closures referring to each other
+	var parseElement func() string
+	var parseSimpleType func() string
+	var parseQuotedString func() string
+	var parseObject func() string
+	var parseArray func() string
+
+	var globalError error = nil
+
+	checkError := func(e error) {
+		// We only honor the first reported error
+		if globalError == nil {
+			globalError = e
+		}
+	}
+
+	setError := func(msg string) {
+		checkError(errors.New(msg))
+	}
+
+	isWhiteSpace := func(c byte) bool {
+		return c == 0x20 || c == 0x0a || c == 0x0d || c == 0x09
+	}
+
+	nextChar := func() byte {
+		if index < jsonDataLength {
+			c := jsonData[index]
+			if c > 0x7f {
+				setError("Unexpected non-ASCII character")
+			}
+			index++
+			return c
+		}
+		setError("Unexpected EOF reached")
+		return '"'
+	}
+
+	scan := func() byte {
+		for {
+			c := nextChar()
+			if isWhiteSpace(c) {
+				continue
+			}
+			return c
+		}
+	}
+
+	scanFor := func(expected byte) {
+		c := scan()
+		if c != expected {
+			setError("Expected '" + string(expected) + "' but got '" + string(c) + "'")
+		}
+	}
+
+	getUEscape := func() rune {
+		start := index
+		nextChar()
+		nextChar()
+		nextChar()
+		nextChar()
+		if globalError != nil {
+			return 0
+		}
+		u16, err := strconv.ParseUint(string(jsonData[start:index]), 16, 64)
+		checkError(err)
+		return rune(u16)
+	}
+
+	testNextNonWhiteSpaceChar := func() byte {
+		save := index
+		c := scan()
+		index = save
+		return c
+	}
+
+	decorateString := func(rawUTF8 string) string {
+		var quotedString strings.Builder
+		quotedString.WriteByte('"')
+	CoreLoop:
+		for _, c := range []byte(rawUTF8) {
+			// Is this within the JSON standard escapes?
+			for i, esc := range binaryEscapes {
+				if esc == c {
+					quotedString.WriteByte('\\')
+					quotedString.WriteByte(asciiEscapes[i])
+					continue CoreLoop
+				}
+			}
+			if c < 0x20 {
+				// Other ASCII control characters must be escaped with \uhhhh
+				quotedString.WriteString(fmt.Sprintf("\\u%04x", c))
+			} else {
+				quotedString.WriteByte(c)
+			}
+		}
+		quotedString.WriteByte('"')
+		return quotedString.String()
+	}
+
+	parseQuotedString = func() string {
+		var rawString strings.Builder
+	CoreLoop:
+		for globalError == nil {
+			var c byte
+			if index < jsonDataLength {
+				c = jsonData[index]
+				index++
+			} else {
+				nextChar()
+				break
+			}
+			if c == '"' {
+				break
+			}
+			if c < ' ' {
+				setError("Unterminated string literal")
+			} else if c == '\\' {
+				// Escape sequence
+				c = nextChar()
+				if c == 'u' {
+					// The \u escape
+					firstUTF16 := getUEscape()
+					if utf16.IsSurrogate(firstUTF16) {
+						// If the first UTF-16 code unit is a surrogate, it must be
+						// followed by a second UTF-16 code unit
+						if nextChar() != '\\' || nextChar() != 'u' {
+							setError("Missing surrogate")
+						} else {
+							// Output the UTF-32 code point as UTF-8
+							rawString.WriteRune(utf16.DecodeRune(firstUTF16, getUEscape()))
+						}
+					} else {
+						// Single UTF-16 code identical to UTF-32. Output as UTF-8
+						rawString.WriteRune(firstUTF16)
+					}
+				} else if c == '/' {
+					// Benign but useless escape
+					rawString.WriteByte('/')
+				} else {
+					// The JSON standard escapes
+					for i, esc := range asciiEscapes {
+						if esc == c {
+							rawString.WriteByte(binaryEscapes[i])
+							continue CoreLoop
+						}
+					}
+					setError("Unexpected escape: \\" + string(c))
+				}
+			} else {
+				// Just an ordinary ASCII character, or a UTF-8 byte
+				// outside of ASCII.
+				// Note that properly formatted UTF-8 never clashes with ASCII,
+				// making byte-by-byte search for ASCII break characters work
+				// as expected.
+				rawString.WriteByte(c)
+			}
+		}
+		return rawString.String()
+	}
+
+	parseSimpleType = func() string {
+		var token strings.Builder
+		index--
+		for globalError == nil {
+			c := testNextNonWhiteSpaceChar()
+			if c == ',' || c == ']' || c == '}' {
+				break
+			}
+			c = nextChar()
+			if isWhiteSpace(c) {
+				break
+			}
+			token.WriteByte(c)
+		}
+		if token.Len() == 0 {
+			setError("Missing argument")
+		}
+		value := token.String()
+		// Is it a JSON literal?
+		for _, literal := range literals {
+			if literal == value {
+				return literal
+			}
+		}
+		// Apparently not, so we assume that it is an I-JSON number
+		ieeeF64, err := strconv.ParseFloat(value, 64)
+		checkError(err)
+		value, err = NumberToJSON(ieeeF64)
+		checkError(err)
+		return value
+	}
+
+	parseElement = func() string {
+		switch scan() {
+		case '{':
+			return parseObject()
+		case '"':
+			return decorateString(parseQuotedString())
+		case '[':
+			return parseArray()
+		default:
+			return parseSimpleType()
+		}
+	}
+
+	parseArray = func() string {
+		var arrayData strings.Builder
+		arrayData.WriteByte('[')
+		var next bool = false
+		for globalError == nil && testNextNonWhiteSpaceChar() != ']' {
+			if next {
+				scanFor(',')
+				arrayData.WriteByte(',')
+			} else {
+				next = true
+			}
+			arrayData.WriteString(parseElement())
+		}
+		scan()
+		arrayData.WriteByte(']')
+		return arrayData.String()
+	}
+
+	lexicographicallyPrecedes := func(sortKey []uint16, e *list.Element) bool {
+		// Find the minimum length of the sortKeys
+		oldSortKey := e.Value.(nameValueType).sortKey
+		minLength := len(oldSortKey)
+		if minLength > len(sortKey) {
+			minLength = len(sortKey)
+		}
+		for q := 0; q < minLength; q++ {
+			diff := int(sortKey[q]) - int(oldSortKey[q])
+			if diff < 0 {
+				// Smaller => Precedes
+				return true
+			} else if diff > 0 {
+				// Bigger => No match
+				return false
+			}
+			// Still equal => Continue
+		}
+		// The sortKeys compared equal up to minLength
+		if len(sortKey) < len(oldSortKey) {
+			// Shorter => Precedes
+			return true
+		}
+		if len(sortKey) == len(oldSortKey) {
+			setError("Duplicate key: " + e.Value.(nameValueType).name)
+		}
+		// Longer => No match
+		return false
+	}
+
+	parseObject = func() string {
+		nameValueList := list.New()
+		var next bool = false
+	CoreLoop:
+		for globalError == nil && testNextNonWhiteSpaceChar() != '}' {
+			if next {
+				scanFor(',')
+			}
+			next = true
+			scanFor('"')
+			rawUTF8 := parseQuotedString()
+			if globalError != nil {
+				break
+			}
+			// Sort keys on UTF-16 code units
+			// Since UTF-8 doesn't have endianness this is just a value transformation
+			// In the Go case the transformation is UTF-8 => UTF-32 => UTF-16
+			sortKey := utf16.Encode([]rune(rawUTF8))
+			scanFor(':')
+			nameValue := nameValueType{rawUTF8, sortKey, parseElement()}
+			for e := nameValueList.Front(); e != nil; e = e.Next() {
+				// Check if the key is smaller than a previous key
+				if lexicographicallyPrecedes(sortKey, e) {
+					// Precedes => Insert before and exit sorting
+					nameValueList.InsertBefore(nameValue, e)
+					continue CoreLoop
+				}
+				// Continue searching for a possibly succeeding sortKey
+				// (which is straightforward since the list is ordered)
+			}
+			// The sortKey is either the first or succeeds all previous sortKeys
+			nameValueList.PushBack(nameValue)
+		}
+		// Scan away '}'
+		scan()
+		// Now everything is sorted so we can properly serialize the object
+		var objectData strings.Builder
+		objectData.WriteByte('{')
+		next = false
+		for e := nameValueList.Front(); e != nil; e = e.Next() {
+			if next {
+				objectData.WriteByte(',')
+			}
+			next = true
+			nameValue := e.Value.(nameValueType)
+			objectData.WriteString(decorateString(nameValue.name))
+			objectData.WriteByte(':')
+			objectData.WriteString(nameValue.value)
+		}
+		objectData.WriteByte('}')
+		return objectData.String()
+	}
+
+	/////////////////////////////////////////////////
+	// This is where Transform actually begins...
//
+	/////////////////////////////////////////////////
+	var transformed string
+
+	if testNextNonWhiteSpaceChar() == '[' {
+		scan()
+		transformed = parseArray()
+	} else {
+		scanFor('{')
+		transformed = parseObject()
+	}
+	for index < jsonDataLength {
+		if !isWhiteSpace(jsonData[index]) {
+			setError("Improperly terminated JSON object")
+			break
+		}
+		index++
+	}
+	return []byte(transformed), globalError
+}
\ No newline at end of file
diff --git a/tools/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/tools/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md
index cb1252b53..ca0e3c62c 100644
--- a/tools/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md
+++ b/tools/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md
@@ -6,6 +6,51 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 
 ## [Unreleased] ##
 
+## [0.4.1] - 2025-01-28 ##
+
+### Fixed ###
+- The restriction added for `root` paths passed to `SecureJoin` in 0.4.0 was
+  found to be too strict and caused some regressions when folks tried to
+  update, so it has been relaxed to only return an error if the root path
+  contains a `..` component. We still recommend users use `filepath.Clean`
+  (and even `filepath.EvalSymlinks`) on the `root` path they are using, but at
+  least you will no longer be punished for "trivial" unclean paths.
+
+## [0.4.0] - 2025-01-13 ##
+
+### Breaking ###
+- `SecureJoin(VFS)` will now return an error if the provided `root` is not a
+  `filepath.Clean`'d path.
+
+  While it is ultimately the responsibility of the caller to ensure the root is
+  a safe path to use, passing a path like `/symlink/..` as a root would result
+  in the `SecureJoin`'d path being placed in `/` even though `/symlink/..`
+  might be a different directory, and so we should more strongly discourage
+  such usage.
+
+  All major users of `securejoin.SecureJoin` already ensure that the paths they
+  provide are safe (and this is ultimately a question of user error), but
+  removing this foot-gun is probably a good idea. Of course, this is
+  necessarily a breaking API change (though we expect no real users to be
+  affected by it).
+
+  Thanks to [Erik Sjölund](https://github.com/eriksjolund), who initially
+  reported this issue as a possible security issue.
+
+- `MkdirAll` and `MkdirHandle` now take an `os.FileMode`-style mode argument
+  instead of a raw `unix.S_*`-style mode argument, which may cause compile-time
+  type errors depending on how you use `filepath-securejoin`. For most users,
+  there will be no change in behaviour aside from the type change (as the
+  bottom `0o777` bits are the same in both formats, and most users are probably
+  only using those bits).
+
+  However, if you were using `unix.S_ISVTX` to set the sticky bit with
+  `MkdirAll(Handle)` you will need to switch to `os.ModeSticky` otherwise you
+  will get a runtime error with this update. In addition, the error message you
+  will get from passing `unix.S_ISUID` and `unix.S_ISGID` will be different as
+  they are treated as invalid bits now (note that previously passing said bits
+  was also an error).
+
 ## [0.3.6] - 2024-12-17 ##
 
 ### Compatibility ###
@@ -193,7 +238,9 @@ This is our first release of `github.com/cyphar/filepath-securejoin`,
 containing a full implementation with a coverage of 93.5% (the only missing
 cases are the error cases, which are hard to mocktest at the moment).
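As an illustration (not part of the upstream changelog), the `MkdirAll` mode
migration described in the 0.4.0 notes above amounts to swapping raw `unix.S_*`
bits for their `os.FileMode` equivalents; `root` below stands for whatever root
directory the caller already uses:

    // 0.3.x: mode was an int built from unix.S_* permission bits.
    err := securejoin.MkdirAll(root, "a/b/c", 0o755|unix.S_ISVTX)

    // 0.4.x: mode is an os.FileMode; the sticky bit is os.ModeSticky.
    err = securejoin.MkdirAll(root, "a/b/c", 0o755|os.ModeSticky)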
-[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...HEAD
+[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD
+[0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1
+[0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0
 [0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6
 [0.3.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...v0.3.5
 [0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4
diff --git a/tools/vendor/github.com/cyphar/filepath-securejoin/VERSION b/tools/vendor/github.com/cyphar/filepath-securejoin/VERSION
index 449d7e73a..267577d47 100644
--- a/tools/vendor/github.com/cyphar/filepath-securejoin/VERSION
+++ b/tools/vendor/github.com/cyphar/filepath-securejoin/VERSION
@@ -1 +1 @@
-0.3.6
+0.4.1
diff --git a/tools/vendor/github.com/cyphar/filepath-securejoin/join.go b/tools/vendor/github.com/cyphar/filepath-securejoin/join.go
index e0ee3f2b5..e6634d477 100644
--- a/tools/vendor/github.com/cyphar/filepath-securejoin/join.go
+++ b/tools/vendor/github.com/cyphar/filepath-securejoin/join.go
@@ -1,5 +1,5 @@
 // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
-// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
+// Copyright (C) 2017-2025 SUSE LLC. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -24,6 +24,31 @@ func IsNotExist(err error) bool {
 	return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
 }
 
+// errUnsafeRoot is returned if the user provides SecureJoinVFS with a path
+// that contains ".." components.
+var errUnsafeRoot = errors.New("root path provided to SecureJoin contains '..' components")
+
+// stripVolume just gets rid of the Windows volume included in a path. Based on
+// some godbolt tests, the Go compiler is smart enough to make this a no-op on
+// Linux.
+func stripVolume(path string) string {
+	return path[len(filepath.VolumeName(path)):]
+}
+
+// hasDotDot checks if the path contains ".." components in a platform-agnostic
+// way.
+func hasDotDot(path string) bool {
+	// If we are on Windows, strip any volume letters. It turns out that
+	// C:..\foo may (or may not) be a valid pathname and we need to handle that
+	// leading "..".
+	path = stripVolume(path)
+	// Look for "/../" in the path, but we need to handle leading and trailing
+	// ".."s by adding separators. Doing this with filepath.Separator is ugly
+	// so just convert to Unix-style "/" first.
+	path = filepath.ToSlash(path)
+	return strings.Contains("/"+path+"/", "/../")
+}
+
 // SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except
 // that the returned path is guaranteed to be scoped inside the provided root
 // path (when evaluated). Any symbolic links in the path are evaluated with the
@@ -46,7 +71,22 @@ func IsNotExist(err error) bool {
 // provided via direct input or when evaluating symlinks. Therefore:
 //
 //	"C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
+//
+// If the provided root contains ".." components then an error will be
+// returned, as such root paths are inherently unsafe. We also strongly
+// suggest that any root path is first fully resolved using
+// [filepath.EvalSymlinks] or otherwise constructed to avoid containing
+// symlink components. Of course, the root also *must not* be
Of course, the root also *must not* be +// attacker-controlled. func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // The root path must not contain ".." components, otherwise when we join + // the subpath we will end up with a weird path. We could work around this + // in other ways but users shouldn't be giving us non-lexical root paths in + // the first place. + if hasDotDot(root) { + return "", errUnsafeRoot + } + // Use the os.* VFS implementation if none was specified. if vfs == nil { vfs = osVFS{} @@ -59,9 +99,10 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { linksWalked int ) for remainingPath != "" { - if v := filepath.VolumeName(remainingPath); v != "" { - remainingPath = remainingPath[len(v):] - } + // On Windows, if we managed to end up at a path referencing a volume, + // drop the volume to make sure we don't end up with broken paths or + // escaping the root volume. + remainingPath = stripVolume(remainingPath) // Get the next path component. var part string diff --git a/tools/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/tools/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go index 5e559bb7a..a17ae3b03 100644 --- a/tools/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go +++ b/tools/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go @@ -21,6 +21,33 @@ var ( errPossibleAttack = errors.New("possible attack detected") ) +// modePermExt is like os.ModePerm except that it also includes the set[ug]id +// and sticky bits. +const modePermExt = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky + +//nolint:cyclop // this function needs to handle a lot of cases +func toUnixMode(mode os.FileMode) (uint32, error) { + sysMode := uint32(mode.Perm()) + if mode&os.ModeSetuid != 0 { + sysMode |= unix.S_ISUID + } + if mode&os.ModeSetgid != 0 { + sysMode |= unix.S_ISGID + } + if mode&os.ModeSticky != 0 { + sysMode |= unix.S_ISVTX + } + // We don't allow file type bits. + if mode&os.ModeType != 0 { + return 0, fmt.Errorf("%w %+.3o (%s): type bits not permitted", errInvalidMode, mode, mode) + } + // We don't allow other unknown modes. + if mode&^modePermExt != 0 || sysMode&unix.S_IFMT != 0 { + return 0, fmt.Errorf("%w %+.3o (%s): unknown mode bits", errInvalidMode, mode, mode) + } + return sysMode, nil +} + // MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use // in two respects: // @@ -39,17 +66,17 @@ var ( // a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after // doing [MkdirAll]. If you intend to open the directory after creating it, you // should use MkdirAllHandle. -func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) { - // Make sure there are no os.FileMode bits set. - if mode&^0o7777 != 0 { - return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode) +func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) { + unixMode, err := toUnixMode(mode) + if err != nil { + return nil, err } // On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid // bits. We could also silently ignore them but since we have very few // users it seems more prudent to return an error so users notice that // these bits will not be set. 
- if mode&^0o1777 != 0 { - return nil, fmt.Errorf("%w for mkdir 0o%.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) + if unixMode&^0o1777 != 0 { + return nil, fmt.Errorf("%w for mkdir %+.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) } // Try to open as much of the path as possible. @@ -104,9 +131,6 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath) } - // Make sure the mode doesn't have any type bits. - mode &^= unix.S_IFMT - // Create the remaining components. for _, part := range remainingParts { switch part { @@ -123,7 +147,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err // directory at the same time as us. In that case, just continue on as // if we created it (if the created inode is not a directory, the // following open call will fail). - if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil && !errors.Is(err, unix.EEXIST) { + if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) { err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} // Make the error a bit nicer if the directory is dead. if deadErr := isDeadInode(currentDir); deadErr != nil { @@ -196,10 +220,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err // If you plan to open the directory after you have created it or want to use // an open directory handle as the root, you should use [MkdirAllHandle] instead. // This function is a wrapper around [MkdirAllHandle]. -// -// NOTE: The mode argument must be set the unix mode bits (unix.S_I...), not -// the Go generic mode bits ([os.FileMode]...). -func MkdirAll(root, unsafePath string, mode int) error { +func MkdirAll(root, unsafePath string, mode os.FileMode) error { rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { return err diff --git a/tools/vendor/github.com/docker/cli/AUTHORS b/tools/vendor/github.com/docker/cli/AUTHORS index ad1abd496..c5a480b5e 100644 --- a/tools/vendor/github.com/docker/cli/AUTHORS +++ b/tools/vendor/github.com/docker/cli/AUTHORS @@ -48,6 +48,7 @@ Alfred Landrum Ali Rostami Alicia Lauerman Allen Sun +Allie Sadler Alvin Deng Amen Belayneh Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> @@ -81,6 +82,7 @@ Antonis Kalipetis Anusha Ragunathan Ao Li Arash Deshmeh +Archimedes Trajano Arko Dasgupta Arnaud Porterie Arnaud Rebillout @@ -88,6 +90,7 @@ Arthur Peka Ashly Mathew Ashwini Oruganti Aslam Ahemad +Austin Vazquez Azat Khuyiyakhmetov Bardia Keyoumarsi Barnaby Gray @@ -132,6 +135,7 @@ Cao Weiwei Carlo Mion Carlos Alexandro Becker Carlos de Paula +Carston Schilds Casey Korver Ce Gao Cedric Davies @@ -189,6 +193,7 @@ Daisuke Ito dalanlan Damien Nadé Dan Cotora +Dan Wallis Danial Gharib Daniel Artine Daniel Cassidy @@ -237,6 +242,7 @@ Deshi Xiao Dharmit Shah Dhawal Yogesh Bhanushali Dieter Reuter +Dilep Dev <34891655+DilepDev@users.noreply.github.com> Dima Stopel Dimitry Andric Ding Fei @@ -308,6 +314,8 @@ George MacRorie George Margaritis George Xie Gianluca Borello +Giau. 
Tran Minh +Giedrius Jonikas Gildas Cuisinier Gio d'Amelio Gleb Stsenov @@ -344,6 +352,7 @@ Hugo Gabriel Eyherabide huqun Huu Nguyen Hyzhou Zhy +Iain MacDonald Iain Samuel McLean Elder Ian Campbell Ian Philpot @@ -393,6 +402,7 @@ Jesse Adametz Jessica Frazelle Jezeniel Zapanta Jian Zhang +Jianyong Wu Jie Luo Jilles Oldenbeuving Jim Chen @@ -446,6 +456,7 @@ Julian Julien Barbier Julien Kassar Julien Maitrehenry +Julio Cesar Garcia Justas Brazauskas Justin Chadwell Justin Cormack @@ -490,19 +501,22 @@ Kunal Kushwaha Kyle Mitofsky Lachlan Cooper Lai Jiangshan +Lajos Papp Lars Kellogg-Stedman Laura Brehm Laura Frank Laurent Erignoux +Laurent Goderre Lee Gaines Lei Jitang Lennie +lentil32 Leo Gallucci Leonid Skorospelov Lewis Daly Li Fu Bang Li Yi -Li Yi +Li Zeghong Liang-Chi Hsieh Lihua Tang Lily Guo @@ -515,6 +529,7 @@ lixiaobing10051267 Lloyd Dewolf Lorenzo Fontana Louis Opter +Lovekesh Kumar Luca Favatella Luca Marturana Lucas Chan @@ -559,6 +574,7 @@ Matt Robenolt Matteo Orefice Matthew Heon Matthieu Hauglustaine +Matthieu MOREL Mauro Porras P Max Shytikov Max-Julian Pogner @@ -566,6 +582,7 @@ Maxime Petazzoni Maximillian Fan Xavier Mei ChunTao Melroy van den Berg +Mert Şişmanoğlu Metal <2466052+tedhexaflow@users.noreply.github.com> Micah Zoltu Michael A. Smith @@ -598,7 +615,9 @@ Mindaugas Rukas Miroslav Gula Misty Stanley-Jones Mohammad Banikazemi +Mohammad Hossein Mohammed Aaqib Ansari +Mohammed Aminu Futa Mohini Anne Dsouza Moorthy RS Morgan Bauer @@ -633,9 +652,11 @@ Nicolas De Loof Nikhil Chawla Nikolas Garofil Nikolay Milovanov +NinaLua Nir Soffer Nishant Totla NIWA Hideyuki +Noah Silas Noah Treuhaft O.S. Tezer Oded Arbel @@ -653,10 +674,12 @@ Patrick Böänziger Patrick Daigle <114765035+pdaig@users.noreply.github.com> Patrick Hemmer Patrick Lang +Patrick St. laurent Paul Paul Kehrer Paul Lietar Paul Mulders +Paul Rogalski Paul Seyfert Paul Weaver Pavel Pospisil @@ -678,7 +701,6 @@ Philip Alexander Etling Philipp Gillé Philipp Schmied Phong Tran -pidster Pieter E Smit pixelistik Pratik Karki @@ -738,6 +760,7 @@ Samuel Cochran Samuel Karp Sandro Jäckel Santhosh Manohar +Sarah Sanders Sargun Dhillon Saswat Bhattacharya Saurabh Kumar @@ -770,6 +793,7 @@ Spencer Brown Spring Lee squeegels Srini Brahmaroutu +Stavros Panakakis Stefan S. Stefan Scherer Stefan Weil @@ -780,6 +804,7 @@ Steve Durrheimer Steve Richards Steven Burgess Stoica-Marcu Floris-Andrei +Stuart Williams Subhajit Ghosh Sun Jianbo Sune Keller @@ -867,6 +892,7 @@ Wang Yumu <37442693@qq.com> Wataru Ishida Wayne Song Wen Cheng Ma +Wenlong Zhang Wenzhi Liang Wes Morgan Wewang Xiaorenfine @@ -908,3 +934,4 @@ Zhuo Zhi Átila Camurça Alves Александр Менщиков <__Singleton__@hackerdom.ru> 徐俊杰 +林博仁 Buo-ren Lin diff --git a/tools/vendor/github.com/docker/cli/cli/config/config.go b/tools/vendor/github.com/docker/cli/cli/config/config.go index 5a5184326..cbb34486a 100644 --- a/tools/vendor/github.com/docker/cli/cli/config/config.go +++ b/tools/vendor/github.com/docker/cli/cli/config/config.go @@ -58,7 +58,7 @@ func resetConfigDir() { // getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker // as dependency for consumers that only need to read the config-file. 
// -// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get +// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get func getHomeDir() string { home, _ := os.UserHomeDir() if home == "" && runtime.GOOS != "windows" { @@ -69,6 +69,11 @@ func getHomeDir() string { return home } +// Provider defines an interface for providing the CLI config. +type Provider interface { + ConfigFile() *configfile.ConfigFile +} + // Dir returns the directory the configuration file is stored in func Dir() string { initConfigDir.Do(func() { @@ -143,7 +148,7 @@ func load(configDir string) (*configfile.ConfigFile, error) { defer file.Close() err = configFile.LoadFromReader(file) if err != nil { - err = errors.Wrapf(err, "loading config file: %s: ", filename) + err = errors.Wrapf(err, "parsing config file (%s)", filename) } return configFile, err } diff --git a/tools/vendor/github.com/docker/cli/cli/config/configfile/file.go b/tools/vendor/github.com/docker/cli/cli/config/configfile/file.go index ae9dcb337..530c52285 100644 --- a/tools/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/tools/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -3,12 +3,14 @@ package configfile import ( "encoding/base64" "encoding/json" + "fmt" "io" "os" "path/filepath" "strings" "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/memorystore" "github.com/docker/cli/cli/config/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -36,14 +38,41 @@ type ConfigFile struct { NodesFormat string `json:"nodesFormat,omitempty"` PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` CurrentContext string `json:"currentContext,omitempty"` CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` Aliases map[string]string `json:"aliases,omitempty"` Features map[string]string `json:"features,omitempty"` + + // Deprecated: experimental CLI features are always enabled and this field is no longer used. Use [Features] instead for optional features. This field will be removed in a future release. + Experimental string `json:"experimental,omitempty"` +} + +type configEnvAuth struct { + Auth string `json:"auth"` } +type configEnv struct { + AuthConfigs map[string]configEnvAuth `json:"auths"` +} + +// DockerEnvConfigKey is an environment variable that contains a JSON encoded +// credential config. It only supports storing the credentials as a base64 +// encoded string in the format base64("username:pat"). +// +// Adding additional fields will produce a parsing error. +// +// Example: +// +// { +// "auths": { +// "example.test": { +// "auth": base64-encoded-username-pat +// } +// } +// } +const DockerEnvConfigKey = "DOCKER_AUTH_CONFIG" + // ProxyConfig contains proxy configuration settings type ProxyConfig struct { HTTPProxy string `json:"httpProxy,omitempty"` @@ -150,7 +179,8 @@ func (configFile *ConfigFile) Save() (retErr error) { return err } defer func() { - temp.Close() + // ignore error as the file may already be closed when we reach this. 
+ _ = temp.Close() if retErr != nil { if err := os.Remove(temp.Name()); err != nil { logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") @@ -167,10 +197,16 @@ func (configFile *ConfigFile) Save() (retErr error) { return errors.Wrap(err, "error closing temp file") } - // Handle situation where the configfile is a symlink + // Handle situation where the configfile is a symlink, and allow for dangling symlinks cfgFile := configFile.Filename - if f, err := os.Readlink(cfgFile); err == nil { + if f, err := filepath.EvalSymlinks(cfgFile); err == nil { cfgFile = f + } else if os.IsNotExist(err) { + // extract the path from the error if the configfile does not exist or is a dangling symlink + var pathError *os.PathError + if errors.As(err, &pathError) { + cfgFile = pathError.Path + } } // Try copying the current config file (if any) ownership and permissions @@ -254,10 +290,64 @@ func decodeAuth(authStr string) (string, string, error) { // GetCredentialsStore returns a new credentials store from the settings in the // configuration file func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { + store := credentials.NewFileStore(configFile) + if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { - return newNativeStore(configFile, helper) + store = newNativeStore(configFile, helper) + } + + envConfig := os.Getenv(DockerEnvConfigKey) + if envConfig == "" { + return store + } + + authConfig, err := parseEnvConfig(envConfig) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) + return store + } + + // use DOCKER_AUTH_CONFIG if set + // it uses the native or file store as a fallback to fetch and store credentials + envStore, err := memorystore.New( + memorystore.WithAuthConfig(authConfig), + memorystore.WithFallbackStore(store), + ) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) + return store + } + + return envStore +} + +func parseEnvConfig(v string) (map[string]types.AuthConfig, error) { + envConfig := &configEnv{} + decoder := json.NewDecoder(strings.NewReader(v)) + decoder.DisallowUnknownFields() + if err := decoder.Decode(envConfig); err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + if decoder.More() { + return nil, errors.New("DOCKER_AUTH_CONFIG does not support more than one JSON object") + } + + authConfigs := make(map[string]types.AuthConfig) + for addr, envAuth := range envConfig.AuthConfigs { + if envAuth.Auth == "" { + return nil, fmt.Errorf("DOCKER_AUTH_CONFIG environment variable is missing key `auth` for %s", addr) + } + username, password, err := decodeAuth(envAuth.Auth) + if err != nil { + return nil, err + } + authConfigs[addr] = types.AuthConfig{ + Username: username, + Password: password, + ServerAddress: addr, + } } - return credentials.NewFileStore(configFile) + return authConfigs, nil } // var for unit testing. 
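For orientation, a minimal sketch (not part of this patch) of producing a
DOCKER_AUTH_CONFIG value in the shape that parseEnvConfig above accepts; the
registry host "example.test" and the "user"/"pat" credentials are placeholders:

    package main

    import (
    	"encoding/base64"
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// The "auth" field holds base64("username:pat"), as documented for
    	// DOCKER_AUTH_CONFIG above.
    	auth := base64.StdEncoding.EncodeToString([]byte("user:pat"))
    	cfg := map[string]any{
    		"auths": map[string]any{
    			"example.test": map[string]string{"auth": auth},
    		},
    	}
    	out, _ := json.Marshal(cfg)
    	// export DOCKER_AUTH_CONFIG='{"auths":{"example.test":{"auth":"dXNlcjpwYXQ="}}}'
    	fmt.Println(string(out))
    }

Note that parseEnvConfig disallows unknown fields and rejects more than one
JSON object, so the value must contain exactly this shape.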
diff --git a/tools/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/tools/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
index 3b8955994..c69312b01 100644
--- a/tools/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
+++ b/tools/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
@@ -1,9 +1,12 @@
 package credentials
 
 import (
+	"fmt"
 	"net"
 	"net/url"
+	"os"
 	"strings"
+	"sync/atomic"
 
 	"github.com/docker/cli/cli/config/types"
 )
@@ -25,8 +28,13 @@ func NewFileStore(file store) Store {
 	return &fileStore{file: file}
 }
 
-// Erase removes the given credentials from the file store.
+// Erase removes the given credentials from the file store. This function is
+// idempotent and does not update the file if credentials did not change.
 func (c *fileStore) Erase(serverAddress string) error {
+	if _, exists := c.file.GetAuthConfigs()[serverAddress]; !exists {
+		// nothing to do; no credentials found for the given serverAddress
+		return nil
+	}
 	delete(c.file.GetAuthConfigs(), serverAddress)
 	return c.file.Save()
 }
@@ -52,19 +60,43 @@ func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
 	return c.file.GetAuthConfigs(), nil
 }
 
-// Store saves the given credentials in the file store.
+// unencryptedWarning warns the user when using insecure credential storage.
+// After a deprecation period, the user will get prompted if stdin and stderr are a terminal.
+// Otherwise, we'll assume they want it (sadly), because people may have been scripting
+// insecure logins and we don't want to break them. Maybe they'll see the warning in their
+// logs and fix things.
+const unencryptedWarning = `
+WARNING! Your credentials are stored unencrypted in '%s'.
+Configure a credential helper to remove this warning. See
+https://docs.docker.com/go/credential-store/
+`
+
+// alreadyPrinted ensures that we only print the unencryptedWarning once per
+// CLI invocation (no need to warn the user multiple times per command).
+var alreadyPrinted atomic.Bool
+
+// Store saves the given credentials in the file store. This function is
+// idempotent and does not update the file if credentials did not change.
 func (c *fileStore) Store(authConfig types.AuthConfig) error {
 	authConfigs := c.file.GetAuthConfigs()
+	if oldAuthConfig, ok := authConfigs[authConfig.ServerAddress]; ok && oldAuthConfig == authConfig {
+		// Credentials didn't change, so skip updating the configuration file.
+		return nil
+	}
 	authConfigs[authConfig.ServerAddress] = authConfig
-	return c.file.Save()
-}
+	if err := c.file.Save(); err != nil {
+		return err
+	}
 
-func (c *fileStore) GetFilename() string {
-	return c.file.GetFilename()
-}
+	if !alreadyPrinted.Load() && authConfig.Password != "" {
+		// Display a warning if we're storing the user's password (not a token).
+		//
+		// FIXME(thaJeztah): make output configurable instead of hardcoding to os.Stderr
+		_, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf(unencryptedWarning, c.file.GetFilename()))
+		alreadyPrinted.Store(true)
+	}
 
-func (c *fileStore) IsFileStore() bool {
-	return true
+	return nil
 }
 
 // ConvertToHostname converts a registry url which has http|https prepended
diff --git a/tools/vendor/github.com/docker/cli/cli/config/memorystore/store.go b/tools/vendor/github.com/docker/cli/cli/config/memorystore/store.go
new file mode 100644
index 000000000..199083464
--- /dev/null
+++ b/tools/vendor/github.com/docker/cli/cli/config/memorystore/store.go
@@ -0,0 +1,126 @@
+//go:build go1.23
+
+package memorystore
+
+import (
+	"errors"
+	"fmt"
+	"maps"
+	"os"
+	"sync"
+
+	"github.com/docker/cli/cli/config/credentials"
+	"github.com/docker/cli/cli/config/types"
+)
+
+var errValueNotFound = errors.New("value not found")
+
+func IsErrValueNotFound(err error) bool {
+	return errors.Is(err, errValueNotFound)
+}
+
+type Config struct {
+	lock              sync.RWMutex
+	memoryCredentials map[string]types.AuthConfig
+	fallbackStore     credentials.Store
+}
+
+func (e *Config) Erase(serverAddress string) error {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	delete(e.memoryCredentials, serverAddress)
+
+	if e.fallbackStore != nil {
+		err := e.fallbackStore.Erase(serverAddress)
+		if err != nil {
+			_, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err)
+		}
+	}
+
+	return nil
+}
+
+func (e *Config) Get(serverAddress string) (types.AuthConfig, error) {
+	e.lock.RLock()
+	defer e.lock.RUnlock()
+	authConfig, ok := e.memoryCredentials[serverAddress]
+	if !ok {
+		if e.fallbackStore != nil {
+			return e.fallbackStore.Get(serverAddress)
+		}
+		return types.AuthConfig{}, errValueNotFound
+	}
+	return authConfig, nil
+}
+
+func (e *Config) GetAll() (map[string]types.AuthConfig, error) {
+	e.lock.RLock()
+	defer e.lock.RUnlock()
+	creds := make(map[string]types.AuthConfig)
+
+	if e.fallbackStore != nil {
+		fileCredentials, err := e.fallbackStore.GetAll()
+		if err != nil {
+			_, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err)
+		} else {
+			creds = fileCredentials
+		}
+	}
+
+	maps.Copy(creds, e.memoryCredentials)
+	return creds, nil
+}
+
+func (e *Config) Store(authConfig types.AuthConfig) error {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	e.memoryCredentials[authConfig.ServerAddress] = authConfig
+
+	if e.fallbackStore != nil {
+		return e.fallbackStore.Store(authConfig)
+	}
+	return nil
+}
+
+// WithFallbackStore sets a fallback store.
+//
+// Write operations will be performed on both the memory store and the
+// fallback store.
+//
+// Read operations will first check the memory store, and if the credential
+// is not found, it will then check the fallback store.
+//
+// Retrieving all credentials will return from both the memory store and the
+// fallback store, merging the results from both stores into a single map.
+//
+// Data stored in the memory store will take precedence over data in the
+// fallback store.
+func WithFallbackStore(store credentials.Store) Options {
+	return func(s *Config) error {
+		s.fallbackStore = store
+		return nil
+	}
+}
+
+// WithAuthConfig allows setting the initial credentials in the memory store.
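+//
+// A minimal usage sketch (the address and credentials are placeholders):
+//
+//	store, err := New(WithAuthConfig(map[string]types.AuthConfig{
+//		"example.test": {Username: "user", Password: "pat", ServerAddress: "example.test"},
+//	}))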
+func WithAuthConfig(config map[string]types.AuthConfig) Options { + return func(s *Config) error { + s.memoryCredentials = config + return nil + } +} + +type Options func(*Config) error + +// New creates a new in memory credential store +func New(opts ...Options) (credentials.Store, error) { + m := &Config{ + memoryCredentials: make(map[string]types.AuthConfig), + } + for _, opt := range opts { + if err := opt(m); err != nil { + return nil, err + } + } + return m, nil +} diff --git a/tools/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/tools/vendor/github.com/docker/cli/cli/config/types/authconfig.go index 056af6b84..95eb27c86 100644 --- a/tools/vendor/github.com/docker/cli/cli/config/types/authconfig.go +++ b/tools/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -7,8 +7,8 @@ type AuthConfig struct { Auth string `json:"auth,omitempty"` // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. + // + // Deprecated: This field is deprecated since docker 1.11 (API v1.23) and will be removed in the next release. Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` diff --git a/tools/vendor/github.com/docker/distribution/.dockerignore b/tools/vendor/github.com/docker/distribution/.dockerignore deleted file mode 100644 index e660fd93d..000000000 --- a/tools/vendor/github.com/docker/distribution/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -bin/ diff --git a/tools/vendor/github.com/docker/distribution/.golangci.yml b/tools/vendor/github.com/docker/distribution/.golangci.yml deleted file mode 100644 index 61dd0e00e..000000000 --- a/tools/vendor/github.com/docker/distribution/.golangci.yml +++ /dev/null @@ -1,33 +0,0 @@ -linters: - enable: - - staticcheck - - unconvert - - gofmt - - goimports - - golint - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -linters-settings: - revive: - rules: - # TODO(thaJeztah): temporarily disabled the "unused-parameter" check. - # It produces many warnings, and some of those may need to be looked at. - - name: unused-parameter - disabled: true - -run: - deadline: 2m - skip-dirs: - - vendor - -issues: - exclude-rules: - # io/ioutil is deprecated, but won't be removed until Go v2. It's safe to ignore for the release/2.8 branch. 
- - text: "SA1019: \"io/ioutil\" has been deprecated since Go 1.16" - linters: - - staticcheck diff --git a/tools/vendor/github.com/docker/distribution/.mailmap b/tools/vendor/github.com/docker/distribution/.mailmap deleted file mode 100644 index d7b832d9e..000000000 --- a/tools/vendor/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,54 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita -Yu Wang yuwaMSFT2 -Yu Wang Yu Wang (UC) -Olivier Gambier dmp -Olivier Gambier Olivier -Olivier Gambier Olivier -Elsan Li 李楠 elsanli(李楠) -Rui Cao ruicao -Gwendolynne Barr gbarr01 -Haibing Zhou 周海兵 zhouhaibing089 -Feng Honglin tifayuki -Helen Xie Helen-xie -Mike Brown Mike Brown -Manish Tomar Manish Tomar -Sakeven Jiang sakeven -Milos Gajdos Milos Gajdos -Derek McGowan Derek McGowa -Adrian Plata Adrian Plata <@users.noreply.github.com> -Sebastiaan van Stijn Sebastiaan van Stijn -Vishesh Jindal Vishesh Jindal -Wang Yan Wang Yan -Chris Patterson Chris Patterson -Eohyung Lee Eohyung Lee -João Pereira <484633+joaodrp@users.noreply.github.com> -Smasherr Smasherr -Thomas Berger Thomas Berger -Samuel Karp Samuel Karp -Justin Cormack -sayboras -CrazyMax <1951866+crazy-max@users.noreply.github.com> -Hayley Swimelar -Jose D. Gomez R -Shengjing Zhu -Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> -James Hewitt -Marcus Pettersen Irgens -Ben Manuel diff --git a/tools/vendor/github.com/docker/distribution/BUILDING.md b/tools/vendor/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index 4c43b03cb..000000000 --- a/tools/vendor/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,117 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. 
- -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/golang/lint/golint - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendor -directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry --version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `BUILDTAGS`. 
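The `--version` output above is produced by variables stamped into the binary at link time; the project Makefile (deleted later in this diff) passes `-X $(PKG)/version.Version=...`, `-X $(PKG)/version.Revision=...`, and `-X $(PKG)/version.Package=...` for exactly this. Below is a minimal sketch of how such link-time version stamping works — the variable names mirror the Makefile's `-X` targets, but the code itself is illustrative, not the vendored source:

```go
// Package version is a sketch of link-time version stamping.
package version

import "fmt"

// These defaults are placeholders: `go build -ldflags "-X <pkg>/version.Version=..."`
// overwrites them at link time. An untagged local build therefore reports
// something like "v2.0.0-alpha.1+unknown", matching the output shown above.
var (
	Package  = "github.com/docker/distribution"
	Version  = "v2.0.0-alpha.1+unknown"
	Revision = ""
)

// String renders the same "<package> <version>" line that
// `registry --version` prints.
func String() string {
	return fmt.Sprintf("%s %s", Package, Version)
}
```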
diff --git a/tools/vendor/github.com/docker/distribution/CONTRIBUTING.md b/tools/vendor/github.com/docker/distribution/CONTRIBUTING.md
deleted file mode 100644
index 4c067d9e7..000000000
--- a/tools/vendor/github.com/docker/distribution/CONTRIBUTING.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# Contributing to the registry
-
-## Before reporting an issue...
-
-### If your problem is with...
-
- - automated builds
- - your account on the [Docker Hub](https://hub.docker.com/)
- - any other [Docker Hub](https://hub.docker.com/) issue
-
-Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
-
-### If you...
-
- - need help setting up your registry
- - can't figure out something
- - are not sure what's going on or what your problem is
-
-Then please do not open an issue here yet - you should first try one of the following support forums:
-
- - irc: #docker-distribution on freenode
- - mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
-
-### Reporting security issues
-
-The Docker maintainers take security seriously. If you discover a security
-issue, please bring it to their attention right away!
-
-Please **DO NOT** file a public issue; instead send your report privately to
-[security@docker.com](mailto:security@docker.com).
-
-## Reporting an issue properly
-
-By following these simple rules you will get better and faster feedback on your issue.
-
- - search the bugtracker for an already reported issue
-
-### If you found an issue that describes your problem:
-
- - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- - please refrain from adding "same thing here" or "+1" comments
- - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- - comment if you have some new, technical and relevant information to add to the case
- - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
-
-### If you have not found an existing issue that describes your problem:
-
- 1. create a new issue, with a succinct title that describes your issue:
-    - bad title: "It doesn't work with my docker"
-    - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
- 2. copy the output of:
-    - `docker version`
-    - `docker info`
-    - `docker exec <registry-container> registry --version`
- 3. copy the command line you used to launch your Registry
- 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
- 5. reproduce your problem and get your docker daemon logs showing the error
- 6. if relevant, copy your registry logs that show the error
- 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
- 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
-
-## Contributing a patch for a known bug, or a small correction
-
-You should follow the basic GitHub workflow:
-
- 1. fork
- 2. commit a change
- 3. make sure the tests pass
- 4. PR
-
-Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work).
It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. - -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. 
- In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/tools/vendor/github.com/docker/distribution/Dockerfile b/tools/vendor/github.com/docker/distribution/Dockerfile deleted file mode 100644 index ebd42c242..000000000 --- a/tools/vendor/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,60 +0,0 @@ -# syntax=docker/dockerfile:1 - -ARG GO_VERSION=1.20.8 -ARG ALPINE_VERSION=3.18 -ARG XX_VERSION=1.2.1 - -FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx -FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base -COPY --from=xx / / -RUN apk add --no-cache bash coreutils file git -ENV GO111MODULE=auto -ENV CGO_ENABLED=0 -WORKDIR /go/src/github.com/docker/distribution - -FROM base AS version -ARG PKG="github.com/docker/distribution" -RUN --mount=target=. \ - VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \ - echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \ - echo -n "${VERSION}" | tee /tmp/.version; - -FROM base AS build -ARG TARGETPLATFORM -ARG LDFLAGS="-s -w" -ARG BUILDTAGS="include_oss,include_gcs" -RUN --mount=type=bind,target=/go/src/github.com/docker/distribution,rw \ - --mount=type=cache,target=/root/.cache/go-build \ - --mount=target=/go/pkg/mod,type=cache \ - --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \ - set -x ; xx-go build -tags "${BUILDTAGS}" -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \ - && xx-verify --static /usr/bin/registry - -FROM scratch AS binary -COPY --from=build /usr/bin/registry / - -FROM base AS releaser -ARG TARGETOS -ARG TARGETARCH -ARG TARGETVARIANT -WORKDIR /work -RUN --mount=from=binary,target=/build \ - --mount=type=bind,target=/src \ - --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \ - VERSION=$(cat /tmp/.version) \ - && mkdir -p /out \ - && cp /build/registry /src/README.md /src/LICENSE . 
\ - && tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \ - && sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256" - -FROM scratch AS artifact -COPY --from=releaser /out / - -FROM alpine:${ALPINE_VERSION} -RUN apk add --no-cache ca-certificates -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml -COPY --from=binary /registry /bin/registry -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/tools/vendor/github.com/docker/distribution/MAINTAINERS b/tools/vendor/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index 3183620c5..000000000 --- a/tools/vendor/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,243 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# - -[Rules] - - [Rules.maintainers] - - title = "What is a maintainer?" - - text = """ -There are different types of maintainers, with different responsibilities, but -all maintainers have 3 things in common: - -1) They share responsibility in the project's success. -2) They have made a long-term, recurring time investment to improve the project. -3) They spend that time doing whatever needs to be done, not necessarily what -is the most interesting or fun. - -Maintainers are often under-appreciated, because their work is harder to appreciate. -It's easy to appreciate a really cool and technically advanced feature. It's harder -to appreciate the absence of bugs, the slow but steady improvement in stability, -or the reliability of a release process. But those things distinguish a good -project from a great one. -""" - - [Rules.reviewer] - - title = "What is a reviewer?" - - text = """ -A reviewer is a core role within the project. -They share in reviewing issues and pull requests and their LGTM count towards the -required LGTM count to merge a code change into the project. - -Reviewers are part of the organization but do not have write access. -Becoming a reviewer is a core aspect in the journey to becoming a maintainer. -""" - - [Rules.adding-maintainers] - - title = "How are maintainers added?" - - text = """ -Maintainers are first and foremost contributors that have shown they are -committed to the long term success of a project. Contributors wanting to become -maintainers are expected to be deeply involved in contributing code, pull -request review, and triage of issues in the project for more than three months. - -Just contributing does not make you a maintainer, it is about building trust -with the current maintainers of the project and being a person that they can -depend on and trust to make decisions in the best interest of the project. - -Periodically, the existing maintainers curate a list of contributors that have -shown regular activity on the project over the prior months. From this list, -maintainer candidates are selected and proposed on the maintainers mailing list. 
- -After a candidate has been announced on the maintainers mailing list, the -existing maintainers are given five business days to discuss the candidate, -raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing -list. Only maintainers of the repository that the candidate is proposed for are -allowed to vote. - -If a candidate is approved, a maintainer will contact the candidate to invite -the candidate to open a pull request that adds the contributor to the -MAINTAINERS file. The candidate becomes a maintainer once the pull request is -merged. -""" - - [Rules.stepping-down-policy] - - title = "Stepping down policy" - - text = """ -Life priorities, interests, and passions can change. If you're a maintainer but -feel you must remove yourself from the list, inform other maintainers that you -intend to step down, and if possible, help find someone to pick up your work. -At the very least, ensure your work can be continued where you left off. - -After you've informed other maintainers, create a pull request to remove -yourself from the MAINTAINERS file. -""" - - [Rules.inactive-maintainers] - - title = "Removal of inactive maintainers" - - text = """ -Similar to the procedure for adding new maintainers, existing maintainers can -be removed from the list if they do not show significant activity on the -project. Periodically, the maintainers review the list of maintainers and their -activity over the last three months. - -If a maintainer has shown insufficient activity over this period, a neutral -person will contact the maintainer to ask if they want to continue being -a maintainer. If the maintainer decides to step down as a maintainer, they -open a pull request to be removed from the MAINTAINERS file. - -If the maintainer wants to remain a maintainer, but is unable to perform the -required duties they can be removed with a vote of at least 66% of -the current maintainers. An e-mail is sent to the -mailing list, inviting maintainers of the project to vote. The voting period is -five business days. Issues related to a maintainer's performance should be -discussed with them among the other maintainers so that they are not surprised -by a pull request removing them. -""" - - [Rules.decisions] - - title = "How are decisions made?" - - text = """ -Short answer: EVERYTHING IS A PULL REQUEST. - -distribution is an open-source project with an open design philosophy. This means -that the repository is the source of truth for EVERY aspect of the project, -including its philosophy, design, road map, and APIs. *If it's part of the -project, it's in the repo. If it's in the repo, it's part of the project.* - -As a result, all decisions can be expressed as changes to the repository. An -implementation change is a change to the source code. An API change is a change -to the API specification. A philosophy change is a change to the philosophy -manifesto, and so on. - -All decisions affecting distribution, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Merge or refuse the pull request. Who does this depends on the nature -of the pull request and which areas of the project it affects. 
-""" - - [Rules.DCO] - - title = "Helping contributors with the DCO" - - text = """ -The [DCO or `Sign your work`]( -https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) -requirement is not intended as a roadblock or speed bump. - -Some distribution contributors are not as familiar with `git`, or have used a web -based editor, and thus asking them to `git commit --amend -s` is not the best -way forward. - -In this case, maintainers can update the commits based on clause (c) of the DCO. -The most trivial way for a contributor to allow the maintainer to do this, is to -add a DCO signature in a pull requests's comment, or a maintainer can simply -note that the change is sufficiently trivial that it does not substantially -change the existing contribution - i.e., a spelling change. - -When you add someone's DCO, please also add your own to keep a log. -""" - - [Rules."no direct push"] - - title = "I'm a maintainer. Should I make pull requests too?" - - text = """ -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. -""" - - [Rules.tsc] - - title = "Conflict Resolution and technical disputes" - - text = """ -distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." - """ - - [Rules.meta] - - title = "How is this process changed?" - - text = "Just like everything else: by making a pull request :)" - -# Current project organization -[Org] - - [Org.Maintainers] - people = [ - "dmcgowan", - "dmp42", - "stevvooe", - ] - [Org.Reviewers] - people = [ - "manishtomar", - "caervs", - "davidswu", - "RobbKistler" - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.caervs] - Name = "Ryan Abrams" - Email = "rdabrams@gmail.com" - GitHub = "caervs" - - [people.davidswu] - Name = "David Wu" - Email = "dwu7401@gmail.com" - GitHub = "davidswu" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.manishtomar] - Name = "Manish Tomar" - Email = "manish.tomar@docker.com" - GitHub = "manishtomar" - - [people.RobbKistler] - Name = "Robb Kistler" - Email = "robb.kistler@docker.com" - GitHub = "RobbKistler" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/tools/vendor/github.com/docker/distribution/Makefile b/tools/vendor/github.com/docker/distribution/Makefile deleted file mode 100644 index dcdbcb547..000000000 --- a/tools/vendor/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,102 +0,0 @@ -# Root directory of the project (absolute path). -ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) -REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) - - -PKG=github.com/docker/distribution - -# Project packages. -PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) -INTEGRATION_PACKAGE=${PKG} -COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) - - -# Project binaries. 
-COMMANDS=registry digest registry-api-descriptor-template - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -WHALE = "+" - -# Go files -# -TESTFLAGS_RACE= -GOFILES=$(shell find . -type f -name '*.go') -GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) -GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' - -BINARIES=$(addprefix bin/,$(COMMANDS)) - -# Flags passed to `go test` -TESTFLAGS ?= -v $(TESTFLAGS_RACE) -TESTFLAGS_PARALLEL ?= 8 - -.PHONY: all build binaries check clean test test-race test-full integration coverage -.DEFAULT: all - -all: binaries - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - @echo "$(WHALE) $@" - ./version/version.sh > $@ - -check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") - @echo "$(WHALE) $@" - @GO111MODULE=off golangci-lint --build-tags "${BUILDTAGS}" run - -test: ## run tests, except integration test with test.short - @echo "$(WHALE) $@" - @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -test-race: ## run tests, except integration test with test.short and race - @echo "$(WHALE) $@" - @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -test-full: ## run tests, except integration tests - @echo "$(WHALE) $@" - @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -integration: ## run integration tests - @echo "$(WHALE) $@" - @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} - -coverage: ## generate coverprofiles from the unit tests - @echo "$(WHALE) $@" - @rm -f coverage.txt - @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null - @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ - go test ${GO_TAGS} ${TESTFLAGS} \ - -cover \ - -coverprofile=profile.out \ - -covermode=atomic $$pkg || exit; \ - if [ -f profile.out ]; then \ - cat profile.out >> coverage.txt; \ - rm profile.out; \ - fi; \ - done ) - -FORCE: - -# Build a binary from a cmd. -bin/%: cmd/% FORCE - @echo "$(WHALE) $@${BINARY_SUFFIX}" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< - -binaries: $(BINARIES) ## build binaries - @echo "$(WHALE) $@" - -build: - @echo "$(WHALE) $@" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) - -clean: ## clean up binaries - @echo "$(WHALE) $@" - @rm -f $(BINARIES) diff --git a/tools/vendor/github.com/docker/distribution/README.md b/tools/vendor/github.com/docker/distribution/README.md deleted file mode 100644 index e513c18e9..000000000 --- a/tools/vendor/github.com/docker/distribution/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository provides the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. 
-
-
-[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
-[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
-
-This repository contains the following components:
-
-|**Component**       |Description                                                                                                                                                                                          |
-|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
-| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
-| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec). |
-| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
-
-### How does this integrate with Docker engine?
-
-This project should provide an implementation of the V2 API for use in the [Docker
-core project](https://github.com/docker/docker). The API should be embeddable
-and simplify the process of securely pulling and pushing content from `docker`
-daemons.
-
-### What are the long-term goals of the Distribution project?
-
-The _Distribution_ project has the further long-term goal of providing a
-secure tool chain for distributing content. The specifications, APIs and tools
-should be as useful with Docker as they are without.
-
-Our goal is to design a professional-grade and extensible content distribution
-system that allows users to:
-
-* Enjoy an efficient, secure and reliable way to store, manage, package and
-  exchange content
-* Hack/roll their own on top of healthy open-source components
-* Implement their own home-made solution through good specs and solid
-  extension mechanisms.
-
-## More about Registry 2.0
-
-The new registry implementation provides the following benefits:
-
-- faster push and pull
-- new, more efficient implementation
-- simplified deployment
-- pluggable storage backend
-- webhook notifications
-
-For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
-
-### Who needs to deploy a registry?
-
-By default, Docker users pull images from Docker's public registry instance.
-[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
-ability. Users can also push images to a repository on Docker's public registry,
-if they have a [Docker Hub](https://hub.docker.com/) account.
-
-For some users and even companies, this default behavior is sufficient. For
-others, it is not.
-
-For example, users with their own software products may want to maintain a
-registry for private, company images. Also, you may wish to deploy your own
-image repository for images used in testing or continuous integration. For these
-use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
-may be the better choice.
- -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry -1.0 implementation and wish to deploy a Registry 2.0 while retaining images, -data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator](https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](BUILDING.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - -
-| Channel | Details |
-|---------------|------------------------------------------------------------------------|
-| IRC | #docker-distribution on FreeNode |
-| Issue Tracker | github.com/docker/distribution/issues |
-| Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution |
-| Mailing List | docker@dockerproject.org |
- - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/tools/vendor/github.com/docker/distribution/ROADMAP.md b/tools/vendor/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127afe..000000000 --- a/tools/vendor/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release- -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made in respect of the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. -- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. This was -primarily focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before. 
-
-We can take this further by saying that the new registry should be content
-agnostic. The registry provides a model of names, tags, manifests and content
-addresses, and that model can be used to work with content.
-
-#### Simplicity
-
-The new registry should be closer to a microservice component than its
-predecessor. This means it should have a narrower API and a low number of
-service dependencies. It should be easy to deploy.
-
-This means that other solutions should be explored before changing the API or
-adding extra dependencies. If functionality is required, can it be added as an
-extension or companion service?
-
-#### Extensibility
-
-The registry should provide extension points to add functionality, keeping
-the scope narrow while still allowing new capabilities to be layered on.
-
-Features like search, indexing, synchronization and registry explorers fall
-into this category. No such feature should be added unless we've found it
-impossible to do through an extension.
-
-#### Active Feature Discussions
-
-The following are feature discussions that are currently active.
-
-If you don't see your favorite, unimplemented feature, feel free to contact us
-via IRC or the mailing list and we can talk about adding it. The goal here is
-to make sure that new features go through a rigorous design process before
-landing in the registry.
-
-##### Proxying to other Registries
-
-A _pull-through caching_ mode exists for the registry, but is restricted from
-within the docker client to only mirror the official Docker Hub. This functionality
-can be expanded when image provenance has been specified and implemented in the
-distribution project.
-
-##### Metadata storage
-
-Metadata for the registry is currently stored with the manifest and layer data on
-the storage backend. While this is a big win for simplicity and reliably maintaining
-state, it comes with the cost of consistency and high latency. The mutable registry
-metadata operations should be abstracted behind an API which will allow ACID-compliant
-storage systems to handle metadata.
-
-##### Peer-to-Peer transfer
-
-Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
-
-##### Indexing, Search and Discovery
-
-The original registry provided some implementation of search for use with
-private registries. Support has been elided from V2 since we'd like to
-decouple search functionality from the registry. This makes the registry
-simpler to deploy, especially in use cases where search is not needed, and
-lets us decouple the image format from the registry.
-
-There are explorations into using the catalog API and notification system to
-build external indexes. The current line of thought is that we will define a
-common search API to index and query docker images. Such a system could be run
-as a companion to a registry or set of registries to power discovery.
-
-The main issue with search and discovery is that there are so many ways to
-accomplish it. There are two aspects to this project. The first is deciding
-how it will be done, including an API definition that can work with changing
-data formats. The second is the process of integrating with `docker search`.
-We expect that someone will attempt to address the problem with the existing
-tools and either propose it as a standard search API or use it to inform a
-standardization process. Once this has been explored, we integrate with the
-docker client.
-
-Please see the following for more detail:
-
-- https://github.com/docker/distribution/issues/206
-
-##### Deletes
-
-> __NOTE:__ Deletes are a much-requested feature. Before requesting this
-feature or participating in discussion, we ask that you read this section in
-full and understand the problems behind deletes.
-
-While, at first glance, implementing deletes seems simple, there are a number
-of mitigating factors that make many solutions not ideal or even pathological
-in the context of a registry. The following paragraphs discuss the background
-and approaches that could be applied to arrive at a solution.
-
-The goal of deletes in any system is to remove unused or unneeded data. Only
-data requested for deletion should be removed and no other data. Removing
-unintended data is worse than _not_ removing data that was requested for
-removal, but ideally both are supported. Generally, according to this rule, we
-err on the side of holding data longer than needed, ensuring that it is only
-removed when we can be certain that it can be removed. With the current
-behavior, we opt to hold onto the data forever, ensuring that data cannot be
-incorrectly removed.
-
-To understand the problems with implementing deletes, one must understand the
-data model. All registry data is stored in a filesystem layout, implemented on
-a "storage driver", effectively a _virtual file system_ (VFS). The storage
-system must assume that this VFS layer will be eventually consistent and has
-poor read-after-write consistency, since this is the lowest common denominator
-among the storage drivers. This is mitigated by writing values in
-reverse-dependent order, but makes wider transactional operations unsafe.
-
-Layered on the VFS model is a content-addressable _directed acyclic graph_
-(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
-Since the same data can be referenced by multiple manifests, we only store
-data once, even if it is in different repositories. Thus, we have a set of
-blobs, referenced by tags and manifests. If we want to delete a blob we need
-to be certain that it is no longer referenced by another manifest or tag. When
-we delete a manifest, we can also try to delete the referenced blobs. Deciding
-whether or not a blob has an active reference is the crux of the problem.
-
-Conceptually, deleting a manifest and its resources is quite simple. Just find
-all the manifests, enumerate the referenced blobs and delete the blobs not in
-that set. An astute observer will recognize this as a garbage collection
-problem. As with garbage collection in programming languages, this is very
-simple when one always has a consistent view. When one adds parallelism and an
-inconsistent view of data, it becomes very challenging.
-
-A simple example can demonstrate this. Let's say we are deleting a manifest
-_A_ in one process. We scan the manifest and decide that all the blobs are
-ready for deletion. Concurrently, we have another process accepting a new
-manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
-is accepted and all the blobs are considered present, so the operation
-proceeds. The original process then deletes the referenced blobs, assuming
-they were unreferenced. The manifest _B_, which we thought had all of its data
-present, can no longer be served by the registry, since the dependent data has
-been deleted.
-
-Deleting data from the registry safely requires some way to coordinate this
-operation.
The following approaches are being considered:
-
-- _Reference Counting_ - Maintain a count of references to each blob. This is
-  challenging for a number of reasons: (1) maintaining a consistent consensus
-  of reference counts across a set of registries, and (2) building the initial
-  list of reference counts for an existing registry. These challenges can be
-  met with a consensus protocol like Paxos or Raft in the first case, and a
-  necessary but simple scan in the second.
-- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
-  and find all blob references. Delete all unreferenced blobs. This approach
-  is very simple but requires disabling writes for a period of time while the
-  service reads all data. This is slow and expensive but very accurate and
-  effective (a sketch of this approach appears at the end of this roadmap).
-- _Generational GC_ - Do something similar to the above but instead of blocking
-  writes, writes are sent to another storage backend while reads are broadcast
-  to the new and old backends. GC is then performed on the read-only portion.
-  Because writes land in the new backend, the data in the read-only section
-  can be safely deleted. The main drawbacks of this approach are complexity
-  and coordination.
-- _Centralized Oracle_ - Using a centralized, transactional database, we can
-  know exactly which data is referenced at any given time. This avoids the
-  coordination problem by managing this data in a single location. We trade
-  off metadata scalability for simplicity and performance. This is a very good
-  option for most registry deployments, but it would create a bottleneck for
-  registry metadata. However, metadata is generally not the main bottleneck
-  when serving images.
-
-Please let us know if other solutions exist that we have yet to enumerate.
-Note that for any approach, implementation is a massive consideration. For
-example, a mark-sweep-based solution may seem simple, but the amount of work
-in coordination may offset the extra work it might take to build a
-_Centralized Oracle_. We'll accept proposals for any solution but please
-coordinate with us before dropping code.
-
-At this time, we have traded off simplicity and ease of deployment for disk
-space. Simplicity and ease of deployment tend to reduce developer involvement,
-which is currently the most expensive resource in software engineering. Taking
-on any solution for deletes will greatly affect these factors, trading off
-very cheap disk space for a complex deployment and operational story.
-
-Please see the following issues for more detail:
-
-- https://github.com/docker/distribution/issues/422
-- https://github.com/docker/distribution/issues/461
-- https://github.com/docker/distribution/issues/462
-
-### Distribution Package
-
-At its core, the Distribution Project is a set of Go packages that make up
-Distribution Components. At this time, most of these packages make up the
-Registry implementation.
-
-The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
-
-For feature additions, please see the Registry section. In the future, we may break out a
-separate Roadmap for distribution-specific features that apply to more than
-just the registry.
-
-***
-
-### Project Planning
-
-An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
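As referenced in the "Lock the World GC" bullet above, the enumeration interfaces that `blobs.go` and `manifests.go` (both deleted below in this diff) expose are enough to express the mark and sweep phases directly. A hedged sketch, assuming all writes are halted for the duration; the function name, package name, and the composite `gcStore` interface are illustrative, not part of the vendored package:

```go
package gcsketch // illustrative wrapper only

import (
	"context"

	"github.com/docker/distribution"
	"github.com/opencontainers/go-digest"
)

// gcStore is a hypothetical composite of the blob-side interfaces the
// sketch needs; the real package defines them separately.
type gcStore interface {
	distribution.BlobEnumerator
	distribution.BlobDeleter
}

// markAndSweep walks every manifest, marks each blob it references, then
// deletes any blob that was never marked. Correct only while all writes
// are halted, per the "Lock the World GC" approach described above.
func markAndSweep(ctx context.Context, enum distribution.ManifestEnumerator, manifests distribution.ManifestService, blobs gcStore) error {
	marked := make(map[digest.Digest]struct{})

	// Mark phase: manifests are themselves stored as blobs, so keep them too.
	if err := enum.Enumerate(ctx, func(dgst digest.Digest) error {
		marked[dgst] = struct{}{}
		m, err := manifests.Get(ctx, dgst)
		if err != nil {
			return err
		}
		for _, desc := range m.References() {
			marked[desc.Digest] = struct{}{}
		}
		return nil
	}); err != nil {
		return err
	}

	// Sweep phase: anything unmarked is unreferenced and safe to delete.
	return blobs.Enumerate(ctx, func(dgst digest.Digest) error {
		if _, ok := marked[dgst]; !ok {
			return blobs.Delete(ctx, dgst)
		}
		return nil
	})
}
```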
- diff --git a/tools/vendor/github.com/docker/distribution/blobs.go b/tools/vendor/github.com/docker/distribution/blobs.go deleted file mode 100644 index 671372abf..000000000 --- a/tools/vendor/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,265 +0,0 @@ -package distribution - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/distribution/reference" - "github.com/opencontainers/go-digest" - v1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - // ErrBlobExists returned when blob already exists - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported when blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown when blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown returned when upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength returned when the blob has an expected length on - // commit, meaning mismatched with the descriptor or an invalid value. - ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest returned when digest check fails. -type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // This should only be used when referring to a manifest. - Platform *v1.Platform `json:"platform,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. 
The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned. - Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotators canonical algorithm. - // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (ie sha256) of the annotator. - SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. -type BlobIngester interface { - // Put inserts the content p into the blob service, returning a descriptor - // or an error. 
- Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) - - // Create allocates a new blob writer to add a blob to this service. The - // returned handle can be written to and later resumed using an opaque - // identifier. With this approach, one can Close and Resume a BlobWriter - // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) - - // Resume attempts to resume a write to a blob, identified by an id. - Resume(ctx context.Context, id string) (BlobWriter, error) -} - -// BlobCreateOption is a general extensible function argument for blob creation -// methods. A BlobIngester may choose to honor any or none of the given -// BlobCreateOptions, which can be specific to the implementation of the -// BlobIngester receiving them. -// TODO (brianbland): unify this with ManifestServiceOption in the future -type BlobCreateOption interface { - Apply(interface{}) error -} - -// CreateOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type CreateOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - // Stat allows to pass precalculated descriptor to link and return. - // Blob access check will be skipped if set. - Stat *Descriptor - } -} - -// BlobWriter provides a handle for inserting data into a blob store. -// Instances should be obtained from BlobWriteService.Writer and -// BlobWriteService.Resume. If supported by the store, a writer can be -// recovered with the id. -type BlobWriter interface { - io.WriteCloser - io.ReaderFrom - - // Size returns the number of bytes written to this blob. - Size() int64 - - // ID returns the identifier for this writer. The ID can be used with the - // Blob service to later resume the write. - ID() string - - // StartedAt returns the time this blob write was started. - StartedAt() time.Time - - // Commit completes the blob writer process. The content is verified - // against the provided provisional descriptor, which may result in an - // error. Depending on the implementation, written data may be validated - // against the provisional descriptor fields. If MediaType is not present, - // the implementation may reject the commit or assign "application/octet- - // stream" to the blob. The returned descriptor may have a different - // digest depending on the blob store, referred to as the canonical - // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) - - // Cancel ends the blob write without storing any data and frees any - // associated resources. Any data written thus far will be lost. Cancel - // implementations should allow multiple calls even after a commit that - // result in a no-op. This allows use of Cancel in a defer statement, - // increasing the assurance that it is correctly called. - Cancel(ctx context.Context) error -} - -// BlobService combines the operations to access, read and write blobs. This -// can be used to describe remote blob services. -type BlobService interface { - BlobStatter - BlobProvider - BlobIngester -} - -// BlobStore represent the entire suite of blob related operations. Such an -// implementation can access, read, write, delete and serve blobs. 
-type BlobStore interface { - BlobService - BlobServer - BlobDeleter -} diff --git a/tools/vendor/github.com/docker/distribution/doc.go b/tools/vendor/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb708..000000000 --- a/tools/vendor/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/tools/vendor/github.com/docker/distribution/docker-bake.hcl b/tools/vendor/github.com/docker/distribution/docker-bake.hcl deleted file mode 100644 index 91686e608..000000000 --- a/tools/vendor/github.com/docker/distribution/docker-bake.hcl +++ /dev/null @@ -1,56 +0,0 @@ -group "default" { - targets = ["image-local"] -} - -// Special target: https://github.com/docker/metadata-action#bake-definition -target "docker-metadata-action" { - tags = ["registry:local"] -} - -target "binary" { - target = "binary" - output = ["./bin"] -} - -target "artifact" { - target = "artifact" - output = ["./bin"] -} - -target "artifact-all" { - inherits = ["artifact"] - platforms = [ - "linux/amd64", - "linux/arm/v6", - "linux/arm/v7", - "linux/arm64", - "linux/ppc64le", - "linux/s390x" - ] -} - -// Special target: https://github.com/docker/metadata-action#bake-definition -target "docker-metadata-action" { - tags = ["registry:local"] -} - -target "image" { - inherits = ["docker-metadata-action"] -} - -target "image-local" { - inherits = ["image"] - output = ["type=docker"] -} - -target "image-all" { - inherits = ["image"] - platforms = [ - "linux/amd64", - "linux/arm/v6", - "linux/arm/v7", - "linux/arm64", - "linux/ppc64le", - "linux/s390x" - ] -} diff --git a/tools/vendor/github.com/docker/distribution/errors.go b/tools/vendor/github.com/docker/distribution/errors.go deleted file mode 100644 index 8e0b788d6..000000000 --- a/tools/vendor/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,119 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1 -// manifest but the registry is configured to reject it -var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. 
-type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. Reason may set, indicating the cause of invalidity. -type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. -type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrManifestUnknownRevision is returned when a manifest cannot be found by -// revision within a repository. -type ErrManifestUnknownRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrManifestUnknownRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. -type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return "unverified manifest" -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - -// ErrManifestBlobUnknown returned when a referenced blob cannot be found. -type ErrManifestBlobUnknown struct { - Digest digest.Digest -} - -func (err ErrManifestBlobUnknown) Error() string { - return fmt.Sprintf("unknown blob %v on manifest", err.Digest) -} - -// ErrManifestNameInvalid should be used to denote an invalid manifest -// name. Reason may set, indicating the cause of invalidity. -type ErrManifestNameInvalid struct { - Name string - Reason error -} - -func (err ErrManifestNameInvalid) Error() string { - return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) -} diff --git a/tools/vendor/github.com/docker/distribution/manifests.go b/tools/vendor/github.com/docker/distribution/manifests.go deleted file mode 100644 index 8f84a220a..000000000 --- a/tools/vendor/github.com/docker/distribution/manifests.go +++ /dev/null @@ -1,125 +0,0 @@ -package distribution - -import ( - "context" - "fmt" - "mime" - - "github.com/opencontainers/go-digest" -) - -// Manifest represents a registry object specifying a set of -// references and an optional target -type Manifest interface { - // References returns a list of objects which make up this manifest. - // A reference is anything which can be represented by a - // distribution.Descriptor. These can consist of layers, resources or other - // manifests. - // - // While no particular order is required, implementations should return - // them from highest to lowest priority. For example, one might want to - // return the base layer before the top layer. - References() []Descriptor - - // Payload provides the serialized format of the manifest, in addition to - // the media type. 
-	Payload() (mediaType string, payload []byte, err error)
-}
-
-// ManifestBuilder creates a manifest allowing one to include dependencies.
-// Instances can be obtained from a version-specific manifest package. Manifest
-// specific data is passed into the function which creates the builder.
-type ManifestBuilder interface {
-	// Build creates the manifest from this builder.
-	Build(ctx context.Context) (Manifest, error)
-
-	// References returns a list of objects which have been added to this
-	// builder. The dependencies are returned in the order they were added,
-	// which should be from base to head.
-	References() []Descriptor
-
-	// AppendReference includes the given object in the manifest after any
-	// existing dependencies. If the add fails, such as when adding an
-	// unsupported dependency, an error may be returned.
-	//
-	// The destination of the reference is dependent on the manifest type and
-	// the dependency type.
-	AppendReference(dependency Describable) error
-}
-
-// ManifestService describes operations on image manifests.
-type ManifestService interface {
-	// Exists returns true if the manifest exists.
-	Exists(ctx context.Context, dgst digest.Digest) (bool, error)
-
-	// Get retrieves the manifest specified by the given digest.
-	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
-
-	// Put creates or updates the given manifest, returning the manifest digest.
-	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
-
-	// Delete removes the manifest specified by the given digest. Deleting
-	// a manifest that doesn't exist will return ErrManifestNotFound.
-	Delete(ctx context.Context, dgst digest.Digest) error
-}
-
-// ManifestEnumerator enables iterating over manifests.
-type ManifestEnumerator interface {
-	// Enumerate calls ingester for each manifest.
-	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
-}
-
-// Describable is an interface for descriptors.
-type Describable interface {
-	Descriptor() Descriptor
-}
-
-// ManifestMediaTypes returns the supported media types for manifests.
-func ManifestMediaTypes() (mediaTypes []string) {
-	for t := range mappings {
-		if t != "" {
-			mediaTypes = append(mediaTypes, t)
-		}
-	}
-	return
-}
-
-// UnmarshalFunc implements manifest unmarshalling for a given MediaType.
-type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
-
-var mappings = make(map[string]UnmarshalFunc)
-
-// UnmarshalManifest looks up a manifest unmarshal function based on
-// MediaType.
-func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
-	// Need to look up by the actual media type, not the raw contents of
-	// the header. Strip semicolons and anything following them.
-	var mediaType string
-	if ctHeader != "" {
-		var err error
-		mediaType, _, err = mime.ParseMediaType(ctHeader)
-		if err != nil {
-			return nil, Descriptor{}, err
-		}
-	}
-
-	unmarshalFunc, ok := mappings[mediaType]
-	if !ok {
-		unmarshalFunc, ok = mappings[""]
-		if !ok {
-			return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
-		}
-	}
-
-	return unmarshalFunc(p)
-}
-
-// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
-// This should be called from specific manifest packages, typically from an
-// init function.
-func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
-	if _, ok := mappings[mediaType]; ok {
-		return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
-	}
-	mappings[mediaType] = u
-	return nil
-}
diff --git a/tools/vendor/github.com/docker/distribution/metrics/prometheus.go b/tools/vendor/github.com/docker/distribution/metrics/prometheus.go
deleted file mode 100644
index b5a532144..000000000
--- a/tools/vendor/github.com/docker/distribution/metrics/prometheus.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package metrics
-
-import "github.com/docker/go-metrics"
-
-const (
-	// NamespacePrefix is the namespace of prometheus metrics
-	NamespacePrefix = "registry"
-)
-
-var (
-	// StorageNamespace is the prometheus namespace of blob/cache related operations
-	StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
-)
diff --git a/tools/vendor/github.com/docker/distribution/registry.go b/tools/vendor/github.com/docker/distribution/registry.go
deleted file mode 100644
index d0deee65d..000000000
--- a/tools/vendor/github.com/docker/distribution/registry.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package distribution
-
-import (
-	"context"
-
-	"github.com/distribution/reference"
-)
-
-// Scope defines the set of items that match a namespace.
-type Scope interface {
-	// Contains returns true if the name belongs to the namespace.
-	Contains(name string) bool
-}
-
-type fullScope struct{}
-
-func (f fullScope) Contains(string) bool {
-	return true
-}
-
-// GlobalScope represents the full namespace scope which contains
-// all other scopes.
-var GlobalScope = Scope(fullScope{})
-
-// Namespace represents a collection of repositories, addressable by name.
-// Generally, a namespace is backed by a set of one or more services,
-// providing facilities such as registry access, trust, and indexing.
-type Namespace interface {
-	// Scope describes the names that can be used with this Namespace. The
-	// global namespace will have a scope that matches all names. The scope
-	// effectively provides an identity for the namespace.
-	Scope() Scope
-
-	// Repository should return a reference to the named repository. The
-	// registry may or may not have the repository but should always return a
-	// reference.
-	Repository(ctx context.Context, name reference.Named) (Repository, error)
-
-	// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
-	// up to the size of 'repos' and returns the value 'n' for the number of entries
-	// which were filled. 'last' contains an offset in the catalog, and 'err' will be
-	// set to io.EOF if there are no more entries to obtain.
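The pagination contract in that comment (fill the slice, return n, signal completion with io.EOF) is easiest to see at a call site. A hedged sketch against the Repositories method declared immediately below; the allRepositories helper and the page size are illustrative:

package example

import (
	"context"
	"io"

	"github.com/docker/distribution"
)

// allRepositories drains the catalog: call Repositories repeatedly, advancing
// 'last' to the final name of each page, until the method reports io.EOF.
func allRepositories(ctx context.Context, ns distribution.Namespace) ([]string, error) {
	var all []string
	buf := make([]string, 100) // illustrative page size
	last := ""
	for {
		n, err := ns.Repositories(ctx, buf, last)
		all = append(all, buf[:n]...) // the final page may carry entries alongside io.EOF
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		if n == 0 { // defensive: avoid looping on an empty page
			return all, nil
		}
		last = buf[n-1]
	}
}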
- Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter to control - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error -} - -// RepositoryRemover removes given repository -type RepositoryRemover interface { - Remove(ctx context.Context, name reference.Named) error -} - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// WithManifestMediaTypes lists the media types the client wishes -// the server to provide. -func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { - return WithManifestMediaTypesOption{mediaTypes} -} - -// WithManifestMediaTypesOption holds a list of accepted media types -type WithManifestMediaTypesOption struct{ MediaTypes []string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. - Named() reference.Named - - // Manifests returns a reference to this repository's manifest service. - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients. This will allow such - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repositories tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. diff --git a/tools/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/tools/vendor/github.com/docker/distribution/registry/client/auth/api_version.go deleted file mode 100644 index 7d8f1d957..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/client/auth/api_version.go +++ /dev/null @@ -1,58 +0,0 @@ -package auth - -import ( - "net/http" - "strings" -) - -// APIVersion represents a version of an API including its -// type and version number. 
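To ground the APIVersion type declared below: a hedged sketch of version negotiation against a registry ping response. The header name is the one distribution-style registries conventionally advertise, and supportsRegistryV2 is a hypothetical helper.

package example

import (
	"net/http"
	"strings"

	"github.com/docker/distribution/registry/client/auth"
)

// supportsRegistryV2 reports whether the response advertises a "registry"
// API at major version 2, using the APIVersions helper from this file.
func supportsRegistryV2(resp *http.Response) bool {
	for _, v := range auth.APIVersions(resp, "Docker-Distribution-API-Version") {
		if v.Type == "registry" && strings.HasPrefix(v.Version, "2.") {
			return true
		}
	}
	return false
}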
-type APIVersion struct { - // Type refers to the name of a specific API specification - // such as "registry" - Type string - - // Version is the version of the API specification implemented, - // This may omit the revision number and only include - // the major and minor version, such as "2.0" - Version string -} - -// String returns the string formatted API Version -func (v APIVersion) String() string { - return v.Type + "/" + v.Version -} - -// APIVersions gets the API versions out of an HTTP response using the provided -// version header as the key for the HTTP header. -func APIVersions(resp *http.Response, versionHeader string) []APIVersion { - versions := []APIVersion{} - if versionHeader != "" { - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - for _, version := range strings.Fields(supportedVersions) { - versions = append(versions, ParseAPIVersion(version)) - } - } - } - return versions -} - -// ParseAPIVersion parses an API version string into an APIVersion -// Format (Expected, not enforced): -// API version string = '/' -// API type = [a-z][a-z0-9]* -// API version = [0-9]+(\.[0-9]+)? -// TODO(dmcgowan): Enforce format, add error condition, remove unknown type -func ParseAPIVersion(versionStr string) APIVersion { - idx := strings.IndexRune(versionStr, '/') - if idx == -1 { - return APIVersion{ - Type: "unknown", - Version: versionStr, - } - } - return APIVersion{ - Type: strings.ToLower(versionStr[:idx]), - Version: versionStr[idx+1:], - } -} diff --git a/tools/vendor/github.com/docker/distribution/registry/client/auth/session.go b/tools/vendor/github.com/docker/distribution/registry/client/auth/session.go deleted file mode 100644 index aad8a0e6f..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ /dev/null @@ -1,530 +0,0 @@ -package auth - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" -) - -var ( - // ErrNoBasicAuthCredentials is returned if a request can't be authorized with - // basic auth due to lack of credentials. - ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") - - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -const defaultClientID = "registry-client" - -// AuthenticationHandler is an interface for authorizing a request from -// params from a "WWW-Authenicate" header for a single scheme. -type AuthenticationHandler interface { - // Scheme returns the scheme as expected from the "WWW-Authenicate" header. - Scheme() string - - // AuthorizeRequest adds the authorization header to a request (if needed) - // using the parameters from "WWW-Authenticate" method. The parameters - // values depend on the scheme. 
- AuthorizeRequest(req *http.Request, params map[string]string) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - -// NewAuthorizer creates an authorizer which can handle multiple authentication -// schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. The challengeMap holds a list of challenges for -// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). -func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { - return &endpointAuthorizer{ - challenges: manager, - handlers: handlers, - } -} - -type endpointAuthorizer struct { - challenges challenge.Manager - handlers []AuthenticationHandler -} - -func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { - pingPath := req.URL.Path - if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { - pingPath = pingPath[:v2Root+4] - } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { - pingPath = pingPath[:v1Root] + "/v2/" - } else { - return nil - } - - ping := url.URL{ - Host: req.URL.Host, - Scheme: req.URL.Scheme, - Path: pingPath, - } - - challenges, err := ea.challenges.GetChallenges(ping) - if err != nil { - return err - } - - if len(challenges) > 0 { - for _, handler := range ea.handlers { - for _, c := range challenges { - if c.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { - return err - } - } - } - } - - return nil -} - -// This is the minimum duration a token can last (in seconds). -// A token must not live less than 60 seconds because older versions -// of the Docker client didn't read their expiration from the token -// response and assumed 60 seconds. So to remain compatible with -// those implementations, a token must live at least this long. -const minimumTokenLifetimeSeconds = 60 - -// Private interface for time used by this package to enable tests to provide their own implementation. -type clock interface { - Now() time.Time -} - -type tokenHandler struct { - creds CredentialStore - transport http.RoundTripper - clock clock - - offlineAccess bool - forceOAuth bool - clientID string - scopes []Scope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time - - logger Logger -} - -// Scope is a type which is serializable to a string -// using the allow scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Class string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - repoType := "repository" - // Keep existing format for image class to maintain backwards compatibility - // with authorization servers which do not support the expanded grammar. 
- if rs.Class != "" && rs.Class != "image" { - repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) - } - return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) -} - -// RegistryScope represents a token scope for access -// to resources in the registry. -type RegistryScope struct { - Name string - Actions []string -} - -// String returns the string representation of the user -// using the scope grammar -func (rs RegistryScope) String() string { - return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) -} - -// Logger defines the injectable logging interface, used on TokenHandlers. -type Logger interface { - Debugf(format string, args ...interface{}) -} - -func logDebugf(logger Logger, format string, args ...interface{}) { - if logger == nil { - return - } - logger.Debugf(format, args...) -} - -// TokenHandlerOptions is used to configure a new token handler -type TokenHandlerOptions struct { - Transport http.RoundTripper - Credentials CredentialStore - - OfflineAccess bool - ForceOAuth bool - ClientID string - Scopes []Scope - Logger Logger -} - -// An implementation of clock for providing real time data. -type realClock struct{} - -// Now implements clock -func (realClock) Now() time.Time { return time.Now() } - -// NewTokenHandler creates a new AuthenicationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - // Create options... - return NewTokenHandlerWithOptions(TokenHandlerOptions{ - Transport: transport, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: scope, - Actions: actions, - }, - }, - }) -} - -// NewTokenHandlerWithOptions creates a new token handler using the provided -// options structure. -func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { - handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - offlineAccess: options.OfflineAccess, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, - logger: options.Logger, - } - - return handler -} - -func (th *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: th.transport, - Timeout: 15 * time.Second, - } -} - -func (th *tokenHandler) Scheme() string { - return "bearer" -} - -func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - var additionalScopes []string - if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ - Repository: fromParam, - Actions: []string{"pull"}, - }.String()) - } - - token, err := th.getToken(params, additionalScopes...) 
- if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return nil -} - -func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { - th.tokenLock.Lock() - defer th.tokenLock.Unlock() - scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) - for _, scope := range th.scopes { - scopes = append(scopes, scope.String()) - } - var addedScopes bool - for _, scope := range additionalScopes { - if hasScope(scopes, scope) { - continue - } - scopes = append(scopes, scope) - addedScopes = true - } - - now := th.clock.Now() - if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params, scopes) - if err != nil { - return "", err - } - - // do not update cache for added scope tokens - if !addedScopes { - th.tokenCache = token - th.tokenExpiration = expiration - } - - return token, nil - } - - return th.tokenCache, nil -} - -func hasScope(scopes []string, scope string) bool { - for _, s := range scopes { - if s == scope { - return true - } - } - return false -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { - form := url.Values{} - form.Set("scope", strings.Join(scopes, " ")) - form.Set("service", service) - - clientID := th.clientID - if clientID == "" { - // Use default client, this is a required field - clientID = defaultClientID - } - form.Set("client_id", clientID) - - if refreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", refreshToken) - } else if th.creds != nil { - form.Set("grant_type", "password") - username, password := th.creds.Basic(realm) - form.Set("username", username) - form.Set("password", password) - - // attempt to get a refresh token - form.Set("access_type", "offline") - } else { - // refuse to do oauth without a grant type - return "", time.Time{}, fmt.Errorf("no supported grant type") - } - - resp, err := th.client().PostForm(realm.String(), form) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. 
- tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { - - req, err := http.NewRequest("GET", realm.String(), nil) - if err != nil { - return "", time.Time{}, err - } - - reqParams := req.URL.Query() - - if service != "" { - reqParams.Add("service", service) - } - - for _, scope := range scopes { - reqParams.Add("scope", scope) - } - - if th.offlineAccess { - reqParams.Add("offline_token", "true") - clientID := th.clientID - if clientID == "" { - clientID = defaultClientID - } - reqParams.Add("client_id", clientID) - } - - if th.creds != nil { - username, password := th.creds.Basic(realm) - if username != "" && password != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := th.client().Do(req) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && th.creds != nil { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", time.Time{}, ErrNoToken - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. - tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { - realm, ok := params["realm"] - if !ok { - return "", time.Time{}, errors.New("no realm specified for token auth challenge") - } - - // TODO(dmcgowan): Handle empty scheme and relative realm - realmURL, err := url.Parse(realm) - if err != nil { - return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - service := params["service"] - - var refreshToken string - - if th.creds != nil { - refreshToken = th.creds.RefreshToken(realmURL, service) - } - - if refreshToken != "" || th.forceOAuth { - return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) - } - - return th.fetchTokenWithBasicAuth(realmURL, service, scopes) -} - -type basicHandler struct { - creds CredentialStore -} - -// NewBasicHandler creaters a new authentiation handler which adds -// basic authentication credentials to a request. 
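Taken together, NewAuthorizer, NewTokenHandler and NewBasicHandler (declared next) compose into a request-modifying transport. A hedged sketch of the wiring; it assumes the sibling challenge and transport packages this file already imports, and a caller-supplied CredentialStore implementation:

package example

import (
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/auth/challenge"
	"github.com/docker/distribution/registry/client/transport"
)

// newAuthedTransport wires a challenge manager to an authorizer, with the
// bearer-token handler tried before basic auth, matching the priority rule
// documented on NewAuthorizer.
func newAuthedTransport(creds auth.CredentialStore, repo string) http.RoundTripper {
	manager := challenge.NewSimpleManager()
	tokenHandler := auth.NewTokenHandler(http.DefaultTransport, creds, repo, "pull")
	basicHandler := auth.NewBasicHandler(creds)
	return transport.NewTransport(http.DefaultTransport,
		auth.NewAuthorizer(manager, tokenHandler, basicHandler))
}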
-func NewBasicHandler(creds CredentialStore) AuthenticationHandler { - return &basicHandler{ - creds: creds, - } -} - -func (*basicHandler) Scheme() string { - return "basic" -} - -func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if bh.creds != nil { - username, password := bh.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return ErrNoBasicAuthCredentials -} diff --git a/tools/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/tools/vendor/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index dac030c73..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,164 +0,0 @@ -package client - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time 
{ - return hbu.startedAt -} - -func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff --git a/tools/vendor/github.com/docker/distribution/registry/client/errors.go b/tools/vendor/github.com/docker/distribution/registry/client/errors.go deleted file mode 100644 index ce9902034..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/client/errors.go +++ /dev/null @@ -1,160 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "mime" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth/challenge" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(resp *http.Response) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - statusCode := resp.StatusCode - ctHeader := resp.Header.Get("Content-Type") - - if ctHeader == "" { - return makeError(statusCode, string(body)) - } - - contentType, _, err := mime.ParseMediaType(ctHeader) - if err != nil { - return fmt.Errorf("failed parsing content-type: %w", err) - } - - if contentType != "application/json" && contentType != "application/vnd.api+json" { - return makeError(statusCode, string(body)) - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. 
- var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - return makeError(statusCode, detailsErr.Details) - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -func makeError(statusCode int, details string) error { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(details) - case http.StatusForbidden: - return errcode.ErrorCodeDenied.WithMessage(details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(details) - default: - return errcode.ErrorCodeUnknown.WithMessage(details) - } -} - -func makeErrorList(err error) []error { - if errL, ok := err.(errcode.Errors); ok { - return []error(errL) - } - return []error{err} -} - -func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) -} - -// HandleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. -func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - // Check for OAuth errors within the `WWW-Authenticate` header first - // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range challenge.ResponseChallenges(resp) { - if c.Scheme == "bearer" { - var err errcode.Error - // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 - switch c.Parameters["error"] { - case "invalid_token": - err.Code = errcode.ErrorCodeUnauthorized - case "insufficient_scope": - err.Code = errcode.ErrorCodeDenied - default: - continue - } - if description := c.Parameters["error_description"]; description != "" { - err.Message = description - } else { - err.Message = err.Code.Message() - } - return mergeErrors(err, parseHTTPErrorResponse(resp)) - } - } - err := parseHTTPErrorResponse(resp) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). 
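A hedged sketch of the call-site pattern these helpers support (SuccessStatus is declared just below); the getJSON helper is hypothetical and error handling is minimal:

package example

import (
	"encoding/json"
	"net/http"

	"github.com/docker/distribution/registry/client"
)

// getJSON treats any status outside SuccessStatus as a failure and lets
// HandleErrorResponse convert a 4xx body into structured errcode errors.
func getJSON(c *http.Client, url string, v interface{}) error {
	resp, err := c.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		return client.HandleErrorResponse(resp)
	}
	return json.NewDecoder(resp.Body).Decode(v)
}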
-func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 -} diff --git a/tools/vendor/github.com/docker/distribution/registry/client/repository.go b/tools/vendor/github.com/docker/distribution/registry/client/repository.go deleted file mode 100644 index fd42a1e66..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/client/repository.go +++ /dev/null @@ -1,870 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/distribution/reference" - "github.com/docker/distribution" - v2 "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/opencontainers/go-digest" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value. - hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return ®istry{ - client: client, - ub: ub, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder -} - -// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - copy(entries, ctlg.Repositories) - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - listURLStr, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - listURL, err := url.Parse(listURLStr) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(listURL.String()) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
- if link := resp.Header.Get("Link"); link != "" { - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) - if err != nil { - return tags, err - } - - listURL = listURL.ResolveReference(linkURL) - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.Parse(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - newRequest := func(method string) (*http.Response, error) { - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - resp, err := t.client.Do(req) - return resp, err - } - - resp, err := newRequest("HEAD") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: - // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers - return descriptorFromResponse(resp) - default: - // if the response is an error - there will be no body to decode. 
-		// Issue a GET request:
-		//   - for data from a server that does not handle HEAD
-		//   - to get error details in case of a failure
-		resp, err = newRequest("GET")
-		if err != nil {
-			return distribution.Descriptor{}, err
-		}
-		defer resp.Body.Close()
-
-		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
-			return descriptorFromResponse(resp)
-		}
-		return distribution.Descriptor{}, HandleErrorResponse(resp)
-	}
-}
-
-func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
-	panic("not implemented")
-}
-
-func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
-	panic("not implemented")
-}
-
-func (t *tags) Untag(ctx context.Context, tag string) error {
-	panic("not implemented")
-}
-
-type manifests struct {
-	name   reference.Named
-	ub     *v2.URLBuilder
-	client *http.Client
-	etags  map[string]string
-}
-
-func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
-	ref, err := reference.WithDigest(ms.name, dgst)
-	if err != nil {
-		return false, err
-	}
-	u, err := ms.ub.BuildManifestURL(ref)
-	if err != nil {
-		return false, err
-	}
-
-	resp, err := ms.client.Head(u)
-	if err != nil {
-		return false, err
-	}
-
-	if SuccessStatus(resp.StatusCode) {
-		return true, nil
-	} else if resp.StatusCode == http.StatusNotFound {
-		return false, nil
-	}
-	return false, HandleErrorResponse(resp)
-}
-
-// AddEtagToTag allows a client to supply an eTag to Get which will be
-// used for a conditional HTTP request. If the eTag matches, a nil manifest
-// and ErrManifestNotModified error will be returned. etag is automatically
-// quoted when added to this map.
-func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
-	return etagOption{tag, etag}
-}
-
-type etagOption struct{ tag, etag string }
-
-func (o etagOption) Apply(ms distribution.ManifestService) error {
-	if ms, ok := ms.(*manifests); ok {
-		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
-		return nil
-	}
-	return fmt.Errorf("etag option is a client-only option")
-}
-
-// ReturnContentDigest allows a client to set the content digest on
-// a successful request from the 'Docker-Content-Digest' header. This
-// returned digest represents the digest which the registry uses
-// to refer to the content and can be used to delete the content.
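A hedged sketch combining the client-only options from this file (ReturnContentDigest is declared next): fetch by tag, capture the registry's canonical digest, and reuse a cached etag so an unchanged manifest surfaces as distribution.ErrManifestNotModified. The getByTag helper is hypothetical.

package example

import (
	"context"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/client"
	"github.com/opencontainers/go-digest"
)

// getByTag fetches a manifest by tag, records the Docker-Content-Digest the
// registry reports, and sends If-None-Match when a previous etag is known.
func getByTag(ctx context.Context, ms distribution.ManifestService, tag, etag string) (distribution.Manifest, digest.Digest, error) {
	var dgst digest.Digest
	opts := []distribution.ManifestServiceOption{
		distribution.WithTag(tag),
		client.ReturnContentDigest(&dgst),
	}
	if etag != "" {
		opts = append(opts, client.AddEtagToTag(tag, etag))
	}
	m, err := ms.Get(ctx, "", opts...) // digest argument is unused when a tag option is present
	return m, dgst, err
}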
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - mediaTypes []string - ) - - for _, option := range options { - switch opt := option.(type) { - case distribution.WithTagOption: - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - case contentDigestOption: - contentDgst = opt.digest - case distribution.WithManifestMediaTypesOption: - mediaTypes = opt.MediaTypes - default: - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - if len(mediaTypes) == 0 { - mediaTypes = distribution.ManifestMediaTypes() - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range mediaTypes { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
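Before the implementation below, a hedged sketch of the two upload paths that comment describes: tagged Put versus Put by canonical digest. The pushManifest helper is illustrative only.

package example

import (
	"context"

	"github.com/docker/distribution"
	"github.com/opencontainers/go-digest"
)

// pushManifest uploads m under a tag when one is given; otherwise the client
// derives the canonical digest from the payload and uploads by digest.
func pushManifest(ctx context.Context, ms distribution.ManifestService, m distribution.Manifest, tag string) (digest.Digest, error) {
	if tag != "" {
		return ms.Put(ctx, m, distribution.WithTag(tag))
	}
	return ms.Put(ctx, m)
}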
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.Parse(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.Digester() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - resp, err := bs.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/tools/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/tools/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index 9120dbed6..000000000 --- 
a/tools/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,249 +0,0 @@ -package transport - -import ( - "errors" - "fmt" - "io" - "net/http" - "regexp" - "strconv" -) - -var ( - contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`) - - // ErrWrongCodeForByteRange is returned if the client sends a request - // with a Range header but the server returns a 2xx or 3xx code other - // than 206 Partial Content. - ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") -) - -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET -// request. When seeking and starting a read from a non-zero offset, -// a "Range" header will be added which sets the offset. -// TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { - return &httpReadSeeker{ - client: client, - url: url, - errorHandler: errorHandler, - } -} - -type httpReadSeeker struct { - client *http.Client - url string - - // errorHandler creates an error from an unsuccessful HTTP response. - // This allows the error to be created with the HTTP response body - // without leaking the body through a returned error. - errorHandler func(*http.Response) error - - size int64 - - // rc is the remote read closer. - rc io.ReadCloser - // readerOffset tracks the offset as of the last read. - readerOffset int64 - // seekOffset allows Seek to override the offset. Seek changes - // seekOffset instead of changing readOffset directly so that - // connection resets can be delayed and possibly avoided if the - // seek is undone (i.e. seeking to the end and then back to the - // beginning). - seekOffset int64 - err error -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.err != nil { - return 0, hrs.err - } - - // If we sought to a different position, we need to reset the - // connection. This logic is here instead of Seek so that if - // a seek is undone before the next read, the connection doesn't - // need to be closed and reopened. A common example of this is - // seeking to the end to determine the length, and then seeking - // back to the original position. - if hrs.readerOffset != hrs.seekOffset { - hrs.reset() - } - - hrs.readerOffset = hrs.seekOffset - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.seekOffset += int64(n) - hrs.readerOffset += int64(n) - - return n, err -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.err != nil { - return 0, hrs.err - } - - lastReaderOffset := hrs.readerOffset - - if whence == io.SeekStart && hrs.rc == nil { - // If no request has been made yet, and we are seeking to an - // absolute position, set the read offset as well to avoid an - // unnecessary request.
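The offset bookkeeping in this type lets the common size probe (seek to the end, seek back, then read) avoid closing and reopening the connection. A usage sketch of the read-seeker, where blobURL is a placeholder for a registry blob endpoint and not part of the vendored file:

package example

import (
	"io"
	"net/http"
	"os"

	"github.com/docker/distribution/registry/client/transport"
)

// copyBlob streams an HTTP resource through the read-seeker. The initial
// seek to the end issues the GET and learns the length; the rewind is
// then absorbed without tearing down the connection.
func copyBlob(blobURL string) (int64, error) {
	rsc := transport.NewHTTPReadSeeker(http.DefaultClient, blobURL, nil)
	defer rsc.Close()

	size, err := rsc.Seek(0, io.SeekEnd) // probe the length
	if err != nil {
		return 0, err
	}
	if _, err := rsc.Seek(0, io.SeekStart); err != nil { // rewind, no reconnect
		return 0, err
	}
	if _, err := io.Copy(os.Stdout, rsc); err != nil {
		return size, err
	}
	return size, nil
}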
- hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case io.SeekCurrent: - newOffset += offset - case io.SeekEnd: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case io.SeekStart: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git 
a/tools/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/tools/vendor/github.com/docker/distribution/registry/client/transport/transport.go deleted file mode 100644 index 30e45fab0..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/client/transport/transport.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "sync" -) - -// RequestModifier represents an object which will do an inplace -// modification of an HTTP request. -type RequestModifier interface { - ModifyRequest(*http.Request) error -} - -type headerModifier http.Header - -// NewHeaderRequestModifier returns a new RequestModifier which will -// add the given headers to a request. -func NewHeaderRequestModifier(header http.Header) RequestModifier { - return headerModifier(header) -} - -func (h headerModifier) ModifyRequest(req *http.Request) error { - for k, s := range http.Header(h) { - req.Header[k] = append(req.Header[k], s...) - } - - return nil -} - -// NewTransport creates a new transport which will apply modifiers to -// the request on a RoundTrip call. -func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } -} - -// transport is an http.RoundTripper that makes HTTP requests after -// copying and modifying the request -type transport struct { - Modifiers []RequestModifier - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := cloneRequest(req) - for _, modifier := range t.Modifiers { - if err := modifier.ModifyRequest(req2); err != nil { - return nil, err - } - } - - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
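NewTransport clones each request, applies every RequestModifier, and delegates to the base round-tripper (http.DefaultTransport when base is nil). A minimal sketch of wiring a static header through it; token is a placeholder supplied by the caller:

package example

import (
	"net/http"

	"github.com/docker/distribution/registry/client/transport"
)

// newAuthClient builds an http.Client whose requests all carry a static
// Authorization header, applied by the modifier on each RoundTrip.
func newAuthClient(token string) *http.Client {
	rt := transport.NewTransport(nil, // nil base falls back to http.DefaultTransport
		transport.NewHeaderRequestModifier(http.Header{
			"Authorization": []string{"Bearer " + token},
		}))
	return &http.Client{Transport: rt}
}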
- } - - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/tools/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/tools/vendor/github.com/docker/distribution/registry/storage/cache/cache.go deleted file mode 100644 index 10a390919..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/storage/cache/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package cache provides facilities to speed up access to the storage -// backend. -package cache - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// BlobDescriptorCacheProvider provides repository scoped -// BlobDescriptorService cache instances and a global descriptor cache. -type BlobDescriptorCacheProvider interface { - distribution.BlobDescriptorService - - RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) -} - -// ValidateDescriptor provides a helper function to ensure that caches have -// common criteria for admitting descriptors. -func ValidateDescriptor(desc distribution.Descriptor) error { - if err := desc.Digest.Validate(); err != nil { - return err - } - - if desc.Size < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) - } - - if desc.MediaType == "" { - return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) - } - - return nil -} diff --git a/tools/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/tools/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go deleted file mode 100644 index ac4c45211..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ /dev/null @@ -1,129 +0,0 @@ -package cache - -import ( - "context" - - "github.com/docker/distribution" - prometheus "github.com/docker/distribution/metrics" - "github.com/opencontainers/go-digest" -) - -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// Logger can be provided on the MetricsTracker to log errors. -// -// Usually, this is just a proxy to dcontext.GetLogger. -type Logger interface { - Errorf(format string, args ...interface{}) -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. -type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics - Logger(context.Context) Logger -} - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobDescriptorService - tracker MetricsTracker -} - -var ( - // cacheCount is the number of total cache request received/hits/misses - cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") -) - -// NewCachedBlobStatter creates a new statter which prefers a cache and -// falls back to a backend. 
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - } -} - -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will be sent to the tracker. -func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - cacheCount.WithValues("Request").Inc(1) - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - cacheCount.WithValues("Hit").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - cacheCount.WithValues("Miss").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err - -} - -func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - err := cbds.cache.Clear(ctx, dgst) - if err != nil { - return err - } - - err = cbds.backend.Clear(ctx, dgst) - if err != nil { - return err - } - return nil -} - -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - return nil -} - -func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { - if tracker == nil { - return - } - - logger := tracker.Logger(ctx) - if logger == nil { - return - } - logger.Errorf(format, args...) -} diff --git a/tools/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/tools/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go deleted file mode 100644 index f2953b02c..000000000 --- a/tools/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ /dev/null @@ -1,179 +0,0 @@ -package memory - -import ( - "context" - "sync" - - "github.com/distribution/reference" - "github.com/docker/distribution" - "github.com/docker/distribution/registry/storage/cache" - "github.com/opencontainers/go-digest" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for -// storing blob descriptor data.
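Together with the in-memory provider whose constructor follows below, the statter above forms a two-level lookup: cache first, then backend, with the result written back to the cache on a miss. A wiring sketch, where backend stands for any distribution.BlobDescriptorService and the repository name is hypothetical:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/cache/memory"
)

// newCachingStatter layers an in-memory descriptor cache over backend.
// Stat hits are served from memory; misses fall through and are recorded.
func newCachingStatter(backend distribution.BlobDescriptorService) (distribution.BlobDescriptorService, error) {
	provider := memory.NewInMemoryBlobDescriptorCacheProvider()
	repoCache, err := provider.RepositoryScoped("library/ubuntu") // hypothetical repo name
	if err != nil {
		return nil, err
	}
	return cache.NewCachedBlobStatter(repoCache, backend), nil
}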
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNormalizedNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return repo.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.ErrBlobUnknown - } - - return repo.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - if repo == nil { - // allocate map since we are setting it now. - var ok bool - // have to read back value since we may have allocated elsewhere. - repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - repo = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = repo - } - rsimbdcp.repository = repo - } - rsimbdcp.parent.mu.Unlock() - - if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/tools/vendor/github.com/docker/distribution/tags.go b/tools/vendor/github.com/docker/distribution/tags.go deleted file mode 100644 index f22df2b85..000000000 --- a/tools/vendor/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest. 
- Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/tools/vendor/github.com/docker/distribution/vendor.conf b/tools/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index 20818428f..000000000 --- a/tools/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,52 +0,0 @@ -github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b -github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 -github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 -github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go 4bbdd8ac624fc7a9ef7aec841c43d99b5fe65a29 https://github.com/golang-jwt/jwt.git # v3.2.2 -github.com/distribution/reference 49c28499d219290c3226822e9cfcd4ede6d75379 # v0.5.0 -github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f -github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 -github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 -github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 -github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c -github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 -github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd -github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd -github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 
975617b05ea8a58727e6c1a06b6161ff4185a9f2 -google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 v2.2.1 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest ea51bea511f75cfa3ef6098cc253c5c3609b037a # v1.0.0 -github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2 diff --git a/tools/vendor/github.com/docker/docker-credential-helpers/client/command.go b/tools/vendor/github.com/docker/docker-credential-helpers/client/command.go index 1936234be..93863480b 100644 --- a/tools/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ b/tools/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -15,27 +15,30 @@ type Program interface { // ProgramFunc is a type of function that initializes programs based on arguments. type ProgramFunc func(args ...string) Program -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) +// NewShellProgramFunc creates a [ProgramFunc] to run the command in a [Shell]. +func NewShellProgramFunc(command string) ProgramFunc { + return func(args ...string) Program { + return createProgramCmdRedirectErr(command, args, nil) + } } -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { +// NewShellProgramFuncWithEnv creates a [ProgramFunc] to run the command +// in a [Shell] with the given environment variables. +func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc { return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + return createProgramCmdRedirectErr(command, args, env) } } -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...) +func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell { + ec := exec.Command(command, args...) if env != nil { for k, v := range *env { - programCmd.Env = append(programCmd.Environ(), k+"="+v) + ec.Env = append(ec.Environ(), k+"="+v) } } - programCmd.Stderr = os.Stderr - return programCmd + ec.Stderr = os.Stderr + return &Shell{cmd: ec} } // Shell invokes shell commands to talk with a remote credentials-helper. diff --git a/tools/vendor/github.com/docker/docker/AUTHORS b/tools/vendor/github.com/docker/docker/AUTHORS index 5f93eeb4e..c7c649471 100644 --- a/tools/vendor/github.com/docker/docker/AUTHORS +++ b/tools/vendor/github.com/docker/docker/AUTHORS @@ -2,7 +2,10 @@ # This file lists all contributors to the repository. # See hack/generate-authors.sh to make modifications. +17neverends +7sunarni <710720732@qq.com> Aanand Prasad +Aarni Koskela Aaron Davidson Aaron Feng Aaron Hnatiw @@ -11,6 +14,7 @@ Aaron L.
Xu Aaron Lehmann Aaron Welch Aaron Yoshitake +Abdur Rehman Abel Muiño Abhijeet Kasurde Abhinandan Prativadi @@ -24,9 +28,11 @@ Adam Avilla Adam Dobrawy Adam Eijdenberg Adam Kunk +Adam Lamers Adam Miller Adam Mills Adam Pointer +Adam Simon Adam Singer Adam Thornton Adam Walz @@ -119,6 +125,7 @@ amangoel Amen Belayneh Ameya Gawde Amir Goldstein +AmirBuddy Amit Bakshi Amit Krishnan Amit Shukla @@ -168,6 +175,7 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins +Andrés Maldonado Andy Chambers andy diller Andy Goldstein @@ -182,6 +190,7 @@ Anes Hasicic Angel Velazquez Anil Belur Anil Madhavapeddy +Anirudh Aithal Ankit Jain Ankush Agarwal Anonmily @@ -219,7 +228,8 @@ Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge -Austin Vazquez +Ashly Mathew +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -285,6 +295,7 @@ Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon +Brendon Smith Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brent Salisbury Brett Higgins @@ -339,12 +350,14 @@ Casey Bisson Catalin Pirvu Ce Gao Cedric Davies +Cesar Talledo Cezar Sa Espinola Chad Swenson Chance Zibolski Chander Govindarajan Chanhun Jeong Chao Wang +Charity Kathure Charles Chan Charles Hooper Charles Law @@ -366,6 +379,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chengyu Zhu Chentianze Chenyang Yan chenyuzhu @@ -480,6 +494,7 @@ Daniel Farrell Daniel Garcia Daniel Gasienica Daniel Grunwell +Daniel Guns Daniel Helfand Daniel Hiltgen Daniel J Walsh @@ -763,6 +778,7 @@ Frank Macreery Frank Rosquin Frank Villaro-Dixon Frank Yang +François Scala Fred Lifton Frederick F. Kautz IV Frederico F. de Oliveira @@ -798,6 +814,7 @@ GennadySpb Geoff Levand Geoffrey Bachelet Geon Kim +George Adams George Kontridze George Ma George MacRorie @@ -826,6 +843,7 @@ Gopikannan Venugopalsamy Gosuke Miyashita Gou Rao Govinda Fichtner +Grace Choi Grant Millar Grant Reaber Graydon Hoare @@ -966,6 +984,7 @@ James Nugent James Sanders James Turnbull James Watkins-Harvey +Jameson Hyde Jamie Hannaford Jamshid Afshar Jan Breig @@ -1064,13 +1083,16 @@ Jim Perrin Jimmy Cuadra Jimmy Puckett Jimmy Song +jinjiadu Jinsoo Park Jintao Zhang Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +jjimbo137 <115816493+jjimbo137@users.noreply.github.com> Joakim Roubert +Joan Grau Joao Fernandes Joao Trindade Joe Beda @@ -1155,6 +1177,7 @@ Josiah Kiehl José Tomás Albornoz Joyce Jang JP +JSchltggr Julian Taylor Julien Barbier Julien Bisconti @@ -1189,6 +1212,7 @@ K. 
Heller Kai Blin Kai Qiang Wu (Kennan) Kaijie Chen +Kaita Nakamura Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1263,6 +1287,7 @@ Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene +Kristian Heljas Kristina Zabunova Krystian Wojcicki Kunal Kushwaha @@ -1289,6 +1314,7 @@ Laura Brehm Laura Frank Laurent Bernaille Laurent Erignoux +Laurent Goderre Laurie Voss Leandro Motta Barros Leandro Siqueira @@ -1369,6 +1395,7 @@ Madhan Raj Mookkandy Madhav Puri Madhu Venugopal Mageee +maggie44 <64841595+maggie44@users.noreply.github.com> Mahesh Tiyyagura malnick Malte Janduda @@ -1462,6 +1489,7 @@ Matthias Kühnle Matthias Rampke Matthieu Fronton Matthieu Hauglustaine +Matthieu MOREL Mattias Jernberg Mauricio Garavaglia mauriyouth @@ -1579,6 +1607,7 @@ Muayyad Alsadi Muhammad Zohaib Aslam Mustafa Akın Muthukumar R +Myeongjoon Kim Máximo Cuadros Médi-Rémi Hashim Nace Oroz @@ -1593,6 +1622,7 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Baulch Nathan Carlson Nathan Herald Nathan Hsieh @@ -1655,6 +1685,7 @@ Nuutti Kotivuori nzwsch O.S. Tezer objectified +Octol1ttle Odin Ugedal Oguz Bilgic Oh Jinkyun @@ -1689,6 +1720,7 @@ Patrick Hemmer Patrick St. laurent Patrick Stapleton Patrik Cyvoct +Patrik Leifert pattichen Paul "TBBle" Hampson Paul @@ -1763,6 +1795,7 @@ Pierre Carrier Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE +pinglanlu Piotr Bogdan Piotr Karbowski Porjo @@ -1790,6 +1823,7 @@ Quentin Tayssier r0n22 Rachit Sharma Radostin Stoyanov +Rafael Fernández López Rafal Jeczalik Rafe Colton Raghavendra K T @@ -1845,6 +1879,7 @@ Robert Obryk Robert Schneider Robert Shade Robert Stern +Robert Sturla Robert Terhaar Robert Wallis Robert Wang @@ -1856,7 +1891,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho -Rodrigo Campos +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1995,6 +2030,7 @@ Sevki Hasirci Shane Canon Shane da Silva Shaun Kaasten +Shaun Thompson shaunol Shawn Landden Shawn Siefkas @@ -2013,6 +2049,7 @@ Shijun Qin Shishir Mahajan Shoubhik Bose Shourya Sarcar +Shreenidhi Shedi Shu-Wai Chow shuai-z Shukui Yang @@ -2100,6 +2137,7 @@ Sébastien Stormacq Sören Tempel Tabakhase Tadej Janež +Tadeusz Dudkiewicz Takuto Sato tang0th Tangi Colin @@ -2107,6 +2145,7 @@ Tatsuki Sugiura Tatsushi Inagaki Taylan Isikdemir Taylor Jones +tcpdumppy <847462026@qq.com> Ted M. Young Tehmasp Chaudhri Tejaswini Duggaraju @@ -2391,6 +2430,7 @@ You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF Youfu Zhang +YR Chen Yu Changchun Yu Chengxia Yu Peng diff --git a/tools/vendor/github.com/docker/docker/api/types/filters/errors.go b/tools/vendor/github.com/docker/docker/api/types/filters/errors.go deleted file mode 100644 index f52f69440..000000000 --- a/tools/vendor/github.com/docker/docker/api/types/filters/errors.go +++ /dev/null @@ -1,37 +0,0 @@ -package filters - -import "fmt" - -// invalidFilter indicates that the provided filter or its value is invalid -type invalidFilter struct { - Filter string - Value []string -} - -func (e invalidFilter) Error() string { - msg := "invalid filter" - if e.Filter != "" { - msg += " '" + e.Filter - if e.Value != nil { - msg = fmt.Sprintf("%s=%s", msg, e.Value) - } - msg += "'" - } - return msg -} - -// InvalidParameter marks this error as ErrInvalidParameter -func (e invalidFilter) InvalidParameter() {} - -// unreachableCode is an error indicating that the code path was not expected to be reached. 
-type unreachableCode struct { - Filter string - Value []string -} - -// System marks this error as ErrSystem -func (e unreachableCode) System() {} - -func (e unreachableCode) Error() string { - return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value) -} diff --git a/tools/vendor/github.com/docker/docker/api/types/filters/parse.go b/tools/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index 0c39ab5f1..000000000 --- a/tools/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,346 +0,0 @@ -/* -Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. -type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// Keys returns all the keys in list of Args -func (args Args) Keys() []string { - keys := make([]string, 0, len(args.fields)) - for k := range args.fields { - keys = append(keys, k) - } - return keys -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte("{}"), nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. 
-// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, &invalidFilter{} - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encoded bytes -func (args Args) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. -func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testK, testV, hasValue := strings.Cut(value, "=") - - v, ok := sources[testK] - if !ok { - return false - } - if hasValue && testV != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// GetBoolOrDefault returns a boolean value of the key if the key is present -// and is interpretable as a boolean value. Otherwise the default value is returned. -// Error is not nil only if the filter values are not valid booleans or are conflicting.
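GetBoolOrDefault, whose implementation follows, only errors when the stored values are neither truthy nor falsy, or when they conflict. A short sketch of its outcomes; the filter keys and values are hypothetical:

package example

import "github.com/docker/docker/api/types/filters"

// danglingFlag shows GetBoolOrDefault behavior: a clean truthy value
// parses without error, and conflicting values return the default
// alongside a non-nil error.
func danglingFlag() (bool, error) {
	f := filters.NewArgs(filters.Arg("dangling", "true"))

	if v, err := f.GetBoolOrDefault("dangling", false); err != nil || !v {
		return v, err // not reached: "true" parses cleanly
	}

	f.Add("dangling", "false") // both "true" and "false" now set: conflict
	return f.GetBoolOrDefault("dangling", false) // false plus a non-nil error
}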
-func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { - fieldValues, ok := args.fields[key] - - if !ok { - return defaultValue, nil - } - - if len(fieldValues) == 0 { - return defaultValue, &invalidFilter{key, nil} - } - - isFalse := fieldValues["0"] || fieldValues["false"] - isTrue := fieldValues["1"] || fieldValues["true"] - - conflicting := isFalse && isTrue - invalid := !isFalse && !isTrue - - if conflicting || invalid { - return defaultValue, &invalidFilter{key, args.Get(key)} - } else if isFalse { - return false, nil - } else if isTrue { - return true, nil - } - - // This code shouldn't be reached. - return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)} -} - -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. -func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return &invalidFilter{name, nil} - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -// Clone returns a copy of args. 
-func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/tools/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/tools/vendor/github.com/docker/docker/api/types/registry/authconfig.go deleted file mode 100644 index 8e383f6e6..000000000 --- a/tools/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ /dev/null @@ -1,97 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" -import ( - "encoding/base64" - "encoding/json" - "io" - "strings" - - "github.com/pkg/errors" -) - -// AuthHeader is the name of the header used to send encoded registry -// authorization credentials for registry operations (push/pull). -const AuthHeader = "X-Registry-Auth" - -// AuthConfig contains authorization information for connecting to a Registry. -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} - -// EncodeAuthConfig serializes the auth configuration as a base64url encoded -// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header. -// -// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 -func EncodeAuthConfig(authConfig AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", errInvalidParameter{err} - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON -// authentication information as sent through the X-Registry-Auth header. -// -// This function always returns an [AuthConfig], even if an error occurs. It is up -// to the caller to decide if authentication is required, and if the error can -// be ignored. -// -// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 -func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { - if authEncoded == "" { - return &AuthConfig{}, nil - } - - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - return decodeAuthConfigFromReader(authJSON) -} - -// DecodeAuthConfigBody decodes authentication information as sent as JSON in the -// body of a request. 
This function is to provide backward compatibility with old -// clients and API versions. Current clients and API versions expect authentication -// to be provided through the X-Registry-Auth header. -// -// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an -// error occurs. It is up to the caller to decide if authentication is required, -// and if the error can be ignored. -func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { - return decodeAuthConfigFromReader(rdr) -} - -func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { - authConfig := &AuthConfig{} - if err := json.NewDecoder(rdr).Decode(authConfig); err != nil { - // always return an (empty) AuthConfig to increase compatibility with - // the existing API. - return &AuthConfig{}, invalid(err) - } - return authConfig, nil -} - -func invalid(err error) error { - return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { return e.error } - -func (e errInvalidParameter) Unwrap() error { return e.error } diff --git a/tools/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/tools/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e4..000000000 --- a/tools/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/tools/vendor/github.com/docker/docker/api/types/registry/registry.go b/tools/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 75ee07b15..000000000 --- a/tools/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,96 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. 
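The auth helpers deleted above round-trip credentials through the X-Registry-Auth header as base64url-encoded JSON. A brief usage sketch; the request and credentials are placeholders supplied by the caller, not part of this patch:

package example

import (
	"net/http"

	"github.com/docker/docker/api/types/registry"
)

// setRegistryAuth encodes creds and attaches them to a push/pull request
// under the standard X-Registry-Auth header.
func setRegistryAuth(req *http.Request, creds registry.AuthConfig) error {
	encoded, err := registry.EncodeAuthConfig(creds)
	if err != nil {
		return err
	}
	req.Header.Set(registry.AuthHeader, encoded)
	return nil
}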
-type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor ocispec.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []ocispec.Platform -} diff --git a/tools/vendor/github.com/docker/docker/api/types/registry/search.go b/tools/vendor/github.com/docker/docker/api/types/registry/search.go deleted file mode 100644 index a0a1eec54..000000000 --- a/tools/vendor/github.com/docker/docker/api/types/registry/search.go +++ /dev/null @@ -1,47 +0,0 @@ -package registry - -import ( - "context" - - "github.com/docker/docker/api/types/filters" -) - -// SearchOptions holds parameters to search images with. -type SearchOptions struct { - RegistryAuth string - - // PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can - // supply to retry operations after getting an authorization error. - // - // It must return the registry authentication header value in base64 - // format, or an error if the privilege request fails. 
- PrivilegeFunc func(context.Context) (string, error) - Filters filters.Args - Limit int -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated. - // - // Deprecated: the "is_automated" field is deprecated and will always be "false". - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection of search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/tools/vendor/github.com/docker/docker/api/types/versions/compare.go b/tools/vendor/github.com/docker/docker/api/types/versions/compare.go index 621725a36..1a0325c7e 100644 --- a/tools/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/tools/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -1,4 +1,4 @@ -package versions // import "github.com/docker/docker/api/types/versions" +package versions import ( "strconv" diff --git a/tools/vendor/github.com/docker/docker/errdefs/defs.go b/tools/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index a5523c3e9..000000000 --- a/tools/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When an ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known.
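These error-class interfaces are matched by the package's Is* helpers (defined in is.go, outside this hunk) and constructed by the wrappers in helpers.go below. A classification sketch, assuming the errdefs.NotFound wrapper and errdefs.IsNotFound check from this package:

package example

import (
	"fmt"

	"github.com/docker/docker/errdefs"
)

// lookup wraps a plain error so API layers can map it to a status code
// without depending on the concrete error type.
func lookup(id string) (int, error) {
	err := errdefs.NotFound(fmt.Errorf("no such object: %s", id))
	if errdefs.IsNotFound(err) { // true: the wrapper implements NotFound()
		return 404, err
	}
	return 500, err
}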
-type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/tools/vendor/github.com/docker/docker/errdefs/doc.go b/tools/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174f..000000000 --- a/tools/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). -package errdefs // import "github.com/docker/docker/errdefs" diff --git a/tools/vendor/github.com/docker/docker/errdefs/helpers.go b/tools/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index 042de4b7b..000000000 --- a/tools/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,279 +0,0 @@ -package errdefs - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound is a helper to create an error of the class with the same name from any error type -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter is a helper to create an error of the class with the same name from any error type -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict is a helper to create an error of the class with the same name from any error type -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized is a helper to create an error of the class with the same name from any error type -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() 
error { - return e.error -} - -// Unavailable is a helper to create an error of the class with the same name from any error type -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e errForbidden) Unwrap() error { - return e.error -} - -// Forbidden is a helper to create an error of the class with the same name from any error type -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System is a helper to create an error of the class with the same name from any error type -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -func (e errNotModified) Unwrap() error { - return e.error -} - -// NotModified is a helper to create an error of the class with the same name from any error type -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented is a helper to create an error of the class with the same name from any error type -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown is a helper to create an error of the class with the same name from any error type -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled is a helper to create an error of the class with the same name from any error type -func Cancelled(err error) error { - if err == nil || IsCancelled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -func (e errDeadline) Unwrap() error { - return e.error -} - -// Deadline is a helper to create an error of the class with the same name from any error type -func Deadline(err error) error { - if err == nil || IsDeadline(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -func (e errDataLoss) Unwrap() error { - return e.error -} - -// DataLoss is a helper to create an error of the class with the same name from any error type -func DataLoss(err error) error { - if err == nil || IsDataLoss(err) { - 
return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/tools/vendor/github.com/docker/docker/errdefs/http_helpers.go b/tools/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index ebcd78930..000000000 --- a/tools/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,46 +0,0 @@ -package errdefs - -import ( - "net/http" -) - -// FromStatusCode creates an errdef error, based on the provided HTTP status-code -func FromStatusCode(err error, statusCode int) error { - if err == nil { - return nil - } - switch statusCode { - case http.StatusNotFound: - err = NotFound(err) - case http.StatusBadRequest: - err = InvalidParameter(err) - case http.StatusConflict: - err = Conflict(err) - case http.StatusUnauthorized: - err = Unauthorized(err) - case http.StatusServiceUnavailable: - err = Unavailable(err) - case http.StatusForbidden: - err = Forbidden(err) - case http.StatusNotModified: - err = NotModified(err) - case http.StatusNotImplemented: - err = NotImplemented(err) - case http.StatusInternalServerError: - if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { - err = System(err) - } - default: - switch { - case statusCode >= 200 && statusCode < 400: - // it's a client error - case statusCode >= 400 && statusCode < 500: - err = InvalidParameter(err) - case statusCode >= 500 && statusCode < 600: - err = System(err) - default: - err = Unknown(err) - } - } - return err -} diff --git a/tools/vendor/github.com/docker/docker/errdefs/is.go b/tools/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index f94034cbd..000000000 --- a/tools/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,123 +0,0 @@ -package errdefs - -import ( - "context" - "errors" -) - -type causer interface { - Cause() error -} - -type wrapErr interface { - Unwrap() error -} - -func getImplementer(err error) error { - switch e := err.(type) { - case - ErrNotFound, - ErrInvalidParameter, - ErrConflict, - ErrUnauthorized, - ErrUnavailable, - ErrForbidden, - ErrSystem, - ErrNotModified, - ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - case wrapErr: - return getImplementer(e.Unwrap()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an ErrNotFound -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an ErrConflict -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an ErrUnauthorized -func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an ErrUnavailable -func IsUnavailable(err error) bool { - _, ok := getImplementer(err).(ErrUnavailable) - return ok -} - -// IsForbidden returns if the passed in 
error is an ErrForbidden -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an ErrSystem -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is a NotModified error -func IsNotModified(err error) bool { - _, ok := getImplementer(err).(ErrNotModified) - return ok -} - -// IsNotImplemented returns if the passed in error is an ErrNotImplemented -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an ErrUnknown -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an ErrCancelled -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an ErrDeadline -func IsDeadline(err error) bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an ErrDataLoss -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} - -// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. -func IsContext(err error) bool { - return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) -} diff --git a/tools/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/tools/vendor/github.com/docker/docker/pkg/ioutils/buffer.go deleted file mode 100644 index 466f79294..000000000 --- a/tools/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/tools/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/tools/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index c1cfa62fd..000000000 --- a/tools/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,187 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. 
- ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. - readBlock bool // check read BytesPipe is Wait() or not -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - defer bp.mu.Unlock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - if bp.readBlock { - bp.wait.Broadcast() - } - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. 
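A compact sketch of the producer/consumer contract documented above, using the removed BytesPipe; Close signals EOF to the reader once the buffered data has been drained:

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()
	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("world"))
		bp.Close() // reads drain the buffer, then return io.EOF
	}()
	data, err := io.ReadAll(bp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // hello, world
}
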
-func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - return 0, bp.closeErr - } - bp.readBlock = true - bp.wait.Wait() - bp.readBlock = false - if bp.bufLen == 0 && bp.closeErr != nil { - return 0, bp.closeErr - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/tools/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/tools/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go deleted file mode 100644 index 05da97b0e..000000000 --- a/tools/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,163 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. -// NOTE: umask is not considered for the file's permissions. -func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename and with the specified permission bits. -// NOTE: umask is not considered for the file's permissions. 
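The removed AtomicWriteFile (defined just below) behaves like os.WriteFile, except the data lands in a temp file that is fsynced and renamed into place. A minimal sketch, assuming a writable temp directory; the target path is hypothetical:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	path := filepath.Join(os.TempDir(), "config.json") // hypothetical target
	if err := ioutils.AtomicWriteFile(path, []byte(`{"ok":true}`), 0o600); err != nil {
		panic(err)
	}
	data, _ := os.ReadFile(path)
	fmt.Println(string(data))
}
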
-func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. -type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := os.MkdirTemp(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. -func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. 
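AtomicWriteSet extends the same idea to a group of files: everything is staged in a temp directory and becomes visible in a single rename at Commit time. A sketch of the staging flow, using only methods shown above; the target path is hypothetical:

package main

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	ws, err := ioutils.NewAtomicWriteSet("") // "" stages in the system temp dir
	if err != nil {
		panic(err)
	}
	if err := ws.WriteFile("a.txt", []byte("one"), 0o644); err != nil {
		ws.Cancel()
		panic(err)
	}
	// Commit requires that the target directory does not exist yet.
	target := filepath.Join(os.TempDir(), "write-set-demo") // hypothetical target
	os.RemoveAll(target)
	if err := ws.Commit(target); err != nil {
		panic(err)
	}
}
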
-func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/tools/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/tools/vendor/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index e03d3fee7..000000000 --- a/tools/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,172 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "context" - "io" - "runtime/debug" - "sync/atomic" - - // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered - // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. - _ "crypto/sha256" - _ "crypto/sha512" - - "github.com/containerd/log" -) - -// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser -// It calls the given callback function when closed. It should be constructed -// with NewReadCloserWrapper -type ReadCloserWrapper struct { - io.Reader - closer func() error - closed atomic.Bool -} - -// Close calls back the passed closer function -func (r *ReadCloserWrapper) Close() error { - if !r.closed.CompareAndSwap(false, true) { - subsequentCloseWarn("ReadCloserWrapper") - return nil - } - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &ReadCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter - closed atomic.Bool -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. 
- default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - if !p.closed.CompareAndSwap(false, true) { - subsequentCloseWarn("cancelReadCloser") - return nil - } - p.closeWithError(io.EOF) - return nil -} - -func subsequentCloseWarn(name string) { - log.G(context.TODO()).Error("subsequent attempt to close " + name) - if log.GetLevel() >= log.DebugLevel { - log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack())) - } -} diff --git a/tools/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/tools/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 91b8d1826..000000000 --- a/tools/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. 
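A sketch of the WriteFlusher behavior documented above. bytes.Buffer has no Flush method, so the constructor (defined just below) falls back to a NopFlusher; after Close, writes fail with the sentinel error, which is io.EOF:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	var buf bytes.Buffer
	wf := ioutils.NewWriteFlusher(&buf)
	wf.Write([]byte("status: ok\n")) // every Write also flushes
	fmt.Println(wf.Flushed())        // true
	wf.Close()
	if _, err := wf.Write([]byte("late")); err != nil {
		fmt.Println("write after close:", err) // io.EOF
	}
	fmt.Print(buf.String())
}
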
-func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/tools/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/tools/vendor/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 1f50602f2..000000000 --- a/tools/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,74 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "sync/atomic" -) - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error - closed atomic.Bool -} - -func (r *writeCloserWrapper) Close() error { - if !r.closed.CompareAndSwap(false, true) { - subsequentCloseWarn("WriteCloserWrapper") - return nil - } - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. 
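The WriteCounter doc comment above calls out json.Encoder.Encode as a case where the write count is masked; a short sketch of recovering it through the counter:

package main

import (
	"encoding/json"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Encode reports only an error, not a byte count; WriteCounter
	// accumulates the count of the underlying writes.
	wc := ioutils.NewWriteCounter(io.Discard)
	enc := json.NewEncoder(wc)
	if err := enc.Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	fmt.Println(wc.Count) // bytes written, including the trailing newline
}
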
-func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/tools/vendor/github.com/docker/docker/registry/auth.go b/tools/vendor/github.com/docker/docker/registry/auth.go deleted file mode 100644 index 905ccf5f5..000000000 --- a/tools/vendor/github.com/docker/docker/registry/auth.go +++ /dev/null @@ -1,201 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "net/http" - "net/url" - "strings" - "time" - - "github.com/containerd/log" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" -) - -// AuthClientID is used the ClientID used for the token server -const AuthClientID = "docker" - -type loginCredentialStore struct { - authConfig *registry.AuthConfig -} - -func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { - return lcs.authConfig.Username, lcs.authConfig.Password -} - -func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { - return lcs.authConfig.IdentityToken -} - -func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { - lcs.authConfig.IdentityToken = token -} - -type staticCredentialStore struct { - auth *registry.AuthConfig -} - -// NewStaticCredentialStore returns a credential store -// which always returns the same credential values. -func NewStaticCredentialStore(auth *registry.AuthConfig) auth.CredentialStore { - return staticCredentialStore{ - auth: auth, - } -} - -func (scs staticCredentialStore) Basic(*url.URL) (string, string) { - if scs.auth == nil { - return "", "" - } - return scs.auth.Username, scs.auth.Password -} - -func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { - if scs.auth == nil { - return "" - } - return scs.auth.IdentityToken -} - -func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -// loginV2 tries to login to the v2 registry server. The given registry -// endpoint will be pinged to get authorization challenges. These challenges -// will be used to authenticate against the registry to validate credentials. -func loginV2(authConfig *registry.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { - var ( - endpointStr = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - modifiers = Headers(userAgent, nil) - authTransport = transport.NewTransport(newTransport(endpoint.TLSConfig), modifiers...) 
- credentialAuthConfig = *authConfig - creds = loginCredentialStore{authConfig: &credentialAuthConfig} - ) - - log.G(context.TODO()).Debugf("attempting v2 login to registry endpoint %s", endpointStr) - - loginClient, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) - if err != nil { - return "", "", err - } - - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - return "", "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - err = translateV2AuthError(err) - return "", "", err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusOK { - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - } - - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - return "", "", errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) { - challengeManager, err := PingV2Registry(endpoint, authTransport) - if err != nil { - return nil, err - } - - authHandlers := []auth.AuthenticationHandler{ - auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: AuthClientID, - Scopes: scopes, - }), - auth.NewBasicHandler(creds), - } - - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, authHandlers...)) - - return &http.Client{ - Transport: transport.NewTransport(authTransport, modifiers...), - Timeout: 15 * time.Second, - }, nil -} - -// ConvertToHostname normalizes a registry URL which has http|https prepended -// to just its hostname. It is used to match credentials, which may be either -// stored as hostname or as hostname including scheme (in legacy configuration -// files). -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - return strings.SplitN(stripped, "/", 2)[0] -} - -// ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(authConfigs map[string]registry.AuthConfig, index *registry.IndexInfo) registry.AuthConfig { - configKey := GetAuthConfigKey(index) - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for registryURL, ac := range authConfigs { - if configKey == ConvertToHostname(registryURL) { - return ac - } - } - - // When all else fails, return an empty auth config - return registry.AuthConfig{} -} - -// PingResponseError is used when the response from a ping -// was received but invalid. -type PingResponseError struct { - Err error -} - -func (err PingResponseError) Error() string { - return err.Err.Error() -} - -// PingV2Registry attempts to ping a v2 registry and on success return a -// challenge manager for the supported authentication types. -// If a response is received but cannot be interpreted, a PingResponseError will be returned. 
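ConvertToHostname, shown above, normalizes legacy scheme-prefixed registry URLs down to the hostname used as a credential key. A minimal sketch of its behavior on the legacy and modern forms:

package main

import (
	"fmt"

	"github.com/docker/docker/registry"
)

func main() {
	// Strips a leading http:// or https:// and anything after the first slash.
	for _, s := range []string{
		"https://index.docker.io/v1/",
		"http://127.0.0.1:5000/v2/",
		"registry.example.com",
	} {
		fmt.Println(registry.ConvertToHostname(s))
	}
	// index.docker.io
	// 127.0.0.1:5000
	// registry.example.com
}
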
-func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, error) { - pingClient := &http.Client{ - Transport: transport, - Timeout: 15 * time.Second, - } - endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - return nil, err - } - resp, err := pingClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - challengeManager := challenge.NewSimpleManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, PingResponseError{ - Err: err, - } - } - - return challengeManager, nil -} diff --git a/tools/vendor/github.com/docker/docker/registry/config.go b/tools/vendor/github.com/docker/docker/registry/config.go deleted file mode 100644 index 84b0a63ad..000000000 --- a/tools/vendor/github.com/docker/docker/registry/config.go +++ /dev/null @@ -1,437 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "net" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/containerd/log" - "github.com/distribution/reference" - "github.com/docker/docker/api/types/registry" -) - -// ServiceOptions holds command line options. -type ServiceOptions struct { - AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` -} - -// serviceConfig holds daemon configuration for the registry service. -type serviceConfig registry.ServiceConfig - -// TODO(thaJeztah) both the "index.docker.io" and "registry-1.docker.io" domains -// are here for historic reasons and backward-compatibility. These domains -// are still supported by Docker Hub (and will continue to be supported), but -// there are new domains already in use, and plans to consolidate all legacy -// domains to new "canonical" domains. Once those domains are decided on, we -// should update these consts (but making sure to preserve compatibility with -// existing installs, clients, and user configuration). -const ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryHost is the hostname for the default (Docker Hub) registry - // used for pushing and pulling images. This hostname is hard-coded to handle - // the conversion from image references without registry name (e.g. "ubuntu", - // or "ubuntu:latest"), as well as references using the "docker.io" domain - // name, which is used as canonical reference for images on Docker Hub, but - // does not match the domain-name of Docker Hub's registry. - DefaultRegistryHost = "registry-1.docker.io" - // IndexHostname is the index hostname, used for authentication and image search. - IndexHostname = "index.docker.io" - // IndexServer is used for user auth and image search - IndexServer = "https://" + IndexHostname + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" -) - -var ( - // DefaultV2Registry is the URI of the default (Docker Hub) registry. - DefaultV2Registry = &url.URL{ - Scheme: "https", - Host: DefaultRegistryHost, - } - - emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) - validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) - - // for mocking in unit tests - lookupIP = net.LookupIP - - // certsDir is used to override defaultCertsDir. - certsDir string -) - -// SetCertsDir allows the default certs directory to be changed. 
This function -// is used at daemon startup to set the correct location when running in -// rootless mode. -func SetCertsDir(path string) { - certsDir = path -} - -// CertsDir is the directory where certificates are stored. -func CertsDir() string { - if certsDir != "" { - return certsDir - } - return defaultCertsDir -} - -// newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { - config := &serviceConfig{} - if err := config.loadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { - return nil, err - } - if err := config.loadMirrors(options.Mirrors); err != nil { - return nil, err - } - if err := config.loadInsecureRegistries(options.InsecureRegistries); err != nil { - return nil, err - } - - return config, nil -} - -// copy constructs a new ServiceConfig with a copy of the configuration in config. -func (config *serviceConfig) copy() *registry.ServiceConfig { - ic := make(map[string]*registry.IndexInfo) - for key, value := range config.IndexConfigs { - ic[key] = value - } - return ®istry.ServiceConfig{ - AllowNondistributableArtifactsCIDRs: append([]*registry.NetIPNet(nil), config.AllowNondistributableArtifactsCIDRs...), - AllowNondistributableArtifactsHostnames: append([]string(nil), config.AllowNondistributableArtifactsHostnames...), - InsecureRegistryCIDRs: append([]*registry.NetIPNet(nil), config.InsecureRegistryCIDRs...), - IndexConfigs: ic, - Mirrors: append([]string(nil), config.Mirrors...), - } -} - -// loadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. -func (config *serviceConfig) loadAllowNondistributableArtifacts(registries []string) error { - cidrs := map[string]*registry.NetIPNet{} - hostnames := map[string]bool{} - - for _, r := range registries { - if _, err := ValidateIndexName(r); err != nil { - return err - } - if hasScheme(r) { - return invalidParamf("allow-nondistributable-artifacts registry %s should not contain '://'", r) - } - - if _, ipnet, err := net.ParseCIDR(r); err == nil { - // Valid CIDR. - cidrs[ipnet.String()] = (*registry.NetIPNet)(ipnet) - } else if err = validateHostPort(r); err == nil { - // Must be `host:port` if not CIDR. - hostnames[r] = true - } else { - return invalidParamWrapf(err, "allow-nondistributable-artifacts registry %s is not valid", r) - } - } - - config.AllowNondistributableArtifactsCIDRs = make([]*registry.NetIPNet, 0, len(cidrs)) - for _, c := range cidrs { - config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) - } - - config.AllowNondistributableArtifactsHostnames = make([]string, 0, len(hostnames)) - for h := range hostnames { - config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) - } - - return nil -} - -// loadMirrors loads mirrors to config, after removing duplicates. -// Returns an error if mirrors contains an invalid mirror. -func (config *serviceConfig) loadMirrors(mirrors []string) error { - mMap := map[string]struct{}{} - unique := []string{} - - for _, mirror := range mirrors { - m, err := ValidateMirror(mirror) - if err != nil { - return err - } - if _, exist := mMap[m]; !exist { - mMap[m] = struct{}{} - unique = append(unique, m) - } - } - - config.Mirrors = unique - - // Configure public registry since mirrors may have changed. 
- config.IndexConfigs = map[string]*registry.IndexInfo{ - IndexName: { - Name: IndexName, - Mirrors: unique, - Secure: true, - Official: true, - }, - } - - return nil -} - -// loadInsecureRegistries loads insecure registries to config -func (config *serviceConfig) loadInsecureRegistries(registries []string) error { - // Localhost is by default considered as an insecure registry. This is a - // stop-gap for people who are running a private registry on localhost. - registries = append(registries, "127.0.0.0/8") - - var ( - insecureRegistryCIDRs = make([]*registry.NetIPNet, 0) - indexConfigs = make(map[string]*registry.IndexInfo) - ) - -skip: - for _, r := range registries { - // validate insecure registry - if _, err := ValidateIndexName(r); err != nil { - return err - } - if strings.HasPrefix(strings.ToLower(r), "http://") { - log.G(context.TODO()).Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) - r = r[7:] - } else if strings.HasPrefix(strings.ToLower(r), "https://") { - log.G(context.TODO()).Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) - r = r[8:] - } else if hasScheme(r) { - return invalidParamf("insecure registry %s should not contain '://'", r) - } - // Check if CIDR was passed to --insecure-registry - _, ipnet, err := net.ParseCIDR(r) - if err == nil { - // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. - data := (*registry.NetIPNet)(ipnet) - for _, value := range insecureRegistryCIDRs { - if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { - continue skip - } - } - // ipnet is not found, add it in config.InsecureRegistryCIDRs - insecureRegistryCIDRs = append(insecureRegistryCIDRs, data) - } else { - if err := validateHostPort(r); err != nil { - return invalidParamWrapf(err, "insecure registry %s is not valid", r) - } - // Assume `host:port` if not CIDR. - indexConfigs[r] = ®istry.IndexInfo{ - Name: r, - Mirrors: make([]string, 0), - Secure: false, - Official: false, - } - } - } - - // Configure public registry. - indexConfigs[IndexName] = ®istry.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - config.InsecureRegistryCIDRs = insecureRegistryCIDRs - config.IndexConfigs = indexConfigs - - return nil -} - -// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries -// that allow push of nondistributable artifacts. -// -// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP -// of the registry specified by hostname, true is returned. -// -// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If -// resolution fails, CIDR matching is not performed. -func (config *serviceConfig) allowNondistributableArtifacts(hostname string) bool { - for _, h := range config.AllowNondistributableArtifactsHostnames { - if h == hostname { - return true - } - } - - return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) -} - -// isSecureIndex returns false if the provided indexName is part of the list of insecure registries -// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. 
-// -// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. -// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered -// insecure. -// -// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained -// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element -// of insecureRegistries. -func (config *serviceConfig) isSecureIndex(indexName string) bool { - // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides newIndexInfo, in order to honor per-index configurations. - if index, ok := config.IndexConfigs[indexName]; ok { - return index.Secure - } - - return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) -} - -// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) -// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be -// resolved to IP addresses for matching. If resolution fails, false is returned. -func isCIDRMatch(cidrs []*registry.NetIPNet, URLHost string) bool { - host, _, err := net.SplitHostPort(URLHost) - if err != nil { - // Assume URLHost is of the form `host` without the port and go on. - host = URLHost - } - - addrs, err := lookupIP(host) - if err != nil { - ip := net.ParseIP(host) - if ip != nil { - addrs = []net.IP{ip} - } - - // if ip == nil, then `host` is neither an IP nor it could be looked up, - // either because the index is unreachable, or because the index is behind an HTTP proxy. - // So, len(addrs) == 0 and we're not aborting. - } - - // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. - for _, addr := range addrs { - for _, ipnet := range cidrs { - // check if the addr falls in the subnet - if (*net.IPNet)(ipnet).Contains(addr) { - return true - } - } - } - - return false -} - -// ValidateMirror validates an HTTP(S) registry mirror. It is used by the daemon -// to validate the daemon configuration. -func ValidateMirror(val string) (string, error) { - uri, err := url.Parse(val) - if err != nil { - return "", invalidParamWrapf(err, "invalid mirror: %q is not a valid URI", val) - } - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", invalidParamf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) - } - if uri.RawQuery != "" || uri.Fragment != "" { - return "", invalidParamf("invalid mirror: query or fragment at end of the URI %q", uri) - } - if uri.User != nil { - // strip password from output - uri.User = url.UserPassword(uri.User.Username(), "xxxxx") - return "", invalidParamf("invalid mirror: username/password not allowed in URI %q", uri) - } - return strings.TrimSuffix(val, "/") + "/", nil -} - -// ValidateIndexName validates an index name. It is used by the daemon to -// validate the daemon configuration. -func ValidateIndexName(val string) (string, error) { - // TODO: upstream this to check to reference package - if val == "index.docker.io" { - val = "docker.io" - } - if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", invalidParamf("invalid index name (%s). 
Cannot begin or end with a hyphen", val) - } - return val, nil -} - -func hasScheme(reposName string) bool { - return strings.Contains(reposName, "://") -} - -func validateHostPort(s string) error { - // Split host and port, and in case s can not be splitted, assume host only - host, port, err := net.SplitHostPort(s) - if err != nil { - host = s - port = "" - } - // If match against the `host:port` pattern fails, - // it might be `IPv6:port`, which will be captured by net.ParseIP(host) - if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { - return invalidParamf("invalid host %q", host) - } - if port != "" { - v, err := strconv.Atoi(port) - if err != nil { - return err - } - if v < 0 || v > 65535 { - return invalidParamf("invalid port %q", port) - } - } - return nil -} - -// newIndexInfo returns IndexInfo configuration from indexName -func newIndexInfo(config *serviceConfig, indexName string) (*registry.IndexInfo, error) { - var err error - indexName, err = ValidateIndexName(indexName) - if err != nil { - return nil, err - } - - // Return any configured index info, first. - if index, ok := config.IndexConfigs[indexName]; ok { - return index, nil - } - - // Construct a non-configured index info. - return ®istry.IndexInfo{ - Name: indexName, - Mirrors: make([]string, 0), - Secure: config.isSecureIndex(indexName), - Official: false, - }, nil -} - -// GetAuthConfigKey special-cases using the full index address of the official -// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. -func GetAuthConfigKey(index *registry.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, reference.Domain(name)) - if err != nil { - return nil, err - } - official := !strings.ContainsRune(reference.FamiliarName(name), '/') - - return &RepositoryInfo{ - Name: reference.TrimNamed(name), - Index: index, - Official: official, - }, nil -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a -// [RepositoryInfo], but lacks registry configuration. -// -// It is used by the Docker cli to interact with registry-related endpoints. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(emptyServiceConfig, reposName) -} diff --git a/tools/vendor/github.com/docker/docker/registry/config_unix.go b/tools/vendor/github.com/docker/docker/registry/config_unix.go deleted file mode 100644 index 214204930..000000000 --- a/tools/vendor/github.com/docker/docker/registry/config_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows - -package registry // import "github.com/docker/docker/registry" - -// defaultCertsDir is the platform-specific default directory where certificates -// are stored. On Linux, it may be overridden through certsDir, for example, when -// running in rootless mode. -const defaultCertsDir = "/etc/docker/certs.d" - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. 
Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} diff --git a/tools/vendor/github.com/docker/docker/registry/config_windows.go b/tools/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index 2674f2818..000000000 --- a/tools/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "os" - "path/filepath" - "strings" -) - -// defaultCertsDir is the platform-specific default directory where certificates -// are stored. On Linux, it may be overridden through certsDir, for example, when -// running in rootless mode. -var defaultCertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.ReplaceAll(s, ":", "")) -} diff --git a/tools/vendor/github.com/docker/docker/registry/errors.go b/tools/vendor/github.com/docker/docker/registry/errors.go deleted file mode 100644 index 7dc20ad8f..000000000 --- a/tools/vendor/github.com/docker/docker/registry/errors.go +++ /dev/null @@ -1,36 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -func translateV2AuthError(err error) error { - switch e := err.(type) { - case *url.Error: - switch e2 := e.Err.(type) { - case errcode.Error: - switch e2.Code { - case errcode.ErrorCodeUnauthorized: - return errdefs.Unauthorized(err) - } - } - } - - return err -} - -func invalidParam(err error) error { - return errdefs.InvalidParameter(err) -} - -func invalidParamf(format string, args ...interface{}) error { - return errdefs.InvalidParameter(errors.Errorf(format, args...)) -} - -func invalidParamWrapf(err error, format string, args ...interface{}) error { - return errdefs.InvalidParameter(errors.Wrapf(err, format, args...)) -} diff --git a/tools/vendor/github.com/docker/docker/registry/registry.go b/tools/vendor/github.com/docker/docker/registry/registry.go deleted file mode 100644 index 7866dcd0d..000000000 --- a/tools/vendor/github.com/docker/docker/registry/registry.go +++ /dev/null @@ -1,136 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/containerd/log" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/tlsconfig" -) - -// HostCertsDir returns the config directory for a specific host. 
-func HostCertsDir(hostname string) string { - return filepath.Join(CertsDir(), cleanPath(hostname)) -} - -// newTLSConfig constructs a client TLS configuration based on server defaults -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault() - - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure && CertsDir() != "" { - hostDir := HostCertsDir(hostname) - log.G(context.TODO()).Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return tlsConfig, nil -} - -func hasFile(files []os.DirEntry, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. -func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := os.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return invalidParam(err) - } - - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if tlsConfig.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return invalidParamWrapf(err, "unable to get system cert pool") - } - tlsConfig.RootCAs = systemPool - } - log.G(context.TODO()).Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := os.ReadFile(filepath.Join(directory, f.Name())) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - log.G(context.TODO()).Debugf("cert: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, keyName) { - return invalidParamf("missing key %s for client certificate %s. CA certificates must use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - log.G(context.TODO()).Debugf("key: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, certName) { - return invalidParamf("missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// Headers returns request modifiers with a User-Agent and metaHeaders -func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// newTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. 
-func newTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - tlsConfig = tlsconfig.ServerDefault() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - } - - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: direct.DialContext, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } -} diff --git a/tools/vendor/github.com/docker/docker/registry/search.go b/tools/vendor/github.com/docker/docker/registry/search.go deleted file mode 100644 index 4ce90f55d..000000000 --- a/tools/vendor/github.com/docker/docker/registry/search.go +++ /dev/null @@ -1,162 +0,0 @@ -package registry - -import ( - "context" - "net/http" - "strconv" - "strings" - - "github.com/containerd/log" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -var acceptedSearchFilterTags = map[string]bool{ - "is-automated": true, // Deprecated: the "is_automated" field is deprecated and will always be false in the future. - "is-official": true, - "stars": true, -} - -// Search queries the public registry for repositories matching the specified -// search term and filters. -func (s *Service) Search(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) ([]registry.SearchResult, error) { - if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { - return nil, err - } - - isAutomated, err := searchFilters.GetBoolOrDefault("is-automated", false) - if err != nil { - return nil, err - } - - // "is-automated" is deprecated and filtering for `true` will yield no results. - if isAutomated { - return []registry.SearchResult{}, nil - } - - isOfficial, err := searchFilters.GetBoolOrDefault("is-official", false) - if err != nil { - return nil, err - } - - hasStarFilter := 0 - if searchFilters.Contains("stars") { - hasStars := searchFilters.Get("stars") - for _, hasStar := range hasStars { - iHasStar, err := strconv.Atoi(hasStar) - if err != nil { - return nil, errdefs.InvalidParameter(errors.Wrapf(err, "invalid filter 'stars=%s'", hasStar)) - } - if iHasStar > hasStarFilter { - hasStarFilter = iHasStar - } - } - } - - unfilteredResult, err := s.searchUnfiltered(ctx, term, limit, authConfig, headers) - if err != nil { - return nil, err - } - - filteredResults := []registry.SearchResult{} - for _, result := range unfilteredResult.Results { - if searchFilters.Contains("is-official") { - if isOfficial != result.IsOfficial { - continue - } - } - if searchFilters.Contains("stars") { - if result.StarCount < hasStarFilter { - continue - } - } - // "is-automated" is deprecated and the value in Docker Hub search - // results is untrustworthy. Force it to false so as to not mislead our - // clients. 
- result.IsAutomated = false //nolint:staticcheck // ignore SA1019 (field is deprecated) - filteredResults = append(filteredResults, result) - } - - return filteredResults, nil -} - -func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int, authConfig *registry.AuthConfig, headers http.Header) (*registry.SearchResults, error) { - // TODO Use ctx when searching for repositories - if hasScheme(term) { - return nil, invalidParamf("invalid repository name: repository name (%s) should not have a scheme", term) - } - - indexName, remoteName := splitReposSearchTerm(term) - - // Search is a long-running operation, just lock s.config to avoid blocking others. - s.mu.RLock() - index, err := newIndexInfo(s.config, indexName) - s.mu.RUnlock() - - if err != nil { - return nil, err - } - if index.Official { - // If pulling "library/foo", it's stored locally under "foo" - remoteName = strings.TrimPrefix(remoteName, "library/") - } - - endpoint, err := newV1Endpoint(index, headers) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - - // TODO(thaJeztah): is there a reason not to include other headers here? (originally added in 19d48f0b8ba59eea9f2cac4ad1c7977712a6b7ac) - modifiers := Headers(headers.Get("User-Agent"), nil) - v2Client, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, []auth.Scope{ - auth.RegistryScope{Name: "catalog", Actions: []string{"search"}}, - }) - if err != nil { - return nil, err - } - // Copy non-transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - log.G(ctx).Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } else { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - return newSession(client, endpoint).searchRepositories(remoteName, limit) -} - -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), - // use the default Docker Hub registry (docker.io) - return IndexName, reposName - } - return nameParts[0], nameParts[1] -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -// -// TODO(thaJeztah) this function is only used by the CLI, and used to get -// information of the registry (to provide credentials if needed). We should -// move this function (or equivalent) to the CLI, as it's doing too much just -// for that.
-func ParseSearchIndexInfo(reposName string) (*registry.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - return newIndexInfo(emptyServiceConfig, indexName) -} diff --git a/tools/vendor/github.com/docker/docker/registry/search_endpoint_v1.go b/tools/vendor/github.com/docker/docker/registry/search_endpoint_v1.go deleted file mode 100644 index f6c369a93..000000000 --- a/tools/vendor/github.com/docker/docker/registry/search_endpoint_v1.go +++ /dev/null @@ -1,200 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "encoding/json" - "net/http" - "net/url" - "strings" - - "github.com/containerd/log" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types/registry" -) - -// v1PingResult contains the information returned when pinging a registry. It -// indicates whether the registry claims to be a standalone registry. -type v1PingResult struct { - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// v1Endpoint stores basic information about a V1 registry endpoint. -type v1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// newV1Endpoint parses the given address to return a registry endpoint. -// TODO: remove. This is only used by search. -func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, headers) - if err != nil { - return nil, err - } - - if endpoint.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fall back to http in case of error) - return endpoint, nil - } - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fall back to HTTP. - return nil, invalidParamf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // registry is insecure and HTTPS failed, fallback to HTTP. - log.G(context.TODO()).WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint) - endpoint.URL.Scheme = "http" - if _, err2 := endpoint.ping(); err2 != nil { - return nil, invalidParamf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - } - - return endpoint, nil -} - -// trimV1Address trims the "v1" version suffix off the address and returns -// the trimmed address. It returns an error on "v2" endpoints. 
-func trimV1Address(address string) (string, error) { - trimmed := strings.TrimSuffix(address, "/") - if strings.HasSuffix(trimmed, "/v2") { - return "", invalidParamf("search is not supported on v2 endpoints: %s", address) - } - return strings.TrimSuffix(trimmed, "/v1"), nil -} - -func newV1EndpointFromStr(address string, tlsConfig *tls.Config, headers http.Header) (*v1Endpoint, error) { - if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { - address = "https://" + address - } - - address, err := trimV1Address(address) - if err != nil { - return nil, err - } - - uri, err := url.Parse(address) - if err != nil { - return nil, invalidParam(err) - } - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := newTransport(tlsConfig) - - return &v1Endpoint{ - IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, - URL: uri, - client: httpClient(transport.NewTransport(tr, Headers("", headers)...)), - }, nil -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *v1Endpoint) String() string { - return e.URL.String() + "/v1/" -} - -// ping returns a v1PingResult which indicates whether the registry is standalone or not. -func (e *v1Endpoint) ping() (v1PingResult, error) { - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return v1PingResult{}, nil - } - - pingURL := e.String() + "_ping" - log.G(context.TODO()).WithField("url", pingURL).Debug("attempting v1 ping for registry endpoint") - req, err := http.NewRequest(http.MethodGet, pingURL, nil) - if err != nil { - return v1PingResult{}, invalidParam(err) - } - - resp, err := e.client.Do(req) - if err != nil { - return v1PingResult{}, invalidParam(err) - } - - defer resp.Body.Close() - - if v := resp.Header.Get("X-Docker-Registry-Standalone"); v != "" { - info := v1PingResult{} - // Accepted values are "1", and "true" (case-insensitive). - if v == "1" || strings.EqualFold(v, "true") { - info.Standalone = true - } - log.G(context.TODO()).Debugf("v1PingResult.Standalone (from X-Docker-Registry-Standalone header): %t", info.Standalone) - return info, nil - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := v1PingResult{ - Standalone: true, - } - if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { - log.G(context.TODO()).WithError(err).Debug("error unmarshaling _ping response") - // don't stop here. 
Just assume sane defaults - } - - log.G(context.TODO()).Debugf("v1PingResult.Standalone: %t", info.Standalone) - return info, nil -} - -// httpClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func httpClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if len(via) != 0 && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} diff --git a/tools/vendor/github.com/docker/docker/registry/search_session.go b/tools/vendor/github.com/docker/docker/registry/search_session.go deleted file mode 100644 index c334143c6..000000000 --- a/tools/vendor/github.com/docker/docker/registry/search_session.go +++ /dev/null @@ -1,218 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - // this is required for some certificates - "context" - _ "crypto/sha512" - "encoding/json" - "fmt" - "net/http" - "net/http/cookiejar" - "net/url" - "strings" - "sync" - - "github.com/containerd/log" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/pkg/errors" -) - -// A session is used to communicate with a V1 registry -type session struct { - indexEndpoint *v1Endpoint - client *http.Client -} - -type authTransport struct { - http.RoundTripper - *registry.AuthConfig - - alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// newAuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. 
-func newAuthTransport(base http.RoundTripper, authConfig *registry.AuthConfig, alwaysSetBasicAuth bool) *authTransport { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. - if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *registry.AuthConfig, endpoint *v1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - log.G(context.TODO()).Debugf("Endpoint %s is eligible for private registry. 
Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. - client.Transport = newAuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errdefs.System(errors.New("cookiejar.New is not supposed to return an error")) - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, endpoint *v1Endpoint) *session { - return &session{ - client: client, - indexEndpoint: endpoint, - } -} - -// defaultSearchLimit is the default value for maximum number of returned search results. -const defaultSearchLimit = 25 - -// searchRepositories performs a search against the remote repository -func (r *session) searchRepositories(term string, limit int) (*registry.SearchResults, error) { - if limit == 0 { - limit = defaultSearchLimit - } - if limit < 1 || limit > 100 { - return nil, invalidParamf("limit %d is outside the range of [1, 100]", limit) - } - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - log.G(context.TODO()).WithField("url", u).Debug("searchRepositories") - - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, invalidParamWrapf(err, "error building request") - } - // Have the AuthTransport send authentication, when logged in. - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, errdefs.System(err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - // TODO(thaJeztah): return upstream response body for errors (see https://github.com/moby/moby/issues/27286). - return nil, errdefs.Unknown(fmt.Errorf("Unexpected status code %d", res.StatusCode)) - } - result := ®istry.SearchResults{} - err = json.NewDecoder(res.Body).Decode(result) - if err != nil { - return nil, errdefs.System(errors.Wrap(err, "error decoding registry search results")) - } - return result, nil -} diff --git a/tools/vendor/github.com/docker/docker/registry/service.go b/tools/vendor/github.com/docker/docker/registry/service.go deleted file mode 100644 index 6881c1105..000000000 --- a/tools/vendor/github.com/docker/docker/registry/service.go +++ /dev/null @@ -1,145 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "net/url" - "strings" - "sync" - - "github.com/containerd/log" - "github.com/distribution/reference" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" -) - -// Service is a registry service. It tracks configuration data such as a list -// of mirrors. -type Service struct { - config *serviceConfig - mu sync.RWMutex -} - -// NewService returns a new instance of [Service] ready to be installed into -// an engine. -func NewService(options ServiceOptions) (*Service, error) { - config, err := newServiceConfig(options) - - return &Service{config: config}, err -} - -// ServiceConfig returns a copy of the public registry service's configuration. -func (s *Service) ServiceConfig() *registry.ServiceConfig { - s.mu.RLock() - defer s.mu.RUnlock() - return s.config.copy() -} - -// ReplaceConfig prepares a transaction which will atomically replace the -// registry service's configuration when the returned commit function is called. 
-func (s *Service) ReplaceConfig(options ServiceOptions) (commit func(), err error) { - config, err := newServiceConfig(options) - if err != nil { - return nil, err - } - return func() { - s.mu.Lock() - defer s.mu.Unlock() - s.config = config - }, nil -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. -func (s *Service) Auth(ctx context.Context, authConfig *registry.AuthConfig, userAgent string) (status, token string, err error) { - // TODO Use ctx when searching for repositories - registryHostName := IndexHostname - - if authConfig.ServerAddress != "" { - serverAddress := authConfig.ServerAddress - if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { - serverAddress = "https://" + serverAddress - } - u, err := url.Parse(serverAddress) - if err != nil { - return "", "", invalidParamWrapf(err, "unable to parse server address") - } - registryHostName = u.Host - } - - // Lookup endpoints for authentication using "LookupPushEndpoints", which - // excludes mirrors to prevent sending credentials of the upstream registry - // to a mirror. - endpoints, err := s.LookupPushEndpoints(registryHostName) - if err != nil { - return "", "", invalidParam(err) - } - - for _, endpoint := range endpoints { - status, token, err = loginV2(authConfig, endpoint, userAgent) - if err == nil { - return - } - if errdefs.IsUnauthorized(err) { - // Failed to authenticate; don't continue with (non-TLS) endpoints. - return status, token, err - } - log.G(ctx).WithError(err).Infof("Error logging in to endpoint, trying next endpoint") - } - - return "", "", err -} - -// ResolveRepository splits a repository name into its components -// and configuration of the associated registry. -func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { - s.mu.RLock() - defer s.mu.RUnlock() - return newRepositoryInfo(s.config, name) -} - -// APIEndpoint represents a remote API endpoint -type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion // Deprecated: v1 registries are deprecated, and endpoints are always v2. - AllowNondistributableArtifacts bool - Official bool - TrimHostname bool - TLSConfig *tls.Config -} - -// LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference. -// It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP. -func (s *Service) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.lookupV2Endpoints(hostname) -} - -// LookupPushEndpoints creates a list of v2 endpoints to try to push to, in order of preference. -// It gives preference to HTTPS over plain HTTP. Mirrors are not included. -func (s *Service) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.RLock() - defer s.mu.RUnlock() - - allEndpoints, err := s.lookupV2Endpoints(hostname) - if err == nil { - for _, endpoint := range allEndpoints { - if !endpoint.Mirror { - endpoints = append(endpoints, endpoint) - } - } - } - return endpoints, err -} - -// IsInsecureRegistry returns true if the registry at given host is configured as -// insecure registry. 
-func (s *Service) IsInsecureRegistry(host string) bool { - s.mu.RLock() - defer s.mu.RUnlock() - return !s.config.isSecureIndex(host) -} diff --git a/tools/vendor/github.com/docker/docker/registry/service_v2.go b/tools/vendor/github.com/docker/docker/registry/service_v2.go deleted file mode 100644 index 5d09e11c9..000000000 --- a/tools/vendor/github.com/docker/docker/registry/service_v2.go +++ /dev/null @@ -1,80 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - "strings" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - ana := s.config.allowNondistributableArtifacts(hostname) - - if hostname == DefaultNamespace || hostname == IndexHostname { - for _, mirror := range s.config.Mirrors { - if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { - mirror = "https://" + mirror - } - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, invalidParam(err) - } - mirrorTLSConfig, err := newTLSConfig(mirrorURL.Host, s.config.isSecureIndex(mirrorURL.Host)) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirrorURL, - Version: APIVersion2, //nolint:staticcheck // ignore SA1019 (Version is deprecated) to allow potential consumers to transition. - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) - } - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, //nolint:staticcheck // ignore SA1019 (Version is deprecated) to allow potential consumers to transition. - Official: true, - TrimHostname: true, - TLSConfig: tlsconfig.ServerDefault(), - - AllowNondistributableArtifacts: ana, - }) - - return endpoints, nil - } - - tlsConfig, err := newTLSConfig(hostname, s.config.isSecureIndex(hostname)) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion2, //nolint:staticcheck // ignore SA1019 (Version is deprecated) to allow potential consumers to transition. - AllowNondistributableArtifacts: ana, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion2, //nolint:staticcheck // ignore SA1019 (Version is deprecated) to allow potential consumers to transition. - AllowNondistributableArtifacts: ana, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} diff --git a/tools/vendor/github.com/docker/docker/registry/types.go b/tools/vendor/github.com/docker/docker/registry/types.go deleted file mode 100644 index 54aa0bd19..000000000 --- a/tools/vendor/github.com/docker/docker/registry/types.go +++ /dev/null @@ -1,41 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "github.com/distribution/reference" - "github.com/docker/docker/api/types/registry" -) - -// APIVersion is an integral representation of an API version (presently -// either 1 or 2) -// -// Deprecated: v1 registries are deprecated, and endpoints are always v2. -type APIVersion int - -func (av APIVersion) String() string { - return apiVersions[av] -} - -// API Version identifiers. 
-const ( - APIVersion1 APIVersion = 1 // Deprecated: v1 registries are deprecated, and endpoints are always v2. - APIVersion2 APIVersion = 2 // Deprecated: v1 registries are deprecated, and endpoints are always v2. -) - -var apiVersions = map[APIVersion]string{ - APIVersion1: "v1", - APIVersion2: "v2", -} - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - Name reference.Named - // Index points to registry information - Index *registry.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. - Official bool - // Class represents the class of the repository, such as "plugin" - // or "image". - Class string -} diff --git a/tools/vendor/github.com/docker/go-connections/tlsconfig/config.go b/tools/vendor/github.com/docker/go-connections/tlsconfig/config.go index 606c98a38..8b0264f68 100644 --- a/tools/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/tools/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -34,51 +34,37 @@ type Options struct { // the system pool will be used. ExclusiveRootPools bool MinVersion uint16 - // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted. - // - // Deprecated: Use of encrypted TLS private keys has been deprecated, and - // will be removed in a future release. Golang has deprecated support for - // legacy PEM encryption (as specified in RFC 1423), as it is insecure by - // design (see https://go-review.googlesource.com/c/go/+/264159). - Passphrase string -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, } // DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls // options struct but wants to use a commonly accepted set of TLS cipher suites, with // known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) +var DefaultServerAcceptedCiphers = defaultCipherSuites + +// defaultCipherSuites is shared by both client and server as the default set. +var defaultCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsConfig := &tls.Config{ - // Avoid fallback by default to SSL protocols < TLS1.2 - MinVersion: tls.VersionTLS12, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, - } - - for _, op := range ops { - op(tlsConfig) - } - - return tlsConfig + return defaultConfig(ops...) } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + return defaultConfig(ops...) +} + +// defaultConfig is the default config used by both client and server TLS configuration.
+func defaultConfig(ops ...func(*tls.Config)) *tls.Config { tlsConfig := &tls.Config{ - // Prefer TLS1.2 as the client minimum + // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, + CipherSuites: defaultCipherSuites, } for _, op := range ops { @@ -92,13 +78,13 @@ func ClientDefault(ops ...func(*tls.Config)) *tls.Config { func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { // If we should verify the server, we need to load a trusted ca var ( - certPool *x509.CertPool - err error + pool *x509.CertPool + err error ) if exclusivePool { - certPool = x509.NewCertPool() + pool = x509.NewCertPool() } else { - certPool, err = SystemCertPool() + pool, err = SystemCertPool() if err != nil { return nil, fmt.Errorf("failed to read system certificates: %v", err) } @@ -107,10 +93,10 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pemData) { + if !pool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } - return certPool, nil + return pool, nil } // allTLSVersions lists all the TLS versions and is used by the code that validates @@ -144,34 +130,32 @@ func adjustMinVersion(options Options, config *tls.Config) error { return nil } -// IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when trying to decrypt a TLS private key. +// errEncryptedKeyDeprecated is produced when we encounter an encrypted +// (password-protected) key. From https://go-review.googlesource.com/c/go/+/264159; // -// Deprecated: Use of encrypted TLS private keys has been deprecated, and -// will be removed in a future release. Golang has deprecated support for -// legacy PEM encryption (as specified in RFC 1423), as it is insecure by -// design (see https://go-review.googlesource.com/c/go/+/264159). -func IsErrEncryptedKey(err error) bool { - return errors.Is(err, x509.IncorrectPasswordError) -} +// > Legacy PEM encryption as specified in RFC 1423 is insecure by design. Since +// > it does not authenticate the ciphertext, it is vulnerable to padding oracle +// > attacks that can let an attacker recover the plaintext +// > +// > It's unfortunate that we don't implement PKCS#8 encryption so we can't +// > recommend an alternative but PEM encryption is so broken that it's worth +// > deprecating outright. +// +// Also see https://docs.docker.com/go/deprecated/ +var errEncryptedKeyDeprecated = errors.New("private key is encrypted; encrypted private keys are obsolete, and not supported") // getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. -// If the private key is encrypted, 'passphrase' is used to decrypted the -// private key. -func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { +// It returns an error if the file could not be decoded or was protected by +// a passphrase. 
+func getPrivateKey(keyBytes []byte) ([]byte, error) { // this section makes some small changes to code from notary/tuf/utils/x509.go pemBlock, _ := pem.Decode(keyBytes) if pemBlock == nil { return nil, fmt.Errorf("no valid private key found") } - var err error if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) - if err != nil { - return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err) - } - keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + return nil, errEncryptedKeyDeprecated } return keyBytes, nil @@ -195,7 +179,7 @@ func getCert(options Options) ([]tls.Certificate, error) { return nil, err } - prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + prKeyBytes, err = getPrivateKey(prKeyBytes) if err != nil { return nil, err } @@ -210,7 +194,7 @@ func getCert(options Options) ([]tls.Certificate, error) { // Client returns a TLS configuration meant to be used by a client. func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault() + tlsConfig := defaultConfig() tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify if !options.InsecureSkipVerify && options.CAFile != "" { CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) @@ -235,7 +219,7 @@ func Client(options Options) (*tls.Config, error) { // Server returns a TLS configuration meant to be used by a server. func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault() + tlsConfig := defaultConfig() tlsConfig.ClientAuth = options.ClientAuth tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { diff --git a/tools/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/tools/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index a82f9fa52..000000000 --- a/tools/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/tools/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/tools/vendor/github.com/docker/go-metrics/CONTRIBUTING.md deleted file mode 100644 index b8a512c36..000000000 --- a/tools/vendor/github.com/docker/go-metrics/CONTRIBUTING.md +++ /dev/null @@ -1,55 +0,0 @@ -# Contributing - -## Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/tools/vendor/github.com/docker/go-metrics/LICENSE.docs b/tools/vendor/github.com/docker/go-metrics/LICENSE.docs deleted file mode 100644 index e26cd4fc8..000000000 --- a/tools/vendor/github.com/docker/go-metrics/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. 
Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. 
Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. 
Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. 
- - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. 
However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. 
diff --git a/tools/vendor/github.com/docker/go-metrics/NOTICE b/tools/vendor/github.com/docker/go-metrics/NOTICE deleted file mode 100644 index 8915f0277..000000000 --- a/tools/vendor/github.com/docker/go-metrics/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/tools/vendor/github.com/docker/go-metrics/README.md b/tools/vendor/github.com/docker/go-metrics/README.md deleted file mode 100644 index a9e947cb5..000000000 --- a/tools/vendor/github.com/docker/go-metrics/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics) - -This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. - -## Best Practices - -This package is meant to be used for collecting metrics in Docker projects. -It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected. -If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/). - -The following are a few Docker-specific rules that will help you name and work with metrics in your project. - -1. Namespace and Subsystem - -This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics. - -```go -ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{ - "version": dockerversion.Version, - "commit": dockerversion.GitCommit, -}) -``` - -In the example above we are creating metrics for the Docker engine's daemon package. -`engine` would be the namespace in this example, and `daemon` is the subsystem or package where we are collecting the metrics. - -A namespace also allows you to attach constant labels to the metrics, such as the git commit and version shown above. - -2. Declaring your Metrics - -Try to keep all your metric declarations in one file. -This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created. - -3. Use labels instead of multiple metrics - -Labels allow you to define one metric such as the time it takes to perform a certain action on an object. -If we want to collect timings for various container actions such as create, start, and delete, we can define one metric called `container_actions` and use labels to specify the type of action. - - -```go -containerActions = ns.NewLabeledTimer("container_actions", "The number of milliseconds it takes to process each container action", "action") -``` - -The last parameter is the label name or key. -When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
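The README's points above pair a namespace with a labeled timer; a minimal end-to-end sketch, stitched together from the files removed in this patch (the version label value, port, and sleep are illustrative; the README's own one-line recording snippet follows below):

```go
package main

import (
	"log"
	"net/http"
	"time"

	metrics "github.com/docker/go-metrics"
)

var (
	// Constant labels on the namespace; the version value is a stand-in for
	// the dockerversion fields used in the README snippet above.
	ns = metrics.NewNamespace("engine", "daemon", metrics.Labels{
		"version": "v0.0.0-dev",
	})

	// "action" is the label key; a concrete value is supplied per observation.
	containerActions = ns.NewLabeledTimer("container_actions",
		"The number of seconds it takes to process each container action", "action")
)

func main() {
	// Register everything created on the namespace with the global registry
	// (register.go later in this patch).
	metrics.Register(ns)

	// Time one "create" action using StartTimer from timer.go.
	done := metrics.StartTimer(containerActions.WithValues("create"))
	time.Sleep(10 * time.Millisecond) // stand-in for real work
	done()

	// Expose the scrape endpoint via the uninstrumented global handler
	// (handler.go).
	http.Handle("/metrics", metrics.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```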
- -```go -containerActions.WithValues("create").UpdateSince(start) -``` - -4. Always use a unit - -The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with. -For a timer, the standard unit is seconds and a counter's standard unit is a total. -For gauges you must provide the unit. -This package provides a standard set of units for use within the Docker projects. - -```go -Nanoseconds Unit = "nanoseconds" -Seconds Unit = "seconds" -Bytes Unit = "bytes" -Total Unit = "total" -``` - -If you need to use a unit but it is not defined in the package please open a PR to add it but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds. - -## Docs - -Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics). - -## HTTP Metrics - -To instrument a http handler, you can wrap the code like this: - -```go -namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"}) -httpMetrics := namespace.NewDefaultHttpMetrics() -metrics.Register(namespace) -instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler) -``` -Note: The `handler` label must be provided when a new namespace is created. - -## Additional Metrics - -Additional metrics are also defined here that are not available in the prometheus client. -If you need a custom metrics and it is generic enough to be used by multiple projects, define it here. - - -## Copyright and license - -Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/tools/vendor/github.com/docker/go-metrics/counter.go b/tools/vendor/github.com/docker/go-metrics/counter.go deleted file mode 100644 index fe36316a4..000000000 --- a/tools/vendor/github.com/docker/go-metrics/counter.go +++ /dev/null @@ -1,52 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Counter is a metrics that can only increment its current count -type Counter interface { - // Inc adds Sum(vs) to the counter. Sum(vs) must be positive. - // - // If len(vs) == 0, increments the counter by 1. - Inc(vs ...float64) -} - -// LabeledCounter is counter that must have labels populated before use. 
-type LabeledCounter interface { - WithValues(vs ...string) Counter -} - -type labeledCounter struct { - pc *prometheus.CounterVec -} - -func (lc *labeledCounter) WithValues(vs ...string) Counter { - return &counter{pc: lc.pc.WithLabelValues(vs...)} -} - -func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) { - lc.pc.Describe(ch) -} - -func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) { - lc.pc.Collect(ch) -} - -type counter struct { - pc prometheus.Counter -} - -func (c *counter) Inc(vs ...float64) { - if len(vs) == 0 { - c.pc.Inc() - } - - c.pc.Add(sumFloat64(vs...)) -} - -func (c *counter) Describe(ch chan<- *prometheus.Desc) { - c.pc.Describe(ch) -} - -func (c *counter) Collect(ch chan<- prometheus.Metric) { - c.pc.Collect(ch) -} diff --git a/tools/vendor/github.com/docker/go-metrics/docs.go b/tools/vendor/github.com/docker/go-metrics/docs.go deleted file mode 100644 index 8fbdfc697..000000000 --- a/tools/vendor/github.com/docker/go-metrics/docs.go +++ /dev/null @@ -1,3 +0,0 @@ -// This package is small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. - -package metrics diff --git a/tools/vendor/github.com/docker/go-metrics/gauge.go b/tools/vendor/github.com/docker/go-metrics/gauge.go deleted file mode 100644 index 74296e877..000000000 --- a/tools/vendor/github.com/docker/go-metrics/gauge.go +++ /dev/null @@ -1,72 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Gauge is a metric that allows incrementing and decrementing a value -type Gauge interface { - Inc(...float64) - Dec(...float64) - - // Add adds the provided value to the gauge's current value - Add(float64) - - // Set replaces the gauge's current value with the provided value - Set(float64) -} - -// LabeledGauge describes a gauge the must have values populated before use. 
-type LabeledGauge interface { - WithValues(labels ...string) Gauge -} - -type labeledGauge struct { - pg *prometheus.GaugeVec -} - -func (lg *labeledGauge) WithValues(labels ...string) Gauge { - return &gauge{pg: lg.pg.WithLabelValues(labels...)} -} - -func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) { - lg.pg.Describe(c) -} - -func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) { - lg.pg.Collect(c) -} - -type gauge struct { - pg prometheus.Gauge -} - -func (g *gauge) Inc(vs ...float64) { - if len(vs) == 0 { - g.pg.Inc() - } - - g.Add(sumFloat64(vs...)) -} - -func (g *gauge) Dec(vs ...float64) { - if len(vs) == 0 { - g.pg.Dec() - } - - g.Add(-sumFloat64(vs...)) -} - -func (g *gauge) Add(v float64) { - g.pg.Add(v) -} - -func (g *gauge) Set(v float64) { - g.pg.Set(v) -} - -func (g *gauge) Describe(c chan<- *prometheus.Desc) { - g.pg.Describe(c) -} - -func (g *gauge) Collect(c chan<- prometheus.Metric) { - g.pg.Collect(c) -} diff --git a/tools/vendor/github.com/docker/go-metrics/handler.go b/tools/vendor/github.com/docker/go-metrics/handler.go deleted file mode 100644 index 05601e9ec..000000000 --- a/tools/vendor/github.com/docker/go-metrics/handler.go +++ /dev/null @@ -1,74 +0,0 @@ -package metrics - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -// HTTPHandlerOpts describes a set of configurable options of http metrics -type HTTPHandlerOpts struct { - DurationBuckets []float64 - RequestSizeBuckets []float64 - ResponseSizeBuckets []float64 -} - -const ( - InstrumentHandlerResponseSize = iota - InstrumentHandlerRequestSize - InstrumentHandlerDuration - InstrumentHandlerCounter - InstrumentHandlerInFlight -) - -type HTTPMetric struct { - prometheus.Collector - handlerType int -} - -var ( - defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60} - defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G - defaultResponseSizeBuckets = defaultRequestSizeBuckets -) - -// Handler returns the global http.Handler that provides the prometheus -// metrics format on GET requests. This handler is no longer instrumented. 
-func Handler() http.Handler { - return promhttp.Handler() -} - -func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(metrics, handler.ServeHTTP) -} - -func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc { - var handler http.Handler - handler = http.HandlerFunc(handlerFunc) - for _, metric := range metrics { - switch metric.handlerType { - case InstrumentHandlerResponseSize: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerResponseSize(collector, handler) - } - case InstrumentHandlerRequestSize: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerRequestSize(collector, handler) - } - case InstrumentHandlerDuration: - if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { - handler = promhttp.InstrumentHandlerDuration(collector, handler) - } - case InstrumentHandlerCounter: - if collector, ok := metric.Collector.(*prometheus.CounterVec); ok { - handler = promhttp.InstrumentHandlerCounter(collector, handler) - } - case InstrumentHandlerInFlight: - if collector, ok := metric.Collector.(prometheus.Gauge); ok { - handler = promhttp.InstrumentHandlerInFlight(collector, handler) - } - } - } - return handler.ServeHTTP -} diff --git a/tools/vendor/github.com/docker/go-metrics/helpers.go b/tools/vendor/github.com/docker/go-metrics/helpers.go deleted file mode 100644 index 68b7f51b3..000000000 --- a/tools/vendor/github.com/docker/go-metrics/helpers.go +++ /dev/null @@ -1,10 +0,0 @@ -package metrics - -func sumFloat64(vs ...float64) float64 { - var sum float64 - for _, v := range vs { - sum += v - } - - return sum -} diff --git a/tools/vendor/github.com/docker/go-metrics/namespace.go b/tools/vendor/github.com/docker/go-metrics/namespace.go deleted file mode 100644 index 798315451..000000000 --- a/tools/vendor/github.com/docker/go-metrics/namespace.go +++ /dev/null @@ -1,315 +0,0 @@ -package metrics - -import ( - "fmt" - "sync" - - "github.com/prometheus/client_golang/prometheus" -) - -type Labels map[string]string - -// NewNamespace returns a namespaces that is responsible for managing a collection of -// metrics for a particual namespace and subsystem -// -// labels allows const labels to be added to all metrics created in this namespace -// and are commonly used for data like application version and git commit -func NewNamespace(name, subsystem string, labels Labels) *Namespace { - if labels == nil { - labels = make(map[string]string) - } - return &Namespace{ - name: name, - subsystem: subsystem, - labels: labels, - } -} - -// Namespace describes a set of metrics that share a namespace and subsystem. -type Namespace struct { - name string - subsystem string - labels Labels - mu sync.Mutex - metrics []prometheus.Collector -} - -// WithConstLabels returns a namespace with the provided set of labels merged -// with the existing constant labels on the namespace. -// -// Only metrics created with the returned namespace will get the new constant -// labels. The returned namespace must be registered separately. 
-func (n *Namespace) WithConstLabels(labels Labels) *Namespace { - n.mu.Lock() - ns := &Namespace{ - name: n.name, - subsystem: n.subsystem, - labels: mergeLabels(n.labels, labels), - } - n.mu.Unlock() - return ns -} - -func (n *Namespace) NewCounter(name, help string) Counter { - c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} - n.Add(c) - return c -} - -func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { - c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} - n.Add(c) - return c -} - -func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { - return prometheus.CounterOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, Total), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) NewTimer(name, help string) Timer { - t := &timer{ - m: prometheus.NewHistogram(n.newTimerOpts(name, help)), - } - n.Add(t) - return t -} - -func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { - t := &labeledTimer{ - m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), - } - n.Add(t) - return t -} - -func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { - return prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, Seconds), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { - g := &gauge{ - pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), - } - n.Add(g) - return g -} - -func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { - g := &labeledGauge{ - pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), - } - n.Add(g) - return g -} - -func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { - return prometheus.GaugeOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, unit), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { - n.mu.Lock() - defer n.mu.Unlock() - - for _, metric := range n.metrics { - metric.Describe(ch) - } -} - -func (n *Namespace) Collect(ch chan<- prometheus.Metric) { - n.mu.Lock() - defer n.mu.Unlock() - - for _, metric := range n.metrics { - metric.Collect(ch) - } -} - -func (n *Namespace) Add(collector prometheus.Collector) { - n.mu.Lock() - n.metrics = append(n.metrics, collector) - n.mu.Unlock() -} - -func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { - name = makeName(name, unit) - namespace := n.name - if n.subsystem != "" { - namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) - } - name = fmt.Sprintf("%s_%s", namespace, name) - return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) -} - -// mergeLabels merges two or more labels objects into a single map, favoring -// the later labels. 
-func mergeLabels(lbs ...Labels) Labels { - merged := make(Labels) - - for _, target := range lbs { - for k, v := range target { - merged[k] = v - } - } - - return merged -} - -func makeName(name string, unit Unit) string { - if unit == "" { - return name - } - - return fmt.Sprintf("%s_%s", name, unit) -} - -func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric { - return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ - DurationBuckets: defaultDurationBuckets, - RequestSizeBuckets: defaultResponseSizeBuckets, - ResponseSizeBuckets: defaultResponseSizeBuckets, - }) -} - -func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric { - return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ - DurationBuckets: durationBuckets, - RequestSizeBuckets: requestSizeBuckets, - ResponseSizeBuckets: responseSizeBuckets, - }) -} - -func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric { - var httpMetrics []*HTTPMetric - inFlightMetric := n.NewInFlightGaugeMetric(handlerName) - requestTotalMetric := n.NewRequestTotalMetric(handlerName) - requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets) - requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets) - responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets) - httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric) - return httpMetrics -} - -func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric { - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - metric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "in_flight_requests", - Help: "The in-flight HTTP requests", - ConstLabels: prometheus.Labels(labels), - }) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerInFlight, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric { - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - metric := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: prometheus.Labels(labels), - }, - []string{"code", "method"}, - ) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerCounter, - } - n.Add(httpMetric) - return httpMetric -} -func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("DurationBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "request_duration_seconds", - Help: "The HTTP request latencies in seconds.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metric := prometheus.NewHistogramVec(opts, []string{"method"}) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerDuration, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("RequestSizeBuckets must be provided") - } - labels := 
prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "request_size_bytes", - Help: "The HTTP request sizes in bytes.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metric := prometheus.NewHistogramVec(opts, []string{}) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerRequestSize, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("ResponseSizeBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "response_size_bytes", - Help: "The HTTP response sizes in bytes.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metrics := prometheus.NewHistogramVec(opts, []string{}) - httpMetric := &HTTPMetric{ - Collector: metrics, - handlerType: InstrumentHandlerResponseSize, - } - n.Add(httpMetric) - return httpMetric -} diff --git a/tools/vendor/github.com/docker/go-metrics/register.go b/tools/vendor/github.com/docker/go-metrics/register.go deleted file mode 100644 index 708358df0..000000000 --- a/tools/vendor/github.com/docker/go-metrics/register.go +++ /dev/null @@ -1,15 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Register adds all the metrics in the provided namespace to the global -// metrics registry -func Register(n *Namespace) { - prometheus.MustRegister(n) -} - -// Deregister removes all the metrics in the provided namespace from the -// global metrics registry -func Deregister(n *Namespace) { - prometheus.Unregister(n) -} diff --git a/tools/vendor/github.com/docker/go-metrics/timer.go b/tools/vendor/github.com/docker/go-metrics/timer.go deleted file mode 100644 index 824c98739..000000000 --- a/tools/vendor/github.com/docker/go-metrics/timer.go +++ /dev/null @@ -1,85 +0,0 @@ -package metrics - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// StartTimer begins a timer observation at the callsite. When the target -// operation is completed, the caller should call the return done func(). -func StartTimer(timer Timer) (done func()) { - start := time.Now() - return func() { - timer.Update(time.Since(start)) - } -} - -// Timer is a metric that allows collecting the duration of an action in seconds -type Timer interface { - // Update records an observation, duration, and converts to the target - // units. - Update(duration time.Duration) - - // UpdateSince will add the duration from the provided starting time to the - // timer's summary with the precisions that was used in creation of the timer - UpdateSince(time.Time) -} - -// LabeledTimer is a timer that must have label values populated before use. 
-type LabeledTimer interface { - WithValues(labels ...string) *labeledTimerObserver -} - -type labeledTimer struct { - m *prometheus.HistogramVec -} - -type labeledTimerObserver struct { - m prometheus.Observer -} - -func (lbo *labeledTimerObserver) Update(duration time.Duration) { - lbo.m.Observe(duration.Seconds()) -} - -func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { - lbo.m.Observe(time.Since(since).Seconds()) -} - -func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { - return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} -} - -func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { - lt.m.Describe(c) -} - -func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { - lt.m.Collect(c) -} - -type timer struct { - m prometheus.Observer -} - -func (t *timer) Update(duration time.Duration) { - t.m.Observe(duration.Seconds()) -} - -func (t *timer) UpdateSince(since time.Time) { - t.m.Observe(time.Since(since).Seconds()) -} - -func (t *timer) Describe(c chan<- *prometheus.Desc) { - c <- t.m.(prometheus.Metric).Desc() -} - -func (t *timer) Collect(c chan<- prometheus.Metric) { - // Are there any observers that don't implement Collector? It is really - // unclear what the point of the upstream change was, but we'll let this - // panic if we get an observer that doesn't implement collector. In this - // case, we should almost always see metricVec objects, so this should - // never panic. - t.m.(prometheus.Collector).Collect(c) -} diff --git a/tools/vendor/github.com/docker/go-metrics/unit.go b/tools/vendor/github.com/docker/go-metrics/unit.go deleted file mode 100644 index c96622f90..000000000 --- a/tools/vendor/github.com/docker/go-metrics/unit.go +++ /dev/null @@ -1,12 +0,0 @@ -package metrics - -// Unit represents the type or precision of a metric that is appended to -// the metrics fully qualified name -type Unit string - -const ( - Nanoseconds Unit = "nanoseconds" - Seconds Unit = "seconds" - Bytes Unit = "bytes" - Total Unit = "total" -) diff --git a/tools/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/tools/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 5edd5a7ca..6f24dfff5 100644 --- a/tools/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/tools/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,26 @@ # Change history of go-restful +## [v3.12.2] - 2025-02-21 + +- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) + +## [v3.12.1] - 2024-05-28 + +- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen) + +## [v3.12.0] - 2024-03-11 + +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 + +- better not have 2 tags on one commit + +## [v3.11.1, v3.11.2] - 2024-01-09 + +- fix by restoring custom JSON handler functions (Mike Beaumont #540) + ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. 
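The changelog entries above mention the curly router's regex handling, and the curly.go hunk later in this patch awards an extra score when a regex-constrained path token matches. A minimal sketch of two such routes, using go-restful's public API (which this diff does not itself show; handler names and port are illustrative):

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func numeric(req *restful.Request, resp *restful.Response) {
	resp.Write([]byte("numeric id: " + req.PathParameter("id")))
}

func byName(req *restful.Request, resp *restful.Response) {
	resp.Write([]byte("name: " + req.PathParameter("name")))
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users")

	// {id:[0-9]+} only matches digit-only segments, so GET /users/abc falls
	// through to the plain {name} route, while GET /users/42 can match both.
	// The curly.go hunk below additionally scores a regex match higher when
	// selecting among candidate webservices.
	ws.Route(ws.GET("/{id:[0-9]+}").To(numeric))
	ws.Route(ws.GET("/{name}").To(byName))

	restful.DefaultContainer.Router(restful.CurlyRouter{})
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```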
diff --git a/tools/vendor/github.com/emicklei/go-restful/v3/README.md b/tools/vendor/github.com/emicklei/go-restful/v3/README.md index 95a05a089..3fb40d198 100644 --- a/tools/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/tools/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,9 +2,8 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) diff --git a/tools/vendor/github.com/emicklei/go-restful/v3/compress.go b/tools/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99..80adf55fd 100644 --- a/tools/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/tools/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. +func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/tools/vendor/github.com/emicklei/go-restful/v3/curly.go b/tools/vendor/github.com/emicklei/go-restful/v3/curly.go index ba1fc5d5f..6fd2bcd5a 100644 --- a/tools/vendor/github.com/emicklei/go-restful/v3/curly.go +++ b/tools/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute( // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) + for _, eachRoute := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb) if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers? 
} } sort.Sort(candidates) @@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin return false, 0, 0 } requestToken := requestTokens[i] - if routeHasCustomVerb && hasCustomVerb(routeToken){ + if routeHasCustomVerb && hasCustomVerb(routeToken) { if !isMatchCustomVerb(routeToken, requestToken) { return false, 0, 0 } @@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques // detectWebService returns the best matching webService given the list of path tokens. // see also computeWebserviceScore func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService + var bestWs *WebService score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + for _, eachWS := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens) if matches && (eachScore > score) { - best = each + bestWs = eachWS score = eachScore } } - return best + return bestWs } // computeWebserviceScore returns whether tokens match and // the weighted score of the longest matching consecutive tokens from the beginning. -func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) { + if len(routeTokens) > len(requestTokens) { return false, 0 } score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { + for i := 0; i < len(routeTokens); i++ { + eachRequestToken := requestTokens[i] + eachRouteToken := routeTokens[i] + if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 { score++ continue } - if len(other) > 0 && strings.HasPrefix(other, "{") { + if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") { // no empty match - if len(each) == 0 { + if len(eachRequestToken) == 0 { return false, score } - score += 1 + score++ + + if colon := strings.Index(eachRouteToken, ":"); colon != -1 { + // match by regex + matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken) + if matchesToken { + score++ // extra score for regex match + } + } } else { // not a parameter - if each != other { + if eachRequestToken != eachRouteToken { return false, score } - score += (len(tokens) - i) * 10 //fuzzy + score += (len(routeTokens) - i) * 10 //fuzzy } } return true, score diff --git a/tools/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/tools/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e9..7f04bd905 100644 --- a/tools/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/tools/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma return params } -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { candidates := make([]*Route, 0, 8) for i, each := range routes { @@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), 
contentType) } - if httpRequest.ContentLength > 0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept @@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) } - // if POST,PUT,PATCH without body - method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") - if (method == http.MethodPost || - method == http.MethodPut || - method == http.MethodPatch) && length == "" { - return nil, NewError( - http.StatusUnsupportedMediaType, - fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } return nil, NewError( http.StatusNotAcceptable, - fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", "))) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/tools/vendor/github.com/emicklei/go-restful/v3/route.go b/tools/vendor/github.com/emicklei/go-restful/v3/route.go index 306c44be7..a2056e2ac 100644 --- a/tools/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/tools/vendor/github.com/emicklei/go-restful/v3/route.go @@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool { } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). +// If the route does not specify Consumes then return true (*/*). +// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE. func (r Route) matchesContentType(mimeTypes string) bool { if len(r.Consumes) == 0 { diff --git a/tools/vendor/github.com/evanphx/json-patch/README.md b/tools/vendor/github.com/evanphx/json-patch/README.md index 97e319b21..86fefd5bf 100644 --- a/tools/vendor/github.com/evanphx/json-patch/README.md +++ b/tools/vendor/github.com/evanphx/json-patch/README.md @@ -14,9 +14,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie go get -u github.com/evanphx/json-patch/v5 ``` -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` +If you need version 4, use `go get -u gopkg.in/evanphx/json-patch.v4` (previous versions below `v3` are unavailable) diff --git a/tools/vendor/github.com/evanphx/json-patch/patch.go b/tools/vendor/github.com/evanphx/json-patch/patch.go index cd0274e1e..95136681b 100644 --- a/tools/vendor/github.com/evanphx/json-patch/patch.go +++ b/tools/vendor/github.com/evanphx/json-patch/patch.go @@ -3,11 +3,10 @@ package jsonpatch import ( "bytes" "encoding/json" + "errors" "fmt" "strconv" "strings" - - "github.com/pkg/errors" ) const ( @@ -277,7 +276,7 @@ func (o Operation) Path() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") + return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing) } // From reads the "from" field of the Operation. 
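The patch.go hunks here and below replace github.com/pkg/errors wrapping with the standard library's `%w` verb. A minimal sketch (reusing the `ErrMissing` sentinel name from the diff) showing that callers matching with `errors.Is` see no behavior change:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrMissing mirrors the sentinel wrapped throughout patch.go.
var ErrMissing = errors.New("missing value")

func main() {
	// Before: err := pkgerrors.Wrapf(ErrMissing, "operation missing path field")
	// After, with only the standard library:
	err := fmt.Errorf("operation missing path field: %w", ErrMissing)

	fmt.Println(errors.Is(err, ErrMissing)) // true: the sentinel stays matchable
	fmt.Println(err)                        // operation missing path field: missing value
}
```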
@@ -294,7 +293,7 @@ func (o Operation) From() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") + return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing) } func (o Operation) value() *lazyNode { @@ -319,7 +318,7 @@ func (o Operation) ValueInterface() (interface{}, error) { return v, nil } - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") + return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing) } func isArray(buf []byte) bool { @@ -398,7 +397,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) { func (d *partialDoc) remove(key string) error { _, ok := (*d)[key] if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + return fmt.Errorf("Unable to remove nonexistent key: %s: %w", key, ErrMissing) } delete(*d, key) @@ -415,10 +414,10 @@ func (d *partialArray) set(key string, val *lazyNode) error { if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(*d) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(*d) } @@ -435,7 +434,7 @@ func (d *partialArray) add(key string, val *lazyNode) error { idx, err := strconv.Atoi(key) if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + return fmt.Errorf("value was not a proper array index: '%s': %w", key, err) } sz := len(*d) + 1 @@ -445,15 +444,15 @@ func (d *partialArray) add(key string, val *lazyNode) error { cur := *d if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(ary) } @@ -475,16 +474,16 @@ func (d *partialArray) get(key string) (*lazyNode, error) { if idx < 0 { if !SupportNegativeIndices { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(*d) } if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } return (*d)[idx], nil @@ -499,15 +498,15 @@ func (d *partialArray) remove(key string) error { cur := *d if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to 
access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(cur) } @@ -525,18 +524,18 @@ func (d *partialArray) remove(key string) error { func (p Patch) add(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") + return fmt.Errorf("add operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.add(key, op.value()) if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) + return fmt.Errorf("error in add for path: '%s': %w", path, err) } return nil @@ -545,18 +544,18 @@ func (p Patch) add(doc *container, op Operation) error { func (p Patch) remove(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.remove(key) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -565,7 +564,7 @@ func (p Patch) remove(doc *container, op Operation) error { func (p Patch) replace(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "replace operation failed to decode path") + return fmt.Errorf("replace operation failed to decode path: %w", err) } if path == "" { @@ -574,7 +573,7 @@ func (p Patch) replace(doc *container, op Operation) error { if val.which == eRaw { if !val.tryDoc() { if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") + return fmt.Errorf("replace operation value must be object or array: %w", err) } } } @@ -585,7 +584,7 @@ func (p Patch) replace(doc *container, op Operation) error { case eDoc: *doc = &val.doc case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") + return fmt.Errorf("replace operation hit impossible case: %w", err) } return nil @@ -594,17 +593,17 @@ func (p Patch) replace(doc *container, op Operation) error { con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing) } _, ok := con.get(key) if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing) } err = con.set(key, op.value()) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -613,39 +612,39 @@ func (p Patch) replace(doc 
*container, op Operation) error { func (p Patch) move(doc *container, op Operation) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") + return fmt.Errorf("move operation failed to decode from: %w", err) } con, key := findObject(doc, from) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } err = con.remove(key) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") + return fmt.Errorf("move operation failed to decode path: %w", err) } con, key = findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } err = con.add(key, val) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) + return fmt.Errorf("error in move for path: '%s': %w", path, err) } return nil @@ -654,7 +653,7 @@ func (p Patch) move(doc *container, op Operation) error { func (p Patch) test(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") + return fmt.Errorf("test operation failed to decode path: %w", err) } if path == "" { @@ -673,67 +672,67 @@ func (p Patch) test(doc *container, op Operation) error { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) + return fmt.Errorf("error in test for path: '%s': %w", path, err) } if val == nil { if op.value() == nil || op.value().raw == nil { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } if val.equal(op.value()) { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") + return fmt.Errorf("copy operation failed to decode from: %w", err) } con, key := findObject(doc, from) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("copy operation does not apply: doc is missing from 
path: %s: %w", from, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) + return fmt.Errorf("error in copy for from: '%s': %w", from, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing) } con, key = findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } valCopy, sz, err := deepCopy(val) if err != nil { - return errors.Wrapf(err, "error while performing deep copy") + return fmt.Errorf("error while performing deep copy: %w", err) } (*accumulatedCopySize) += int64(sz) @@ -743,7 +742,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er err = con.add(key, valCopy) if err != nil { - return errors.Wrapf(err, "error while adding value during copy") + return fmt.Errorf("error while adding value during copy: %w", err) } return nil diff --git a/tools/vendor/github.com/evanphx/json-patch/v5/merge.go b/tools/vendor/github.com/evanphx/json-patch/v5/merge.go index f79caf313..d60afadcf 100644 --- a/tools/vendor/github.com/evanphx/json-patch/v5/merge.go +++ b/tools/vendor/github.com/evanphx/json-patch/v5/merge.go @@ -103,8 +103,8 @@ func pruneAryNulls(ary *partialArray, options *ApplyOptions) *partialArray { return ary } -var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") // MergeMergePatches merges two merge patches together, such that @@ -121,11 +121,11 @@ func MergePatch(docData, patchData []byte) ([]byte, error) { func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { if !json.Valid(docData) { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } if !json.Valid(patchData) { - return nil, errBadJSONPatch + return nil, ErrBadJSONPatch } options := NewApplyOptions() @@ -143,7 +143,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { patchErr := patch.UnmarshalJSON(patchData) if isSyntaxError(docErr) { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } if isSyntaxError(patchErr) { @@ -151,7 +151,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { } if docErr == nil && doc.obj == nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } if patchErr == nil && patch.obj == nil { @@ -175,7 +175,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { if json.Valid(patchData) { return patchData, nil } - return nil, errBadJSONPatch + return nil, ErrBadJSONPatch } pruneAryNulls(patchAry, options) @@ -183,7 +183,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { out, patchErr := json.Marshal(patchAry.nodes) if patchErr != nil { - return nil, errBadJSONPatch + return nil, ErrBadJSONPatch } return out, nil @@ -256,12 +256,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { err := unmarshal(originalJSON, &originalDoc) if err != nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } err = unmarshal(modifiedJSON, &modifiedDoc) if err 
!= nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } dest, err := getDiff(originalDoc, modifiedDoc) @@ -286,17 +286,17 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { err := unmarshal(originalJSON, &originalDocs) if err != nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } err = unmarshal(modifiedJSON, &modifiedDocs) if err != nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } total := len(originalDocs) if len(modifiedDocs) != total { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } result := []json.RawMessage{} diff --git a/tools/vendor/github.com/evanphx/json-patch/v5/patch.go b/tools/vendor/github.com/evanphx/json-patch/v5/patch.go index 7a7f71c8b..83102e557 100644 --- a/tools/vendor/github.com/evanphx/json-patch/v5/patch.go +++ b/tools/vendor/github.com/evanphx/json-patch/v5/patch.go @@ -2,13 +2,13 @@ package jsonpatch import ( "bytes" + "errors" "fmt" "strconv" "strings" "unicode" "github.com/evanphx/json-patch/v5/internal/json" - "github.com/pkg/errors" ) const ( @@ -461,7 +461,7 @@ func (o Operation) Path() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") + return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing) } // From reads the "from" field of the Operation. @@ -478,7 +478,7 @@ func (o Operation) From() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") + return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing) } func (o Operation) value() *lazyNode { @@ -511,7 +511,7 @@ func (o Operation) ValueInterface() (interface{}, error) { return v, nil } - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") + return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing) } func isArray(buf []byte) bool { @@ -610,7 +610,7 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) { v, ok := d.obj[key] if !ok { - return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key) + return v, fmt.Errorf("unable to get nonexistent key: %s: %w", key, ErrMissing) } return v, nil } @@ -625,7 +625,7 @@ func (d *partialDoc) remove(key string, options *ApplyOptions) error { if options.AllowMissingPathOnRemove { return nil } - return errors.Wrapf(ErrMissing, "unable to remove nonexistent key: %s", key) + return fmt.Errorf("unable to remove nonexistent key: %s: %w", key, ErrMissing) } idx := -1 for i, k := range d.keys { @@ -649,10 +649,10 @@ func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) err if idx < 0 { if !options.SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(d.nodes) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(d.nodes) } @@ -669,7 +669,7 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err idx, err := strconv.Atoi(key) if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + return fmt.Errorf("value was not a proper array index: '%s': %w", key, err) } sz := len(d.nodes) + 1 @@ -679,15 +679,15 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err cur := d if idx >= len(ary) 
{ - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !options.SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(ary) } @@ -713,16 +713,16 @@ func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error) if idx < 0 { if !options.SupportNegativeIndices { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(d.nodes) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(d.nodes) } if idx >= len(d.nodes) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } return d.nodes[idx], nil @@ -740,18 +740,18 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error { if options.AllowMissingPathOnRemove { return nil } - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !options.SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(cur.nodes) { if options.AllowMissingPathOnRemove { return nil } - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(cur.nodes) } @@ -768,7 +768,7 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error { func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") + return fmt.Errorf("add operation failed to decode path: %w", ErrMissing) } // special case, adding to empty means replacing the container with the value given @@ -809,12 +809,12 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error { con, key := findObject(doc, path, options) if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.add(key, op.value(), options) if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) + return fmt.Errorf("error in add for path: '%s': %w", path, err) } return nil @@ -867,11 +867,11 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error { if arrIndex < 0 { if !options.SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for invalid index: %d", arrIndex) + return fmt.Errorf("Unable to ensure path for invalid index: %d: %w", arrIndex, ErrInvalidIndex) } if 
arrIndex < -1 { - return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for negative index other than -1: %d", arrIndex) + return fmt.Errorf("Unable to ensure path for negative index other than -1: %d: %w", arrIndex, ErrInvalidIndex) } arrIndex = 0 @@ -918,11 +918,11 @@ func validateOperation(op Operation) error { switch op.Kind() { case "add", "replace": if _, err := op.ValueInterface(); err != nil { - return errors.Wrapf(err, "failed to decode 'value'") + return fmt.Errorf("failed to decode 'value': %w", err) } case "move", "copy": if _, err := op.From(); err != nil { - return errors.Wrapf(err, "failed to decode 'from'") + return fmt.Errorf("failed to decode 'from': %w", err) } case "remove", "test": default: @@ -930,7 +930,7 @@ func validateOperation(op Operation) error { } if _, err := op.Path(); err != nil { - return errors.Wrapf(err, "failed to decode 'path'") + return fmt.Errorf("failed to decode 'path': %w", err) } return nil @@ -941,10 +941,10 @@ func validatePatch(p Patch) error { if err := validateOperation(op); err != nil { opData, infoErr := json.Marshal(op) if infoErr != nil { - return errors.Wrapf(err, "invalid operation") + return fmt.Errorf("invalid operation: %w", err) } - return errors.Wrapf(err, "invalid operation %s", opData) + return fmt.Errorf("invalid operation %s: %w", opData, err) } } @@ -954,7 +954,7 @@ func validatePatch(p Patch) error { func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path, options) @@ -963,12 +963,12 @@ func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error if options.AllowMissingPathOnRemove { return nil } - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.remove(key, options) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -977,7 +977,7 @@ func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "replace operation failed to decode path") + return fmt.Errorf("replace operation failed to decode path: %w", err) } if path == "" { @@ -986,7 +986,7 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro if val.which == eRaw { if !val.tryDoc() { if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") + return fmt.Errorf("replace operation value must be object or array: %w", err) } } else { val.doc.opts = options @@ -999,7 +999,7 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro case eDoc: *doc = val.doc case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") + return fmt.Errorf("replace operation hit impossible case: %w", err) } return nil @@ -1008,17 +1008,17 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro con, key := findObject(doc, path, options) if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing 
path: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing) } _, ok := con.get(key, options) if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing) } err = con.set(key, op.value(), options) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -1027,43 +1027,43 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") + return fmt.Errorf("move operation failed to decode from: %w", err) } if from == "" { - return errors.Wrapf(ErrInvalid, "unable to move entire document to another path") + return fmt.Errorf("unable to move entire document to another path: %w", ErrInvalid) } con, key := findObject(doc, from, options) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) } val, err := con.get(key, options) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } err = con.remove(key, options) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") + return fmt.Errorf("move operation failed to decode path: %w", err) } con, key = findObject(doc, path, options) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } err = con.add(key, val, options) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) + return fmt.Errorf("error in move for path: '%s': %w", path, err) } return nil @@ -1072,7 +1072,7 @@ func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error { func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") + return fmt.Errorf("test operation failed to decode path: %w", err) } if path == "" { @@ -1091,18 +1091,18 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } con, key := findObject(doc, path, options) if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing) } val, err := con.get(key, options) - if err != nil && errors.Cause(err) != ErrMissing { - return errors.Wrapf(err, "error in test for path: '%s'", path) + if err != nil && errors.Unwrap(err) != ErrMissing { + return fmt.Errorf("error in test for path: 
'%s': %w", path, err) } ov := op.value() @@ -1111,49 +1111,49 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error { if ov.isNull() { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } else if ov.isNull() { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } if val.equal(op.value()) { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, options *ApplyOptions) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") + return fmt.Errorf("copy operation failed to decode from: %w", err) } con, key := findObject(doc, from, options) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: \"%s\"", from) + return fmt.Errorf("copy operation does not apply: doc is missing from path: \"%s\": %w", from, ErrMissing) } val, err := con.get(key, options) if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) + return fmt.Errorf("error in copy for from: '%s': %w", from, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing) } con, key = findObject(doc, path, options) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } valCopy, sz, err := deepCopy(val, options) if err != nil { - return errors.Wrapf(err, "error while performing deep copy") + return fmt.Errorf("error while performing deep copy: %w", err) } (*accumulatedCopySize) += int64(sz) @@ -1163,7 +1163,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op err = con.add(key, valCopy, options) if err != nil { - return errors.Wrapf(err, "error while adding value during copy") + return fmt.Errorf("error while adding value during copy: %w", err) } return nil diff --git a/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index f4e7dbf37..7f257e99a 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-14-1 + image_family: freebsd-14-2 install_script: - pkg update -f - pkg install -y go diff --git a/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index fa854785d..6468d2cf4 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,39 @@ # Changelog -1.8.0 2023-10-31 +1.9.0 2025-04-04 +---------------- + +### Changes and fixes + +- all: make BufferedWatcher buffered again ([#657]) + +- inotify: fix race when adding/removing watches while a watched path is being + deleted ([#678], [#686]) + +- inotify: don't send empty event if a watched path is unmounted ([#655]) + +- inotify: don't register duplicate watches when
watching both a symlink and its + target; previously that would get "half-added" and removing the second would + panic ([#679]) + +- kqueue: fix watching relative symlinks ([#681]) + +- kqueue: correctly mark pre-existing entries when watching a link to a dir on + kqueue ([#682]) + +- illumos: don't send error if changed file is deleted while processing the + event ([#678]) + + +[#657]: https://github.com/fsnotify/fsnotify/pull/657 +[#678]: https://github.com/fsnotify/fsnotify/pull/678 +[#686]: https://github.com/fsnotify/fsnotify/pull/686 +[#655]: https://github.com/fsnotify/fsnotify/pull/655 +[#681]: https://github.com/fsnotify/fsnotify/pull/681 +[#679]: https://github.com/fsnotify/fsnotify/pull/679 +[#682]: https://github.com/fsnotify/fsnotify/pull/682 + +1.8.0 2024-10-31 ---------------- ### Additions diff --git a/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index e4ac2a2ff..4cc40fa59 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -77,6 +77,7 @@ End-of-line escapes with `\` are not supported. debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in parallel by default, so -parallel=1 is probably a good idea). + print [any strings] # Print text to stdout; for debugging. touch path mkdir [-p] dir diff --git a/tools/vendor/github.com/fsnotify/fsnotify/README.md b/tools/vendor/github.com/fsnotify/fsnotify/README.md index e480733d1..1f4eb583d 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/README.md +++ b/tools/vendor/github.com/fsnotify/fsnotify/README.md @@ -15,7 +15,6 @@ Platform support: | ReadDirectoryChangesW | Windows | Supported | | FEN | illumos | Supported | | fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | | FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | | USN Journals | Windows | [Needs support in x/sys/windows][usn] | | Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | @@ -25,7 +24,6 @@ untested. 
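The json-patch/v5 hunks earlier replace `github.com/pkg/errors` with stdlib error wrapping: each `errors.Wrapf(sentinel, msg, args...)` becomes `fmt.Errorf(msg+": %w", args..., sentinel)`, so sentinels such as `ErrMissing` stay matchable by callers. A minimal sketch of the pattern (my own example, not code from the patch; `ErrMissing` stands in for the package's exported sentinel):

```go
package main

import (
	"errors"
	"fmt"
)

var ErrMissing = errors.New("missing value") // stand-in for the package sentinel

func lookup(key string) error {
	// Post-migration style: annotate with context, wrap the sentinel via %w.
	return fmt.Errorf("unable to get nonexistent key: %s: %w", key, ErrMissing)
}

func main() {
	err := lookup("a")
	fmt.Println(errors.Is(err, ErrMissing))       // true: walks the whole wrap chain
	fmt.Println(errors.Unwrap(err) == ErrMissing) // true here, but only one level deep
}
```

One subtlety: the patch also swaps `errors.Cause(err) != ErrMissing` for `errors.Unwrap(err) != ErrMissing`. `Unwrap` peels exactly one layer where `Cause` walked the whole chain, so `errors.Is` would be the more robust comparison if the wrapping depth ever changes.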
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 [usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 Usage ----- diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go index c349c326c..57fc69284 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -9,6 +9,7 @@ package fsnotify import ( "errors" "fmt" + "io/fs" "os" "path/filepath" "sync" @@ -19,27 +20,25 @@ import ( ) type fen struct { + *shared Events chan Event Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine dirs map[string]Op // Explicitly watched directories watches map[string]Op // Explicitly watched non-directories } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { w := &fen{ + shared: newShared(ev, errs), Events: ev, Errors: errs, dirs: make(map[string]Op), watches: make(map[string]Op), - done: make(chan struct{}), } var err error @@ -52,49 +51,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error return w, nil } -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *fen) sendEvent(name string, op Op) (sent bool) { - select { - case <-w.done: - return false - case w.Events <- Event{Name: name, Op: op}: - return true - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. -func (w *fen) sendError(err error) (sent bool) { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *fen) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *fen) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { + if w.shared.close() { return nil } - close(w.done) return w.port.Close() } @@ -209,7 +169,7 @@ func (w *fen) readEvents() { return } // There was an error not caused by calling w.Close() - if !w.sendError(err) { + if !w.sendError(fmt.Errorf("port.Get: %w", err)) { return } } @@ -277,13 +237,13 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { isWatched := watchedDir || watchedPath if events&unix.FILE_DELETE != 0 { - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } reRegister = false } if events&unix.FILE_RENAME_FROM != 0 { - if !w.sendEvent(path, Rename) { + if !w.sendEvent(Event{Name: path, Op: Rename}) { return nil } // Don't keep watching the new file name @@ -297,7 +257,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { // inotify reports a Remove event in this case, so we simulate this // here. 
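Across these backend hunks, events surface on the same two public channels, and (per the fsnotify.go hunk later in this patch) `NewBufferedWatcher(sz)` now simply pre-sizes the Events channel rather than calling a separate buffered constructor. A minimal consumer sketch against the public API (the watched path is illustrative, not taken from the patch):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher() // or fsnotify.NewBufferedWatcher(1024)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil { // illustrative path
		log.Fatal(err)
	}
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // channel closed by Close()
			}
			log.Printf("%s %s", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```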
- if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't keep watching the file that was removed @@ -331,7 +291,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { // get here, the sudirectory is already gone. Clearly we were watching // this path but now it is gone. Let's tell the user that it was // removed. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Suppress extra write events on removed directories; they are not @@ -346,7 +306,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if err != nil { // The symlink still exists, but the target is gone. Report the // Remove similar to above. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't return the error @@ -359,7 +319,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { return err } } else { - if !w.sendEvent(path, Write) { + if !w.sendEvent(Event{Name: path, Op: Write}) { return nil } } @@ -367,7 +327,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if events&unix.FILE_ATTRIB != 0 && stat != nil { // Only send Chmod if perms changed if stat.Mode().Perm() != fmode.Perm() { - if !w.sendEvent(path, Chmod) { + if !w.sendEvent(Event{Name: path, Op: Chmod}) { return nil } } @@ -376,17 +336,27 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if stat != nil { // If we get here, it means we've hit an event above that requires us to // continue watching the file or directory - return w.associateFile(path, stat, isWatched) + err := w.associateFile(path, stat, isWatched) + if errors.Is(err, fs.ErrNotExist) { + // Path may have been removed since the stat. + err = nil + } + return err } return nil } +// The directory was modified, so we must find unwatched entities and watch +// them. If something was removed from the directory, nothing will happen, as +// everything else should still be watched. func (w *fen) updateDirectory(path string) error { - // The directory was modified, so we must find unwatched entities and watch - // them. If something was removed from the directory, nothing will happen, - // as everything else should still be watched. files, err := os.ReadDir(path) if err != nil { + // Directory no longer exists: probably just deleted since we got the + // event. + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } @@ -401,10 +371,15 @@ func (w *fen) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) + if errors.Is(err, fs.ErrNotExist) { + // File may have disappeared between getting the dir listing and + // adding the port: that's okay to ignore. + continue + } if !w.sendError(err) { return nil } - if !w.sendEvent(path, Create) { + if !w.sendEvent(Event{Name: path, Op: Create}) { return nil } } @@ -430,7 +405,7 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { // has fired but we haven't processed it yet. 
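The fen changes here all follow one rule: a path vanishing between the event and our stat or re-association is not an error, so `fs.ErrNotExist` is swallowed. The same race-tolerant scan, reduced to a standalone sketch (a hypothetical helper, not part of the package):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// scanDir visits each entry's FileInfo, tolerating files (or the directory
// itself) being deleted between the triggering event and this scan.
func scanDir(path string, visit func(fs.FileInfo) error) error {
	entries, err := os.ReadDir(path)
	if errors.Is(err, fs.ErrNotExist) {
		return nil // directory deleted since the event fired
	}
	if err != nil {
		return err
	}
	for _, e := range entries {
		fi, err := e.Info()
		if errors.Is(err, fs.ErrNotExist) {
			continue // entry deleted mid-scan
		}
		if err != nil {
			return err
		}
		if err := visit(fi); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = scanDir("/tmp", func(fi fs.FileInfo) error { fmt.Println(fi.Name()); return nil })
}
```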
err := w.port.DissociatePath(path) if err != nil && !errors.Is(err, unix.ENOENT) { - return err + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) } } @@ -446,14 +421,22 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if true { events |= unix.FILE_ATTRIB } - return w.port.AssociatePath(path, stat, events, stat.Mode()) + err := w.port.AssociatePath(path, stat, events, stat.Mode()) + if err != nil { + return fmt.Errorf("port.AssociatePath(%q): %w", path, err) + } + return nil } func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } - return w.port.DissociatePath(path) + err := w.port.DissociatePath(path) + if err != nil { + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) + } + return nil } func (w *fen) WatchList() []string { diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 36c311694..a36cb89d7 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -19,6 +19,7 @@ import ( ) type inotify struct { + *shared Events chan Event Errors chan error @@ -27,8 +28,6 @@ type inotify struct { fd int inotifyFile *os.File watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close // Store rename cookies in an array, with the index wrapping to 0. Almost @@ -52,7 +51,6 @@ type inotify struct { type ( watches struct { - mu sync.RWMutex wd map[uint32]*watch // wd → watch path map[string]uint32 // pathname → wd } @@ -75,34 +73,13 @@ func newWatches() *watches { } } -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - watch := w.wd[wd] // Could have had Remove() called. See #616. 
- if watch == nil { - return - } - delete(w.path, watch.path) - delete(w.wd, wd) -} +func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] } +func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] } +func (w *watches) len() int { return len(w.wd) } +func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd } +func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) } func (w *watches) removePath(path string) ([]uint32, error) { - w.mu.Lock() - defer w.mu.Unlock() - path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { @@ -123,7 +100,7 @@ func (w *watches) removePath(path string) ([]uint32, error) { wds := make([]uint32, 0, 8) wds = append(wds, wd) for p, rwd := range w.path { - if filepath.HasPrefix(p, path) { + if strings.HasPrefix(p, path) { delete(w.path, p) delete(w.wd, rwd) wds = append(wds, rwd) @@ -132,22 +109,7 @@ func (w *watches) removePath(path string) ([]uint32, error) { return wds, nil } -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] -} - func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - var existing *watch wd, ok := w.path[path] if ok { @@ -170,11 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -183,12 +143,12 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error } w := &inotify{ + shared: newShared(ev, errs), Events: ev, Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -196,46 +156,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error return w, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *inotify) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *inotify) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *inotify) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *inotify) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() + if w.shared.close() { return nil } - close(w.done) - w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -244,9 +168,7 @@ func (w *inotify) Close() error { return err } - // Wait for goroutine to close - <-w.doneResp - + <-w.doneResp // Wait for readEvents() to finish. 
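With the refactor above, `watches` loses its own `sync.RWMutex`: the accessors shrink to one-liners over a dual-index map and all locking moves to the owning `inotify.mu`. The shape of that registry, as a standalone sketch (simplified to store paths instead of `*watch` values):

```go
// Dual-index registry: look up by inotify watch descriptor or by path.
// Deliberately not safe for concurrent use on its own; the owner must hold
// its lock, mirroring how inotify.mu guards these maps after the refactor.
type registry struct {
	byWd   map[uint32]string // wd → path
	byPath map[string]uint32 // path → wd
}

func newRegistry() *registry {
	return &registry{byWd: map[uint32]string{}, byPath: map[string]uint32{}}
}

func (r *registry) add(wd uint32, path string) {
	r.byWd[wd] = path
	r.byPath[path] = wd
}

func (r *registry) remove(wd uint32) {
	if path, ok := r.byWd[wd]; ok {
		delete(r.byPath, path) // keep both indexes in sync
		delete(r.byWd, wd)
	}
}
```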
return nil } @@ -266,6 +188,43 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error { return fmt.Errorf("%w: %s", xErrUnsupported, with.op) } + add := func(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) + } + + w.mu.Lock() + defer w.mu.Unlock() path, recurse := recursivePath(path) if recurse { return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { @@ -289,46 +248,11 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error { w.sendEvent(Event{Name: root, Op: Create}) } - return w.add(root, with, true) + return add(root, with, true) }) } - return w.add(path, with, false) -} - -func (w *inotify) add(path string, with withOpts, recurse bool) error { - var flags uint32 - if with.noFollow { - flags |= unix.IN_DONT_FOLLOW - } - if with.op.Has(Create) { - flags |= unix.IN_CREATE - } - if with.op.Has(Write) { - flags |= unix.IN_MODIFY - } - if with.op.Has(Remove) { - flags |= unix.IN_DELETE | unix.IN_DELETE_SELF - } - if with.op.Has(Rename) { - flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF - } - if with.op.Has(Chmod) { - flags |= unix.IN_ATTRIB - } - if with.op.Has(xUnportableOpen) { - flags |= unix.IN_OPEN - } - if with.op.Has(xUnportableRead) { - flags |= unix.IN_ACCESS - } - if with.op.Has(xUnportableCloseWrite) { - flags |= unix.IN_CLOSE_WRITE - } - if with.op.Has(xUnportableCloseRead) { - flags |= unix.IN_CLOSE_NOWRITE - } - return w.register(path, flags, recurse) + return add(path, with, false) } func (w *inotify) register(path string, flags uint32, recurse bool) error { @@ -342,6 +266,10 @@ func (w *inotify) register(path string, flags uint32, recurse bool) error { return nil, err } + if e, ok := w.watches.wd[uint32(wd)]; ok { + return e, nil + } + if existing == nil { return &watch{ wd: uint32(wd), @@ -365,6 +293,9 @@ func (w *inotify) Remove(name string) error { fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", time.Now().Format("15:04:05.000000000"), name) } + + w.mu.Lock() + defer w.mu.Unlock() return w.remove(filepath.Clean(name)) } @@ -399,13 +330,12 @@ func (w *inotify) WatchList() []string { return nil } + w.mu.Lock() + defer w.mu.Unlock() entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() for pathname := range w.watches.path { entries = append(entries, pathname) } - w.watches.mu.RUnlock() - return entries } @@ -418,21 +348,17 @@ func (w *inotify) readEvents() { close(w.Events) }() - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) + var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events for { - // See if we have been closed. 
if w.isClosed() { return } n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: + if err != nil { + if errors.Is(err, os.ErrClosed) { + return + } if !w.sendError(err) { return } @@ -440,13 +366,9 @@ func (w *inotify) readEvents() { } if n < unix.SizeofInotifyEvent { - var err error + err := errors.New("notify: short read in readEvents()") // Read was too short. if n == 0 { err = io.EOF // If EOF is received. This should really never happen. - } else if n < 0 { - err = errno // If an error occurred while reading. - } else { - err = errors.New("notify: short read in readEvents()") // Read was too short. } if !w.sendError(err) { return @@ -454,132 +376,135 @@ func (w *inotify) readEvents() { continue } - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... + // We don't know how many events we just read into the buffer While the + // offset points to at least one whole event. var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - // Move to the next event in the buffer - next = func() { offset += unix.SizeofInotifyEvent + nameLen } - ) - - if mask&unix.IN_Q_OVERFLOW != 0 { + // Point to the event in the buffer. + inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 { if !w.sendError(ErrEventOverflow) { return } } - /// If the event happened to the watched directory or the watched - /// file, the kernel doesn't append the filename to the event, but - /// we would like to always fill the the "Name" field with a valid - /// filename. We retrieve the path of the watch from the "paths" - /// map. - watch := w.watches.byWd(uint32(raw.Wd)) - /// Can be nil if Remove() was called in another goroutine for this - /// path inbetween reading the events from the kernel and reading - /// the internal state. Not much we can do about it, so just skip. - /// See #616. - if watch == nil { - next() - continue + ev, ok := w.handleEvent(inEvent, &buf, offset) + if !ok { + return } - - name := watch.path - if nameLen > 0 { - /// Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - /// The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + if !w.sendEvent(ev) { + return } - if debug { - internal.Debug(name, raw.Mask, raw.Cookie) - } + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + inEvent.Len + } + } +} - if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 - next() - continue - } +func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) { + w.mu.Lock() + defer w.mu.Unlock() - // inotify will automatically remove the watch on deletes; just need - // to clean our state here. - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - w.watches.remove(watch.wd) - } + /// If the event happened to the watched directory or the watched file, the + /// kernel doesn't append the filename to the event, but we would like to + /// always fill the the "Name" field with a valid filename. We retrieve the + /// path of the watch from the "paths" map. 
+ /// + /// Can be nil if Remove() was called in another goroutine for this path + /// inbetween reading the events from the kernel and reading the internal + /// state. Not much we can do about it, so just skip. See #616. + watch := w.watches.byWd(uint32(inEvent.Wd)) + if watch == nil { + return Event{}, true + } - // We can't really update the state when a watched path is moved; - // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove - // the watch. - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { - if watch.recurse { - next() // Do nothing - continue - } + var ( + name = watch.path + nameLen = uint32(inEvent.Len) + ) + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bb := *buf + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00") + } - err := w.remove(watch.path) - if err != nil && !errors.Is(err, ErrNonExistentWatch) { - if !w.sendError(err) { - return - } - } + if debug { + internal.Debug(name, inEvent.Mask, inEvent.Cookie) + } + + if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 { + w.watches.remove(watch) + return Event{}, true + } + + // inotify will automatically remove the watch on deletes; just need + // to clean our state here. + if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + w.watches.remove(watch) + } + + // We can't really update the state when a watched path is moved; only + // IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch. + if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { // Do nothing + return Event{}, true + } + + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return Event{}, false } + } + } - /// Skip if we're watching both this path and the parent; the parent - /// will already send a delete so no need to do it twice. - if mask&unix.IN_DELETE_SELF != 0 { - if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { - next() - continue - } + /// Skip if we're watching both this path and the parent; the parent will + /// already send a delete so no need to do it twice. + if inEvent.Mask&unix.IN_DELETE_SELF != 0 { + _, ok := w.watches.path[filepath.Dir(watch.path)] + if ok { + return Event{}, true + } + } + + ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return Event{}, false } - ev := w.newEvent(name, mask, raw.Cookie) - // Need to update watch path for recurse. - if watch.recurse { - isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR - /// New directory created: set up watch on it. - if isDir && ev.Has(Create) { - err := w.register(ev.Name, watch.flags, true) - if !w.sendError(err) { - return + // This was a directory rename, so we need to update all the + // children. + // + // TODO: this is of course pretty slow; we should use a better data + // structure for storing all of this, e.g. store children in the + // watch. I have some code for this in my kqueue refactor we can use + // in the future. For now I'm okay with this as it's not publicly + // available. Correctness first, performance second. 
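When a recursively watched directory is renamed, only `IN_MOVE_SELF` arrives for it, so the loop that follows rewrites the stored paths of every child watch by prefix. The same operation as a free-standing sketch (a hypothetical function over a simplified wd → path map; like the original, a bare `strings.HasPrefix` would also match a sibling whose name merely shares the prefix):

```go
package main

import (
	"fmt"
	"strings"
)

// rewriteChildren updates child watch paths after `from` was renamed to `to`,
// mirroring the prefix-replace loop in handleEvent.
func rewriteChildren(paths map[uint32]string, from, to string) {
	for wd, p := range paths {
		if p == from || p == to {
			continue // the renamed watch itself is handled elsewhere
		}
		if strings.HasPrefix(p, from) {
			paths[wd] = strings.Replace(p, from, to, 1)
		}
	}
}

func main() {
	paths := map[uint32]string{1: "/data/old", 2: "/data/old/sub", 3: "/data/other"}
	rewriteChildren(paths, "/data/old", "/data/new")
	fmt.Println(paths) // map[1:/data/old 2:/data/new/sub 3:/data/other]
}
```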
+ if ev.renamedFrom != "" { + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue } - - // This was a directory rename, so we need to update all - // the children. - // - // TODO: this is of course pretty slow; we should use a - // better data structure for storing all of this, e.g. store - // children in the watch. I have some code for this in my - // kqueue refactor we can use in the future. For now I'm - // okay with this as it's not publicly available. - // Correctness first, performance second. - if ev.renamedFrom != "" { - w.watches.mu.Lock() - for k, ww := range w.watches.wd { - if k == watch.wd || ww.path == ev.Name { - continue - } - if strings.HasPrefix(ww.path, ev.renamedFrom) { - ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) - w.watches.wd[k] = ww - } - } - w.watches.mu.Unlock() + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww } } } - - /// Send the events that are not ignored on the events channel - if !w.sendEvent(ev) { - return - } - next() } } + + return ev, true } func (w *inotify) isRecursive(path string) bool { @@ -650,8 +575,8 @@ func (w *inotify) xSupports(op Op) bool { } func (w *inotify) state() { - w.watches.mu.Lock() - defer w.watches.mu.Unlock() + w.mu.Lock() + defer w.mu.Unlock() for wd, ww := range w.watches.wd { fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) } diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index d8de5ab76..340aeec06 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -16,14 +16,13 @@ import ( ) type kqueue struct { + *shared Events chan Event Errors chan error kq int // File descriptor (as returned by the kqueue() syscall). closepipe [2]int // Pipe used for closing kq. watches *watches - done chan struct{} - doneMu sync.Mutex } type ( @@ -132,14 +131,18 @@ func (w *watches) byPath(path string) (watch, bool) { return info, ok } -func (w *watches) updateDirFlags(path string, flags uint32) { +func (w *watches) updateDirFlags(path string, flags uint32) bool { w.mu.Lock() defer w.mu.Unlock() - fd := w.path[path] + fd, ok := w.path[path] + if !ok { // Already deleted: don't re-set it here. + return false + } info := w.wd[fd] info.dirFlags = flags w.wd[fd] = info + return true } func (w *watches) remove(fd int, path string) bool { @@ -179,22 +182,20 @@ func (w *watches) seenBefore(path string) bool { return ok } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } w := &kqueue{ + shared: newShared(ev, errs), Events: ev, Errors: errs, kq: kq, closepipe: closepipe, - done: make(chan struct{}), watches: newWatches(), } @@ -210,7 +211,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error // all. 
func newKqueue() (kq int, closepipe [2]int, err error) { kq, err = unix.Kqueue() - if kq == -1 { + if err != nil { return kq, closepipe, err } @@ -239,54 +240,17 @@ func newKqueue() (kq int, closepipe [2]int, err error) { return kq, closepipe, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *kqueue) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *kqueue) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *kqueue) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *kqueue) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() + if w.shared.close() { return nil } - close(w.done) - w.doneMu.Unlock() pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) + unix.Close(w.closepipe[1]) // Send "quit" message to readEvents return nil } @@ -303,7 +267,7 @@ func (w *kqueue) AddWith(name string, opts ...addOpt) error { return fmt.Errorf("%w: %s", xErrUnsupported, with.op) } - _, err := w.addWatch(name, noteAllEvents) + _, err := w.addWatch(name, noteAllEvents, false) if err != nil { return err } @@ -366,7 +330,7 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. -func (w *kqueue) addWatch(name string, flags uint32) (string, error) { +func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) { if w.isClosed() { return "", ErrClosed } @@ -385,15 +349,15 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { return "", nil } - // Follow symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + // Follow symlinks, but only for paths added with Add(), and not paths + // we're adding from internalWatch from a listdir. + if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? 
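The addWatch hunk continuing below changes kqueue symlink handling in two ways: a failed `Readlink` now surfaces as an error instead of being silently dropped, and a relative target is resolved against the link's own directory before being watched. As a standalone sketch (a hypothetical helper, not code from the patch):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveLink returns a symlink's target, joining relative targets onto the
// link's parent directory, following the logic added to addWatch.
func resolveLink(name string) (string, error) {
	link, err := os.Readlink(name)
	if err != nil {
		return "", err // previously swallowed; now surfaced to the caller
	}
	if !filepath.IsAbs(link) {
		link = filepath.Join(filepath.Dir(name), link)
	}
	return link, nil
}

func main() {
	target, err := resolveLink("/tmp/example-link") // illustrative path
	fmt.Println(target, err)
}
```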
- return "", nil + return "", err + } + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(name), link) } _, alreadyWatching = w.watches.byPath(link) @@ -408,7 +372,7 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { name = link fi, err = os.Lstat(name) if err != nil { - return "", nil + return "", err } } @@ -422,7 +386,6 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { if errors.Is(err, unix.EINTR) { continue } - return "", err } @@ -444,10 +407,16 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) - w.watches.updateDirFlags(name, flags) + if !w.watches.updateDirFlags(name, flags) { + return "", nil + } if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { + d := name + if info.linkName != "" { + d = info.linkName + } + if err := w.watchDirectoryFiles(d); err != nil { return "", err } } @@ -644,19 +613,22 @@ func (w *kqueue) dirChange(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.dirChange: %w", err) + return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err) } for _, f := range files { fi, err := f.Info() if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } return fmt.Errorf("fsnotify.dirChange: %w", err) } err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) { return nil } return fmt.Errorf("fsnotify.dirChange: %w", err) @@ -688,11 +660,11 @@ func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory info, _ := w.watches.byPath(name) - return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true) } - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) + // Watch file to mimic Linux inotify. + return w.addWatch(name, noteAllEvents, true) } // Register events with the queue. @@ -722,9 +694,9 @@ func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { } func (w *kqueue) xSupports(op Op) bool { - if runtime.GOOS == "freebsd" { - //return true // Supports everything. - } + //if runtime.GOOS == "freebsd" { + // return true // Supports everything. 
+ //} if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { return false diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go index 5eb5dbc66..b8c0ad722 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -9,12 +9,11 @@ type other struct { Errors chan error } +var defaultBufferSize = 0 + func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - return newBackend(ev, errs) -} func (w *other) Close() error { return nil } func (w *other) WatchList() []string { return nil } func (w *other) Add(name string) error { return nil } diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go index c54a63083..3433642d6 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -28,18 +28,16 @@ type readDirChangesW struct { port windows.Handle // Handle to completion port input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error + done chan chan<- error mu sync.Mutex // Protects access to watches, closed watches watchMap // Map of watches (key: i-number) closed bool // Set to true when Close() is first called } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(50, ev, errs) -} +var defaultBufferSize = 50 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) @@ -50,7 +48,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error port: port, watches: make(watchMap), input: make(chan *input, 1), - quit: make(chan chan<- error, 1), + done: make(chan chan<- error, 1), } go w.readEvents() return w, nil @@ -70,8 +68,8 @@ func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool event := w.newEvent(name, uint32(mask)) event.renamedFrom = renamedFrom select { - case ch := <-w.quit: - w.quit <- ch + case ch := <-w.done: + w.done <- ch case w.Events <- event: } return true @@ -83,10 +81,10 @@ func (w *readDirChangesW) sendError(err error) bool { return true } select { + case <-w.done: + return false case w.Errors <- err: return true - case <-w.quit: - return false } } @@ -99,9 +97,9 @@ func (w *readDirChangesW) Close() error { w.closed = true w.mu.Unlock() - // Send "quit" message to the reader goroutine + // Send "done" message to the reader goroutine ch := make(chan error) - w.quit <- ch + w.done <- ch if err := w.wakeupReader(); err != nil { return err } @@ -495,7 +493,7 @@ func (w *readDirChangesW) readEvents() { watch := (*watch)(unsafe.Pointer(ov)) if watch == nil { select { - case ch := <-w.quit: + case ch := <-w.done: w.mu.Lock() var indexes []indexMap for _, index := range w.watches { diff --git a/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0760efe91..f64be4bf9 100644 --- 
a/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -244,12 +244,13 @@ var ( // ErrUnsupported is returned by AddWith() when WithOps() specified an // Unportable event that's not supported on this platform. + //lint:ignore ST1012 not relevant xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) + ev, errs := make(chan Event, defaultBufferSize), make(chan error) b, err := newBackend(ev, errs) if err != nil { return nil, err @@ -266,8 +267,8 @@ func NewWatcher() (*Watcher, error) { // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. func NewBufferedWatcher(sz uint) (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) - b, err := newBufferedBackend(sz, ev, errs) + ev, errs := make(chan Event, sz), make(chan error) + b, err := newBackend(ev, errs) if err != nil { return nil, err } @@ -337,7 +338,8 @@ func (w *Watcher) Close() error { return w.b.Close() } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). // -// Returns nil if [Watcher.Close] was called. +// The order is undefined, and may differ per call. Returns nil if +// [Watcher.Close] was called. func (w *Watcher) WatchList() []string { return w.b.WatchList() } // Supports reports if all the listed operations are supported by this platform. diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go index b0eab1009..0b01bc182 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -9,14 +9,14 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 -// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ var l syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) if err == nil && l.Cur != l.Max { diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go index 547df1df8..5ac8b5079 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -9,8 +9,8 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go index 30976ce97..b251fb803 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -1,4 +1,4 @@ -//go:build !windows && !darwin && !freebsd +//go:build !windows && !darwin && !freebsd && !plan9 package internal @@ -9,8 +9,8 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go 
b/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go index a72c64954..896bc2e5a 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -10,8 +10,8 @@ import ( // Just a dummy. var ( - SyscallEACCES = errors.New("dummy") - UnixEACCES = errors.New("dummy") + ErrSyscallEACCES = errors.New("dummy") + ErrUnixEACCES = errors.New("dummy") ) func SetRlimit() {} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/shared.go b/tools/vendor/github.com/fsnotify/fsnotify/shared.go new file mode 100644 index 000000000..3ee9b58f1 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/shared.go @@ -0,0 +1,64 @@ +package fsnotify + +import "sync" + +type shared struct { + Events chan Event + Errors chan error + done chan struct{} + mu sync.Mutex +} + +func newShared(ev chan Event, errs chan error) *shared { + return &shared{ + Events: ev, + Errors: errs, + done: make(chan struct{}), + } +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *shared) sendEvent(e Event) bool { + if e.Op == 0 { + return true + } + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *shared) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *shared) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Mark as closed; returns true if it was already closed. +func (w *shared) close() bool { + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return true + } + close(w.done) + return false +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/staticcheck.conf b/tools/vendor/github.com/fsnotify/fsnotify/staticcheck.conf new file mode 100644 index 000000000..8fa7351f0 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/staticcheck.conf @@ -0,0 +1,3 @@ +checks = ['all', + '-U1000', # Don't complain about unused functions. +] diff --git a/tools/vendor/github.com/fxamacker/cbor/v2/README.md b/tools/vendor/github.com/fxamacker/cbor/v2/README.md index af0a79507..da9f9e6f0 100644 --- a/tools/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/tools/vendor/github.com/fxamacker/cbor/v2/README.md @@ -1,6 +1,4 @@ -# CBOR Codec in Go - - +
[logo image: CBOR Codec Go logo]
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). @@ -8,23 +6,26 @@ CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name `fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). -See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer. ## fxamacker/cbor [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) -[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) +[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage) `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options. + Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. -
-<details><summary>Highlights</summary>
+<details><summary>🔎  Highlights</summary>
__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,201 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -
-<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary>
- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -
-</details>
- -`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to -decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | - -
-<details><summary>Benchmark details</summary>
- -Latest comparison used: -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) -- go test -bench=. -benchmem -count=20 - -#### Prior comparisons - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | -| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | -| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | - -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.6, linux/amd64, i5-13600K (DDR4) -- go test -bench=. -benchmem -count=20 - -
-</details>
- -### Smaller Encodings with Struct Tags - -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. - -
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
- -https://go.dev/play/p/YxwvfPdFQG2 - -```Go -// Example encoding nested struct (with omitempty tag) -// - encoding/json: 18 byte JSON -// - fxamacker/cbor: 1 byte CBOR -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/fxamacker/cbor/v2" -) - -type GrandChild struct { - Quux int `json:",omitempty"` -} - -type Child struct { - Baz int `json:",omitempty"` - Qux GrandChild `json:",omitempty"` -} - -type Parent struct { - Foo Child `json:",omitempty"` - Bar int `json:",omitempty"` -} - -func cb() { - results, _ := cbor.Marshal(Parent{}) - fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) - - text, _ := cbor.Diagnose(results) // Diagnostic Notation - fmt.Println("DN: " + text) -} - -func js() { - results, _ := json.Marshal(Parent{}) - fmt.Println("hex(JSON): " + hex.EncodeToString(results)) - - text := string(results) // JSON - fmt.Println("JSON: " + text) -} - -func main() { - cb() - fmt.Println("-------------") - js() -} -``` - -Output (DN is Diagnostic Notation): -``` -hex(CBOR): a0 -DN: {} -------------- -hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d -JSON: {"Foo":{"Qux":{}}} -``` - -
-</details>
- -Example using different struct tags together: +Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data. + +> [!NOTE] +> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`: +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op | +> +> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference. +> +>
+> <details><summary>🔎  Benchmark details</summary>
+> +> Latest comparison for decoding CBOR data to Go `[]byte`: +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores) +> - go test -bench=. -benchmem -count=20 +> +> #### Prior comparisons +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | +> +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.19.6, linux/amd64, i5-13600K (DDR4) +> - go test -bench=. -benchmem -count=20 +> +>
+> </details>
+ +In contrast, some codecs can crash or use excessive resources while decoding bad data. + +> [!WARNING] +> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). +> +>
+> <details><summary>🔎  gob fatal error (out of memory) 💥 decoding 181 bytes</summary>
+> +> ```Go +> // Example of encoding/gob having "fatal error: runtime: out of memory" +> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024). +> package main +> import ( +> "bytes" +> "encoding/gob" +> "encoding/hex" +> "fmt" +> ) +> +> // Example data is from https://github.com/golang/go/issues/24446 +> // (shortened to 181 bytes). +> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + +> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + +> "860001013001ff860001013001ffb80000001eff850401010e3030303030" + +> "30303030303030303001ff3000010c0104000016ffb70201010830303030" + +> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + +> "303030303030303030303030303030303030303030303030303030303030" + +> "30" +> +> type X struct { +> J *X +> K map[string]int +> } +> +> func main() { +> raw, _ := hex.DecodeString(data) +> decoder := gob.NewDecoder(bytes.NewReader(raw)) +> +> var x X +> decoder.Decode(&x) // fatal error: runtime: out of memory +> fmt.Println("Decoding finished.") +> } +> ``` +> +> +>
+> </details>
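For contrast with the `encoding/gob` failure above, here is a minimal editor-added sketch (not part of this patch or the upstream README) that feeds the same 10-byte malicious input from the benchmark to `cbor.Unmarshal`. The exact error message is not guaranteed; the point is that decoding fails fast with an error instead of crashing:

```go
// Sketch only: the benchmark's 10-byte malicious input is an array header
// claiming an enormous element count. fxamacker/cbor's configurable limits
// reject it immediately rather than allocating memory for it.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	malicious := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}

	var v []byte
	err := cbor.Unmarshal(malicious, &v)
	fmt.Println(err) // prints a decode error; no crash, no large allocation
}
```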
+ +### Smaller Encodings with Struct Tag Options + +Struct tags automatically reduce encoded size of structs and improve speed. + +We can write less code by using struct tag options: +- `toarray`: encode without field names (decode back to original struct) +- `keyasint`: encode field names as integers (decode back to original struct) +- `omitempty`: omit empty fields when encoding +- `omitzero`: omit zero-value fields when encoding ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") -API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. +> [!NOTE] +> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte! +> - `encoding/json`: 18 bytes of JSON +> - `fxamacker/cbor`: 1 byte of CBOR +> +>
+> <details><summary>🔎  Encoding 3-level nested Go struct with omitempty</summary>
+> +> https://go.dev/play/p/YxwvfPdFQG2 +> +> ```Go +> // Example encoding nested struct (with omitempty tag) +> // - encoding/json: 18 byte JSON +> // - fxamacker/cbor: 1 byte CBOR +> +> package main +> +> import ( +> "encoding/hex" +> "encoding/json" +> "fmt" +> +> "github.com/fxamacker/cbor/v2" +> ) +> +> type GrandChild struct { +> Quux int `json:",omitempty"` +> } +> +> type Child struct { +> Baz int `json:",omitempty"` +> Qux GrandChild `json:",omitempty"` +> } +> +> type Parent struct { +> Foo Child `json:",omitempty"` +> Bar int `json:",omitempty"` +> } +> +> func cb() { +> results, _ := cbor.Marshal(Parent{}) +> fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) +> +> text, _ := cbor.Diagnose(results) // Diagnostic Notation +> fmt.Println("DN: " + text) +> } +> +> func js() { +> results, _ := json.Marshal(Parent{}) +> fmt.Println("hex(JSON): " + hex.EncodeToString(results)) +> +> text := string(results) // JSON +> fmt.Println("JSON: " + text) +> } +> +> func main() { +> cb() +> fmt.Println("-------------") +> js() +> } +> ``` +> +> Output (DN is Diagnostic Notation): +> ``` +> hex(CBOR): a0 +> DN: {} +> ------------- +> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +> JSON: {"Foo":{"Qux":{}}} +> ``` +> +>
+> </details>
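The example above demonstrates `omitempty`/`omitzero`; as an editor-added sketch (not part of this patch), the other two options from the list, `toarray` and `keyasint`, shrink encodings by dropping or shortening field names. The hex values in the comments follow from the CBOR encoding rules:

```go
// Sketch only: smaller encodings via the toarray and keyasint tag options.
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// toarray: encode the struct as a CBOR array, omitting field names entirely.
type Coordinate struct {
	_ struct{} `cbor:",toarray"`
	X int
	Y int
}

// keyasint: encode field names as small integer map keys.
type Header struct {
	Alg int    `cbor:"1,keyasint,omitempty"`
	Kid []byte `cbor:"4,keyasint,omitempty"`
}

func main() {
	a, _ := cbor.Marshal(Coordinate{X: 1, Y: 2})
	fmt.Println(hex.EncodeToString(a)) // 820102 -> [1, 2]

	m, _ := cbor.Marshal(Header{Alg: -7})
	fmt.Println(hex.EncodeToString(m)) // a10126 -> {1: -7}
}
```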
+ ## Quick Start __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. +> [!TIP] +> +> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta). +> +>
+> <details><summary>🔎  More about tinygo feature branch</summary>
+>
+> ### Tinygo
+>
+> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and can be compiled using tinygo v0.33 (it also compiles with golang/go).
+>
+> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
+>
+> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
+>
+> Changes in this feature branch only affect tinygo-compiled software. Summary of changes:
+> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). Users can specify a higher limit, but 24+ crashes tests when compiled with tinygo v0.33.
+> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing a needed feature.
+> - the encoding error message can differ when encoding a function type.
+>
+> Related tinygo issues:
+> - https://github.com/tinygo-org/tinygo/issues/4277
+> - https://github.com/tinygo-org/tinygo/issues/4458
+>
+> </details>
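Before the key points below, a minimal editor-added round-trip sketch (not part of this patch) using the `encoding/json`-style API this Quick Start describes:

```go
// Sketch only: basic round-trip with the json-like Marshal/Unmarshal API.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Animal struct {
	Name string
	Legs int
}

func main() {
	b, err := cbor.Marshal(Animal{Name: "cat", Legs: 4}) // encode to CBOR
	if err != nil {
		panic(err)
	}

	var a Animal
	if err := cbor.Unmarshal(b, &a); err != nil { // decode it back
		panic(err)
	}
	fmt.Printf("%+v\n", a) // {Name:cat Legs:4}
}
```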
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +290,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +351,9 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. -
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
+<details><summary>🔎  Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +421,13 @@ JSON: {"Foo":{"Qux":{}}}
</details>

-<details><summary>Example using several struct tags</summary>
+<details><summary>🔎  Example using struct tag options</summary>
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. +Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. ### CBOR Tags @@ -404,7 +443,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. -
-<details><summary>Example using TagSet and TagOptions</summary>
+<details><summary>🔎  Example using TagSet and TagOptions</summary>
```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,7 +469,7 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ``` @@ -439,7 +478,7 @@ if data, err := cbor.Marshal(v); err != nil { ### Functions and Interfaces -
-<details><summary>Functions and interfaces at a glance</summary>
+<details><summary>🔎  Functions and interfaces at a glance</summary>
Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -472,11 +511,24 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +v2.8.0 (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +v2.8.0 and v2.7.1 fixes these 3 functions (when called directly by user apps) to use same error handling on bad inputs as `cbor.Unmarshal()`: +- `ByteString.UnmarshalCBOR()` +- `RawTag.UnmarshalCBOR()` +- `SimpleValue.UnmarshalCBOR()` + +The above 3 `UnmarshalCBOR()` functions were initially created for internal use and are deprecated now, so please use `Unmarshal()` or `UnmarshalFirst()` instead. To preserve backward compatibility, these deprecated functions were added to fuzz tests and will not be removed in v2. + +The minimum version of Go required to build: +- v2.8.0 requires go 1.20. +- v2.7.1 and older releases require go 1.17. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). -### Prior Release +### Prior Releases + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. @@ -489,7 +541,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. 0 && (res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, - PENDING_BYTE, 0, 1, 0))==0 ){ + + /* Flags for the LockFileEx() call. This should be an exclusive lock if + ** this call is to obtain EXCLUSIVE, or a shared lock if this call is to + ** obtain SHARED. */ + int flags = LOCKFILE_FAIL_IMMEDIATELY; + if( locktype==EXCLUSIVE_LOCK ){ + flags |= LOCKFILE_EXCLUSIVE_LOCK; + } + while( cnt>0 ){ /* Try 3 times to get the pending lock. This is needed to work ** around problems caused by indexing and/or anti-virus software on ** Windows systems. + ** ** If you are using this code as a model for alternative VFSes, do not - ** copy this retry logic. It is a hack intended for Windows only. - */ + ** copy this retry logic. It is a hack intended for Windows only. 
*/ + res = winLockFile(&pFile->h, flags, PENDING_BYTE, 0, 1, 0); + if( res ) break; + lastErrno = osGetLastError(); OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, result=%d\n", - pFile->h, cnt, res)); + pFile->h, cnt, res + )); + if( lastErrno==ERROR_INVALID_HANDLE ){ pFile->lastErrno = lastErrno; rc = SQLITE_IOERR_LOCK; OSTRACE(("LOCK-FAIL file=%p, count=%d, rc=%s\n", - pFile->h, cnt, sqlite3ErrName(rc))); + pFile->h, cnt, sqlite3ErrName(rc) + )); return rc; } - if( cnt ) sqlite3_win32_sleep(1); + + cnt--; + if( cnt>0 ) sqlite3_win32_sleep(1); } gotPendingLock = res; - if( !res ){ - lastErrno = osGetLastError(); - } } /* Acquire a shared lock */ if( locktype==SHARED_LOCK && res ){ assert( pFile->locktype==NO_LOCK ); - res = winGetReadLock(pFile); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + res = winGetReadLock(pFile, pFile->bBlockOnConnect); +#else + res = winGetReadLock(pFile, 0); +#endif if( res ){ newLocktype = SHARED_LOCK; }else{ @@ -50002,7 +50772,7 @@ static int winLock(sqlite3_file *id, int locktype){ newLocktype = EXCLUSIVE_LOCK; }else{ lastErrno = osGetLastError(); - winGetReadLock(pFile); + winGetReadLock(pFile, 0); } } @@ -50082,7 +50852,7 @@ static int winUnlock(sqlite3_file *id, int locktype){ type = pFile->locktype; if( type>=EXCLUSIVE_LOCK ){ winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); - if( locktype==SHARED_LOCK && !winGetReadLock(pFile) ){ + if( locktype==SHARED_LOCK && !winGetReadLock(pFile, 0) ){ /* This should never happen. We should always be able to ** reacquire the read lock */ rc = winLogError(SQLITE_IOERR_UNLOCK, osGetLastError(), @@ -50251,6 +51021,11 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){ return SQLITE_OK; } #endif + case SQLITE_FCNTL_NULL_IO: { + (void)osCloseHandle(pFile->h); + pFile->h = NULL; + return SQLITE_OK; + } case SQLITE_FCNTL_TEMPFILENAME: { char *zTFile = 0; int rc = winGetTempname(pFile->pVfs, &zTFile); @@ -50287,6 +51062,28 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){ return rc; } #endif + +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + case SQLITE_FCNTL_LOCK_TIMEOUT: { + int iOld = pFile->iBusyTimeout; + int iNew = *(int*)pArg; +#if SQLITE_ENABLE_SETLK_TIMEOUT==1 + pFile->iBusyTimeout = (iNew < 0) ? INFINITE : (DWORD)iNew; +#elif SQLITE_ENABLE_SETLK_TIMEOUT==2 + pFile->iBusyTimeout = (DWORD)(!!iNew); +#else +# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2" +#endif + *(int*)pArg = iOld; + return SQLITE_OK; + } + case SQLITE_FCNTL_BLOCK_ON_CONNECT: { + int iNew = *(int*)pArg; + pFile->bBlockOnConnect = iNew; + return SQLITE_OK; + } +#endif /* SQLITE_ENABLE_SETLK_TIMEOUT */ + } OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h)); return SQLITE_NOTFOUND; @@ -50312,7 +51109,7 @@ static int winSectorSize(sqlite3_file *id){ */ static int winDeviceCharacteristics(sqlite3_file *id){ winFile *p = (winFile*)id; - return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | + return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | SQLITE_IOCAP_SUBPAGE_READ | ((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0); } @@ -50367,23 +51164,27 @@ static int winShmMutexHeld(void) { ** ** The following fields are read-only after the object is created: ** -** fid ** zFilename ** ** Either winShmNode.mutex must be held or winShmNode.nRef==0 and ** winShmMutexHeld() is true when reading or writing any other field ** in this structure. 
** +** File-handle hSharedShm is used to (a) take the DMS lock, (b) truncate +** the *-shm file if the DMS-locking protocol demands it, and (c) map +** regions of the *-shm file into memory using MapViewOfFile() or +** similar. Other locks are taken by individual clients using the +** winShm.hShm handles. */ struct winShmNode { sqlite3_mutex *mutex; /* Mutex to access this object */ char *zFilename; /* Name of the file */ - winFile hFile; /* File handle from winOpen */ + HANDLE hSharedShm; /* File handle open on zFilename */ + int isUnlocked; /* DMS lock has not yet been obtained */ + int isReadonly; /* True if read-only */ int szRegion; /* Size of shared-memory regions */ int nRegion; /* Size of array apRegion */ - u8 isReadonly; /* True if read-only */ - u8 isUnlocked; /* True if no DMS lock held */ struct ShmRegion { HANDLE hMap; /* File handle from CreateFileMapping */ @@ -50392,7 +51193,6 @@ struct winShmNode { DWORD lastErrno; /* The Windows errno from the last I/O error */ int nRef; /* Number of winShm objects pointing to this */ - winShm *pFirst; /* All winShm objects pointing to this */ winShmNode *pNext; /* Next in list of all winShmNode objects */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) u8 nextShmId; /* Next available winShm.id value */ @@ -50408,23 +51208,15 @@ static winShmNode *winShmNodeList = 0; /* ** Structure used internally by this VFS to record the state of an -** open shared memory connection. -** -** The following fields are initialized when this object is created and -** are read-only thereafter: -** -** winShm.pShmNode -** winShm.id -** -** All other fields are read/write. The winShm.pShmNode->mutex must be held -** while accessing any read/write fields. +** open shared memory connection. There is one such structure for each +** winFile open on a wal mode database. */ struct winShm { winShmNode *pShmNode; /* The underlying winShmNode object */ - winShm *pNext; /* Next winShm with the same winShmNode */ - u8 hasMutex; /* True if holding the winShmNode mutex */ u16 sharedMask; /* Mask of shared locks held */ u16 exclMask; /* Mask of exclusive locks held */ + HANDLE hShm; /* File-handle on *-shm file. For locking. */ + int bReadonly; /* True if hShm is opened read-only */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) u8 id; /* Id of this connection with its winShmNode */ #endif @@ -50436,50 +51228,6 @@ struct winShm { #define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ #define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ -/* -** Apply advisory locks for all n bytes beginning at ofst. 
-*/ -#define WINSHM_UNLCK 1 -#define WINSHM_RDLCK 2 -#define WINSHM_WRLCK 3 -static int winShmSystemLock( - winShmNode *pFile, /* Apply locks to this open shared-memory segment */ - int lockType, /* WINSHM_UNLCK, WINSHM_RDLCK, or WINSHM_WRLCK */ - int ofst, /* Offset to first byte to be locked/unlocked */ - int nByte /* Number of bytes to lock or unlock */ -){ - int rc = 0; /* Result code form Lock/UnlockFileEx() */ - - /* Access to the winShmNode object is serialized by the caller */ - assert( pFile->nRef==0 || sqlite3_mutex_held(pFile->mutex) ); - - OSTRACE(("SHM-LOCK file=%p, lock=%d, offset=%d, size=%d\n", - pFile->hFile.h, lockType, ofst, nByte)); - - /* Release/Acquire the system-level lock */ - if( lockType==WINSHM_UNLCK ){ - rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0); - }else{ - /* Initialize the locking parameters */ - DWORD dwFlags = LOCKFILE_FAIL_IMMEDIATELY; - if( lockType == WINSHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; - rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0); - } - - if( rc!= 0 ){ - rc = SQLITE_OK; - }else{ - pFile->lastErrno = osGetLastError(); - rc = SQLITE_BUSY; - } - - OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n", - pFile->hFile.h, (lockType == WINSHM_UNLCK) ? "winUnlockFile" : - "winLockFile", pFile->lastErrno, sqlite3ErrName(rc))); - - return rc; -} - /* Forward references to VFS methods */ static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*); static int winDelete(sqlite3_vfs *,const char*,int); @@ -50511,11 +51259,7 @@ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); UNUSED_VARIABLE_VALUE(bRc); } - if( p->hFile.h!=NULL && p->hFile.h!=INVALID_HANDLE_VALUE ){ - SimulateIOErrorBenign(1); - winClose((sqlite3_file *)&p->hFile); - SimulateIOErrorBenign(0); - } + winHandleClose(p->hSharedShm); if( deleteFlag ){ SimulateIOErrorBenign(1); sqlite3BeginBenignMalloc(); @@ -50533,42 +51277,239 @@ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ } /* -** The DMS lock has not yet been taken on shm file pShmNode. Attempt to -** take it now. Return SQLITE_OK if successful, or an SQLite error -** code otherwise. -** -** If the DMS cannot be locked because this is a readonly_shm=1 -** connection and no other process already holds a lock, return -** SQLITE_READONLY_CANTINIT and set pShmNode->isUnlocked=1. +** The DMS lock has not yet been taken on the shm file associated with +** pShmNode. Take the lock. Truncate the *-shm file if required. +** Return SQLITE_OK if successful, or an SQLite error code otherwise. */ -static int winLockSharedMemory(winShmNode *pShmNode){ - int rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, WIN_SHM_DMS, 1); +static int winLockSharedMemory(winShmNode *pShmNode, DWORD nMs){ + HANDLE h = pShmNode->hSharedShm; + int rc = SQLITE_OK; + assert( sqlite3_mutex_held(pShmNode->mutex) ); + rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 1, 0); if( rc==SQLITE_OK ){ + /* We have an EXCLUSIVE lock on the DMS byte. This means that this + ** is the first process to open the file. Truncate it to zero bytes + ** in this case. 
*/ if( pShmNode->isReadonly ){ - pShmNode->isUnlocked = 1; - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - return SQLITE_READONLY_CANTINIT; - }else if( winTruncate((sqlite3_file*)&pShmNode->hFile, 0) ){ - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - return winLogError(SQLITE_IOERR_SHMOPEN, osGetLastError(), - "winLockSharedMemory", pShmNode->zFilename); + rc = SQLITE_READONLY_CANTINIT; + }else{ + rc = winHandleTruncate(h, 0); } + + /* Release the EXCLUSIVE lock acquired above. */ + winUnlockFile(&h, WIN_SHM_DMS, 0, 1, 0); + }else if( (rc & 0xFF)==SQLITE_BUSY ){ + rc = SQLITE_OK; } if( rc==SQLITE_OK ){ - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); + /* Take a SHARED lock on the DMS byte. */ + rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 0, nMs); + if( rc==SQLITE_OK ){ + pShmNode->isUnlocked = 0; + } } - return winShmSystemLock(pShmNode, WINSHM_RDLCK, WIN_SHM_DMS, 1); + return rc; } + /* -** Open the shared-memory area associated with database file pDbFd. +** Convert a UTF-8 filename into whatever form the underlying +** operating system wants filenames in. Space to hold the result +** is obtained from malloc and must be freed by the calling +** function +** +** On Cygwin, 3 possible input forms are accepted: +** - If the filename starts with ":/" or ":\", +** it is converted to UTF-16 as-is. +** - If the filename contains '/', it is assumed to be a +** Cygwin absolute path, it is converted to a win32 +** absolute path in UTF-16. +** - Otherwise it must be a filename only, the win32 filename +** is returned in UTF-16. +** Note: If the function cygwin_conv_path() fails, only +** UTF-8 -> UTF-16 conversion will be done. This can only +** happen when the file path >32k, in which case winUtf8ToUnicode() +** will fail too. +*/ +static void *winConvertFromUtf8Filename(const char *zFilename){ + void *zConverted = 0; + if( osIsNT() ){ +#ifdef __CYGWIN__ + int nChar; + LPWSTR zWideFilename; + + if( osCygwin_conv_path && !(winIsDriveLetterAndColon(zFilename) + && winIsDirSep(zFilename[2])) ){ + i64 nByte; + int convertflag = CCP_POSIX_TO_WIN_W; + if( !strchr(zFilename, '/') ) convertflag |= CCP_RELATIVE; + nByte = (i64)osCygwin_conv_path(convertflag, + zFilename, 0, 0); + if( nByte>0 ){ + zConverted = sqlite3MallocZero(12+(u64)nByte); + if ( zConverted==0 ){ + return zConverted; + } + zWideFilename = zConverted; + /* Filenames should be prefixed, except when converted + * full path already starts with "\\?\". */ + if( osCygwin_conv_path(convertflag, zFilename, + zWideFilename+4, nByte)==0 ){ + if( (convertflag&CCP_RELATIVE) ){ + memmove(zWideFilename, zWideFilename+4, nByte); + }else if( memcmp(zWideFilename+4, L"\\\\", 4) ){ + memcpy(zWideFilename, L"\\\\?\\", 8); + }else if( zWideFilename[6]!='?' 
){ + memmove(zWideFilename+6, zWideFilename+4, nByte); + memcpy(zWideFilename, L"\\\\?\\UNC", 14); + }else{ + memmove(zWideFilename, zWideFilename+4, nByte); + } + return zConverted; + } + sqlite3_free(zConverted); + } + } + nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); + if( nChar==0 ){ + return 0; + } + zWideFilename = sqlite3MallocZero( nChar*sizeof(WCHAR)+12 ); + if( zWideFilename==0 ){ + return 0; + } + nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, + zWideFilename, nChar); + if( nChar==0 ){ + sqlite3_free(zWideFilename); + zWideFilename = 0; + }else if( nChar>MAX_PATH + && winIsDriveLetterAndColon(zFilename) + && winIsDirSep(zFilename[2]) ){ + memmove(zWideFilename+4, zWideFilename, nChar*sizeof(WCHAR)); + zWideFilename[2] = '\\'; + memcpy(zWideFilename, L"\\\\?\\", 8); + }else if( nChar>MAX_PATH + && winIsDirSep(zFilename[0]) && winIsDirSep(zFilename[1]) + && zFilename[2] != '?' ){ + memmove(zWideFilename+6, zWideFilename, nChar*sizeof(WCHAR)); + memcpy(zWideFilename, L"\\\\?\\UNC", 14); + } + zConverted = zWideFilename; +#else + zConverted = winUtf8ToUnicode(zFilename); +#endif /* __CYGWIN__ */ + } +#if defined(SQLITE_WIN32_HAS_ANSI) && defined(_WIN32) + else{ + zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI()); + } +#endif + /* caller will handle out of memory */ + return zConverted; +} + +/* +** This function is used to open a handle on a *-shm file. ** -** When opening a new shared-memory file, if no other instances of that -** file are currently open, in this process or in other processes, then -** the file must be truncated to zero length or have its header cleared. +** If SQLITE_ENABLE_SETLK_TIMEOUT is defined at build time, then the file +** is opened with FILE_FLAG_OVERLAPPED specified. If not, it is not. +*/ +static int winHandleOpen( + const char *zUtf8, /* File to open */ + int *pbReadonly, /* IN/OUT: True for readonly handle */ + HANDLE *ph /* OUT: New HANDLE for file */ +){ + int rc = SQLITE_OK; + void *zConverted = 0; + int bReadonly = *pbReadonly; + HANDLE h = INVALID_HANDLE_VALUE; + +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + const DWORD flag_overlapped = FILE_FLAG_OVERLAPPED; +#else + const DWORD flag_overlapped = 0; +#endif + + /* Convert the filename to the system encoding. */ + zConverted = winConvertFromUtf8Filename(zUtf8); + if( zConverted==0 ){ + OSTRACE(("OPEN name=%s, rc=SQLITE_IOERR_NOMEM", zUtf8)); + rc = SQLITE_IOERR_NOMEM_BKPT; + goto winopenfile_out; + } + + /* Ensure the file we are trying to open is not actually a directory. */ + if( winIsDir(zConverted) ){ + OSTRACE(("OPEN name=%s, rc=SQLITE_CANTOPEN_ISDIR", zUtf8)); + rc = SQLITE_CANTOPEN_ISDIR; + goto winopenfile_out; + } + + /* TODO: platforms. + ** TODO: retry-on-ioerr. + */ + if( osIsNT() ){ +#if SQLITE_OS_WINRT + CREATEFILE2_EXTENDED_PARAMETERS extendedParameters; + memset(&extendedParameters, 0, sizeof(extendedParameters)); + extendedParameters.dwSize = sizeof(extendedParameters); + extendedParameters.dwFileAttributes = FILE_ATTRIBUTE_NORMAL; + extendedParameters.dwFileFlags = flag_overlapped; + extendedParameters.dwSecurityQosFlags = SECURITY_ANONYMOUS; + h = osCreateFile2((LPCWSTR)zConverted, + (GENERIC_READ | (bReadonly ? 0 : GENERIC_WRITE)),/* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + OPEN_ALWAYS, /* dwCreationDisposition */ + &extendedParameters + ); +#else + h = osCreateFileW((LPCWSTR)zConverted, /* lpFileName */ + (GENERIC_READ | (bReadonly ? 
0 : GENERIC_WRITE)), /* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + NULL, /* lpSecurityAttributes */ + OPEN_ALWAYS, /* dwCreationDisposition */ + FILE_ATTRIBUTE_NORMAL|flag_overlapped, + NULL + ); +#endif + }else{ + /* Due to pre-processor directives earlier in this file, + ** SQLITE_WIN32_HAS_ANSI is always defined if osIsNT() is false. */ +#ifdef SQLITE_WIN32_HAS_ANSI + h = osCreateFileA((LPCSTR)zConverted, + (GENERIC_READ | (bReadonly ? 0 : GENERIC_WRITE)), /* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + NULL, /* lpSecurityAttributes */ + OPEN_ALWAYS, /* dwCreationDisposition */ + FILE_ATTRIBUTE_NORMAL|flag_overlapped, + NULL + ); +#endif + } + + if( h==INVALID_HANDLE_VALUE ){ + if( bReadonly==0 ){ + bReadonly = 1; + rc = winHandleOpen(zUtf8, &bReadonly, &h); + }else{ + rc = SQLITE_CANTOPEN_BKPT; + } + } + + winopenfile_out: + sqlite3_free(zConverted); + *pbReadonly = bReadonly; + *ph = h; + return rc; +} + + +/* +** Open the shared-memory area associated with database file pDbFd. */ static int winOpenSharedMemory(winFile *pDbFd){ struct winShm *p; /* The connection to be opened */ @@ -50580,98 +51521,83 @@ static int winOpenSharedMemory(winFile *pDbFd){ assert( pDbFd->pShm==0 ); /* Not previously opened */ /* Allocate space for the new sqlite3_shm object. Also speculatively - ** allocate space for a new winShmNode and filename. - */ + ** allocate space for a new winShmNode and filename. */ p = sqlite3MallocZero( sizeof(*p) ); if( p==0 ) return SQLITE_IOERR_NOMEM_BKPT; nName = sqlite3Strlen30(pDbFd->zPath); - pNew = sqlite3MallocZero( sizeof(*pShmNode) + nName + 17 ); + pNew = sqlite3MallocZero( sizeof(*pShmNode) + (i64)nName + 17 ); if( pNew==0 ){ sqlite3_free(p); return SQLITE_IOERR_NOMEM_BKPT; } pNew->zFilename = (char*)&pNew[1]; + pNew->hSharedShm = INVALID_HANDLE_VALUE; + pNew->isUnlocked = 1; sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath); sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename); + /* Open a file-handle on the *-shm file for this connection. This file-handle + ** is only used for locking. The mapping of the *-shm file is created using + ** the shared file handle in winShmNode.hSharedShm. */ + p->bReadonly = sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0); + rc = winHandleOpen(pNew->zFilename, &p->bReadonly, &p->hShm); + /* Look to see if there is an existing winShmNode that can be used. - ** If no matching winShmNode currently exists, create a new one. - */ + ** If no matching winShmNode currently exists, then create a new one. */ winShmEnterMutex(); for(pShmNode = winShmNodeList; pShmNode; pShmNode=pShmNode->pNext){ /* TBD need to come up with better match here. Perhaps - ** use FILE_ID_BOTH_DIR_INFO Structure. - */ + ** use FILE_ID_BOTH_DIR_INFO Structure. */ if( sqlite3StrICmp(pShmNode->zFilename, pNew->zFilename)==0 ) break; } - if( pShmNode ){ - sqlite3_free(pNew); - }else{ - int inFlags = SQLITE_OPEN_WAL; - int outFlags = 0; - + if( pShmNode==0 ){ pShmNode = pNew; - pNew = 0; - ((winFile*)(&pShmNode->hFile))->h = INVALID_HANDLE_VALUE; - pShmNode->pNext = winShmNodeList; - winShmNodeList = pShmNode; + /* Allocate a mutex for this winShmNode object, if one is required. 
*/ if( sqlite3GlobalConfig.bCoreMutex ){ pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); - if( pShmNode->mutex==0 ){ - rc = SQLITE_IOERR_NOMEM_BKPT; - goto shm_open_err; - } + if( pShmNode->mutex==0 ) rc = SQLITE_IOERR_NOMEM_BKPT; } - if( 0==sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){ - inFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE; - }else{ - inFlags |= SQLITE_OPEN_READONLY; - } - rc = winOpen(pDbFd->pVfs, pShmNode->zFilename, - (sqlite3_file*)&pShmNode->hFile, - inFlags, &outFlags); - if( rc!=SQLITE_OK ){ - rc = winLogError(rc, osGetLastError(), "winOpenShm", - pShmNode->zFilename); - goto shm_open_err; + /* Open a file-handle to use for mappings, and for the DMS lock. */ + if( rc==SQLITE_OK ){ + HANDLE h = INVALID_HANDLE_VALUE; + pShmNode->isReadonly = p->bReadonly; + rc = winHandleOpen(pNew->zFilename, &pShmNode->isReadonly, &h); + pShmNode->hSharedShm = h; } - if( outFlags==SQLITE_OPEN_READONLY ) pShmNode->isReadonly = 1; - rc = winLockSharedMemory(pShmNode); - if( rc!=SQLITE_OK && rc!=SQLITE_READONLY_CANTINIT ) goto shm_open_err; + /* If successful, link the new winShmNode into the global list. If an + ** error occurred, free the object. */ + if( rc==SQLITE_OK ){ + pShmNode->pNext = winShmNodeList; + winShmNodeList = pShmNode; + pNew = 0; + }else{ + sqlite3_mutex_free(pShmNode->mutex); + if( pShmNode->hSharedShm!=INVALID_HANDLE_VALUE ){ + osCloseHandle(pShmNode->hSharedShm); + } + } } - /* Make the new connection a child of the winShmNode */ - p->pShmNode = pShmNode; + /* If no error has occurred, link the winShm object to the winShmNode and + ** the winShm to pDbFd. */ + if( rc==SQLITE_OK ){ + p->pShmNode = pShmNode; + pShmNode->nRef++; #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) - p->id = pShmNode->nextShmId++; + p->id = pShmNode->nextShmId++; #endif - pShmNode->nRef++; - pDbFd->pShm = p; - winShmLeaveMutex(); - - /* The reference count on pShmNode has already been incremented under - ** the cover of the winShmEnterMutex() mutex and the pointer from the - ** new (struct winShm) object to the pShmNode has been set. All that is - ** left to do is to link the new object into the linked list starting - ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex - ** mutex. 
- */ - sqlite3_mutex_enter(pShmNode->mutex); - p->pNext = pShmNode->pFirst; - pShmNode->pFirst = p; - sqlite3_mutex_leave(pShmNode->mutex); - return rc; + pDbFd->pShm = p; + }else if( p ){ + winHandleClose(p->hShm); + sqlite3_free(p); + } - /* Jump here on any error */ -shm_open_err: - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */ - sqlite3_free(p); - sqlite3_free(pNew); + assert( rc!=SQLITE_OK || pShmNode->isUnlocked==0 || pShmNode->nRegion==0 ); winShmLeaveMutex(); + sqlite3_free(pNew); return rc; } @@ -50686,27 +51612,19 @@ static int winShmUnmap( winFile *pDbFd; /* Database holding shared-memory */ winShm *p; /* The connection to be closed */ winShmNode *pShmNode; /* The underlying shared-memory file */ - winShm **pp; /* For looping over sibling connections */ pDbFd = (winFile*)fd; p = pDbFd->pShm; if( p==0 ) return SQLITE_OK; - pShmNode = p->pShmNode; - - /* Remove connection p from the set of connections associated - ** with pShmNode */ - sqlite3_mutex_enter(pShmNode->mutex); - for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} - *pp = p->pNext; + if( p->hShm!=INVALID_HANDLE_VALUE ){ + osCloseHandle(p->hShm); + } - /* Free the connection p */ - sqlite3_free(p); - pDbFd->pShm = 0; - sqlite3_mutex_leave(pShmNode->mutex); + pShmNode = p->pShmNode; + winShmEnterMutex(); /* If pShmNode->nRef has reached 0, then close the underlying - ** shared-memory file, too */ - winShmEnterMutex(); + ** shared-memory file, too. */ assert( pShmNode->nRef>0 ); pShmNode->nRef--; if( pShmNode->nRef==0 ){ @@ -50714,6 +51632,9 @@ static int winShmUnmap( } winShmLeaveMutex(); + /* Free the connection p */ + sqlite3_free(p); + pDbFd->pShm = 0; return SQLITE_OK; } @@ -50728,10 +51649,9 @@ static int winShmLock( ){ winFile *pDbFd = (winFile*)fd; /* Connection holding shared memory */ winShm *p = pDbFd->pShm; /* The shared memory being locked */ - winShm *pX; /* For looping over all siblings */ winShmNode *pShmNode; int rc = SQLITE_OK; /* Result code */ - u16 mask; /* Mask of locks to take or release */ + u16 mask = (u16)((1U<<(ofst+n)) - (1U<pShmNode; @@ -50745,85 +51665,81 @@ static int winShmLock( || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); - mask = (u16)((1U<<(ofst+n)) - (1U<1 || mask==(1<mutex); - if( flags & SQLITE_SHM_UNLOCK ){ - u16 allMask = 0; /* Mask of locks held by siblings */ - - /* See if any siblings hold this same lock */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( pX==p ) continue; - assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); - allMask |= pX->sharedMask; - } + /* Check that, if this to be a blocking lock, no locks that occur later + ** in the following list than the lock being obtained are already held: + ** + ** 1. Recovery lock (ofst==2). + ** 2. Checkpointer lock (ofst==1). + ** 3. Write lock (ofst==0). + ** 4. 
Read locks (ofst>=3 && ofstexclMask|p->sharedMask); + assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || ( + (ofst!=2 || lockMask==0) + && (ofst!=1 || lockMask==0 || lockMask==2) + && (ofst!=0 || lockMask<3) + && (ofst<3 || lockMask<(1<exclMask & mask) + ); + if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask)) + || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask)) + || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)) + ){ - /* Undo the local locks */ - if( rc==SQLITE_OK ){ - p->exclMask &= ~mask; - p->sharedMask &= ~mask; - } - }else if( flags & SQLITE_SHM_SHARED ){ - u16 allShared = 0; /* Union of locks held by connections other than "p" */ + if( flags & SQLITE_SHM_UNLOCK ){ + /* Case (a) - unlock. */ - /* Find out which shared locks are already held by sibling connections. - ** If any sibling already holds an exclusive lock, go ahead and return - ** SQLITE_BUSY. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; - } - allShared |= pX->sharedMask; - } + assert( (p->exclMask & p->sharedMask)==0 ); + assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask ); + assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask ); - /* Get shared locks at the system level, if necessary */ - if( rc==SQLITE_OK ){ - if( (allShared & mask)==0 ){ - rc = winShmSystemLock(pShmNode, WINSHM_RDLCK, ofst+WIN_SHM_BASE, n); - }else{ - rc = SQLITE_OK; - } - } + rc = winHandleUnlock(p->hShm, ofst+WIN_SHM_BASE, n); - /* Get the local shared locks */ - if( rc==SQLITE_OK ){ - p->sharedMask |= mask; - } - }else{ - /* Make sure no sibling connections hold locks that will block this - ** lock. If any do, return SQLITE_BUSY right away. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; + /* If successful, also clear the bits in sharedMask/exclMask */ + if( rc==SQLITE_OK ){ + p->exclMask = (p->exclMask & ~mask); + p->sharedMask = (p->sharedMask & ~mask); } - } - - /* Get the exclusive locks at the system level. Then if successful - ** also mark the local connection as being locked. - */ - if( rc==SQLITE_OK ){ - rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, ofst+WIN_SHM_BASE, n); + }else{ + int bExcl = ((flags & SQLITE_SHM_EXCLUSIVE) ? 1 : 0); + DWORD nMs = winFileBusyTimeout(pDbFd); + rc = winHandleLockTimeout(p->hShm, ofst+WIN_SHM_BASE, n, bExcl, nMs); if( rc==SQLITE_OK ){ - assert( (p->sharedMask & mask)==0 ); - p->exclMask |= mask; + if( bExcl ){ + p->exclMask = (p->exclMask | mask); + }else{ + p->sharedMask = (p->sharedMask | mask); + } } } } - sqlite3_mutex_leave(pShmNode->mutex); - OSTRACE(("SHM-LOCK pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x, rc=%s\n", - osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask, - sqlite3ErrName(rc))); + + OSTRACE(( + "SHM-LOCK(%d,%d,%d) pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x," + " rc=%s\n", + ofst, n, flags, + osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask, + sqlite3ErrName(rc)) + ); return rc; } @@ -50885,13 +51801,15 @@ static int winShmMap( sqlite3_mutex_enter(pShmNode->mutex); if( pShmNode->isUnlocked ){ - rc = winLockSharedMemory(pShmNode); + /* Take the DMS lock. 
*/ + assert( pShmNode->nRegion==0 ); + rc = winLockSharedMemory(pShmNode, winFileBusyTimeout(pDbFd)); if( rc!=SQLITE_OK ) goto shmpage_out; - pShmNode->isUnlocked = 0; } - assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); + assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); if( pShmNode->nRegion<=iRegion ){ + HANDLE hShared = pShmNode->hSharedShm; struct ShmRegion *apNew; /* New aRegion[] array */ int nByte = (iRegion+1)*szRegion; /* Minimum required file size */ sqlite3_int64 sz; /* Current size of wal-index file */ @@ -50902,10 +51820,9 @@ static int winShmMap( ** Check to see if it has been allocated (i.e. if the wal-index file is ** large enough to contain the requested region). */ - rc = winFileSize((sqlite3_file *)&pShmNode->hFile, &sz); + rc = winHandleSize(hShared, &sz); if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), - "winShmMap1", pDbFd->zPath); + rc = winLogError(rc, osGetLastError(), "winShmMap1", pDbFd->zPath); goto shmpage_out; } @@ -50914,19 +51831,17 @@ static int winShmMap( ** zero, exit early. *pp will be set to NULL and SQLITE_OK returned. ** ** Alternatively, if isWrite is non-zero, use ftruncate() to allocate - ** the requested memory region. - */ + ** the requested memory region. */ if( !isWrite ) goto shmpage_out; - rc = winTruncate((sqlite3_file *)&pShmNode->hFile, nByte); + rc = winHandleTruncate(hShared, nByte); if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), - "winShmMap2", pDbFd->zPath); + rc = winLogError(rc, osGetLastError(), "winShmMap2", pDbFd->zPath); goto shmpage_out; } } /* Map the requested memory region into this processes address space. */ - apNew = (struct ShmRegion *)sqlite3_realloc64( + apNew = (struct ShmRegion*)sqlite3_realloc64( pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0]) ); if( !apNew ){ @@ -50945,18 +51860,13 @@ static int winShmMap( void *pMap = 0; /* Mapped memory region */ #if SQLITE_OS_WINRT - hMap = osCreateFileMappingFromApp(pShmNode->hFile.h, - NULL, protect, nByte, NULL - ); + hMap = osCreateFileMappingFromApp(hShared, NULL, protect, nByte, NULL); #elif defined(SQLITE_WIN32_HAS_WIDE) - hMap = osCreateFileMappingW(pShmNode->hFile.h, - NULL, protect, 0, nByte, NULL - ); + hMap = osCreateFileMappingW(hShared, NULL, protect, 0, nByte, NULL); #elif defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_CREATEFILEMAPPINGA - hMap = osCreateFileMappingA(pShmNode->hFile.h, - NULL, protect, 0, nByte, NULL - ); + hMap = osCreateFileMappingA(hShared, NULL, protect, 0, nByte, NULL); #endif + OSTRACE(("SHM-MAP-CREATE pid=%lu, region=%d, size=%d, rc=%s\n", osGetCurrentProcessId(), pShmNode->nRegion, nByte, hMap ? "ok" : "failed")); @@ -50999,7 +51909,9 @@ static int winShmMap( }else{ *pp = 0; } - if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY; + if( pShmNode->isReadonly && rc==SQLITE_OK ){ + rc = SQLITE_READONLY; + } sqlite3_mutex_leave(pShmNode->mutex); return rc; } @@ -51319,47 +52231,6 @@ static winVfsAppData winNolockAppData = { ** sqlite3_vfs object. */ -#if defined(__CYGWIN__) -/* -** Convert a filename from whatever the underlying operating system -** supports for filenames into UTF-8. Space to hold the result is -** obtained from malloc and must be freed by the calling function. 
-*/ -static char *winConvertToUtf8Filename(const void *zFilename){ - char *zConverted = 0; - if( osIsNT() ){ - zConverted = winUnicodeToUtf8(zFilename); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - zConverted = winMbcsToUtf8(zFilename, osAreFileApisANSI()); - } -#endif - /* caller will handle out of memory */ - return zConverted; -} -#endif - -/* -** Convert a UTF-8 filename into whatever form the underlying -** operating system wants filenames in. Space to hold the result -** is obtained from malloc and must be freed by the calling -** function. -*/ -static void *winConvertFromUtf8Filename(const char *zFilename){ - void *zConverted = 0; - if( osIsNT() ){ - zConverted = winUtf8ToUnicode(zFilename); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI()); - } -#endif - /* caller will handle out of memory */ - return zConverted; -} - /* ** This function returns non-zero if the specified UTF-8 string buffer ** ends with a directory separator character or one was successfully @@ -51372,7 +52243,14 @@ static int winMakeEndInDirSep(int nBuf, char *zBuf){ if( winIsDirSep(zBuf[nLen-1]) ){ return 1; }else if( nLen+1mxPathname; nBuf = nMax + 2; + nMax = pVfs->mxPathname; + nBuf = 2 + (i64)nMax; zBuf = sqlite3MallocZero( nBuf ); if( !zBuf ){ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n")); @@ -51449,7 +52328,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ } #if defined(__CYGWIN__) - else{ + else if( osGetenv!=NULL ){ static const char *azDirs[] = { 0, /* getenv("SQLITE_TMPDIR") */ 0, /* getenv("TMPDIR") */ @@ -51465,11 +52344,11 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ unsigned int i; const char *zDir = 0; - if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR"); - if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); - if( !azDirs[2] ) azDirs[2] = getenv("TMP"); - if( !azDirs[3] ) azDirs[3] = getenv("TEMP"); - if( !azDirs[4] ) azDirs[4] = getenv("USERPROFILE"); + if( !azDirs[0] ) azDirs[0] = osGetenv("SQLITE_TMPDIR"); + if( !azDirs[1] ) azDirs[1] = osGetenv("TMPDIR"); + if( !azDirs[2] ) azDirs[2] = osGetenv("TMP"); + if( !azDirs[3] ) azDirs[3] = osGetenv("TEMP"); + if( !azDirs[4] ) azDirs[4] = osGetenv("USERPROFILE"); for(i=0; inOut ){ + /* SQLite assumes that xFullPathname() nul-terminates the output buffer + ** even if it returns an error. */ + zOut[iOff] = '\0'; + return SQLITE_CANTOPEN_BKPT; + } + sqlite3_snprintf(nOut-iOff, &zOut[iOff], "%s", zPath); + return SQLITE_OK; +} +#endif /* __CYGWIN__ */ /* ** Turn a relative pathname into a full pathname. Write the full @@ -52252,8 +53180,8 @@ static int winFullPathnameNoMutex( int nFull, /* Size of output buffer in bytes */ char *zFull /* Output buffer */ ){ -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) - DWORD nByte; +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT + int nByte; void *zConverted; char *zOut; #endif @@ -52266,64 +53194,82 @@ static int winFullPathnameNoMutex( zRelative++; } -#if defined(__CYGWIN__) SimulateIOError( return SQLITE_ERROR ); - UNUSED_PARAMETER(nFull); - assert( nFull>=pVfs->mxPathname ); - if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ - /* - ** NOTE: We are dealing with a relative path name and the data - ** directory has been set. Therefore, use it as the basis - ** for converting the relative path name to an absolute - ** one by prepending the data directory and a slash. 
- */ - char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); - if( !zOut ){ - return SQLITE_IOERR_NOMEM_BKPT; - } - if( cygwin_conv_path( - (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A) | - CCP_RELATIVE, zRelative, zOut, pVfs->mxPathname+1)<0 ){ - sqlite3_free(zOut); - return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, - "winFullPathname1", zRelative); - }else{ - char *zUtf8 = winConvertToUtf8Filename(zOut); - if( !zUtf8 ){ - sqlite3_free(zOut); - return SQLITE_IOERR_NOMEM_BKPT; - } - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", - sqlite3_data_directory, winGetDirSep(), zUtf8); - sqlite3_free(zUtf8); - sqlite3_free(zOut); - } - }else{ - char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); - if( !zOut ){ - return SQLITE_IOERR_NOMEM_BKPT; - } - if( cygwin_conv_path( - (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A), - zRelative, zOut, pVfs->mxPathname+1)<0 ){ - sqlite3_free(zOut); - return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, - "winFullPathname2", zRelative); - }else{ - char *zUtf8 = winConvertToUtf8Filename(zOut); - if( !zUtf8 ){ - sqlite3_free(zOut); - return SQLITE_IOERR_NOMEM_BKPT; - } - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zUtf8); - sqlite3_free(zUtf8); - sqlite3_free(zOut); + +#ifdef __CYGWIN__ + if( osGetcwd ){ + zFull[nFull-1] = '\0'; + if( !winIsDriveLetterAndColon(zRelative) || !winIsDirSep(zRelative[2]) ){ + int rc = SQLITE_OK; + int nLink = 1; /* Number of symbolic links followed so far */ + const char *zIn = zRelative; /* Input path for each iteration of loop */ + char *zDel = 0; + struct stat buf; + + UNUSED_PARAMETER(pVfs); + + do { + /* Call lstat() on path zIn. Set bLink to true if the path is a symbolic + ** link, or false otherwise. */ + int bLink = 0; + if( osLstat && osReadlink ) { + if( osLstat(zIn, &buf)!=0 ){ + int myErrno = osErrno; + if( myErrno!=ENOENT ){ + rc = winLogError(SQLITE_CANTOPEN_BKPT, (DWORD)myErrno, "lstat", zIn); + } + }else{ + bLink = ((buf.st_mode & 0170000) == 0120000); + } + + if( bLink ){ + if( zDel==0 ){ + zDel = sqlite3MallocZero(nFull); + if( zDel==0 ) rc = SQLITE_NOMEM; + }else if( ++nLink>SQLITE_MAX_SYMLINKS ){ + rc = SQLITE_CANTOPEN_BKPT; + } + + if( rc==SQLITE_OK ){ + nByte = osReadlink(zIn, zDel, nFull-1); + if( nByte ==(DWORD)-1 ){ + rc = winLogError(SQLITE_CANTOPEN_BKPT, (DWORD)osErrno, "readlink", zIn); + }else{ + if( zDel[0]!='/' ){ + int n; + for(n = sqlite3Strlen30(zIn); n>0 && zIn[n-1]!='/'; n--); + if( nByte+n+1>nFull ){ + rc = SQLITE_CANTOPEN_BKPT; + }else{ + memmove(&zDel[n], zDel, nByte+1); + memcpy(zDel, zIn, n); + nByte += n; + } + } + zDel[nByte] = '\0'; + } + } + + zIn = zDel; + } + } + + assert( rc!=SQLITE_OK || zIn!=zFull || zIn[0]=='/' ); + if( rc==SQLITE_OK && zIn!=zFull ){ + rc = mkFullPathname(zIn, zFull, nFull); + } + if( bLink==0 ) break; + zIn = zFull; + }while( rc==SQLITE_OK ); + + sqlite3_free(zDel); + winSimplifyName(zFull); + return rc; } } - return SQLITE_OK; -#endif +#endif /* __CYGWIN__ */ -#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && !defined(__CYGWIN__) +#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && defined(_WIN32) SimulateIOError( return SQLITE_ERROR ); /* WinCE has no concept of a relative pathname, or so I am told. */ /* WinRT has no way to convert a relative path to an absolute one. 
*/ @@ -52342,7 +53288,8 @@ static int winFullPathnameNoMutex( return SQLITE_OK; #endif -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT +#if defined(_WIN32) /* It's odd to simulate an io-error here, but really this is just ** using the io-error infrastructure to test that SQLite handles this ** function failing. This function could fail if, for example, the @@ -52360,6 +53307,7 @@ static int winFullPathnameNoMutex( sqlite3_data_directory, winGetDirSep(), zRelative); return SQLITE_OK; } +#endif zConverted = winConvertFromUtf8Filename(zRelative); if( zConverted==0 ){ return SQLITE_IOERR_NOMEM_BKPT; @@ -52398,13 +53346,12 @@ static int winFullPathnameNoMutex( return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), "winFullPathname3", zRelative); } - nByte += 3; - zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); + zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) + 3*sizeof(zTemp[0]) ); if( zTemp==0 ){ sqlite3_free(zConverted); return SQLITE_IOERR_NOMEM_BKPT; } - nByte = osGetFullPathNameA((char*)zConverted, nByte, zTemp, 0); + nByte = osGetFullPathNameA((char*)zConverted, nByte+3, zTemp, 0); if( nByte==0 ){ sqlite3_free(zConverted); sqlite3_free(zTemp); @@ -52417,7 +53364,26 @@ static int winFullPathnameNoMutex( } #endif if( zOut ){ +#ifdef __CYGWIN__ + if( memcmp(zOut, "\\\\?\\", 4) ){ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); + }else if( memcmp(zOut+4, "UNC\\", 4) ){ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut+4); + }else{ + char *p = zOut+6; + *p = '\\'; + if( osGetcwd ){ + /* On Cygwin, UNC paths use forward slashes */ + while( *p ){ + if( *p=='\\' ) *p = '/'; + ++p; + } + } + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut+6); + } +#else sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); +#endif /* __CYGWIN__ */ sqlite3_free(zOut); return SQLITE_OK; }else{ @@ -52447,25 +53413,8 @@ static int winFullPathname( */ static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){ HANDLE h; -#if defined(__CYGWIN__) - int nFull = pVfs->mxPathname+1; - char *zFull = sqlite3MallocZero( nFull ); - void *zConverted = 0; - if( zFull==0 ){ - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - if( winFullPathname(pVfs, zFilename, nFull, zFull)!=SQLITE_OK ){ - sqlite3_free(zFull); - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - zConverted = winConvertFromUtf8Filename(zFull); - sqlite3_free(zFull); -#else void *zConverted = winConvertFromUtf8Filename(zFilename); UNUSED_PARAMETER(pVfs); -#endif if( zConverted==0 ){ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); return 0; @@ -52814,7 +53763,7 @@ SQLITE_API int sqlite3_os_init(void){ /* Double-check that the aSyscall[] array has been constructed ** correctly. 
See ticket [bb3a86e890c8e96ab] */
-  assert( ArraySize(aSyscall)==80 );
+  assert( ArraySize(aSyscall)==89 );
 
   /* get memory map allocation granularity */
   memset(&winSysInfo, 0, sizeof(SYSTEM_INFO));
@@ -53433,13 +54382,13 @@ static int memdbOpen(
   }
   if( p==0 ){
     MemStore **apNew;
-    p = sqlite3Malloc( sizeof(*p) + szName + 3 );
+    p = sqlite3Malloc( sizeof(*p) + (i64)szName + 3 );
     if( p==0 ){
       sqlite3_mutex_leave(pVfsMutex);
       return SQLITE_NOMEM;
     }
     apNew = sqlite3Realloc(memdb_g.apMemStore,
-                           sizeof(apNew[0])*(memdb_g.nMemStore+1) );
+                           sizeof(apNew[0])*(1+(i64)memdb_g.nMemStore) );
     if( apNew==0 ){
       sqlite3_free(p);
       sqlite3_mutex_leave(pVfsMutex);
@@ -53872,7 +54821,7 @@ SQLITE_PRIVATE int sqlite3MemdbInit(void){
 ** no fewer collisions than the no-op *1. */
 #define BITVEC_HASH(X)    (((X)*1)%BITVEC_NINT)
 
-#define BITVEC_NPTR      (BITVEC_USIZE/sizeof(Bitvec *))
+#define BITVEC_NPTR      ((u32)(BITVEC_USIZE/sizeof(Bitvec *)))
 
 
 /*
@@ -54021,7 +54970,9 @@ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){
     }else{
       memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash));
       memset(p->u.apSub, 0, sizeof(p->u.apSub));
-      p->iDivisor = (p->iSize + BITVEC_NPTR - 1)/BITVEC_NPTR;
+      p->iDivisor = p->iSize/BITVEC_NPTR;
+      if( (p->iSize%BITVEC_NPTR)!=0 ) p->iDivisor++;
+      if( p->iDivisor<BITVEC_NBIT ) p->iDivisor = BITVEC_NBIT;
       rc = sqlite3BitvecSet(p, i);
       for(j=0; jiSize<=BITVEC_NBIT ){
-    p->u.aBitmap[i/BITVEC_SZELEM] &= ~(1 << (i&(BITVEC_SZELEM-1)));
+    p->u.aBitmap[i/BITVEC_SZELEM] &= ~(BITVEC_TELEM)(1<<(i&(BITVEC_SZELEM-1)));
   }else{
     unsigned int j;
     u32 *aiValues = pBuf;
@@ -54106,7 +55057,7 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){
 ** individual bits within V.
 */
 #define SETBIT(V,I)      V[I>>3] |= (1<<(I&7))
-#define CLEARBIT(V,I)    V[I>>3] &= ~(1<<(I&7))
+#define CLEARBIT(V,I)    V[I>>3] &= ~(BITVEC_TELEM)(1<<(I&7))
 #define TESTBIT(V,I)     (V[I>>3]&(1<<(I&7)))!=0
 
 /*
@@ -54149,7 +55100,7 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){
   /* Allocate the Bitvec to be tested and a linear array of
   ** bits to act as the reference */
   pBitvec = sqlite3BitvecCreate( sz );
-  pV = sqlite3MallocZero( (sz+7)/8 + 1 );
+  pV = sqlite3MallocZero( (7+(i64)sz)/8 + 1 );
   pTmpSpace = sqlite3_malloc64(BITVEC_SZ);
   if( pBitvec==0 || pV==0 || pTmpSpace==0  ) goto bitvec_end;
 
@@ -54731,6 +55682,7 @@ static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
   pPgHdr->pData = pPage->pBuf;
   pPgHdr->pExtra = (void *)&pPgHdr[1];
   memset(pPgHdr->pExtra, 0, 8);
+  assert( EIGHT_BYTE_ALIGNMENT( pPgHdr->pExtra ) );
   pPgHdr->pCache = pCache;
   pPgHdr->pgno = pgno;
   pPgHdr->flags = PGHDR_CLEAN;
@@ -55389,10 +56341,6 @@ static SQLITE_WSD struct PCacheGlobal {
   sqlite3_mutex *mutex;               /* Mutex for accessing the following: */
   PgFreeslot *pFree;                  /* Free page blocks */
   int nFreeSlot;                      /* Number of unused pcache slots */
-  /* The following value requires a mutex to change.  We skip the mutex on
-  ** reading because (1) most platforms read a 32-bit integer atomically and
-  ** (2) even if an incorrect value is read, no great harm is done since this
-  ** is really just an optimization. */
   int bUnderPressure;                 /* True if low on PAGECACHE memory */
 } pcache1_g;
 
@@ -55440,7 +56388,7 @@ SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
     pcache1.nReserve = n>90 ?
10 : (n/10 + 1); pcache1.pStart = pBuf; pcache1.pFree = 0; - pcache1.bUnderPressure = 0; + AtomicStore(&pcache1.bUnderPressure,0); while( n-- ){ p = (PgFreeslot*)pBuf; p->pNext = pcache1.pFree; @@ -55477,7 +56425,8 @@ static int pcache1InitBulk(PCache1 *pCache){ do{ PgHdr1 *pX = (PgHdr1*)&zBulk[pCache->szPage]; pX->page.pBuf = zBulk; - pX->page.pExtra = &pX[1]; + pX->page.pExtra = (u8*)pX + ROUND8(sizeof(*pX)); + assert( EIGHT_BYTE_ALIGNMENT( pX->page.pExtra ) ); pX->isBulkLocal = 1; pX->isAnchor = 0; pX->pNext = pCache->pFree; @@ -55507,7 +56456,7 @@ static void *pcache1Alloc(int nByte){ if( p ){ pcache1.pFree = pcache1.pFree->pNext; pcache1.nFreeSlot--; - pcache1.bUnderPressure = pcache1.nFreeSlot=0 ); sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte); sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_USED, 1); @@ -55546,7 +56495,7 @@ static void pcache1Free(void *p){ pSlot->pNext = pcache1.pFree; pcache1.pFree = pSlot; pcache1.nFreeSlot++; - pcache1.bUnderPressure = pcache1.nFreeSlotszPage]; p->page.pBuf = pPg; - p->page.pExtra = &p[1]; + p->page.pExtra = (u8*)p + ROUND8(sizeof(*p)); + assert( EIGHT_BYTE_ALIGNMENT( p->page.pExtra ) ); p->isBulkLocal = 0; p->isAnchor = 0; p->pLruPrev = 0; /* Initializing this saves a valgrind error */ @@ -55676,7 +56626,7 @@ SQLITE_PRIVATE void sqlite3PageFree(void *p){ */ static int pcache1UnderMemoryPressure(PCache1 *pCache){ if( pcache1.nSlot && (pCache->szPage+pCache->szExtra)<=pcache1.szSlot ){ - return pcache1.bUnderPressure; + return AtomicLoad(&pcache1.bUnderPressure); }else{ return sqlite3HeapNearlyFull(); } @@ -55693,12 +56643,12 @@ static int pcache1UnderMemoryPressure(PCache1 *pCache){ */ static void pcache1ResizeHash(PCache1 *p){ PgHdr1 **apNew; - unsigned int nNew; - unsigned int i; + u64 nNew; + u32 i; assert( sqlite3_mutex_held(p->pGroup->mutex) ); - nNew = p->nHash*2; + nNew = 2*(u64)p->nHash; if( nNew<256 ){ nNew = 256; } @@ -55921,7 +56871,7 @@ static void pcache1Destroy(sqlite3_pcache *p); static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){ PCache1 *pCache; /* The newly created page cache */ PGroup *pGroup; /* The group the new page cache will belong to */ - int sz; /* Bytes of memory required to allocate the new cache */ + i64 sz; /* Bytes of memory required to allocate the new cache */ assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 ); assert( szExtra < 300 ); @@ -57809,6 +58759,9 @@ struct Pager { Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */ char *zWal; /* File name for write-ahead log */ #endif +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + sqlite3 *dbWal; +#endif }; /* @@ -57898,39 +58851,33 @@ static const unsigned char aJournalMagic[] = { # define USEFETCH(x) 0 #endif -/* -** The argument to this macro is a file descriptor (type sqlite3_file*). -** Return 0 if it is not open, or non-zero (but not 1) if it is. -** -** This is so that expressions can be written as: -** -** if( isOpen(pPager->jfd) ){ ... -** -** instead of -** -** if( pPager->jfd->pMethods ){ ... -*/ -#define isOpen(pFd) ((pFd)->pMethods!=0) - #ifdef SQLITE_DIRECT_OVERFLOW_READ /* ** Return true if page pgno can be read directly from the database file ** by the b-tree layer. This is the case if: ** -** * the database file is open, -** * there are no dirty pages in the cache, and -** * the desired page is not currently in the wal file. 
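The pcache1 hunks above replace the old "benign race" on bUnderPressure with explicit AtomicLoad()/AtomicStore() accesses. The C11 equivalent of that pattern looks roughly like this (a sketch of the idea, not SQLite's actual macros; names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int bUnderPressure;   /* written under a mutex, read anywhere */

static void setPressure(int nFreeSlot, int nReserve){
  /* called with the allocator mutex held */
  atomic_store_explicit(&bUnderPressure, nFreeSlot<nReserve,
                        memory_order_relaxed);
}

static bool underPressure(void){
  /* safe to call without the mutex: a relaxed atomic load is a data-race-
  ** free read, unlike the plain int read the removed comment rationalized */
  return atomic_load_explicit(&bUnderPressure, memory_order_relaxed)!=0;
}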
+** (1) the database file is open
+** (2) the VFS for the database is able to do unaligned sub-page reads
+** (3) there are no dirty pages in the cache, and
+** (4) the desired page is not currently in the wal file.
 */
 SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){
-  if( pPager->fd->pMethods==0 ) return 0;
-  if( sqlite3PCacheIsDirty(pPager->pPCache) ) return 0;
+  assert( pPager!=0 );
+  assert( pPager->fd!=0 );
+  if( pPager->fd->pMethods==0 ) return 0;                   /* Case (1) */
+  if( sqlite3PCacheIsDirty(pPager->pPCache) ) return 0;     /* Failed (3) */
 #ifndef SQLITE_OMIT_WAL
   if( pPager->pWal ){
     u32 iRead = 0;
     (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
-    return iRead==0;
+    if( iRead ) return 0;                                   /* Case (4) */
   }
 #endif
+  assert( pPager->fd->pMethods->xDeviceCharacteristics!=0 );
+  if( (pPager->fd->pMethods->xDeviceCharacteristics(pPager->fd)
+       & SQLITE_IOCAP_SUBPAGE_READ)==0 ){
+    return 0;                                               /* Case (2) */
+  }
   return 1;
 }
 #endif
@@ -58406,7 +59353,7 @@ static void checkPage(PgHdr *pPg){
 ** If an error occurs while reading from the journal file, an SQLite
 ** error code is returned.
 */
-static int readSuperJournal(sqlite3_file *pJrnl, char *zSuper, u32 nSuper){
+static int readSuperJournal(sqlite3_file *pJrnl, char *zSuper, u64 nSuper){
   int rc;                    /* Return code */
   u32 len;                   /* Length in bytes of super-journal name */
   i64 szJ;                   /* Total size in bytes of journal file pJrnl */
@@ -58961,6 +59908,15 @@ static void pager_unlock(Pager *pPager){
 
   if( pagerUseWal(pPager) ){
     assert( !isOpen(pPager->jfd) );
+    if( pPager->eState==PAGER_ERROR ){
+      /* If an IO error occurs in wal.c while attempting to wrap the wal file,
+      ** then the Wal object may be holding a write-lock but no read-lock.
+      ** This call ensures that the write-lock is dropped as well. We cannot
+      ** have sqlite3WalEndReadTransaction() drop the write-lock, as it once
+      ** did, because this would break "BEGIN EXCLUSIVE" handling for
+      ** SQLITE_ENABLE_SETLK_TIMEOUT builds.  */
+      sqlite3WalEndWriteTransaction(pPager->pWal);
+    }
     sqlite3WalEndReadTransaction(pPager->pWal);
     pPager->eState = PAGER_OPEN;
   }else if( !pPager->exclusiveMode ){
@@ -59189,7 +60145,7 @@ static int pager_end_transaction(Pager *pPager, int hasSuper, int bCommit){
     }
     pPager->journalOff = 0;
   }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST
-    || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL)
+    || (pPager->exclusiveMode && pPager->journalMode<PAGER_JOURNALMODE_WAL)
   ){
     rc = zeroJournalHdr(pPager, hasSuper||pPager->tempFile);
     pPager->journalOff = 0;
@@ -59642,12 +60598,12 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){
   char *zJournal;              /* Pointer to one journal within MJ file */
   char *zSuperPtr;             /* Space to hold super-journal filename */
   char *zFree = 0;             /* Free this buffer */
-  int nSuperPtr;               /* Amount of space allocated to zSuperPtr[] */
+  i64 nSuperPtr;               /* Amount of space allocated to zSuperPtr[] */
 
   /* Allocate space for both the pJournal and pSuper file descriptors.
   ** If successful, open the super-journal file for reading.
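A recurring fix in this region is widening one operand to i64/u64 before the arithmetic (1 + (i64)pVfs->mxPathname, 2 * (i64)pVfs->szOsFile, and so on), because casting an already-wrapped 32-bit result would be too late. A two-assert demonstration (illustrative):

#include <stdint.h>
#include <assert.h>

int main(void){
  uint32_t n = 0x80000000u;          /* 2 GiB */
  uint64_t bad  = (uint64_t)(n*2);   /* multiply wraps in 32 bits: 0 */
  uint64_t good = 2*(uint64_t)n;     /* multiply done in 64 bits */
  assert( bad==0 );
  assert( good==0x100000000ull );
  return 0;
}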
*/ - pSuper = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile * 2); + pSuper = (sqlite3_file *)sqlite3MallocZero(2 * (i64)pVfs->szOsFile); if( !pSuper ){ rc = SQLITE_NOMEM_BKPT; pJournal = 0; @@ -59665,11 +60621,14 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){ */ rc = sqlite3OsFileSize(pSuper, &nSuperJournal); if( rc!=SQLITE_OK ) goto delsuper_out; - nSuperPtr = pVfs->mxPathname+1; + nSuperPtr = 1 + (i64)pVfs->mxPathname; + assert( nSuperJournal>=0 && nSuperPtr>0 ); zFree = sqlite3Malloc(4 + nSuperJournal + nSuperPtr + 2); if( !zFree ){ rc = SQLITE_NOMEM_BKPT; goto delsuper_out; + }else{ + assert( nSuperJournal<=0x7fffffff ); } zFree[0] = zFree[1] = zFree[2] = zFree[3] = 0; zSuperJournal = &zFree[4]; @@ -59930,7 +60889,7 @@ static int pager_playback(Pager *pPager, int isHot){ ** for pageSize. */ zSuper = pPager->pTmpSpace; - rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1); + rc = readSuperJournal(pPager->jfd, zSuper, 1+(i64)pPager->pVfs->mxPathname); if( rc==SQLITE_OK && zSuper[0] ){ rc = sqlite3OsAccess(pVfs, zSuper, SQLITE_ACCESS_EXISTS, &res); } @@ -60069,7 +61028,7 @@ static int pager_playback(Pager *pPager, int isHot){ ** which case it requires 4 0x00 bytes in memory immediately before ** the filename. */ zSuper = &pPager->pTmpSpace[4]; - rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1); + rc = readSuperJournal(pPager->jfd, zSuper, 1+(i64)pPager->pVfs->mxPathname); testcase( rc!=SQLITE_OK ); } if( rc==SQLITE_OK @@ -61173,6 +62132,7 @@ static int pagerAcquireMapPage( return SQLITE_NOMEM_BKPT; } p->pExtra = (void *)&p[1]; + assert( EIGHT_BYTE_ALIGNMENT( p->pExtra ) ); p->flags = PGHDR_MMAP; p->nRef = 1; p->pPager = pPager; @@ -61839,6 +62799,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( const char *zUri = 0; /* URI args to copy */ int nUriByte = 1; /* Number of bytes of URI args at *zUri */ + /* Figure out how much space is required for each journal file-handle ** (there are two of them, the main journal and the sub-journal). 
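The EIGHT_BYTE_ALIGNMENT assertions added above guard a common SQLite layout: a header struct and its payload carved from one allocation, with the payload placed at a ROUND8() boundary rather than at &p[1], whose offset need not be a multiple of 8. A sketch of the idiom (assumes malloc() returns 8-byte-aligned memory, as on mainstream platforms; Hdr is a made-up type):

#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <assert.h>

#define ROUND8(x) (((x)+7)&~7)

typedef struct Hdr { char tag; } Hdr;   /* sizeof(Hdr) is not 8-aligned */

int main(void){
  unsigned char *z = malloc( ROUND8(sizeof(Hdr)) + 64 );
  if( z ){
    void *pExtra = z + ROUND8(sizeof(Hdr));  /* payload after the header */
    assert( ((uintptr_t)pExtra & 7)==0 );    /* EIGHT_BYTE_ALIGNMENT holds */
    free(z);
  }
  return 0;
}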
 */
   journalFileSize = ROUND8(sqlite3JournalSize(pVfs));
@@ -61864,8 +62825,8 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
   */
   if( zFilename && zFilename[0] ){
     const char *z;
-    nPathname = pVfs->mxPathname+1;
-    zPathname = sqlite3DbMallocRaw(0, nPathname*2);
+    nPathname = pVfs->mxPathname + 1;
+    zPathname = sqlite3DbMallocRaw(0, 2*(i64)nPathname);
     if( zPathname==0 ){
       return SQLITE_NOMEM_BKPT;
     }
@@ -61952,14 +62913,14 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
     ROUND8(sizeof(*pPager)) +            /* Pager structure */
     ROUND8(pcacheSize) +                 /* PCache object */
     ROUND8(pVfs->szOsFile) +             /* The main db file */
-    journalFileSize * 2 +                /* The two journal files */
+    (u64)journalFileSize * 2 +           /* The two journal files */
     SQLITE_PTRSIZE +                     /* Space to hold a pointer */
     4 +                                  /* Database prefix */
-    nPathname + 1 +                      /* database filename */
-    nUriByte +                           /* query parameters */
-    nPathname + 8 + 1 +                  /* Journal filename */
+    (u64)nPathname + 1 +                 /* database filename */
+    (u64)nUriByte +                      /* query parameters */
+    (u64)nPathname + 8 + 1 +             /* Journal filename */
 #ifndef SQLITE_OMIT_WAL
-    nPathname + 4 + 1 +                  /* WAL filename */
+    (u64)nPathname + 4 + 1 +             /* WAL filename */
 #endif
     3                                    /* Terminator */
   );
@@ -64682,6 +65643,11 @@ static int pagerOpenWal(Pager *pPager){
         pPager->fd, pPager->zWal, pPager->exclusiveMode,
         pPager->journalSizeLimit, &pPager->pWal
     );
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    if( rc==SQLITE_OK ){
+      sqlite3WalDb(pPager->pWal, pPager->dbWal);
+    }
+#endif
   }
   pagerFixMaplimit(pPager);
 
@@ -64801,6 +65767,7 @@ SQLITE_PRIVATE int sqlite3PagerWalWriteLock(Pager *pPager, int bLock){
 ** blocking locks are required.
 */
 SQLITE_PRIVATE void sqlite3PagerWalDb(Pager *pPager, sqlite3 *db){
+  pPager->dbWal = db;
   if( pagerUseWal(pPager) ){
     sqlite3WalDb(pPager->pWal, db);
   }
@@ -64956,7 +65923,7 @@ SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager *pPager){
 **     28: Checksum-2 (second part of checksum for first 24 bytes of header).
 **
 ** Immediately following the wal-header are zero or more frames. Each
-** frame consists of a 24-byte frame-header followed by a <page-size> bytes
+** frame consists of a 24-byte frame-header followed by <page-size> bytes
 ** of page data. The frame-header is six big-endian 32-bit unsigned
 ** integer values, as follows:
 **
@@ -65414,6 +66381,11 @@ struct WalCkptInfo {
 /*
 ** An open write-ahead log file is represented by an instance of the
 ** following object.
+**
+** writeLock:
+**   This is usually set to 1 whenever the WRITER lock is held. However,
+**   if it is set to 2, then the WRITER lock is held but must be released
+**   by walHandleException() if a SEH exception is thrown.
 */
 struct Wal {
   sqlite3_vfs *pVfs;         /* The VFS used to create pDbFd */
@@ -65453,6 +66425,7 @@ struct Wal {
 #endif
 #ifdef SQLITE_ENABLE_SNAPSHOT
   WalIndexHdr *pSnapshot;    /* Start transaction here if not NULL */
+  int bGetSnapshot;          /* Transaction opened for sqlite3_get_snapshot() */
 #endif
 #ifdef SQLITE_ENABLE_SETLK_TIMEOUT
   sqlite3 *db;
@@ -65503,9 +66476,13 @@ struct WalIterator {
     u32 *aPgno;                   /* Array of page numbers. */
     int nEntry;                   /* Nr. of entries in aPgno[] and aIndex[] */
     int iZero;                    /* Frame number associated with aPgno[0] */
-  } aSegment[1];                  /* One for every 32KB page in the wal-index */
+  } aSegment[FLEXARRAY];          /* One for every 32KB page in the wal-index */
 };
 
+/* Size (in bytes) of a WalIterator object suitable for N or fewer segments */
+#define SZ_WALITERATOR(N)  \
+       (offsetof(WalIterator,aSegment)+(N)*sizeof(struct WalSegment))
+
 /*
 ** Define the parameters of the hash tables in the wal-index file. 
There ** is a hash-table following every HASHTABLE_NPAGE page numbers in the @@ -65664,7 +66641,7 @@ static SQLITE_NOINLINE int walIndexPageRealloc( /* Enlarge the pWal->apWiData[] array if required */ if( pWal->nWiData<=iPage ){ - sqlite3_int64 nByte = sizeof(u32*)*(iPage+1); + sqlite3_int64 nByte = sizeof(u32*)*(1+(i64)iPage); volatile u32 **apNew; apNew = (volatile u32 **)sqlite3Realloc((void *)pWal->apWiData, nByte); if( !apNew ){ @@ -65773,10 +66750,8 @@ static void walChecksumBytes( s1 = s2 = 0; } - assert( nByte>=8 ); - assert( (nByte&0x00000007)==0 ); - assert( nByte<=65536 ); - assert( nByte%4==0 ); + /* nByte is a multiple of 8 between 8 and 65536 */ + assert( nByte>=8 && (nByte&7)==0 && nByte<=65536 ); if( !nativeCksum ){ do { @@ -66866,8 +67841,7 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ /* Allocate space for the WalIterator object. */ nSegment = walFramePage(iLast) + 1; - nByte = sizeof(WalIterator) - + (nSegment-1)*sizeof(struct WalSegment) + nByte = SZ_WALITERATOR(nSegment) + iLast*sizeof(ht_slot); p = (WalIterator *)sqlite3_malloc64(nByte + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) @@ -66938,7 +67912,7 @@ static int walEnableBlockingMs(Wal *pWal, int nMs){ static int walEnableBlocking(Wal *pWal){ int res = 0; if( pWal->db ){ - int tmout = pWal->db->busyTimeout; + int tmout = pWal->db->setlkTimeout; if( tmout ){ res = walEnableBlockingMs(pWal, tmout); } @@ -67324,7 +68298,9 @@ static int walHandleException(Wal *pWal){ static const int S = 1; static const int E = (1<lockMask & ~( + u32 mUnlock; + if( pWal->writeLock==2 ) pWal->writeLock = 0; + mUnlock = pWal->lockMask & ~( (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock))) | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0) | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0) @@ -67345,7 +68321,7 @@ static int walHandleException(Wal *pWal){ /* ** Assert that the Wal.lockMask mask, which indicates the locks held -** by the connenction, is consistent with the Wal.readLock, Wal.writeLock +** by the connection, is consistent with the Wal.readLock, Wal.writeLock ** and Wal.ckptLock variables. To be used as: ** ** assert( walAssertLockmask(pWal) ); @@ -67596,7 +68572,12 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){ if( bWriteLock || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) ){ - pWal->writeLock = 1; + /* If the write-lock was just obtained, set writeLock to 2 instead of + ** the usual 1. This causes walIndexPage() to behave as if the + ** write-lock were held (so that it allocates new pages as required), + ** and walHandleException() to unlock the write-lock if a SEH exception + ** is thrown. 
*/ + if( !bWriteLock ) pWal->writeLock = 2; if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ badHdr = walIndexTryHdr(pWal, pChanged); if( badHdr ){ @@ -67897,11 +68878,7 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ */ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ - u32 mxReadMark; /* Largest aReadMark[] value */ - int mxI; /* Index of largest aReadMark[] value */ - int i; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ - u32 mxFrame; /* Wal frame to lock to */ #ifdef SQLITE_ENABLE_SETLK_TIMEOUT int nBlockTmout = 0; #endif @@ -67964,7 +68941,6 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ rc = walIndexReadHdr(pWal, pChanged); } #ifdef SQLITE_ENABLE_SETLK_TIMEOUT - walDisableBlocking(pWal); if( rc==SQLITE_BUSY_TIMEOUT ){ rc = SQLITE_BUSY; *pCnt |= WAL_RETRY_BLOCKED_MASK; @@ -67979,6 +68955,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ ** WAL_RETRY this routine will be called again and will probably be ** right on the second iteration. */ + (void)walEnableBlocking(pWal); if( pWal->apWiData[0]==0 ){ /* This branch is taken when the xShmMap() method returns SQLITE_BUSY. ** We assume this is a transient condition, so return WAL_RETRY. The @@ -67995,6 +68972,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ rc = SQLITE_BUSY_RECOVERY; } } + walDisableBlocking(pWal); if( rc!=SQLITE_OK ){ return rc; } @@ -68007,141 +68985,147 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ assert( pWal->apWiData[0]!=0 ); pInfo = walCkptInfo(pWal); SEH_INJECT_FAULT; - if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame + { + u32 mxReadMark; /* Largest aReadMark[] value */ + int mxI; /* Index of largest aReadMark[] value */ + int i; /* Loop counter */ + u32 mxFrame; /* Wal frame to lock to */ + if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame #ifdef SQLITE_ENABLE_SNAPSHOT - && (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0) + && ((pWal->bGetSnapshot==0 && pWal->pSnapshot==0) || pWal->hdr.mxFrame==0) #endif - ){ - /* The WAL has been completely backfilled (or it is empty). - ** and can be safely ignored. - */ - rc = walLockShared(pWal, WAL_READ_LOCK(0)); - walShmBarrier(pWal); - if( rc==SQLITE_OK ){ - if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ - /* It is not safe to allow the reader to continue here if frames - ** may have been appended to the log before READ_LOCK(0) was obtained. - ** When holding READ_LOCK(0), the reader ignores the entire log file, - ** which implies that the database file contains a trustworthy - ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from - ** happening, this is usually correct. - ** - ** However, if frames have been appended to the log (or if the log - ** is wrapped and written for that matter) before the READ_LOCK(0) - ** is obtained, that is not necessarily true. A checkpointer may - ** have started to backfill the appended frames but crashed before - ** it finished. Leaving a corrupt image in the database file. - */ - walUnlockShared(pWal, WAL_READ_LOCK(0)); - return WAL_RETRY; + ){ + /* The WAL has been completely backfilled (or it is empty). + ** and can be safely ignored. 
+ */ + rc = walLockShared(pWal, WAL_READ_LOCK(0)); + walShmBarrier(pWal); + if( rc==SQLITE_OK ){ + if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr,sizeof(WalIndexHdr)) ){ + /* It is not safe to allow the reader to continue here if frames + ** may have been appended to the log before READ_LOCK(0) was obtained. + ** When holding READ_LOCK(0), the reader ignores the entire log file, + ** which implies that the database file contains a trustworthy + ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from + ** happening, this is usually correct. + ** + ** However, if frames have been appended to the log (or if the log + ** is wrapped and written for that matter) before the READ_LOCK(0) + ** is obtained, that is not necessarily true. A checkpointer may + ** have started to backfill the appended frames but crashed before + ** it finished. Leaving a corrupt image in the database file. + */ + walUnlockShared(pWal, WAL_READ_LOCK(0)); + return WAL_RETRY; + } + pWal->readLock = 0; + return SQLITE_OK; + }else if( rc!=SQLITE_BUSY ){ + return rc; } - pWal->readLock = 0; - return SQLITE_OK; - }else if( rc!=SQLITE_BUSY ){ - return rc; } - } - /* If we get this far, it means that the reader will want to use - ** the WAL to get at content from recent commits. The job now is - ** to select one of the aReadMark[] entries that is closest to - ** but not exceeding pWal->hdr.mxFrame and lock that entry. - */ - mxReadMark = 0; - mxI = 0; - mxFrame = pWal->hdr.mxFrame; + /* If we get this far, it means that the reader will want to use + ** the WAL to get at content from recent commits. The job now is + ** to select one of the aReadMark[] entries that is closest to + ** but not exceeding pWal->hdr.mxFrame and lock that entry. + */ + mxReadMark = 0; + mxI = 0; + mxFrame = pWal->hdr.mxFrame; #ifdef SQLITE_ENABLE_SNAPSHOT - if( pWal->pSnapshot && pWal->pSnapshot->mxFramepSnapshot->mxFrame; - } -#endif - for(i=1; iaReadMark+i); SEH_INJECT_FAULT; - if( mxReadMark<=thisMark && thisMark<=mxFrame ){ - assert( thisMark!=READMARK_NOT_USED ); - mxReadMark = thisMark; - mxI = i; + if( pWal->pSnapshot && pWal->pSnapshot->mxFramepSnapshot->mxFrame; } - } - if( (pWal->readOnly & WAL_SHM_RDONLY)==0 - && (mxReadMarkaReadMark+i,mxFrame); - mxReadMark = mxFrame; + u32 thisMark = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT; + if( mxReadMark<=thisMark && thisMark<=mxFrame ){ + assert( thisMark!=READMARK_NOT_USED ); + mxReadMark = thisMark; mxI = i; - walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); - break; - }else if( rc!=SQLITE_BUSY ){ - return rc; } } - } - if( mxI==0 ){ - assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); - return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT; - } + if( (pWal->readOnly & WAL_SHM_RDONLY)==0 + && (mxReadMarkaReadMark+i,mxFrame); + mxReadMark = mxFrame; + mxI = i; + walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); + break; + }else if( rc!=SQLITE_BUSY ){ + return rc; + } + } + } + if( mxI==0 ){ + assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); + return rc==SQLITE_BUSY ? 
WAL_RETRY : SQLITE_READONLY_CANTINIT; + } - (void)walEnableBlockingMs(pWal, nBlockTmout); - rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); - walDisableBlocking(pWal); - if( rc ){ + (void)walEnableBlockingMs(pWal, nBlockTmout); + rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); + walDisableBlocking(pWal); + if( rc ){ #ifdef SQLITE_ENABLE_SETLK_TIMEOUT - if( rc==SQLITE_BUSY_TIMEOUT ){ - *pCnt |= WAL_RETRY_BLOCKED_MASK; - } + if( rc==SQLITE_BUSY_TIMEOUT ){ + *pCnt |= WAL_RETRY_BLOCKED_MASK; + } #else - assert( rc!=SQLITE_BUSY_TIMEOUT ); + assert( rc!=SQLITE_BUSY_TIMEOUT ); #endif - assert( (rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT ); - return (rc&0xFF)==SQLITE_BUSY ? WAL_RETRY : rc; - } - /* Now that the read-lock has been obtained, check that neither the - ** value in the aReadMark[] array or the contents of the wal-index - ** header have changed. - ** - ** It is necessary to check that the wal-index header did not change - ** between the time it was read and when the shared-lock was obtained - ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility - ** that the log file may have been wrapped by a writer, or that frames - ** that occur later in the log than pWal->hdr.mxFrame may have been - ** copied into the database by a checkpointer. If either of these things - ** happened, then reading the database with the current value of - ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry - ** instead. - ** - ** Before checking that the live wal-index header has not changed - ** since it was read, set Wal.minFrame to the first frame in the wal - ** file that has not yet been checkpointed. This client will not need - ** to read any frames earlier than minFrame from the wal file - they - ** can be safely read directly from the database file. - ** - ** Because a ShmBarrier() call is made between taking the copy of - ** nBackfill and checking that the wal-header in shared-memory still - ** matches the one cached in pWal->hdr, it is guaranteed that the - ** checkpointer that set nBackfill was not working with a wal-index - ** header newer than that cached in pWal->hdr. If it were, that could - ** cause a problem. The checkpointer could omit to checkpoint - ** a version of page X that lies before pWal->minFrame (call that version - ** A) on the basis that there is a newer version (version B) of the same - ** page later in the wal file. But if version B happens to like past - ** frame pWal->hdr.mxFrame - then the client would incorrectly assume - ** that it can read version A from the database file. However, since - ** we can guarantee that the checkpointer that set nBackfill could not - ** see any pages past pWal->hdr.mxFrame, this problem does not come up. - */ - pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; SEH_INJECT_FAULT; - walShmBarrier(pWal); - if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark - || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) - ){ - walUnlockShared(pWal, WAL_READ_LOCK(mxI)); - return WAL_RETRY; - }else{ - assert( mxReadMark<=pWal->hdr.mxFrame ); - pWal->readLock = (i16)mxI; + assert((rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT); + return (rc&0xFF)==SQLITE_BUSY ? WAL_RETRY : rc; + } + /* Now that the read-lock has been obtained, check that neither the + ** value in the aReadMark[] array or the contents of the wal-index + ** header have changed. 
+    **
+    ** It is necessary to check that the wal-index header did not change
+    ** between the time it was read and when the shared-lock was obtained
+    ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility
+    ** that the log file may have been wrapped by a writer, or that frames
+    ** that occur later in the log than pWal->hdr.mxFrame may have been
+    ** copied into the database by a checkpointer. If either of these things
+    ** happened, then reading the database with the current value of
+    ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry
+    ** instead.
+    **
+    ** Before checking that the live wal-index header has not changed
+    ** since it was read, set Wal.minFrame to the first frame in the wal
+    ** file that has not yet been checkpointed. This client will not need
+    ** to read any frames earlier than minFrame from the wal file - they
+    ** can be safely read directly from the database file.
+    **
+    ** Because a ShmBarrier() call is made between taking the copy of
+    ** nBackfill and checking that the wal-header in shared-memory still
+    ** matches the one cached in pWal->hdr, it is guaranteed that the
+    ** checkpointer that set nBackfill was not working with a wal-index
+    ** header newer than that cached in pWal->hdr. If it were, that could
+    ** cause a problem. The checkpointer could omit to checkpoint
+    ** a version of page X that lies before pWal->minFrame (call that version
+    ** A) on the basis that there is a newer version (version B) of the same
+    ** page later in the wal file. But if version B happens to lie past
+    ** frame pWal->hdr.mxFrame - then the client would incorrectly assume
+    ** that it can read version A from the database file. However, since
+    ** we can guarantee that the checkpointer that set nBackfill could not
+    ** see any pages past pWal->hdr.mxFrame, this problem does not come up.
+    */
+    pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; SEH_INJECT_FAULT;
+    walShmBarrier(pWal);
+    if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark
+     || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr))
+    ){
+      walUnlockShared(pWal, WAL_READ_LOCK(mxI));
+      return WAL_RETRY;
+    }else{
+      assert( mxReadMark<=pWal->hdr.mxFrame );
+      pWal->readLock = (i16)mxI;
+    }
   }
   return rc;
 }
@@ -68379,8 +69363,11 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
 ** read-lock.
 */
 SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){
-  sqlite3WalEndWriteTransaction(pWal);
+#ifndef SQLITE_ENABLE_SETLK_TIMEOUT
+  assert( pWal->writeLock==0 || pWal->readLock<0 );
+#endif
   if( pWal->readLock>=0 ){
+    sqlite3WalEndWriteTransaction(pWal);
     walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock));
     pWal->readLock = -1;
   }
@@ -68573,7 +69560,7 @@ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){
   ** read-transaction was even opened, making this call a no-op.
   ** Return early. 
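The restructured walTryBeginRead() above still implements the same reader protocol: find the largest aReadMark[] value that does not exceed the mxFrame this reader wants, then share-lock that slot. The selection step, reduced to a standalone function (a sketch with illustrative constants, not SQLite's code):

#include <stdint.h>

#define NREADER 8
#define READMARK_NOT_USED 0xffffffffu

static int pickReadMark(const uint32_t *aReadMark, uint32_t mxFrame,
                        uint32_t *pMark){
  uint32_t mx = 0;
  int mxI = 0, i;
  for(i=1; i<NREADER; i++){
    uint32_t m = aReadMark[i];
    if( m!=READMARK_NOT_USED && mx<=m && m<=mxFrame ){
      mx = m;        /* best candidate so far: closest at or below mxFrame */
      mxI = i;
    }
  }
  *pMark = mx;
  return mxI;        /* 0 means no usable slot: caller must upgrade one */
}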
*/ if( pWal->writeLock ){ - assert( !memcmp(&pWal->hdr,(void *)walIndexHdr(pWal),sizeof(WalIndexHdr)) ); + assert( !memcmp(&pWal->hdr,(void*)pWal->apWiData[0],sizeof(WalIndexHdr)) ); return SQLITE_OK; } #endif @@ -68673,6 +69660,7 @@ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *p if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); } SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + pWal->iReCksum = 0; } return rc; } @@ -68720,6 +69708,9 @@ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){ walCleanupHash(pWal); } SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + if( pWal->iReCksum>pWal->hdr.mxFrame ){ + pWal->iReCksum = 0; + } } return rc; @@ -69409,7 +70400,20 @@ SQLITE_PRIVATE void sqlite3WalSnapshotOpen( Wal *pWal, sqlite3_snapshot *pSnapshot ){ - pWal->pSnapshot = (WalIndexHdr*)pSnapshot; + if( pSnapshot && ((WalIndexHdr*)pSnapshot)->iVersion==0 ){ + /* iVersion==0 means that this is a call to sqlite3_snapshot_get(). In + ** this case set the bGetSnapshot flag so that if the call to + ** sqlite3_snapshot_get() is about to read transaction on this wal + ** file, it does not take read-lock 0 if the wal file has been completely + ** checkpointed. Taking read-lock 0 would work, but then it would be + ** possible for a subsequent writer to destroy the snapshot even while + ** this connection is holding its read-transaction open. This is contrary + ** to user expectations, so we avoid it by not taking read-lock 0. */ + pWal->bGetSnapshot = 1; + }else{ + pWal->pSnapshot = (WalIndexHdr*)pSnapshot; + pWal->bGetSnapshot = 0; + } } /* @@ -70009,6 +71013,12 @@ struct CellInfo { */ #define BTCURSOR_MAX_DEPTH 20 +/* +** Maximum amount of storage local to a database page, regardless of +** page size. +*/ +#define BT_MAX_LOCAL 65501 /* 65536 - 35 */ + /* ** A cursor is a pointer to a particular entry within a particular ** b-tree within a database file. @@ -70417,7 +71427,7 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){ */ static void SQLITE_NOINLINE btreeEnterAll(sqlite3 *db){ int i; - int skipOk = 1; + u8 skipOk = 1; Btree *p; assert( sqlite3_mutex_held(db->mutex) ); for(i=0; inDb; i++){ @@ -71273,7 +72283,7 @@ static int saveCursorKey(BtCursor *pCur){ ** below. */ void *pKey; pCur->nKey = sqlite3BtreePayloadSize(pCur); - pKey = sqlite3Malloc( pCur->nKey + 9 + 8 ); + pKey = sqlite3Malloc( ((i64)pCur->nKey) + 9 + 8 ); if( pKey ){ rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey); if( rc==SQLITE_OK ){ @@ -71563,7 +72573,7 @@ SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){ */ SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor *pCur, unsigned x){ assert( x==BTREE_SEEK_EQ || x==BTREE_BULKLOAD || x==0 ); - pCur->hints = x; + pCur->hints = (u8)x; } @@ -71757,14 +72767,15 @@ static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow( static int btreePayloadToLocal(MemPage *pPage, i64 nPayload){ int maxLocal; /* Maximum amount of payload held locally */ maxLocal = pPage->maxLocal; + assert( nPayload>=0 ); if( nPayload<=maxLocal ){ - return nPayload; + return (int)nPayload; }else{ int minLocal; /* Minimum amount of payload held locally */ int surplus; /* Overflow payload available for local storage */ minLocal = pPage->minLocal; - surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize-4); - return ( surplus <= maxLocal ) ? surplus : minLocal; + surplus = (int)(minLocal +(nPayload - minLocal)%(pPage->pBt->usableSize-4)); + return (surplus <= maxLocal) ? 
surplus : minLocal; } } @@ -71874,11 +72885,13 @@ static void btreeParseCellPtr( pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==(u32)pPage->maxLocal+1 ); + assert( nPayload>=0 ); + assert( pPage->maxLocal <= BT_MAX_LOCAL ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ - pInfo->nSize = nPayload + (u16)(pIter - pCell); + pInfo->nSize = (u16)nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; }else{ @@ -71911,11 +72924,13 @@ static void btreeParseCellPtrIndex( pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==(u32)pPage->maxLocal+1 ); + assert( nPayload>=0 ); + assert( pPage->maxLocal <= BT_MAX_LOCAL ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ - pInfo->nSize = nPayload + (u16)(pIter - pCell); + pInfo->nSize = (u16)nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; }else{ @@ -72454,14 +73469,14 @@ static SQLITE_INLINE int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ ** at the end of the page. So do additional corruption checks inside this ** routine and return SQLITE_CORRUPT if any problems are found. */ -static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ - u16 iPtr; /* Address of ptr to next freeblock */ - u16 iFreeBlk; /* Address of the next freeblock */ +static int freeSpace(MemPage *pPage, int iStart, int iSize){ + int iPtr; /* Address of ptr to next freeblock */ + int iFreeBlk; /* Address of the next freeblock */ u8 hdr; /* Page header size. 0 or 100 */ - u8 nFrag = 0; /* Reduction in fragmentation */ - u16 iOrigSize = iSize; /* Original value of iSize */ - u16 x; /* Offset to cell content area */ - u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */ + int nFrag = 0; /* Reduction in fragmentation */ + int iOrigSize = iSize; /* Original value of iSize */ + int x; /* Offset to cell content area */ + int iEnd = iStart + iSize; /* First byte past the iStart buffer */ unsigned char *data = pPage->aData; /* Page content */ u8 *pTmp; /* Temporary ptr into data[] */ @@ -72488,7 +73503,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ } iPtr = iFreeBlk; } - if( iFreeBlk>pPage->pBt->usableSize-4 ){ /* TH3: corrupt081.100 */ + if( iFreeBlk>(int)pPage->pBt->usableSize-4 ){ /* TH3: corrupt081.100 */ return SQLITE_CORRUPT_PAGE(pPage); } assert( iFreeBlk>iPtr || iFreeBlk==0 || CORRUPT_DB ); @@ -72503,7 +73518,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ nFrag = iFreeBlk - iEnd; if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_PAGE(pPage); iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]); - if( iEnd > pPage->pBt->usableSize ){ + if( iEnd > (int)pPage->pBt->usableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } iSize = iEnd - iStart; @@ -72524,7 +73539,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ } } if( nFrag>data[hdr+7] ) return SQLITE_CORRUPT_PAGE(pPage); - data[hdr+7] -= nFrag; + data[hdr+7] -= (u8)nFrag; } pTmp = &data[hdr+5]; x = get2byte(pTmp); @@ -72545,7 +73560,8 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ /* Insert the new freeblock into the freelist */ put2byte(&data[iPtr], iStart); put2byte(&data[iStart], iFreeBlk); - put2byte(&data[iStart+2], iSize); + assert( iSize>=0 && iSize<=0xffff ); + put2byte(&data[iStart+2], 
(u16)iSize);
   }
   pPage->nFree += iOrigSize;
   return SQLITE_OK;
@@ -72771,7 +73787,7 @@ static int btreeInitPage(MemPage *pPage){
     assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
     pPage->maskPage = (u16)(pBt->pageSize - 1);
     pPage->nOverflow = 0;
-    pPage->cellOffset = pPage->hdrOffset + 8 + pPage->childPtrSize;
+    pPage->cellOffset = (u16)(pPage->hdrOffset + 8 + pPage->childPtrSize);
     pPage->aCellIdx = data + pPage->childPtrSize + 8;
     pPage->aDataEnd = pPage->aData + pBt->pageSize;
     pPage->aDataOfst = pPage->aData + pPage->childPtrSize;
@@ -72805,8 +73821,8 @@ static int btreeInitPage(MemPage *pPage){
 static void zeroPage(MemPage *pPage, int flags){
   unsigned char *data = pPage->aData;
   BtShared *pBt = pPage->pBt;
-  u8 hdr = pPage->hdrOffset;
-  u16 first;
+  int hdr = pPage->hdrOffset;
+  int first;
 
   assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno || CORRUPT_DB );
   assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
@@ -72823,7 +73839,7 @@ static void zeroPage(MemPage *pPage, int flags){
   put2byte(&data[hdr+5], pBt->usableSize);
   pPage->nFree = (u16)(pBt->usableSize - first);
   decodeFlags(pPage, flags);
-  pPage->cellOffset = first;
+  pPage->cellOffset = (u16)first;
   pPage->aDataEnd = &data[pBt->pageSize];
   pPage->aCellIdx = &data[first];
   pPage->aDataOfst = &data[pPage->childPtrSize];
@@ -73609,7 +74625,7 @@ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve,
   BtShared *pBt = p->pBt;
   assert( nReserve>=0 && nReserve<=255 );
   sqlite3BtreeEnter(p);
-  pBt->nReserveWanted = nReserve;
+  pBt->nReserveWanted = (u8)nReserve;
   x = pBt->pageSize - pBt->usableSize;
   if( nReserve<x ) nReserve = x;
   if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){
@@ -73715,7 +74731,7 @@ SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){
   assert( BTS_FAST_SECURE==(BTS_OVERWRITE|BTS_SECURE_DELETE) );
   if( newFlag>=0 ){
     p->pBt->btsFlags &= ~BTS_FAST_SECURE;
-    p->pBt->btsFlags |= BTS_SECURE_DELETE*newFlag;
+    p->pBt->btsFlags |= (u16)(BTS_SECURE_DELETE*newFlag);
   }
   b = (p->pBt->btsFlags & BTS_FAST_SECURE)/BTS_SECURE_DELETE;
   sqlite3BtreeLeave(p);
@@ -74235,6 +75251,13 @@ static SQLITE_NOINLINE int btreeBeginTrans(
         (void)sqlite3PagerWalWriteLock(pPager, 0);
         unlockBtreeIfUnused(pBt);
       }
+#if defined(SQLITE_ENABLE_SETLK_TIMEOUT)
+      if( rc==SQLITE_BUSY_TIMEOUT ){
+        /* If a blocking lock timed out, break out of the loop here so that
+        ** the busy-handler is not invoked. */
+        break;
+      }
+#endif
     }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE &&
             btreeInvokeBusyHandler(pBt) );
     sqlite3PagerWalDb(pPager, 0);
@@ -75290,6 +76313,25 @@ SQLITE_PRIVATE int sqlite3BtreeCursorSize(void){
   return ROUND8(sizeof(BtCursor));
 }
 
+#ifdef SQLITE_DEBUG
+/*
+** Return true if and only if the Btree object will be automatically
+** closed when the BtCursor closes. This is used within assert() statements
+** only.
+*/
+SQLITE_PRIVATE int sqlite3BtreeClosesWithCursor(
+  Btree *pBtree,         /* the btree object */
+  BtCursor *pCur         /* Corresponding cursor */
+){
+  BtShared *pBt = pBtree->pBt;
+  if( (pBt->openFlags & BTREE_SINGLE)==0 ) return 0;
+  if( pBt->pCursor!=pCur ) return 0;
+  if( pCur->pNext!=0 ) return 0;
+  if( pCur->pBtree!=pBtree ) return 0;
+  return 1;
+}
+#endif
+
 /*
 ** Initialize memory that will be converted into a BtCursor object.
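freeSpace() above now does its offset arithmetic in int rather than u16: on a 65536-byte page, iStart+iSize can legitimately reach 65536, which wraps to 0 in 16 bits and would defeat the corruption checks. A two-assert demonstration (illustrative):

#include <stdint.h>
#include <assert.h>

int main(void){
  uint16_t iStart = 65000, iSize = 536;
  uint16_t iEnd16 = iStart + iSize;       /* truncates: 65536 becomes 0 */
  int      iEnd   = (int)iStart + iSize;  /* correct: 65536 */
  assert( iEnd16==0 );
  assert( iEnd==65536 );
  return 0;
}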
 **
@@ -76533,7 +77575,7 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto(
    && indexCellCompare(pCur, 0, pIdxKey, xRecordCompare)<=0
    && pIdxKey->errCode==SQLITE_OK
   ){
-    pCur->curFlags &= ~BTCF_ValidOvfl;
+    pCur->curFlags &= ~(BTCF_ValidOvfl|BTCF_AtLast);
     if( !pCur->pPage->isInit ){
       return SQLITE_CORRUPT_BKPT;
     }
@@ -76625,7 +77667,7 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto(
         rc = SQLITE_CORRUPT_PAGE(pPage);
         goto moveto_index_finish;
       }
-      pCellKey = sqlite3Malloc( nCell+nOverrun );
+      pCellKey = sqlite3Malloc( (u64)nCell+(u64)nOverrun );
       if( pCellKey==0 ){
         rc = SQLITE_NOMEM_BKPT;
         goto moveto_index_finish;
@@ -78111,7 +79153,8 @@ static int rebuildPage(
   if( j>(u32)usableSize ){ j = 0; }
   memcpy(&pTmp[j], &aData[j], usableSize - j);
 
-  for(k=0; ALWAYS(k<NB*2-1) && pCArray->ixNx[k]<=i; k++){}
+  assert( pCArray->ixNx[NB*2-1]>i );
+  for(k=0; pCArray->ixNx[k]<=i; k++){}
   pSrcEnd = pCArray->apEnd[k];
 
   pData = pEnd;
@@ -78143,7 +79186,8 @@ static int rebuildPage(
   }
 
   /* The pPg->nFree field is now set incorrectly. The caller will fix it. */
-  pPg->nCell = nCell;
+  assert( nCell < 10922 );
+  pPg->nCell = (u16)nCell;
   pPg->nOverflow = 0;
 
   put2byte(&aData[hdr+1], 0);
@@ -78194,7 +79238,8 @@ static int pageInsertArray(
   u8 *pEnd;                       /* Maximum extent of cell data */
   assert( CORRUPT_DB || pPg->hdrOffset==0 );    /* Never called on page 1 */
   if( iEnd<=iFirst ) return 0;
-  for(k=0; ALWAYS(k<NB*2-1) && pCArray->ixNx[k]<=i ; k++){}
+  assert( pCArray->ixNx[NB*2-1]>i );
+  for(k=0; pCArray->ixNx[k]<=i ; k++){}
   pEnd = pCArray->apEnd[k];
   while( 1 /*Exit by break*/ ){
     int sz, rc;
@@ -78389,9 +79434,13 @@ static int editPage(
   if( pageInsertArray(
         pPg, pBegin, &pData, pCellptr,
         iNew+nCell, nNew-nCell, pCArray
-  ) ) goto editpage_fail;
+  )
+  ){
+    goto editpage_fail;
+  }
 
-  pPg->nCell = nNew;
+  assert( nNew < 10922 );
+  pPg->nCell = (u16)nNew;
   pPg->nOverflow = 0;
 
   put2byte(&aData[hdr+3], pPg->nCell);
@@ -78479,6 +79528,7 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
     b.szCell = &szCell;
     b.apEnd[0] = pPage->aDataEnd;
     b.ixNx[0] = 2;
+    b.ixNx[NB*2-1] = 0x7fffffff;
     rc = rebuildPage(&b, 0, 1, pNew);
     if( NEVER(rc) ){
       releasePage(pNew);
@@ -78699,7 +79749,7 @@ static int balance_nonroot(
   int pageFlags;               /* Value of pPage->aData[0] */
   int iSpace1 = 0;             /* First unused byte of aSpace1[] */
   int iOvflSpace = 0;          /* First unused byte of aOvflSpace[] */
-  int szScratch;               /* Size of scratch memory requested */
+  u64 szScratch;               /* Size of scratch memory requested */
   MemPage *apOld[NB];          /* pPage and up to two siblings */
   MemPage *apNew[NB+2];        /* pPage and up to NB siblings after balancing */
   u8 *pRight;                  /* Location in parent of right-sibling pointer */
@@ -78714,7 +79764,9 @@ static int balance_nonroot(
   CellArray b;                 /* Parsed information on cells being balanced */
 
   memset(abDone, 0, sizeof(abDone));
-  memset(&b, 0, sizeof(b));
+  assert( sizeof(b) - sizeof(b.ixNx) == offsetof(CellArray,ixNx) );
+  memset(&b, 0, sizeof(b)-sizeof(b.ixNx[0]));
+  b.ixNx[NB*2-1] = 0x7fffffff;
   pBt = pParent->pBt;
   assert( sqlite3_mutex_held(pBt->mutex) );
   assert( sqlite3PagerIswriteable(pParent->pDbPage) );
@@ -79305,7 +80357,8 @@ static int balance_nonroot(
       iOvflSpace += sz;
       assert( sz<=pBt->maxLocal+23 );
       assert( iOvflSpace <= (int)pBt->pageSize );
-      for(k=0; ALWAYS(k<NB*2-1) && b.ixNx[k]<=j; k++){}
+      assert( b.ixNx[NB*2-1]>j );
+      for(k=0; b.ixNx[k]<=j; k++){}
       pSrcEnd = b.apEnd[k];
       if( SQLITE_OVERFLOW(pSrcEnd, pCell, pCell+sz) ){
         rc = SQLITE_CORRUPT_BKPT;
@@ -79981,7 +81034,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
       if( pCur->info.nKey==pX->nKey ){
         BtreePayload x2;
         x2.pData = pX->pKey;
-        x2.nData = pX->nKey;
+        x2.nData = (int)pX->nKey;  assert( 
pX->nKey<=0x7fffffff );
        x2.nZero = 0;
        return btreeOverwriteCell(pCur, &x2);
      }
@@ -80162,7 +81215,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
 
   getCellInfo(pSrc);
   if( pSrc->info.nPayload<0x80 ){
-    *(aOut++) = pSrc->info.nPayload;
+    *(aOut++) = (u8)pSrc->info.nPayload;
   }else{
     aOut += sqlite3PutVarint(aOut, pSrc->info.nPayload);
   }
@@ -80175,7 +81228,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
   nRem = pSrc->info.nPayload;
   if( nIn==nRem && nIn<pDest->pPage->maxLocal ){
     memcpy(aOut, aIn, nIn);
-    pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace);
+    pBt->nPreformatSize = nIn + (int)(aOut - pBt->pTmpSpace);
     return SQLITE_OK;
   }else{
     int rc = SQLITE_OK;
@@ -80187,7 +81240,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
     u32 nOut;                    /* Size of output buffer aOut[] */
 
     nOut = btreePayloadToLocal(pDest->pPage, pSrc->info.nPayload);
-    pBt->nPreformatSize = nOut + (aOut - pBt->pTmpSpace);
+    pBt->nPreformatSize = (int)nOut + (int)(aOut - pBt->pTmpSpace);
     if( nOut<pSrc->info.nPayload ){
       pPgnoOut = &aOut[nOut];
       pBt->nPreformatSize += 4;
@@ -81808,6 +82861,7 @@ SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree *p){
 */
 SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){
   BtShared *pBt = p->pBt;
+  assert( nBytes==0 || nBytes==sizeof(Schema) );
   sqlite3BtreeEnter(p);
   if( !pBt->pSchema && nBytes ){
     pBt->pSchema = sqlite3DbMallocZero(0, nBytes);
@@ -82924,7 +83978,7 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){
 ** corresponding string value, then it is important that the string be
 ** derived from the numeric value, not the other way around, to ensure
 ** that the index and table are consistent. See ticket
-** https://www.sqlite.org/src/info/343634942dd54ab (2018-01-31) for
+** https://sqlite.org/src/info/343634942dd54ab (2018-01-31) for
 ** an example.
 **
 ** This routine looks at pMem to verify that if it has both a numeric
@@ -83110,7 +84164,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem *pMem){
     return;
   }
   if( pMem->enc!=SQLITE_UTF8 ) return;
-  if( NEVER(pMem->z==0) ) return;
+  assert( pMem->z!=0 );
   if( pMem->flags & MEM_Dyn ){
     if( pMem->xDel==sqlite3_free
      && sqlite3_msize(pMem->z) >= (u64)(pMem->n+1)
@@ -83829,27 +84883,30 @@ SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem *p){
 SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){
   int i;
   Mem *pX;
-  for(i=1, pX=pVdbe->aMem+1; i<pVdbe->nMem; i++, pX++){
-    if( pX->pScopyFrom==pMem ){
-      u16 mFlags;
-      if( pVdbe->db->flags & SQLITE_VdbeTrace ){
-        sqlite3DebugPrintf("Invalidate R[%d] due to change in R[%d]\n",
-          (int)(pX - pVdbe->aMem), (int)(pMem - pVdbe->aMem));
-      }
-      /* If pX is marked as a shallow copy of pMem, then try to verify that
-      ** no significant changes have been made to pX since the OP_SCopy.
-      ** A significant change would indicated a missed call to this
-      ** function for pX.  Minor changes, such as adding or removing a
-      ** dual type, are allowed, as long as the underlying value is the
-      ** same. */
-      mFlags = pMem->flags & pX->flags & pX->mScopyFlags;
-      assert( (mFlags&(MEM_Int|MEM_IntReal))==0 || pMem->u.i==pX->u.i );
-
-      /* pMem is the register that is changing. 
But also mark pX as
-      ** undefined so that we can quickly detect the shallow-copy error */
-      pX->flags = MEM_Undefined;
-      pX->pScopyFrom = 0;
-    }
+  if( pMem->bScopy ){
+    for(i=1, pX=pVdbe->aMem+1; i<pVdbe->nMem; i++, pX++){
+      if( pX->pScopyFrom==pMem ){
+        u16 mFlags;
+        if( pVdbe->db->flags & SQLITE_VdbeTrace ){
+          sqlite3DebugPrintf("Invalidate R[%d] due to change in R[%d]\n",
+            (int)(pX - pVdbe->aMem), (int)(pMem - pVdbe->aMem));
+        }
+        /* If pX is marked as a shallow copy of pMem, then try to verify that
+        ** no significant changes have been made to pX since the OP_SCopy.
+        ** A significant change would indicate a missed call to this
+        ** function for pX.  Minor changes, such as adding or removing a
+        ** dual type, are allowed, as long as the underlying value is the
+        ** same. */
+        mFlags = pMem->flags & pX->flags & pX->mScopyFlags;
+        assert( (mFlags&(MEM_Int|MEM_IntReal))==0 || pMem->u.i==pX->u.i );
+
+        /* pMem is the register that is changing.  But also mark pX as
+        ** undefined so that we can quickly detect the shallow-copy error */
+        pX->flags = MEM_Undefined;
+        pX->pScopyFrom = 0;
+      }
+    }
+    pMem->bScopy = 0;
   }
   pMem->pScopyFrom = 0;
 }
@@ -84220,7 +85277,7 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
 
     if( pRec==0 ){
       Index *pIdx = p->pIdx;      /* Index being probed */
-      int nByte;                  /* Bytes of space to allocate */
+      i64 nByte;                  /* Bytes of space to allocate */
       int i;                      /* Counter variable */
       int nCol = pIdx->nColumn;   /* Number of index columns including rowid */
 
@@ -84286,7 +85343,7 @@ static int valueFromFunction(
 ){
   sqlite3_context ctx;              /* Context object for function invocation */
   sqlite3_value **apVal = 0;        /* Function arguments */
-  int nVal = 0;                     /* Size of apVal[] array */
+  int nVal = 0;                     /* Number of function arguments */
   FuncDef *pFunc = 0;               /* Function definition */
   sqlite3_value *pVal = 0;          /* New value */
   int rc = SQLITE_OK;               /* Return code */
@@ -84317,7 +85374,8 @@ static int valueFromFunction(
       goto value_from_function_out;
     }
     for(i=0; i<nVal; i++){
-      rc = sqlite3ValueFromExpr(db, pList->a[i].pExpr, enc, aff, &apVal[i]);
+      rc = sqlite3Stat4ValueFromExpr(pCtx->pParse, pList->a[i].pExpr, aff,
+          &apVal[i]);
       if( apVal[i]==0 || rc!=SQLITE_OK ) goto value_from_function_out;
     }
   }
@@ -85283,12 +86341,10 @@ SQLITE_PRIVATE int sqlite3VdbeAddFunctionCall(
   int eCallCtx                   /* Calling context */
 ){
   Vdbe *v = pParse->pVdbe;
-  int nByte;
   int addr;
   sqlite3_context *pCtx;
   assert( v );
-  nByte = sizeof(*pCtx) + (nArg-1)*sizeof(sqlite3_value*);
-  pCtx = sqlite3DbMallocRawNN(pParse->db, nByte);
+  pCtx = sqlite3DbMallocRawNN(pParse->db, SZ_CONTEXT(nArg));
   if( pCtx==0 ){
     assert( pParse->db->mallocFailed );
     freeEphemeralFunction(pParse->db, (FuncDef*)pFunc);
@@ -85564,7 +86620,7 @@ static Op *opIterNext(VdbeOpIter *p){
     }
 
     if( pRet->p4type==P4_SUBPROGRAM ){
-      int nByte = (p->nSub+1)*sizeof(SubProgram*);
+      i64 nByte = (1+(u64)p->nSub)*sizeof(SubProgram*);
       int j;
       for(j=0; j<p->nSub; j++){
         if( p->apSub[j]==pRet->p4.pProgram ) break;
      }
@@ -85694,8 +86750,8 @@ SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe *p){
 ** (1) For each jump instruction with a negative P2 value (a label)
 **     resolve the P2 value to an actual address.
 **
-** (2) Compute the maximum number of arguments used by any SQL function
-**     and store that value in *pMaxFuncArgs.
+** (2) Compute the maximum number of arguments used by the xUpdate/xFilter
+**     methods of any virtual table and store that value in *pMaxVtabArgs.
 **
 ** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately
 ** indicate what the prepared statement actually does. 
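The sqlite3VdbeMemAboutToChange() rewrite above is a classic dirty-flag optimization: the O(nMem) invalidation scan now runs only when the register being changed has ever been the source of an OP_SCopy. The shape of it, as a toy (all names hypothetical):

#include <stddef.h>

typedef struct Reg { struct Reg *pScopyFrom; int bScopy; int valid; } Reg;

static void aboutToChange(Reg *aReg, size_t nReg, Reg *pMem){
  if( pMem->bScopy ){   /* scan only if some register shallow-copied pMem */
    for(size_t i=0; i<nReg; i++){
      if( aReg[i].pScopyFrom==pMem ){
        aReg[i].valid = 0;            /* invalidate the stale shallow copy */
        aReg[i].pScopyFrom = NULL;
      }
    }
    pMem->bScopy = 0;
  }
  pMem->pScopyFrom = NULL;
}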
@@ -85708,8 +86764,8 @@ SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe *p){ ** script numbers the opcodes correctly. Changes to this routine must be ** coordinated with changes to mkopcodeh.tcl. */ -static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ - int nMaxArgs = *pMaxFuncArgs; +static void resolveP2Values(Vdbe *p, int *pMaxVtabArgs){ + int nMaxVtabArgs = *pMaxVtabArgs; Op *pOp; Parse *pParse = p->pParse; int *aLabel = pParse->aLabel; @@ -85754,15 +86810,19 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ } #ifndef SQLITE_OMIT_VIRTUALTABLE case OP_VUpdate: { - if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; + if( pOp->p2>nMaxVtabArgs ) nMaxVtabArgs = pOp->p2; break; } case OP_VFilter: { int n; + /* The instruction immediately prior to VFilter will be an + ** OP_Integer that sets the "argc" value for the VFilter. See + ** the code where OP_VFilter is generated at tag-20250207a. */ assert( (pOp - p->aOp) >= 3 ); assert( pOp[-1].opcode==OP_Integer ); + assert( pOp[-1].p2==pOp->p3+1 ); n = pOp[-1].p1; - if( n>nMaxArgs ) nMaxArgs = n; + if( n>nMaxVtabArgs ) nMaxVtabArgs = n; /* Fall through into the default case */ /* no break */ deliberate_fall_through } @@ -85803,7 +86863,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ pParse->aLabel = 0; } pParse->nLabel = 0; - *pMaxFuncArgs = nMaxArgs; + *pMaxVtabArgs = nMaxVtabArgs; assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) ); } @@ -86032,7 +87092,7 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus( const char *zName /* Name of table or index being scanned */ ){ if( IS_STMT_SCANSTATUS(p->db) ){ - sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); + i64 nByte = (1+(i64)p->nScan) * sizeof(ScanStatus); ScanStatus *aNew; aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); if( aNew ){ @@ -86142,6 +87202,9 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){ */ SQLITE_PRIVATE void sqlite3VdbeTypeofColumn(Vdbe *p, int iDest){ VdbeOp *pOp = sqlite3VdbeGetLastOp(p); +#ifdef SQLITE_DEBUG + while( pOp->opcode==OP_ReleaseReg ) pOp--; +#endif if( pOp->p3==iDest && pOp->opcode==OP_Column ){ pOp->p5 |= OPFLAG_TYPEOFARG; } @@ -86251,6 +87314,12 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){ if( db->pnBytesFreed==0 ) sqlite3DeleteTable(db, (Table*)p4); break; } + case P4_SUBRTNSIG: { + SubrtnSig *pSig = (SubrtnSig*)p4; + sqlite3DbFree(db, pSig->zAff); + sqlite3DbFree(db, pSig); + break; + } } } @@ -86830,6 +87899,11 @@ SQLITE_PRIVATE char *sqlite3VdbeDisplayP4(sqlite3 *db, Op *pOp){ zP4 = pOp->p4.pTab->zName; break; } + case P4_SUBRTNSIG: { + SubrtnSig *pSig = pOp->p4.pSubrtnSig; + sqlite3_str_appendf(&x, "subrtnsig:%d,%s", pSig->selId, pSig->zAff); + break; + } default: { zP4 = pOp->p4.z; } @@ -86971,6 +88045,7 @@ SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, VdbeOp *pOp){ ** will be initialized before use. 
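sqlite3VdbeScanStatus() above grows its array with a 64-bit size computation and, on allocation failure, simply keeps the old array. The same grow-by-one pattern in plain C (a sketch; ScanStat and appendStat are made-up names):

#include <stdlib.h>
#include <stdint.h>

typedef struct { const char *zName; } ScanStat;

static ScanStat *appendStat(ScanStat *a, int *pn, const char *zName){
  int64_t nByte = (1 + (int64_t)*pn) * sizeof(ScanStat);
  ScanStat *aNew = realloc(a, (size_t)nByte);
  if( aNew ){
    aNew[*pn].zName = zName;   /* fill the new final slot */
    (*pn)++;
    return aNew;
  }
  return a;                    /* on OOM keep the old array, as SQLite does */
}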
*/ static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){ + assert( db!=0 ); if( N>0 ){ do{ p->flags = flags; @@ -86978,6 +88053,7 @@ static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){ p->szMalloc = 0; #ifdef SQLITE_DEBUG p->pScopyFrom = 0; + p->bScopy = 0; #endif p++; }while( (--N)>0 ); @@ -86996,6 +88072,7 @@ static void releaseMemArray(Mem *p, int N){ if( p && N ){ Mem *pEnd = &p[N]; sqlite3 *db = p->db; + assert( db!=0 ); if( db->pnBytesFreed ){ do{ if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); @@ -87467,7 +88544,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( int nVar; /* Number of parameters */ int nMem; /* Number of VM memory registers */ int nCursor; /* Number of cursors required */ - int nArg; /* Number of arguments in subprograms */ + int nArg; /* Max number args to xFilter or xUpdate */ int n; /* Loop counter */ struct ReusableSpace x; /* Reusable bulk memory */ @@ -87476,6 +88553,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( assert( pParse!=0 ); assert( p->eVdbeState==VDBE_INIT_STATE ); assert( pParse==p->pParse ); + assert( pParse->db==p->db ); p->pVList = pParse->pVList; pParse->pVList = 0; db = p->db; @@ -87538,6 +88616,9 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*)); } } +#ifdef SQLITE_DEBUG + p->napArg = nArg; +#endif if( db->mallocFailed ){ p->nVar = 0; @@ -89035,6 +90116,7 @@ SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord( ){ UnpackedRecord *p; /* Unpacked record to return */ int nByte; /* Number of bytes required for *p */ + assert( sizeof(UnpackedRecord) + sizeof(Mem)*65536 < 0x7fffffff ); nByte = ROUND8P(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nKeyField+1); p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte); if( !p ) return 0; @@ -89339,7 +90421,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem ** We must use separate SQLITE_NOINLINE functions here, since otherwise ** optimizer code movement causes gcov to become very confused. 
 */
-#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG)
+#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG)
 static int SQLITE_NOINLINE doubleLt(double a, double b){ return ar );
-    testcase( x==r );
-    return (x<r) ? -1 : (x>r);
   }else{
     i64 y;
     if( r<-9223372036854775808.0 ) return +1;
@@ -90348,10 +91423,11 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
   preupdate.pCsr = pCsr;
   preupdate.op = op;
   preupdate.iNewReg = iReg;
-  preupdate.keyinfo.db = db;
-  preupdate.keyinfo.enc = ENC(db);
-  preupdate.keyinfo.nKeyField = pTab->nCol;
-  preupdate.keyinfo.aSortFlags = (u8*)&fakeSortOrder;
+  preupdate.pKeyinfo = (KeyInfo*)&preupdate.keyinfoSpace;
+  preupdate.pKeyinfo->db = db;
+  preupdate.pKeyinfo->enc = ENC(db);
+  preupdate.pKeyinfo->nKeyField = pTab->nCol;
+  preupdate.pKeyinfo->aSortFlags = (u8*)&fakeSortOrder;
   preupdate.iKey1 = iKey1;
   preupdate.iKey2 = iKey2;
   preupdate.pTab = pTab;
@@ -90361,8 +91437,9 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
   db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2);
   db->pPreUpdate = 0;
   sqlite3DbFree(db, preupdate.aRecord);
-  vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pUnpacked);
-  vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pNewUnpacked);
+  vdbeFreeUnpacked(db, preupdate.pKeyinfo->nKeyField+1,preupdate.pUnpacked);
+  vdbeFreeUnpacked(db, preupdate.pKeyinfo->nKeyField+1,preupdate.pNewUnpacked);
+  sqlite3VdbeMemRelease(&preupdate.oldipk);
   if( preupdate.aNew ){
     int i;
     for(i=0; i<pCsr->nField; i++){
       sqlite3VdbeMemRelease(&preupdate.aNew[i]);
     }
     sqlite3DbNNFreeNN(db, preupdate.aNew);
   }
+  if( preupdate.apDflt ){
+    int i;
+    for(i=0; i<pTab->nCol; i++){
+      sqlite3ValueFree(preupdate.apDflt[i]);
+    }
+    sqlite3DbFree(db, preupdate.apDflt);
+  }
 }
 #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
@@ -90440,7 +91524,6 @@ static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){
   sqlite3_int64 iNow;
   sqlite3_int64 iElapse;
   assert( p->startTime>0 );
-  assert( (db->mTrace & (SQLITE_TRACE_PROFILE|SQLITE_TRACE_XPROFILE))!=0 );
   assert( db->init.busy==0 );
   assert( p->zSql!=0 );
   sqlite3OsCurrentTimeInt64(db->pVfs, &iNow);
@@ -91160,7 +92243,7 @@ static int sqlite3Step(Vdbe *p){
   }
   assert( db->nVdbeWrite>0 || db->autoCommit==0
-      || (db->nDeferredCons==0 && db->nDeferredImmCons==0)
+      || ((db->nDeferredCons + db->nDeferredImmCons)==0)
   );
 
 #ifndef SQLITE_OMIT_TRACE
@@ -91671,6 +92754,7 @@ static const Mem *columnNullValue(void){
 #ifdef SQLITE_DEBUG
         /* .pScopyFrom = */ (Mem*)0,
         /* .mScopyFlags= */ 0,
+        /* .bScopy     = */ 0,
 #endif
       };
   return &nullMem;
@@ -91712,7 +92796,7 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){
 **     sqlite3_column_int64()
 **     sqlite3_column_text()
 **     sqlite3_column_text16()
-**     sqlite3_column_real()
+**     sqlite3_column_double()
 **     sqlite3_column_bytes()
 **     sqlite3_column_bytes16()
 **     sqlite3_column_blob()
@@ -91998,6 +93082,17 @@ SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
 **
 ** The error code stored in database p->db is overwritten with the return
 ** value in any case.
+**
+** (tag-20240917-01) If vdbeUnbind(p,(u32)(i-1)) returns SQLITE_OK,
+** that means all of the following will be true:
+**
+**     p!=0
+**     p->pVar!=0
+**     i>0
+**     i<=p->nVar
+**
+** An assert() is normally added after vdbeUnbind() to help static analyzers
+** realize this. 
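The tag-20240917-01 asserts that follow each successful vdbeUnbind() call restate the function's success postconditions where static analyzers can see them. The general pattern, with a hypothetical checked setter (Stmt, unbind, and bindInt are illustrative names, not SQLite's API):

#include <assert.h>
#include <stddef.h>

typedef struct { int *aVar; int nVar; } Stmt;

static int unbind(Stmt *p, int i){
  if( p==NULL || p->aVar==NULL || i<1 || i>p->nVar ) return 1;  /* error */
  p->aVar[i-1] = 0;
  return 0;                                                     /* ok */
}

static int bindInt(Stmt *p, int i, int v){
  if( unbind(p, i)==0 ){
    /* restate what unbind()==0 guarantees, as the SQLite asserts do */
    assert( p!=NULL && p->aVar!=NULL && i>0 && i<=p->nVar );
    p->aVar[i-1] = v;
    return 0;
  }
  return 1;
}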
*/ static int vdbeUnbind(Vdbe *p, unsigned int i){ Mem *pVar; @@ -92055,6 +93150,7 @@ static int bindText( rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ if( zData!=0 ){ pVar = &p->aVar[i-1]; rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel); @@ -92104,6 +93200,7 @@ SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){ Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue); sqlite3_mutex_leave(p->db->mutex); } @@ -92117,6 +93214,7 @@ SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValu Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue); sqlite3_mutex_leave(p->db->mutex); } @@ -92127,6 +93225,7 @@ SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){ Vdbe *p = (Vdbe*)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ sqlite3_mutex_leave(p->db->mutex); } return rc; @@ -92142,6 +93241,7 @@ SQLITE_API int sqlite3_bind_pointer( Vdbe *p = (Vdbe*)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ sqlite3VdbeMemSetPointer(&p->aVar[i-1], pPtr, zPTtype, xDestructor); sqlite3_mutex_leave(p->db->mutex); }else if( xDestructor ){ @@ -92169,7 +93269,7 @@ SQLITE_API int sqlite3_bind_text64( assert( xDel!=SQLITE_DYNAMIC ); if( enc!=SQLITE_UTF8 ){ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; - nData &= ~(u16)1; + nData &= ~(u64)1; } return bindText(pStmt, i, zData, nData, xDel, enc); } @@ -92223,6 +93323,7 @@ SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){ Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ #ifndef SQLITE_OMIT_INCRBLOB sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); #else @@ -92536,6 +93637,7 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa PreUpdate *p; Mem *pMem; int rc = SQLITE_OK; + int iStore = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( db==0 || ppValue==0 ){ @@ -92550,44 +93652,75 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa goto preupdate_old_out; } if( p->pPk ){ - iIdx = sqlite3TableColumnToIndex(p->pPk, iIdx); + iStore = sqlite3TableColumnToIndex(p->pPk, iIdx); + }else{ + iStore = sqlite3TableColumnToStorage(p->pTab, iIdx); } - if( iIdx>=p->pCsr->nField || iIdx<0 ){ + if( iStore>=p->pCsr->nField || iStore<0 ){ rc = SQLITE_RANGE; goto preupdate_old_out; } - /* If the old.* record has not yet been loaded into memory, do so now. */ - if( p->pUnpacked==0 ){ - u32 nRec; - u8 *aRec; + if( iIdx==p->pTab->iPKey ){ + *ppValue = pMem = &p->oldipk; + sqlite3VdbeMemSetInt64(pMem, p->iKey1); + }else{ + + /* If the old.* record has not yet been loaded into memory, do so now. 
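**
** (Sketch of how this code is reached, assuming a build compiled with
** SQLITE_ENABLE_PREUPDATE_HOOK; the callback body below is hypothetical,
** but the two APIs are the documented ones:
**
**     static void xPreUpdate(void *pArg, sqlite3 *db, int op,
**                            const char *zDb, const char *zTbl,
**                            sqlite3_int64 iKey1, sqlite3_int64 iKey2){
**       sqlite3_value *pOld = 0;
**       if( op!=SQLITE_INSERT
**        && sqlite3_preupdate_old(db, 0, &pOld)==SQLITE_OK ){
**         printf("old col 0: %s\n", (const char*)sqlite3_value_text(pOld));
**       }
**     }
**     sqlite3_preupdate_hook(db, xPreUpdate, 0);
**
** The value returned through ppValue is owned by SQLite and is valid only
** until the callback returns.)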
*/ + if( p->pUnpacked==0 ){ + u32 nRec; + u8 *aRec; - assert( p->pCsr->eCurType==CURTYPE_BTREE ); - nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor); - aRec = sqlite3DbMallocRaw(db, nRec); - if( !aRec ) goto preupdate_old_out; - rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec); - if( rc==SQLITE_OK ){ - p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec); - if( !p->pUnpacked ) rc = SQLITE_NOMEM; - } - if( rc!=SQLITE_OK ){ - sqlite3DbFree(db, aRec); - goto preupdate_old_out; + assert( p->pCsr->eCurType==CURTYPE_BTREE ); + nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor); + aRec = sqlite3DbMallocRaw(db, nRec); + if( !aRec ) goto preupdate_old_out; + rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec); + if( rc==SQLITE_OK ){ + p->pUnpacked = vdbeUnpackRecord(p->pKeyinfo, nRec, aRec); + if( !p->pUnpacked ) rc = SQLITE_NOMEM; + } + if( rc!=SQLITE_OK ){ + sqlite3DbFree(db, aRec); + goto preupdate_old_out; + } + p->aRecord = aRec; } - p->aRecord = aRec; - } - pMem = *ppValue = &p->pUnpacked->aMem[iIdx]; - if( iIdx==p->pTab->iPKey ){ - sqlite3VdbeMemSetInt64(pMem, p->iKey1); - }else if( iIdx>=p->pUnpacked->nField ){ - *ppValue = (sqlite3_value *)columnNullValue(); - }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){ - if( pMem->flags & (MEM_Int|MEM_IntReal) ){ - testcase( pMem->flags & MEM_Int ); - testcase( pMem->flags & MEM_IntReal ); - sqlite3VdbeMemRealify(pMem); + pMem = *ppValue = &p->pUnpacked->aMem[iStore]; + if( iStore>=p->pUnpacked->nField ){ + /* This occurs when the table has been extended using ALTER TABLE + ** ADD COLUMN. The value to return is the default value of the column. */ + Column *pCol = &p->pTab->aCol[iIdx]; + if( pCol->iDflt>0 ){ + if( p->apDflt==0 ){ + int nByte; + assert( sizeof(sqlite3_value*)*UMXV(p->pTab->nCol) < 0x7fffffff ); + nByte = sizeof(sqlite3_value*)*p->pTab->nCol; + p->apDflt = (sqlite3_value**)sqlite3DbMallocZero(db, nByte); + if( p->apDflt==0 ) goto preupdate_old_out; + } + if( p->apDflt[iIdx]==0 ){ + sqlite3_value *pVal = 0; + Expr *pDflt; + assert( p->pTab!=0 && IsOrdinaryTable(p->pTab) ); + pDflt = p->pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr; + rc = sqlite3ValueFromExpr(db, pDflt, ENC(db), pCol->affinity, &pVal); + if( rc==SQLITE_OK && pVal==0 ){ + rc = SQLITE_CORRUPT_BKPT; + } + p->apDflt[iIdx] = pVal; + } + *ppValue = p->apDflt[iIdx]; + }else{ + *ppValue = (sqlite3_value *)columnNullValue(); + } + }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){ + if( pMem->flags & (MEM_Int|MEM_IntReal) ){ + testcase( pMem->flags & MEM_Int ); + testcase( pMem->flags & MEM_IntReal ); + sqlite3VdbeMemRealify(pMem); + } } } @@ -92609,7 +93742,7 @@ SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){ #else p = db->pPreUpdate; #endif - return (p ? p->keyinfo.nKeyField : 0); + return (p ? 
p->pKeyinfo->nKeyField : 0); } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ @@ -92661,6 +93794,7 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa PreUpdate *p; int rc = SQLITE_OK; Mem *pMem; + int iStore = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( db==0 || ppValue==0 ){ @@ -92673,9 +93807,12 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa goto preupdate_new_out; } if( p->pPk && p->op!=SQLITE_UPDATE ){ - iIdx = sqlite3TableColumnToIndex(p->pPk, iIdx); + iStore = sqlite3TableColumnToIndex(p->pPk, iIdx); + }else{ + iStore = sqlite3TableColumnToStorage(p->pTab, iIdx); } - if( iIdx>=p->pCsr->nField || iIdx<0 ){ + + if( iStore>=p->pCsr->nField || iStore<0 ){ rc = SQLITE_RANGE; goto preupdate_new_out; } @@ -92688,40 +93825,41 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa Mem *pData = &p->v->aMem[p->iNewReg]; rc = ExpandBlob(pData); if( rc!=SQLITE_OK ) goto preupdate_new_out; - pUnpack = vdbeUnpackRecord(&p->keyinfo, pData->n, pData->z); + pUnpack = vdbeUnpackRecord(p->pKeyinfo, pData->n, pData->z); if( !pUnpack ){ rc = SQLITE_NOMEM; goto preupdate_new_out; } p->pNewUnpacked = pUnpack; } - pMem = &pUnpack->aMem[iIdx]; + pMem = &pUnpack->aMem[iStore]; if( iIdx==p->pTab->iPKey ){ sqlite3VdbeMemSetInt64(pMem, p->iKey2); - }else if( iIdx>=pUnpack->nField ){ + }else if( iStore>=pUnpack->nField ){ pMem = (sqlite3_value *)columnNullValue(); } }else{ - /* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required + /* For an UPDATE, memory cell (p->iNewReg+1+iStore) contains the required ** value. Make a copy of the cell contents and return a pointer to it. ** It is not safe to return a pointer to the memory cell itself as the ** caller may modify the value text encoding. */ assert( p->op==SQLITE_UPDATE ); if( !p->aNew ){ - p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem) * p->pCsr->nField); + assert( sizeof(Mem)*UMXV(p->pCsr->nField) < 0x7fffffff ); + p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem)*p->pCsr->nField); if( !p->aNew ){ rc = SQLITE_NOMEM; goto preupdate_new_out; } } - assert( iIdx>=0 && iIdxpCsr->nField ); - pMem = &p->aNew[iIdx]; + assert( iStore>=0 && iStorepCsr->nField ); + pMem = &p->aNew[iStore]; if( pMem->flags==0 ){ if( iIdx==p->pTab->iPKey ){ sqlite3VdbeMemSetInt64(pMem, p->iKey2); }else{ - rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iIdx]); + rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iStore]); if( rc!=SQLITE_OK ) goto preupdate_new_out; } } @@ -93135,6 +94273,104 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ +/* +** High-resolution hardware timer used for debugging and testing only. +*/ +#if defined(VDBE_PROFILE) \ + || defined(SQLITE_PERFORMANCE_TRACE) \ + || defined(SQLITE_ENABLE_STMT_SCANSTATUS) +/************** Include hwtime.h in the middle of vdbe.c *********************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 and x86_64 class CPUs. 
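**
** Typical interval-measurement usage, as a sketch (the routine is only
** compiled in when one of the guarding macros above is defined):
**
**     sqlite_uint64 t0 = sqlite3Hwtime();
**     ... code under test ...
**     printf("cycles: %llu\n", (unsigned long long)(sqlite3Hwtime()-t0));
**
** RDTSC reports raw cycle counts, so readings are best compared on the
** same core running at a fixed clock frequency.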
+*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on Pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if !defined(__STRICT_ANSI__) && \ + (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + /* + ** asm() is needed for hardware timing support. Without asm(), + ** disable the sqlite3Hwtime() routine. + ** + ** sqlite3Hwtime() is only used for some obscure debugging + ** and analysis configurations, not in any deliverable, so this + ** should not be a great loss. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in vdbe.c ***********************/ +#endif + /* ** Invoke this macro on memory cells just prior to changing the ** value of the cell. This macro verifies that shallow copies are @@ -93381,11 +94617,11 @@ static VdbeCursor *allocateCursor( */ Mem *pMem = iCur>0 ? 
&p->aMem[p->nMem-iCur] : p->aMem; - int nByte; + i64 nByte; VdbeCursor *pCx = 0; - nByte = - ROUND8P(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField + - (eCurType==CURTYPE_BTREE?sqlite3BtreeCursorSize():0); + nByte = SZ_VDBECURSOR(nField); + assert( ROUND8(nByte)==nByte ); + if( eCurType==CURTYPE_BTREE ) nByte += sqlite3BtreeCursorSize(); assert( iCur>=0 && iCur<p->nCursor ); if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/ @@ -93409,7 +94645,7 @@ static VdbeCursor *allocateCursor( pMem->szMalloc = 0; return 0; } - pMem->szMalloc = nByte; + pMem->szMalloc = (int)nByte; } p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->zMalloc; @@ -93418,8 +94654,8 @@ static VdbeCursor *allocateCursor( pCx->nField = nField; pCx->aOffset = &pCx->aType[nField]; if( eCurType==CURTYPE_BTREE ){ - pCx->uc.pCursor = (BtCursor*) - &pMem->z[ROUND8P(sizeof(VdbeCursor))+2*sizeof(u32)*nField]; + assert( ROUND8(SZ_VDBECURSOR(nField))==SZ_VDBECURSOR(nField) ); + pCx->uc.pCursor = (BtCursor*)&pMem->z[SZ_VDBECURSOR(nField)]; sqlite3BtreeCursorZero(pCx->uc.pCursor); } return pCx; @@ -93712,6 +94948,7 @@ static void registerTrace(int iReg, Mem *p){ printf("R[%d] = ", iReg); memTracePrint(p); if( p->pScopyFrom ){ + assert( p->pScopyFrom->bScopy ); printf(" <== R[%d]", (int)(p->pScopyFrom - &p[-iReg])); } printf("\n"); @@ -94328,7 +95565,7 @@ case OP_HaltIfNull: { /* in3 */ /* no break */ deliberate_fall_through } -/* Opcode: Halt P1 P2 * P4 P5 +/* Opcode: Halt P1 P2 P3 P4 P5 ** ** Exit immediately. All open cursors, etc are closed ** automatically. ** ** P1 is the result code returned by sqlite3_exec(), sqlite3_reset(), ** or sqlite3_finalize(). For a normal halt, this should be SQLITE_OK (0). ** For errors, it can be some other value. If P1!=0 then P2 will determine ** whether or not to rollback the current transaction. Do not rollback ** if P2==OE_Fail. Do the rollback if P2==OE_Rollback. If P2==OE_Abort, ** then back out all changes that have occurred during this execution of the ** VDBE, but do not rollback the transaction. ** -** If P4 is not null then it is an error message string. +** If P3 is not zero and P4 is NULL, then P3 is a register that holds the +** text of an error message. ** -** P5 is a value between 0 and 4, inclusive, that modifies the P4 string. +** If P3 is zero and P4 is not null then the error message string is held +** in P4. +** +** If P5 is a value between 1 and 4, inclusive, then the P4 error message +** string is modified as follows: ** -** 0: (no change) ** 1: NOT NULL constraint failed: P4 ** 2: UNIQUE constraint failed: P4 ** 3: CHECK constraint failed: P4 ** 4: FOREIGN KEY constraint failed: P4 ** -** If P5 is not zero and P4 is NULL, then everything after the ":" is -** omitted. +** If P3 is zero and P5 is not zero and P4 is NULL, then everything after +** the ":" is omitted. ** ** There is an implied "Halt 0 0 0" instruction inserted at the very end of ** every program. So a jump past the last instruction of the program @@ -94365,6 +95606,9 @@ case OP_Halt: { #ifdef SQLITE_DEBUG if( pOp->p2==OE_Abort ){ sqlite3VdbeAssertAbortable(p); } #endif + assert( pOp->p4type==P4_NOTUSED + || pOp->p4type==P4_STATIC + || pOp->p4type==P4_DYNAMIC ); /* A deliberately coded "OP_Halt SQLITE_INTERNAL * * * *" opcode indicates ** something is wrong with the code generator.
Raise an assertion in order @@ -94395,7 +95639,12 @@ case OP_Halt: { p->errorAction = (u8)pOp->p2; assert( pOp->p5<=4 ); if( p->rc ){ - if( pOp->p5 ){ + if( pOp->p3>0 && pOp->p4type==P4_NOTUSED ){ + const char *zErr; + assert( pOp->p3<=(p->nMem + 1 - p->nCursor) ); + zErr = sqlite3ValueText(&aMem[pOp->p3], SQLITE_UTF8); + sqlite3VdbeError(p, "%s", zErr); + }else if( pOp->p5 ){ static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK", "FOREIGN KEY" }; testcase( pOp->p5==1 ); @@ -94410,7 +95659,7 @@ case OP_Halt: { sqlite3VdbeError(p, "%s", pOp->p4.z); } pcx = (int)(pOp - aOp); - sqlite3_log(pOp->p1, "abort at %d in [%s]: %s", pcx, p->zSql, p->zErrMsg); + sqlite3_log(pOp->p1, "abort at %d: %s; [%s]", pcx, p->zErrMsg, p->zSql); } rc = sqlite3VdbeHalt(p); assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR ); @@ -94683,6 +95932,7 @@ case OP_Move: { { int i; for(i=1; inMem; i++){ if( aMem[i].pScopyFrom==pIn1 ){ + assert( aMem[i].bScopy ); aMem[i].pScopyFrom = pOut; } } @@ -94755,6 +96005,7 @@ case OP_SCopy: { /* out2 */ #ifdef SQLITE_DEBUG pOut->pScopyFrom = pIn1; pOut->mScopyFlags = pIn1->flags; + pIn1->bScopy = 1; #endif break; } @@ -95198,7 +96449,7 @@ case OP_RealAffinity: { /* in1 */ } #endif -#if !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_ANALYZE) +#if !defined(SQLITE_OMIT_CAST) || !defined(SQLITE_OMIT_ANALYZE) /* Opcode: Cast P1 P2 * * * ** Synopsis: affinity(r[P1]) ** @@ -95734,7 +96985,7 @@ case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */ break; } -/* Opcode: Once P1 P2 * * * +/* Opcode: Once P1 P2 P3 * * ** ** Fall through to the next instruction the first time this opcode is ** encountered on each invocation of the byte-code program. Jump to P2 @@ -95750,6 +97001,12 @@ case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */ ** whether or not the jump should be taken. The bitmask is necessary ** because the self-altering code trick does not work for recursive ** triggers. +** +** The P3 operand is not used directly by this opcode. However P3 is +** used by the code generator as follows: If this opcode is the start +** of a subroutine and that subroutine uses a Bloom filter, then P3 will +** be the register that holds that Bloom filter. See tag-202407032019 +** in the source code for implementation details. */ case OP_Once: { /* jump */ u32 iAddr; /* Address of this instruction */ @@ -96795,6 +98052,7 @@ case OP_MakeRecord: { zHdr += sqlite3PutVarint(zHdr, serial_type); if( pRec->n ){ assert( pRec->z!=0 ); + assert( pRec->z!=(const char*)sqlite3CtypeMap ); memcpy(zPayload, pRec->z, pRec->n); zPayload += pRec->n; } @@ -97438,23 +98696,23 @@ case OP_OpenWrite: if( pDb->pSchema->file_format < p->minWriteFileFormat ){ p->minWriteFileFormat = pDb->pSchema->file_format; } + if( pOp->p5 & OPFLAG_P2ISREG ){ + assert( p2>0 ); + assert( p2<=(u32)(p->nMem+1 - p->nCursor) ); + pIn2 = &aMem[p2]; + assert( memIsValid(pIn2) ); + assert( (pIn2->flags & MEM_Int)!=0 ); + sqlite3VdbeMemIntegerify(pIn2); + p2 = (int)pIn2->u.i; + /* The p2 value always comes from a prior OP_CreateBtree opcode and + ** that opcode will always set the p2 value to 2 or more or else fail. + ** If there were a failure, the prepared statement would have halted + ** before reaching this instruction. 
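**
** (Background for readers of this vendored code: page 1 of an SQLite
** database file always holds the schema btree root, so any root page
** number produced by OP_CreateBtree is necessarily 2 or larger, which
** is what the assert below relies on.)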
*/ + assert( p2>=2 ); + } }else{ wrFlag = 0; - } - if( pOp->p5 & OPFLAG_P2ISREG ){ - assert( p2>0 ); - assert( p2<=(u32)(p->nMem+1 - p->nCursor) ); - assert( pOp->opcode==OP_OpenWrite ); - pIn2 = &aMem[p2]; - assert( memIsValid(pIn2) ); - assert( (pIn2->flags & MEM_Int)!=0 ); - sqlite3VdbeMemIntegerify(pIn2); - p2 = (int)pIn2->u.i; - /* The p2 value always comes from a prior OP_CreateBtree opcode and - ** that opcode will always set the p2 value to 2 or more or else fail. - ** If there were a failure, the prepared statement would have halted - ** before reaching this instruction. */ - assert( p2>=2 ); + assert( (pOp->p5 & OPFLAG_P2ISREG)==0 ); } if( pOp->p4type==P4_KEYINFO ){ pKeyInfo = pOp->p4.pKeyInfo; @@ -97631,8 +98889,13 @@ case OP_OpenEphemeral: { /* ncycle */ } } pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); + assert( p->apCsr[pOp->p1]==pCx ); if( rc ){ + assert( !sqlite3BtreeClosesWithCursor(pCx->ub.pBtx, pCx->uc.pCursor) ); sqlite3BtreeClose(pCx->ub.pBtx); + p->apCsr[pOp->p1] = 0; /* Not required; helps with static analysis */ + }else{ + assert( sqlite3BtreeClosesWithCursor(pCx->ub.pBtx, pCx->uc.pCursor) ); } } } @@ -98410,6 +99673,7 @@ case OP_Found: { /* jump, in3, ncycle */ r.pKeyInfo = pC->pKeyInfo; r.default_rc = 0; #ifdef SQLITE_DEBUG + (void)sqlite3FaultSim(50); /* For use by --counter in TH3 */ for(ii=0; iinCsr * sizeof(VdbeCursor*) - + (pProgram->nOp + 7)/8; + + (7 + (i64)pProgram->nOp)/8; pFrame = sqlite3DbMallocZero(db, nByte); if( !pFrame ){ goto no_mem; @@ -100468,7 +101732,7 @@ case OP_Program: { /* jump0 */ sqlite3VdbeMemRelease(pRt); pRt->flags = MEM_Blob|MEM_Dyn; pRt->z = (char*)pFrame; - pRt->n = nByte; + pRt->n = (int)nByte; pRt->xDel = sqlite3VdbeFrameMemDel; pFrame->v = p; @@ -100567,12 +101831,14 @@ case OP_Param: { /* out2 */ ** statement counter is incremented (immediate foreign key constraints). */ case OP_FkCounter: { - if( db->flags & SQLITE_DeferFKs ){ - db->nDeferredImmCons += pOp->p2; - }else if( pOp->p1 ){ + if( pOp->p1 ){ db->nDeferredCons += pOp->p2; }else{ - p->nFkConstraint += pOp->p2; + if( db->flags & SQLITE_DeferFKs ){ + db->nDeferredImmCons += pOp->p2; + }else{ + p->nFkConstraint += pOp->p2; + } } break; } @@ -100772,18 +102038,29 @@ case OP_AggInverse: case OP_AggStep: { int n; sqlite3_context *pCtx; + u64 nAlloc; assert( pOp->p4type==P4_FUNCDEF ); n = pOp->p5; assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem+1 - p->nCursor)+1) ); assert( pOp->p3p2 || pOp->p3>=pOp->p2+n ); - pCtx = sqlite3DbMallocRawNN(db, n*sizeof(sqlite3_value*) + - (sizeof(pCtx[0]) + sizeof(Mem) - sizeof(sqlite3_value*))); + + /* Allocate space for (a) the context object and (n-1) extra pointers + ** to append to the sqlite3_context.argv[1] array, and (b) a memory + ** cell in which to store the accumulation. Be careful that the memory + ** cell is 8-byte aligned, even on platforms where a pointer is 32-bits. + ** + ** Note: We could avoid this by using a regular memory cell from aMem[] for + ** the accumulator, instead of allocating one here. 
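**
** The resulting allocation, sketched (SZ_CONTEXT() and ROUND8P() are the
** macros used by the code that follows):
**
**     pCtx -->  +--------------------------------------+
**               | sqlite3_context, argv[] sized for n  |
**               +--------------------------------------+  offset ROUND8P(SZ_CONTEXT(n))
**     pOut -->  | Mem accumulator, 8-byte aligned      |
**               +--------------------------------------+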
*/ + nAlloc = ROUND8P( SZ_CONTEXT(n) ); + pCtx = sqlite3DbMallocRawNN(db, nAlloc + sizeof(Mem)); if( pCtx==0 ) goto no_mem; - pCtx->pMem = 0; - pCtx->pOut = (Mem*)&(pCtx->argv[n]); + pCtx->pOut = (Mem*)((u8*)pCtx + nAlloc); + assert( EIGHT_BYTE_ALIGNMENT(pCtx->pOut) ); + sqlite3VdbeMemInit(pCtx->pOut, db, MEM_Null); + pCtx->pMem = 0; pCtx->pFunc = pOp->p4.pFunc; pCtx->iOp = (int)(pOp - aOp); pCtx->pVdbe = p; @@ -101436,6 +102713,7 @@ case OP_VFilter: { /* jump, ncycle */ /* Invoke the xFilter method */ apArg = p->apArg; + assert( nArg<=p->napArg ); for(i = 0; ivtabOnConflict; apArg = p->apArg; pX = &aMem[pOp->p3]; + assert( nArg<=p->napArg ); for(i=0; iopcode==OP_Noop || pOp->opcode==OP_Explain ); @@ -102207,8 +103501,8 @@ default: { /* This is really OP_Noop, OP_Explain */ p->rc = rc; sqlite3SystemError(db, rc); testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(rc, "statement aborts at %d: [%s] %s", - (int)(pOp - aOp), p->zSql, p->zErrMsg); + sqlite3_log(rc, "statement aborts at %d: %s; [%s]", + (int)(pOp - aOp), p->zErrMsg, p->zSql); if( p->eVdbeState==VDBE_RUN_STATE ) sqlite3VdbeHalt(p); if( rc==SQLITE_IOERR_NOMEM ) sqlite3OomFault(db); if( rc==SQLITE_CORRUPT && db->autoCommit==0 ){ @@ -102417,6 +103711,7 @@ SQLITE_API int sqlite3_blob_open( char *zErr = 0; Table *pTab; Incrblob *pBlob = 0; + int iDb; Parse sParse; #ifdef SQLITE_ENABLE_API_ARMOR @@ -102451,13 +103746,21 @@ SQLITE_API int sqlite3_blob_open( pTab = 0; sqlite3ErrorMsg(&sParse, "cannot open table without rowid: %s", zTable); } + if( pTab && (pTab->tabFlags&TF_HasGenerated)!=0 ){ + pTab = 0; + sqlite3ErrorMsg(&sParse, "cannot open table with generated columns: %s", + zTable); + } #ifndef SQLITE_OMIT_VIEW if( pTab && IsView(pTab) ){ pTab = 0; sqlite3ErrorMsg(&sParse, "cannot open view: %s", zTable); } #endif - if( !pTab ){ + if( pTab==0 + || ((iDb = sqlite3SchemaToIndex(db, pTab->pSchema))==1 && + sqlite3OpenTempDatabase(&sParse)) + ){ if( sParse.zErrMsg ){ sqlite3DbFree(db, zErr); zErr = sParse.zErrMsg; @@ -102468,15 +103771,11 @@ SQLITE_API int sqlite3_blob_open( goto blob_open_out; } pBlob->pTab = pTab; - pBlob->zDb = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zDbSName; + pBlob->zDb = db->aDb[iDb].zDbSName; /* Now search pTab for the exact column. 
*/ - for(iCol=0; iColnCol; iCol++) { - if( sqlite3StrICmp(pTab->aCol[iCol].zCnName, zColumn)==0 ){ - break; - } - } - if( iCol==pTab->nCol ){ + iCol = sqlite3ColumnIndex(pTab, zColumn); + if( iCol<0 ){ sqlite3DbFree(db, zErr); zErr = sqlite3MPrintf(db, "no such column: \"%s\"", zColumn); rc = SQLITE_ERROR; @@ -102556,7 +103855,6 @@ SQLITE_API int sqlite3_blob_open( {OP_Halt, 0, 0, 0}, /* 5 */ }; Vdbe *v = (Vdbe *)pBlob->pStmt; - int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); VdbeOp *aOp; sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, wrFlag, @@ -103134,9 +104432,12 @@ struct VdbeSorter { u8 iPrev; /* Previous thread used to flush PMA */ u8 nTask; /* Size of aTask[] array */ u8 typeMask; - SortSubtask aTask[1]; /* One or more subtasks */ + SortSubtask aTask[FLEXARRAY]; /* One or more subtasks */ }; +/* Size (in bytes) of a VdbeSorter object that works with N or fewer subtasks */ +#define SZ_VDBESORTER(N) (offsetof(VdbeSorter,aTask)+(N)*sizeof(SortSubtask)) + #define SORTER_TYPE_INTEGER 0x01 #define SORTER_TYPE_TEXT 0x02 @@ -103358,13 +104659,14 @@ static int vdbePmaReadBlob( while( nRem>0 ){ int rc; /* vdbePmaReadBlob() return code */ int nCopy; /* Number of bytes to copy */ - u8 *aNext; /* Pointer to buffer to copy data from */ + u8 *aNext = 0; /* Pointer to buffer to copy data from */ nCopy = nRem; if( nRem>p->nBuffer ) nCopy = p->nBuffer; rc = vdbePmaReadBlob(p, nCopy, &aNext); if( rc!=SQLITE_OK ) return rc; assert( aNext!=p->aAlloc ); + assert( aNext!=0 ); memcpy(&p->aAlloc[nByte - nRem], aNext, nCopy); nRem -= nCopy; } @@ -103737,7 +105039,7 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit( VdbeSorter *pSorter; /* The new sorter */ KeyInfo *pKeyInfo; /* Copy of pCsr->pKeyInfo with db==0 */ int szKeyInfo; /* Size of pCsr->pKeyInfo in bytes */ - int sz; /* Size of pSorter in bytes */ + i64 sz; /* Size of pSorter in bytes */ int rc = SQLITE_OK; #if SQLITE_MAX_WORKER_THREADS==0 # define nWorker 0 @@ -103765,8 +105067,10 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit( assert( pCsr->pKeyInfo ); assert( !pCsr->isEphemeral ); assert( pCsr->eCurType==CURTYPE_SORTER ); - szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nKeyField-1)*sizeof(CollSeq*); - sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask); + assert( sizeof(KeyInfo) + UMXV(pCsr->pKeyInfo->nKeyField)*sizeof(CollSeq*) + < 0x7fffffff ); + szKeyInfo = SZ_KEYINFO(pCsr->pKeyInfo->nKeyField); + sz = SZ_VDBESORTER(nWorker+1); pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo); pCsr->uc.pSorter = pSorter; @@ -103978,7 +105282,7 @@ static int vdbeSorterJoinAll(VdbeSorter *pSorter, int rcin){ */ static MergeEngine *vdbeMergeEngineNew(int nReader){ int N = 2; /* Smallest power of two >= nReader */ - int nByte; /* Total bytes of space to allocate */ + i64 nByte; /* Total bytes of space to allocate */ MergeEngine *pNew; /* Pointer to allocated object to return */ assert( nReader<=SORTER_MAX_MERGE_COUNT ); @@ -104230,6 +105534,10 @@ static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){ p->u.pNext = 0; for(i=0; aSlot[i]; i++){ p = vdbeSorterMerge(pTask, p, aSlot[i]); + /* ,--Each aSlot[] holds twice as much as the previous. So we cannot use + ** | up all 64 aSlots[] with only a 64-bit address space. 
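** | (Arithmetic behind that claim: aSlot[i] holds a merged list of roughly
** | 2^i records, so reaching the last of the 64 slots would take on the
** | order of 2^63 records of at least one byte each, more than a 64-bit
** | address space can hold.)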
+ ** v */ + assert( ipSrc; if( ALWAYS(pSrc) ){ for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ - if( pItem->pSelect && sqlite3WalkSelect(pWalker, pItem->pSelect) ){ + if( pItem->fg.isSubquery + && sqlite3WalkSelect(pWalker, pItem->u4.pSubq->pSelect) + ){ return WRC_Abort; } if( pItem->fg.isTabFunc @@ -106940,7 +108250,7 @@ static void extendFJMatch( if( pNew ){ pNew->iTable = pMatch->iCursor; pNew->iColumn = iColumn; - pNew->y.pTab = pMatch->pTab; + pNew->y.pTab = pMatch->pSTab; assert( (pMatch->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 ); ExprSetProperty(pNew, EP_CanBeNull); *ppList = sqlite3ExprListAppend(pParse, *ppList, pNew); @@ -107019,7 +108329,6 @@ static int lookupName( Schema *pSchema = 0; /* Schema of the expression */ int eNewExprOp = TK_COLUMN; /* New value for pExpr->op on success */ Table *pTab = 0; /* Table holding the row */ - Column *pCol; /* A column of pTab */ ExprList *pFJMatch = 0; /* Matches for FULL JOIN .. USING */ const char *zCol = pRight->u.zToken; @@ -107070,11 +108379,10 @@ static int lookupName( if( pSrcList ){ for(i=0, pItem=pSrcList->a; inSrc; i++, pItem++){ - u8 hCol; - pTab = pItem->pTab; + pTab = pItem->pSTab; assert( pTab!=0 && pTab->zName!=0 ); assert( pTab->nCol>0 || pParse->nErr ); - assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) ); + assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem)); if( pItem->fg.isNestedFrom ){ /* In this case, pItem is a subquery that has been formed from a ** parenthesized subset of the FROM clause terms. Example: @@ -107083,8 +108391,12 @@ static int lookupName( ** This pItem -------------^ */ int hit = 0; - assert( pItem->pSelect!=0 ); - pEList = pItem->pSelect->pEList; + Select *pSel; + assert( pItem->fg.isSubquery ); + assert( pItem->u4.pSubq!=0 ); + pSel = pItem->u4.pSubq->pSelect; + assert( pSel!=0 ); + pEList = pSel->pEList; assert( pEList!=0 ); assert( pEList->nExpr==pTab->nCol ); for(j=0; jnExpr; j++){ @@ -107154,43 +108466,38 @@ static int lookupName( sqlite3RenameTokenRemap(pParse, 0, (void*)&pExpr->y.pTab); } } - hCol = sqlite3StrIHash(zCol); - for(j=0, pCol=pTab->aCol; jnCol; j++, pCol++){ - if( pCol->hName==hCol - && sqlite3StrICmp(pCol->zCnName, zCol)==0 - ){ - if( cnt>0 ){ - if( pItem->fg.isUsing==0 - || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 - ){ - /* Two or more tables have the same column name which is - ** not joined by USING. This is an error. Signal as much - ** by clearing pFJMatch and letting cnt go above 1. */ - sqlite3ExprListDelete(db, pFJMatch); - pFJMatch = 0; - }else - if( (pItem->fg.jointype & JT_RIGHT)==0 ){ - /* An INNER or LEFT JOIN. Use the left-most table */ - continue; - }else - if( (pItem->fg.jointype & JT_LEFT)==0 ){ - /* A RIGHT JOIN. Use the right-most table */ - cnt = 0; - sqlite3ExprListDelete(db, pFJMatch); - pFJMatch = 0; - }else{ - /* For a FULL JOIN, we must construct a coalesce() func */ - extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn); - } - } - cnt++; - pMatch = pItem; - /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ - pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j; - if( pItem->fg.isNestedFrom ){ - sqlite3SrcItemColumnUsed(pItem, j); + j = sqlite3ColumnIndex(pTab, zCol); + if( j>=0 ){ + if( cnt>0 ){ + if( pItem->fg.isUsing==0 + || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 + ){ + /* Two or more tables have the same column name which is + ** not joined by USING. This is an error. Signal as much + ** by clearing pFJMatch and letting cnt go above 1. 
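**
** For example, with a hypothetical schema:
**
**     CREATE TABLE t1(a, b);
**     CREATE TABLE t2(a, c);
**     SELECT a FROM t1 JOIN t2 USING(a);  -- ok, USING merges the two a's
**     SELECT a FROM t1 JOIN t2;           -- error: ambiguous column name: a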
*/ + sqlite3ExprListDelete(db, pFJMatch); + pFJMatch = 0; + }else + if( (pItem->fg.jointype & JT_RIGHT)==0 ){ + /* An INNER or LEFT JOIN. Use the left-most table */ + continue; + }else + if( (pItem->fg.jointype & JT_LEFT)==0 ){ + /* A RIGHT JOIN. Use the right-most table */ + cnt = 0; + sqlite3ExprListDelete(db, pFJMatch); + pFJMatch = 0; + }else{ + /* For a FULL JOIN, we must construct a coalesce() func */ + extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn); } - break; + } + cnt++; + pMatch = pItem; + /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ + pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j; + if( pItem->fg.isNestedFrom ){ + sqlite3SrcItemColumnUsed(pItem, j); } } if( 0==cnt && VisibleRowid(pTab) ){ @@ -107207,9 +108514,9 @@ static int lookupName( */ if( cntTab==0 || (cntTab==1 - && ALWAYS(pMatch!=0) - && ALWAYS(pMatch->pTab!=0) - && (pMatch->pTab->tabFlags & TF_Ephemeral)!=0 + && pMatch!=0 + && ALWAYS(pMatch->pSTab!=0) + && (pMatch->pSTab->tabFlags & TF_Ephemeral)!=0 && (pTab->tabFlags & TF_Ephemeral)==0) ){ cntTab = 1; @@ -107230,7 +108537,7 @@ static int lookupName( if( pMatch ){ pExpr->iTable = pMatch->iCursor; assert( ExprUseYTab(pExpr) ); - pExpr->y.pTab = pMatch->pTab; + pExpr->y.pTab = pMatch->pSTab; if( (pMatch->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 ){ ExprSetProperty(pExpr, EP_CanBeNull); } @@ -107272,7 +108579,7 @@ static int lookupName( if( (pNC->ncFlags & NC_UUpsert)!=0 && zTab!=0 ){ Upsert *pUpsert = pNC->uNC.pUpsert; if( pUpsert && sqlite3StrICmp("excluded",zTab)==0 ){ - pTab = pUpsert->pUpsertSrc->a[0].pTab; + pTab = pUpsert->pUpsertSrc->a[0].pSTab; pExpr->iTable = EXCLUDED_TABLE_NUMBER; } } @@ -107280,23 +108587,18 @@ static int lookupName( if( pTab ){ int iCol; - u8 hCol = sqlite3StrIHash(zCol); pSchema = pTab->pSchema; cntTab++; - for(iCol=0, pCol=pTab->aCol; iColnCol; iCol++, pCol++){ - if( pCol->hName==hCol - && sqlite3StrICmp(pCol->zCnName, zCol)==0 - ){ - if( iCol==pTab->iPKey ){ - iCol = -1; - } - break; + iCol = sqlite3ColumnIndex(pTab, zCol); + if( iCol>=0 ){ + if( pTab->iPKey==iCol ) iCol = -1; + }else{ + if( sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){ + iCol = -1; + }else{ + iCol = pTab->nCol; } } - if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){ - /* IMP: R-51414-32910 */ - iCol = -1; - } if( iColnCol ){ cnt++; pMatch = 0; @@ -107355,11 +108657,11 @@ static int lookupName( && pMatch && (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0 && sqlite3IsRowid(zCol) - && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom) + && ALWAYS(VisibleRowid(pMatch->pSTab) || pMatch->fg.isNestedFrom) ){ cnt = cntTab; #if SQLITE_ALLOW_ROWID_IN_VIEW+0==2 - if( pMatch->pTab!=0 && IsView(pMatch->pTab) ){ + if( pMatch->pSTab!=0 && IsView(pMatch->pSTab) ){ eNewExprOp = TK_NULL; } #endif @@ -107596,7 +108898,7 @@ SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSr SrcItem *pItem = &pSrc->a[iSrc]; Table *pTab; assert( ExprUseYTab(p) ); - pTab = p->y.pTab = pItem->pTab; + pTab = p->y.pTab = pItem->pSTab; p->iTable = pItem->iCursor; if( p->y.pTab->iPKey==iCol ){ p->iColumn = -1; @@ -107715,7 +109017,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ pItem = pSrcList->a; pExpr->op = TK_COLUMN; assert( ExprUseYTab(pExpr) ); - pExpr->y.pTab = pItem->pTab; + pExpr->y.pTab = pItem->pSTab; pExpr->iTable = pItem->iCursor; pExpr->iColumn--; pExpr->affExpr = SQLITE_AFF_INTEGER; @@ -107840,8 +109142,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ /* Resolve function names */ case 
TK_FUNCTION: { - ExprList *pList = pExpr->x.pList; /* The argument list */ - int n = pList ? pList->nExpr : 0; /* Number of arguments */ + ExprList *pList; /* The argument list */ + int n; /* Number of arguments */ int no_such_func = 0; /* True if no such function exists */ int wrong_num_args = 0; /* True if wrong number of arguments */ int is_agg = 0; /* True if is an aggregate function */ @@ -107854,6 +109156,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ #endif assert( !ExprHasProperty(pExpr, EP_xIsSelect|EP_IntValue) ); assert( pExpr->pLeft==0 || pExpr->pLeft->op==TK_ORDER ); + pList = pExpr->x.pList; + n = pList ? pList->nExpr : 0; zId = pExpr->u.zToken; pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0); if( pDef==0 ){ @@ -107902,6 +109206,24 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ } } #endif + + /* If the function may call sqlite3_value_subtype(), then set the + ** EP_SubtArg flag on all of its argument expressions. This prevents + ** where.c from replacing the expression with a value read from an + ** index on the same expression, which will not have the correct + ** subtype. Also set the flag if the function expression itself is + ** an EP_SubtArg expression. In this case subtypes are required as + ** the function may return a value with a subtype back to its + ** caller using sqlite3_result_value(). */ + if( (pDef->funcFlags & SQLITE_SUBTYPE) + || ExprHasProperty(pExpr, EP_SubtArg) + ){ + int ii; + for(ii=0; iia[ii].pExpr, EP_SubtArg); + } + } + if( pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG) ){ /* For the purposes of the EP_ConstFunc flag, date and time ** functions and other functions that change slowly are considered @@ -107915,13 +109237,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ ** sqlite_version() that might change over time cannot be used ** in an index or generated column. Curiously, they can be used ** in a CHECK constraint. SQLServer, MySQL, and PostgreSQL all - ** all this. */ + ** allow this. */ sqlite3ResolveNotValid(pParse, pNC, "non-deterministic functions", NC_IdxExpr|NC_PartIdx|NC_GenCol, 0, pExpr); }else{ assert( (NC_SelfRef & 0xff)==NC_SelfRef ); /* Must fit in 8 bits */ pExpr->op2 = pNC->ncFlags & NC_SelfRef; - if( pNC->ncFlags & NC_FromDDL ) ExprSetProperty(pExpr, EP_FromDDL); } if( (pDef->funcFlags & SQLITE_FUNC_INTERNAL)!=0 && pParse->nested==0 @@ -107937,6 +109258,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ if( (pDef->funcFlags & (SQLITE_FUNC_DIRECT|SQLITE_FUNC_UNSAFE))!=0 && !IN_RENAME_OBJECT ){ + if( pNC->ncFlags & NC_FromDDL ) ExprSetProperty(pExpr, EP_FromDDL); sqlite3ExprFunctionUsable(pParse, pExpr, pDef); } } @@ -108021,9 +109343,9 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ sqlite3WalkExprList(pWalker, pExpr->pLeft->x.pList); } #ifndef SQLITE_OMIT_WINDOWFUNC - if( pWin ){ + if( pWin && pParse->nErr==0 ){ Select *pSel = pNC->pWinSelect; - assert( pWin==0 || (ExprUseYWin(pExpr) && pWin==pExpr->y.pWin) ); + assert( ExprUseYWin(pExpr) && pWin==pExpr->y.pWin ); if( IN_RENAME_OBJECT==0 ){ sqlite3WindowUpdate(pParse, pSel ? 
pSel->pWinDefn : 0, pWin, pDef); if( pParse->db->mallocFailed ) break; @@ -108230,7 +109552,7 @@ static int resolveOrderByTermToExprList( int rc; /* Return code from subprocedures */ u8 savedSuppErr; /* Saved value of db->suppressErr */ - assert( sqlite3ExprIsInteger(pE, &i)==0 ); + assert( sqlite3ExprIsInteger(pE, &i, 0)==0 ); pEList = pSelect->pEList; /* Resolve all names in the ORDER BY term expression @@ -108329,7 +109651,7 @@ static int resolveCompoundOrderBy( if( pItem->fg.done ) continue; pE = sqlite3ExprSkipCollateAndLikely(pItem->pExpr); if( NEVER(pE==0) ) continue; - if( sqlite3ExprIsInteger(pE, &iCol) ){ + if( sqlite3ExprIsInteger(pE, &iCol, 0) ){ if( iCol<=0 || iCol>pEList->nExpr ){ resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr, pE); return 1; @@ -108514,7 +109836,7 @@ static int resolveOrderGroupBy( continue; } } - if( sqlite3ExprIsInteger(pE2, &iCol) ){ + if( sqlite3ExprIsInteger(pE2, &iCol, 0) ){ /* The ORDER BY term is an integer constant. Again, set the column ** number so that sqlite3ResolveOrderGroupBy() will convert the ** order-by term to a copy of the result-set expression */ @@ -108605,7 +109927,11 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ ** moves the pOrderBy down to the sub-query. It will be moved back ** after the names have been resolved. */ if( p->selFlags & SF_Converted ){ - Select *pSub = p->pSrc->a[0].pSelect; + Select *pSub; + assert( p->pSrc->a[0].fg.isSubquery ); + assert( p->pSrc->a[0].u4.pSubq!=0 ); + pSub = p->pSrc->a[0].u4.pSubq->pSelect; + assert( pSub!=0 ); assert( p->pSrc->nSrc==1 && p->pOrderBy ); assert( pSub->pPrior && pSub->pOrderBy==0 ); pSub->pOrderBy = p->pOrderBy; @@ -108617,13 +109943,16 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ if( pOuterNC ) pOuterNC->nNestedSelect++; for(i=0; ipSrc->nSrc; i++){ SrcItem *pItem = &p->pSrc->a[i]; - assert( pItem->zName!=0 || pItem->pSelect!=0 );/* Test of tag-20240424-1*/ - if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){ + assert( pItem->zName!=0 + || pItem->fg.isSubquery ); /* Test of tag-20240424-1*/ + if( pItem->fg.isSubquery + && (pItem->u4.pSubq->pSelect->selFlags & SF_Resolved)==0 + ){ int nRef = pOuterNC ? pOuterNC->nRef : 0; const char *zSavedContext = pParse->zAuthContext; if( pItem->zName ) pParse->zAuthContext = pItem->zName; - sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC); + sqlite3ResolveSelectNames(pParse, pItem->u4.pSubq->pSelect, pOuterNC); pParse->zAuthContext = zSavedContext; if( pParse->nErr ) return WRC_Abort; assert( db->mallocFailed==0 ); @@ -108725,7 +110054,10 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ ** These integers will be replaced by copies of the corresponding result ** set expressions by the call to resolveOrderGroupBy() below. */ if( p->selFlags & SF_Converted ){ - Select *pSub = p->pSrc->a[0].pSelect; + Select *pSub; + assert( p->pSrc->a[0].fg.isSubquery ); + pSub = p->pSrc->a[0].u4.pSubq->pSelect; + assert( pSub!=0 ); p->pOrderBy = pSub->pOrderBy; pSub->pOrderBy = 0; } @@ -108980,20 +110312,22 @@ SQLITE_PRIVATE int sqlite3ResolveSelfReference( Expr *pExpr, /* Expression to resolve. May be NULL. */ ExprList *pList /* Expression list to resolve. May be NULL. 
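**
** (Illustration of the self-references this routine resolves, using a
** hypothetical schema: in
**
**     CREATE TABLE t(x INTEGER CHECK( x>0 AND x<100 ));
**     CREATE INDEX i ON t( abs(x) );
**
** the name "x" must resolve against the table still under construction,
** which is what the fake single-entry SrcList built below provides.)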
*/ ){ - SrcList sSrc; /* Fake SrcList for pParse->pNewTable */ + SrcList *pSrc; /* Fake SrcList for pParse->pNewTable */ NameContext sNC; /* Name context for pParse->pNewTable */ int rc; + u8 srcSpace[SZ_SRCLIST_1]; /* Memory space for the fake SrcList */ assert( type==0 || pTab!=0 ); assert( type==NC_IsCheck || type==NC_PartIdx || type==NC_IdxExpr || type==NC_GenCol || pTab==0 ); memset(&sNC, 0, sizeof(sNC)); - memset(&sSrc, 0, sizeof(sSrc)); + pSrc = (SrcList*)srcSpace; + memset(pSrc, 0, SZ_SRCLIST_1); if( pTab ){ - sSrc.nSrc = 1; - sSrc.a[0].zName = pTab->zName; - sSrc.a[0].pTab = pTab; - sSrc.a[0].iCursor = -1; + pSrc->nSrc = 1; + pSrc->a[0].zName = pTab->zName; + pSrc->a[0].pSTab = pTab; + pSrc->a[0].iCursor = -1; if( pTab->pSchema!=pParse->db->aDb[1].pSchema ){ /* Cause EP_FromDDL to be set on TK_FUNCTION nodes of non-TEMP ** schema elements */ @@ -109001,7 +110335,7 @@ SQLITE_PRIVATE int sqlite3ResolveSelfReference( } } sNC.pParse = pParse; - sNC.pSrcList = &sSrc; + sNC.pSrcList = pSrc; sNC.ncFlags = type | NC_IsDDL; if( (rc = sqlite3ResolveExprNames(&sNC, pExpr))!=SQLITE_OK ) return rc; if( pList ) rc = sqlite3ResolveExprListNames(&sNC, pList); @@ -109085,7 +110419,9 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr ); } - if( op==TK_VECTOR ){ + if( op==TK_VECTOR + || (op==TK_FUNCTION && pExpr->affExpr==SQLITE_AFF_DEFER) + ){ assert( ExprUseXList(pExpr) ); return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr); } @@ -109097,7 +110433,9 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ op = pExpr->op; continue; } - if( op!=TK_REGISTER || (op = pExpr->op2)==TK_REGISTER ) break; + if( op!=TK_REGISTER ) break; + op = pExpr->op2; + if( NEVER( op==TK_REGISTER ) ) break; } return pExpr->affExpr; } @@ -109276,7 +110614,9 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ p = p->pLeft; continue; } - if( op==TK_VECTOR ){ + if( op==TK_VECTOR + || (op==TK_FUNCTION && p->affExpr==SQLITE_AFF_DEFER) + ){ assert( ExprUseXList(p) ); p = p->x.pList->a[0].pExpr; continue; @@ -109489,7 +110829,7 @@ static int codeCompare( p5 = binaryCompareP5(pLeft, pRight, jumpIfNull); addr = sqlite3VdbeAddOp4(pParse->pVdbe, opcode, in2, dest, in1, (void*)p4, P4_COLLSEQ); - sqlite3VdbeChangeP5(pParse->pVdbe, (u8)p5); + sqlite3VdbeChangeP5(pParse->pVdbe, (u16)p5); return addr; } @@ -110150,7 +111490,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ return pLeft; }else{ u32 f = pLeft->flags | pRight->flags; - if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse))==EP_IsFalse + if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse|EP_HasFunc))==EP_IsFalse && !IN_RENAME_OBJECT ){ sqlite3ExprDeferredDelete(pParse, pLeft); @@ -110748,7 +112088,7 @@ static Expr *exprDup( SQLITE_PRIVATE With *sqlite3WithDup(sqlite3 *db, With *p){ With *pRet = 0; if( p ){ - sqlite3_int64 nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1); + sqlite3_int64 nByte = SZ_WITH(p->nCte); pRet = sqlite3DbMallocZero(db, nByte); if( pRet ){ int i; @@ -110859,7 +112199,6 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, const ExprList *p, int } pItem->zEName = sqlite3DbStrDup(db, pOldItem->zEName); pItem->fg = pOldItem->fg; - pItem->fg.done = 0; pItem->u = pOldItem->u; } return pNew; @@ -110876,26 +112215,39 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, const ExprList *p, int SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int flags){ SrcList *pNew; int i; - int nByte; assert( db!=0 ); 
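  /* A NULL source list duplicates to NULL.  During the per-item deep copy
  ** below, an OOM while cloning a Subquery clears fg.isSubquery on the new
  ** item so that the partially built list remains safe to delete. */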
if( p==0 ) return 0; - nByte = sizeof(*p) + (p->nSrc>0 ? sizeof(p->a[0]) * (p->nSrc-1) : 0); - pNew = sqlite3DbMallocRawNN(db, nByte ); + pNew = sqlite3DbMallocRawNN(db, SZ_SRCLIST(p->nSrc) ); if( pNew==0 ) return 0; pNew->nSrc = pNew->nAlloc = p->nSrc; for(i=0; inSrc; i++){ SrcItem *pNewItem = &pNew->a[i]; const SrcItem *pOldItem = &p->a[i]; Table *pTab; - pNewItem->pSchema = pOldItem->pSchema; - pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); + pNewItem->fg = pOldItem->fg; + if( pOldItem->fg.isSubquery ){ + Subquery *pNewSubq = sqlite3DbMallocRaw(db, sizeof(Subquery)); + if( pNewSubq==0 ){ + assert( db->mallocFailed ); + pNewItem->fg.isSubquery = 0; + }else{ + memcpy(pNewSubq, pOldItem->u4.pSubq, sizeof(*pNewSubq)); + pNewSubq->pSelect = sqlite3SelectDup(db, pNewSubq->pSelect, flags); + if( pNewSubq->pSelect==0 ){ + sqlite3DbFree(db, pNewSubq); + pNewSubq = 0; + pNewItem->fg.isSubquery = 0; + } + } + pNewItem->u4.pSubq = pNewSubq; + }else if( pOldItem->fg.fixedSchema ){ + pNewItem->u4.pSchema = pOldItem->u4.pSchema; + }else{ + pNewItem->u4.zDatabase = sqlite3DbStrDup(db, pOldItem->u4.zDatabase); + } pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias); - pNewItem->fg = pOldItem->fg; pNewItem->iCursor = pOldItem->iCursor; - pNewItem->addrFillSub = pOldItem->addrFillSub; - pNewItem->regReturn = pOldItem->regReturn; - pNewItem->regResult = pOldItem->regResult; if( pNewItem->fg.isIndexedBy ){ pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy); }else if( pNewItem->fg.isTabFunc ){ @@ -110908,11 +112260,10 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int fla if( pNewItem->fg.isCte ){ pNewItem->u2.pCteUse->nUse++; } - pTab = pNewItem->pTab = pOldItem->pTab; + pTab = pNewItem->pSTab = pOldItem->pSTab; if( pTab ){ pTab->nTabRef++; } - pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags); if( pOldItem->fg.isUsing ){ assert( pNewItem->fg.isUsing ); pNewItem->u3.pUsing = sqlite3IdListDup(db, pOldItem->u3.pUsing); @@ -110928,16 +112279,13 @@ SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, const IdList *p){ int i; assert( db!=0 ); if( p==0 ) return 0; - assert( p->eU4!=EU4_EXPR ); - pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew)+(p->nId-1)*sizeof(p->a[0]) ); + pNew = sqlite3DbMallocRawNN(db, SZ_IDLIST(p->nId)); if( pNew==0 ) return 0; pNew->nId = p->nId; - pNew->eU4 = p->eU4; for(i=0; inId; i++){ struct IdList_item *pNewItem = &pNew->a[i]; const struct IdList_item *pOldItem = &p->a[i]; pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); - pNewItem->u4 = pOldItem->u4; } return pNew; } @@ -110963,7 +112311,7 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *pDup, int fla pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags); pNew->iLimit = 0; pNew->iOffset = 0; - pNew->selFlags = p->selFlags & ~SF_UsesEphemeral; + pNew->selFlags = p->selFlags & ~(u32)SF_UsesEphemeral; pNew->addrOpenEphm[0] = -1; pNew->addrOpenEphm[1] = -1; pNew->nSelectRow = p->nSelectRow; @@ -110986,7 +112334,6 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *pDup, int fla pp = &pNew->pPrior; pNext = pNew; } - return pRet; } #else @@ -111016,7 +112363,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE ExprList *sqlite3ExprListAppendNew( struct ExprList_item *pItem; ExprList *pList; - pList = sqlite3DbMallocRawNN(db, sizeof(ExprList)+sizeof(pList->a[0])*4 ); + pList = sqlite3DbMallocRawNN(db, SZ_EXPRLIST(4)); if( pList==0 ){ sqlite3ExprDelete(db, pExpr); return 0; @@ 
-111036,8 +112383,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE ExprList *sqlite3ExprListAppendGrow( struct ExprList_item *pItem; ExprList *pNew; pList->nAlloc *= 2; - pNew = sqlite3DbRealloc(db, pList, - sizeof(*pList)+(pList->nAlloc-1)*sizeof(pList->a[0])); + pNew = sqlite3DbRealloc(db, pList, SZ_EXPRLIST(pList->nAlloc)); if( pNew==0 ){ sqlite3ExprListDelete(db, pList); sqlite3ExprDelete(db, pExpr); @@ -111643,7 +112989,7 @@ static int sqlite3ExprIsTableConstant(Expr *p, int iCur, int bAllowSubq){ ** (4a) pExpr must come from an ON clause.. ** (4b) and specifically the ON clause associated with the LEFT JOIN. ** -** (5) If pSrc is not the right operand of a LEFT JOIN or the left +** (5) If pSrc is the right operand of a LEFT JOIN or the left ** operand of a RIGHT JOIN, then pExpr must be from the WHERE ** clause, not an ON clause. ** @@ -111801,8 +113147,12 @@ SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr *p){ ** to fit in a 32-bit integer, return 1 and put the value of the integer ** in *pValue. If the expression is not an integer or if it is too big ** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged. +** +** If the pParse pointer is provided, then allow the expression p to be +** a parameter (TK_VARIABLE) that is bound to an integer. +** But if pParse is NULL, then p must be a pure integer literal. */ -SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr *p, int *pValue){ +SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr *p, int *pValue, Parse *pParse){ int rc = 0; if( NEVER(p==0) ) return 0; /* Used to only happen following on OOM */ @@ -111817,18 +113167,38 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr *p, int *pValue){ } switch( p->op ){ case TK_UPLUS: { - rc = sqlite3ExprIsInteger(p->pLeft, pValue); + rc = sqlite3ExprIsInteger(p->pLeft, pValue, 0); break; } case TK_UMINUS: { int v = 0; - if( sqlite3ExprIsInteger(p->pLeft, &v) ){ + if( sqlite3ExprIsInteger(p->pLeft, &v, 0) ){ assert( ((unsigned int)v)!=0x80000000 ); *pValue = -v; rc = 1; } break; } + case TK_VARIABLE: { + sqlite3_value *pVal; + if( pParse==0 ) break; + if( NEVER(pParse->pVdbe==0) ) break; + if( (pParse->db->flags & SQLITE_EnableQPSG)!=0 ) break; + sqlite3VdbeSetVarmask(pParse->pVdbe, p->iColumn); + pVal = sqlite3VdbeGetBoundValue(pParse->pReprepare, p->iColumn, + SQLITE_AFF_BLOB); + if( pVal ){ + if( sqlite3_value_type(pVal)==SQLITE_INTEGER ){ + sqlite3_int64 vv = sqlite3_value_int64(pVal); + if( vv == (vv & 0x7fffffff) ){ /* non-negative numbers only */ + *pValue = (int)vv; + rc = 1; + } + } + sqlite3ValueFree(pVal); + } + break; + } default: break; } return rc; @@ -111942,13 +113312,7 @@ SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab){ int ii; assert( VisibleRowid(pTab) ); for(ii=0; iinCol; iCol++){ - if( sqlite3_stricmp(azOpt[ii], pTab->aCol[iCol].zCnName)==0 ) break; - } - if( iCol==pTab->nCol ){ - return azOpt[ii]; - } + if( sqlite3ColumnIndex(pTab, azOpt[ii])<0 ) return azOpt[ii]; } return 0; } @@ -111982,8 +113346,8 @@ static Select *isCandidateForInOpt(const Expr *pX){ pSrc = p->pSrc; assert( pSrc!=0 ); if( pSrc->nSrc!=1 ) return 0; /* Single term in FROM clause */ - if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */ - pTab = pSrc->a[0].pTab; + if( pSrc->a[0].fg.isSubquery) return 0;/* FROM is not a subquery or view */ + pTab = pSrc->a[0].pSTab; assert( pTab!=0 ); assert( !IsView(pTab) ); /* FROM clause is not a view */ if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */ @@ -112166,7 +113530,7 @@ SQLITE_PRIVATE int 
sqlite3FindInIndex( assert( p->pEList!=0 ); /* Because of isCandidateForInOpt(p) */ assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */ assert( p->pSrc!=0 ); /* Because of isCandidateForInOpt(p) */ - pTab = p->pSrc->a[0].pTab; + pTab = p->pSrc->a[0].pSTab; /* Code an OP_Transaction and OP_TableLock for <table>. */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); @@ -112258,6 +113622,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( if( aiMap ) aiMap[i] = j; } + assert( nExpr>0 && nExprop==TK_IN ); - zRet = sqlite3DbMallocRaw(pParse->db, nVal+1); + zRet = sqlite3DbMallocRaw(pParse->db, 1+(i64)nVal); if( zRet ){ int i; for(i=0; imSubrtnSig & (1<<(pNewSig->selId&7)))==0 ) return 0; + assert( pExpr->op==TK_IN ); + assert( !ExprUseYSub(pExpr) ); + assert( ExprUseXSelect(pExpr) ); + assert( pExpr->x.pSelect!=0 ); + assert( (pExpr->x.pSelect->selFlags & SF_All)==0 ); + v = pParse->pVdbe; + assert( v!=0 ); + pOp = sqlite3VdbeGetOp(v, 1); + pEnd = sqlite3VdbeGetLastOp(v); + for(; pOp<pEnd; pOp++){ + if( pOp->p4type!=P4_SUBRTNSIG ) continue; + assert( pOp->opcode==OP_BeginSubrtn ); + pSig = pOp->p4.pSubrtnSig; + assert( pSig!=0 ); + if( !pSig->bComplete ) continue; + if( pNewSig->selId!=pSig->selId ) continue; + if( strcmp(pNewSig->zAff,pSig->zAff)!=0 ) continue; + pExpr->y.sub.iAddr = pSig->iAddr; + pExpr->y.sub.regReturn = pSig->regReturn; + pExpr->iTable = pSig->iTable; + ExprSetProperty(pExpr, EP_Subrtn); + return 1; + } + return 0; +} +#endif /* SQLITE_OMIT_SUBQUERY */ + #ifndef SQLITE_OMIT_SUBQUERY /* ** Generate code that will construct an ephemeral table containing all terms @@ -112440,6 +113849,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( KeyInfo *pKeyInfo = 0; /* Key information */ int nVal; /* Size of vector pLeft */ Vdbe *v; /* The prepared statement under construction */ + SubrtnSig *pSig = 0; /* Signature for this subroutine */ v = pParse->pVdbe; assert( v!=0 ); @@ -112455,11 +113865,27 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( ** and reuse it many times. */ if( !ExprHasProperty(pExpr, EP_VarSelect) && pParse->iSelfTab==0 ){ - /* Reuse of the RHS is allowed */ - /* If this routine has already been coded, but the previous code - ** might not have been invoked yet, so invoke it now as a subroutine. + /* Reuse of the RHS is allowed + ** + ** Compute a signature for the RHS of the IN operator to facilitate + ** finding and reusing prior instances of the same IN operator. */ - if( ExprHasProperty(pExpr, EP_Subrtn) ){ + assert( !ExprUseXSelect(pExpr) || pExpr->x.pSelect!=0 ); + if( ExprUseXSelect(pExpr) && (pExpr->x.pSelect->selFlags & SF_All)==0 ){ + pSig = sqlite3DbMallocRawNN(pParse->db, sizeof(pSig[0])); + if( pSig ){ + pSig->selId = pExpr->x.pSelect->selId; + pSig->zAff = exprINAffinity(pParse, pExpr); + } + } + + /* Check to see if there is a prior materialization of the RHS of + ** this IN operator. If there is, then make use of that prior + ** materialization rather than recomputing it.
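**
** For instance (hypothetical query shapes): if query rewriting makes the
** same  x IN (SELECT a FROM big)  reachable from two code paths, the
** second encounter can jump to the already-coded subroutine and
** OP_OpenDup the existing ephemeral table, provided the signature
** (selId plus the affinity string) matches.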
+ */ + if( ExprHasProperty(pExpr, EP_Subrtn) + || findCompatibleInRhsSubrtn(pParse, pExpr, pSig) + ){ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); if( ExprUseXSelect(pExpr) ){ ExplainQueryPlan((pParse, 0, "REUSE LIST SUBQUERY %d", @@ -112471,6 +113897,10 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( assert( iTab!=pExpr->iTable ); sqlite3VdbeAddOp2(v, OP_OpenDup, iTab, pExpr->iTable); sqlite3VdbeJumpHere(v, addrOnce); + if( pSig ){ + sqlite3DbFree(pParse->db, pSig->zAff); + sqlite3DbFree(pParse->db, pSig); + } return; } @@ -112481,7 +113911,14 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( pExpr->y.sub.regReturn = ++pParse->nMem; pExpr->y.sub.iAddr = sqlite3VdbeAddOp2(v, OP_BeginSubrtn, 0, pExpr->y.sub.regReturn) + 1; - + if( pSig ){ + pSig->bComplete = 0; + pSig->iAddr = pExpr->y.sub.iAddr; + pSig->regReturn = pExpr->y.sub.regReturn; + pSig->iTable = iTab; + pParse->mSubrtnSig = 1 << (pSig->selId&7); + sqlite3VdbeChangeP4(v, -1, (const char*)pSig, P4_SUBRTNSIG); + } addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); } @@ -112522,15 +113959,31 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( SelectDest dest; int i; int rc; + int addrBloom = 0; sqlite3SelectDestInit(&dest, SRT_Set, iTab); dest.zAffSdst = exprINAffinity(pParse, pExpr); pSelect->iLimit = 0; + if( addrOnce && OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ){ + int regBloom = ++pParse->nMem; + addrBloom = sqlite3VdbeAddOp2(v, OP_Blob, 10000, regBloom); + VdbeComment((v, "Bloom filter")); + dest.iSDParm2 = regBloom; + } testcase( pSelect->selFlags & SF_Distinct ); testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */ pCopy = sqlite3SelectDup(pParse->db, pSelect, 0); rc = pParse->db->mallocFailed ? 1 :sqlite3Select(pParse, pCopy, &dest); sqlite3SelectDelete(pParse->db, pCopy); sqlite3DbFree(pParse->db, dest.zAffSdst); + if( addrBloom ){ + /* Remember that location of the Bloom filter in the P3 operand + ** of the OP_Once that began this subroutine. tag-202407032019 */ + sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; + if( dest.iSDParm2==0 ){ + /* If the Bloom filter won't actually be used, keep it small */ + sqlite3VdbeGetOp(v, addrBloom)->p1 = 10; + } + } if( rc ){ sqlite3KeyInfoUnref(pKeyInfo); return; @@ -112596,6 +114049,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( sqlite3ReleaseTempReg(pParse, r1); sqlite3ReleaseTempReg(pParse, r2); } + if( pSig ) pSig->bComplete = 1; if( pKeyInfo ){ sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO); } @@ -112828,9 +114282,7 @@ static void sqlite3ExprCodeIN( if( sqlite3ExprCheckIN(pParse, pExpr) ) return; zAff = exprINAffinity(pParse, pExpr); nVector = sqlite3ExprVectorSize(pExpr->pLeft); - aiMap = (int*)sqlite3DbMallocZero( - pParse->db, nVector*(sizeof(int) + sizeof(char)) + 1 - ); + aiMap = (int*)sqlite3DbMallocZero(pParse->db, nVector*sizeof(int)); if( pParse->db->mallocFailed ) goto sqlite3ExprCodeIN_oom_error; /* Attempt to compute the RHS. 
After this step, if anything other than @@ -112973,6 +114425,15 @@ static void sqlite3ExprCodeIN( sqlite3VdbeAddOp4(v, OP_Affinity, rLhs, nVector, 0, zAff, nVector); if( destIfFalse==destIfNull ){ /* Combine Step 3 and Step 5 into a single opcode */ + if( ExprHasProperty(pExpr, EP_Subrtn) ){ + const VdbeOp *pOp = sqlite3VdbeGetOp(v, pExpr->y.sub.iAddr); + assert( pOp->opcode==OP_Once || pParse->nErr ); + if( pOp->opcode==OP_Once && pOp->p3>0 ){ /* tag-202407032019 */ + assert( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ); + sqlite3VdbeAddOp4Int(v, OP_Filter, pOp->p3, destIfFalse, + rLhs, nVector); VdbeCoverage(v); + } + } sqlite3VdbeAddOp4Int(v, OP_NotFound, iTab, destIfFalse, rLhs, nVector); VdbeCoverage(v); goto sqlite3ExprCodeIN_finished; @@ -113255,13 +114716,17 @@ SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int n ** register iReg. The caller must ensure that iReg already contains ** the correct value for the expression. */ -static void exprToRegister(Expr *pExpr, int iReg){ +SQLITE_PRIVATE void sqlite3ExprToRegister(Expr *pExpr, int iReg){ Expr *p = sqlite3ExprSkipCollateAndLikely(pExpr); if( NEVER(p==0) ) return; - p->op2 = p->op; - p->op = TK_REGISTER; - p->iTable = iReg; - ExprClearProperty(p, EP_Skip); + if( p->op==TK_REGISTER ){ + assert( p->iTable==iReg ); + }else{ + p->op2 = p->op; + p->op = TK_REGISTER; + p->iTable = iReg; + ExprClearProperty(p, EP_Skip); + } } /* @@ -113431,6 +114896,59 @@ static int exprCodeInlineFunction( return target; } +/* +** Expression Node callback for sqlite3ExprCanReturnSubtype(). +** +** Only a function call is able to return a subtype. So if the node +** is not a function call, return WRC_Prune immediately. +** +** A function call is able to return a subtype if it has the +** SQLITE_RESULT_SUBTYPE property. +** +** Assume that every function is able to pass-through a subtype from +** one of its argument (using sqlite3_result_value()). Most functions +** are not this way, but we don't have a mechanism to distinguish those +** that are from those that are not, so assume they all work this way. +** That means that if one of its arguments is another function and that +** other function is able to return a subtype, then this function is +** able to return a subtype. +*/ +static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){ + int n; + FuncDef *pDef; + sqlite3 *db; + if( pExpr->op!=TK_FUNCTION ){ + return WRC_Prune; + } + assert( ExprUseXList(pExpr) ); + db = pWalker->pParse->db; + n = ALWAYS(pExpr->x.pList) ? pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( NEVER(pDef==0) || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + pWalker->eCode = 1; + return WRC_Prune; + } + return WRC_Continue; +} + +/* +** Return TRUE if expression pExpr is able to return a subtype. +** +** A TRUE return does not guarantee that a subtype will be returned. +** It only indicates that a subtype return is possible. False positives +** are acceptable as they only disable an optimization. False negatives, +** on the other hand, can lead to incorrect answers. +*/ +static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){ + Walker w; + memset(&w, 0, sizeof(w)); + w.pParse = pParse; + w.xExprCallback = exprNodeCanReturnSubtype; + sqlite3WalkExpr(&w, pExpr); + return w.eCode; +} + + /* ** Check to see if pExpr is one of the indexed expressions on pParse->pIdxEpr. 
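**
** (Motivation for the EP_SubtArg test further below, with a hypothetical
** schema: given CREATE INDEX i1 ON t1( json(x) ), an occurrence of
** json(x) that is itself an argument to another function must be
** recomputed rather than read from i1, because the subtype attached via
** sqlite3_result_subtype() is not stored in index entries.)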
** If it is, then resolve the expression by reading from the index and
@@ -113463,6 +114981,17 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup(
       continue;
     }
 
+
+    /* Functions that might set a subtype should not be replaced by the
+    ** value taken from an expression index if they are themselves an
+    ** argument to another scalar function or aggregate.
+    ** https://sqlite.org/forum/forumpost/68d284c86b082c3e */
+    if( ExprHasProperty(pExpr, EP_SubtArg)
+     && sqlite3ExprCanReturnSubtype(pParse, pExpr)
+    ){
+      continue;
+    }
+
     v = pParse->pVdbe;
     assert( v!=0 );
     if( p->bMaybeNullRow ){
@@ -113491,7 +115020,7 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup(
 
 
 /*
-** Expresion pExpr is guaranteed to be a TK_COLUMN or equivalent. This
+** Expression pExpr is guaranteed to be a TK_COLUMN or equivalent. This
 ** function checks the Parse.pIdxPartExpr list to see if this column
 ** can be replaced with a constant value. If so, it generates code to
 ** put the constant value in a register (ideally, but not necessarily,
@@ -113715,6 +115244,12 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
       sqlite3VdbeLoadString(v, target, pExpr->u.zToken);
       return target;
     }
+    case TK_NULLS: {
+      /* Set a range of registers to NULL. pExpr->y.nReg registers starting
+      ** with target */
+      sqlite3VdbeAddOp3(v, OP_Null, 0, target, target + pExpr->y.nReg - 1);
+      return target;
+    }
     default: {
       /* Make NULL the default case so that if a bug causes an illegal
       ** Expr node to be passed into this function, it will be handled
@@ -114264,7 +115799,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
         break;
       }
       testcase( pX->op==TK_COLUMN );
-      exprToRegister(pDel, exprCodeVector(pParse, pDel, &regFree1));
+      sqlite3ExprToRegister(pDel, exprCodeVector(pParse, pDel, &regFree1));
       testcase( regFree1==0 );
       memset(&opCompare, 0, sizeof(opCompare));
       opCompare.op = TK_EQ;
@@ -114318,15 +115853,14 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
     }
     assert( !ExprHasProperty(pExpr, EP_IntValue) );
     if( pExpr->affExpr==OE_Ignore ){
-      sqlite3VdbeAddOp4(
-          v, OP_Halt, SQLITE_OK, OE_Ignore, 0, pExpr->u.zToken,0);
+      sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, OE_Ignore);
       VdbeCoverage(v);
     }else{
-      sqlite3HaltConstraint(pParse,
+      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+      sqlite3VdbeAddOp3(v, OP_Halt,
           pParse->pTriggerTab ? SQLITE_CONSTRAINT_TRIGGER : SQLITE_ERROR,
-          pExpr->affExpr, pExpr->u.zToken, 0, 0);
+          pExpr->affExpr, r1);
     }
-
     break;
   }
 #endif
@@ -114400,6 +115934,25 @@ SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce(
   return regDest;
 }
 
+/*
+** Make arrangements to invoke OP_Null on a range of registers
+** during initialization.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3ExprNullRegisterRange(
+  Parse *pParse,      /* Parsing context */
+  int iReg,           /* First register to set to NULL */
+  int nReg            /* Number of sequential registers to NULL out */
+){
+  u8 okConstFactor = pParse->okConstFactor;
+  Expr t;
+  memset(&t, 0, sizeof(t));
+  t.op = TK_NULLS;
+  t.y.nReg = nReg;
+  pParse->okConstFactor = 1;
+  sqlite3ExprCodeRunJustOnce(pParse, &t, iReg);
+  pParse->okConstFactor = okConstFactor;
+}
+
 /*
 ** Generate code to evaluate an expression and store the results
 ** into a register.  Return the register number where the results
@@ -114615,7 +116168,7 @@ static void exprCodeBetween(
   compRight.op = TK_LE;
   compRight.pLeft = pDel;
   compRight.pRight = pExpr->x.pList->a[1].pExpr;
-  exprToRegister(pDel, exprCodeVector(pParse, pDel, &regFree1));
+  sqlite3ExprToRegister(pDel, exprCodeVector(pParse, pDel, &regFree1));
   if( xJump ){
     xJump(pParse, &exprAnd, dest, jumpIfNull);
   }else{
@@ -114749,11 +116302,11 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
       assert( TK_ISNULL==OP_IsNull );   testcase( op==TK_ISNULL );
       assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
       r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
-      sqlite3VdbeTypeofColumn(v, r1);
+      assert( regFree1==0 || regFree1==r1 );
+      if( regFree1 ) sqlite3VdbeTypeofColumn(v, r1);
       sqlite3VdbeAddOp2(v, op, r1, dest);
       VdbeCoverageIf(v, op==TK_ISNULL);
       VdbeCoverageIf(v, op==TK_NOTNULL);
-      testcase( regFree1==0 );
       break;
     }
     case TK_BETWEEN: {
@@ -114924,11 +116477,11 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
     case TK_ISNULL:
     case TK_NOTNULL: {
       r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
-      sqlite3VdbeTypeofColumn(v, r1);
+      assert( regFree1==0 || regFree1==r1 );
+      if( regFree1 ) sqlite3VdbeTypeofColumn(v, r1);
       sqlite3VdbeAddOp2(v, op, r1, dest);
       testcase( op==TK_ISNULL );   VdbeCoverageIf(v, op==TK_ISNULL);
       testcase( op==TK_NOTNULL );  VdbeCoverageIf(v, op==TK_NOTNULL);
-      testcase( regFree1==0 );
       break;
     }
     case TK_BETWEEN: {
@@ -114994,16 +116547,23 @@ SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse *pParse, Expr *pExpr, int dest,i
 ** same as that currently bound to variable pVar, non-zero is returned.
 ** Otherwise, if the values are not the same or if pExpr is not a simple
 ** SQL value, zero is returned.
+**
+** If the SQLITE_EnableQPSG flag is set on the database connection, then
+** this routine always returns false.
 */
-static int exprCompareVariable(
+static SQLITE_NOINLINE int exprCompareVariable(
   const Parse *pParse,
   const Expr *pVar,
   const Expr *pExpr
 ){
-  int res = 0;
+  int res = 2;
   int iVar;
   sqlite3_value *pL, *pR = 0;
 
+  if( pExpr->op==TK_VARIABLE && pVar->iColumn==pExpr->iColumn ){
+    return 0;
+  }
+  if( (pParse->db->flags & SQLITE_EnableQPSG)!=0 ) return 2;
   sqlite3ValueFromExpr(pParse->db, pExpr, SQLITE_UTF8, SQLITE_AFF_BLOB, &pR);
   if( pR ){
     iVar = pVar->iColumn;
@@ -115013,12 +116573,11 @@ static int exprCompareVariable(
       if( sqlite3_value_type(pL)==SQLITE_TEXT ){
         sqlite3_value_text(pL); /* Make sure the encoding is UTF-8 */
       }
-      res = 0==sqlite3MemCompare(pL, pR, 0);
+      res = sqlite3MemCompare(pL, pR, 0) ? 2 : 0;
     }
     sqlite3ValueFree(pR);
     sqlite3ValueFree(pL);
   }
-
   return res;
 }
 
@@ -115044,12 +116603,10 @@ static int exprCompareVariable(
 ** just might result in some slightly slower code.  But returning
 ** an incorrect 0 or 1 could lead to a malfunction.
 **
-** If pParse is not NULL then TK_VARIABLE terms in pA with bindings in
-** pParse->pReprepare can be matched against literals in pB.  The
-** pParse->pVdbe->expmask bitmask is updated for each variable referenced.
-** If pParse is NULL (the normal case) then any TK_VARIABLE term in
-** Argument pParse should normally be NULL. If it is not NULL and pA or
-** pB causes a return value of 2.
+** If pParse is not NULL and SQLITE_EnableQPSG is off then TK_VARIABLE
+** terms in pA with bindings in pParse->pReprepare can be matched against
+** literals in pB.  The pParse->pVdbe->expmask bitmask is updated for
+** each variable referenced.
*/ SQLITE_PRIVATE int sqlite3ExprCompare( const Parse *pParse, @@ -115061,8 +116618,8 @@ SQLITE_PRIVATE int sqlite3ExprCompare( if( pA==0 || pB==0 ){ return pB==pA ? 0 : 2; } - if( pParse && pA->op==TK_VARIABLE && exprCompareVariable(pParse, pA, pB) ){ - return 0; + if( pParse && pA->op==TK_VARIABLE ){ + return exprCompareVariable(pParse, pA, pB); } combinedFlags = pA->flags | pB->flags; if( combinedFlags & EP_IntValue ){ @@ -115257,18 +116814,70 @@ static int exprImpliesNotNull( return 0; } +/* +** Return true if the boolean value of the expression is always either +** FALSE or NULL. +*/ +static int sqlite3ExprIsNotTrue(Expr *pExpr){ + int v; + if( pExpr->op==TK_NULL ) return 1; + if( pExpr->op==TK_TRUEFALSE && sqlite3ExprTruthValue(pExpr)==0 ) return 1; + v = 1; + if( sqlite3ExprIsInteger(pExpr, &v, 0) && v==0 ) return 1; + return 0; +} + +/* +** Return true if the expression is one of the following: +** +** CASE WHEN x THEN y END +** CASE WHEN x THEN y ELSE NULL END +** CASE WHEN x THEN y ELSE false END +** iif(x,y) +** iif(x,y,NULL) +** iif(x,y,false) +*/ +static int sqlite3ExprIsIIF(sqlite3 *db, const Expr *pExpr){ + ExprList *pList; + if( pExpr->op==TK_FUNCTION ){ + const char *z = pExpr->u.zToken; + FuncDef *pDef; + if( (z[0]!='i' && z[0]!='I') ) return 0; + if( pExpr->x.pList==0 ) return 0; + pDef = sqlite3FindFunction(db, z, pExpr->x.pList->nExpr, ENC(db), 0); +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + if( pDef==0 ) return 0; +#else + if( NEVER(pDef==0) ) return 0; +#endif + if( (pDef->funcFlags & SQLITE_FUNC_INLINE)==0 ) return 0; + if( SQLITE_PTR_TO_INT(pDef->pUserData)!=INLINEFUNC_iif ) return 0; + }else if( pExpr->op==TK_CASE ){ + if( pExpr->pLeft!=0 ) return 0; + }else{ + return 0; + } + pList = pExpr->x.pList; + assert( pList!=0 ); + if( pList->nExpr==2 ) return 1; + if( pList->nExpr==3 && sqlite3ExprIsNotTrue(pList->a[2].pExpr) ) return 1; + return 0; +} + /* ** Return true if we can prove the pE2 will always be true if pE1 is ** true. Return false if we cannot complete the proof or if pE2 might ** be false. Examples: ** -** pE1: x==5 pE2: x==5 Result: true -** pE1: x>0 pE2: x==5 Result: false -** pE1: x=21 pE2: x=21 OR y=43 Result: true -** pE1: x!=123 pE2: x IS NOT NULL Result: true -** pE1: x!=?1 pE2: x IS NOT NULL Result: true -** pE1: x IS NULL pE2: x IS NOT NULL Result: false -** pE1: x IS ?2 pE2: x IS NOT NULL Result: false +** pE1: x==5 pE2: x==5 Result: true +** pE1: x>0 pE2: x==5 Result: false +** pE1: x=21 pE2: x=21 OR y=43 Result: true +** pE1: x!=123 pE2: x IS NOT NULL Result: true +** pE1: x!=?1 pE2: x IS NOT NULL Result: true +** pE1: x IS NULL pE2: x IS NOT NULL Result: false +** pE1: x IS ?2 pE2: x IS NOT NULL Result: false +** pE1: iif(x,y) pE2: x Result: true +** PE1: iif(x,y,0) pE2: x Result: true ** ** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has ** Expr.iTable<0 then assume a table number given by iTab. 
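The last two rows of the table can be checked exhaustively under SQL's three-valued logic. A small self-contained sketch -- 0 for false, 1 for true, 2 for NULL -- that models only the truth table, not SQLite's expression nodes:

/* result of CASE WHEN x THEN y ELSE z END under three-valued logic */
typedef int tv;                            /* 0=false, 1=true, 2=NULL */
static tv iif3(tv x, tv y, tv z){ return x==1 ? y : z; }

/* Verify: if the ELSE arm is NULL or false, iif(x,y,z) true implies x true */
int iifImplicationHolds(void){
  tv x, y;
  for(x=0; x<3; x++){
    for(y=0; y<3; y++){
      if( iif3(x, y, 2)==1 && x!=1 ) return 0;   /* ELSE NULL  */
      if( iif3(x, y, 0)==1 && x!=1 ) return 0;   /* ELSE false */
    }
  }
  return 1;
}

The ELSE-arm restriction is exactly what sqlite3ExprIsNotTrue enforces above: with ELSE 1, iif(x,y,1) can be true while x is false, and the implication would fail.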
@@ -115302,6 +116911,9 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(
   ){
     return 1;
   }
+  if( sqlite3ExprIsIIF(pParse->db, pE1) ){
+    return sqlite3ExprImpliesExpr(pParse,pE1->x.pList->a[0].pExpr,pE2,iTab);
+  }
   return 0;
 }
 
@@ -115769,7 +117381,9 @@ static void findOrCreateAggInfoColumn(
 ){
   struct AggInfo_col *pCol;
   int k;
+  int mxTerm = pParse->db->aLimit[SQLITE_LIMIT_COLUMN];
 
+  assert( mxTerm <= SMXV(i16) );
   assert( pAggInfo->iFirstReg==0 );
   pCol = pAggInfo->aCol;
   for(k=0; k<pAggInfo->nColumn; k++, pCol++){
@@ -115787,6 +117401,10 @@ static void findOrCreateAggInfoColumn(
     assert( pParse->db->mallocFailed );
     return;
   }
+  if( k>mxTerm ){
+    sqlite3ErrorMsg(pParse, "more than %d aggregate terms", mxTerm);
+    k = mxTerm;
+  }
   pCol = &pAggInfo->aCol[k];
   assert( ExprUseYTab(pExpr) );
   pCol->pTab = pExpr->y.pTab;
@@ -115820,6 +117438,7 @@ static void findOrCreateAggInfoColumn(
   if( pExpr->op==TK_COLUMN ){
     pExpr->op = TK_AGG_COLUMN;
   }
+  assert( k <= SMXV(pExpr->iAgg) );
   pExpr->iAgg = (i16)k;
 }
 
@@ -115904,13 +117523,19 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
       ** function that is already in the pAggInfo structure
       */
       struct AggInfo_func *pItem = pAggInfo->aFunc;
+      int mxTerm = pParse->db->aLimit[SQLITE_LIMIT_COLUMN];
+      assert( mxTerm <= SMXV(i16) );
       for(i=0; i<pAggInfo->nFunc; i++, pItem++){
         if( NEVER(pItem->pFExpr==pExpr) ) break;
         if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){
           break;
         }
       }
-      if( i>=pAggInfo->nFunc ){
+      if( i>mxTerm ){
+        sqlite3ErrorMsg(pParse, "more than %d aggregate terms", mxTerm);
+        i = mxTerm;
+        assert( i<pAggInfo->nFunc );
+      }else if( i>=pAggInfo->nFunc ){
         /* pExpr is original.  Make a new entry in pAggInfo->aFunc[]
         */
         u8 enc = ENC(pParse->db);
@@ -115964,6 +117589,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
       */
       assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
       ExprSetVVAProperty(pExpr, EP_NoReduce);
+      assert( i <= SMXV(pExpr->iAgg) );
       pExpr->iAgg = (i16)i;
       pExpr->pAggInfo = pAggInfo;
       return WRC_Prune;
@@ -116674,13 +118300,13 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
   assert( pNew->nCol>0 );
   nAlloc = (((pNew->nCol-1)/8)*8)+8;
   assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 );
-  pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc);
+  pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*(u32)nAlloc);
   pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName);
   if( !pNew->aCol || !pNew->zName ){
     assert( db->mallocFailed );
     goto exit_begin_add_column;
   }
-  memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol);
+  memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*(size_t)pNew->nCol);
   for(i=0; i<pNew->nCol; i++){
     Column *pCol = &pNew->aCol[i];
     pCol->zCnName = sqlite3DbStrDup(db, pCol->zCnName);
@@ -116775,10 +118401,8 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn(
   ** altered.  Set iCol to be the index of the column being renamed */
   zOld = sqlite3NameFromToken(db, pOld);
   if( !zOld ) goto exit_rename_column;
-  for(iCol=0; iCol<pTab->nCol; iCol++){
-    if( 0==sqlite3StrICmp(pTab->aCol[iCol].zCnName, zOld) ) break;
-  }
-  if( iCol==pTab->nCol ){
+  iCol = sqlite3ColumnIndex(pTab, zOld);
+  if( iCol<0 ){
     sqlite3ErrorMsg(pParse, "no such column: \"%T\"", pOld);
     goto exit_rename_column;
   }
@@ -117281,6 +118905,7 @@ static int renameParseSql(
   int bTemp                       /* True if SQL is from temp schema */
 ){
   int rc;
+  u64 flags;
 
   sqlite3ParseObjectInit(p, db);
   if( zSql==0 ){
     return SQLITE_NOMEM;
   }
   if( sqlite3StrNICmp(zSql,"CREATE ",7)!=0 ){
     return SQLITE_CORRUPT_BKPT;
   }
-  db->init.iDb = bTemp ? 
1 : sqlite3FindDbName(db, zDb); + if( bTemp ){ + db->init.iDb = 1; + }else{ + int iDb = sqlite3FindDbName(db, zDb); + assert( iDb>=0 && iDb<=0xff ); + db->init.iDb = (u8)iDb; + } p->eParseMode = PARSE_MODE_RENAME; p->db = db; p->nQueryLoop = 1; + flags = db->flags; + testcase( (db->flags & SQLITE_Comments)==0 && strstr(zSql," /* ")!=0 ); + db->flags |= SQLITE_Comments; rc = sqlite3RunParser(p, zSql); + db->flags = flags; if( db->mallocFailed ) rc = SQLITE_NOMEM; if( rc==SQLITE_OK && NEVER(p->pNewTable==0 && p->pNewIndex==0 && p->pNewTrigger==0) @@ -117356,10 +118991,11 @@ static int renameEditSql( nQuot = sqlite3Strlen30(zQuot)-1; } - assert( nQuot>=nNew ); - zOut = sqlite3DbMallocZero(db, nSql + pRename->nList*nQuot + 1); + assert( nQuot>=nNew && nSql>=0 && nNew>=0 ); + zOut = sqlite3DbMallocZero(db, (u64)nSql + pRename->nList*(u64)nQuot + 1); }else{ - zOut = (char*)sqlite3DbMallocZero(db, (nSql*2+1) * 3); + assert( nSql>0 ); + zOut = (char*)sqlite3DbMallocZero(db, (2*(u64)nSql + 1) * 3); if( zOut ){ zBuf1 = &zOut[nSql*2+1]; zBuf2 = &zOut[nSql*4+2]; @@ -117371,16 +119007,17 @@ static int renameEditSql( ** with the new column name, or with single-quoted versions of themselves. ** All that remains is to construct and return the edited SQL string. */ if( zOut ){ - int nOut = nSql; - memcpy(zOut, zSql, nSql); + i64 nOut = nSql; + assert( nSql>0 ); + memcpy(zOut, zSql, (size_t)nSql); while( pRename->pList ){ int iOff; /* Offset of token to replace in zOut */ - u32 nReplace; + i64 nReplace; const char *zReplace; RenameToken *pBest = renameColumnTokenNext(pRename); if( zNew ){ - if( bQuote==0 && sqlite3IsIdChar(*pBest->t.z) ){ + if( bQuote==0 && sqlite3IsIdChar(*(u8*)pBest->t.z) ){ nReplace = nNew; zReplace = zNew; }else{ @@ -117398,14 +119035,15 @@ static int renameEditSql( memcpy(zBuf1, pBest->t.z, pBest->t.n); zBuf1[pBest->t.n] = 0; sqlite3Dequote(zBuf1); - sqlite3_snprintf(nSql*2, zBuf2, "%Q%s", zBuf1, + assert( nSql < 0x15555554 /* otherwise malloc would have failed */ ); + sqlite3_snprintf((int)(nSql*2), zBuf2, "%Q%s", zBuf1, pBest->t.z[pBest->t.n]=='\'' ? " " : "" ); zReplace = zBuf2; nReplace = sqlite3Strlen30(zReplace); } - iOff = pBest->t.z - zSql; + iOff = (int)(pBest->t.z - zSql); if( pBest->t.n!=nReplace ){ memmove(&zOut[iOff + nReplace], &zOut[iOff + pBest->t.n], nOut - (iOff + pBest->t.n) @@ -117431,11 +119069,12 @@ static int renameEditSql( ** Set all pEList->a[].fg.eEName fields in the expression-list to val. 
 */
static void renameSetENames(ExprList *pEList, int val){
+  assert( val==ENAME_NAME || val==ENAME_TAB || val==ENAME_SPAN );
   if( pEList ){
     int i;
     for(i=0; i<pEList->nExpr; i++){
       assert( val==ENAME_NAME || pEList->a[i].fg.eEName==ENAME_NAME );
-      pEList->a[i].fg.eEName = val;
+      pEList->a[i].fg.eEName = val&0x3;
     }
   }
 }
@@ -117509,8 +119148,9 @@ static int renameResolveTrigger(Parse *pParse){
       int i;
       for(i=0; i<pStep->pFrom->nSrc && rc==SQLITE_OK; i++){
         SrcItem *p = &pStep->pFrom->a[i];
-        if( p->pSelect ){
-          sqlite3SelectPrep(pParse, p->pSelect, 0);
+        if( p->fg.isSubquery ){
+          assert( p->u4.pSubq!=0 );
+          sqlite3SelectPrep(pParse, p->u4.pSubq->pSelect, 0);
         }
       }
     }
@@ -117578,8 +119218,12 @@ static void renameWalkTrigger(Walker *pWalker, Trigger *pTrigger){
     }
     if( pStep->pFrom ){
       int i;
-      for(i=0; i<pStep->pFrom->nSrc; i++){
-        sqlite3WalkSelect(pWalker, pStep->pFrom->a[i].pSelect);
+      SrcList *pFrom = pStep->pFrom;
+      for(i=0; i<pFrom->nSrc; i++){
+        if( pFrom->a[i].fg.isSubquery ){
+          assert( pFrom->a[i].u4.pSubq!=0 );
+          sqlite3WalkSelect(pWalker, pFrom->a[i].u4.pSubq->pSelect);
+        }
       }
     }
   }
@@ -117687,7 +119331,7 @@ static void renameColumnFunc(
   if( sParse.pNewTable ){
     if( IsView(sParse.pNewTable) ){
       Select *pSelect = sParse.pNewTable->u.view.pSelect;
-      pSelect->selFlags &= ~SF_View;
+      pSelect->selFlags &= ~(u32)SF_View;
       sParse.rc = SQLITE_OK;
       sqlite3SelectPrep(&sParse, pSelect, 0);
       rc = (db->mallocFailed ? SQLITE_NOMEM : sParse.rc);
@@ -117826,7 +119470,7 @@ static int renameTableSelectCb(Walker *pWalker, Select *pSelect){
   }
   for(i=0; i<pSrc->nSrc; i++){
     SrcItem *pItem = &pSrc->a[i];
-    if( pItem->pTab==p->pTab ){
+    if( pItem->pSTab==p->pTab ){
       renameTokenFind(pWalker->pParse, p, pItem->zName);
     }
   }
@@ -117905,7 +119549,7 @@ static void renameTableFunc(
       sNC.pParse = &sParse;
 
       assert( pSelect->selFlags & SF_View );
-      pSelect->selFlags &= ~SF_View;
+      pSelect->selFlags &= ~(u32)SF_View;
       sqlite3SelectPrep(&sParse, pTab->u.view.pSelect, &sNC);
       if( sParse.nErr ){
         rc = sParse.rc;
@@ -118078,7 +119722,7 @@ static void renameQuotefixFunc(
     if( sParse.pNewTable ){
       if( IsView(sParse.pNewTable) ){
         Select *pSelect = sParse.pNewTable->u.view.pSelect;
-        pSelect->selFlags &= ~SF_View;
+        pSelect->selFlags &= ~(u32)SF_View;
         sParse.rc = SQLITE_OK;
         sqlite3SelectPrep(&sParse, pSelect, 0);
         rc = (db->mallocFailed ? SQLITE_NOMEM : sParse.rc);
@@ -118177,10 +119821,10 @@ static void renameTableTest(
   if( zDb && zInput ){
     int rc;
     Parse sParse;
-    int flags = db->flags;
+    u64 flags = db->flags;
     if( bNoDQS ) db->flags &= ~(SQLITE_DqsDML|SQLITE_DqsDDL);
     rc = renameParseSql(&sParse, zDb, db, zInput, bTemp);
-    db->flags |= (flags & (SQLITE_DqsDML|SQLITE_DqsDDL));
+    db->flags = flags;
     if( rc==SQLITE_OK ){
       if( isLegacy==0 && sParse.pNewTable && IsView(sParse.pNewTable) ){
         NameContext sNC;
@@ -118672,7 +120316,8 @@ static void openStatTable(
         sqlite3NestedParse(pParse,
             "CREATE TABLE %Q.%s(%s)", pDb->zDbSName, zTab, aTable[i].zCols
         );
-        aRoot[i] = (u32)pParse->regRoot;
+        assert( pParse->isCreate || pParse->nErr );
+        aRoot[i] = (u32)pParse->u1.cr.regRoot;
         aCreateTbl[i] = OPFLAG_P2ISREG;
       }
     }else{
@@ -118863,7 +120508,7 @@ static void statInit(
   int nCol;                    /* Number of columns in index being sampled */
   int nKeyCol;                 /* Number of key columns */
   int nColUp;                  /* nCol rounded up for alignment */
-  int n;                       /* Bytes of space to allocate */
+  i64 n;                       /* Bytes of space to allocate */
   sqlite3 *db = sqlite3_context_db_handle(context);   /* Database connection */
#ifdef SQLITE_ENABLE_STAT4
  /* Maximum number of samples. 
0 if STAT4 data is not collected */ @@ -118899,7 +120544,7 @@ static void statInit( p->db = db; p->nEst = sqlite3_value_int64(argv[2]); p->nRow = 0; - p->nLimit = sqlite3_value_int64(argv[3]); + p->nLimit = sqlite3_value_int(argv[3]); p->nCol = nCol; p->nKeyCol = nKeyCol; p->nSkipAhead = 0; @@ -120032,16 +121677,6 @@ static void decodeIntArray( while( z[0]!=0 && z[0]!=' ' ) z++; while( z[0]==' ' ) z++; } - - /* Set the bLowQual flag if the peak number of rows obtained - ** from a full equality match is so large that a full table scan - ** seems likely to be faster than using the index. - */ - if( aLog[0] > 66 /* Index has more than 100 rows */ - && aLog[0] <= aLog[nOut-1] /* And only a single value seen */ - ){ - pIndex->bLowQual = 1; - } } } @@ -120254,12 +121889,13 @@ static int loadStatTbl( while( sqlite3_step(pStmt)==SQLITE_ROW ){ int nIdxCol = 1; /* Number of columns in stat4 records */ - char *zIndex; /* Index name */ - Index *pIdx; /* Pointer to the index object */ - int nSample; /* Number of samples */ - int nByte; /* Bytes of space required */ - int i; /* Bytes of space required */ - tRowcnt *pSpace; + char *zIndex; /* Index name */ + Index *pIdx; /* Pointer to the index object */ + int nSample; /* Number of samples */ + i64 nByte; /* Bytes of space required */ + i64 i; /* Bytes of space required */ + tRowcnt *pSpace; /* Available allocated memory space */ + u8 *pPtr; /* Available memory as a u8 for easier manipulation */ zIndex = (char *)sqlite3_column_text(pStmt, 0); if( zIndex==0 ) continue; @@ -120279,7 +121915,7 @@ static int loadStatTbl( } pIdx->nSampleCol = nIdxCol; pIdx->mxSample = nSample; - nByte = sizeof(IndexSample) * nSample; + nByte = ROUND8(sizeof(IndexSample) * nSample); nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */ @@ -120288,7 +121924,10 @@ static int loadStatTbl( sqlite3_finalize(pStmt); return SQLITE_NOMEM_BKPT; } - pSpace = (tRowcnt*)&pIdx->aSample[nSample]; + pPtr = (u8*)pIdx->aSample; + pPtr += ROUND8(nSample*sizeof(pIdx->aSample[0])); + pSpace = (tRowcnt*)pPtr; + assert( EIGHT_BYTE_ALIGNMENT( pSpace ) ); pIdx->aAvgEq = pSpace; pSpace += nIdxCol; pIdx->pTable->tabFlags |= TF_HasStat4; for(i=0; iaDb, sizeof(db->aDb[0])*2); }else{ - aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); + aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(1+(i64)db->nDb)); if( aNew==0 ) return; } db->aDb = aNew; @@ -120652,6 +122291,12 @@ static void attachFunc( sqlite3_free(zErr); return; } + if( (db->flags & SQLITE_AttachWrite)==0 ){ + flags &= ~(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE); + flags |= SQLITE_OPEN_READONLY; + }else if( (db->flags & SQLITE_AttachCreate)==0 ){ + flags &= ~SQLITE_OPEN_CREATE; + } assert( pVfs ); flags |= SQLITE_OPEN_MAIN_DB; rc = sqlite3BtreeOpen(pVfs, zPath, db, &pNew->pBt, 0, flags); @@ -120698,21 +122343,19 @@ static void attachFunc( sqlite3BtreeEnterAll(db); db->init.iDb = 0; db->mDbFlags &= ~(DBFLAG_SchemaKnownOk); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + if( db->setlkFlags & SQLITE_SETLK_BLOCK_ON_CONNECT ){ + int val = 1; + sqlite3_file *fd = sqlite3PagerFile(sqlite3BtreePager(pNew->pBt)); + sqlite3OsFileControlHint(fd, SQLITE_FCNTL_BLOCK_ON_CONNECT, &val); + } +#endif if( !REOPEN_AS_MEMDB(db) ){ rc = sqlite3Init(db, &zErrDyn); } sqlite3BtreeLeaveAll(db); assert( zErrDyn==0 || rc!=SQLITE_OK ); } -#ifdef SQLITE_USER_AUTHENTICATION - if( rc==SQLITE_OK && !REOPEN_AS_MEMDB(db) ){ - u8 newAuth = 0; - rc = sqlite3UserAuthCheckLogin(db, zName, &newAuth); - if( 
newAuth<db->auth.authLevel ){
-      rc = SQLITE_AUTH_USER;
-    }
-  }
-#endif
   if( rc ){
     if( ALWAYS(!REOPEN_AS_MEMDB(db)) ){
       int iDb = db->nDb - 1;
@@ -120956,20 +122599,21 @@ static int fixSelectCb(Walker *p, Select *pSelect){
 
   if( NEVER(pList==0) ) return WRC_Continue;
   for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
-    if( pFix->bTemp==0 ){
-      if( pItem->zDatabase ){
-        if( iDb!=sqlite3FindDbName(db, pItem->zDatabase) ){
+    if( pFix->bTemp==0 && pItem->fg.isSubquery==0 ){
+      if( pItem->fg.fixedSchema==0 && pItem->u4.zDatabase!=0 ){
+        if( iDb!=sqlite3FindDbName(db, pItem->u4.zDatabase) ){
           sqlite3ErrorMsg(pFix->pParse,
               "%s %T cannot reference objects in database %s",
-              pFix->zType, pFix->pName, pItem->zDatabase);
+              pFix->zType, pFix->pName, pItem->u4.zDatabase);
           return WRC_Abort;
         }
-        sqlite3DbFree(db, pItem->zDatabase);
-        pItem->zDatabase = 0;
+        sqlite3DbFree(db, pItem->u4.zDatabase);
         pItem->fg.notCte = 1;
+        pItem->fg.hadSchema = 1;
       }
-      pItem->pSchema = pFix->pSchema;
+      pItem->u4.pSchema = pFix->pSchema;
       pItem->fg.fromDDL = 1;
+      pItem->fg.fixedSchema = 1;
     }
#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER)
     if( pList->a[i].fg.isUsing==0
@@ -121209,11 +122853,7 @@ SQLITE_PRIVATE int sqlite3AuthReadCol(
   int rc;                    /* Auth callback return code */
 
   if( db->init.busy ) return SQLITE_OK;
-  rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext
-#ifdef SQLITE_USER_AUTHENTICATION
-                ,db->auth.zAuthUser
-#endif
-                );
+  rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext);
   if( rc==SQLITE_DENY ){
     char *z = sqlite3_mprintf("%s.%s", zTab, zCol);
     if( db->nDb>2 || iDb!=0 ) z = sqlite3_mprintf("%s.%z", zDb, z);
@@ -121262,7 +122902,7 @@ SQLITE_PRIVATE void sqlite3AuthRead(
   assert( pTabList );
   for(iSrc=0; iSrc<pTabList->nSrc; iSrc++){
     if( pExpr->iTable==pTabList->a[iSrc].iCursor ){
-      pTab = pTabList->a[iSrc].pTab;
+      pTab = pTabList->a[iSrc].pSTab;
       break;
     }
   }
@@ -121320,11 +122960,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck(
   testcase( zArg3==0 );
   testcase( pParse->zAuthContext==0 );
-  rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext
-#ifdef SQLITE_USER_AUTHENTICATION
-                ,db->auth.zAuthUser
-#endif
-                );
+  rc = db->xAuth(db->pAuthArg,code,zArg1,zArg2,zArg3,pParse->zAuthContext);
   if( rc==SQLITE_DENY ){
     sqlite3ErrorMsg(pParse, "not authorized");
     pParse->rc = SQLITE_AUTH;
@@ -121436,6 +123072,7 @@ static SQLITE_NOINLINE void lockTable(
     }
   }
 
+  assert( pToplevel->nTableLock < 0x7fff0000 );
   nBytes = sizeof(TableLock) * (pToplevel->nTableLock+1);
   pToplevel->aTableLock =
       sqlite3DbReallocOrFree(pToplevel->db, pToplevel->aTableLock, nBytes);
@@ -121536,10 +123173,12 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
       || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort));
   if( v ){
     if( pParse->bReturning ){
-      Returning *pReturning = pParse->u1.pReturning;
+      Returning *pReturning;
       int addrRewind;
       int reg;
 
+      assert( !pParse->isCreate );
+      pReturning = pParse->u1.d.pReturning;
       if( pReturning->nRetCol ){
         sqlite3VdbeAddOp0(v, OP_FkCheck);
         addrRewind =
@@ -121557,17 +123196,6 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
     }
     sqlite3VdbeAddOp0(v, OP_Halt);
 
-#if SQLITE_USER_AUTHENTICATION && !defined(SQLITE_OMIT_SHARED_CACHE)
-    if( pParse->nTableLock>0 && db->init.busy==0 ){
-      sqlite3UserAuthInit(db);
-      if( db->auth.authLevel<UAUTH_User ){
-        sqlite3ErrorMsg(pParse, "user not authenticated");
-        pParse->rc = SQLITE_AUTH_USER;
-        return;
-      }
-    }
-#endif
-
     /* The cookie mask contains one bit for each database file open.
     ** (Bit 0 is for main, bit 1 is for temp, and so forth.)  Bits are
    ** set for each database that is used.  Generate code to start a
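With the user-authentication extension removed, xAuth is back to its single documented shape: five strings plus the application pointer. A minimal sketch of a compatible callback -- the table and column names are invented for the example:

#include "sqlite3.h"
#include <string.h>

/* For SQLITE_READ, z1 is the table name and z2 the column name. */
static int xAuthDemo(void *pArg, int code, const char *z1, const char *z2,
                     const char *z3, const char *z4){
  (void)pArg; (void)z3; (void)z4;
  if( code==SQLITE_READ && z1 && z2
   && strcmp(z1, "employee")==0 && strcmp(z2, "salary")==0 ){
    return SQLITE_DENY;
  }
  return SQLITE_OK;
}

int installAuthDemo(sqlite3 *db){
  return sqlite3_set_authorizer(db, xAuthDemo, 0);
}

Returning SQLITE_DENY from the SQLITE_READ case is what drives the error path in sqlite3AuthReadCol above; returning SQLITE_IGNORE would instead substitute a NULL for the column.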
@@ -121626,7 +123254,9 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
     }
     if( pParse->bReturning ){
-      Returning *pRet = pParse->u1.pReturning;
+      Returning *pRet;
+      assert( !pParse->isCreate );
+      pRet = pParse->u1.d.pReturning;
       if( pRet->nRetCol ){
         sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol);
       }
@@ -121696,16 +123326,6 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){
   pParse->nested--;
 }
 
-#if SQLITE_USER_AUTHENTICATION
-/*
-** Return TRUE if zTable is the name of the system table that stores the
-** list of users and their access credentials.
-*/
-SQLITE_PRIVATE int sqlite3UserAuthTable(const char *zTable){
-  return sqlite3_stricmp(zTable, "sqlite_user")==0;
-}
-#endif
-
 /*
 ** Locate the in-memory structure that describes a particular database
 ** table given the name of that table and (optionally) the name of the
@@ -121724,13 +123344,6 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha
   /* All mutexes are required for schema access.  Make sure we hold them. */
   assert( zDatabase!=0 || sqlite3BtreeHoldsAllMutexes(db) );
-#if SQLITE_USER_AUTHENTICATION
-  /* Only the admin user is allowed to know that the sqlite_user table
-  ** exists */
-  if( db->auth.authLevel<UAUTH_Admin && sqlite3UserAuthTable(zName)!=0 ){
-    return 0;
-  }
-#endif
   if( zDatabase ){
     for(i=0; i<db->nDb; i++){
       if( sqlite3StrICmp(zDatabase, db->aDb[i].zDbSName)==0 ) break;
@@ -121865,12 +123478,12 @@ SQLITE_PRIVATE Table *sqlite3LocateTableItem(
   SrcItem *p
 ){
   const char *zDb;
-  assert( p->pSchema==0 || p->zDatabase==0 );
-  if( p->pSchema ){
-    int iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema);
+  if( p->fg.fixedSchema ){
+    int iDb = sqlite3SchemaToIndex(pParse->db, p->u4.pSchema);
     zDb = pParse->db->aDb[iDb].zDbSName;
   }else{
-    zDb = p->zDatabase;
+    assert( !p->fg.isSubquery );
+    zDb = p->u4.zDatabase;
   }
   return sqlite3LocateTable(pParse, flags, p->zName, zDb);
 }
@@ -122458,10 +124071,16 @@ SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table *pTab){
 ** find the (first) offset of that column in index pIdx.  Or return -1
 ** if column iCol is not used in index pIdx.
 */
-SQLITE_PRIVATE i16 sqlite3TableColumnToIndex(Index *pIdx, i16 iCol){
+SQLITE_PRIVATE int sqlite3TableColumnToIndex(Index *pIdx, int iCol){
   int i;
+  i16 iCol16;
+  assert( iCol>=(-1) && iCol<=SQLITE_MAX_COLUMN );
+  assert( pIdx->nColumn<=SQLITE_MAX_COLUMN+1 );
+  iCol16 = iCol;
   for(i=0; i<pIdx->nColumn; i++){
-    if( iCol==pIdx->aiColumn[i] ) return i;
+    if( iCol16==pIdx->aiColumn[i] ){
+      return i;
+    }
   }
   return -1;
 }
@@ -122715,8 +124334,9 @@ SQLITE_PRIVATE void sqlite3StartTable(
   /* If the file format and encoding in the database have not been set,
   ** set them now.
   */
-  reg1 = pParse->regRowid = ++pParse->nMem;
-  reg2 = pParse->regRoot = ++pParse->nMem;
+  assert( pParse->isCreate );
+  reg1 = pParse->u1.cr.regRowid = ++pParse->nMem;
+  reg2 = pParse->u1.cr.regRoot = ++pParse->nMem;
   reg3 = ++pParse->nMem;
   sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT);
   sqlite3VdbeUsesBtree(v, iDb);
@@ -122731,8 +124351,8 @@ SQLITE_PRIVATE void sqlite3StartTable(
   ** The record created does not contain anything yet.  It will be replaced
   ** by the real entry in code generated at sqlite3EndTable().
   **
-  ** The rowid for the new entry is left in register pParse->regRowid.
-  ** The root page number of the new table is left in reg pParse->regRoot.
+  ** The rowid for the new entry is left in register pParse->u1.cr.regRowid.
+  ** The root page of the new table is left in reg pParse->u1.cr.regRoot.
 ** The rowid and root page number values are needed by the code that
 ** sqlite3EndTable will generate.
   */
@@ -122743,7 +124363,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
#endif
   {
     assert( !pParse->bReturning );
-    pParse->u1.addrCrTab =
+    pParse->u1.cr.addrCrTab =
        sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, reg2, BTREE_INTKEY);
   }
   sqlite3OpenSchemaTable(pParse, iDb);
@@ -122821,7 +124441,8 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){
     sqlite3ExprListDelete(db, pList);
     return;
   }
-  pParse->u1.pReturning = pRet;
+  assert( !pParse->isCreate );
+  pParse->u1.d.pReturning = pRet;
   pRet->pParse = pParse;
   pRet->pReturnEL = pList;
   sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet);
@@ -122863,7 +124484,6 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
   char *zType;
   Column *pCol;
   sqlite3 *db = pParse->db;
-  u8 hName;
   Column *aNew;
   u8 eType = COLTYPE_CUSTOM;
   u8 szEst = 1;
@@ -122917,13 +124537,10 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
   memcpy(z, sName.z, sName.n);
   z[sName.n] = 0;
   sqlite3Dequote(z);
-  hName = sqlite3StrIHash(z);
-  for(i=0; i<p->nCol; i++){
-    if( p->aCol[i].hName==hName && sqlite3StrICmp(z, p->aCol[i].zCnName)==0 ){
-      sqlite3ErrorMsg(pParse, "duplicate column name: %s", z);
-      sqlite3DbFree(db, z);
-      return;
-    }
+  if( p->nCol && sqlite3ColumnIndex(p, z)>=0 ){
+    sqlite3ErrorMsg(pParse, "duplicate column name: %s", z);
+    sqlite3DbFree(db, z);
+    return;
   }
   aNew = sqlite3DbRealloc(db,p->aCol,((i64)p->nCol+1)*sizeof(p->aCol[0]));
   if( aNew==0 ){
@@ -122934,7 +124551,7 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
   pCol = &p->aCol[p->nCol];
   memset(pCol, 0, sizeof(p->aCol[0]));
   pCol->zCnName = z;
-  pCol->hName = hName;
+  pCol->hName = sqlite3StrIHash(z);
   sqlite3ColumnPropertiesFromName(p, pCol);
 
   if( sType.n==0 ){
@@ -122958,9 +124575,14 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
     pCol->affinity = sqlite3AffinityType(zType, pCol);
     pCol->colFlags |= COLFLAG_HASTYPE;
   }
+  if( p->nCol<=0xff ){
+    u8 h = pCol->hName % sizeof(p->aHx);
+    p->aHx[h] = p->nCol;
+  }
   p->nCol++;
   p->nNVCol++;
-  pParse->constraintName.n = 0;
+  assert( pParse->isCreate );
+  pParse->u1.cr.constraintName.n = 0;
 }
 
 /*
@@ -123224,15 +124846,11 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey(
     assert( pCExpr!=0 );
     sqlite3StringToId(pCExpr);
     if( pCExpr->op==TK_ID ){
-      const char *zCName;
       assert( !ExprHasProperty(pCExpr, EP_IntValue) );
-      zCName = pCExpr->u.zToken;
-      for(iCol=0; iCol<pTab->nCol; iCol++){
-        if( sqlite3StrICmp(zCName, pTab->aCol[iCol].zCnName)==0 ){
-          pCol = &pTab->aCol[iCol];
-          makeColumnPartOfPrimaryKey(pParse, pCol);
-          break;
-        }
+      iCol = sqlite3ColumnIndex(pTab, pCExpr->u.zToken);
+      if( iCol>=0 ){
+        pCol = &pTab->aCol[iCol];
+        makeColumnPartOfPrimaryKey(pParse, pCol);
       }
     }
   }
@@ -123284,8 +124902,10 @@ SQLITE_PRIVATE void sqlite3AddCheckConstraint(
       && !sqlite3BtreeIsReadonly(db->aDb[db->init.iDb].pBt)
   ){
     pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr);
-    if( pParse->constraintName.n ){
-      sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1);
+    assert( pParse->isCreate );
+    if( pParse->u1.cr.constraintName.n ){
+      sqlite3ExprListSetName(pParse, pTab->pCheck,
+                             &pParse->u1.cr.constraintName, 1);
     }else{
       Token t;
       for(zStart++; sqlite3Isspace(zStart[0]); zStart++){}
@@ -123480,7 +125100,8 @@ static void identPut(char *z, int *pIdx, char *zSignedIdent){
 ** from sqliteMalloc() and must be freed by the calling function.
 */
static char *createTableStmt(sqlite3 *db, Table *p){
-  int i, k, n;
+  int i, k, len;
+  i64 n;
   char *zStmt;
   char *zSep, *zSep2, *zEnd;
   Column *pCol;
@@ -123504,8 +125125,9 @@ static char *createTableStmt(sqlite3 *db, Table *p){
     sqlite3OomFault(db);
     return 0;
   }
-  sqlite3_snprintf(n, zStmt, "CREATE TABLE ");
-  k = sqlite3Strlen30(zStmt);
+  assert( n>14 && n<=0x7fffffff );
+  memcpy(zStmt, "CREATE TABLE ", 13);
+  k = 13;
   identPut(zStmt, &k, p->zName);
   zStmt[k++] = '(';
   for(pCol=p->aCol, i=0; i<p->nCol; i++, pCol++){
@@ -123517,13 +125139,15 @@ static char *createTableStmt(sqlite3 *db, Table *p){
       /* SQLITE_AFF_REAL    */ " REAL",
       /* SQLITE_AFF_FLEXNUM */ " NUM",
     };
-    int len;
     const char *zType;
 
-    sqlite3_snprintf(n-k, &zStmt[k], zSep);
-    k += sqlite3Strlen30(&zStmt[k]);
+    len = sqlite3Strlen30(zSep);
+    assert( k+len<n );
+    memcpy(&zStmt[k], zSep, len);
+    k += len;
     zSep = zSep2;
     identPut(zStmt, &k, pCol->zCnName);
+    assert( k<n );
     assert( pCol->affinity-SQLITE_AFF_BLOB >= 0 );
     assert( pCol->affinity-SQLITE_AFF_BLOB < ArraySize(azType) );
     testcase( pCol->affinity==SQLITE_AFF_BLOB );
@@ -123538,11 +125162,14 @@ static char *createTableStmt(sqlite3 *db, Table *p){
     assert( pCol->affinity==SQLITE_AFF_BLOB
             || pCol->affinity==SQLITE_AFF_FLEXNUM
             || pCol->affinity==sqlite3AffinityType(zType, 0) );
+    assert( k+lennColumn>=N ) return SQLITE_OK;
+  db = pParse->db;
+  assert( N>0 );
+  assert( N <= SQLITE_MAX_COLUMN*2 /* tag-20250221-1 */ );
+  testcase( N==2*pParse->db->aLimit[SQLITE_LIMIT_COLUMN] );
   assert( pIdx->isResized==0 );
-  nByte = (sizeof(char*) + sizeof(LogEst) + sizeof(i16) + 1)*N;
+  nByte = (sizeof(char*) + sizeof(LogEst) + sizeof(i16) + 1)*(u64)N;
   zExtra = sqlite3DbMallocZero(db, nByte);
   if( zExtra==0 ) return SQLITE_NOMEM_BKPT;
   memcpy(zExtra, pIdx->azColl, sizeof(char*)*pIdx->nColumn);
@@ -123569,7 +125201,7 @@ static int resizeIndexObject(sqlite3 *db, Index *pIdx, int N){
   zExtra += sizeof(i16)*N;
   memcpy(zExtra, pIdx->aSortOrder, pIdx->nColumn);
   pIdx->aSortOrder = (u8*)zExtra;
-  pIdx->nColumn = N;
+  pIdx->nColumn = (u16)N;  /* See tag-20250221-1 above for proof of safety */
   pIdx->isResized = 1;
   return SQLITE_OK;
 }
@@ -123735,9 +125367,9 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
   ** into BTREE_BLOBKEY.
   */
   assert( !pParse->bReturning );
-  if( pParse->u1.addrCrTab ){
+  if( pParse->u1.cr.addrCrTab ){
     assert( v );
-    sqlite3VdbeChangeP3(v, pParse->u1.addrCrTab, BTREE_BLOBKEY);
+    sqlite3VdbeChangeP3(v, pParse->u1.cr.addrCrTab, BTREE_BLOBKEY);
   }
 
  /* Locate the PRIMARY KEY index.  Or, if this table was originally
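createTableStmt above follows a size-first, write-second discipline: compute an upper bound n, allocate once, then memcpy the pieces while asserting the bound is never crossed. The same pattern in freestanding form -- a simplified stand-in, not SQLite code:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Join words with ", " into one allocation whose size was computed first. */
char *joinWords(const char **az, int n){
  size_t i, k = 0, cap = 1;                /* 1 byte for the terminator */
  char *z;
  for(i=0; i<(size_t)n; i++) cap += strlen(az[i]) + 2;
  z = malloc(cap);
  if( z==0 ) return 0;
  for(i=0; i<(size_t)n; i++){
    size_t len = strlen(az[i]);
    if( i>0 ){ assert( k+2<cap ); memcpy(&z[k], ", ", 2); k += 2; }
    assert( k+len<cap );
    memcpy(&z[k], az[i], len);
    k += len;
  }
  z[k] = 0;
  return z;
}

In the same spirit, the aHx[] buckets that sqlite3AddColumn now seeds give column lookups a one-probe fast path before any linear scan. A rough sketch of the idea with stand-in types -- SQLite's real hash, case folding, and structure layout differ:

#include <string.h>

typedef struct Tbl {
  const char *azName[256];   /* column names */
  unsigned char aHx[128];    /* hash bucket -> index of last column added */
  int nCol;
} Tbl;

static unsigned nameHash(const char *z){
  unsigned h = 0;
  while( *z ) h = h*251 + (unsigned char)*z++;
  return h;
}
int findColumn(const Tbl *p, const char *zName){
  int i = p->aHx[nameHash(zName) % sizeof(p->aHx)];
  if( i<p->nCol && strcmp(p->azName[i], zName)==0 ) return i;  /* fast hit */
  for(i=0; i<p->nCol; i++){
    if( strcmp(p->azName[i], zName)==0 ) return i;             /* fallback */
  }
  return -1;
}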
@@ -123823,14 +125455,14 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
       pIdx->nColumn = pIdx->nKeyCol;
       continue;
     }
-    if( resizeIndexObject(db, pIdx, pIdx->nKeyCol+n) ) return;
+    if( resizeIndexObject(pParse, pIdx, pIdx->nKeyCol+n) ) return;
     for(i=0, j=pIdx->nKeyCol; i<nPk; i++){
       if( !isDupColumn(pIdx, pIdx->nKeyCol, pPk, i) ){
         testcase( hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) );
         pIdx->aiColumn[j] = pPk->aiColumn[i];
         pIdx->azColl[j] = pPk->azColl[i];
         if( pPk->aSortOrder[i] ){
-          /* See ticket https://www.sqlite.org/src/info/bba7b69f9849b5bf */
+          /* See ticket https://sqlite.org/src/info/bba7b69f9849b5bf */
           pIdx->bAscKeyBug = 1;
         }
         j++;
@@ -123847,7 +125479,7 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
     if( !hasColumn(pPk->aiColumn, nPk, i)
      && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ) nExtra++;
   }
-  if( resizeIndexObject(db, pPk, nPk+nExtra) ) return;
+  if( resizeIndexObject(pParse, pPk, nPk+nExtra) ) return;
   for(i=0, j=nPk; i<pTab->nCol; i++){
     if( !hasColumn(pPk->aiColumn, j, i)
      && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0
@@ -124177,7 +125809,7 @@ SQLITE_PRIVATE void sqlite3EndTable(
 
   /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT
   ** statement to populate the new table. The root-page number for the
-  ** new table is in register pParse->regRoot.
+  ** new table is in register pParse->u1.cr.regRoot.
   **
   ** Once the SELECT has been coded by sqlite3Select(), it is in a
   ** suitable state to query for the column names and types to be used
@@ -124208,7 +125840,8 @@ SQLITE_PRIVATE void sqlite3EndTable(
     regRec = ++pParse->nMem;
     regRowid = ++pParse->nMem;
     sqlite3MayAbort(pParse);
-    sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->regRoot, iDb);
+    assert( pParse->isCreate );
+    sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->u1.cr.regRoot, iDb);
     sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG);
     addrTop = sqlite3VdbeCurrentAddr(v) + 1;
     sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop);
@@ -124253,6 +125886,7 @@ SQLITE_PRIVATE void sqlite3EndTable(
     ** schema table.  We just need to update that slot with all
     ** the information we've collected.
     */
+    assert( pParse->isCreate );
    sqlite3NestedParse(pParse,
        "UPDATE %Q." LEGACY_SCHEMA_TABLE
        " SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q"
@@ -124261,9 +125895,9 @@ SQLITE_PRIVATE void sqlite3EndTable(
       zType,
       p->zName,
       p->zName,
-      pParse->regRoot,
+      pParse->u1.cr.regRoot,
       zStmt,
-      pParse->regRowid
+      pParse->u1.cr.regRowid
     );
     sqlite3DbFree(db, zStmt);
     sqlite3ChangeCookie(pParse, iDb);
@@ -124855,6 +126489,8 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView,
   }
   assert( pParse->nErr==0 );
   assert( pName->nSrc==1 );
+  assert( pName->a[0].fg.fixedSchema==0 );
+  assert( pName->a[0].fg.isSubquery==0 );
   if( sqlite3ReadSchema(pParse) ) goto exit_drop_table;
   if( noErr ) db->suppressErr++;
   assert( isView==0 || isView==LOCATE_VIEW );
@@ -124863,7 +126499,7 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView,
 
   if( pTab==0 ){
     if( noErr ){
-      sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase);
+      sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].u4.zDatabase);
       sqlite3ForceNotReadOnly(pParse);
     }
     goto exit_drop_table;
@@ -125001,7 +126637,7 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey(
   }else{
     nCol = pFromCol->nExpr;
   }
-  nByte = sizeof(*pFKey) + (nCol-1)*sizeof(pFKey->aCol[0]) + pTo->n + 1;
+  nByte = SZ_FKEY(nCol) + pTo->n + 1;
   if( pToCol ){
     for(i=0; i<pToCol->nExpr; i++){
       nByte += sqlite3Strlen30(pToCol->a[i].zEName) + 1;
     }
   }
@@ -125203,7 +126839,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){
     ** not work for UNIQUE constraint indexes on WITHOUT ROWID tables
     ** with DESC primary keys, since those indexes have there keys in
     ** a different order from the main table.
-    ** See ticket: https://www.sqlite.org/src/info/bba7b69f9849b5bf
+    ** See ticket: https://sqlite.org/src/info/bba7b69f9849b5bf
     */
     sqlite3VdbeAddOp1(v, OP_SeekEnd, iIdx);
   }
@@ -125227,13 +126863,14 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){
 */
SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(
   sqlite3 *db,         /* Database connection */
-  i16 nCol,            /* Total number of columns in the index */
+  int nCol,            /* Total number of columns in the index */
   int nExtra,          /* Number of bytes of extra space to alloc */
   char **ppExtra       /* Pointer to the "extra" space */
 ){
   Index *p;            /* Allocated index object */
-  int nByte;           /* Bytes of space for Index object + arrays */
+  i64 nByte;           /* Bytes of space for Index object + arrays */
 
+  assert( nCol <= 2*db->aLimit[SQLITE_LIMIT_COLUMN] );
   nByte = ROUND8(sizeof(Index)) +              /* Index structure   */
           ROUND8(sizeof(char*)*nCol) +         /* Index.azColl      */
           ROUND8(sizeof(LogEst)*(nCol+1) +     /* Index.aiRowLogEst */
@@ -125246,8 +126883,9 @@ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(
     p->aiRowLogEst = (LogEst*)pExtra; pExtra += sizeof(LogEst)*(nCol+1);
     p->aiColumn = (i16*)pExtra;       pExtra += sizeof(i16)*nCol;
     p->aSortOrder = (u8*)pExtra;
-    p->nColumn = nCol;
-    p->nKeyCol = nCol - 1;
+    assert( nCol>0 );
+    p->nColumn = (u16)nCol;
+    p->nKeyCol = (u16)(nCol - 1);
     *ppExtra = ((char*)p) + nByte;
   }
   return p;
}
@@ -125387,9 +127025,6 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
   if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0
    && db->init.busy==0
    && pTblName!=0
-#if SQLITE_USER_AUTHENTICATION
-   && sqlite3UserAuthTable(pTab->zName)==0
-#endif
  ){
     sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName);
     goto exit_create_index;
   }
@@ -125954,15 +127589,17 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists
   }
   assert( pParse->nErr==0 );   /* Never called with prior non-OOM errors */
   assert( pName->nSrc==1 );
+  assert( pName->a[0].fg.fixedSchema==0 );
+  assert( pName->a[0].fg.isSubquery==0 );
   if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
     goto exit_drop_index;
   }
-  pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase);
+  pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].u4.zDatabase);
   if( pIndex==0 ){
     if( !ifExists ){
       sqlite3ErrorMsg(pParse, "no such index: %S", pName->a);
     }else{
-      sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase);
+      sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].u4.zDatabase);
       sqlite3ForceNotReadOnly(pParse);
     }
     pParse->checkSchema = 1;
@@ -126059,12 +127696,11 @@ SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token *
   sqlite3 *db = pParse->db;
   int i;
   if( pList==0 ){
-    pList = sqlite3DbMallocZero(db, sizeof(IdList) );
+    pList = sqlite3DbMallocZero(db, SZ_IDLIST(1));
     if( pList==0 ) return 0;
   }else{
     IdList *pNew;
-    pNew = sqlite3DbRealloc(db, pList,
-        sizeof(IdList) + pList->nId*sizeof(pList->a));
+    pNew = sqlite3DbRealloc(db, pList, SZ_IDLIST(pList->nId+1));
     if( pNew==0 ){
       sqlite3IdListDelete(db, pList);
       return 0;
@@ -126086,7 +127722,6 @@ SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){
   int i;
   assert( db!=0 );
   if( pList==0 ) return;
-  assert( pList->eU4!=EU4_EXPR ); /* EU4_EXPR mode is not currently used */
   for(i=0; i<pList->nId; i++){
     sqlite3DbFree(db, pList->a[i].zName);
   }
@@ -126164,8 +127799,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(
     return 0;
   }
   if( nAlloc>SQLITE_MAX_SRCLIST ) nAlloc = SQLITE_MAX_SRCLIST;
-  pNew = sqlite3DbRealloc(db, pSrc,
-      sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) );
+  pNew = sqlite3DbRealloc(db, pSrc, SZ_SRCLIST(nAlloc));
   if( pNew==0 ){
     assert( db->mallocFailed );
     return 0;
@@ -126240,7 +127874,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(
   assert( pParse->db!=0 );
   db = pParse->db;
   if( pList==0 ){
-    pList = sqlite3DbMallocRawNN(pParse->db, sizeof(SrcList) );
+    pList = sqlite3DbMallocRawNN(pParse->db, SZ_SRCLIST(1));
     if( pList==0 ) return 0;
     pList->nAlloc = 1;
     pList->nSrc = 1;
@@ -126259,12 +127893,14 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(
   if( pDatabase && pDatabase->z==0 ){
     pDatabase = 0;
   }
+  assert( pItem->fg.fixedSchema==0 );
+  assert( pItem->fg.isSubquery==0 );
   if( pDatabase ){
     pItem->zName = sqlite3NameFromToken(db, pDatabase);
-    pItem->zDatabase = sqlite3NameFromToken(db, pTable);
+    pItem->u4.zDatabase = sqlite3NameFromToken(db, pTable);
   }else{
     pItem->zName = sqlite3NameFromToken(db, pTable);
-    pItem->zDatabase = 0;
+    pItem->u4.zDatabase = 0;
   }
   return pList;
 }
@@ -126280,13 +127916,40 @@ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){
     for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
       if( pItem->iCursor>=0 ) continue;
       pItem->iCursor = pParse->nTab++;
-      if( pItem->pSelect ){
-        sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc);
+      if( pItem->fg.isSubquery ){
+        assert( pItem->u4.pSubq!=0 );
+        assert( pItem->u4.pSubq->pSelect!=0 );
+        assert( pItem->u4.pSubq->pSelect->pSrc!=0 );
+        sqlite3SrcListAssignCursors(pParse, pItem->u4.pSubq->pSelect->pSrc);
       }
     }
   }
 }
 
+/*
+** Delete a Subquery object and its substructure.
+*/
+SQLITE_PRIVATE void sqlite3SubqueryDelete(sqlite3 *db, Subquery *pSubq){
+  assert( pSubq!=0 && pSubq->pSelect!=0 );
+  sqlite3SelectDelete(db, pSubq->pSelect);
+  sqlite3DbFree(db, pSubq);
+}
+
+/*
+** Remove a Subquery from a SrcItem.  Return the associated Select object.
+** The returned Select becomes the responsibility of the caller.
+*/
+SQLITE_PRIVATE Select *sqlite3SubqueryDetach(sqlite3 *db, SrcItem *pItem){
+  Select *pSel;
+  assert( pItem!=0 );
+  assert( pItem->fg.isSubquery );
+  pSel = pItem->u4.pSubq->pSelect;
+  sqlite3DbFree(db, pItem->u4.pSubq);
+  pItem->u4.pSubq = 0;
+  pItem->fg.isSubquery = 0;
+  return pSel;
+}
+
 /*
 ** Delete an entire SrcList including all its substructure.
 */
@@ -126296,13 +127959,24 @@ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){
   assert( db!=0 );
   if( pList==0 ) return;
   for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){
-    if( pItem->zDatabase ) sqlite3DbNNFreeNN(db, pItem->zDatabase);
+
+    /* Check invariants on SrcItem */
+    assert( !pItem->fg.isIndexedBy || !pItem->fg.isTabFunc );
+    assert( !pItem->fg.isCte || !pItem->fg.isIndexedBy );
+    assert( !pItem->fg.fixedSchema || !pItem->fg.isSubquery );
+    assert( !pItem->fg.isSubquery || (pItem->u4.pSubq!=0 &&
+                                      pItem->u4.pSubq->pSelect!=0) );
+
     if( pItem->zName ) sqlite3DbNNFreeNN(db, pItem->zName);
     if( pItem->zAlias ) sqlite3DbNNFreeNN(db, pItem->zAlias);
+    if( pItem->fg.isSubquery ){
+      sqlite3SubqueryDelete(db, pItem->u4.pSubq);
+    }else if( pItem->fg.fixedSchema==0 && pItem->u4.zDatabase!=0 ){
+      sqlite3DbNNFreeNN(db, pItem->u4.zDatabase);
+    }
     if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy);
     if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg);
-    sqlite3DeleteTable(db, pItem->pTab);
-    if( pItem->pSelect ) sqlite3SelectDelete(db, pItem->pSelect);
+    sqlite3DeleteTable(db, pItem->pSTab);
    if( pItem->fg.isUsing ){
      sqlite3IdListDelete(db, pItem->u3.pUsing);
    }else if( pItem->u3.pOn ){
@@ -126312,6 +127986,54 @@ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){
   sqlite3DbNNFreeNN(db, pList);
 }
 
+/*
+** Attach a Subquery object to pItem->uv.pSubq.  Set the
+** pSelect value but leave all the other values initialized
+** to zero.
+**
+** A copy of the Select object is made if dupSelect is true, and the
+** SrcItem takes responsibility for deleting the copy.  If dupSelect is
+** false, ownership of the Select passes to the SrcItem.  Either way,
+** the SrcItem will take responsibility for deleting the Select.
+**
+** When dupSelect is zero, that means the Select might get deleted right
+** away if there is an OOM error.  Beware.
+**
+** Return non-zero on success.  Return zero on an OOM error.
+*/
+SQLITE_PRIVATE int sqlite3SrcItemAttachSubquery(
+  Parse *pParse,    /* Parsing context */
+  SrcItem *pItem,   /* Item to which the subquery is to be attached */
+  Select *pSelect,  /* The subquery SELECT.  Must be non-NULL */
+  int dupSelect     /* If true, attach a copy of pSelect, not pSelect itself.*/
+){
+  Subquery *p;
+  assert( pSelect!=0 );
+  assert( pItem->fg.isSubquery==0 );
+  if( pItem->fg.fixedSchema ){
+    pItem->u4.pSchema = 0;
+    pItem->fg.fixedSchema = 0;
+  }else if( pItem->u4.zDatabase!=0 ){
+    sqlite3DbFree(pParse->db, pItem->u4.zDatabase);
+    pItem->u4.zDatabase = 0;
+  }
+  if( dupSelect ){
+    pSelect = sqlite3SelectDup(pParse->db, pSelect, 0);
+    if( pSelect==0 ) return 0;
+  }
+  p = pItem->u4.pSubq = sqlite3DbMallocRawNN(pParse->db, sizeof(Subquery));
+  if( p==0 ){
+    sqlite3SelectDelete(pParse->db, pSelect);
+    return 0;
+  }
+  pItem->fg.isSubquery = 1;
+  p->pSelect = pSelect;
+  assert( offsetof(Subquery, pSelect)==0 );
+  memset(((char*)p)+sizeof(p->pSelect), 0, sizeof(*p)-sizeof(p->pSelect));
+  return 1;
+}
+
+
 /*
 ** This routine is called by the parser to add a new term to the
 ** end of a growing FROM clause. 
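The comments above pin down an ownership protocol: sqlite3SrcItemAttachSubquery always ends up responsible for the Select (on OOM it frees it and returns zero), while sqlite3SubqueryDetach hands responsibility back to the caller. The same contract in miniature, with stand-in types rather than SQLite's SrcItem and Subquery:

#include <stdlib.h>

typedef struct Sel { int dummy; } Sel;            /* stand-in for Select   */
typedef struct Sub { Sel *pSel; } Sub;            /* stand-in for Subquery */
typedef struct Item { Sub *pSub; int isSub; } Item;

static void freeSel(Sel *p){ free(p); }

/* Take ownership of pSel, even on failure.  Returns 0 on OOM. */
int attachSub(Item *pItem, Sel *pSel){
  Sub *p = malloc(sizeof(*p));
  if( p==0 ){ freeSel(pSel); return 0; }          /* consumed regardless */
  p->pSel = pSel;
  pItem->pSub = p;
  pItem->isSub = 1;
  return 1;
}

/* Give ownership of the Sel back to the caller. */
Sel *detachSub(Item *pItem){
  Sel *pSel = pItem->pSub->pSel;
  free(pItem->pSub);
  pItem->pSub = 0;
  pItem->isSub = 0;
  return pSel;
}

The consume-even-on-failure choice is what the "might get deleted right away ... Beware" warning above is about: once attach has been called with dupSelect==0, the caller must not touch the Select again, whatever the return value.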
The "p" parameter is the part of @@ -126361,10 +128083,12 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( if( pAlias->n ){ pItem->zAlias = sqlite3NameFromToken(db, pAlias); } + assert( pSubquery==0 || pDatabase==0 ); if( pSubquery ){ - pItem->pSelect = pSubquery; - if( pSubquery->selFlags & SF_NestedFrom ){ - pItem->fg.isNestedFrom = 1; + if( sqlite3SrcItemAttachSubquery(pParse, pItem, pSubquery, 0) ){ + if( pSubquery->selFlags & SF_NestedFrom ){ + pItem->fg.isNestedFrom = 1; + } } } assert( pOnUsing==0 || pOnUsing->pOn==0 || pOnUsing->pUsing==0 ); @@ -127036,10 +128760,9 @@ SQLITE_PRIVATE With *sqlite3WithAdd( } if( pWith ){ - sqlite3_int64 nByte = sizeof(*pWith) + (sizeof(pWith->a[1]) * pWith->nCte); - pNew = sqlite3DbRealloc(db, pWith, nByte); + pNew = sqlite3DbRealloc(db, pWith, SZ_WITH(pWith->nCte+1)); }else{ - pNew = sqlite3DbMallocZero(db, sizeof(*pWith)); + pNew = sqlite3DbMallocZero(db, SZ_WITH(1)); } assert( (pNew!=0 && zName!=0) || db->mallocFailed ); @@ -127377,12 +129100,18 @@ static int matchQuality( u8 enc /* Desired text encoding */ ){ int match; - assert( p->nArg>=-1 ); + assert( p->nArg>=(-4) && p->nArg!=(-2) ); + assert( nArg>=(-2) ); /* Wrong number of arguments means "no match" */ if( p->nArg!=nArg ){ - if( nArg==(-2) ) return (p->xSFunc==0) ? 0 : FUNC_PERFECT_MATCH; + if( nArg==(-2) ) return p->xSFunc==0 ? 0 : FUNC_PERFECT_MATCH; if( p->nArg>=0 ) return 0; + /* Special p->nArg values available to built-in functions only: + ** -3 1 or more arguments required + ** -4 2 or more arguments required + */ + if( p->nArg<(-2) && nArg<(-2-p->nArg) ) return 0; } /* Give a better score to a function with a specific number of arguments @@ -127642,8 +129371,8 @@ SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ ** ** The following fields are initialized appropriate in pSrc: ** -** pSrc->a[0].pTab Pointer to the Table object -** pSrc->a[0].pIndex Pointer to the INDEXED BY index, if there is one +** pSrc->a[0].spTab Pointer to the Table object +** pSrc->a[0].u2.pIBIndex Pointer to the INDEXED BY index, if there is one ** */ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ @@ -127651,8 +129380,8 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ Table *pTab; assert( pItem && pSrc->nSrc>=1 ); pTab = sqlite3LocateTableItem(pParse, 0, pItem); - if( pItem->pTab ) sqlite3DeleteTable(pParse->db, pItem->pTab); - pItem->pTab = pTab; + if( pItem->pSTab ) sqlite3DeleteTable(pParse->db, pItem->pSTab); + pItem->pSTab = pTab; pItem->fg.notCte = 1; if( pTab ){ pTab->nTabRef++; @@ -127693,6 +129422,7 @@ SQLITE_PRIVATE void sqlite3CodeChangeCount(Vdbe *v, int regCounter, const char * ** is for a top-level SQL statement. 
*/ static int vtabIsReadOnly(Parse *pParse, Table *pTab){ + assert( IsVirtual(pTab) ); if( sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0 ){ return 1; } @@ -127774,7 +129504,8 @@ SQLITE_PRIVATE void sqlite3MaterializeView( if( pFrom ){ assert( pFrom->nSrc==1 ); pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName); - pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + assert( pFrom->a[0].fg.fixedSchema==0 && pFrom->a[0].fg.isSubquery==0 ); + pFrom->a[0].u4.zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); assert( pFrom->a[0].fg.isUsing==0 ); assert( pFrom->a[0].u3.pOn==0 ); } @@ -127836,7 +129567,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( ** ); */ - pTab = pSrc->a[0].pTab; + pTab = pSrc->a[0].pSTab; if( HasRowid(pTab) ){ pLhs = sqlite3PExpr(pParse, TK_ROW, 0, 0); pEList = sqlite3ExprListAppend( @@ -127869,9 +129600,9 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree ** and the SELECT subtree. */ - pSrc->a[0].pTab = 0; + pSrc->a[0].pSTab = 0; pSelectSrc = sqlite3SrcListDup(db, pSrc, 0); - pSrc->a[0].pTab = pTab; + pSrc->a[0].pSTab = pTab; if( pSrc->a[0].fg.isIndexedBy ){ assert( pSrc->a[0].fg.isCte==0 ); pSrc->a[0].u2.pIBIndex = 0; @@ -129003,16 +130734,10 @@ static void substrFunc( int len; int p0type; i64 p1, p2; - int negP2 = 0; assert( argc==3 || argc==2 ); - if( sqlite3_value_type(argv[1])==SQLITE_NULL - || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL) - ){ - return; - } p0type = sqlite3_value_type(argv[0]); - p1 = sqlite3_value_int(argv[1]); + p1 = sqlite3_value_int64(argv[1]); if( p0type==SQLITE_BLOB ){ len = sqlite3_value_bytes(argv[0]); z = sqlite3_value_blob(argv[0]); @@ -129028,28 +130753,31 @@ static void substrFunc( } } } -#ifdef SQLITE_SUBSTR_COMPATIBILITY - /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) work the same as - ** as substr(X,1,N) - it returns the first N characters of X. This - ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8] - ** from 2009-02-02 for compatibility of applications that exploited the - ** old buggy behavior. */ - if( p1==0 ) p1 = 1; /* */ -#endif if( argc==3 ){ - p2 = sqlite3_value_int(argv[2]); - if( p2<0 ){ - p2 = -p2; - negP2 = 1; - } + p2 = sqlite3_value_int64(argv[2]); + if( p2==0 && sqlite3_value_type(argv[2])==SQLITE_NULL ) return; }else{ p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH]; } + if( p1==0 ){ +#ifdef SQLITE_SUBSTR_COMPATIBILITY + /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) work the same as + ** as substr(X,1,N) - it returns the first N characters of X. This + ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8] + ** from 2009-02-02 for compatibility of applications that exploited the + ** old buggy behavior. 
*/ + p1 = 1; /* */ +#endif + if( sqlite3_value_type(argv[1])==SQLITE_NULL ) return; + } if( p1<0 ){ p1 += len; if( p1<0 ){ - p2 += p1; - if( p2<0 ) p2 = 0; + if( p2<0 ){ + p2 = 0; + }else{ + p2 += p1; + } p1 = 0; } }else if( p1>0 ){ @@ -129057,12 +130785,13 @@ static void substrFunc( }else if( p2>0 ){ p2--; } - if( negP2 ){ - p1 -= p2; - if( p1<0 ){ - p2 += p1; - p1 = 0; + if( p2<0 ){ + if( p2<-p1 ){ + p2 = p1; + }else{ + p2 = -p2; } + p1 -= p2; } assert( p1>=0 && p2>=0 ); if( p0type!=SQLITE_BLOB ){ @@ -129076,9 +130805,11 @@ static void substrFunc( sqlite3_result_text64(context, (char*)z, z2-z, SQLITE_TRANSIENT, SQLITE_UTF8); }else{ - if( p1+p2>len ){ + if( p1>=len ){ + p1 = p2 = 0; + }else if( p2>len-p1 ){ p2 = len-p1; - if( p2<0 ) p2 = 0; + assert( p2>0 ); } sqlite3_result_blob64(context, (char*)&z[p1], (u64)p2, SQLITE_TRANSIENT); } @@ -129089,13 +130820,13 @@ static void substrFunc( */ #ifndef SQLITE_OMIT_FLOATING_POINT static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - int n = 0; + i64 n = 0; double r; char *zBuf; assert( argc==1 || argc==2 ); if( argc==2 ){ if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return; - n = sqlite3_value_int(argv[1]); + n = sqlite3_value_int64(argv[1]); if( n>30 ) n = 30; if( n<0 ) n = 0; } @@ -129110,7 +130841,7 @@ static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ }else if( n==0 ){ r = (double)((sqlite_int64)(r+(r<0?-0.5:+0.5))); }else{ - zBuf = sqlite3_mprintf("%!.*f",n,r); + zBuf = sqlite3_mprintf("%!.*f",(int)n,r); if( zBuf==0 ){ sqlite3_result_error_nomem(context); return; @@ -129739,7 +131470,7 @@ static const char hexdigits[] = { ** Append to pStr text that is the SQL literal representation of the ** value contained in pValue. */ -SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ +SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue, int bEscape){ /* As currently implemented, the string must be initially empty. ** we might relax this requirement in the future, but that will ** require enhancements to the implementation. */ @@ -129787,7 +131518,7 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ } case SQLITE_TEXT: { const unsigned char *zArg = sqlite3_value_text(pValue); - sqlite3_str_appendf(pStr, "%Q", zArg); + sqlite3_str_appendf(pStr, bEscape ? "%#Q" : "%Q", zArg); break; } default: { @@ -129798,6 +131529,105 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ } } +/* +** Return true if z[] begins with N hexadecimal digits, and write +** a decoding of those digits into *pVal. Or return false if any +** one of the first N characters in z[] is not a hexadecimal digit. 
+*/ +static int isNHex(const char *z, int N, u32 *pVal){ + int i; + int v = 0; + for(i=0; i0 ){ + memmove(&zOut[j], &zIn[i], n); + j += n; + i += n; + } + if( zIn[i+1]=='\\' ){ + i += 2; + zOut[j++] = '\\'; + }else if( sqlite3Isxdigit(zIn[i+1]) ){ + if( !isNHex(&zIn[i+1], 4, &v) ) goto unistr_error; + i += 5; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='+' ){ + if( !isNHex(&zIn[i+2], 6, &v) ) goto unistr_error; + i += 8; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='u' ){ + if( !isNHex(&zIn[i+2], 4, &v) ) goto unistr_error; + i += 6; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='U' ){ + if( !isNHex(&zIn[i+2], 8, &v) ) goto unistr_error; + i += 10; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else{ + goto unistr_error; + } + } + zOut[j] = 0; + sqlite3_result_text64(context, zOut, j, sqlite3_free, SQLITE_UTF8); + return; + +unistr_error: + sqlite3_free(zOut); + sqlite3_result_error(context, "invalid Unicode escape", -1); + return; +} + + /* ** Implementation of the QUOTE() function. ** @@ -129807,6 +131637,10 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ ** as needed. BLOBs are encoded as hexadecimal literals. Strings with ** embedded NUL characters cannot be represented as string literals in SQL ** and hence the returned string literal is truncated prior to the first NUL. +** +** If sqlite3_user_data() is non-zero, then the UNISTR_QUOTE() function is +** implemented instead. The difference is that UNISTR_QUOTE() uses the +** UNISTR() function to escape control characters. */ static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ sqlite3_str str; @@ -129814,7 +131648,7 @@ static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ assert( argc==1 ); UNUSED_PARAMETER(argc); sqlite3StrAccumInit(&str, db, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]); - sqlite3QuoteValue(&str,argv[0]); + sqlite3QuoteValue(&str,argv[0],SQLITE_PTR_TO_INT(sqlite3_user_data(context))); sqlite3_result_text(context, sqlite3StrAccumFinish(&str), str.nChar, SQLITE_DYNAMIC); if( str.accError!=SQLITE_OK ){ @@ -130069,7 +131903,7 @@ static void replaceFunc( assert( zRep==sqlite3_value_text(argv[2]) ); nOut = nStr + 1; assert( nOut0 ){ + if( sqlite3_value_type(argv[i])!=SQLITE_NULL ){ + int k = sqlite3_value_bytes(argv[i]); const char *v = (const char*)sqlite3_value_text(argv[i]); if( v!=0 ){ if( j>0 && nSep>0 ){ @@ -130465,7 +132299,7 @@ static void kahanBabuskaNeumaierInit( ** that it returns NULL if it sums over no inputs. TOTAL returns ** 0.0 in that case. In addition, TOTAL always returns a float where ** SUM might return an integer if it never encounters a floating point -** value. TOTAL never fails, but SUM might through an exception if +** value. TOTAL never fails, but SUM might throw an exception if ** it overflows an integer. */ static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){ @@ -130517,7 +132351,10 @@ static void sumInverse(sqlite3_context *context, int argc, sqlite3_value**argv){ assert( p->cnt>0 ); p->cnt--; if( !p->approx ){ - p->iSum -= sqlite3_value_int64(argv[0]); + if( sqlite3SubInt64(&p->iSum, sqlite3_value_int64(argv[0])) ){ + p->ovrfl = 1; + p->approx = 1; + } }else if( type==SQLITE_INTEGER ){ i64 iVal = sqlite3_value_int64(argv[0]); if( iVal!=SMALLEST_INT64 ){ @@ -130698,7 +132535,11 @@ static void minMaxFinalize(sqlite3_context *context){ ** group_concat(EXPR, ?SEPARATOR?) 
** string_agg(EXPR, SEPARATOR) ** -** The SEPARATOR goes before the EXPR string. This is tragic. The +** Content is accumulated in GroupConcatCtx.str with the SEPARATOR +** coming before the EXPR value, except for the first entry which +** omits the SEPARATOR. +** +** It is tragic that the SEPARATOR goes before the EXPR string. The ** groupConcatInverse() implementation would have been easier if the ** SEPARATOR were appended after EXPR. And the order is undocumented, ** so we could change it, in theory. But the old behavior has been @@ -130802,7 +132643,7 @@ static void groupConcatInverse( /* pGCC is always non-NULL since groupConcatStep() will have always ** run first to initialize it */ if( ALWAYS(pGCC) ){ - int nVS; + int nVS; /* Number of characters to remove */ /* Must call sqlite3_value_text() to convert the argument into text prior ** to invoking sqlite3_value_bytes(), in case the text encoding is UTF16 */ (void)sqlite3_value_text(argv[0]); @@ -131180,7 +133021,13 @@ static void signFunc( ** Implementation of fpdecode(x,y,z) function. ** ** x is a real number that is to be decoded. y is the precision. -** z is the maximum real precision. +** z is the maximum real precision. Return a string that shows the +** results of the sqlite3FpDecode() function. +** +** Used for testing and debugging only, specifically testing and debugging +** of the sqlite3FpDecode() function. This SQL function does not appear +** in production builds. This function is not an API and is subject to +** modification or removal in future versions of SQLite. */ static void fpdecodeFunc( sqlite3_context *context, @@ -131196,6 +133043,7 @@ static void fpdecodeFunc( x = sqlite3_value_double(argv[0]); y = sqlite3_value_int(argv[1]); z = sqlite3_value_int(argv[2]); + if( z<=0 ) z = 1; sqlite3FpDecode(&s, x, y, z); if( s.isSpecial==2 ){ sqlite3_snprintf(sizeof(zBuf), zBuf, "NaN"); @@ -131206,6 +133054,82 @@ static void fpdecodeFunc( } #endif /* SQLITE_DEBUG */ +#ifdef SQLITE_DEBUG +/* +** Implementation of parseuri(uri,flags) function. +** +** Required Arguments: +** "uri" The URI to parse. +** "flags" Bitmask of flags, as if to sqlite3_open_v2(). +** +** Additional arguments beyond the first two make calls to +** sqlite3_uri_key() for integers and sqlite3_uri_parameter for +** anything else. +** +** The result is a string showing the results of calling sqlite3ParseUri(). +** +** Used for testing and debugging only, specifically testing and debugging +** of the sqlite3ParseUri() function. This SQL function does not appear +** in production builds. This function is not an API and is subject to +** modification or removal in future versions of SQLite. +*/ +static void parseuriFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + sqlite3_str *pResult; + const char *zVfs; + const char *zUri; + unsigned int flgs; + int rc; + sqlite3_vfs *pVfs = 0; + char *zFile = 0; + char *zErr = 0; + + if( argc<2 ) return; + pVfs = sqlite3_vfs_find(0); + assert( pVfs ); + zVfs = pVfs->zName; + zUri = (const char*)sqlite3_value_text(argv[0]); + if( zUri==0 ) return; + flgs = (unsigned int)sqlite3_value_int(argv[1]); + rc = sqlite3ParseUri(zVfs, zUri, &flgs, &pVfs, &zFile, &zErr); + pResult = sqlite3_str_new(0); + if( pResult ){ + int i; + sqlite3_str_appendf(pResult, "rc=%d", rc); + sqlite3_str_appendf(pResult, ", flags=0x%x", flgs); + sqlite3_str_appendf(pResult, ", vfs=%Q", pVfs ? 
pVfs->zName: 0); + sqlite3_str_appendf(pResult, ", err=%Q", zErr); + sqlite3_str_appendf(pResult, ", file=%Q", zFile); + if( zFile ){ + const char *z = zFile; + z += sqlite3Strlen30(z)+1; + while( z[0] ){ + sqlite3_str_appendf(pResult, ", %Q", z); + z += sqlite3Strlen30(z)+1; + } + for(i=2; ia; - pItem->pTab = pFKey->pFrom; + pItem->pSTab = pFKey->pFrom; pItem->zName = pFKey->pFrom->zName; - pItem->pTab->nTabRef++; + pItem->pSTab->nTabRef++; pItem->iCursor = pParse->nTab++; if( regNew!=0 ){ @@ -132737,7 +134656,8 @@ static Trigger *fkActionTrigger( SrcList *pSrc; Expr *pRaise; - pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); + pRaise = sqlite3Expr(db, TK_STRING, "FOREIGN KEY constraint failed"), + pRaise = sqlite3PExpr(pParse, TK_RAISE, pRaise, 0); if( pRaise ){ pRaise->affExpr = OE_Abort; } @@ -132745,7 +134665,8 @@ static Trigger *fkActionTrigger( if( pSrc ){ assert( pSrc->nSrc==1 ); pSrc->a[0].zName = sqlite3DbStrDup(db, zFrom); - pSrc->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + assert( pSrc->a[0].fg.fixedSchema==0 && pSrc->a[0].fg.isSubquery==0 ); + pSrc->a[0].u4.zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); } pSelect = sqlite3SelectNew(pParse, sqlite3ExprListAppend(pParse, 0, pRaise), @@ -133479,8 +135400,11 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal){ if( ALWAYS(pVal) && pVal->pSrc->nSrc>0 ){ SrcItem *pItem = &pVal->pSrc->a[0]; - sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->regReturn); - sqlite3VdbeJumpHere(pParse->pVdbe, pItem->addrFillSub - 1); + assert( (pItem->fg.isSubquery && pItem->u4.pSubq!=0) || pParse->nErr ); + if( pItem->fg.isSubquery ){ + sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->u4.pSubq->regReturn); + sqlite3VdbeJumpHere(pParse->pVdbe, pItem->u4.pSubq->addrFillSub - 1); + } } } @@ -133584,7 +135508,7 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList f = (f & pLeft->selFlags); } pSelect = sqlite3SelectNew(pParse, pRow, 0, 0, 0, 0, 0, f, 0); - pLeft->selFlags &= ~SF_MultiValue; + pLeft->selFlags &= ~(u32)SF_MultiValue; if( pSelect ){ pSelect->op = TK_ALL; pSelect->pPrior = pLeft; @@ -133608,6 +135532,7 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList if( pRet ){ SelectDest dest; + Subquery *pSubq; pRet->pSrc->nSrc = 1; pRet->pPrior = pLeft->pPrior; pRet->op = pLeft->op; @@ -133617,28 +135542,32 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList assert( pLeft->pNext==0 ); assert( pRet->pNext==0 ); p = &pRet->pSrc->a[0]; - p->pSelect = pLeft; p->fg.viaCoroutine = 1; - p->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; - p->regReturn = ++pParse->nMem; p->iCursor = -1; + assert( !p->fg.isIndexedBy && !p->fg.isTabFunc ); p->u1.nRow = 2; - sqlite3VdbeAddOp3(v,OP_InitCoroutine,p->regReturn,0,p->addrFillSub); - sqlite3SelectDestInit(&dest, SRT_Coroutine, p->regReturn); - - /* Allocate registers for the output of the co-routine. Do so so - ** that there are two unused registers immediately before those - ** used by the co-routine. This allows the code in sqlite3Insert() - ** to use these registers directly, instead of copying the output - ** of the co-routine to a separate array for processing. 
*/ - dest.iSdst = pParse->nMem + 3; - dest.nSdst = pLeft->pEList->nExpr; - pParse->nMem += 2 + dest.nSdst; - - pLeft->selFlags |= SF_MultiValue; - sqlite3Select(pParse, pLeft, &dest); - p->regResult = dest.iSdst; - assert( pParse->nErr || dest.iSdst>0 ); + if( sqlite3SrcItemAttachSubquery(pParse, p, pLeft, 0) ){ + pSubq = p->u4.pSubq; + pSubq->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; + pSubq->regReturn = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, + pSubq->regReturn, 0, pSubq->addrFillSub); + sqlite3SelectDestInit(&dest, SRT_Coroutine, pSubq->regReturn); + + /* Allocate registers for the output of the co-routine. Do so so + ** that there are two unused registers immediately before those + ** used by the co-routine. This allows the code in sqlite3Insert() + ** to use these registers directly, instead of copying the output + ** of the co-routine to a separate array for processing. */ + dest.iSdst = pParse->nMem + 3; + dest.nSdst = pLeft->pEList->nExpr; + pParse->nMem += 2 + dest.nSdst; + + pLeft->selFlags |= SF_MultiValue; + sqlite3Select(pParse, pLeft, &dest); + pSubq->regResult = dest.iSdst; + assert( pParse->nErr || dest.iSdst>0 ); + } pLeft = pRet; } }else{ @@ -133648,12 +135577,18 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList } if( pParse->nErr==0 ){ + Subquery *pSubq; assert( p!=0 ); - if( p->pSelect->pEList->nExpr!=pRow->nExpr ){ - sqlite3SelectWrongNumTermsError(pParse, p->pSelect); + assert( p->fg.isSubquery ); + pSubq = p->u4.pSubq; + assert( pSubq!=0 ); + assert( pSubq->pSelect!=0 ); + assert( pSubq->pSelect->pEList!=0 ); + if( pSubq->pSelect->pEList->nExpr!=pRow->nExpr ){ + sqlite3SelectWrongNumTermsError(pParse, pSubq->pSelect); }else{ - sqlite3ExprCodeExprList(pParse, pRow, p->regResult, 0, 0); - sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, p->regReturn); + sqlite3ExprCodeExprList(pParse, pRow, pSubq->regResult, 0, 0); + sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, pSubq->regReturn); } } sqlite3ExprListDelete(pParse->db, pRow); @@ -133807,6 +135742,7 @@ SQLITE_PRIVATE void sqlite3Insert( int regRowid; /* registers holding insert rowid */ int regData; /* register holding first column to insert */ int *aRegIdx = 0; /* One register allocated to each index */ + int *aTabColMap = 0; /* Mapping from pTab columns to pCol entries */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True if attempting to insert into a view */ @@ -133951,31 +135887,25 @@ SQLITE_PRIVATE void sqlite3Insert( */ bIdListInOrder = (pTab->tabFlags & (TF_OOOHidden|TF_HasStored))==0; if( pColumn ){ - assert( pColumn->eU4!=EU4_EXPR ); - pColumn->eU4 = EU4_IDX; - for(i=0; inId; i++){ - pColumn->a[i].u4.idx = -1; - } + aTabColMap = sqlite3DbMallocZero(db, pTab->nCol*sizeof(int)); + if( aTabColMap==0 ) goto insert_cleanup; for(i=0; inId; i++){ - for(j=0; jnCol; j++){ - if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zCnName)==0 ){ - pColumn->a[i].u4.idx = j; - if( i!=j ) bIdListInOrder = 0; - if( j==pTab->iPKey ){ - ipkColumn = i; assert( !withoutRowid ); - } + j = sqlite3ColumnIndex(pTab, pColumn->a[i].zName); + if( j>=0 ){ + if( aTabColMap[j]==0 ) aTabColMap[j] = i+1; + if( i!=j ) bIdListInOrder = 0; + if( j==pTab->iPKey ){ + ipkColumn = i; assert( !withoutRowid ); + } #ifndef SQLITE_OMIT_GENERATED_COLUMNS - if( pTab->aCol[j].colFlags & (COLFLAG_STORED|COLFLAG_VIRTUAL) ){ - sqlite3ErrorMsg(pParse, - "cannot INSERT into generated column \"%s\"", - pTab->aCol[j].zCnName); - goto insert_cleanup; - } -#endif - break; + if( pTab->aCol[j].colFlags & 
(COLFLAG_STORED|COLFLAG_VIRTUAL) ){ + sqlite3ErrorMsg(pParse, + "cannot INSERT into generated column \"%s\"", + pTab->aCol[j].zCnName); + goto insert_cleanup; } - } - if( j>=pTab->nCol ){ +#endif + }else{ if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){ ipkColumn = i; bIdListInOrder = 0; @@ -134004,9 +135934,14 @@ SQLITE_PRIVATE void sqlite3Insert( && pSelect->pPrior==0 ){ SrcItem *pItem = &pSelect->pSrc->a[0]; - dest.iSDParm = pItem->regReturn; - regFromSelect = pItem->regResult; - nColumn = pItem->pSelect->pEList->nExpr; + Subquery *pSubq; + assert( pItem->fg.isSubquery ); + pSubq = pItem->u4.pSubq; + dest.iSDParm = pSubq->regReturn; + regFromSelect = pSubq->regResult; + assert( pSubq->pSelect!=0 ); + assert( pSubq->pSelect->pEList!=0 ); + nColumn = pSubq->pSelect->pEList->nExpr; ExplainQueryPlan((pParse, 0, "SCAN %S", pItem)); if( bIdListInOrder && nColumn==pTab->nCol ){ regData = regFromSelect; @@ -134268,7 +136203,7 @@ SQLITE_PRIVATE void sqlite3Insert( continue; }else if( pColumn==0 ){ /* Hidden columns that are not explicitly named in the INSERT - ** get there default value */ + ** get their default value */ sqlite3ExprCodeFactorable(pParse, sqlite3ColumnExpr(pTab, &pTab->aCol[i]), iRegStore); @@ -134276,9 +136211,9 @@ SQLITE_PRIVATE void sqlite3Insert( } } if( pColumn ){ - assert( pColumn->eU4==EU4_IDX ); - for(j=0; jnId && pColumn->a[j].u4.idx!=i; j++){} - if( j>=pColumn->nId ){ + j = aTabColMap[i]; + assert( j>=0 && j<=pColumn->nId ); + if( j==0 ){ /* A column not named in the insert column list gets its ** default value */ sqlite3ExprCodeFactorable(pParse, @@ -134286,7 +136221,7 @@ SQLITE_PRIVATE void sqlite3Insert( iRegStore); continue; } - k = j; + k = j - 1; }else if( nColumn==0 ){ /* This is INSERT INTO ... DEFAULT VALUES. Load the default value. */ sqlite3ExprCodeFactorable(pParse, @@ -134531,7 +136466,10 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3ExprListDelete(db, pList); sqlite3UpsertDelete(db, pUpsert); sqlite3SelectDelete(db, pSelect); - sqlite3IdListDelete(db, pColumn); + if( pColumn ){ + sqlite3IdListDelete(db, pColumn); + sqlite3DbFree(db, aTabColMap); + } if( aRegIdx ) sqlite3DbNNFreeNN(db, aRegIdx); } @@ -134990,7 +136928,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** could happen in any order, but they are grouped up front for ** convenience. ** - ** 2018-08-14: Ticket https://www.sqlite.org/src/info/908f001483982c43 + ** 2018-08-14: Ticket https://sqlite.org/src/info/908f001483982c43 ** The order of constraints used to have OE_Update as (2) and OE_Abort ** and so forth as (1). But apparently PostgreSQL checks the OE_Update ** constraint before any others, so it had to be moved. 
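The aTabColMap array introduced into sqlite3Insert() above replaces a per-column
linear search of the IdList with a single table-sized map: entry j holds i+1 when
table column j is the i-th name in the INSERT(...) column list, and 0 when the
column is unnamed and should receive its default value. A minimal standalone
sketch of the same scheme, with hypothetical names and a case-sensitive strcmp()
standing in for SQLite's case-insensitive sqlite3ColumnIndex() lookup:

    #include <string.h>

    /* Build aMap[] so that aMap[j]==i+1 when table column j is the i-th
    ** name in the INSERT column list, or 0 when the column is unnamed.
    ** First mention wins for duplicated names. */
    static void buildColMap(
      const char * const *azTabCol, int nTabCol,  /* table columns */
      const char * const *azIdList, int nId,      /* INSERT(...) names */
      int *aMap                                   /* out: nTabCol entries */
    ){
      int i, j;
      memset(aMap, 0, (size_t)nTabCol*sizeof(int));
      for(i=0; i<nId; i++){
        for(j=0; j<nTabCol; j++){
          if( strcmp(azIdList[i], azTabCol[j])==0 ){
            if( aMap[j]==0 ) aMap[j] = i+1;   /* first mention wins */
            break;
          }
        }
      }
    }

For table columns {a,b,c} and an INSERT(c,a) list this yields {2, 0, 1}, so
looking up where a table column comes from costs one array read (k = aMap[i]-1,
as in the "k = j - 1" above) instead of a scan over the IdList for every column.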
@@ -135926,7 +137864,7 @@ static int xferOptimization( if( pSelect->pSrc->nSrc!=1 ){ return 0; /* FROM clause must have exactly one term */ } - if( pSelect->pSrc->a[0].pSelect ){ + if( pSelect->pSrc->a[0].fg.isSubquery ){ return 0; /* FROM clause cannot contain a subquery */ } if( pSelect->pWhere ){ @@ -136800,6 +138738,8 @@ struct sqlite3_api_routines { /* Version 3.44.0 and later */ void *(*get_clientdata)(sqlite3*,const char*); int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); + /* Version 3.50.0 and later */ + int (*setlk_timeout)(sqlite3*,int,int); }; /* @@ -137133,6 +139073,8 @@ typedef int (*sqlite3_loadext_entry)( /* Version 3.44.0 and later */ #define sqlite3_get_clientdata sqlite3_api->get_clientdata #define sqlite3_set_clientdata sqlite3_api->set_clientdata +/* Version 3.50.0 and later */ +#define sqlite3_setlk_timeout sqlite3_api->setlk_timeout #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -137654,7 +139596,9 @@ static const sqlite3_api_routines sqlite3Apis = { sqlite3_stmt_explain, /* Version 3.44.0 and later */ sqlite3_get_clientdata, - sqlite3_set_clientdata + sqlite3_set_clientdata, + /* Version 3.50.0 and later */ + sqlite3_setlk_timeout }; /* True if x is the directory separator character @@ -138176,48 +140120,48 @@ static const char *const pragCName[] = { /* 13 */ "pk", /* 14 */ "hidden", /* table_info reuses 8 */ - /* 15 */ "schema", /* Used by: table_list */ - /* 16 */ "name", + /* 15 */ "name", /* Used by: function_list */ + /* 16 */ "builtin", /* 17 */ "type", - /* 18 */ "ncol", - /* 19 */ "wr", - /* 20 */ "strict", - /* 21 */ "seqno", /* Used by: index_xinfo */ - /* 22 */ "cid", - /* 23 */ "name", - /* 24 */ "desc", - /* 25 */ "coll", - /* 26 */ "key", - /* 27 */ "name", /* Used by: function_list */ - /* 28 */ "builtin", - /* 29 */ "type", - /* 30 */ "enc", - /* 31 */ "narg", - /* 32 */ "flags", - /* 33 */ "tbl", /* Used by: stats */ - /* 34 */ "idx", - /* 35 */ "wdth", - /* 36 */ "hght", - /* 37 */ "flgs", - /* 38 */ "seq", /* Used by: index_list */ - /* 39 */ "name", - /* 40 */ "unique", - /* 41 */ "origin", - /* 42 */ "partial", + /* 18 */ "enc", + /* 19 */ "narg", + /* 20 */ "flags", + /* 21 */ "schema", /* Used by: table_list */ + /* 22 */ "name", + /* 23 */ "type", + /* 24 */ "ncol", + /* 25 */ "wr", + /* 26 */ "strict", + /* 27 */ "seqno", /* Used by: index_xinfo */ + /* 28 */ "cid", + /* 29 */ "name", + /* 30 */ "desc", + /* 31 */ "coll", + /* 32 */ "key", + /* 33 */ "seq", /* Used by: index_list */ + /* 34 */ "name", + /* 35 */ "unique", + /* 36 */ "origin", + /* 37 */ "partial", + /* 38 */ "tbl", /* Used by: stats */ + /* 39 */ "idx", + /* 40 */ "wdth", + /* 41 */ "hght", + /* 42 */ "flgs", /* 43 */ "table", /* Used by: foreign_key_check */ /* 44 */ "rowid", /* 45 */ "parent", /* 46 */ "fkid", - /* index_info reuses 21 */ - /* 47 */ "seq", /* Used by: database_list */ - /* 48 */ "name", - /* 49 */ "file", - /* 50 */ "busy", /* Used by: wal_checkpoint */ - /* 51 */ "log", - /* 52 */ "checkpointed", - /* collation_list reuses 38 */ + /* 47 */ "busy", /* Used by: wal_checkpoint */ + /* 48 */ "log", + /* 49 */ "checkpointed", + /* 50 */ "seq", /* Used by: database_list */ + /* 51 */ "name", + /* 52 */ "file", + /* index_info reuses 27 */ /* 53 */ "database", /* Used by: lock_status */ /* 54 */ "status", + /* collation_list reuses 33 */ /* 55 */ "cache_size", /* Used by: default_cache_size */ /* module_list pragma_list reuses 9 */ /* 56 */ 
"timeout", /* Used by: busy_timeout */ @@ -138310,7 +140254,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "collation_list", /* ePragTyp: */ PragTyp_COLLATION_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 38, 2, + /* ColNames: */ 33, 2, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) @@ -138345,7 +140289,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "database_list", /* ePragTyp: */ PragTyp_DATABASE_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 47, 3, + /* ColNames: */ 50, 3, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) @@ -138425,7 +140369,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "function_list", /* ePragTyp: */ PragTyp_FUNCTION_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 27, 6, + /* ColNames: */ 15, 6, /* iArg: */ 0 }, #endif #endif @@ -138454,17 +140398,17 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "index_info", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 21, 3, + /* ColNames: */ 27, 3, /* iArg: */ 0 }, {/* zName: */ "index_list", /* ePragTyp: */ PragTyp_INDEX_LIST, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 38, 5, + /* ColNames: */ 33, 5, /* iArg: */ 0 }, {/* zName: */ "index_xinfo", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 21, 6, + /* ColNames: */ 27, 6, /* iArg: */ 1 }, #endif #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) @@ -138643,7 +140587,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "stats", /* ePragTyp: */ PragTyp_STATS, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, - /* ColNames: */ 33, 5, + /* ColNames: */ 38, 5, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) @@ -138662,7 +140606,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "table_list", /* ePragTyp: */ PragTyp_TABLE_LIST, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1, - /* ColNames: */ 15, 6, + /* ColNames: */ 21, 6, /* iArg: */ 0 }, {/* zName: */ "table_xinfo", /* ePragTyp: */ PragTyp_TABLE_INFO, @@ -138739,7 +140683,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "wal_checkpoint", /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, /* ePragFlg: */ PragFlg_NeedSchema, - /* ColNames: */ 50, 3, + /* ColNames: */ 47, 3, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) @@ -138761,7 +140705,7 @@ static const PragmaName aPragmaName[] = { ** the following macro or to the actual analysis_limit if it is non-zero, ** in order to prevent PRAGMA optimize from running for too long. ** -** The value of 2000 is chosen emperically so that the worst-case run-time +** The value of 2000 is chosen empirically so that the worst-case run-time ** for PRAGMA optimize does not exceed 100 milliseconds against a variety ** of test databases on a RaspberryPI-4 compiled using -Os and without ** -DSQLITE_DEBUG. Of course, your mileage may vary. For the purpose of @@ -139869,12 +141813,6 @@ SQLITE_PRIVATE void sqlite3Pragma( ** in auto-commit mode. 
*/ mask &= ~(SQLITE_ForeignKeys); } -#if SQLITE_USER_AUTHENTICATION - if( db->auth.authLevel==UAUTH_User ){ - /* Do not allow non-admin users to modify the schema arbitrarily */ - mask &= ~(SQLITE_WriteSchema); - } -#endif if( sqlite3GetBoolean(zRight, 0) ){ if( (mask & SQLITE_WriteSchema)==0 @@ -139884,7 +141822,10 @@ SQLITE_PRIVATE void sqlite3Pragma( } }else{ db->flags &= ~mask; - if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0; + if( mask==SQLITE_DeferFKs ){ + db->nDeferredImmCons = 0; + db->nDeferredCons = 0; + } if( (mask & SQLITE_WriteSchema)!=0 && sqlite3_stricmp(zRight, "reset")==0 ){ @@ -140010,7 +141951,8 @@ SQLITE_PRIVATE void sqlite3Pragma( char *zSql = sqlite3MPrintf(db, "SELECT*FROM\"%w\"", pTab->zName); if( zSql ){ sqlite3_stmt *pDummy = 0; - (void)sqlite3_prepare(db, zSql, -1, &pDummy, 0); + (void)sqlite3_prepare_v3(db, zSql, -1, SQLITE_PREPARE_DONT_LOG, + &pDummy, 0); (void)sqlite3_finalize(pDummy); sqlite3DbFree(db, zSql); } @@ -140486,11 +142428,12 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Make sure sufficient number of registers have been allocated */ sqlite3TouchRegister(pParse, 8+cnt); + sqlite3VdbeAddOp3(v, OP_Null, 0, 8, 8+cnt); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ sqlite3VdbeAddOp4(v, OP_IntegrityCk, 1, cnt, 8, (char*)aRoot,P4_INTARRAY); - sqlite3VdbeChangeP5(v, (u8)i); + sqlite3VdbeChangeP5(v, (u16)i); addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zDbSName), @@ -142110,14 +144053,7 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl #else encoding = SQLITE_UTF8; #endif - if( db->nVdbeActive>0 && encoding!=ENC(db) - && (db->mDbFlags & DBFLAG_Vacuum)==0 - ){ - rc = SQLITE_LOCKED; - goto initone_error_out; - }else{ - sqlite3SetTextEncoding(db, encoding); - } + sqlite3SetTextEncoding(db, encoding); }else{ /* If opening an attached database, the encoding much match ENC(db) */ if( (meta[BTREE_TEXT_ENCODING-1] & 3)!=ENC(db) ){ @@ -142811,12 +144747,24 @@ static int sqlite3Prepare16( if( !sqlite3SafetyCheckOk(db)||zSql==0 ){ return SQLITE_MISUSE_BKPT; } + + /* Make sure nBytes is non-negative and correct. It should be the + ** number of bytes until the end of the input buffer or until the first + ** U+0000 character. If the input nBytes is odd, convert it into + ** an even number. If the input nBytes is negative, then the input + ** must be terminated by at least one U+0000 character */ if( nBytes>=0 ){ int sz; const char *z = (const char*)zSql; for(sz=0; szmutex); zSql8 = sqlite3Utf16to8(db, zSql, nBytes, SQLITE_UTF16NATIVE); if( zSql8 ){ @@ -142830,7 +144778,7 @@ static int sqlite3Prepare16( ** the same number of characters into the UTF-16 string. 
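      **
      ** For example, if a two-statement UTF-16 script begins with the
      ** nine-character statement "SELECT 1;", then zTail8 lands nine
      ** UTF-8 characters past zSql8, and the UTF-16 tail is found by
      ** stepping the same nine characters (eighteen bytes, for this
      ** all-ASCII prefix) into the original UTF-16 text.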
      */
      int chars_parsed = sqlite3Utf8CharLen(zSql8, (int)(zTail8-zSql8));
-     *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed);
+     *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, nBytes, chars_parsed);
    }
    sqlite3DbFree(db, zSql8);
    rc = sqlite3ApiExit(db, rc);
@@ -143046,7 +144994,7 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
   pNew->addrOpenEphm[0] = -1;
   pNew->addrOpenEphm[1] = -1;
   pNew->nSelectRow = 0;
-  if( pSrc==0 ) pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*pSrc));
+  if( pSrc==0 ) pSrc = sqlite3DbMallocZero(pParse->db, SZ_SRCLIST_1);
   pNew->pSrc = pSrc;
   pNew->pWhere = pWhere;
   pNew->pGroupBy = pGroupBy;
@@ -143211,10 +145159,33 @@ SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *p
 */
 SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){
   int i;
-  u8 h = sqlite3StrIHash(zCol);
-  Column *pCol;
-  for(pCol=pTab->aCol, i=0; i<pTab->nCol; pCol++, i++){
-    if( pCol->hName==h && sqlite3StrICmp(pCol->zCnName, zCol)==0 ) return i;
+  u8 h;
+  const Column *aCol;
+  int nCol;
+
+  h = sqlite3StrIHash(zCol);
+  aCol = pTab->aCol;
+  nCol = pTab->nCol;
+
+  /* See if the aHx gives us a lucky match */
+  i = pTab->aHx[h % sizeof(pTab->aHx)];
+  assert( i<nCol );
+  if( aCol[i].hName==h
+   && sqlite3StrICmp(aCol[i].zCnName, zCol)==0
+  ){
+    return i;
+  }
+
+  /* No lucky match from aHx, so search all of the columns */
+  i = 0;
+  while( 1 /*exit-by-break*/ ){
+    if( aCol[i].hName==h
+     && sqlite3StrICmp(aCol[i].zCnName, zCol)==0
+    ){
+      return i;
+    }
+    i++;
+    if( i>=nCol ) break;
   }
   return -1;
 }
@@ -143224,11 +145195,13 @@ SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){
 */
 SQLITE_PRIVATE void sqlite3SrcItemColumnUsed(SrcItem *pItem, int iCol){
   assert( pItem!=0 );
-  assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) );
+  assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem) );
   if( pItem->fg.isNestedFrom ){
     ExprList *pResults;
-    assert( pItem->pSelect!=0 );
-    pResults = pItem->pSelect->pEList;
+    assert( pItem->fg.isSubquery );
+    assert( pItem->u4.pSubq!=0 );
+    assert( pItem->u4.pSubq->pSelect!=0 );
+    pResults = pItem->u4.pSubq->pSelect->pEList;
     assert( pResults!=0 );
     assert( iCol>=0 && iCol<pResults->nExpr );
     pResults->a[iCol].fg.bUsed = 1;
@@ -143262,9 +145235,9 @@ static int tableAndColumnIndex(
   assert( (piTab==0)==(piCol==0) );  /* Both or neither are NULL */
   for(i=iStart; i<=iEnd; i++){
-    iCol = sqlite3ColumnIndex(pSrc->a[i].pTab, zCol);
+    iCol = sqlite3ColumnIndex(pSrc->a[i].pSTab, zCol);
     if( iCol>=0
-      && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pTab->aCol[iCol])==0)
+      && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pSTab->aCol[iCol])==0)
     ){
       if( piTab ){
         sqlite3SrcItemColumnUsed(&pSrc->a[i], iCol);
@@ -143393,10 +145366,10 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){
   pLeft = &pSrc->a[0];
   pRight = &pLeft[1];
   for(i=0; i<pSrc->nSrc-1; i++, pRight++, pLeft++){
-    Table *pRightTab = pRight->pTab;
+    Table *pRightTab = pRight->pSTab;
     u32 joinType;
-    if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue;
+    if( NEVER(pLeft->pSTab==0 || pRightTab==0) ) continue;
     joinType = (pRight->fg.jointype & JT_OUTER)!=0 ? EP_OuterON : EP_InnerON;

    /* If this is a NATURAL join, synthesize an appropriate USING clause
@@ -143463,7 +145436,7 @@
        }
        pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iLeftCol);
        sqlite3SrcItemColumnUsed(&pSrc->a[iLeft], iLeftCol);
-       if( (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){
+       if( (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 && pParse->nErr==0 ){
          /* This branch runs if the query contains one or more RIGHT or FULL
          ** JOINs.  If only a single table on the left side of this join
          ** contains the zName column, then this branch is a no-op.
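The rewritten sqlite3ColumnIndex() above first consults a small per-table hint
array (aHx) that maps a hash bucket to the most likely column index, and only
falls back to the linear scan on a miss. A rough standalone sketch of that
pattern, under an assumed ColTable type and with a simplified hash in place of
sqlite3StrIHash():

    #include <string.h>
    #include <strings.h>        /* strcasecmp() */

    typedef struct ColTable ColTable;
    struct ColTable {
      const char **azName;      /* column names */
      unsigned char *aHash;     /* precomputed hash of each name */
      int nCol;                 /* number of columns */
      unsigned char aHx[16];    /* hash bucket -> likely column index */
    };

    static unsigned char colHash(const char *z){
      unsigned char h = 0;
      while( *z ){ h += (unsigned char)(*z | 0x20); z++; }  /* crude case-fold */
      return h;
    }

    static int colIndex(const ColTable *p, const char *zCol){
      unsigned char h = colHash(zCol);
      int i = p->aHx[h % sizeof(p->aHx)];          /* the "lucky match" probe */
      if( i<p->nCol && p->aHash[i]==h && strcasecmp(p->azName[i], zCol)==0 ){
        return i;
      }
      for(i=0; i<p->nCol; i++){                    /* fallback: full scan */
        if( p->aHash[i]==h && strcasecmp(p->azName[i], zCol)==0 ) return i;
      }
      return -1;
    }

The design point is that name resolution hits the same few columns repeatedly,
so one cheap probe usually avoids the O(nCol) scan while a collision or stale
hint still degrades gracefully to the old behavior.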
@@ -143479,6 +145452,8 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){ */ ExprList *pFuncArgs = 0; /* Arguments to the coalesce() */ static const Token tkCoalesce = { "coalesce", 8 }; + assert( pE1!=0 ); + ExprSetProperty(pE1, EP_CanBeNull); while( tableAndColumnIndex(pSrc, iLeft+1, i, zName, &iLeft, &iLeftCol, pRight->fg.isSynthUsing)!=0 ){ if( pSrc->a[iLeft].fg.isUsing==0 @@ -143495,7 +145470,13 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){ if( pFuncArgs ){ pFuncArgs = sqlite3ExprListAppend(pParse, pFuncArgs, pE1); pE1 = sqlite3ExprFunction(pParse, pFuncArgs, &tkCoalesce, 0); + if( pE1 ){ + pE1->affExpr = SQLITE_AFF_DEFER; + } } + }else if( (pSrc->a[i+1].fg.jointype & JT_LEFT)!=0 && pParse->nErr==0 ){ + assert( pE1!=0 ); + ExprSetProperty(pE1, EP_CanBeNull); } pE2 = sqlite3CreateColumnExpr(db, pSrc, i+1, iRightCol); sqlite3SrcItemColumnUsed(pRight, iRightCol); @@ -144269,12 +146250,18 @@ static void selectInnerLoop( ** case the order does matter */ pushOntoSorter( pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg); + pDest->iSDParm2 = 0; /* Signal that any Bloom filter is unpopulated */ }else{ int r1 = sqlite3GetTempReg(pParse); assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol, r1, pDest->zAffSdst, nResultCol); sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol); + if( pDest->iSDParm2 ){ + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pDest->iSDParm2, 0, + regResult, nResultCol); + ExplainQueryPlan((pParse, 0, "CREATE BLOOM FILTER")); + } sqlite3ReleaseTempReg(pParse, r1); } break; @@ -144398,8 +146385,8 @@ static void selectInnerLoop( ** X extra columns. */ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ - int nExtra = (N+X)*(sizeof(CollSeq*)+1) - sizeof(CollSeq*); - KeyInfo *p = sqlite3DbMallocRawNN(db, sizeof(KeyInfo) + nExtra); + int nExtra = (N+X)*(sizeof(CollSeq*)+1); + KeyInfo *p = sqlite3DbMallocRawNN(db, SZ_KEYINFO(0) + nExtra); if( p ){ p->aSortFlags = (u8*)&p->aColl[N+X]; p->nKeyField = (u16)N; @@ -144407,7 +146394,7 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ p->enc = ENC(db); p->db = db; p->nRef = 1; - memset(&p[1], 0, nExtra); + memset(p->aColl, 0, nExtra); }else{ return (KeyInfo*)sqlite3OomFault(db); } @@ -144816,8 +146803,12 @@ static const char *columnTypeImpl( SrcList *pTabList = pNC->pSrcList; for(j=0;jnSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++); if( jnSrc ){ - pTab = pTabList->a[j].pTab; - pS = pTabList->a[j].pSelect; + pTab = pTabList->a[j].pSTab; + if( pTabList->a[j].fg.isSubquery ){ + pS = pTabList->a[j].u4.pSubq->pSelect; + }else{ + pS = 0; + } }else{ pNC = pNC->pNext; } @@ -145384,7 +147375,7 @@ static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){ p->iLimit = iLimit = ++pParse->nMem; v = sqlite3GetVdbe(pParse); assert( v!=0 ); - if( sqlite3ExprIsInteger(pLimit->pLeft, &n) ){ + if( sqlite3ExprIsInteger(pLimit->pLeft, &n, pParse) ){ sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit); VdbeComment((v, "LIMIT counter")); if( n==0 ){ @@ -145864,7 +147855,7 @@ static int multiSelect( p->pPrior = pPrior; p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); if( p->pLimit - && sqlite3ExprIsInteger(p->pLimit->pLeft, &nLimit) + && sqlite3ExprIsInteger(p->pLimit->pLeft, &nLimit, pParse) && nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit) ){ p->nSelectRow = sqlite3LogEst((u64)nLimit); @@ -146104,6 +148095,7 @@ static int multiSelect( multi_select_end: pDest->iSdst = 
dest.iSdst; pDest->nSdst = dest.nSdst; + pDest->iSDParm2 = dest.iSDParm2; if( pDelete ){ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete); } @@ -146208,6 +148200,11 @@ static int generateOutputSubroutine( r1, pDest->zAffSdst, pIn->nSdst); sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1, pIn->iSdst, pIn->nSdst); + if( pDest->iSDParm2>0 ){ + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pDest->iSDParm2, 0, + pIn->iSdst, pIn->nSdst); + ExplainQueryPlan((pParse, 0, "CREATE BLOOM FILTER")); + } sqlite3ReleaseTempReg(pParse, r1); break; } @@ -146786,32 +148783,32 @@ static Expr *substExpr( if( pSubst->isOuterJoin ){ ExprSetProperty(pNew, EP_CanBeNull); } - if( ExprHasProperty(pExpr,EP_OuterON|EP_InnerON) ){ - sqlite3SetJoinExpr(pNew, pExpr->w.iJoin, - pExpr->flags & (EP_OuterON|EP_InnerON)); - } - sqlite3ExprDelete(db, pExpr); - pExpr = pNew; - if( pExpr->op==TK_TRUEFALSE ){ - pExpr->u.iValue = sqlite3ExprTruthValue(pExpr); - pExpr->op = TK_INTEGER; - ExprSetProperty(pExpr, EP_IntValue); + if( pNew->op==TK_TRUEFALSE ){ + pNew->u.iValue = sqlite3ExprTruthValue(pNew); + pNew->op = TK_INTEGER; + ExprSetProperty(pNew, EP_IntValue); } /* Ensure that the expression now has an implicit collation sequence, ** just as it did when it was a column of a view or sub-query. */ { - CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pExpr); + CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pNew); CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pSubst->pCList->a[iColumn].pExpr ); - if( pNat!=pColl || (pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE) ){ - pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, + if( pNat!=pColl || (pNew->op!=TK_COLUMN && pNew->op!=TK_COLLATE) ){ + pNew = sqlite3ExprAddCollateString(pSubst->pParse, pNew, (pColl ? pColl->zName : "BINARY") ); } } - ExprClearProperty(pExpr, EP_Collate); + ExprClearProperty(pNew, EP_Collate); + if( ExprHasProperty(pExpr,EP_OuterON|EP_InnerON) ){ + sqlite3SetJoinExpr(pNew, pExpr->w.iJoin, + pExpr->flags & (EP_OuterON|EP_InnerON)); + } + sqlite3ExprDelete(db, pExpr); + pExpr = pNew; } } }else{ @@ -146864,7 +148861,9 @@ static void substSelect( pSrc = p->pSrc; assert( pSrc!=0 ); for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ - substSelect(pSubst, pItem->pSelect, 1); + if( pItem->fg.isSubquery ){ + substSelect(pSubst, pItem->u4.pSubq->pSelect, 1); + } if( pItem->fg.isTabFunc ){ substExprList(pSubst, pItem->u1.pFuncArg); } @@ -146895,7 +148894,7 @@ static void recomputeColumnsUsed( SrcItem *pSrcItem /* Which FROM clause item to recompute */ ){ Walker w; - if( NEVER(pSrcItem->pTab==0) ) return; + if( NEVER(pSrcItem->pSTab==0) ) return; memset(&w, 0, sizeof(w)); w.xExprCallback = recomputeColumnsUsedExpr; w.xSelectCallback = sqlite3SelectWalkNoop; @@ -146935,8 +148934,10 @@ static void srclistRenumberCursors( aCsrMap[pItem->iCursor+1] = pParse->nTab++; } pItem->iCursor = aCsrMap[pItem->iCursor+1]; - for(p=pItem->pSelect; p; p=p->pPrior){ - srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); + if( pItem->fg.isSubquery ){ + for(p=pItem->u4.pSubq->pSelect; p; p=p->pPrior){ + srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); + } } } } @@ -147083,9 +149084,9 @@ static int compoundHasDifferentAffinities(Select *p){ ** from 2015-02-09.) 
** ** (3) If the subquery is the right operand of a LEFT JOIN then -** (3a) the subquery may not be a join and -** (3b) the FROM clause of the subquery may not contain a virtual -** table and +** (3a) the subquery may not be a join +** (**) Was (3b): "the FROM clause of the subquery may not contain +** a virtual table" ** (**) Was: "The outer query may not have a GROUP BY." This case ** is now managed correctly ** (3d) the outer query may not be DISTINCT. @@ -147247,7 +149248,8 @@ static int flattenSubquery( assert( pSrc && iFrom>=0 && iFromnSrc ); pSubitem = &pSrc->a[iFrom]; iParent = pSubitem->iCursor; - pSub = pSubitem->pSelect; + assert( pSubitem->fg.isSubquery ); + pSub = pSubitem->u4.pSubq->pSelect; assert( pSub!=0 ); #ifndef SQLITE_OMIT_WINDOWFUNC @@ -147300,7 +149302,7 @@ static int flattenSubquery( */ if( (pSubitem->fg.jointype & (JT_OUTER|JT_LTORJ))!=0 ){ if( pSubSrc->nSrc>1 /* (3a) */ - || IsVirtual(pSubSrc->a[0].pTab) /* (3b) */ + /**** || IsVirtual(pSubSrc->a[0].pSTab) (3b)-omitted */ || (p->selFlags & SF_Distinct)!=0 /* (3d) */ || (pSubitem->fg.jointype & JT_RIGHT)!=0 /* (26) */ ){ @@ -147386,14 +149388,18 @@ static int flattenSubquery( pParse->zAuthContext = zSavedAuthContext; /* Delete the transient structures associated with the subquery */ - pSub1 = pSubitem->pSelect; - sqlite3DbFree(db, pSubitem->zDatabase); + + if( ALWAYS(pSubitem->fg.isSubquery) ){ + pSub1 = sqlite3SubqueryDetach(db, pSubitem); + }else{ + pSub1 = 0; + } + assert( pSubitem->fg.isSubquery==0 ); + assert( pSubitem->fg.fixedSchema==0 ); sqlite3DbFree(db, pSubitem->zName); sqlite3DbFree(db, pSubitem->zAlias); - pSubitem->zDatabase = 0; pSubitem->zName = 0; pSubitem->zAlias = 0; - pSubitem->pSelect = 0; assert( pSubitem->fg.isUsing!=0 || pSubitem->u3.pOn==0 ); /* If the sub-query is a compound SELECT statement, then (by restrictions @@ -147434,8 +149440,8 @@ static int flattenSubquery( ExprList *pOrderBy = p->pOrderBy; Expr *pLimit = p->pLimit; Select *pPrior = p->pPrior; - Table *pItemTab = pSubitem->pTab; - pSubitem->pTab = 0; + Table *pItemTab = pSubitem->pSTab; + pSubitem->pSTab = 0; p->pOrderBy = 0; p->pPrior = 0; p->pLimit = 0; @@ -147443,7 +149449,7 @@ static int flattenSubquery( p->pLimit = pLimit; p->pOrderBy = pOrderBy; p->op = TK_ALL; - pSubitem->pTab = pItemTab; + pSubitem->pSTab = pItemTab; if( pNew==0 ){ p->pPrior = pPrior; }else{ @@ -147458,11 +149464,14 @@ static int flattenSubquery( TREETRACE(0x4,pParse,p,("compound-subquery flattener" " creates %u as peer\n",pNew->selId)); } - assert( pSubitem->pSelect==0 ); + assert( pSubitem->fg.isSubquery==0 ); } sqlite3DbFree(db, aCsrMap); if( db->mallocFailed ){ - pSubitem->pSelect = pSub1; + assert( pSubitem->fg.fixedSchema==0 ); + assert( pSubitem->fg.isSubquery==0 ); + assert( pSubitem->u4.zDatabase==0 ); + sqlite3SrcItemAttachSubquery(pParse, pSubitem, pSub1, 0); return 1; } @@ -147473,8 +149482,8 @@ static int flattenSubquery( ** ** pSubitem->pTab is always non-NULL by test restrictions and tests above. 
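**
** As an illustration of the relaxed rule (3): a query such as
**    SELECT * FROM t1 LEFT JOIN (SELECT a, b FROM vt) AS s ON t1.x=s.a
** may now be flattened even when vt is a virtual table, since former
** restriction (3b) no longer applies.  Flattening is still refused when
** the subquery is itself a join (3a), when the outer query is DISTINCT
** (3d), or when the subquery is the right operand of a RIGHT JOIN (26).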
*/ - if( ALWAYS(pSubitem->pTab!=0) ){ - Table *pTabToDel = pSubitem->pTab; + if( ALWAYS(pSubitem->pSTab!=0) ){ + Table *pTabToDel = pSubitem->pSTab; if( pTabToDel->nTabRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel); @@ -147482,7 +149491,7 @@ static int flattenSubquery( }else{ pTabToDel->nTabRef--; } - pSubitem->pTab = 0; + pSubitem->pSTab = 0; } /* The following loop runs once for each term in a compound-subquery @@ -147536,13 +149545,16 @@ static int flattenSubquery( /* Transfer the FROM clause terms from the subquery into the ** outer query. */ + iNewParent = pSubSrc->a[0].iCursor; for(i=0; ia[i+iFrom]; - if( pItem->fg.isUsing ) sqlite3IdListDelete(db, pItem->u3.pUsing); assert( pItem->fg.isTabFunc==0 ); + assert( pItem->fg.isSubquery + || pItem->fg.fixedSchema + || pItem->u4.zDatabase==0 ); + if( pItem->fg.isUsing ) sqlite3IdListDelete(db, pItem->u3.pUsing); *pItem = pSubSrc->a[i]; pItem->fg.jointype |= ltorj; - iNewParent = pSubSrc->a[i].iCursor; memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i])); } pSrc->a[iFrom].fg.jointype &= JT_LTORJ; @@ -147582,6 +149594,7 @@ static int flattenSubquery( pWhere = pSub->pWhere; pSub->pWhere = 0; if( isOuterJoin>0 ){ + assert( pSubSrc->nSrc==1 ); sqlite3SetJoinExpr(pWhere, iNewParent, EP_OuterON); } if( pWhere ){ @@ -147693,7 +149706,8 @@ static void constInsert( return; /* Already present. Return without doing anything. */ } } - if( sqlite3ExprAffinity(pColumn)==SQLITE_AFF_BLOB ){ + assert( SQLITE_AFF_NONEbHasAffBlob = 1; } @@ -147768,7 +149782,8 @@ static int propagateConstantExprRewriteOne( if( pColumn==pExpr ) continue; if( pColumn->iTable!=pExpr->iTable ) continue; if( pColumn->iColumn!=pExpr->iColumn ) continue; - if( bIgnoreAffBlob && sqlite3ExprAffinity(pColumn)==SQLITE_AFF_BLOB ){ + assert( SQLITE_AFF_NONEfg.isCorrelated || pItem->fg.isCte ){ return 0; } - assert( pItem->pTab!=0 ); - pTab = pItem->pTab; - assert( pItem->pSelect!=0 ); - pSub = pItem->pSelect; + assert( pItem->pSTab!=0 ); + pTab = pItem->pSTab; + assert( pItem->fg.isSubquery ); + pSub = pItem->u4.pSubq->pSelect; assert( pSub->pEList->nExpr==pTab->nCol ); for(pX=pSub; pX; pX=pX->pPrior){ if( (pX->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){ @@ -148354,13 +150370,13 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ if( p->pWhere || p->pEList->nExpr!=1 || p->pSrc->nSrc!=1 - || p->pSrc->a[0].pSelect + || p->pSrc->a[0].fg.isSubquery || pAggInfo->nFunc!=1 || p->pHaving ){ return 0; } - pTab = p->pSrc->a[0].pTab; + pTab = p->pSrc->a[0].pSTab; assert( pTab!=0 ); assert( !IsView(pTab) ); if( !IsOrdinaryTable(pTab) ) return 0; @@ -148385,7 +150401,7 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ ** pFrom->pIndex and return SQLITE_OK. */ SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, SrcItem *pFrom){ - Table *pTab = pFrom->pTab; + Table *pTab = pFrom->pSTab; char *zIndexedBy = pFrom->u1.zIndexedBy; Index *pIdx; assert( pTab!=0 ); @@ -148420,7 +150436,7 @@ SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, SrcItem *pFrom){ ** above that generates the code for a compound SELECT with an ORDER BY clause ** uses a merge algorithm that requires the same collating sequence on the ** result columns as on the ORDER BY clause. See ticket -** http://www.sqlite.org/src/info/6709574d2a +** http://sqlite.org/src/info/6709574d2a ** ** This transformation is only needed for EXCEPT, INTERSECT, and UNION. 
** The UNION ALL operator works fine with multiSelectOrderBy() even when @@ -148462,7 +150478,11 @@ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ if( pNew==0 ) return WRC_Abort; memset(&dummy, 0, sizeof(dummy)); pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0); - if( pNewSrc==0 ) return WRC_Abort; + assert( pNewSrc!=0 || pParse->nErr ); + if( pParse->nErr ){ + sqlite3SrcListDelete(db, pNewSrc); + return WRC_Abort; + } *pNew = *p; p->pSrc = pNewSrc; p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ASTERISK, 0)); @@ -148477,7 +150497,7 @@ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ #ifndef SQLITE_OMIT_WINDOWFUNC p->pWinDefn = 0; #endif - p->selFlags &= ~SF_Compound; + p->selFlags &= ~(u32)SF_Compound; assert( (p->selFlags & SF_Converted)==0 ); p->selFlags |= SF_Converted; assert( pNew->pPrior!=0 ); @@ -148517,7 +150537,7 @@ static struct Cte *searchWith( ){ const char *zName = pItem->zName; With *p; - assert( pItem->zDatabase==0 ); + assert( pItem->fg.fixedSchema || pItem->u4.zDatabase==0 ); assert( zName!=0 ); for(p=pWith; p; p=p->pOuter){ int i; @@ -148587,7 +150607,7 @@ static int resolveFromTermToCte( Cte *pCte; /* Matched CTE (or NULL if no match) */ With *pWith; /* The matching WITH */ - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); if( pParse->pWith==0 ){ /* There are no WITH clauses in the stack. No match is possible */ return 0; @@ -148597,7 +150617,8 @@ static int resolveFromTermToCte( ** go no further. */ return 0; } - if( pFrom->zDatabase!=0 ){ + assert( pFrom->fg.hadSchema==0 || pFrom->fg.notCte!=0 ); + if( pFrom->fg.fixedSchema==0 && pFrom->u4.zDatabase!=0 ){ /* The FROM term contains a schema qualifier (ex: main.t1) and so ** it cannot possibly be a CTE reference. */ return 0; @@ -148633,7 +150654,7 @@ static int resolveFromTermToCte( } if( cannotBeFunction(pParse, pFrom) ) return 2; - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); pTab = sqlite3DbMallocZero(db, sizeof(Table)); if( pTab==0 ) return 2; pCteUse = pCte->pUse; @@ -148647,26 +150668,29 @@ static int resolveFromTermToCte( } pCteUse->eM10d = pCte->eM10d; } - pFrom->pTab = pTab; + pFrom->pSTab = pTab; pTab->nTabRef = 1; pTab->zName = sqlite3DbStrDup(db, pCte->zName); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; - pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0); + sqlite3SrcItemAttachSubquery(pParse, pFrom, pCte->pSelect, 1); if( db->mallocFailed ) return 2; - pFrom->pSelect->selFlags |= SF_CopyCte; - assert( pFrom->pSelect ); + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq ); + pSel = pFrom->u4.pSubq->pSelect; + assert( pSel!=0 ); + pSel->selFlags |= SF_CopyCte; if( pFrom->fg.isIndexedBy ){ sqlite3ErrorMsg(pParse, "no such index: \"%s\"", pFrom->u1.zIndexedBy); return 2; } + assert( !pFrom->fg.isIndexedBy ); pFrom->fg.isCte = 1; pFrom->u2.pCteUse = pCteUse; pCteUse->nUse++; /* Check if this is a recursive CTE. 
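  **
  ** For example, in
  **    WITH RECURSIVE cnt(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM cnt)
  **    SELECT x FROM cnt LIMIT 5;
  ** the reference to "cnt" inside the second UNION ALL arm is what the
  ** loop below detects when it marks that term with fg.isRecursive.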
*/ - pRecTerm = pSel = pFrom->pSelect; + pRecTerm = pSel; bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION ); while( bMayRecursive && pRecTerm->op==pSel->op ){ int i; @@ -148674,11 +150698,13 @@ static int resolveFromTermToCte( assert( pRecTerm->pPrior!=0 ); for(i=0; inSrc; i++){ SrcItem *pItem = &pSrc->a[i]; - if( pItem->zDatabase==0 - && pItem->zName!=0 + if( pItem->zName!=0 + && !pItem->fg.hadSchema + && ALWAYS( !pItem->fg.isSubquery ) + && (pItem->fg.fixedSchema || pItem->u4.zDatabase==0) && 0==sqlite3StrICmp(pItem->zName, pCte->zName) ){ - pItem->pTab = pTab; + pItem->pSTab = pTab; pTab->nTabRef++; pItem->fg.isRecursive = 1; if( pRecTerm->selFlags & SF_Recursive ){ @@ -148780,11 +150806,14 @@ SQLITE_PRIVATE void sqlite3SelectPopWith(Walker *pWalker, Select *p){ ** SQLITE_NOMEM. */ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){ - Select *pSel = pFrom->pSelect; + Select *pSel; Table *pTab; + assert( pFrom->fg.isSubquery ); + assert( pFrom->u4.pSubq!=0 ); + pSel = pFrom->u4.pSubq->pSelect; assert( pSel ); - pFrom->pTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table)); + pFrom->pSTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table)); if( pTab==0 ) return SQLITE_NOMEM; pTab->nTabRef = 1; if( pFrom->zAlias ){ @@ -148884,7 +150913,7 @@ static int selectExpander(Walker *pWalker, Select *p){ pEList = p->pEList; if( pParse->pWith && (p->selFlags & SF_View) ){ if( p->pWith==0 ){ - p->pWith = (With*)sqlite3DbMallocZero(db, sizeof(With)); + p->pWith = (With*)sqlite3DbMallocZero(db, SZ_WITH(1) ); if( p->pWith==0 ){ return WRC_Abort; } @@ -148904,33 +150933,35 @@ static int selectExpander(Walker *pWalker, Select *p){ */ for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab; - assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 ); - if( pFrom->pTab ) continue; + assert( pFrom->fg.isRecursive==0 || pFrom->pSTab!=0 ); + if( pFrom->pSTab ) continue; assert( pFrom->fg.isRecursive==0 ); if( pFrom->zName==0 ){ #ifndef SQLITE_OMIT_SUBQUERY - Select *pSel = pFrom->pSelect; + Select *pSel; + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq!=0 ); + pSel = pFrom->u4.pSubq->pSelect; /* A sub-query in the FROM clause of a SELECT */ assert( pSel!=0 ); - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort; if( sqlite3ExpandSubquery(pParse, pFrom) ) return WRC_Abort; #endif #ifndef SQLITE_OMIT_CTE }else if( (rc = resolveFromTermToCte(pParse, pWalker, pFrom))!=0 ){ if( rc>1 ) return WRC_Abort; - pTab = pFrom->pTab; + pTab = pFrom->pSTab; assert( pTab!=0 ); #endif }else{ /* An ordinary table or view name in the FROM clause */ - assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); + assert( pFrom->pSTab==0 ); + pFrom->pSTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); if( pTab==0 ) return WRC_Abort; if( pTab->nTabRef>=0xffff ){ sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535", pTab->zName); - pFrom->pTab = 0; + pFrom->pSTab = 0; return WRC_Abort; } pTab->nTabRef++; @@ -148942,7 +150973,7 @@ static int selectExpander(Walker *pWalker, Select *p){ i16 nCol; u8 eCodeOrig = pWalker->eCode; if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; - assert( pFrom->pSelect==0 ); + assert( pFrom->fg.isSubquery==0 ); if( IsView(pTab) ){ if( (db->flags & SQLITE_EnableView)==0 && pTab->pSchema!=db->aDb[1].pSchema @@ -148950,7 +150981,7 @@ static int selectExpander(Walker *pWalker, Select *p){ sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited", 
pTab->zName); } - pFrom->pSelect = sqlite3SelectDup(db, pTab->u.view.pSelect, 0); + sqlite3SrcItemAttachSubquery(pParse, pFrom, pTab->u.view.pSelect, 1); } #ifndef SQLITE_OMIT_VIRTUALTABLE else if( ALWAYS(IsVirtual(pTab)) @@ -148966,7 +150997,9 @@ static int selectExpander(Walker *pWalker, Select *p){ nCol = pTab->nCol; pTab->nCol = -1; pWalker->eCode = 1; /* Turn on Select.selId renumbering */ - sqlite3WalkSelect(pWalker, pFrom->pSelect); + if( pFrom->fg.isSubquery ){ + sqlite3WalkSelect(pWalker, pFrom->u4.pSubq->pSelect); + } pWalker->eCode = eCodeOrig; pTab->nCol = nCol; } @@ -149053,7 +151086,7 @@ static int selectExpander(Walker *pWalker, Select *p){ } for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ int nAdd; /* Number of cols including rowid */ - Table *pTab = pFrom->pTab; /* Table for this data source */ + Table *pTab = pFrom->pSTab; /* Table for this data source */ ExprList *pNestedFrom; /* Result-set of a nested FROM clause */ char *zTabName; /* AS name for this data source */ const char *zSchemaName = 0; /* Schema name for this data source */ @@ -149064,10 +151097,11 @@ static int selectExpander(Walker *pWalker, Select *p){ zTabName = pTab->zName; } if( db->mallocFailed ) break; - assert( (int)pFrom->fg.isNestedFrom == IsNestedFrom(pFrom->pSelect) ); + assert( (int)pFrom->fg.isNestedFrom == IsNestedFrom(pFrom) ); if( pFrom->fg.isNestedFrom ){ - assert( pFrom->pSelect!=0 ); - pNestedFrom = pFrom->pSelect->pEList; + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq ); + assert( pFrom->u4.pSubq->pSelect!=0 ); + pNestedFrom = pFrom->u4.pSubq->pSelect->pEList; assert( pNestedFrom!=0 ); assert( pNestedFrom->nExpr==pTab->nCol ); assert( VisibleRowid(pTab)==0 || ViewCanHaveRowid ); @@ -149306,14 +151340,12 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ assert( (p->selFlags & SF_Resolved) ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ - Table *pTab = pFrom->pTab; + Table *pTab = pFrom->pSTab; assert( pTab!=0 ); - if( (pTab->tabFlags & TF_Ephemeral)!=0 ){ + if( (pTab->tabFlags & TF_Ephemeral)!=0 && pFrom->fg.isSubquery ){ /* A sub-query in the FROM clause of a SELECT */ - Select *pSel = pFrom->pSelect; - if( pSel ){ - sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE); - } + Select *pSel = pFrom->u4.pSubq->pSelect; + sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE); } } } @@ -149627,6 +151659,7 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); + if( pParse->nErr ) return; pList = pF->pFExpr->x.pList; if( pF->iOBTab>=0 ){ /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. 
Inputs @@ -149667,7 +151700,7 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ } sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeChangeP5(v, (u16)nArg); sqlite3VdbeAddOp2(v, OP_Next, pF->iOBTab, iTop+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, iTop); sqlite3ReleaseTempRange(pParse, regAgg, nArg); @@ -149830,12 +151863,13 @@ static void updateAccumulator( } sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeChangeP5(v, (u16)nArg); sqlite3ReleaseTempRange(pParse, regAgg, nArg); } if( addrNext ){ sqlite3VdbeResolveLabel(v, addrNext); } + if( pParse->nErr ) return; } if( regHit==0 && pAggInfo->nAccumulator ){ regHit = regAcc; @@ -149845,6 +151879,7 @@ static void updateAccumulator( } for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ sqlite3ExprCode(pParse, pC->pCExpr, AggInfoColumnReg(pAggInfo,i)); + if( pParse->nErr ) return; } pAggInfo->directMode = 0; @@ -149960,25 +151995,28 @@ static SrcItem *isSelfJoinView( int iFirst, int iEnd /* Range of FROM-clause entries to search. */ ){ SrcItem *pItem; - assert( pThis->pSelect!=0 ); - if( pThis->pSelect->selFlags & SF_PushDown ) return 0; + Select *pSel; + assert( pThis->fg.isSubquery ); + pSel = pThis->u4.pSubq->pSelect; + assert( pSel!=0 ); + if( pSel->selFlags & SF_PushDown ) return 0; while( iFirsta[iFirst++]; - if( pItem->pSelect==0 ) continue; + if( !pItem->fg.isSubquery ) continue; if( pItem->fg.viaCoroutine ) continue; if( pItem->zName==0 ) continue; - assert( pItem->pTab!=0 ); - assert( pThis->pTab!=0 ); - if( pItem->pTab->pSchema!=pThis->pTab->pSchema ) continue; + assert( pItem->pSTab!=0 ); + assert( pThis->pSTab!=0 ); + if( pItem->pSTab->pSchema!=pThis->pSTab->pSchema ) continue; if( sqlite3_stricmp(pItem->zName, pThis->zName)!=0 ) continue; - pS1 = pItem->pSelect; - if( pItem->pTab->pSchema==0 && pThis->pSelect->selId!=pS1->selId ){ + pS1 = pItem->u4.pSubq->pSelect; + if( pItem->pSTab->pSchema==0 && pSel->selId!=pS1->selId ){ /* The query flattener left two different CTE tables with identical ** names in the same FROM clause. */ continue; } - if( pItem->pSelect->selFlags & SF_PushDown ){ + if( pS1->selFlags & SF_PushDown ){ /* The view was modified by some other optimization such as ** pushDownWhereTerms() */ continue; @@ -150014,6 +152052,7 @@ static void agginfoFree(sqlite3 *db, void *pArg){ ** * There is no WHERE or GROUP BY or HAVING clauses on the subqueries ** * The outer query is a simple count(*) with no WHERE clause or other ** extraneous syntax. +** * None of the subqueries are DISTINCT (forumpost/a860f5fb2e 2025-03-10) ** ** Return TRUE if the optimization is undertaken. 
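**
** For example,
**    SELECT count(*) FROM (SELECT a FROM t1 UNION ALL SELECT b FROM t2);
** can be computed as the equivalent of
**    SELECT (SELECT count(*) FROM t1) + (SELECT count(*) FROM t2);
** but a DISTINCT arm such as "SELECT DISTINCT a FROM t1" now defeats
** the rewrite, per the added condition above.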
*/ @@ -150022,6 +152061,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ Expr *pExpr; Expr *pCount; sqlite3 *db; + SrcItem *pFrom; if( (p->selFlags & SF_Aggregate)==0 ) return 0; /* This is an aggregate */ if( p->pEList->nExpr!=1 ) return 0; /* Single result column */ if( p->pWhere ) return 0; @@ -150036,17 +152076,22 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */ if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */ if( ExprHasProperty(pExpr, EP_WinFunc) ) return 0;/* Not a window function */ - pSub = p->pSrc->a[0].pSelect; - if( pSub==0 ) return 0; /* The FROM is a subquery */ + pFrom = p->pSrc->a; + if( pFrom->fg.isSubquery==0 ) return 0; /* FROM is a subquery */ + pSub = pFrom->u4.pSubq->pSelect; if( pSub->pPrior==0 ) return 0; /* Must be a compound */ if( pSub->selFlags & SF_CopyCte ) return 0; /* Not a CTE */ do{ if( pSub->op!=TK_ALL && pSub->pPrior ) return 0; /* Must be UNION ALL */ if( pSub->pWhere ) return 0; /* No WHERE clause */ if( pSub->pLimit ) return 0; /* No LIMIT clause */ - if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */ + if( pSub->selFlags & (SF_Aggregate|SF_Distinct) ){ + testcase( pSub->selFlags & SF_Aggregate ); + testcase( pSub->selFlags & SF_Distinct ); + return 0; /* Not an aggregate nor DISTINCT */ + } assert( pSub->pHaving==0 ); /* Due to the previous */ - pSub = pSub->pPrior; /* Repeat over compound */ + pSub = pSub->pPrior; /* Repeat over compound */ }while( pSub ); /* If we reach this point then it is OK to perform the transformation */ @@ -150054,17 +152099,16 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ db = pParse->db; pCount = pExpr; pExpr = 0; - pSub = p->pSrc->a[0].pSelect; - p->pSrc->a[0].pSelect = 0; + pSub = sqlite3SubqueryDetach(db, pFrom); sqlite3SrcListDelete(db, p->pSrc); - p->pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*p->pSrc)); + p->pSrc = sqlite3DbMallocZero(pParse->db, SZ_SRCLIST_1); while( pSub ){ Expr *pTerm; pPrior = pSub->pPrior; pSub->pPrior = 0; pSub->pNext = 0; pSub->selFlags |= SF_Aggregate; - pSub->selFlags &= ~SF_Compound; + pSub->selFlags &= ~(u32)SF_Compound; pSub->nSelectRow = 0; sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList); pTerm = pPrior ? sqlite3ExprDup(db, pCount, 0) : pCount; @@ -150079,7 +152123,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ pSub = pPrior; } p->pEList->a[0].pExpr = pExpr; - p->selFlags &= ~SF_Aggregate; + p->selFlags &= ~(u32)SF_Aggregate; #if TREETRACE_ENABLED if( sqlite3TreeTrace & 0x200 ){ @@ -150100,12 +152144,12 @@ static int sameSrcAlias(SrcItem *p0, SrcList *pSrc){ for(i=0; inSrc; i++){ SrcItem *p1 = &pSrc->a[i]; if( p1==p0 ) continue; - if( p0->pTab==p1->pTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){ + if( p0->pSTab==p1->pSTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){ return 1; } - if( p1->pSelect - && (p1->pSelect->selFlags & SF_NestedFrom)!=0 - && sameSrcAlias(p0, p1->pSelect->pSrc) + if( p1->fg.isSubquery + && (p1->u4.pSubq->pSelect->selFlags & SF_NestedFrom)!=0 + && sameSrcAlias(p0, p1->u4.pSubq->pSelect->pSrc) ){ return 1; } @@ -150170,13 +152214,13 @@ static int fromClauseTermCanBeCoroutine( if( i==0 ) break; i--; pItem--; - if( pItem->pSelect!=0 ) return 0; /* (1c-i) */ + if( pItem->fg.isSubquery ) return 0; /* (1c-i) */ } return 1; } /* -** Generate code for the SELECT statement given in the p argument. +** Generate byte-code for the SELECT statement given in the p argument. 
**
** The results are returned according to the SelectDest structure.
** See comments in sqliteInt.h for further information.
**
** This routine does NOT free the Select structure passed in.  The
** calling function needs to do that.
+**
+** This is a long function.  The following is an outline of the processing
+** steps, with tags referencing various milestones:
+**
+**  *  Resolve names and similar preparation                tag-select-0100
+**  *  Scan of the FROM clause                              tag-select-0200
+**      +  OUTER JOIN strength reduction                    tag-select-0220
+**      +  Sub-query ORDER BY removal                       tag-select-0230
+**      +  Query flattening                                 tag-select-0240
+**  *  Separate subroutine for compound-SELECT              tag-select-0300
+**  *  WHERE-clause constant propagation                    tag-select-0330
+**  *  Count()-of-VIEW optimization                         tag-select-0350
+**  *  Scan of the FROM clause again                        tag-select-0400
+**      +  Authorize unreferenced tables                    tag-select-0410
+**      +  Predicate push-down optimization                 tag-select-0420
+**      +  Omit unused subquery columns optimization        tag-select-0440
+**      +  Generate code to implement subqueries            tag-select-0480
+**          -  Co-routines                                  tag-select-0482
+**          -  Reuse previously computed CTE                tag-select-0484
+**          -  Reuse previously computed VIEW               tag-select-0486
+**          -  Materialize a VIEW or CTE                    tag-select-0488
+**  *  DISTINCT ORDER BY -> GROUP BY optimization           tag-select-0500
+**  *  Set up for ORDER BY                                  tag-select-0600
+**  *  Create output table                                  tag-select-0630
+**  *  Prepare registers for LIMIT                          tag-select-0650
+**  *  Setup for DISTINCT                                   tag-select-0680
+**  *  Generate code for non-aggregate and non-GROUP BY     tag-select-0700
+**  *  Generate code for aggregate and/or GROUP BY          tag-select-0800
+**      +  GROUP BY queries                                 tag-select-0810
+**      +  non-GROUP BY queries                             tag-select-0820
+**          -  Special case of count() w/o GROUP BY         tag-select-0821
+**          -  General case of non-GROUP BY aggregates      tag-select-0822
+**  *  Sort results, as needed                              tag-select-0900
+**  *  Internal self-checks                                 tag-select-1000
 */
 SQLITE_PRIVATE int sqlite3Select(
   Parse *pParse,         /* The parser context */
@@ -150230,6 +152308,7 @@ SQLITE_PRIVATE int sqlite3Select(
   }
 #endif

+  /* tag-select-0100 */
   assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo );
   assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo );
   assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue );
@@ -150251,7 +152330,7 @@
      testcase( pParse->earlyCleanup );
      p->pOrderBy = 0;
    }
-   p->selFlags &= ~SF_Distinct;
+   p->selFlags &= ~(u32)SF_Distinct;
    p->selFlags |= SF_NoopOrderBy;
  }
  sqlite3SelectPrep(pParse, p, 0);
@@ -150281,7 +152360,7 @@
    if( sameSrcAlias(p0, p->pSrc) ){
      sqlite3ErrorMsg(pParse,
          "target object/alias may not appear in FROM clause: %s",
-         p0->zAlias ? p0->zAlias : p0->pTab->zName
+         p0->zAlias ? p0->zAlias : p0->pSTab->zName
      );
      goto select_end;
    }
@@ -150290,7 +152369,7 @@
 ** and leaving this flag set can cause errors if a compound sub-query
 ** in p->pSrc is flattened into this query and this function called
 ** again as part of compound SELECT processing.
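**
** The check rejects statements along the lines of
**    UPDATE t1 SET a=b FROM t1;
** with "target object/alias may not appear in FROM clause: t1".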
*/ - p->selFlags &= ~SF_UFSrcCheck; + p->selFlags &= ~(u32)SF_UFSrcCheck; } if( pDest->eDest==SRT_Output ){ @@ -150316,12 +152395,13 @@ SQLITE_PRIVATE int sqlite3Select( /* Try to do various optimizations (flattening subqueries, and strength ** reduction of join operators) in the FROM clause up into the main query + ** tag-select-0200 */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) for(i=0; !p->pPrior && i<pTabList->nSrc; i++){ SrcItem *pItem = &pTabList->a[i]; - Select *pSub = pItem->pSelect; - Table *pTab = pItem->pTab; + Select *pSub = pItem->fg.isSubquery ? pItem->u4.pSubq->pSelect : 0; + Table *pTab = pItem->pSTab; /* The expander should have already created transient Table objects ** even for FROM clause elements such as subqueries that do not correspond @@ -150338,6 +152418,7 @@ SQLITE_PRIVATE int sqlite3Select( ** way that the i-th table cannot be the NULL row of a join, then ** perform the appropriate simplification. This is called ** "OUTER JOIN strength reduction" in the SQLite documentation. + ** tag-select-0220 */ if( (pItem->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor, @@ -150408,7 +152489,8 @@ SQLITE_PRIVATE int sqlite3Select( if( (pSub->selFlags & SF_Aggregate)!=0 ) continue; assert( pSub->pGroupBy==0 ); - /* If a FROM-clause subquery has an ORDER BY clause that is not + /* tag-select-0230: ** If a FROM-clause subquery has an ORDER BY clause that is not ** really doing anything, then delete it now so that it does not ** interfere with query flattening. See the discussion at ** https://sqlite.org/forum/forumpost/2d76f2bcf65d256a @@ -150427,13 +152509,16 @@ SQLITE_PRIVATE int sqlite3Select( ** (a) The outer query has a different ORDER BY clause ** (b) The subquery is part of a join ** See forum post 062d576715d277c8 + ** (6) The subquery is not a recursive CTE. ORDER BY has a different + ** meaning for recursive CTEs and this optimization does not + ** apply. ** ** Also retain the ORDER BY if the OmitOrderBy optimization is disabled. */ if( pSub->pOrderBy!=0 && (p->pOrderBy!=0 || pTabList->nSrc>1) /* Condition (5) */ && pSub->pLimit==0 /* Condition (1) */ - && (pSub->selFlags & SF_OrderByReqd)==0 /* Condition (2) */ + && (pSub->selFlags & (SF_OrderByReqd|SF_Recursive))==0 /* (2) and (6) */ && (p->selFlags & SF_OrderByReqd)==0 /* Condition (3) and (4) */ && OptimizationEnabled(db, SQLITE_OmitOrderBy) ){ @@ -150471,6 +152556,7 @@ SQLITE_PRIVATE int sqlite3Select( continue; } + /* tag-select-0240 */ if( flattenSubquery(pParse, p, i, isAgg) ){ if( pParse->nErr ) goto select_end; /* This subquery can be absorbed into its parent. */ @@ -150486,7 +152572,7 @@ SQLITE_PRIVATE int sqlite3Select( #ifndef SQLITE_OMIT_COMPOUND_SELECT /* Handle compound SELECT statements using the separate multiSelect() - ** procedure. + ** procedure. tag-select-0300 */ if( p->pPrior ){ rc = multiSelect(pParse, p, pDest); @@ -150502,9 +152588,9 @@ SQLITE_PRIVATE int sqlite3Select( #endif /* Do the WHERE-clause constant propagation optimization if this is - ** a join. No need to speed time on this operation for non-join queries + ** a join. No need to spend time on this operation for non-join queries ** as the equivalent optimization will be handled by query planner in - ** sqlite3WhereBegin(). + ** sqlite3WhereBegin().
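** ** For illustration only (hypothetical schema): in "SELECT * FROM t1, t2 WHERE t1.a=39 AND t2.b=t1.a", propagating the constant 39 through the equality t2.b=t1.a yields the implied term t2.b=39, which may allow an index on t2.b to drive the join.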
tag-select-0330 */ if( p->pWhere!=0 && p->pWhere->op==TK_AND @@ -150521,6 +152607,7 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x2000,pParse,p,("Constant propagation not helpful\n")); } + /* tag-select-0350 */ if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView) && countOfViewOptimization(pParse, p) ){ @@ -150528,20 +152615,26 @@ SQLITE_PRIVATE int sqlite3Select( pTabList = p->pSrc; } - /* For each term in the FROM clause, do two things: - ** (1) Authorized unreferenced tables - ** (2) Generate code for all sub-queries + /* Loop over all terms in the FROM clause and do two things for each term: + ** + ** (1) Authorize unreferenced tables + ** (2) Generate code for all sub-queries + ** + ** tag-select-0400 */ for(i=0; i<pTabList->nSrc; i++){ SrcItem *pItem = &pTabList->a[i]; SrcItem *pPrior; SelectDest dest; + Subquery *pSubq; Select *pSub; #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) const char *zSavedAuthContext; #endif - /* Issue SQLITE_READ authorizations with a fake column name for any + /* Authorize unreferenced tables. tag-select-0410 + ** + ** Issue SQLITE_READ authorizations with a fake column name for any ** tables that are referenced but from which no values are extracted. ** Examples of where these kinds of null SQLITE_READ authorizations ** would occur: @@ -150558,17 +152651,28 @@ SQLITE_PRIVATE int sqlite3Select( ** string for the fake column name seems safer. */ if( pItem->colUsed==0 && pItem->zName!=0 ){ - sqlite3AuthCheck(pParse, SQLITE_READ, pItem->zName, "", pItem->zDatabase); + const char *zDb; + if( pItem->fg.fixedSchema ){ + int iDb = sqlite3SchemaToIndex(pParse->db, pItem->u4.pSchema); + zDb = db->aDb[iDb].zDbSName; + }else if( pItem->fg.isSubquery ){ + zDb = 0; + }else{ + zDb = pItem->u4.zDatabase; + } + sqlite3AuthCheck(pParse, SQLITE_READ, pItem->zName, "", zDb); } #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* Generate code for all sub-queries in the FROM clause */ - pSub = pItem->pSelect; - if( pSub==0 || pItem->addrFillSub!=0 ) continue; + if( pItem->fg.isSubquery==0 ) continue; + pSubq = pItem->u4.pSubq; + assert( pSubq!=0 ); + pSub = pSubq->pSelect; /* The code for a subquery should only be generated once. */ - assert( pItem->addrFillSub==0 ); + if( pSubq->addrFillSub!=0 ) continue; /* Increment Parse.nHeight by the height of the largest expression ** tree referred to by this, the parent select. The child select @@ -150581,6 +152685,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Make copies of constant WHERE-clause terms in the outer query down ** inside the subquery. This can help the subquery to run more efficiently. + ** This is the "predicate push-down optimization". tag-select-0420 */ if( OptimizationEnabled(db, SQLITE_PushDown) && (pItem->fg.isCte==0 @@ -150594,13 +152699,14 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3TreeViewSelect(0, p, 0); } #endif - assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); + assert( pSubq->pSelect && (pSub->selFlags & SF_PushDown)!=0 ); }else{ - TREETRACE(0x4000,pParse,p,("WHERE-lcause push-down not possible\n")); + TREETRACE(0x4000,pParse,p,("WHERE-clause push-down not possible\n")); } /* Convert unused result columns of the subquery into simple NULL ** expressions, to avoid unneeded searching and computation.
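** ** A minimal sketch of the effect (hypothetical schema): in "SELECT a FROM (SELECT a, b, c FROM t1)", columns b and c of the subquery are never referenced by the outer query, so their result expressions can be replaced by NULL rather than being computed from t1.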
+ ** tag-select-0440 */ if( OptimizationEnabled(db, SQLITE_NullUnusedCols) && disableUnusedSubqueryResultColumns(pItem) @@ -150618,32 +152724,33 @@ SQLITE_PRIVATE int sqlite3Select( zSavedAuthContext = pParse->zAuthContext; pParse->zAuthContext = pItem->zName; - /* Generate code to implement the subquery + /* Generate byte-code to implement the subquery tag-select-0480 */ if( fromClauseTermCanBeCoroutine(pParse, pTabList, i, p->selFlags) ){ /* Implement a co-routine that will return a single row of the result - ** set on each invocation. + ** set on each invocation. tag-select-0482 */ int addrTop = sqlite3VdbeCurrentAddr(v)+1; - pItem->regReturn = ++pParse->nMem; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop); + pSubq->regReturn = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, pSubq->regReturn, 0, addrTop); VdbeComment((v, "%!S", pItem)); - pItem->addrFillSub = addrTop; - sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn); + pSubq->addrFillSub = addrTop; + sqlite3SelectDestInit(&dest, SRT_Coroutine, pSubq->regReturn); ExplainQueryPlan((pParse, 1, "CO-ROUTINE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); - pItem->pTab->nRowLogEst = pSub->nSelectRow; + pItem->pSTab->nRowLogEst = pSub->nSelectRow; pItem->fg.viaCoroutine = 1; - pItem->regResult = dest.iSdst; - sqlite3VdbeEndCoroutine(v, pItem->regReturn); + pSubq->regResult = dest.iSdst; + sqlite3VdbeEndCoroutine(v, pSubq->regReturn); + VdbeComment((v, "end %!S", pItem)); sqlite3VdbeJumpHere(v, addrTop-1); sqlite3ClearTempRegCache(pParse); }else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){ /* This is a CTE for which materialization code has already been ** generated. Invoke the subroutine to compute the materialization, - ** the make the pItem->iCursor be a copy of the ephemeral table that - ** holds the result of the materialization. */ + ** then make the pItem->iCursor be a copy of the ephemeral table that + ** holds the result of the materialization. tag-select-0484 */ CteUse *pCteUse = pItem->u2.pCteUse; sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e); if( pItem->iCursor!=pCteUse->iCur ){ @@ -150653,25 +152760,30 @@ SQLITE_PRIVATE int sqlite3Select( pSub->nSelectRow = pCteUse->nRowEst; }else if( (pPrior = isSelfJoinView(pTabList, pItem, 0, i))!=0 ){ /* This view has already been materialized by a prior entry in - ** this same FROM clause. Reuse it. */ - if( pPrior->addrFillSub ){ - sqlite3VdbeAddOp2(v, OP_Gosub, pPrior->regReturn, pPrior->addrFillSub); + ** this same FROM clause. Reuse it. tag-select-0486 */ + Subquery *pPriorSubq; + assert( pPrior->fg.isSubquery ); + pPriorSubq = pPrior->u4.pSubq; + assert( pPriorSubq!=0 ); + if( pPriorSubq->addrFillSub ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pPriorSubq->regReturn, + pPriorSubq->addrFillSub); } sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor); - pSub->nSelectRow = pPrior->pSelect->nSelectRow; + pSub->nSelectRow = pPriorSubq->pSelect->nSelectRow; }else{ /* Materialize the view. If the view is not correlated, generate a ** subroutine to do the materialization so that subsequent uses of - ** the same view can reuse the materialization. */ + ** the same view can reuse the materialization. 
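** ** For example (an illustrative sketch, not from the upstream docs): in "SELECT * FROM v AS x JOIN v AS y ON x.id=y.pid", a non-correlated view v is computed into an ephemeral table once by the subroutine generated here, and the second occurrence attaches to the same table via OP_OpenDup (see the reuse case above) instead of recomputing v.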
tag-select-0488 */ int topAddr; int onceAddr = 0; #ifdef SQLITE_ENABLE_STMT_SCANSTATUS int addrExplain; #endif - pItem->regReturn = ++pParse->nMem; + pSubq->regReturn = ++pParse->nMem; topAddr = sqlite3VdbeAddOp0(v, OP_Goto); - pItem->addrFillSub = topAddr+1; + pSubq->addrFillSub = topAddr+1; pItem->fg.isMaterialized = 1; if( pItem->fg.isCorrelated==0 ){ /* If the subquery is not correlated and if we are not inside of @@ -150686,17 +152798,17 @@ SQLITE_PRIVATE int sqlite3Select( ExplainQueryPlan2(addrExplain, (pParse, 1, "MATERIALIZE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); - pItem->pTab->nRowLogEst = pSub->nSelectRow; + pItem->pSTab->nRowLogEst = pSub->nSelectRow; if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); - sqlite3VdbeAddOp2(v, OP_Return, pItem->regReturn, topAddr+1); + sqlite3VdbeAddOp2(v, OP_Return, pSubq->regReturn, topAddr+1); VdbeComment((v, "end %!S", pItem)); sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); sqlite3VdbeJumpHere(v, topAddr); sqlite3ClearTempRegCache(pParse); if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){ CteUse *pCteUse = pItem->u2.pCteUse; - pCteUse->addrM9e = pItem->addrFillSub; - pCteUse->regRtn = pItem->regReturn; + pCteUse->addrM9e = pSubq->addrFillSub; + pCteUse->regRtn = pSubq->regReturn; pCteUse->iCur = pItem->iCursor; pCteUse->nRowEst = pSub->nSelectRow; } @@ -150722,7 +152834,9 @@ SQLITE_PRIVATE int sqlite3Select( } #endif - /* If the query is DISTINCT with an ORDER BY but is not an aggregate, and + /* tag-select-0500 + ** + ** If the query is DISTINCT with an ORDER BY but is not an aggregate, and ** if the select-list is the same as the ORDER BY list, then this query ** can be rewritten as a GROUP BY. In other words, this: ** @@ -150739,12 +152853,18 @@ SQLITE_PRIVATE int sqlite3Select( */ if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0 + && OptimizationEnabled(db, SQLITE_GroupByOrder) #ifndef SQLITE_OMIT_WINDOWFUNC && p->pWin==0 #endif ){ - p->selFlags &= ~SF_Distinct; + p->selFlags &= ~(u32)SF_Distinct; pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0); + if( pGroupBy ){ + for(i=0; i<pGroupBy->nExpr; i++){ + pGroupBy->a[i].u.x.iOrderByCol = i+1; + } + } p->selFlags |= SF_Aggregate; /* Notice that even though SF_Distinct has been cleared from p->selFlags, ** the sDistinct.isTnct is still set. Hence, isTnct represents the @@ -150766,7 +152886,7 @@ SQLITE_PRIVATE int sqlite3Select( ** If that is the case, then the OP_OpenEphemeral instruction will be ** changed to an OP_Noop once we figure out that the sorting index is ** not needed. The sSort.addrSortIndex variable is used to facilitate - ** that change. + ** that change. tag-select-0600 */ if( sSort.pOrderBy ){ KeyInfo *pKeyInfo; @@ -150783,6 +152903,7 @@ SQLITE_PRIVATE int sqlite3Select( } /* If the output is destined for a temporary table, open that table. + ** tag-select-0630 */ if( pDest->eDest==SRT_EphemTab ){ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr); @@ -150800,7 +152921,7 @@ SQLITE_PRIVATE int sqlite3Select( } } - /* Set the limiter. + /* Set the limiter. tag-select-0650 */ iEnd = sqlite3VdbeMakeLabel(pParse); if( (p->selFlags & SF_FixedLimit)==0 ){ @@ -150812,7 +152933,7 @@ SQLITE_PRIVATE int sqlite3Select( sSort.sortFlags |= SORTFLAG_UseSorter; } - /* Open an ephemeral index to use for the distinct set.
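** ** Illustration (hypothetical query): for "SELECT DISTINCT b FROM t1 WHERE a>10", when no index makes the output inherently distinct, each candidate row is probed against this ephemeral index and skipped if an equal row has already been emitted.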
tag-select-0680 */ if( p->selFlags & SF_Distinct ){ sDistinct.tabTnct = pParse->nTab++; @@ -150827,7 +152948,7 @@ SQLITE_PRIVATE int sqlite3Select( } if( !isAgg && pGroupBy==0 ){ - /* No aggregate functions and no GROUP BY clause */ + /* No aggregate functions and no GROUP BY clause. tag-select-0700 */ u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0) | (p->selFlags & SF_FixedLimit); #ifndef SQLITE_OMIT_WINDOWFUNC @@ -150846,6 +152967,12 @@ SQLITE_PRIVATE int sqlite3Select( if( pWInfo==0 ) goto select_end; if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){ p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo); + if( pDest->eDest<=SRT_DistQueue && pDest->eDest>=SRT_DistFifo ){ + /* TUNING: For a UNION CTE, because UNION implies DISTINCT, + ** reduce the estimated output row count by 8 (LogEst 30). + ** Search for tag-20250414a to see other cases */ + p->nSelectRow -= 30; + } } if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){ sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo); @@ -150900,8 +153027,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3WhereEnd(pWInfo); } }else{ - /* This case when there exist aggregate functions or a GROUP BY clause - ** or both */ + /* This case is for when there exist aggregate functions or a GROUP BY + ** clause or both. tag-select-0800 */ NameContext sNC; /* Name context for processing aggregate information */ int iAMem; /* First Mem address for storing current GROUP BY */ int iBMem; /* First Mem address for previous GROUP BY */ @@ -151020,7 +153147,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Processing for aggregates with GROUP BY is very different and - ** much more complex than aggregates without a GROUP BY. + ** much more complex than aggregates without a GROUP BY. tag-select-0810 */ if( pGroupBy ){ KeyInfo *pKeyInfo; /* Keying information for the group by clause */ @@ -151076,6 +153203,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag); VdbeComment((v, "clear abort flag")); sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1); + sqlite3ExprNullRegisterRange(pParse, iAMem, pGroupBy->nExpr); /* Begin a loop that will extract all source rows in GROUP BY order. ** This might involve two separate loops with an OP_Sort in between, or @@ -151207,12 +153335,29 @@ SQLITE_PRIVATE int sqlite3Select( sortOut, sortPTab); } for(j=0; j<pGroupBy->nExpr; j++){ + int iOrderByCol = pGroupBy->a[j].u.x.iOrderByCol; + if( groupBySort ){ sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j); }else{ pAggInfo->directMode = 1; sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j); } + + if( iOrderByCol ){ + Expr *pX = p->pEList->a[iOrderByCol-1].pExpr; + Expr *pBase = sqlite3ExprSkipCollateAndLikely(pX); + while( ALWAYS(pBase!=0) && pBase->op==TK_IF_NULL_ROW ){ + pX = pBase->pLeft; + pBase = sqlite3ExprSkipCollateAndLikely(pX); + } + if( ALWAYS(pBase!=0) + && pBase->op!=TK_AGG_COLUMN + && pBase->op!=TK_REGISTER + ){ + sqlite3ExprToRegister(pX, iAMem+j); + } + } } sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr, (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); @@ -151228,9 +153373,9 @@ SQLITE_PRIVATE int sqlite3Select( ** and resets the aggregate accumulator registers in preparation ** for the next GROUP BY batch.
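** ** Sketch of the control flow for a hypothetical query "SELECT b, sum(c) FROM t1 GROUP BY b": rows arrive ordered by b, and whenever the OP_Compare above detects that b has changed, the output subroutine emits one row for the batch just finished and the sum(c) accumulator is reset before the next batch begins.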
*/ - sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); VdbeComment((v, "output one row")); + sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v); VdbeComment((v, "check abort flag")); sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); @@ -151304,9 +153449,12 @@ SQLITE_PRIVATE int sqlite3Select( } } /* endif pGroupBy. Begin aggregate queries without GROUP BY: */ else { + /* Aggregate functions without GROUP BY. tag-select-0820 */ Table *pTab; if( (pTab = isSimpleCount(p, pAggInfo))!=0 ){ - /* If isSimpleCount() returns a pointer to a Table structure, then + /* tag-select-0821 + ** + ** If isSimpleCount() returns a pointer to a Table structure, then ** the SQL statement is of the form: ** ** SELECT count(*) FROM @@ -151365,6 +153513,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp1(v, OP_Close, iCsr); explainSimpleCount(pParse, pTab, pBest); }else{ + /* The general case of an aggregate query without GROUP BY + ** tag-select-0822 */ int regAcc = 0; /* "populate accumulators" flag */ ExprList *pDistinct = 0; u16 distFlag = 0; @@ -151453,7 +153603,7 @@ SQLITE_PRIVATE int sqlite3Select( } /* If there is an ORDER BY clause, then we need to sort the results - ** and send them to the callback one by one. + ** and send them to the callback one by one. tag-select-0900 */ if( sSort.pOrderBy ){ assert( p->pEList==pEList ); @@ -151476,6 +153626,7 @@ SQLITE_PRIVATE int sqlite3Select( assert( db->mallocFailed==0 || pParse->nErr!=0 ); sqlite3ExprListDelete(db, pMinMaxOrderBy); #ifdef SQLITE_DEBUG + /* Internal self-checks. tag-select-1000 */ if( pAggInfo && !db->mallocFailed ){ #if TREETRACE_ENABLED if( sqlite3TreeTrace & 0x20 ){ @@ -151783,7 +153934,8 @@ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ assert( pParse->db->pVtabCtx==0 ); #endif assert( pParse->bReturning ); - assert( &(pParse->u1.pReturning->retTrig) == pTrig ); + assert( !pParse->isCreate ); + assert( &(pParse->u1.d.pReturning->retTrig) == pTrig ); pTrig->table = pTab->zName; pTrig->pTabSchema = pTab->pSchema; pTrig->pNext = pList; @@ -151865,8 +154017,10 @@ SQLITE_PRIVATE void sqlite3BeginTrigger( ** name on pTableName if we are reparsing out of the schema table */ if( db->init.busy && iDb!=1 ){ - sqlite3DbFree(db, pTableName->a[0].zDatabase); - pTableName->a[0].zDatabase = 0; + assert( pTableName->a[0].fg.fixedSchema==0 ); + assert( pTableName->a[0].fg.isSubquery==0 ); + sqlite3DbFree(db, pTableName->a[0].u4.zDatabase); + pTableName->a[0].u4.zDatabase = 0; } /* If the trigger name was unqualified, and the table is a temp table, @@ -152344,7 +154498,8 @@ SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr) } assert( pName->nSrc==1 ); - zDb = pName->a[0].zDatabase; + assert( pName->a[0].fg.fixedSchema==0 && pName->a[0].fg.isSubquery==0 ); + zDb = pName->a[0].u4.zDatabase; zName = pName->a[0].zName; assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) ); for(i=OMIT_TEMPDB; inDb; i++){ @@ -152581,7 +154736,9 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc( Schema *pSchema = pStep->pTrig->pSchema; pSrc->a[0].zName = zName; if( pSchema!=db->aDb[1].pSchema ){ - pSrc->a[0].pSchema = pSchema; + assert( pSrc->a[0].fg.fixedSchema || pSrc->a[0].u4.zDatabase==0 ); + pSrc->a[0].u4.pSchema = pSchema; + pSrc->a[0].fg.fixedSchema = 1; } if( pStep->pFrom ){ SrcList *pDup = sqlite3SrcListDup(db, pStep->pFrom, 0); @@ -152694,7 +154851,7 @@ static int 
sqlite3ReturningSubqueryCorrelated(Walker *pWalker, Select *pSelect){ pSrc = pSelect->pSrc; assert( pSrc!=0 ); for(i=0; inSrc; i++){ - if( pSrc->a[i].pTab==pWalker->u.pTab ){ + if( pSrc->a[i].pSTab==pWalker->u.pTab ){ testcase( pSelect->selFlags & SF_Correlated ); pSelect->selFlags |= SF_Correlated; pWalker->eCode = 1; @@ -152746,7 +154903,8 @@ static void codeReturningTrigger( ExprList *pNew; Returning *pReturning; Select sSelect; - SrcList sFrom; + SrcList *pFrom; + u8 fromSpace[SZ_SRCLIST_1]; assert( v!=0 ); if( !pParse->bReturning ){ @@ -152755,19 +154913,21 @@ static void codeReturningTrigger( return; } assert( db->pParse==pParse ); - pReturning = pParse->u1.pReturning; + assert( !pParse->isCreate ); + pReturning = pParse->u1.d.pReturning; if( pTrigger != &(pReturning->retTrig) ){ /* This RETURNING trigger is for a different statement */ return; } memset(&sSelect, 0, sizeof(sSelect)); - memset(&sFrom, 0, sizeof(sFrom)); + pFrom = (SrcList*)fromSpace; + memset(pFrom, 0, SZ_SRCLIST_1); sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0); - sSelect.pSrc = &sFrom; - sFrom.nSrc = 1; - sFrom.a[0].pTab = pTab; - sFrom.a[0].zName = pTab->zName; /* tag-20240424-1 */ - sFrom.a[0].iCursor = -1; + sSelect.pSrc = pFrom; + pFrom->nSrc = 1; + pFrom->a[0].pSTab = pTab; + pFrom->a[0].zName = pTab->zName; /* tag-20240424-1 */ + pFrom->a[0].iCursor = -1; sqlite3SelectPrep(pParse, &sSelect, 0); if( pParse->nErr==0 ){ assert( db->mallocFailed==0 ); @@ -152985,6 +155145,8 @@ static TriggerPrg *codeRowTrigger( sSubParse.eTriggerOp = pTrigger->op; sSubParse.nQueryLoop = pParse->nQueryLoop; sSubParse.prepFlags = pParse->prepFlags; + sSubParse.oldmask = 0; + sSubParse.newmask = 0; v = sqlite3GetVdbe(&sSubParse); if( v ){ @@ -153117,7 +155279,7 @@ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( ** invocation is disallowed if (a) the sub-program is really a trigger, ** not a foreign key action, and (b) the flag to enable recursive triggers ** is clear. */ - sqlite3VdbeChangeP5(v, (u8)bRecursive); + sqlite3VdbeChangeP5(v, (u16)bRecursive); } } @@ -153476,7 +155638,7 @@ static void updateFromSelect( Expr *pLimit2 = 0; ExprList *pOrderBy2 = 0; sqlite3 *db = pParse->db; - Table *pTab = pTabList->a[0].pTab; + Table *pTab = pTabList->a[0].pSTab; SrcList *pSrc; Expr *pWhere2; int eDest; @@ -153500,8 +155662,8 @@ static void updateFromSelect( if( pSrc ){ assert( pSrc->a[0].fg.notCte ); pSrc->a[0].iCursor = -1; - pSrc->a[0].pTab->nTabRef--; - pSrc->a[0].pTab = 0; + pSrc->a[0].pSTab->nTabRef--; + pSrc->a[0].pSTab = 0; } if( pPk ){ for(i=0; inKeyCol; i++){ @@ -153739,38 +155901,32 @@ SQLITE_PRIVATE void sqlite3Update( */ chngRowid = chngPk = 0; for(i=0; inExpr; i++){ - u8 hCol = sqlite3StrIHash(pChanges->a[i].zEName); /* If this is an UPDATE with a FROM clause, do not resolve expressions ** here. The call to sqlite3Select() below will do that. 
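** ** For example (hypothetical): in "UPDATE t1 SET a=t2.x FROM t2 WHERE t1.id=t2.id", the reference to t2.x cannot be resolved against t1 alone, so name resolution is deferred to the sqlite3Select() call that implements the FROM clause.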
*/ if( nChangeFrom==0 && sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){ goto update_cleanup; } - for(j=0; jnCol; j++){ - if( pTab->aCol[j].hName==hCol - && sqlite3StrICmp(pTab->aCol[j].zCnName, pChanges->a[i].zEName)==0 - ){ - if( j==pTab->iPKey ){ - chngRowid = 1; - pRowidExpr = pChanges->a[i].pExpr; - iRowidExpr = i; - }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){ - chngPk = 1; - } + j = sqlite3ColumnIndex(pTab, pChanges->a[i].zEName); + if( j>=0 ){ + if( j==pTab->iPKey ){ + chngRowid = 1; + pRowidExpr = pChanges->a[i].pExpr; + iRowidExpr = i; + }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){ + chngPk = 1; + } #ifndef SQLITE_OMIT_GENERATED_COLUMNS - else if( pTab->aCol[j].colFlags & COLFLAG_GENERATED ){ - testcase( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ); - testcase( pTab->aCol[j].colFlags & COLFLAG_STORED ); - sqlite3ErrorMsg(pParse, - "cannot UPDATE generated column \"%s\"", - pTab->aCol[j].zCnName); - goto update_cleanup; - } -#endif - aXRef[j] = i; - break; + else if( pTab->aCol[j].colFlags & COLFLAG_GENERATED ){ + testcase( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ); + testcase( pTab->aCol[j].colFlags & COLFLAG_STORED ); + sqlite3ErrorMsg(pParse, + "cannot UPDATE generated column \"%s\"", + pTab->aCol[j].zCnName); + goto update_cleanup; } - } - if( j>=pTab->nCol ){ +#endif + aXRef[j] = i; + }else{ if( pPk==0 && sqlite3IsRowid(pChanges->a[i].zEName) ){ j = -1; chngRowid = 1; @@ -154749,7 +156905,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( int nClause = 0; /* Counter of ON CONFLICT clauses */ assert( pTabList->nSrc==1 ); - assert( pTabList->a[0].pTab!=0 ); + assert( pTabList->a[0].pSTab!=0 ); assert( pUpsert!=0 ); assert( pUpsert->pUpsertTarget!=0 ); @@ -154768,7 +156924,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( if( rc ) return rc; /* Check to see if the conflict target matches the rowid. */ - pTab = pTabList->a[0].pTab; + pTab = pTabList->a[0].pSTab; pTarget = pUpsert->pUpsertTarget; iCursor = pTabList->a[0].iCursor; if( HasRowid(pTab) @@ -155093,7 +157249,7 @@ SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse, Token *pNm, Expr *pInto){ #else /* When SQLITE_BUG_COMPATIBLE_20160819 is defined, unrecognized arguments ** to VACUUM are silently ignored. This is a back-out of a bug fix that - ** occurred on 2016-08-19 (https://www.sqlite.org/src/info/083f9e6270). + ** occurred on 2016-08-19 (https://sqlite.org/src/info/083f9e6270). ** The buggy behavior is required for binary compatibility with some ** legacy applications. 
*/ iDb = sqlite3FindDb(pParse->db, pNm); @@ -155139,6 +157295,9 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( const char *zDbMain; /* Schema name of database to vacuum */ const char *zOut; /* Name of output file */ u32 pgflags = PAGER_SYNCHRONOUS_OFF; /* sync flags for output db */ + u64 iRandom; /* Random value used for zDbVacuum[] */ + char zDbVacuum[42]; /* Name of the ATTACH-ed database used for vacuum */ + if( !db->autoCommit ){ sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); @@ -155169,7 +157328,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( saved_nChange = db->nChange; saved_nTotalChange = db->nTotalChange; saved_mTrace = db->mTrace; - db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks; + db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_Comments; db->mDbFlags |= DBFLAG_PreferBuiltin | DBFLAG_Vacuum; db->flags &= ~(u64)(SQLITE_ForeignKeys | SQLITE_ReverseOrder | SQLITE_Defensive | SQLITE_CountRows); @@ -155179,27 +157338,29 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( pMain = db->aDb[iDb].pBt; isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain)); - /* Attach the temporary database as 'vacuum_db'. The synchronous pragma + /* Attach the temporary database as 'vacuum_XXXXXX'. The synchronous pragma ** can be set to 'off' for this file, as it is not recovered if a crash ** occurs anyway. The integrity of the database is maintained by a ** (possibly synchronous) transaction opened on the main database before ** sqlite3BtreeCopyFile() is called. ** ** An optimization would be to use a non-journaled pager. - ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but + ** (Later:) I tried setting "PRAGMA vacuum_XXXXXX.journal_mode=OFF" but ** that actually made the VACUUM run slower. Very little journalling ** actually occurs when doing a vacuum since the vacuum_db is initially ** empty. Only the journal header is written. Apparently it takes more ** time to parse and run the PRAGMA to turn journalling off than it does ** to write the journal header file. */ + sqlite3_randomness(sizeof(iRandom),&iRandom); + sqlite3_snprintf(sizeof(zDbVacuum), zDbVacuum, "vacuum_%016llx", iRandom); nDb = db->nDb; - rc = execSqlF(db, pzErrMsg, "ATTACH %Q AS vacuum_db", zOut); + rc = execSqlF(db, pzErrMsg, "ATTACH %Q AS %s", zOut, zDbVacuum); db->openFlags = saved_openFlags; if( rc!=SQLITE_OK ) goto end_of_vacuum; assert( (db->nDb-1)==nDb ); pDb = &db->aDb[nDb]; - assert( strcmp(pDb->zDbSName,"vacuum_db")==0 ); + assert( strcmp(pDb->zDbSName,zDbVacuum)==0 ); pTemp = pDb->pBt; if( pOut ){ sqlite3_file *id = sqlite3PagerFile(sqlite3BtreePager(pTemp)); @@ -155276,11 +157437,11 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( ** the contents to the temporary database. */ rc = execSqlF(db, pzErrMsg, - "SELECT'INSERT INTO vacuum_db.'||quote(name)" + "SELECT'INSERT INTO %s.'||quote(name)" "||' SELECT*FROM\"%w\".'||quote(name)" - "FROM vacuum_db.sqlite_schema " + "FROM %s.sqlite_schema " "WHERE type='table'AND coalesce(rootpage,1)>0", - zDbMain + zDbVacuum, zDbMain, zDbVacuum ); assert( (db->mDbFlags & DBFLAG_Vacuum)!=0 ); db->mDbFlags &= ~DBFLAG_Vacuum; @@ -155292,11 +157453,11 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( ** from the schema table. 
*/ rc = execSqlF(db, pzErrMsg, - "INSERT INTO vacuum_db.sqlite_schema" + "INSERT INTO %s.sqlite_schema" " SELECT*FROM \"%w\".sqlite_schema" " WHERE type IN('view','trigger')" " OR(type='table'AND rootpage=0)", - zDbMain + zDbVacuum, zDbMain ); if( rc ) goto end_of_vacuum; @@ -155872,11 +158033,12 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ ** schema table. We just need to update that slot with all ** the information we've collected. ** - ** The VM register number pParse->regRowid holds the rowid of an + ** The VM register number pParse->u1.cr.regRowid holds the rowid of an ** entry in the sqlite_schema table that was created for this vtab ** by sqlite3StartTable(). */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( pParse->isCreate ); sqlite3NestedParse(pParse, "UPDATE %Q." LEGACY_SCHEMA_TABLE " " "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q " @@ -155885,7 +158047,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ pTab->zName, pTab->zName, zStmt, - pParse->regRowid + pParse->u1.cr.regRowid ); v = sqlite3GetVdbe(pParse); sqlite3ChangeCookie(pParse, iDb); @@ -156223,7 +158385,9 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ z = (const unsigned char*)zCreateTable; for(i=0; aKeyword[i]; i++){ int tokenType = 0; - do{ z += sqlite3GetToken(z, &tokenType); }while( tokenType==TK_SPACE ); + do{ + z += sqlite3GetToken(z, &tokenType); + }while( tokenType==TK_SPACE || tokenType==TK_COMMENT ); if( tokenType!=aKeyword[i] ){ sqlite3ErrorWithMsg(db, SQLITE_ERROR, "syntax error"); return SQLITE_ERROR; @@ -156260,6 +158424,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ Table *pNew = sParse.pNewTable; Index *pIdx; pTab->aCol = pNew->aCol; + assert( IsOrdinaryTable(pNew) ); sqlite3ExprListDelete(db, pNew->u.tab.pDfltList); pTab->nNVCol = pTab->nCol = pNew->nCol; pTab->tabFlags |= pNew->tabFlags & (TF_WithoutRowid|TF_NoVisibleRowid); @@ -156934,11 +159099,13 @@ struct WhereLoop { u16 nTop; /* Size of TOP vector */ u16 nDistinctCol; /* Index columns used to sort for DISTINCT */ Index *pIndex; /* Index used, or NULL */ + ExprList *pOrderBy; /* ORDER BY clause if this is really a subquery */ } btree; struct { /* Information for virtual tables */ int idxNum; /* Index number */ u32 needFree : 1; /* True if sqlite3_free(idxStr) is needed */ u32 bOmitOffset : 1; /* True to let virtual table handle offset */ + u32 bIdxNumHex : 1; /* Show idxNum as hex in EXPLAIN QUERY PLAN */ i8 isOrdered; /* True if satisfies ORDER BY */ u16 omitMask; /* Terms that may be omitted */ char *idxStr; /* Index identifier string */ @@ -156951,6 +159118,10 @@ struct WhereLoop { /**** whereLoopXfer() copies fields above ***********************/ # define WHERE_LOOP_XFER_SZ offsetof(WhereLoop,nLSlot) u16 nLSlot; /* Number of slots allocated for aLTerm[] */ +#ifdef WHERETRACE_ENABLED + LogEst rStarDelta; /* Cost delta due to star-schema heuristic. 
Not + ** initialized unless pWInfo->bStarUsed */ +#endif WhereTerm **aLTerm; /* WhereTerms used */ WhereLoop *pNextLoop; /* Next WhereLoop object in the WhereClause */ WhereTerm *aLTermSpace[3]; /* Initial aLTerm[] space */ @@ -156999,7 +159170,7 @@ struct WherePath { Bitmask revLoop; /* aLoop[]s that should be reversed for ORDER BY */ LogEst nRow; /* Estimated number of rows generated by this path */ LogEst rCost; /* Total cost of this path */ - LogEst rUnsorted; /* Total cost of this path ignoring sorting costs */ + LogEst rUnsort; /* Total cost of this path ignoring sorting costs */ i8 isOrdered; /* No. of ORDER BY terms satisfied. -1 for unknown */ WhereLoop **aLoop; /* Array of WhereLoop objects implementing this path */ }; @@ -157272,8 +159443,13 @@ struct WhereInfo { unsigned bDeferredSeek :1; /* Uses OP_DeferredSeek */ unsigned untestedTerms :1; /* Not all WHERE terms resolved by outer loop */ unsigned bOrderedInnerLoop:1;/* True if only the inner-most loop is ordered */ - unsigned sorted :1; /* True if really sorted (not just grouped) */ + unsigned sorted :1; /* True if really sorted (not just grouped) */ + unsigned bStarDone :1; /* True if check for star-query is complete */ + unsigned bStarUsed :1; /* True if star-query heuristic is used */ LogEst nRowOut; /* Estimated number of output rows */ +#ifdef WHERETRACE_ENABLED + LogEst rTotalCost; /* Total cost of the solution */ +#endif int iTop; /* The very beginning of the WHERE loop */ int iEndWhere; /* End of the WHERE clause itself */ WhereLoop *pLoops; /* List of all WhereLoop objects */ @@ -157281,9 +159457,14 @@ struct WhereInfo { Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ WhereClause sWC; /* Decomposition of the WHERE clause */ WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */ - WhereLevel a[1]; /* Information about each nest loop in WHERE */ + WhereLevel a[FLEXARRAY]; /* Information about each nest loop in WHERE */ }; +/* +** The size (in bytes) of a WhereInfo object that holds N WhereLevels. +*/ +#define SZ_WHEREINFO(N) ROUND8(offsetof(WhereInfo,a)+(N)*sizeof(WhereLevel)) + /* ** Private interfaces - callable only by other where.c routines. ** @@ -157319,9 +159500,17 @@ SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( const WhereInfo *pWInfo, /* WHERE clause */ const WhereLevel *pLevel /* Bloom filter on this level */ ); +SQLITE_PRIVATE void sqlite3WhereAddExplainText( + Parse *pParse, /* Parse context */ + int addr, + SrcList *pTabList, /* Table list this loop refers to */ + WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ + u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ +); #else # define sqlite3WhereExplainOneScan(u,v,w,x) 0 # define sqlite3WhereExplainBloomFilter(u,v,w) 0 +# define sqlite3WhereAddExplainText(u,v,w,x,y) #endif /* SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3WhereAddScanStatus( @@ -157424,7 +159613,8 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*); #define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */ #define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */ #define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */ - /* 0x02000000 -- available for reuse */ +#define WHERE_COROUTINE 0x02000000 /* Implemented by co-routine. 
+ ** NB: False-negatives are possible */ #define WHERE_EXPRIDX 0x04000000 /* Uses an index-on-expressions */ #endif /* !defined(SQLITE_WHEREINT_H) */ @@ -157522,38 +159712,38 @@ static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){ } /* -** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN -** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG -** was defined at compile-time. If it is not a no-op, a single OP_Explain -** opcode is added to the output to describe the table scan strategy in pLevel. -** -** If an OP_Explain opcode is added to the VM, its address is returned. -** Otherwise, if no OP_Explain is coded, zero is returned. +** This function sets the P4 value of an existing OP_Explain opcode to +** text describing the loop in pLevel. If the OP_Explain opcode already has +** a P4 value, it is freed before it is overwritten. */ -SQLITE_PRIVATE int sqlite3WhereExplainOneScan( +SQLITE_PRIVATE void sqlite3WhereAddExplainText( Parse *pParse, /* Parse context */ + int addr, /* Address of OP_Explain opcode */ SrcList *pTabList, /* Table list this loop refers to */ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ){ - int ret = 0; #if !defined(SQLITE_DEBUG) if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { + VdbeOp *pOp = sqlite3VdbeGetOp(pParse->pVdbe, addr); + SrcItem *pItem = &pTabList->a[pLevel->iFrom]; - Vdbe *v = pParse->pVdbe; /* VM being constructed */ sqlite3 *db = pParse->db; /* Database handle */ int isSearch; /* True for a SEARCH. False for SCAN. */ WhereLoop *pLoop; /* The controlling WhereLoop object */ u32 flags; /* Flags that describe this loop */ +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_EXPLAIN) char *zMsg; /* Text to add to EQP output */ +#endif StrAccum str; /* EQP output string */ char zBuf[100]; /* Initial space for EQP output string */ + if( db->mallocFailed ) return; + pLoop = pLevel->pWLoop; flags = pLoop->wsFlags; - if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_OR_SUBCLAUSE) ) return 0; isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0 || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0)) @@ -157569,7 +159759,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( assert( pLoop->u.btree.pIndex!=0 ); pIdx = pLoop->u.btree.pIndex; assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) ); - if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){ + if( !HasRowid(pItem->pSTab) && IsPrimaryKeyIndex(pIdx) ){ if( isSearch ){ zFmt = "PRIMARY KEY"; } @@ -157577,7 +159767,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( zFmt = "AUTOMATIC PARTIAL COVERING INDEX"; }else if( flags & WHERE_AUTO_INDEX ){ zFmt = "AUTOMATIC COVERING INDEX"; - }else if( flags & WHERE_IDX_ONLY ){ + }else if( flags & (WHERE_IDX_ONLY|WHERE_EXPRIDX) ){ zFmt = "COVERING INDEX %s"; }else{ zFmt = "INDEX %s"; @@ -157612,7 +159802,9 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( } #ifndef SQLITE_OMIT_VIRTUALTABLE else if( (flags & WHERE_VIRTUALTABLE)!=0 ){ - sqlite3_str_appendf(&str, " VIRTUAL TABLE INDEX %d:%s", + sqlite3_str_appendall(&str, " VIRTUAL TABLE INDEX "); + sqlite3_str_appendf(&str, + pLoop->u.vtab.bIdxNumHex ? 
"0x%x:%s" : "%d:%s", pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr); } #endif @@ -157627,10 +159819,50 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( sqlite3_str_append(&str, " (~1 row)", 9); } #endif +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_EXPLAIN) zMsg = sqlite3StrAccumFinish(&str); sqlite3ExplainBreakpoint("",zMsg); - ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v), - pParse->addrExplain, 0, zMsg,P4_DYNAMIC); +#endif + + assert( pOp->opcode==OP_Explain ); + assert( pOp->p4type==P4_DYNAMIC || pOp->p4.z==0 ); + sqlite3DbFree(db, pOp->p4.z); + pOp->p4type = P4_DYNAMIC; + pOp->p4.z = sqlite3StrAccumFinish(&str); + } +} + + +/* +** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN +** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG +** was defined at compile-time. If it is not a no-op, a single OP_Explain +** opcode is added to the output to describe the table scan strategy in pLevel. +** +** If an OP_Explain opcode is added to the VM, its address is returned. +** Otherwise, if no OP_Explain is coded, zero is returned. +*/ +SQLITE_PRIVATE int sqlite3WhereExplainOneScan( + Parse *pParse, /* Parse context */ + SrcList *pTabList, /* Table list this loop refers to */ + WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ + u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ +){ + int ret = 0; +#if !defined(SQLITE_DEBUG) + if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) +#endif + { + if( (pLevel->pWLoop->wsFlags & WHERE_MULTI_OR)==0 + && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 + ){ + Vdbe *v = pParse->pVdbe; + int addr = sqlite3VdbeCurrentAddr(v); + ret = sqlite3VdbeAddOp3( + v, OP_Explain, addr, pParse->addrExplain, pLevel->pWLoop->rRun + ); + sqlite3WhereAddExplainText(pParse, addr, pTabList, pLevel, wctrlFlags); + } } return ret; } @@ -157665,7 +159897,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( sqlite3_str_appendf(&str, "BLOOM FILTER ON %S (", pItem); pLoop = pLevel->pWLoop; if( pLoop->wsFlags & WHERE_IPK ){ - const Table *pTab = pItem->pTab; + const Table *pTab = pItem->pSTab; if( pTab->iPKey>=0 ){ sqlite3_str_appendf(&str, "%s=?", pTab->aCol[pTab->iPKey].zCnName); }else{ @@ -157728,8 +159960,11 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus( sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur); } }else{ - int addr = pSrclist->a[pLvl->iFrom].addrFillSub; - VdbeOp *pOp = sqlite3VdbeGetOp(v, addr-1); + int addr; + VdbeOp *pOp; + assert( pSrclist->a[pLvl->iFrom].fg.isSubquery ); + addr = pSrclist->a[pLvl->iFrom].u4.pSubq->addrFillSub; + pOp = sqlite3VdbeGetOp(v, addr-1); assert( sqlite3VdbeDb(v)->mallocFailed || pOp->opcode==OP_InitCoroutine ); assert( sqlite3VdbeDb(v)->mallocFailed || pOp->p2>addr ); sqlite3VdbeScanStatusRange(v, addrExplain, addr, pOp->p2-1); @@ -157872,11 +160107,44 @@ static void updateRangeAffinityStr( } } +/* +** The pOrderBy->a[].u.x.iOrderByCol values might be incorrect because +** columns might have been rearranged in the result set. This routine +** fixes them up. +** +** pEList is the new result set. The pEList->a[].u.x.iOrderByCol values +** contain the *old* locations of each expression. This is a temporary +** use of u.x.iOrderByCol, not its intended use. The caller must reset +** u.x.iOrderByCol back to zero for all entries in pEList before the +** caller returns. +** +** This routine changes pOrderBy->a[].u.x.iOrderByCol values from +** pEList->a[N].u.x.iOrderByCol into N+1. 
(The "+1" is because of the 1-based +** indexing used by iOrderByCol.) Or if no match, iOrderByCol is set to zero. +*/ +static void adjustOrderByCol(ExprList *pOrderBy, ExprList *pEList){ + int i, j; + if( pOrderBy==0 ) return; + for(i=0; inExpr; i++){ + int t = pOrderBy->a[i].u.x.iOrderByCol; + if( t==0 ) continue; + for(j=0; jnExpr; j++){ + if( pEList->a[j].u.x.iOrderByCol==t ){ + pOrderBy->a[i].u.x.iOrderByCol = j+1; + break; + } + } + if( j>=pEList->nExpr ){ + pOrderBy->a[i].u.x.iOrderByCol = 0; + } + } +} + /* ** pX is an expression of the form: (vector) IN (SELECT ...) ** In other words, it is a vector IN operator with a SELECT clause on the -** LHS. But not all terms in the vector are indexable and the terms might +** RHS. But not all terms in the vector are indexable and the terms might ** not be in the correct order for indexing. ** ** This routine makes a copy of the input pX expression and then adjusts @@ -157932,9 +160200,12 @@ static Expr *removeUnindexableInClauseTerms( int iField; assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 ); iField = pLoop->aLTerm[i]->u.x.iField - 1; - if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ + if( NEVER(pOrigRhs->a[iField].pExpr==0) ){ + continue; /* Duplicate PK column */ + } pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); pOrigRhs->a[iField].pExpr = 0; + if( pRhs ) pRhs->a[pRhs->nExpr-1].u.x.iOrderByCol = iField+1; if( pOrigLhs ){ assert( pOrigLhs->a[iField].pExpr!=0 ); pLhs = sqlite3ExprListAppend(pParse,pLhs,pOrigLhs->a[iField].pExpr); @@ -157948,6 +160219,7 @@ static Expr *removeUnindexableInClauseTerms( pNew->pLeft->x.pList = pLhs; } pSelect->pEList = pRhs; + pSelect->selId = ++pParse->nSelect; /* Req'd for SubrtnSig validity */ if( pLhs && pLhs->nExpr==1 ){ /* Take care here not to generate a TK_VECTOR containing only a ** single value. Since the parser never creates such a vector, some @@ -157957,18 +160229,16 @@ static Expr *removeUnindexableInClauseTerms( sqlite3ExprDelete(db, pNew->pLeft); pNew->pLeft = p; } - if( pSelect->pOrderBy ){ - /* If the SELECT statement has an ORDER BY clause, zero the - ** iOrderByCol variables. These are set to non-zero when an - ** ORDER BY term exactly matches one of the terms of the - ** result-set. Since the result-set of the SELECT statement may - ** have been modified or reordered, these variables are no longer - ** set correctly. Since setting them is just an optimization, - ** it's easiest just to zero them here. */ - ExprList *pOrderBy = pSelect->pOrderBy; - for(i=0; inExpr; i++){ - pOrderBy->a[i].u.x.iOrderByCol = 0; - } + + /* If either the ORDER BY clause or the GROUP BY clause contains + ** references to result-set columns, those references might now be + ** obsolete. So fix them up. + */ + assert( pRhs!=0 || db->mallocFailed ); + if( pRhs ){ + adjustOrderByCol(pSelect->pOrderBy, pRhs); + adjustOrderByCol(pSelect->pGroupBy, pRhs); + for(i=0; inExpr; i++) pRhs->a[i].u.x.iOrderByCol = 0; } #if 0 @@ -157983,6 +160253,138 @@ static Expr *removeUnindexableInClauseTerms( } +#ifndef SQLITE_OMIT_SUBQUERY +/* +** Generate code for a single X IN (....) term of the WHERE clause. +** +** This is a special-case of codeEqualityTerm() that works for IN operators +** only. It is broken out into a subroutine because this case is +** uncommon and by splitting it off into a subroutine, the common case +** runs faster. +** +** The current value for the constraint is left in register iTarget. 
+** This routine sets up a loop that will iterate over all values of X. +*/ +static SQLITE_NOINLINE void codeINTerm( + Parse *pParse, /* The parsing context */ + WhereTerm *pTerm, /* The term of the WHERE clause to be coded */ + WhereLevel *pLevel, /* The level of the FROM clause we are working on */ + int iEq, /* Index of the equality term within this level */ + int bRev, /* True for reverse-order IN operations */ + int iTarget /* Attempt to leave results in this register */ +){ + Expr *pX = pTerm->pExpr; + int eType = IN_INDEX_NOOP; + int iTab; + struct InLoop *pIn; + WhereLoop *pLoop = pLevel->pWLoop; + Vdbe *v = pParse->pVdbe; + int i; + int nEq = 0; + int *aiMap = 0; + + if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 + && pLoop->u.btree.pIndex!=0 + && pLoop->u.btree.pIndex->aSortOrder[iEq] + ){ + testcase( iEq==0 ); + testcase( bRev ); + bRev = !bRev; + } + assert( pX->op==TK_IN ); + + for(i=0; iaLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){ + disableTerm(pLevel, pTerm); + return; + } + } + for(i=iEq; inLTerm; i++){ + assert( pLoop->aLTerm[i]!=0 ); + if( pLoop->aLTerm[i]->pExpr==pX ) nEq++; + } + + iTab = 0; + if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){ + eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab); + }else{ + sqlite3 *db = pParse->db; + Expr *pXMod = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); + if( !db->mallocFailed ){ + aiMap = (int*)sqlite3DbMallocZero(db, sizeof(int)*nEq); + eType = sqlite3FindInIndex(pParse, pXMod, IN_INDEX_LOOP, 0, aiMap, &iTab); + } + sqlite3ExprDelete(db, pXMod); + } + + if( eType==IN_INDEX_INDEX_DESC ){ + testcase( bRev ); + bRev = !bRev; + } + sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0); + VdbeCoverageIf(v, bRev); + VdbeCoverageIf(v, !bRev); + + assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); + pLoop->wsFlags |= WHERE_IN_ABLE; + if( pLevel->u.in.nIn==0 ){ + pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse); + } + if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){ + pLoop->wsFlags |= WHERE_IN_EARLYOUT; + } + + i = pLevel->u.in.nIn; + pLevel->u.in.nIn += nEq; + pLevel->u.in.aInLoop = + sqlite3WhereRealloc(pTerm->pWC->pWInfo, + pLevel->u.in.aInLoop, + sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn); + pIn = pLevel->u.in.aInLoop; + if( pIn ){ + int iMap = 0; /* Index in aiMap[] */ + pIn += i; + for(i=iEq; inLTerm; i++){ + if( pLoop->aLTerm[i]->pExpr==pX ){ + int iOut = iTarget + i - iEq; + if( eType==IN_INDEX_ROWID ){ + pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut); + }else{ + int iCol = aiMap ? aiMap[iMap++] : 0; + pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut); + } + sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v); + if( i==iEq ){ + pIn->iCur = iTab; + pIn->eEndLoopOp = bRev ? OP_Prev : OP_Next; + if( iEq>0 ){ + pIn->iBase = iTarget - i; + pIn->nPrefix = i; + }else{ + pIn->nPrefix = 0; + } + }else{ + pIn->eEndLoopOp = OP_Noop; + } + pIn++; + } + } + testcase( iEq>0 + && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 + && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ); + if( iEq>0 + && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0 + ){ + sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq); + } + }else{ + pLevel->u.in.nIn = 0; + } + sqlite3DbFree(pParse->db, aiMap); +} +#endif + + /* ** Generate code for a single equality term of the WHERE clause. An equality ** term can be either X=expr or X IN (...). 
pTerm is the term to be @@ -158007,7 +160409,6 @@ static int codeEqualityTerm( int iTarget /* Attempt to leave results in this register */ ){ Expr *pX = pTerm->pExpr; - Vdbe *v = pParse->pVdbe; int iReg; /* Register holding results */ assert( pLevel->pWLoop->aLTerm[iEq]==pTerm ); @@ -158016,125 +160417,12 @@ static int codeEqualityTerm( iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget); }else if( pX->op==TK_ISNULL ){ iReg = iTarget; - sqlite3VdbeAddOp2(v, OP_Null, 0, iReg); + sqlite3VdbeAddOp2(pParse->pVdbe, OP_Null, 0, iReg); #ifndef SQLITE_OMIT_SUBQUERY }else{ - int eType = IN_INDEX_NOOP; - int iTab; - struct InLoop *pIn; - WhereLoop *pLoop = pLevel->pWLoop; - int i; - int nEq = 0; - int *aiMap = 0; - - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 - && pLoop->u.btree.pIndex!=0 - && pLoop->u.btree.pIndex->aSortOrder[iEq] - ){ - testcase( iEq==0 ); - testcase( bRev ); - bRev = !bRev; - } assert( pX->op==TK_IN ); iReg = iTarget; - - for(i=0; iaLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){ - disableTerm(pLevel, pTerm); - return iTarget; - } - } - for(i=iEq;inLTerm; i++){ - assert( pLoop->aLTerm[i]!=0 ); - if( pLoop->aLTerm[i]->pExpr==pX ) nEq++; - } - - iTab = 0; - if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){ - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab); - }else{ - Expr *pExpr = pTerm->pExpr; - if( pExpr->iTable==0 || !ExprHasProperty(pExpr, EP_Subrtn) ){ - sqlite3 *db = pParse->db; - pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); - if( !db->mallocFailed ){ - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap,&iTab); - pExpr->iTable = iTab; - } - sqlite3ExprDelete(db, pX); - }else{ - int n = sqlite3ExprVectorSize(pX->pLeft); - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n)); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab); - } - pX = pExpr; - } - - if( eType==IN_INDEX_INDEX_DESC ){ - testcase( bRev ); - bRev = !bRev; - } - sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0); - VdbeCoverageIf(v, bRev); - VdbeCoverageIf(v, !bRev); - - assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); - pLoop->wsFlags |= WHERE_IN_ABLE; - if( pLevel->u.in.nIn==0 ){ - pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse); - } - if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){ - pLoop->wsFlags |= WHERE_IN_EARLYOUT; - } - - i = pLevel->u.in.nIn; - pLevel->u.in.nIn += nEq; - pLevel->u.in.aInLoop = - sqlite3WhereRealloc(pTerm->pWC->pWInfo, - pLevel->u.in.aInLoop, - sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn); - pIn = pLevel->u.in.aInLoop; - if( pIn ){ - int iMap = 0; /* Index in aiMap[] */ - pIn += i; - for(i=iEq;inLTerm; i++){ - if( pLoop->aLTerm[i]->pExpr==pX ){ - int iOut = iReg + i - iEq; - if( eType==IN_INDEX_ROWID ){ - pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut); - }else{ - int iCol = aiMap ? aiMap[iMap++] : 0; - pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut); - } - sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v); - if( i==iEq ){ - pIn->iCur = iTab; - pIn->eEndLoopOp = bRev ? 
OP_Prev : OP_Next; - if( iEq>0 ){ - pIn->iBase = iReg - i; - pIn->nPrefix = i; - }else{ - pIn->nPrefix = 0; - } - }else{ - pIn->eEndLoopOp = OP_Noop; - } - pIn++; - } - } - testcase( iEq>0 - && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 - && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ); - if( iEq>0 - && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0 - ){ - sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq); - } - }else{ - pLevel->u.in.nIn = 0; - } - sqlite3DbFree(pParse->db, aiMap); + codeINTerm(pParse, pTerm, pLevel, iEq, bRev, iTarget); #endif } @@ -158806,7 +161094,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( iCur = pTabItem->iCursor; pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); bRev = (pWInfo->revMask>>iLevel)&1; - VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); + VdbeModuleComment((v, "Begin WHERE-loop%d: %s", + iLevel, pTabItem->pSTab->zName)); #if WHERETRACE_ENABLED /* 0x4001 */ if( sqlite3WhereTrace & 0x1 ){ sqlite3DebugPrintf("Coding level %d of %d: notReady=%llx iFrom=%d\n", @@ -158861,11 +161150,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( /* Special case of a FROM clause subquery implemented as a co-routine */ if( pTabItem->fg.viaCoroutine ){ - int regYield = pTabItem->regReturn; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); + int regYield; + Subquery *pSubq; + assert( pTabItem->fg.isSubquery && pTabItem->u4.pSubq!=0 ); + pSubq = pTabItem->u4.pSubq; + regYield = pSubq->regReturn; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSubq->addrFillSub); pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk); VdbeCoverage(v); - VdbeComment((v, "next row of %s", pTabItem->pTab->zName)); + VdbeComment((v, "next row of %s", pTabItem->pSTab->zName)); pLevel->op = OP_Goto; }else @@ -158910,6 +161203,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg); sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1); + /* The instruction immediately prior to OP_VFilter must be an OP_Integer + ** that sets the "argc" value for xVFilter. This is necessary for + ** resolveP2() to work correctly. See tag-20250207a. */ sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg, pLoop->u.vtab.idxStr, pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC); @@ -159500,12 +161796,13 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( pLevel->iLeftJoin==0 ){ /* If a partial index is driving the loop, try to eliminate WHERE clause ** terms from the query that must be true due to the WHERE clause of - ** the partial index. + ** the partial index. This optimization does not work on an outer join, + ** as shown by: ** - ** 2019-11-02 ticket 623eff57e76d45f6: This optimization does not work - ** for a LEFT JOIN. + ** 2019-11-02 ticket 623eff57e76d45f6 (LEFT JOIN) + ** 2025-05-29 forum post 7dee41d32506c4ae (RIGHT JOIN) */ - if( pIdx->pPartIdxWhere ){ + if( pIdx->pPartIdxWhere && pLevel->pRJ==0 ){ whereApplyPartialIndexConstraints(pIdx->pPartIdxWhere, iCur, pWC); } }else{ @@ -159594,7 +161891,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int untestedTerms = 0; /* Some terms not completely tested */ int ii; /* Loop counter */ Expr *pAndExpr = 0; /* An ".. 
AND (...)" expression */ - Table *pTab = pTabItem->pTab; + Table *pTab = pTabItem->pSTab; pTerm = pLoop->aLTerm[0]; assert( pTerm!=0 ); @@ -159612,8 +161909,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int nNotReady; /* The number of notReady tables */ SrcItem *origSrc; /* Original list of tables */ nNotReady = pWInfo->nLevel - iLevel - 1; - pOrTab = sqlite3DbMallocRawNN(db, - sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); + pOrTab = sqlite3DbMallocRawNN(db, SZ_SRCLIST(nNotReady+1)); if( pOrTab==0 ) return notReady; pOrTab->nAlloc = (u8)(nNotReady + 1); pOrTab->nSrc = pOrTab->nAlloc; @@ -159664,7 +161960,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** ** This optimization also only applies if the (x1 OR x2 OR ...) term ** is not contained in the ON clause of a LEFT JOIN. - ** See ticket http://www.sqlite.org/src/info/f2369304e4 + ** See ticket http://sqlite.org/src/info/f2369304e4 ** ** 2022-02-04: Do not push down slices of a row-value comparison. ** In other words, "w" or "y" may not be a slice of a vector. Otherwise, @@ -160053,7 +162349,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** least once. This is accomplished by storing the PK for the row in ** both the iMatch index and the regBloom Bloom filter. */ - pTab = pWInfo->pTabList->a[pLevel->iFrom].pTab; + pTab = pWInfo->pTabList->a[pLevel->iFrom].pSTab; if( HasRowid(pTab) ){ r = sqlite3GetTempRange(pParse, 2); sqlite3ExprCodeGetColumnOfTable(v, pTab, pLevel->iTabCur, -1, r+1); @@ -160156,11 +162452,12 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( WhereInfo *pSubWInfo; WhereLoop *pLoop = pLevel->pWLoop; SrcItem *pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; - SrcList sFrom; + SrcList *pFrom; + u8 fromSpace[SZ_SRCLIST_1]; Bitmask mAll = 0; int k; - ExplainQueryPlan((pParse, 1, "RIGHT-JOIN %s", pTabItem->pTab->zName)); + ExplainQueryPlan((pParse, 1, "RIGHT-JOIN %s", pTabItem->pSTab->zName)); sqlite3VdbeNoJumpsOutsideSubrtn(v, pRJ->addrSubrtn, pRJ->endSubrtn, pRJ->regReturn); for(k=0; kpTabList->a[pWInfo->a[k].iFrom]; mAll |= pWInfo->a[k].pWLoop->maskSelf; if( pRight->fg.viaCoroutine ){ + Subquery *pSubq; + assert( pRight->fg.isSubquery && pRight->u4.pSubq!=0 ); + pSubq = pRight->u4.pSubq; + assert( pSubq->pSelect!=0 && pSubq->pSelect->pEList!=0 ); sqlite3VdbeAddOp3( - v, OP_Null, 0, pRight->regResult, - pRight->regResult + pRight->pSelect->pEList->nExpr-1 + v, OP_Null, 0, pSubq->regResult, + pSubq->regResult + pSubq->pSelect->pEList->nExpr-1 ); } sqlite3VdbeAddOp1(v, OP_NullRow, pWInfo->a[k].iTabCur); @@ -160196,13 +162497,14 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( sqlite3ExprDup(pParse->db, pTerm->pExpr, 0)); } } - sFrom.nSrc = 1; - sFrom.nAlloc = 1; - memcpy(&sFrom.a[0], pTabItem, sizeof(SrcItem)); - sFrom.a[0].fg.jointype = 0; + pFrom = (SrcList*)fromSpace; + pFrom->nSrc = 1; + pFrom->nAlloc = 1; + memcpy(&pFrom->a[0], pTabItem, sizeof(SrcItem)); + pFrom->a[0].fg.jointype = 0; assert( pParse->withinRJSubrtn < 100 ); pParse->withinRJSubrtn++; - pSubWInfo = sqlite3WhereBegin(pParse, &sFrom, pSubWhere, 0, 0, 0, + pSubWInfo = sqlite3WhereBegin(pParse, pFrom, pSubWhere, 0, 0, 0, WHERE_RIGHT_JOIN, 0); if( pSubWInfo ){ int iCur = pLevel->iTabCur; @@ -160210,7 +162512,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( int nPk; int jmp; int addrCont = sqlite3WhereContinueLabel(pSubWInfo); - Table *pTab = pTabItem->pTab; + Table *pTab = pTabItem->pSTab; if( HasRowid(pTab) ){ sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, -1, r); nPk = 1; 
@@ -160343,7 +162645,12 @@ static int allowedOp(int op){ assert( TK_LT>TK_EQ && TK_LTTK_EQ && TK_LE=TK_EQ && op<=TK_GE) || op==TK_ISNULL || op==TK_IS; + assert( TK_INTK_GE ) return 0; + if( op>=TK_EQ ) return 1; + return op==TK_IN || op==TK_ISNULL || op==TK_IS; } /* @@ -160376,15 +162683,16 @@ static u16 exprCommute(Parse *pParse, Expr *pExpr){ static u16 operatorMask(int op){ u16 c; assert( allowedOp(op) ); - if( op==TK_IN ){ + if( op>=TK_EQ ){ + assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff ); + c = (u16)(WO_EQ<<(op-TK_EQ)); + }else if( op==TK_IN ){ c = WO_IN; }else if( op==TK_ISNULL ){ c = WO_ISNULL; - }else if( op==TK_IS ){ - c = WO_IS; }else{ - assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff ); - c = (u16)(WO_EQ<<(op-TK_EQ)); + assert( op==TK_IS ); + c = WO_IS; } assert( op!=TK_ISNULL || c==WO_ISNULL ); assert( op!=TK_IN || c==WO_IN ); @@ -160455,12 +162763,28 @@ static int isLikeOrGlob( z = (u8*)pRight->u.zToken; } if( z ){ - - /* Count the number of prefix characters prior to the first wildcard */ + /* Count the number of prefix bytes prior to the first wildcard, + ** U+fffd character, or malformed utf-8. If the underlying database + ** has a UTF16LE encoding, then only consider ASCII characters. Note that + ** the encoding of z[] is UTF8 - we are dealing with only UTF8 here in this + ** code, but the database engine itself might be processing content using a + ** different encoding. */ cnt = 0; while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){ cnt++; - if( c==wc[3] && z[cnt]!=0 ) cnt++; + if( c==wc[3] && z[cnt]>0 && z[cnt]<0x80 ){ + cnt++; + }else if( c>=0x80 ){ + const u8 *z2 = z+cnt-1; + if( c==0xff || sqlite3Utf8Read(&z2)==0xfffd /* bad utf-8 */ + || ENC(db)==SQLITE_UTF16LE + ){ + cnt--; + break; + }else{ + cnt = (int)(z2-z); + } + } } /* The optimization is possible only if (1) the pattern does not begin @@ -160471,11 +162795,11 @@ static int isLikeOrGlob( ** range search. The third is because the caller assumes that the pattern ** consists of at least one character after all escapes have been ** removed. */ - if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && 255!=(u8)z[cnt-1] ){ + if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && ALWAYS(255!=(u8)z[cnt-1]) ){ Expr *pPrefix; /* A "complete" match if the pattern ends with "*" or "%" */ - *pisComplete = c==wc[0] && z[cnt+1]==0; + *pisComplete = c==wc[0] && z[cnt+1]==0 && ENC(db)!=SQLITE_UTF16LE; /* Get the pattern prefix. Remove all escapes from the prefix. */ pPrefix = sqlite3Expr(db, TK_STRING, (char*)z); @@ -160671,6 +162995,13 @@ static int isAuxiliaryVtabOperator( } } } + }else if( pExpr->op>=TK_EQ ){ + /* Comparison operators are a common case. Save a few comparisons for + ** that common case by terminating early. */ + assert( TK_NE < TK_EQ ); + assert( TK_ISNOT < TK_EQ ); + assert( TK_NOTNULL < TK_EQ ); + return 0; }else if( pExpr->op==TK_NE || pExpr->op==TK_ISNOT || pExpr->op==TK_NOTNULL ){ int res = 0; Expr *pLeft = pExpr->pLeft; @@ -161144,30 +163475,42 @@ static void exprAnalyzeOrTerm( ** 1. The SQLITE_Transitive optimization must be enabled ** 2. Must be either an == or an IS operator ** 3. Not originating in the ON clause of an OUTER JOIN -** 4. The affinities of A and B must be compatible -** 5a. Both operands use the same collating sequence OR -** 5b. The overall collating sequence is BINARY +** 4. The operator is not IS or else the query does not contain RIGHT JOIN +** 5. The affinities of A and B must be compatible +** 6a. Both operands use the same collating sequence OR +** 6b. 
The overall collating sequence is BINARY ** If this routine returns TRUE, that means that the RHS can be substituted ** for the LHS anyplace else in the WHERE clause where the LHS column occurs. ** This is an optimization. No harm comes from returning 0. But if 1 is ** returned when it should not be, then incorrect answers might result. */ -static int termIsEquivalence(Parse *pParse, Expr *pExpr){ +static int termIsEquivalence(Parse *pParse, Expr *pExpr, SrcList *pSrc){ char aff1, aff2; CollSeq *pColl; - if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0; - if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0; - if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; + if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0; /* (1) */ + if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0; /* (2) */ + if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* (3) */ + assert( pSrc!=0 ); + if( pExpr->op==TK_IS + && pSrc->nSrc + && (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 + ){ + return 0; /* (4) */ + } aff1 = sqlite3ExprAffinity(pExpr->pLeft); aff2 = sqlite3ExprAffinity(pExpr->pRight); if( aff1!=aff2 && (!sqlite3IsNumericAffinity(aff1) || !sqlite3IsNumericAffinity(aff2)) ){ - return 0; + return 0; /* (5) */ } pColl = sqlite3ExprCompareCollSeq(pParse, pExpr); - if( sqlite3IsBinary(pColl) ) return 1; - return sqlite3ExprCollSeqMatch(pParse, pExpr->pLeft, pExpr->pRight); + if( !sqlite3IsBinary(pColl) + && !sqlite3ExprCollSeqMatch(pParse, pExpr->pLeft, pExpr->pRight) + ){ + return 0; /* (6) */ + } + return 1; } /* @@ -161187,7 +163530,9 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){ if( ALWAYS(pSrc!=0) ){ int i; for(i=0; inSrc; i++){ - mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect); + if( pSrc->a[i].fg.isSubquery ){ + mask |= exprSelectUsage(pMaskSet, pSrc->a[i].u4.pSubq->pSelect); + } if( pSrc->a[i].fg.isUsing==0 ){ mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].u3.pOn); } @@ -161225,7 +163570,7 @@ static SQLITE_NOINLINE int exprMightBeIndexed2( int iCur; do{ iCur = pFrom->a[j].iCursor; - for(pIdx=pFrom->a[j].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + for(pIdx=pFrom->a[j].pSTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->aColExpr==0 ) continue; for(i=0; inKeyCol; i++){ if( pIdx->aiColumn[i]!=XN_EXPR ) continue; @@ -161269,7 +163614,7 @@ static int exprMightBeIndexed( for(i=0; inSrc; i++){ Index *pIdx; - for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + for(pIdx=pFrom->a[i].pSTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->aColExpr ){ return exprMightBeIndexed2(pFrom,aiCurCol,pExpr,i); } @@ -161430,8 +163775,8 @@ static void exprAnalyze( if( op==TK_IS ) pNew->wtFlags |= TERM_IS; pTerm = &pWC->a[idxTerm]; pTerm->wtFlags |= TERM_COPIED; - - if( termIsEquivalence(pParse, pDup) ){ + assert( pWInfo->pTabList!=0 ); + if( termIsEquivalence(pParse, pDup, pWInfo->pTabList) ){ pTerm->eOperator |= WO_EQUIV; eExtraOp = WO_EQUIV; } @@ -161597,9 +163942,8 @@ static void exprAnalyze( } if( !db->mallocFailed ){ - u8 c, *pC; /* Last character before the first wildcard */ + u8 *pC; /* Last character before the first wildcard */ pC = (u8*)&pStr2->u.zToken[sqlite3Strlen30(pStr2->u.zToken)-1]; - c = *pC; if( noCase ){ /* The point is to increment the last character before the first ** wildcard. But if we increment '@', that will push it into the @@ -161607,10 +163951,17 @@ static void exprAnalyze( ** inequality. 
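/* Sketch (not part of the upstream patch): the operatorMask() rewrite a
** few hunks up tests the comparison operators first because they are the
** common case, mapping each token to its WO_ bit with a single shift.
** That works only because the TK_ comparison codes are consecutive and
** the WO_ bits were assigned in the same order, which the surviving
** asserts verify. A compact model with stand-in token values:
*/
#include <assert.h>

enum { TK_EQ = 53, TK_GT, TK_LE, TK_LT, TK_GE };  /* consecutive codes */
#define WO_EQ 0x0002                              /* one-hot bit for TK_EQ */

static unsigned short opMask(int op){
  assert( op>=TK_EQ && op<=TK_GE );
  return (unsigned short)(WO_EQ << (op - TK_EQ)); /* e.g. TK_GE -> WO_EQ<<4 */
}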
To avoid this, make sure to also run the full ** LIKE on all candidate expressions by clearing the isComplete flag */ - if( c=='A'-1 ) isComplete = 0; - c = sqlite3UpperToLower[c]; + if( *pC=='A'-1 ) isComplete = 0; + *pC = sqlite3UpperToLower[*pC]; } - *pC = c + 1; + + /* Increment the value of the last utf8 character in the prefix. */ + while( *pC==0xBF && pC>(u8*)pStr2->u.zToken ){ + *pC = 0x80; + pC--; + } + assert( *pC!=0xFF ); /* isLikeOrGlob() guarantees this */ + (*pC)++; } zCollSeqName = noCase ? "NOCASE" : sqlite3StrBINARY; pNewExpr1 = sqlite3ExprDup(db, pLeft, 0); @@ -161812,7 +164163,7 @@ static void whereAddLimitExpr( Expr *pNew; int iVal = 0; - if( sqlite3ExprIsInteger(pExpr, &iVal) && iVal>=0 ){ + if( sqlite3ExprIsInteger(pExpr, &iVal, pParse) && iVal>=0 ){ Expr *pVal = sqlite3Expr(db, TK_INTEGER, 0); if( pVal==0 ) return; ExprSetProperty(pVal, EP_IntValue); @@ -161857,7 +164208,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec assert( p!=0 && p->pLimit!=0 ); /* 1 -- checked by caller */ if( p->pGroupBy==0 && (p->selFlags & (SF_Distinct|SF_Aggregate))==0 /* 2 */ - && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pTab)) /* 3 */ + && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pSTab)) /* 3 */ ){ ExprList *pOrderBy = p->pOrderBy; int iCsr = p->pSrc->a[0].iCursor; @@ -162078,7 +164429,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( Expr *pColRef; Expr *pTerm; if( pItem->fg.isTabFunc==0 ) return; - pTab = pItem->pTab; + pTab = pItem->pSTab; assert( pTab!=0 ); pArgs = pItem->u1.pFuncArg; if( pArgs==0 ) return; @@ -162153,11 +164504,16 @@ struct HiddenIndexInfo { int eDistinct; /* Value to return from sqlite3_vtab_distinct() */ u32 mIn; /* Mask of terms that are IN (...) */ u32 mHandleIn; /* Terms that vtab will handle as IN (...) */ - sqlite3_value *aRhs[1]; /* RHS values for constraints. MUST BE LAST - ** because extra space is allocated to hold up - ** to nTerm such values */ + sqlite3_value *aRhs[FLEXARRAY]; /* RHS values for constraints. MUST BE LAST + ** Extra space is allocated to hold up + ** to nTerm such values */ }; +/* Size (in bytes) of a HiddenIndeInfo object sufficient to hold as +** many as N constraints */ +#define SZ_HIDDENINDEXINFO(N) \ + (offsetof(HiddenIndexInfo,aRhs) + (N)*sizeof(sqlite3_value*)) + /* Forward declaration of methods */ static int whereLoopResize(sqlite3*, WhereLoop*, int); @@ -162762,7 +165118,7 @@ static int isDistinctRedundant( ** clause is redundant. */ if( pTabList->nSrc!=1 ) return 0; iBase = pTabList->a[0].iCursor; - pTab = pTabList->a[0].pTab; + pTab = pTabList->a[0].pSTab; /* If any of the expressions is an IPK column on table iBase, then return ** true. 
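/* Sketch (not part of the upstream patch): the exprAnalyze() hunk above
** now increments the last complete UTF-8 character of the LIKE prefix
** rather than its last byte, carrying out of maxed continuation bytes.
** A self-contained version of that carry loop; it assumes, as the
** patched isLikeOrGlob() guarantees, that z[0..n-1] is well-formed
** UTF-8 containing no 0xFF byte:
*/
static void utf8IncrementLast(unsigned char *z, int n){
  unsigned char *p = &z[n-1];
  while( *p==0xBF && p>z ){
    *p = 0x80;   /* reset a maxed-out continuation byte... */
    p--;         /* ...and carry into the byte before it */
  }
  (*p)++;        /* cannot overflow: 0xFF never occurs in the prefix */
}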
Note: The (p->iTable==iBase) part of this test may be false if the @@ -162837,6 +165193,12 @@ static void translateColumnToCopy( VdbeOp *pOp = sqlite3VdbeGetOp(v, iStart); int iEnd = sqlite3VdbeCurrentAddr(v); if( pParse->db->mallocFailed ) return; +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("CHECKING for column-to-copy on cursor %d for %d..%d\n", + iTabCur, iStart, iEnd); + } +#endif for(; iStartp1!=iTabCur ) continue; if( pOp->opcode==OP_Column ){ @@ -162951,13 +165313,52 @@ static int constraintCompatibleWithOuterJoin( return 0; } if( (pSrc->fg.jointype & (JT_LEFT|JT_RIGHT))!=0 - && ExprHasProperty(pTerm->pExpr, EP_InnerON) + && NEVER(ExprHasProperty(pTerm->pExpr, EP_InnerON)) ){ return 0; } return 1; } +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX +/* +** Return true if column iCol of table pTab seem like it might be a +** good column to use as part of a query-time index. +** +** Current algorithm (subject to improvement!): +** +** 1. If iCol is already the left-most column of some other index, +** then return false. +** +** 2. If iCol is part of an existing index that has an aiRowLogEst of +** more than 20, then return false. +** +** 3. If no disqualifying conditions above are found, return true. +** +** 2025-01-03: I experimented with a new rule that returns false if the +** the datatype of the column is "BOOLEAN". This did not improve +** performance on any queries at hand, but it did burn CPU cycles, so the +** idea was not committed. +*/ +static SQLITE_NOINLINE int columnIsGoodIndexCandidate( + const Table *pTab, + int iCol +){ + const Index *pIdx; + for(pIdx = pTab->pIndex; pIdx!=0; pIdx=pIdx->pNext){ + int j; + for(j=0; jnKeyCol; j++){ + if( pIdx->aiColumn[j]==iCol ){ + if( j==0 ) return 0; + if( pIdx->hasStat1 && pIdx->aiRowLogEst[j+1]>20 ) return 0; + break; + } + } + } + return 1; +} +#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ + #ifndef SQLITE_OMIT_AUTOMATIC_INDEX @@ -162972,6 +165373,8 @@ static int termCanDriveIndex( const Bitmask notReady /* Tables in outer loops of the join */ ){ char aff; + int leftCol; + if( pTerm->leftCursor!=pSrc->iCursor ) return 0; if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0; assert( (pSrc->fg.jointype & JT_RIGHT)==0 ); @@ -162982,11 +165385,12 @@ static int termCanDriveIndex( } if( (pTerm->prereqRight & notReady)!=0 ) return 0; assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); - if( pTerm->u.x.leftColumn<0 ) return 0; - aff = pSrc->pTab->aCol[pTerm->u.x.leftColumn].affinity; + leftCol = pTerm->u.x.leftColumn; + if( leftCol<0 ) return 0; + aff = pSrc->pSTab->aCol[leftCol].affinity; if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0; testcase( pTerm->pExpr->op==TK_IS ); - return 1; + return columnIsGoodIndexCandidate(pSrc->pSTab, leftCol); } #endif @@ -163019,7 +165423,7 @@ static void explainAutomaticIndex( sqlite3_str *pStr = sqlite3_str_new(pParse->db); sqlite3_str_appendf(pStr,"CREATE AUTOMATIC INDEX ON %s(", pTab->zName); assert( pIdx->nColumn>1 ); - assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID ); + assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID || !HasRowid(pTab) ); for(ii=0; ii<(pIdx->nColumn-1); ii++){ const char *zName = 0; int iCol = pIdx->aiColumn[ii]; @@ -163094,7 +165498,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( nKeyCol = 0; pTabList = pWC->pWInfo->pTabList; pSrc = &pTabList->a[pLevel->iFrom]; - pTable = pSrc->pTab; + pTable = pSrc->pSTab; pWCEnd = &pWC->a[pWC->nTerm]; pLoop = pLevel->pWLoop; idxCols = 0; @@ -163150,6 +165554,19 @@ static SQLITE_NOINLINE void 
constructAutomaticIndex( }else{ extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); } + if( !HasRowid(pTable) ){ + /* For WITHOUT ROWID tables, ensure that all PRIMARY KEY columns are + ** either in the idxCols mask or in the extraCols mask */ + for(i=0; inCol; i++){ + if( (pTable->aCol[i].colFlags & COLFLAG_PRIMKEY)==0 ) continue; + if( i>=BMS-1 ){ + extraCols |= MASKBIT(BMS-1); + break; + } + if( idxCols & MASKBIT(i) ) continue; + extraCols |= MASKBIT(i); + } + } mxBitCol = MIN(BMS-1,pTable->nCol); testcase( pTable->nCol==BMS-1 ); testcase( pTable->nCol==BMS-2 ); @@ -163161,7 +165578,10 @@ static SQLITE_NOINLINE void constructAutomaticIndex( } /* Construct the Index object to describe this index */ - pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+1, 0, &zNotUsed); + assert( nKeyCol <= pTable->nCol + MAX(0, pTable->nCol - BMS + 1) ); + /* ^-- This guarantees that the number of index columns will fit in the u16 */ + pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+HasRowid(pTable), + 0, &zNotUsed); if( pIdx==0 ) goto end_auto_index_create; pLoop->u.btree.pIndex = pIdx; pIdx->zName = "auto-index"; @@ -163217,8 +165637,10 @@ static SQLITE_NOINLINE void constructAutomaticIndex( } } assert( n==nKeyCol ); - pIdx->aiColumn[n] = XN_ROWID; - pIdx->azColl[n] = sqlite3StrBINARY; + if( HasRowid(pTable) ){ + pIdx->aiColumn[n] = XN_ROWID; + pIdx->azColl[n] = sqlite3StrBINARY; + } /* Create the automatic index */ explainAutomaticIndex(pParse, pIdx, pPartial!=0, &addrExp); @@ -163236,12 +165658,17 @@ static SQLITE_NOINLINE void constructAutomaticIndex( /* Fill the automatic index with content */ assert( pSrc == &pWC->pWInfo->pTabList->a[pLevel->iFrom] ); if( pSrc->fg.viaCoroutine ){ - int regYield = pSrc->regReturn; + int regYield; + Subquery *pSubq; + assert( pSrc->fg.isSubquery ); + pSubq = pSrc->u4.pSubq; + assert( pSubq!=0 ); + regYield = pSubq->regReturn; addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0); - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSrc->addrFillSub); + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSubq->addrFillSub); addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield); VdbeCoverage(v); - VdbeComment((v, "next row of %s", pSrc->pTab->zName)); + VdbeComment((v, "next row of %s", pSrc->pSTab->zName)); }else{ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v); } @@ -163263,11 +165690,12 @@ static SQLITE_NOINLINE void constructAutomaticIndex( sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue); if( pSrc->fg.viaCoroutine ){ + assert( pSrc->fg.isSubquery && pSrc->u4.pSubq!=0 ); sqlite3VdbeChangeP2(v, addrCounter, regBase+n); testcase( pParse->db->mallocFailed ); assert( pLevel->iIdxCur>0 ); translateColumnToCopy(pParse, addrTop, pLevel->iTabCur, - pSrc->regResult, pLevel->iIdxCur); + pSrc->u4.pSubq->regResult, pLevel->iIdxCur); sqlite3VdbeGoto(v, addrTop); pSrc->fg.viaCoroutine = 0; }else{ @@ -163358,7 +165786,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( iSrc = pLevel->iFrom; pItem = &pTabList->a[iSrc]; assert( pItem!=0 ); - pTab = pItem->pTab; + pTab = pItem->pSTab; assert( pTab!=0 ); sz = sqlite3LogEstToInt(pTab->nRowLogEst); if( sz<10000 ){ @@ -163389,7 +165817,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( int r1 = sqlite3GetTempRange(pParse, n); int jj; for(jj=0; jjpTable==pItem->pTab ); + assert( pIdx->pTable==pItem->pSTab ); sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iCur, jj, r1+jj); } sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 
0, r1, n); @@ -163427,6 +165855,20 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( #ifndef SQLITE_OMIT_VIRTUALTABLE +/* +** Return term iTerm of the WhereClause passed as the first argument. Terms +** are numbered from 0 upwards, starting with the terms in pWC->a[], then +** those in pWC->pOuter->a[] (if any), and so on. +*/ +static WhereTerm *termFromWhereClause(WhereClause *pWC, int iTerm){ + WhereClause *p; + for(p=pWC; p; p=p->pOuter){ + if( iTermnTerm ) return &p->a[iTerm]; + iTerm -= p->nTerm; + } + return 0; +} + /* ** Allocate and populate an sqlite3_index_info structure. It is the ** responsibility of the caller to eventually release the structure @@ -163453,9 +165895,10 @@ static sqlite3_index_info *allocateIndexInfo( const Table *pTab; int eDistinct = 0; ExprList *pOrderBy = pWInfo->pOrderBy; + WhereClause *p; assert( pSrc!=0 ); - pTab = pSrc->pTab; + pTab = pSrc->pSTab; assert( pTab!=0 ); assert( IsVirtual(pTab) ); @@ -163463,28 +165906,30 @@ static sqlite3_index_info *allocateIndexInfo( ** Mark each term with the TERM_OK flag. Set nTerm to the number of ** terms found. */ - for(i=nTerm=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - pTerm->wtFlags &= ~TERM_OK; - if( pTerm->leftCursor != pSrc->iCursor ) continue; - if( pTerm->prereqRight & mUnusable ) continue; - assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); - testcase( pTerm->eOperator & WO_IN ); - testcase( pTerm->eOperator & WO_ISNULL ); - testcase( pTerm->eOperator & WO_IS ); - testcase( pTerm->eOperator & WO_ALL ); - if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; - if( pTerm->wtFlags & TERM_VNULL ) continue; + for(p=pWC, nTerm=0; p; p=p->pOuter){ + for(i=0, pTerm=p->a; inTerm; i++, pTerm++){ + pTerm->wtFlags &= ~TERM_OK; + if( pTerm->leftCursor != pSrc->iCursor ) continue; + if( pTerm->prereqRight & mUnusable ) continue; + assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); + testcase( pTerm->eOperator & WO_IN ); + testcase( pTerm->eOperator & WO_ISNULL ); + testcase( pTerm->eOperator & WO_IS ); + testcase( pTerm->eOperator & WO_ALL ); + if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; + if( pTerm->wtFlags & TERM_VNULL ) continue; - assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); - assert( pTerm->u.x.leftColumn>=XN_ROWID ); - assert( pTerm->u.x.leftColumnnCol ); - if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 - && !constraintCompatibleWithOuterJoin(pTerm,pSrc) - ){ - continue; + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); + assert( pTerm->u.x.leftColumn>=XN_ROWID ); + assert( pTerm->u.x.leftColumnnCol ); + if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 + && !constraintCompatibleWithOuterJoin(pTerm,pSrc) + ){ + continue; + } + nTerm++; + pTerm->wtFlags |= TERM_OK; } - nTerm++; - pTerm->wtFlags |= TERM_OK; } /* If the ORDER BY clause contains only columns in the current @@ -163546,8 +165991,8 @@ static sqlite3_index_info *allocateIndexInfo( */ pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo) + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm - + sizeof(*pIdxOrderBy)*nOrderBy + sizeof(*pHidden) - + sizeof(sqlite3_value*)*nTerm ); + + sizeof(*pIdxOrderBy)*nOrderBy + + SZ_HIDDENINDEXINFO(nTerm) ); if( pIdxInfo==0 ){ sqlite3ErrorMsg(pParse, "out of memory"); return 0; @@ -163559,53 +166004,69 @@ static sqlite3_index_info *allocateIndexInfo( pIdxInfo->aConstraint = pIdxCons; pIdxInfo->aOrderBy = pIdxOrderBy; pIdxInfo->aConstraintUsage = pUsage; + pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed; + if( HasRowid(pTab)==0 ){ + /* Ensure that all bits associated with PK 
columns are set. This is to + ** ensure they are available for cases like RIGHT joins or OR loops. */ + Index *pPk = sqlite3PrimaryKeyIndex((Table*)pTab); + assert( pPk!=0 ); + for(i=0; inKeyCol; i++){ + int iCol = pPk->aiColumn[i]; + assert( iCol>=0 ); + if( iCol>=BMS-1 ) iCol = BMS-1; + pIdxInfo->colUsed |= MASKBIT(iCol); + } + } pHidden->pWC = pWC; pHidden->pParse = pParse; pHidden->eDistinct = eDistinct; pHidden->mIn = 0; - for(i=j=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - u16 op; - if( (pTerm->wtFlags & TERM_OK)==0 ) continue; - pIdxCons[j].iColumn = pTerm->u.x.leftColumn; - pIdxCons[j].iTermOffset = i; - op = pTerm->eOperator & WO_ALL; - if( op==WO_IN ){ - if( (pTerm->wtFlags & TERM_SLICE)==0 ){ - pHidden->mIn |= SMASKBIT32(j); - } - op = WO_EQ; - } - if( op==WO_AUX ){ - pIdxCons[j].op = pTerm->eMatchOp; - }else if( op & (WO_ISNULL|WO_IS) ){ - if( op==WO_ISNULL ){ - pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_ISNULL; - }else{ - pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_IS; - } - }else{ - pIdxCons[j].op = (u8)op; - /* The direct assignment in the previous line is possible only because - ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The - ** following asserts verify this fact. */ - assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); - assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); - assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); - assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); - assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); - assert( pTerm->eOperator&(WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_AUX) ); - - if( op & (WO_LT|WO_LE|WO_GT|WO_GE) - && sqlite3ExprIsVector(pTerm->pExpr->pRight) - ){ - testcase( j!=i ); - if( j<16 ) mNoOmit |= (1 << j); - if( op==WO_LT ) pIdxCons[j].op = WO_LE; - if( op==WO_GT ) pIdxCons[j].op = WO_GE; + for(p=pWC, i=j=0; p; p=p->pOuter){ + int nLast = i+p->nTerm;; + for(pTerm=p->a; iwtFlags & TERM_OK)==0 ) continue; + pIdxCons[j].iColumn = pTerm->u.x.leftColumn; + pIdxCons[j].iTermOffset = i; + op = pTerm->eOperator & WO_ALL; + if( op==WO_IN ){ + if( (pTerm->wtFlags & TERM_SLICE)==0 ){ + pHidden->mIn |= SMASKBIT32(j); + } + op = WO_EQ; + } + if( op==WO_AUX ){ + pIdxCons[j].op = pTerm->eMatchOp; + }else if( op & (WO_ISNULL|WO_IS) ){ + if( op==WO_ISNULL ){ + pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_ISNULL; + }else{ + pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_IS; + } + }else{ + pIdxCons[j].op = (u8)op; + /* The direct assignment in the previous line is possible only because + ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The + ** following asserts verify this fact. */ + assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); + assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); + assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); + assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); + assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); + assert( pTerm->eOperator&(WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_AUX) ); + + if( op & (WO_LT|WO_LE|WO_GT|WO_GE) + && sqlite3ExprIsVector(pTerm->pExpr->pRight) + ){ + testcase( j!=i ); + if( j<16 ) mNoOmit |= (1 << j); + if( op==WO_LT ) pIdxCons[j].op = WO_LE; + if( op==WO_GT ) pIdxCons[j].op = WO_GE; + } } - } - j++; + j++; + } } assert( j==nTerm ); pIdxInfo->nConstraint = j; @@ -163625,6 +166086,17 @@ static sqlite3_index_info *allocateIndexInfo( return pIdxInfo; } +/* +** Free and zero the sqlite3_index_info.idxStr value if needed. 
+*/ +static void freeIdxStr(sqlite3_index_info *pIdxInfo){ + if( pIdxInfo->needToFreeIdxStr ){ + sqlite3_free(pIdxInfo->idxStr); + pIdxInfo->idxStr = 0; + pIdxInfo->needToFreeIdxStr = 0; + } +} + /* ** Free an sqlite3_index_info structure allocated by allocateIndexInfo() ** and possibly modified by xBestIndex methods. @@ -163640,6 +166112,7 @@ static void freeIndexInfo(sqlite3 *db, sqlite3_index_info *pIdxInfo){ sqlite3ValueFree(pHidden->aRhs[i]); /* IMP: R-14553-25174 */ pHidden->aRhs[i] = 0; } + freeIdxStr(pIdxInfo); sqlite3DbFree(db, pIdxInfo); } @@ -163660,9 +166133,11 @@ static void freeIndexInfo(sqlite3 *db, sqlite3_index_info *pIdxInfo){ ** that this is required. */ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ - sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; int rc; + sqlite3_vtab *pVtab; + assert( IsVirtual(pTab) ); + pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; whereTraceIndexInfoInputs(p, pTab); pParse->db->nSchemaLock++; rc = pVtab->pModule->xBestIndex(pVtab, p); @@ -164354,7 +166829,7 @@ static int whereInScanEst( #endif /* SQLITE_ENABLE_STAT4 */ -#ifdef WHERETRACE_ENABLED +#if defined(WHERETRACE_ENABLED) || defined(SQLITE_DEBUG) /* ** Print the content of a WhereTerm object */ @@ -164398,6 +166873,9 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){ sqlite3TreeViewExpr(0, pTerm->pExpr, 0); } } +SQLITE_PRIVATE void sqlite3ShowWhereTerm(WhereTerm *pTerm){ + sqlite3WhereTermPrint(pTerm, 0); +} #endif #ifdef WHERETRACE_ENABLED @@ -164429,17 +166907,19 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ ** 1.002.001 t2.t2xy 2 f 010241 N 2 cost 0,56,31 */ SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){ + WhereInfo *pWInfo; if( pWC ){ - WhereInfo *pWInfo = pWC->pWInfo; + pWInfo = pWC->pWInfo; int nb = 1+(pWInfo->pTabList->nSrc+3)/4; SrcItem *pItem = pWInfo->pTabList->a + p->iTab; - Table *pTab = pItem->pTab; + Table *pTab = pItem->pSTab; Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); sqlite3DebugPrintf(" %12s", pItem->zAlias ? pItem->zAlias : pTab->zName); }else{ + pWInfo = 0; sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d", p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab); } @@ -164471,7 +166951,12 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause }else{ sqlite3DebugPrintf(" f %06x N %d", p->wsFlags, p->nLTerm); } - sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); + if( pWInfo && pWInfo->bStarUsed && p->rStarDelta!=0 ){ + sqlite3DebugPrintf(" cost %d,%d,%d delta=%d\n", + p->rSetup, p->rRun, p->nOut, p->rStarDelta); + }else{ + sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); + } if( p->nLTerm && (sqlite3WhereTrace & 0x4000)!=0 ){ int i; for(i=0; inLTerm; i++){ @@ -164605,7 +167090,7 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ ** and Y has additional constraints that might speed the search that X lacks ** but the cost of running X is not more than the cost of running Y. ** -** In other words, return true if the cost relationwship between X and Y +** In other words, return true if the cost relationship between X and Y ** is inverted and needs to be adjusted. 
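/* Sketch (not part of the upstream patch): several hunks in this region
** route constraint lookups through the new termFromWhereClause() helper,
** because an iTermOffset may now index into a chain of WhereClause
** objects rather than a single a[] array. A toy model of the chained
** lookup, with simplified stand-in types:
*/
typedef struct Clause Clause;
struct Clause {
  int nTerm;           /* Number of terms in this clause */
  int *aTerm;          /* The terms (plain ints, for brevity) */
  Clause *pOuter;      /* Enclosing clause, or NULL at the end */
};

/* Return the iTerm-th term counting across the whole chain: indices
** cover pWC->aTerm[] first, then pWC->pOuter->aTerm[], and so on.
** A NULL return means the offset is out of range, which the vtab
** hunks below report as an xBestIndex malfunction. */
static int *termAt(Clause *pWC, int iTerm){
  Clause *p;
  for(p=pWC; p; p=p->pOuter){
    if( iTerm < p->nTerm ) return &p->aTerm[iTerm];
    iTerm -= p->nTerm;
  }
  return 0;
}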
** ** Case 1: @@ -164991,7 +167476,7 @@ static void whereLoopOutputAdjust( Expr *pRight = pTerm->pExpr->pRight; int k = 0; testcase( pTerm->pExpr->op==TK_IS ); - if( sqlite3ExprIsInteger(pRight, &k) && k>=(-1) && k<=1 ){ + if( sqlite3ExprIsInteger(pRight, &k, 0) && k>=(-1) && k<=1 ){ k = 10; }else{ k = 20; @@ -165143,11 +167628,8 @@ static int whereLoopAddBtreeIndex( assert( pNew->u.btree.nBtm==0 ); opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS; } - if( pProbe->bUnordered || pProbe->bLowQual ){ - if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); - if( pProbe->bLowQual && pSrc->fg.isIndexedBy==0 ){ - opMask &= ~(WO_EQ|WO_IN|WO_IS); - } + if( pProbe->bUnordered ){ + opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); } assert( pNew->u.btree.nEqnColumn ); @@ -165220,6 +167702,7 @@ static int whereLoopAddBtreeIndex( if( ExprUseXSelect(pExpr) ){ /* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */ int i; + int bRedundant = 0; nIn = 46; assert( 46==sqlite3LogEst(25) ); /* The expression may actually be of the form (x, y) IN (SELECT...). @@ -165228,7 +167711,20 @@ static int whereLoopAddBtreeIndex( ** for each such term. The following loop checks that pTerm is the ** first such term in use, and sets nIn back to 0 if it is not. */ for(i=0; inLTerm-1; i++){ - if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ) nIn = 0; + if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ){ + nIn = 0; + if( pNew->aLTerm[i]->u.x.iField == pTerm->u.x.iField ){ + /* Detect when two or more columns of an index match the same + ** column of a vector IN operater, and avoid adding the column + ** to the WhereLoop more than once. See tag-20250707-01 + ** in test/rowvalue.test */ + bRedundant = 1; + } + } + } + if( bRedundant ){ + pNew->nLTerm--; + continue; } }else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){ /* "x IN (value, value, ...)" */ @@ -165288,7 +167784,7 @@ static int whereLoopAddBtreeIndex( || (iCol>=0 && nInMul==0 && saved_nEq==pProbe->nKeyCol-1) ){ if( iCol==XN_ROWID || pProbe->uniqNotNull - || (pProbe->nKeyCol==1 && pProbe->onError && eOp==WO_EQ) + || (pProbe->nKeyCol==1 && pProbe->onError && (eOp & WO_EQ)) ){ pNew->wsFlags |= WHERE_ONEROW; }else{ @@ -165421,7 +167917,7 @@ static int whereLoopAddBtreeIndex( ** 2. Stepping forward in the index pNew->nOut times to find all ** additional matching entries. */ - assert( pSrc->pTab->szTabRow>0 ); + assert( pSrc->pSTab->szTabRow>0 ); if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){ /* The pProbe->szIdxRow is low for an IPK table since the interior ** pages are small. Thus szIdxRow gives a good estimate of seek cost. @@ -165429,7 +167925,7 @@ static int whereLoopAddBtreeIndex( ** under-estimate the scanning cost. 
*/ rCostIdx = pNew->nOut + 16; }else{ - rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; + rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pSTab->szTabRow; } rCostIdx = sqlite3LogEstAdd(rLogSize, rCostIdx); @@ -165460,7 +167956,7 @@ static int whereLoopAddBtreeIndex( if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 && pNew->u.btree.nEqnColumn && (pNew->u.btree.nEqnKeyCol || - pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY) + pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY) ){ if( pNew->u.btree.nEq>3 ){ sqlite3ProgressCheck(pParse); @@ -165583,13 +168079,13 @@ static int whereUsablePartialIndex( if( !whereUsablePartialIndex(iTab,jointype,pWC,pWhere->pLeft) ) return 0; pWhere = pWhere->pRight; } - if( pParse->db->flags & SQLITE_EnableQPSG ) pParse = 0; for(i=0, pTerm=pWC->a; inTerm; i++, pTerm++){ Expr *pExpr; pExpr = pTerm->pExpr; if( (!ExprHasProperty(pExpr, EP_OuterON) || pExpr->w.iJoin==iTab) && ((jointype & JT_OUTER)==0 || ExprHasProperty(pExpr, EP_OuterON)) && sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, iTab) + && !sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, -1) && (pTerm->wtFlags & TERM_VNULL)==0 ){ return 1; @@ -165894,9 +168390,9 @@ static int whereLoopAddBtree( pWInfo = pBuilder->pWInfo; pTabList = pWInfo->pTabList; pSrc = pTabList->a + pNew->iTab; - pTab = pSrc->pTab; + pTab = pSrc->pSTab; pWC = pBuilder->pWC; - assert( !IsVirtual(pSrc->pTab) ); + assert( !IsVirtual(pSrc->pSTab) ); if( pSrc->fg.isIndexedBy ){ assert( pSrc->fg.isCte==0 ); @@ -165921,7 +168417,7 @@ static int whereLoopAddBtree( sPk.idxType = SQLITE_IDXTYPE_IPK; aiRowEstPk[0] = pTab->nRowLogEst; aiRowEstPk[1] = 0; - pFirst = pSrc->pTab->pIndex; + pFirst = pSrc->pSTab->pIndex; if( pSrc->fg.notIndexed==0 ){ /* The real indices of the table are only considered if the ** NOT INDEXED qualifier is omitted from the FROM clause */ @@ -165938,7 +168434,6 @@ static int whereLoopAddBtree( && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0 && !pSrc->fg.isIndexedBy /* Has no INDEXED BY clause */ && !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */ - && HasRowid(pTab) /* Not WITHOUT ROWID table. (FIXME: Why not?) */ && !pSrc->fg.isCorrelated /* Not a correlated subquery */ && !pSrc->fg.isRecursive /* Not a recursive common table expression. */ && (pSrc->fg.jointype & JT_RIGHT)==0 /* Not the right tab of a RIGHT JOIN */ @@ -166004,6 +168499,7 @@ static int whereLoopAddBtree( pNew->u.btree.nEq = 0; pNew->u.btree.nBtm = 0; pNew->u.btree.nTop = 0; + pNew->u.btree.nDistinctCol = 0; pNew->nSkip = 0; pNew->nLTerm = 0; pNew->iSortIdx = 0; @@ -166011,6 +168507,7 @@ static int whereLoopAddBtree( pNew->prereq = mPrereq; pNew->nOut = rSize; pNew->u.btree.pIndex = pProbe; + pNew->u.btree.pOrderBy = 0; b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor); /* The ONEPASS_DESIRED flags never occurs together with ORDER BY */ @@ -166040,6 +168537,10 @@ static int whereLoopAddBtree( #endif ApplyCostMultiplier(pNew->rRun, pTab->costMult); whereLoopOutputAdjust(pWC, pNew, rSize); + if( pSrc->fg.isSubquery ){ + if( pSrc->fg.viaCoroutine ) pNew->wsFlags |= WHERE_COROUTINE; + pNew->u.btree.pOrderBy = pSrc->u4.pSubq->pSelect->pOrderBy; + } rc = whereLoopInsert(pBuilder, pNew); pNew->nOut = rSize; if( rc ) break; @@ -166081,7 +168582,7 @@ static int whereLoopAddBtree( && (HasRowid(pTab) || pWInfo->pSelect!=0 || sqlite3FaultSim(700)) ){ WHERETRACE(0x200, - ("-> %s a covering index according to bitmasks\n", + ("-> %s is a covering index according to bitmasks\n", pProbe->zName, m==0 ? 
"is" : "is not")); pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; } @@ -166242,7 +168743,7 @@ static int whereLoopAddVirtualOne( ** arguments mUsable and mExclude. */ pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint; for(i=0; ia[pIdxCons->iTermOffset]; + WhereTerm *pTerm = termFromWhereClause(pWC, pIdxCons->iTermOffset); pIdxCons->usable = 0; if( (pTerm->prereqRight & mUsable)==pTerm->prereqRight && (pTerm->eOperator & mExclude)==0 @@ -166261,11 +168762,10 @@ static int whereLoopAddVirtualOne( pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2; pIdxInfo->estimatedRows = 25; pIdxInfo->idxFlags = 0; - pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed; pHidden->mHandleIn = 0; /* Invoke the virtual table xBestIndex() method */ - rc = vtabBestIndex(pParse, pSrc->pTab, pIdxInfo); + rc = vtabBestIndex(pParse, pSrc->pSTab, pIdxInfo); if( rc ){ if( rc==SQLITE_CONSTRAINT ){ /* If the xBestIndex method returns SQLITE_CONSTRAINT, that means @@ -166273,6 +168773,7 @@ static int whereLoopAddVirtualOne( ** Make no entries in the loop table. */ WHERETRACE(0xffffffff, (" ^^^^--- non-viable plan rejected!\n")); + freeIdxStr(pIdxInfo); return SQLITE_OK; } return rc; @@ -166290,18 +168791,17 @@ static int whereLoopAddVirtualOne( int j = pIdxCons->iTermOffset; if( iTerm>=nConstraint || j<0 - || j>=pWC->nTerm + || (pTerm = termFromWhereClause(pWC, j))==0 || pNew->aLTerm[iTerm]!=0 || pIdxCons->usable==0 ){ - sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName); - testcase( pIdxInfo->needToFreeIdxStr ); + sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pSTab->zName); + freeIdxStr(pIdxInfo); return SQLITE_ERROR; } testcase( iTerm==nConstraint-1 ); testcase( j==0 ); testcase( j==pWC->nTerm-1 ); - pTerm = &pWC->a[j]; pNew->prereq |= pTerm->prereqRight; assert( iTermnLSlot ); pNew->aLTerm[iTerm] = pTerm; @@ -166346,11 +168846,7 @@ static int whereLoopAddVirtualOne( ** the plan cannot be used. In these cases set variable *pbRetryLimit ** to true to tell the caller to retry with LIMIT and OFFSET ** disabled. */ - if( pIdxInfo->needToFreeIdxStr ){ - sqlite3_free(pIdxInfo->idxStr); - pIdxInfo->idxStr = 0; - pIdxInfo->needToFreeIdxStr = 0; - } + freeIdxStr(pIdxInfo); *pbRetryLimit = 1; return SQLITE_OK; } @@ -166362,8 +168858,8 @@ static int whereLoopAddVirtualOne( if( pNew->aLTerm[i]==0 ){ /* The non-zero argvIdx values must be contiguous. Raise an ** error if they are not */ - sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName); - testcase( pIdxInfo->needToFreeIdxStr ); + sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pSTab->zName); + freeIdxStr(pIdxInfo); return SQLITE_ERROR; } } @@ -166374,6 +168870,7 @@ static int whereLoopAddVirtualOne( pNew->u.vtab.idxStr = pIdxInfo->idxStr; pNew->u.vtab.isOrdered = (i8)(pIdxInfo->orderByConsumed ? 
pIdxInfo->nOrderBy : 0); + pNew->u.vtab.bIdxNumHex = (pIdxInfo->idxFlags&SQLITE_INDEX_SCAN_HEX)!=0; pNew->rSetup = 0; pNew->rRun = sqlite3LogEstFromDouble(pIdxInfo->estimatedCost); pNew->nOut = sqlite3LogEst(pIdxInfo->estimatedRows); @@ -166418,7 +168915,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info *pIdxInfo, int if( iCons>=0 && iConsnConstraint ){ CollSeq *pC = 0; int iTerm = pIdxInfo->aConstraint[iCons].iTermOffset; - Expr *pX = pHidden->pWC->a[iTerm].pExpr; + Expr *pX = termFromWhereClause(pHidden->pWC, iTerm)->pExpr; if( pX->pLeft ){ pC = sqlite3ExprCompareCollSeq(pHidden->pParse, pX); } @@ -166464,7 +168961,9 @@ SQLITE_API int sqlite3_vtab_rhs_value( rc = SQLITE_MISUSE_BKPT; /* EV: R-30545-25046 */ }else{ if( pH->aRhs[iCons]==0 ){ - WhereTerm *pTerm = &pH->pWC->a[pIdxInfo->aConstraint[iCons].iTermOffset]; + WhereTerm *pTerm = termFromWhereClause( + pH->pWC, pIdxInfo->aConstraint[iCons].iTermOffset + ); rc = sqlite3ValueFromExpr( pH->pParse->db, pTerm->pExpr->pRight, ENC(pH->pParse->db), SQLITE_AFF_BLOB, &pH->aRhs[iCons] @@ -166562,7 +169061,7 @@ static int whereLoopAddVirtual( pWC = pBuilder->pWC; pNew = pBuilder->pNew; pSrc = &pWInfo->pTabList->a[pNew->iTab]; - assert( IsVirtual(pSrc->pTab) ); + assert( IsVirtual(pSrc->pSTab) ); p = allocateIndexInfo(pWInfo, pWC, mUnusable, pSrc, &mNoOmit); if( p==0 ) return SQLITE_NOMEM_BKPT; pNew->rSetup = 0; @@ -166576,7 +169075,7 @@ static int whereLoopAddVirtual( } /* First call xBestIndex() with all constraints usable. */ - WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pTab->zName)); + WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pSTab->zName)); WHERETRACE(0x800, (" VirtualOne: all usable\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn, &bRetry @@ -166620,9 +169119,8 @@ static int whereLoopAddVirtual( Bitmask mNext = ALLBITS; assert( mNext>0 ); for(i=0; ia[p->aConstraint[i].iTermOffset].prereqRight & ~mPrereq - ); + int iTerm = p->aConstraint[i].iTermOffset; + Bitmask mThis = termFromWhereClause(pWC, iTerm)->prereqRight & ~mPrereq; if( mThis>mPrev && mThisneedToFreeIdxStr ) sqlite3_free(p->idxStr); freeIndexInfo(pParse->db, p); - WHERETRACE(0x800, ("END %s.addVirtual(), rc=%d\n", pSrc->pTab->zName, rc)); + WHERETRACE(0x800, ("END %s.addVirtual(), rc=%d\n", pSrc->pSTab->zName, rc)); return rc; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ @@ -166732,7 +169229,7 @@ static int whereLoopAddOr( } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pItem->pTab) ){ + if( IsVirtual(pItem->pSTab) ){ rc = whereLoopAddVirtual(&sSubBuild, mPrereq, mUnusable); }else #endif @@ -166846,7 +169343,7 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ mPrereq = 0; } #ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pItem->pTab) ){ + if( IsVirtual(pItem->pSTab) ){ SrcItem *p; for(p=&pItem[1]; pfg.jointype & (JT_OUTER|JT_CROSS)) ){ @@ -166878,6 +169375,97 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ return rc; } +/* Implementation of the order-by-subquery optimization: +** +** WhereLoop pLoop, which the iLoop-th term of the nested loop, is really +** a subquery or CTE that has an ORDER BY clause. See if any of the terms +** in the subquery ORDER BY clause will satisfy pOrderBy from the outer +** query. Mark off all satisfied terms (by setting bits in *pOBSat) and +** return TRUE if they do. If not, return false. 
+** +** Example: +** +** CREATE TABLE t1(a,b,c, PRIMARY KEY(a,b)); +** CREATE TABLE t2(x,y); +** WITH t3(p,q) AS MATERIALIZED (SELECT x+y, x-y FROM t2 ORDER BY x+y) +** SELECT * FROM t3 JOIN t1 ON a=q ORDER BY p, b; +** +** The CTE named "t3" comes out in the natural order of "p", so the first +** first them of "ORDER BY p,b" is satisfied by a sequential scan of "t3" +** and sorting only needs to occur on the second term "b". +** +** Limitations: +** +** (1) The optimization is not applied if the outer ORDER BY contains +** a COLLATE clause. The optimization might be applied if the +** outer ORDER BY uses NULLS FIRST, NULLS LAST, ASC, and/or DESC as +** long as the subquery ORDER BY does the same. But if the +** outer ORDER BY uses COLLATE, even a redundant COLLATE, the +** optimization is bypassed. +** +** (2) The subquery ORDER BY terms must exactly match subquery result +** columns, including any COLLATE annotations. This routine relies +** on iOrderByCol to do matching between order by terms and result +** columns, and iOrderByCol will not be set if the result column +** and ORDER BY collations differ. +** +** (3) The subquery and outer ORDER BY can be in opposite directions as +** long as the subquery is materialized. If the subquery is +** implemented as a co-routine, the sort orders must be in the same +** direction because there is no way to run a co-routine backwards. +*/ +static SQLITE_NOINLINE int wherePathMatchSubqueryOB( + WhereInfo *pWInfo, /* The WHERE clause */ + WhereLoop *pLoop, /* The nested loop term that is a subquery */ + int iLoop, /* Which level of the nested loop. 0==outermost */ + int iCur, /* Cursor used by the this loop */ + ExprList *pOrderBy, /* The ORDER BY clause on the whole query */ + Bitmask *pRevMask, /* When loops need to go in reverse order */ + Bitmask *pOBSat /* Which terms of pOrderBy are satisfied so far */ +){ + int iOB; /* Index into pOrderBy->a[] */ + int jSub; /* Index into pSubOB->a[] */ + u8 rev = 0; /* True if iOB and jSub sort in opposite directions */ + u8 revIdx = 0; /* Sort direction for jSub */ + Expr *pOBExpr; /* Current term of outer ORDER BY */ + ExprList *pSubOB; /* Complete ORDER BY on the subquery */ + + pSubOB = pLoop->u.btree.pOrderBy; + assert( pSubOB!=0 ); + for(iOB=0; (MASKBIT(iOB) & *pOBSat)!=0; iOB++){} + for(jSub=0; jSubnExpr && iOBnExpr; jSub++, iOB++){ + if( pSubOB->a[jSub].u.x.iOrderByCol==0 ) break; + pOBExpr = pOrderBy->a[iOB].pExpr; + if( pOBExpr->op!=TK_COLUMN && pOBExpr->op!=TK_AGG_COLUMN ) break; + if( pOBExpr->iTable!=iCur ) break; + if( pOBExpr->iColumn!=pSubOB->a[jSub].u.x.iOrderByCol-1 ) break; + if( (pWInfo->wctrlFlags & WHERE_GROUPBY)==0 ){ + u8 sfOB = pOrderBy->a[iOB].fg.sortFlags; /* sortFlags for iOB */ + u8 sfSub = pSubOB->a[jSub].fg.sortFlags; /* sortFlags for jSub */ + if( (sfSub & KEYINFO_ORDER_BIGNULL) != (sfOB & KEYINFO_ORDER_BIGNULL) ){ + break; + } + revIdx = sfSub & KEYINFO_ORDER_DESC; + if( jSub>0 ){ + if( (rev^revIdx)!=(sfOB & KEYINFO_ORDER_DESC) ){ + break; + } + }else{ + rev = revIdx ^ (sfOB & KEYINFO_ORDER_DESC); + if( rev ){ + if( (pLoop->wsFlags & WHERE_COROUTINE)!=0 ){ + /* Cannot run a co-routine in reverse order */ + break; + } + *pRevMask |= MASKBIT(iLoop); + } + } + } + *pOBSat |= MASKBIT(iOB); + } + return jSub>0; +} + /* ** Examine a WherePath (with the addition of the extra WhereLoop of the 6th ** parameters) to see if it outputs rows in the requested ORDER BY @@ -166980,8 +169568,6 @@ static i8 wherePathSatisfiesOrderBy( obSat = obDone; } break; - }else if( wctrlFlags & 
WHERE_DISTINCTBY ){ - pLoop->u.btree.nDistinctCol = 0; } iCur = pWInfo->pTabList->a[pLoop->iTab].iCursor; @@ -167023,9 +169609,18 @@ static i8 wherePathSatisfiesOrderBy( if( (pLoop->wsFlags & WHERE_ONEROW)==0 ){ if( pLoop->wsFlags & WHERE_IPK ){ + if( pLoop->u.btree.pOrderBy + && OptimizationEnabled(db, SQLITE_OrderBySubq) + && wherePathMatchSubqueryOB(pWInfo,pLoop,iLoop,iCur, + pOrderBy,pRevMask, &obSat) + ){ + nColumn = 0; + isOrderDistinct = 0; + }else{ + nColumn = 1; + } pIndex = 0; nKeyCol = 0; - nColumn = 1; }else if( (pIndex = pLoop->u.btree.pIndex)==0 || pIndex->bUnordered ){ return 0; }else{ @@ -167035,7 +169630,7 @@ static i8 wherePathSatisfiesOrderBy( assert( pIndex->aiColumn[nColumn-1]==XN_ROWID || !HasRowid(pIndex->pTable)); /* All relevant terms of the index must also be non-NULL in order - ** for isOrderDistinct to be true. So the isOrderDistint value + ** for isOrderDistinct to be true. So the isOrderDistinct value ** computed here might be a false positive. Corrections will be ** made at tag-20210426-1 below */ isOrderDistinct = IsUniqueIndex(pIndex) @@ -167120,7 +169715,7 @@ static i8 wherePathSatisfiesOrderBy( } /* Find the ORDER BY term that corresponds to the j-th column - ** of the index and mark that ORDER BY term off + ** of the index and mark that ORDER BY term having been satisfied. */ isMatch = 0; for(i=0; bOnce && inLevel; /* Number of terms in the join */ + WhereLoop *pWLoop; /* For looping over WhereLoops */ + +#ifdef SQLITE_DEBUG + /* The star-query detection code below makes use of the following + ** properties of the WhereLoop list, so verify them before + ** continuing: + ** (1) .maskSelf is the bitmask corresponding to .iTab + ** (2) The WhereLoop list is in ascending .iTab order + */ + for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ + assert( pWLoop->maskSelf==MASKBIT(pWLoop->iTab) ); + assert( pWLoop->pNextLoop==0 || pWLoop->iTab<=pWLoop->pNextLoop->iTab ); + } +#endif /* SQLITE_DEBUG */ + + if( nLoop>=5 + && !pWInfo->bStarDone + && OptimizationEnabled(pWInfo->pParse->db, SQLITE_StarQuery) + ){ + SrcItem *aFromTabs; /* All terms of the FROM clause */ + int iFromIdx; /* Term of FROM clause is the candidate fact-table */ + Bitmask m; /* Bitmask for candidate fact-table */ + Bitmask mSelfJoin = 0; /* Tables that cannot be dimension tables */ + WhereLoop *pStart; /* Where to start searching for dimension-tables */ + + pWInfo->bStarDone = 1; /* Only do this computation once */ + + /* Look for fact tables with four or more dimensions where the + ** dimension tables are not separately from the fact tables by an outer + ** or cross join. Adjust cost weights if found. + */ + assert( !pWInfo->bStarUsed ); + aFromTabs = pWInfo->pTabList->a; + pStart = pWInfo->pLoops; + for(iFromIdx=0, m=1; iFromIdxfg.jointype & (JT_OUTER|JT_CROSS))!=0 ){ + /* If the candidate fact-table is the right table of an outer join + ** restrict the search for dimension-tables to be tables to the right + ** of the fact-table. */ + if( iFromIdx+4 > nLoop ) break; /* Impossible to reach nDep>=4 */ + while( pStart && pStart->iTab<=iFromIdx ){ + pStart = pStart->pNextLoop; + } + } + for(pWLoop=pStart; pWLoop; pWLoop=pWLoop->pNextLoop){ + if( (aFromTabs[pWLoop->iTab].fg.jointype & (JT_OUTER|JT_CROSS))!=0 ){ + /* Fact-tables and dimension-tables cannot be separated by an + ** outer join (at least for the definition of fact- and dimension- + ** used by this heuristic). 
*/ + break; + } + if( (pWLoop->prereq & m)!=0 /* pWInfo depends on iFromIdx */ + && (pWLoop->maskSelf & mSeen)==0 /* pWInfo not already a dependency */ + && (pWLoop->maskSelf & mSelfJoin)==0 /* Not a self-join */ + ){ + if( aFromTabs[pWLoop->iTab].pSTab==pFactTab->pSTab ){ + mSelfJoin |= m; + }else{ + nDep++; + mSeen |= pWLoop->maskSelf; + } + } + } + if( nDep<=3 ) continue; + + /* If we reach this point, it means that pFactTab is a fact table + ** with four or more dimensions connected by inner joins. Proceed + ** to make cost adjustments. */ + +#ifdef WHERETRACE_ENABLED + /* Make sure rStarDelta values are initialized */ + if( !pWInfo->bStarUsed ){ + for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ + pWLoop->rStarDelta = 0; + } + } +#endif + pWInfo->bStarUsed = 1; + + /* Compute the maximum cost of any WhereLoop for the + ** fact table plus one epsilon */ + mxRun = LOGEST_MIN; + for(pWLoop=pStart; pWLoop; pWLoop=pWLoop->pNextLoop){ + if( pWLoop->iTabiTab>iFromIdx ) break; + if( pWLoop->rRun>mxRun ) mxRun = pWLoop->rRun; + } + if( ALWAYS(mxRunpNextLoop){ + if( (pWLoop->maskSelf & mSeen)==0 ) continue; + if( pWLoop->nLTerm ) continue; + if( pWLoop->rRuniTab; + sqlite3DebugPrintf( + "Increase SCAN cost of dimension %s(%d) of fact %s(%d) to %d\n", + pDim->zAlias ? pDim->zAlias: pDim->pSTab->zName, pWLoop->iTab, + pFactTab->zAlias ? pFactTab->zAlias : pFactTab->pSTab->zName, + iFromIdx, mxRun + ); + } + pWLoop->rStarDelta = mxRun - pWLoop->rRun; +#endif /* WHERETRACE_ENABLED */ + pWLoop->rRun = mxRun; + } + } + } +#ifdef WHERETRACE_ENABLED /* 0x80000 */ + if( (sqlite3WhereTrace & 0x80000)!=0 && pWInfo->bStarUsed ){ + sqlite3DebugPrintf("WhereLoops changed by star-query heuristic:\n"); + for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ + if( pWLoop->rStarDelta ){ + sqlite3WhereLoopPrint(pWLoop, &pWInfo->sWC); + } + } + } +#endif + } + return pWInfo->bStarUsed ? 18 : 12; +} + +/* +** Two WhereLoop objects, pCandidate and pBaseline, are known to have the +** same cost. Look deep into each to see if pCandidate is even slightly +** better than pBaseline. Return false if it is, if pCandidate is is preferred. +** Return true if pBaseline is preferred or if we cannot tell the difference. 
+** +** Result Meaning +** -------- ---------------------------------------------------------- +** true We cannot tell the difference in pCandidate and pBaseline +** false pCandidate seems like a better choice than pBaseline +*/ +static SQLITE_NOINLINE int whereLoopIsNoBetter( + const WhereLoop *pCandidate, + const WhereLoop *pBaseline +){ + if( (pCandidate->wsFlags & WHERE_INDEXED)==0 ) return 1; + if( (pBaseline->wsFlags & WHERE_INDEXED)==0 ) return 1; + if( pCandidate->u.btree.pIndex->szIdxRow < + pBaseline->u.btree.pIndex->szIdxRow ) return 0; + return 1; +} + /* ** Given the list of WhereLoop objects at pWInfo->pLoops, this routine ** attempts to find the lowest cost path that visits each WhereLoop @@ -167348,7 +170153,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ int mxI = 0; /* Index of next entry to replace */ int nOrderBy; /* Number of ORDER BY clause terms */ LogEst mxCost = 0; /* Maximum cost of a set of paths */ - LogEst mxUnsorted = 0; /* Maximum unsorted cost of a set of path */ + LogEst mxUnsort = 0; /* Maximum unsorted cost of a set of path */ int nTo, nFrom; /* Number of valid entries in aTo[] and aFrom[] */ WherePath *aFrom; /* All nFrom paths at the previous level */ WherePath *aTo; /* The nTo best paths at the current level */ @@ -167362,13 +170167,27 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ pParse = pWInfo->pParse; nLoop = pWInfo->nLevel; - /* TUNING: For simple queries, only the best path is tracked. - ** For 2-way joins, the 5 best paths are followed. - ** For joins of 3 or more tables, track the 10 best paths */ - mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); - assert( nLoop<=pWInfo->pTabList->nSrc ); WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d, nQueryLoop=%d)\n", nRowEst, pParse->nQueryLoop)); + /* TUNING: mxChoice is the maximum number of possible paths to preserve + ** at each step. Based on the number of loops in the FROM clause: + ** + ** nLoop mxChoice + ** ----- -------- + ** 1 1 // the most common case + ** 2 5 + ** 3+ 12 or 18 // see computeMxChoice() + */ + if( nLoop<=1 ){ + mxChoice = 1; + }else if( nLoop==2 ){ + mxChoice = 5; + }else if( pParse->nErr ){ + mxChoice = 1; + }else{ + mxChoice = computeMxChoice(pWInfo); + } + assert( nLoop<=pWInfo->pTabList->nSrc ); /* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this ** case the purpose of this call is to estimate the number of rows returned @@ -167433,7 +170252,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ LogEst nOut; /* Rows visited by (pFrom+pWLoop) */ LogEst rCost; /* Cost of path (pFrom+pWLoop) */ - LogEst rUnsorted; /* Unsorted cost of (pFrom+pWLoop) */ + LogEst rUnsort; /* Unsorted cost of (pFrom+pWLoop) */ i8 isOrdered; /* isOrdered for (pFrom+pWLoop) */ Bitmask maskNew; /* Mask of src visited by (..) */ Bitmask revMask; /* Mask of rev-order loops for (..) */ @@ -167451,8 +170270,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ /* At this point, pWLoop is a candidate to be the next loop. 
** Compute its cost */ - rUnsorted = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow); - rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted); + rUnsort = pWLoop->rRun + pFrom->nRow; + if( pWLoop->rSetup ){ + rUnsort = sqlite3LogEstAdd(pWLoop->rSetup, rUnsort); + } + rUnsort = sqlite3LogEstAdd(rUnsort, pFrom->rUnsort); nOut = pFrom->nRow + pWLoop->nOut; maskNew = pFrom->maskLoop | pWLoop->maskSelf; isOrdered = pFrom->isOrdered; @@ -167474,15 +170296,15 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** extra encouragement to the query planner to select a plan ** where the rows emerge in the correct order without any sorting ** required. */ - rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 3; + rCost = sqlite3LogEstAdd(rUnsort, aSortCost[isOrdered]) + 3; WHERETRACE(0x002, ("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n", aSortCost[isOrdered], (nOrderBy-isOrdered), nOrderBy, - rUnsorted, rCost)); + rUnsort, rCost)); }else{ - rCost = rUnsorted; - rUnsorted -= 2; /* TUNING: Slight bias in favor of no-sort plans */ + rCost = rUnsort; + rUnsort -= 2; /* TUNING: Slight bias in favor of no-sort plans */ } /* Check to see if pWLoop should be added to the set of @@ -167496,6 +170318,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** to (pTo->isOrdered==(-1))==(isOrdered==(-1))" for the range ** of legal values for isOrdered, -1..64. */ + testcase( nTo==0 ); for(jj=0, pTo=aTo; jjmaskLoop==maskNew && ((pTo->isOrdered^isOrdered)&0x80)==0 @@ -167507,7 +170330,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( jj>=nTo ){ /* None of the existing best-so-far paths match the candidate. */ if( nTo>=mxChoice - && (rCost>mxCost || (rCost==mxCost && rUnsorted>=mxUnsorted)) + && (rCost>mxCost || (rCost==mxCost && rUnsort>=mxUnsort)) ){ /* The current candidate is no better than any of the mxChoice ** paths currently in the best-so-far buffer. So discard @@ -167515,7 +170338,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf("Skip %s cost=%-3d,%3d,%3d order=%c\n", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); } #endif @@ -167534,7 +170357,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf("New %s cost=%-3d,%3d,%3d order=%c\n", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); } #endif @@ -167545,24 +170368,23 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** pTo or if the candidate should be skipped. 
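/* Sketch (not part of the upstream patch): the conditional just below
** expands a three-component lexicographic ("vector") comparison over
** (rCost, nRow, rUnsort), with whereLoopIsNoBetter() added by the patch
** as a final tie-breaker. The same ordering written directly:
*/
static int vecLE(int a1, int a2, int a3, int b1, int b2, int b3){
  if( a1!=b1 ) return a1<b1;   /* cost dominates */
  if( a2!=b2 ) return a2<b2;   /* then output row count */
  return a3<=b3;               /* then unsorted cost breaks ties */
}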
** ** The conditional is an expanded vector comparison equivalent to: - ** (pTo->rCost,pTo->nRow,pTo->rUnsorted) <= (rCost,nOut,rUnsorted) + ** (pTo->rCost,pTo->nRow,pTo->rUnsort) <= (rCost,nOut,rUnsort) */ - if( pTo->rCostrCost==rCost - && (pTo->nRownRow==nOut && pTo->rUnsorted<=rUnsorted) - ) - ) + if( (pTo->rCostrCost==rCost && pTo->nRowrCost==rCost && pTo->nRow==nOut && pTo->rUnsortrCost==rCost && pTo->nRow==nOut && pTo->rUnsort==rUnsort + && whereLoopIsNoBetter(pWLoop, pTo->aLoop[iLoop]) ) ){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf( "Skip %s cost=%-3d,%3d,%3d order=%c", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); sqlite3DebugPrintf(" vs %s cost=%-3d,%3d,%3d order=%c\n", wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->rUnsorted, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); + pTo->rUnsort, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); } #endif /* Discard the candidate path from further consideration */ @@ -167576,11 +170398,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf( "Update %s cost=%-3d,%3d,%3d order=%c", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); sqlite3DebugPrintf(" was %s cost=%-3d,%3d,%3d order=%c\n", wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->rUnsorted, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); + pTo->rUnsort, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); } #endif } @@ -167589,20 +170411,20 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ pTo->revLoop = revMask; pTo->nRow = nOut; pTo->rCost = rCost; - pTo->rUnsorted = rUnsorted; + pTo->rUnsort = rUnsort; pTo->isOrdered = isOrdered; memcpy(pTo->aLoop, pFrom->aLoop, sizeof(WhereLoop*)*iLoop); pTo->aLoop[iLoop] = pWLoop; if( nTo>=mxChoice ){ mxI = 0; mxCost = aTo[0].rCost; - mxUnsorted = aTo[0].nRow; + mxUnsort = aTo[0].nRow; for(jj=1, pTo=&aTo[1]; jjrCost>mxCost - || (pTo->rCost==mxCost && pTo->rUnsorted>mxUnsorted) + || (pTo->rCost==mxCost && pTo->rUnsort>mxUnsort) ){ mxCost = pTo->rCost; - mxUnsorted = pTo->rUnsorted; + mxUnsort = pTo->rUnsort; mxI = jj; } } @@ -167612,17 +170434,32 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* >=2 */ if( sqlite3WhereTrace & 0x02 ){ + LogEst rMin, rFloor = 0; + int nDone = 0; + int nProgress; sqlite3DebugPrintf("---- after round %d ----\n", iLoop); - for(ii=0, pTo=aTo; iirCost, pTo->nRow, - pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?'); - if( pTo->isOrdered>0 ){ - sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop); - }else{ - sqlite3DebugPrintf("\n"); + do{ + nProgress = 0; + rMin = 0x7fff; + for(ii=0, pTo=aTo; iirCost>rFloor && pTo->rCostrCost; + } + for(ii=0, pTo=aTo; iirCost==rMin ){ + sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c", + wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, + pTo->isOrdered>=0 ? 
@@ -167612,17 +170434,32 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
 #ifdef WHERETRACE_ENABLED  /* >=2 */
     if( sqlite3WhereTrace & 0x02 ){
+      LogEst rMin, rFloor = 0;
+      int nDone = 0;
+      int nProgress;
       sqlite3DebugPrintf("---- after round %d ----\n", iLoop);
-      for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){
-        sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c",
-           wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
-           pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?');
-        if( pTo->isOrdered>0 ){
-          sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop);
-        }else{
-          sqlite3DebugPrintf("\n");
+      do{
+        nProgress = 0;
+        rMin = 0x7fff;
+        for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){
+          if( pTo->rCost>rFloor && pTo->rCost<rMin ) rMin = pTo->rCost;
+        }
+        for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){
+          if( pTo->rCost==rMin ){
+            sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c",
+               wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
+               pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?');
+            if( pTo->isOrdered>0 ){
+              sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop);
+            }else{
+              sqlite3DebugPrintf("\n");
+            }
+            nDone++;
+            nProgress++;
+          }
         }
-      }
+        rFloor = rMin;
+      }while( nDone<nTo && nProgress>0 );
     }
 #endif
@@ -167717,6 +170554,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
   }
   pWInfo->nRowOut = pFrom->nRow;
+#ifdef WHERETRACE_ENABLED
+  pWInfo->rTotalCost = pFrom->rCost;
+#endif

   /* Free temporary memory and return success */
   sqlite3StackFreeNN(pParse->db, pSpace);
@@ -167827,7 +170667,7 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){
   if( pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE ) return 0;
   assert( pWInfo->pTabList->nSrc>=1 );
   pItem = pWInfo->pTabList->a;
-  pTab = pItem->pTab;
+  pTab = pItem->pSTab;
   if( IsVirtual(pTab) ) return 0;
   if( pItem->fg.isIndexedBy || pItem->fg.notIndexed ){
     testcase( pItem->fg.isIndexedBy );
@@ -168017,6 +170857,7 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin(
     WhereTerm *pTerm, *pEnd;
     SrcItem *pItem;
     WhereLoop *pLoop;
+    Bitmask m1;
     pLoop = pWInfo->a[i].pWLoop;
     pItem = &pWInfo->pTabList->a[pLoop->iTab];
     if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ) continue;
@@ -168037,13 +170878,16 @@
      }
      if( hasRightJoin
       && ExprHasProperty(pTerm->pExpr, EP_InnerON)
-      && pTerm->pExpr->w.iJoin==pItem->iCursor
+      && NEVER(pTerm->pExpr->w.iJoin==pItem->iCursor)
      ){
        break;  /* restriction (5) */
      }
    }
    if( pTerm<pEnd ) continue;
-    WHERETRACE(0xffffffff, ("-> drop loop %c not used\n", pLoop->cId));
+    WHERETRACE(0xffffffff,("-> omit unused FROM-clause term %c\n",pLoop->cId));
+    m1 = MASKBIT(i)-1;
+    testcase( ((pWInfo->revMask>>1) & ~m1)!=0 );
+    pWInfo->revMask = (m1 & pWInfo->revMask) | ((pWInfo->revMask>>1) & ~m1);
    notReady &= ~pLoop->maskSelf;
    for(pTerm=pWInfo->sWC.a; pTerm<pEnd; pTerm++){
      if( (pTerm->prereqAll & pLoop->maskSelf)!=0 ){
@@ -168090,7 +170934,7 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful(
     WhereLoop *pLoop = pWInfo->a[i].pWLoop;
     const unsigned int reqFlags = (WHERE_SELFCULL|WHERE_COLUMN_EQ);
     SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab];
-    Table *pTab = pItem->pTab;
+    Table *pTab = pItem->pSTab;
     if( (pTab->tabFlags & TF_HasStat1)==0 ) break;
     pTab->tabFlags |= TF_MaybeReanalyze;
     if( i>=1
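The new revMask manipulation in whereOmitNoopJoin() above deletes bit i from the mask: bits below position i are kept in place and bits above i are shifted down by one, which keeps revMask aligned with the loop array after the omitted loop disappears. A standalone demo of that splice (only MASKBIT is copied from sqlite3.c; the rest is illustrative):

    /* Delete bit i from a bitmask, as the revMask update above does. */
    #include <stdio.h>
    typedef unsigned long long Bitmask;
    #define MASKBIT(n) (((Bitmask)1)<<(n))

    static Bitmask dropBit(Bitmask mask, int i){
      Bitmask m1 = MASKBIT(i)-1;               /* bits below position i */
      return (m1 & mask) | ((mask>>1) & ~m1);  /* splice out bit i */
    }

    int main(void){
      /* 0b10110 with bit 1 deleted becomes 0b1010 */
      printf("0x%llx\n", dropBit(0x16, 1));    /* prints 0xa */
      return 0;
    }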
@@ -168113,58 +170957,6 @@
   }
 }

-/*
-** Expression Node callback for sqlite3ExprCanReturnSubtype().
-**
-** Only a function call is able to return a subtype.  So if the node
-** is not a function call, return WRC_Prune immediately.
-**
-** A function call is able to return a subtype if it has the
-** SQLITE_RESULT_SUBTYPE property.
-**
-** Assume that every function is able to pass-through a subtype from
-** one of its argument (using sqlite3_result_value()).  Most functions
-** are not this way, but we don't have a mechanism to distinguish those
-** that are from those that are not, so assume they all work this way.
-** That means that if one of its arguments is another function and that
-** other function is able to return a subtype, then this function is
-** able to return a subtype.
-*/
-static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){
-  int n;
-  FuncDef *pDef;
-  sqlite3 *db;
-  if( pExpr->op!=TK_FUNCTION ){
-    return WRC_Prune;
-  }
-  assert( ExprUseXList(pExpr) );
-  db = pWalker->pParse->db;
-  n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0;
-  pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0);
-  if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){
-    pWalker->eCode = 1;
-    return WRC_Prune;
-  }
-  return WRC_Continue;
-}
-
-/*
-** Return TRUE if expression pExpr is able to return a subtype.
-**
-** A TRUE return does not guarantee that a subtype will be returned.
-** It only indicates that a subtype return is possible.  False positives
-** are acceptable as they only disable an optimization.  False negatives,
-** on the other hand, can lead to incorrect answers.
-*/
-static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){
-  Walker w;
-  memset(&w, 0, sizeof(w));
-  w.pParse = pParse;
-  w.xExprCallback = exprNodeCanReturnSubtype;
-  sqlite3WalkExpr(&w, pExpr);
-  return w.eCode;
-}
-
 /*
 ** The index pIdx is used by a query and contains one or more expressions.
 ** In other words pIdx is an index on an expression.  iIdxCur is the cursor
@@ -168198,12 +170990,6 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
       continue;
     }
     if( sqlite3ExprIsConstant(0,pExpr) ) continue;
-    if( pExpr->op==TK_FUNCTION && sqlite3ExprCanReturnSubtype(pParse,pExpr) ){
-      /* Functions that might set a subtype should not be replaced by the
-      ** value taken from an expression index since the index omits the
-      ** subtype.  https://sqlite.org/forum/forumpost/68d284c86b082c3e */
-      continue;
-    }
     p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr));
     if( p==0 ) break;
     p->pIENext = pParse->pIdxEpr;
@@ -168246,8 +171032,8 @@ static SQLITE_NOINLINE void whereReverseScanOrder(WhereInfo *pWInfo){
     SrcItem *pItem = &pWInfo->pTabList->a[ii];
     if( !pItem->fg.isCte
      || pItem->u2.pCteUse->eM10d!=M10d_Yes
-     || NEVER(pItem->pSelect==0)
-     || pItem->pSelect->pOrderBy==0
+     || NEVER(pItem->fg.isSubquery==0)
+     || pItem->u4.pSubq->pSelect->pOrderBy==0
     ){
       pWInfo->revMask |= MASKBIT(ii);
     }
@@ -168411,10 +171197,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
   ** field (type Bitmask) it must be aligned on an 8-byte boundary on
   ** some architectures. Hence the ROUND8() below.
   */
-  nByteWInfo = ROUND8P(sizeof(WhereInfo));
-  if( nTabList>1 ){
-    nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel));
-  }
+  nByteWInfo = SZ_WHEREINFO(nTabList);
   pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop));
   if( db->mallocFailed ){
     sqlite3DbFree(db, pWInfo);
@@ -168626,12 +171409,13 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
     if( db->mallocFailed ) goto whereBeginError;
     if( pWInfo->pOrderBy ){
       whereInterstageHeuristic(pWInfo);
-      wherePathSolver(pWInfo, pWInfo->nRowOut+1);
+      wherePathSolver(pWInfo, pWInfo->nRowOut<0 ? 1 : pWInfo->nRowOut+1);
       if( db->mallocFailed ) goto whereBeginError;
     }
   }

   /* TUNING: Assume that a DISTINCT clause on a subquery reduces
-  ** the output size by a factor of 8 (LogEst -30).
+  ** the output size by a factor of 8 (LogEst -30).  Search for
+  ** tag-20250414a to see other cases.
   */
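The deleted sqlite3ExprCanReturnSubtype() above is an instance of SQLite's Walker pattern: a callback visits each expression node and returns WRC_Prune to stop descending or WRC_Continue to visit the children, recording its verdict in eCode. A self-contained sketch of that pattern using hypothetical stand-in types, not SQLite's Walker/Expr:

    /* Minimal tree walk with prune/continue semantics (illustrative only). */
    #include <stdio.h>

    enum { WRC_CONTINUE, WRC_PRUNE };

    typedef struct Node { int op; struct Node *pLeft, *pRight; } Node;
    typedef struct { int eCode; } Walk;

    static int cb(Walk *w, Node *p){
      if( p->op==1 ){ w->eCode = 1; return WRC_PRUNE; }  /* node of interest */
      return WRC_CONTINUE;
    }

    static void walk(Walk *w, Node *p){
      if( p==0 || cb(w, p)==WRC_PRUNE ) return;  /* prune skips the subtree */
      walk(w, p->pLeft);
      walk(w, p->pRight);
    }

    int main(void){
      Node leaf = {1, 0, 0}, root = {0, &leaf, 0};
      Walk w = {0};
      walk(&w, &root);
      printf("%d\n", w.eCode);   /* prints 1: the walk flagged a node */
      return 0;
    }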
   if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 ){
     WHERETRACE(0x0080,("nRowOut reduced from %d to %d due to DISTINCT\n",
@@ -168650,7 +171434,8 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
   assert( db->mallocFailed==0 );
 #ifdef WHERETRACE_ENABLED
   if( sqlite3WhereTrace ){
-    sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut);
+    sqlite3DebugPrintf("---- Solution cost=%d, nRow=%d",
+                       pWInfo->rTotalCost, pWInfo->nRowOut);
     if( pWInfo->nOBSat>0 ){
       sqlite3DebugPrintf(" ORDERBY=%d,0x%llx", pWInfo->nOBSat, pWInfo->revMask);
     }
@@ -168737,15 +171522,15 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
   if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 ){
     int wsFlags = pWInfo->a[0].pWLoop->wsFlags;
     int bOnerow = (wsFlags & WHERE_ONEROW)!=0;
-    assert( !(wsFlags & WHERE_VIRTUALTABLE) || IsVirtual(pTabList->a[0].pTab) );
+    assert( !(wsFlags&WHERE_VIRTUALTABLE) || IsVirtual(pTabList->a[0].pSTab) );
     if( bOnerow || (
      0!=(wctrlFlags & WHERE_ONEPASS_MULTIROW)
-     && !IsVirtual(pTabList->a[0].pTab)
+     && !IsVirtual(pTabList->a[0].pSTab)
      && (0==(wsFlags & WHERE_MULTI_OR) || (wctrlFlags & WHERE_DUPLICATES_OK))
      && OptimizationEnabled(db, SQLITE_OnePass)
     )){
       pWInfo->eOnePass = bOnerow ? ONEPASS_SINGLE : ONEPASS_MULTI;
-      if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){
+      if( HasRowid(pTabList->a[0].pSTab) && (wsFlags & WHERE_IDX_ONLY) ){
         if( wctrlFlags & WHERE_ONEPASS_MULTIROW ){
           bFordelete = OPFLAG_FORDELETE;
         }
@@ -168763,7 +171548,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
     SrcItem *pTabItem;
     pTabItem = &pTabList->a[pLevel->iFrom];
-    pTab = pTabItem->pTab;
+    pTab = pTabItem->pSTab;
     iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
     pLoop = pLevel->pWLoop;
     if( (pTab->tabFlags & TF_Ephemeral)!=0 || IsView(pTab) ){
@@ -168834,7 +171619,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
       iIndexCur = pLevel->iTabCur;
       op = 0;
     }else if( pWInfo->eOnePass!=ONEPASS_OFF ){
-      Index *pJ = pTabItem->pTab->pIndex;
+      Index *pJ = pTabItem->pSTab->pIndex;
       iIndexCur = iAuxArg;
       assert( wctrlFlags & WHERE_ONEPASS_DESIRED );
       while( ALWAYS(pJ) && pJ!=pIx ){
@@ -168901,7 +171686,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
       sqlite3VdbeAddOp2(v, OP_Blob, 65536, pRJ->regBloom);
       pRJ->regReturn = ++pParse->nMem;
       sqlite3VdbeAddOp2(v, OP_Null, 0, pRJ->regReturn);
-      assert( pTab==pTabItem->pTab );
+      assert( pTab==pTabItem->pSTab );
       if( HasRowid(pTab) ){
         KeyInfo *pInfo;
         sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRJ->iMatch, 1);
@@ -168940,13 +171725,18 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
     wsFlags = pLevel->pWLoop->wsFlags;
     pSrc = &pTabList->a[pLevel->iFrom];
     if( pSrc->fg.isMaterialized ){
-      if( pSrc->fg.isCorrelated ){
-        sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub);
+      Subquery *pSubq;
+      int iOnce = 0;
+      assert( pSrc->fg.isSubquery );
+      pSubq = pSrc->u4.pSubq;
+      if( pSrc->fg.isCorrelated==0 ){
+        iOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
       }else{
-        int iOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
-        sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub);
-        sqlite3VdbeJumpHere(v, iOnce);
+        iOnce = 0;
       }
+      sqlite3VdbeAddOp2(v, OP_Gosub, pSubq->regReturn, pSubq->addrFillSub);
+      VdbeComment((v, "materialize %!S", pSrc));
+      if( iOnce ) sqlite3VdbeJumpHere(v, iOnce);
     }
     assert( pTabList == pWInfo->pTabList );
     if( (wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))!=0 ){
@@ -169006,6 +171796,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
 ){
   if( (db->flags & SQLITE_VdbeAddopTrace)==0 ) return;
   sqlite3VdbePrintOp(0, pc, pOp);
+  sqlite3ShowWhereTerm(0);  /* So compiler won't complain about unused func */
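The materialization hunk above now emits the OP_Gosub unconditionally and guards it with OP_Once only when the subquery is not correlated, so an uncorrelated subquery is filled once while a correlated one must be refilled for every outer row. A rough standalone analogue of that control flow (fillSub() and scanStep() are hypothetical stand-ins for the VDBE mechanics):

    #include <stdio.h>

    static int materializeCount = 0;
    static void fillSub(void){ materializeCount++; }  /* the OP_Gosub target */

    static void scanStep(int isCorrelated, int *pDone){
      if( isCorrelated==0 ){      /* OP_Once: skip refills after the first */
        if( *pDone ) return;
        *pDone = 1;
      }
      fillSub();
    }

    int main(void){
      int done = 0;
      for(int i=0; i<3; i++) scanStep(0, &done);  /* uncorrelated */
      printf("%d\n", materializeCount);           /* prints 1 */
      done = 0; materializeCount = 0;
      for(int i=0; i<3; i++) scanStep(1, &done);  /* correlated */
      printf("%d\n", materializeCount);           /* prints 3 */
      return 0;
    }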
 }
 #endif
@@ -169159,9 +171950,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
       assert( pLevel->iTabCur==pSrc->iCursor );
       if( pSrc->fg.viaCoroutine ){
         int m, n;
-        n = pSrc->regResult;
-        assert( pSrc->pTab!=0 );
-        m = pSrc->pTab->nCol;
+        assert( pSrc->fg.isSubquery );
+        n = pSrc->u4.pSubq->regResult;
+        assert( pSrc->pSTab!=0 );
+        m = pSrc->pSTab->nCol;
         sqlite3VdbeAddOp3(v, OP_Null, 0, n, n+m-1);
       }
       sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur);
@@ -169185,7 +171977,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
       sqlite3VdbeJumpHere(v, addr);
     }
     VdbeModuleComment((v, "End WHERE-loop%d: %s", i,
-                       pWInfo->pTabList->a[pLevel->iFrom].pTab->zName));
+                       pWInfo->pTabList->a[pLevel->iFrom].pSTab->zName));
   }

   assert( pWInfo->nLevel<=pTabList->nSrc );
@@ -169194,7 +171986,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
     VdbeOp *pOp, *pLastOp;
     Index *pIdx = 0;
     SrcItem *pTabItem = &pTabList->a[pLevel->iFrom];
-    Table *pTab = pTabItem->pTab;
+    Table *pTab = pTabItem->pSTab;
     assert( pTab!=0 );
     pLoop = pLevel->pWLoop;
@@ -169213,9 +172005,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
     */
     if( pTabItem->fg.viaCoroutine ){
       testcase( pParse->db->mallocFailed );
-      assert( pTabItem->regResult>=0 );
+      assert( pTabItem->fg.isSubquery );
+      assert( pTabItem->u4.pSubq->regResult>=0 );
       translateColumnToCopy(pParse, pLevel->addrBody, pLevel->iTabCur,
-                            pTabItem->regResult, 0);
+                            pTabItem->u4.pSubq->regResult, 0);
       continue;
     }
@@ -169303,14 +172096,28 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
             pOp->p2 = x;
             pOp->p1 = pLevel->iIdxCur;
             OpcodeRewriteTrace(db, k, pOp);
-          }else{
-            /* Unable to translate the table reference into an index
-            ** reference.  Verify that this is harmless - that the
-            ** table being referenced really is open.
-            */
+          }else if( pLoop->wsFlags & (WHERE_IDX_ONLY|WHERE_EXPRIDX) ){
             if( pLoop->wsFlags & WHERE_IDX_ONLY ){
+              /* An error. pLoop is supposed to be a covering index loop,
+              ** and yet the VM code refers to a column of the table that
+              ** is not part of the index. */
               sqlite3ErrorMsg(pParse, "internal query planner error");
               pParse->rc = SQLITE_INTERNAL;
+            }else{
+              /* The WHERE_EXPRIDX flag is set by the planner when it is likely
+              ** that pLoop is a covering index loop, but it is not possible
+              ** to be 100% sure. In this case, any OP_Explain opcode
+              ** corresponding to this loop describes the index as a "COVERING
+              ** INDEX". But, pOp proves that pLoop is not actually a covering
+              ** index loop. So clear the WHERE_EXPRIDX flag and rewrite the
+              ** text that accompanies the OP_Explain opcode, if any. */
+              pLoop->wsFlags &= ~WHERE_EXPRIDX;
+              sqlite3WhereAddExplainText(pParse,
+                  pLevel->addrBody-1,
+                  pTabList,
+                  pLevel,
+                  pWInfo->wctrlFlags
+              );
             }
           }
         }else if( pOp->opcode==OP_Rowid ){
@@ -170257,7 +173064,7 @@ static ExprList *exprListAppendList(
       int iDummy;
       Expr *pSub;
       pSub = sqlite3ExprSkipCollateAndLikely(pDup);
-      if( sqlite3ExprIsInteger(pSub, &iDummy) ){
+      if( sqlite3ExprIsInteger(pSub, &iDummy, 0) ){
        pSub->op = TK_NULL;
        pSub->flags &= ~(EP_IntValue|EP_IsTrue|EP_IsFalse);
        pSub->u.zToken = 0;
@@ -170343,7 +173150,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
     p->pWhere = 0;
     p->pGroupBy = 0;
     p->pHaving = 0;
-    p->selFlags &= ~SF_Aggregate;
+    p->selFlags &= ~(u32)SF_Aggregate;
     p->selFlags |= SF_WinRewrite;
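The OP_Column rewrite above hinges on mapping a table column number to its slot within a covering index; when no slot exists and the loop was only tentatively marked WHERE_EXPRIDX, the flag is cleared instead of raising an error. A minimal sketch of such a lookup with made-up data (SQLite performs this through its Index structures, not a bare array like this):

    #include <stdio.h>

    /* A hypothetical 3-column index over table columns (2, 0, 5) */
    static const int aiColumn[] = {2, 0, 5};

    static int columnOfIndex(int iCol){
      for(int i=0; i<3; i++){
        if( aiColumn[i]==iCol ) return i;  /* index slot serving iCol */
      }
      return -1;  /* not covered: caller must keep the table reference */
    }

    int main(void){
      printf("%d\n", columnOfIndex(5));   /* prints 2: served by slot 2 */
      printf("%d\n", columnOfIndex(4));   /* prints -1: not a covering index */
      return 0;
    }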
     /* Create the ORDER BY clause for the sub-select.  This is the concatenation
@@ -170425,9 +173232,10 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
     assert( pSub!=0 || p->pSrc==0 ); /* Due to db->mallocFailed test inside
                                      ** of sqlite3DbMallocRawNN() called from
                                      ** sqlite3SrcListAppend() */
-    if( p->pSrc ){
+    if( p->pSrc==0 ){
+      sqlite3SelectDelete(db, pSub);
+    }else if( sqlite3SrcItemAttachSubquery(pParse, &p->pSrc->a[0], pSub, 0) ){
       Table *pTab2;
-      p->pSrc->a[0].pSelect = pSub;
       p->pSrc->a[0].fg.isCorrelated = 1;
       sqlite3SrcListAssignCursors(pParse, p->pSrc);
       pSub->selFlags |= SF_Expanded|SF_OrderByReqd;
@@ -170441,7 +173249,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
       }else{
         memcpy(pTab, pTab2, sizeof(Table));
         pTab->tabFlags |= TF_Ephemeral;
-        p->pSrc->a[0].pTab = pTab;
+        p->pSrc->a[0].pSTab = pTab;
         pTab = pTab2;
         memset(&w, 0, sizeof(w));
         w.xExprCallback = sqlite3WindowExtraAggFuncDepth;
@@ -170449,8 +173257,6 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
         w.xSelectCallback2 = sqlite3WalkerDepthDecrease;
         sqlite3WalkSelect(&w, pSub);
       }
-    }else{
-      sqlite3SelectDelete(db, pSub);
     }

     if( db->mallocFailed ) rc = SQLITE_NOMEM;
@@ -170737,10 +173543,15 @@ SQLITE_PRIVATE int sqlite3WindowCompare(
 ** and initialize registers and cursors used by sqlite3WindowCodeStep().
 */
 SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse *pParse, Select *pSelect){
-  int nEphExpr = pSelect->pSrc->a[0].pSelect->pEList->nExpr;
-  Window *pMWin = pSelect->pWin;
   Window *pWin;
-  Vdbe *v = sqlite3GetVdbe(pParse);
+  int nEphExpr;
+  Window *pMWin;
+  Vdbe *v;
+
+  assert( pSelect->pSrc->a[0].fg.isSubquery );
+  nEphExpr = pSelect->pSrc->a[0].u4.pSubq->pSelect->pEList->nExpr;
+  pMWin = pSelect->pWin;
+  v = sqlite3GetVdbe(pParse);

   sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pMWin->iEphCsr, nEphExpr);
   sqlite3VdbeAddOp2(v, OP_OpenDup, pMWin->iEphCsr+1, pMWin->iEphCsr);
@@ -171014,6 +173825,7 @@ static void windowAggStep(
   int regArg;
   int nArg = pWin->bExprArgs ? 0 : windowArgCount(pWin);
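For orientation, the sqlite3WindowRewrite() hunks above attach the rewritten sub-select with sqlite3SrcItemAttachSubquery() rather than assigning the pSelect pointer directly. The overall shape of the rewrite is roughly the following (a schematic reconstruction, not text quoted from sqlite3.c):

    /* Schematic only:
    **
    **   SELECT win(args) OVER (PARTITION BY p ORDER BY o) FROM t
    **
    ** is evaluated approximately as:
    **
    **   SELECT win(args) OVER (...) FROM
    **     (SELECT p, o, args FROM t ORDER BY p, o)
    **
    ** with the sub-select feeding the ephemeral cursors opened by
    ** sqlite3WindowCodeInit().
    */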
   int i;
+  int addrIf = 0;

   assert( bInverse==0 || pWin->eStart!=TK_UNBOUNDED );
@@ -171030,6 +173842,18 @@ static void windowAggStep(
   }
   regArg = reg;

+  if( pWin->pFilter ){
+    int regTmp;
+    assert( ExprUseXList(pWin->pOwner) );
+    assert( pWin->bExprArgs || !nArg ||nArg==pWin->pOwner->x.pList->nExpr );
+    assert( pWin->bExprArgs || nArg ||pWin->pOwner->x.pList==0 );
+    regTmp = sqlite3GetTempReg(pParse);
+    sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+nArg,regTmp);
+    addrIf = sqlite3VdbeAddOp3(v, OP_IfNot, regTmp, 0, 1);
+    VdbeCoverage(v);
+    sqlite3ReleaseTempReg(pParse, regTmp);
+  }
+
   if( pMWin->regStartRowid==0
    && (pFunc->funcFlags & SQLITE_FUNC_MINMAX)
    && (pWin->eStart!=TK_UNBOUNDED)
@@ -171049,25 +173873,13 @@ static void windowAggStep(
     }
     sqlite3VdbeJumpHere(v, addrIsNull);
   }else if( pWin->regApp ){
+    assert( pWin->pFilter==0 );
     assert( pFunc->zName==nth_valueName || pFunc->zName==first_valueName );
     assert( bInverse==0 || bInverse==1 );
     sqlite3VdbeAddOp2(v, OP_AddImm, pWin->regApp+1-bInverse, 1);
   }else if( pFunc->xSFunc!=noopStepFunc ){
-    int addrIf = 0;
-    if( pWin->pFilter ){
-      int regTmp;
-      assert( ExprUseXList(pWin->pOwner) );
-      assert( pWin->bExprArgs || !nArg ||nArg==pWin->pOwner->x.pList->nExpr );
-      assert( pWin->bExprArgs || nArg ||pWin->pOwner->x.pList==0 );
-      regTmp = sqlite3GetTempReg(pParse);
-      sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+nArg,regTmp);
-      addrIf = sqlite3VdbeAddOp3(v, OP_IfNot, regTmp, 0, 1);
-      VdbeCoverage(v);
-      sqlite3ReleaseTempReg(pParse, regTmp);
-    }
-
     if( pWin->bExprArgs ){
       int iOp = sqlite3VdbeCurrentAddr(v);
       int iEnd;
@@ -171094,12 +173906,13 @@ static void windowAggStep(
     sqlite3VdbeAddOp3(v, bInverse? OP_AggInverse : OP_AggStep,
                       bInverse, regArg, pWin->regAccum);
     sqlite3VdbeAppendP4(v, pFunc, P4_FUNCDEF);
-    sqlite3VdbeChangeP5(v, (u8)nArg);
+    sqlite3VdbeChangeP5(v, (u16)nArg);
     if( pWin->bExprArgs ){
       sqlite3ReleaseTempRange(pParse, regArg, nArg);
     }
-    if( addrIf ) sqlite3VdbeJumpHere(v, addrIf);
   }
+
+  if( addrIf ) sqlite3VdbeJumpHere(v, addrIf);
 }
 }
@@ -172137,7 +174950,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
   Vdbe *v = sqlite3GetVdbe(pParse);
   int csrWrite;                   /* Cursor used to write to eph. table */
   int csrInput = p->pSrc->a[0].iCursor;    /* Cursor of sub-select */
-  int nInput = p->pSrc->a[0].pTab->nCol;   /* Number of cols returned by sub */
+  int nInput = p->pSrc->a[0].pSTab->nCol;  /* Number of cols returned by sub */
   int iInput;                     /* To iterate through sub cols */
   int addrNe;                     /* Address of OP_Ne */
   int addrGosubFlush = 0;         /* Address of OP_Gosub to flush: */
@@ -172477,6 +175290,11 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(

 /* #include "sqliteInt.h" */

+/*
+** Verify that the pParse->isCreate field is set
+*/
+#define ASSERT_IS_CREATE assert(pParse->isCreate)
+
 /*
 ** Disable all error recovery processing in the parser push-down
 ** automaton.
@@ -172526,6 +175344,13 @@ struct TrigEvent { int a; IdList * b; };
 struct FrameBound     { int eType; Expr *pExpr; };

+/*
+** Generate a syntax error
+*/
+static void parserSyntaxError(Parse *pParse, Token *p){
+  sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", p);
+}
+
 /*
 ** Disable lookaside memory allocation for objects that might be
 ** shared across database connections.
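The windowAggStep() change above hoists the FILTER clause test so that the OP_IfNot jump guards every branch of the step logic, not just the generic xSFunc path. Functionally the VDBE ends up doing the equivalent of the loop below, a standalone analogue with made-up data:

    #include <stdio.h>

    int main(void){
      int value[]  = {3, 7, 2, 9};
      int filter[] = {1, 0, 1, 1};   /* precomputed FILTER result per row */
      int sum = 0;
      for(int i=0; i<4; i++){
        if( !filter[i] ) continue;   /* OP_IfNot: skip the aggregate step */
        sum += value[i];             /* the OP_AggStep / xStep call */
      }
      printf("%d\n", sum);           /* prints 14: the FILTER=0 row skipped */
      return 0;
    }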
@@ -172533,6 +175358,10 @@ struct FrameBound { int eType; Expr *pExpr; }; static void disableLookaside(Parse *pParse){ sqlite3 *db = pParse->db; pParse->disableLookaside++; +#ifdef SQLITE_DEBUG + pParse->isCreate = 1; +#endif + memset(&pParse->u1.cr, 0, sizeof(pParse->u1.cr)); DisableLookaside; } @@ -172734,132 +175563,132 @@ static void updateDeleteLimitError( #define TK_OR 43 #define TK_AND 44 #define TK_IS 45 -#define TK_MATCH 46 -#define TK_LIKE_KW 47 -#define TK_BETWEEN 48 -#define TK_IN 49 -#define TK_ISNULL 50 -#define TK_NOTNULL 51 -#define TK_NE 52 -#define TK_EQ 53 -#define TK_GT 54 -#define TK_LE 55 -#define TK_LT 56 -#define TK_GE 57 -#define TK_ESCAPE 58 -#define TK_ID 59 -#define TK_COLUMNKW 60 -#define TK_DO 61 -#define TK_FOR 62 -#define TK_IGNORE 63 -#define TK_INITIALLY 64 -#define TK_INSTEAD 65 -#define TK_NO 66 -#define TK_KEY 67 -#define TK_OF 68 -#define TK_OFFSET 69 -#define TK_PRAGMA 70 -#define TK_RAISE 71 -#define TK_RECURSIVE 72 -#define TK_REPLACE 73 -#define TK_RESTRICT 74 -#define TK_ROW 75 -#define TK_ROWS 76 -#define TK_TRIGGER 77 -#define TK_VACUUM 78 -#define TK_VIEW 79 -#define TK_VIRTUAL 80 -#define TK_WITH 81 -#define TK_NULLS 82 -#define TK_FIRST 83 -#define TK_LAST 84 -#define TK_CURRENT 85 -#define TK_FOLLOWING 86 -#define TK_PARTITION 87 -#define TK_PRECEDING 88 -#define TK_RANGE 89 -#define TK_UNBOUNDED 90 -#define TK_EXCLUDE 91 -#define TK_GROUPS 92 -#define TK_OTHERS 93 -#define TK_TIES 94 -#define TK_GENERATED 95 -#define TK_ALWAYS 96 -#define TK_MATERIALIZED 97 -#define TK_REINDEX 98 -#define TK_RENAME 99 -#define TK_CTIME_KW 100 -#define TK_ANY 101 -#define TK_BITAND 102 -#define TK_BITOR 103 -#define TK_LSHIFT 104 -#define TK_RSHIFT 105 -#define TK_PLUS 106 -#define TK_MINUS 107 -#define TK_STAR 108 -#define TK_SLASH 109 -#define TK_REM 110 -#define TK_CONCAT 111 -#define TK_PTR 112 -#define TK_COLLATE 113 -#define TK_BITNOT 114 -#define TK_ON 115 -#define TK_INDEXED 116 -#define TK_STRING 117 -#define TK_JOIN_KW 118 -#define TK_CONSTRAINT 119 -#define TK_DEFAULT 120 -#define TK_NULL 121 -#define TK_PRIMARY 122 -#define TK_UNIQUE 123 -#define TK_CHECK 124 -#define TK_REFERENCES 125 -#define TK_AUTOINCR 126 -#define TK_INSERT 127 -#define TK_DELETE 128 -#define TK_UPDATE 129 -#define TK_SET 130 -#define TK_DEFERRABLE 131 -#define TK_FOREIGN 132 -#define TK_DROP 133 -#define TK_UNION 134 -#define TK_ALL 135 -#define TK_EXCEPT 136 -#define TK_INTERSECT 137 -#define TK_SELECT 138 -#define TK_VALUES 139 -#define TK_DISTINCT 140 -#define TK_DOT 141 -#define TK_FROM 142 -#define TK_JOIN 143 -#define TK_USING 144 -#define TK_ORDER 145 -#define TK_GROUP 146 -#define TK_HAVING 147 -#define TK_LIMIT 148 -#define TK_WHERE 149 -#define TK_RETURNING 150 -#define TK_INTO 151 -#define TK_NOTHING 152 -#define TK_FLOAT 153 -#define TK_BLOB 154 -#define TK_INTEGER 155 -#define TK_VARIABLE 156 -#define TK_CASE 157 -#define TK_WHEN 158 -#define TK_THEN 159 -#define TK_ELSE 160 -#define TK_INDEX 161 -#define TK_ALTER 162 -#define TK_ADD 163 -#define TK_WINDOW 164 -#define TK_OVER 165 -#define TK_FILTER 166 -#define TK_COLUMN 167 -#define TK_AGG_FUNCTION 168 -#define TK_AGG_COLUMN 169 -#define TK_TRUEFALSE 170 -#define TK_ISNOT 171 +#define TK_ISNOT 46 +#define TK_MATCH 47 +#define TK_LIKE_KW 48 +#define TK_BETWEEN 49 +#define TK_IN 50 +#define TK_ISNULL 51 +#define TK_NOTNULL 52 +#define TK_NE 53 +#define TK_EQ 54 +#define TK_GT 55 +#define TK_LE 56 +#define TK_LT 57 +#define TK_GE 58 +#define TK_ESCAPE 59 +#define TK_ID 60 +#define TK_COLUMNKW 61 +#define TK_DO 
62 +#define TK_FOR 63 +#define TK_IGNORE 64 +#define TK_INITIALLY 65 +#define TK_INSTEAD 66 +#define TK_NO 67 +#define TK_KEY 68 +#define TK_OF 69 +#define TK_OFFSET 70 +#define TK_PRAGMA 71 +#define TK_RAISE 72 +#define TK_RECURSIVE 73 +#define TK_REPLACE 74 +#define TK_RESTRICT 75 +#define TK_ROW 76 +#define TK_ROWS 77 +#define TK_TRIGGER 78 +#define TK_VACUUM 79 +#define TK_VIEW 80 +#define TK_VIRTUAL 81 +#define TK_WITH 82 +#define TK_NULLS 83 +#define TK_FIRST 84 +#define TK_LAST 85 +#define TK_CURRENT 86 +#define TK_FOLLOWING 87 +#define TK_PARTITION 88 +#define TK_PRECEDING 89 +#define TK_RANGE 90 +#define TK_UNBOUNDED 91 +#define TK_EXCLUDE 92 +#define TK_GROUPS 93 +#define TK_OTHERS 94 +#define TK_TIES 95 +#define TK_GENERATED 96 +#define TK_ALWAYS 97 +#define TK_MATERIALIZED 98 +#define TK_REINDEX 99 +#define TK_RENAME 100 +#define TK_CTIME_KW 101 +#define TK_ANY 102 +#define TK_BITAND 103 +#define TK_BITOR 104 +#define TK_LSHIFT 105 +#define TK_RSHIFT 106 +#define TK_PLUS 107 +#define TK_MINUS 108 +#define TK_STAR 109 +#define TK_SLASH 110 +#define TK_REM 111 +#define TK_CONCAT 112 +#define TK_PTR 113 +#define TK_COLLATE 114 +#define TK_BITNOT 115 +#define TK_ON 116 +#define TK_INDEXED 117 +#define TK_STRING 118 +#define TK_JOIN_KW 119 +#define TK_CONSTRAINT 120 +#define TK_DEFAULT 121 +#define TK_NULL 122 +#define TK_PRIMARY 123 +#define TK_UNIQUE 124 +#define TK_CHECK 125 +#define TK_REFERENCES 126 +#define TK_AUTOINCR 127 +#define TK_INSERT 128 +#define TK_DELETE 129 +#define TK_UPDATE 130 +#define TK_SET 131 +#define TK_DEFERRABLE 132 +#define TK_FOREIGN 133 +#define TK_DROP 134 +#define TK_UNION 135 +#define TK_ALL 136 +#define TK_EXCEPT 137 +#define TK_INTERSECT 138 +#define TK_SELECT 139 +#define TK_VALUES 140 +#define TK_DISTINCT 141 +#define TK_DOT 142 +#define TK_FROM 143 +#define TK_JOIN 144 +#define TK_USING 145 +#define TK_ORDER 146 +#define TK_GROUP 147 +#define TK_HAVING 148 +#define TK_LIMIT 149 +#define TK_WHERE 150 +#define TK_RETURNING 151 +#define TK_INTO 152 +#define TK_NOTHING 153 +#define TK_FLOAT 154 +#define TK_BLOB 155 +#define TK_INTEGER 156 +#define TK_VARIABLE 157 +#define TK_CASE 158 +#define TK_WHEN 159 +#define TK_THEN 160 +#define TK_ELSE 161 +#define TK_INDEX 162 +#define TK_ALTER 163 +#define TK_ADD 164 +#define TK_WINDOW 165 +#define TK_OVER 166 +#define TK_FILTER 167 +#define TK_COLUMN 168 +#define TK_AGG_FUNCTION 169 +#define TK_AGG_COLUMN 170 +#define TK_TRUEFALSE 171 #define TK_FUNCTION 172 #define TK_UPLUS 173 #define TK_UMINUS 174 @@ -172873,7 +175702,8 @@ static void updateDeleteLimitError( #define TK_ERROR 182 #define TK_QNUMBER 183 #define TK_SPACE 184 -#define TK_ILLEGAL 185 +#define TK_COMMENT 185 +#define TK_ILLEGAL 186 #endif /**************** End token definitions ***************************************/ @@ -172938,31 +175768,31 @@ static void updateDeleteLimitError( #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 322 +#define YYNOCODE 323 #define YYACTIONTYPE unsigned short int -#define YYWILDCARD 101 +#define YYWILDCARD 102 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - ExprList* yy14; - With* yy59; - Cte* yy67; - Upsert* yy122; - IdList* yy132; - int yy144; - const char* yy168; - SrcList* yy203; - Window* yy211; - OnOrUsing yy269; - struct TrigEvent yy286; - struct {int value; int mask;} yy383; - u32 yy391; - TriggerStep* yy427; - Expr* yy454; - u8 yy462; - struct FrameBound yy509; - 
Select* yy555; + u32 yy9; + struct TrigEvent yy28; + With* yy125; + IdList* yy204; + struct FrameBound yy205; + TriggerStep* yy319; + const char* yy342; + Cte* yy361; + ExprList* yy402; + Upsert* yy403; + OnOrUsing yy421; + u8 yy444; + struct {int value; int mask;} yy481; + Window* yy483; + int yy502; + SrcList* yy563; + Expr* yy590; + Select* yy637; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -172984,7 +175814,7 @@ typedef union { #define YYNSTATE 583 #define YYNRULE 409 #define YYNRULE_WITH_ACTION 344 -#define YYNTOKEN 186 +#define YYNTOKEN 187 #define YY_MAX_SHIFT 582 #define YY_MIN_SHIFTREDUCE 845 #define YY_MAX_SHIFTREDUCE 1253 @@ -172993,8 +175823,8 @@ typedef union { #define YY_NO_ACTION 1256 #define YY_MIN_REDUCE 1257 #define YY_MAX_REDUCE 1665 -#define YY_MIN_DSTRCTR 205 -#define YY_MAX_DSTRCTR 319 +#define YY_MIN_DSTRCTR 206 +#define YY_MAX_DSTRCTR 320 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -173077,569 +175907,582 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2142) +#define YY_ACTTAB_COUNT (2207) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 576, 128, 125, 232, 1622, 549, 576, 1290, 1281, 576, - /* 10 */ 328, 576, 1300, 212, 576, 128, 125, 232, 578, 412, - /* 20 */ 578, 391, 1542, 51, 51, 523, 405, 1293, 529, 51, - /* 30 */ 51, 983, 51, 51, 81, 81, 1107, 61, 61, 984, - /* 40 */ 1107, 1292, 380, 135, 136, 90, 1228, 1228, 1063, 1066, - /* 50 */ 1053, 1053, 133, 133, 134, 134, 134, 134, 1577, 412, - /* 60 */ 287, 287, 7, 287, 287, 422, 1050, 1050, 1064, 1067, - /* 70 */ 289, 556, 492, 573, 524, 561, 573, 497, 561, 482, - /* 80 */ 530, 262, 229, 135, 136, 90, 1228, 1228, 1063, 1066, - /* 90 */ 1053, 1053, 133, 133, 134, 134, 134, 134, 128, 125, - /* 100 */ 232, 1506, 132, 132, 132, 132, 131, 131, 130, 130, - /* 110 */ 130, 129, 126, 450, 1204, 1255, 1, 1, 582, 2, - /* 120 */ 1259, 1571, 420, 1582, 379, 320, 1174, 153, 1174, 1584, - /* 130 */ 412, 378, 1582, 543, 1341, 330, 111, 570, 570, 570, - /* 140 */ 293, 1054, 132, 132, 132, 132, 131, 131, 130, 130, - /* 150 */ 130, 129, 126, 450, 135, 136, 90, 1228, 1228, 1063, - /* 160 */ 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, 287, - /* 170 */ 287, 1204, 1205, 1204, 255, 287, 287, 510, 507, 506, - /* 180 */ 137, 455, 573, 212, 561, 447, 446, 505, 573, 1616, - /* 190 */ 561, 134, 134, 134, 134, 127, 400, 243, 132, 132, - /* 200 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 450, - /* 210 */ 282, 471, 345, 132, 132, 132, 132, 131, 131, 130, - /* 220 */ 130, 130, 129, 126, 450, 574, 155, 936, 936, 454, - /* 230 */ 227, 521, 1236, 412, 1236, 134, 134, 134, 134, 132, - /* 240 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, - /* 250 */ 450, 130, 130, 130, 129, 126, 450, 135, 136, 90, - /* 260 */ 1228, 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, - /* 270 */ 134, 134, 128, 125, 232, 450, 576, 412, 397, 1249, - /* 280 */ 180, 92, 93, 132, 132, 132, 132, 131, 131, 130, - /* 290 */ 130, 130, 129, 126, 450, 381, 387, 1204, 383, 81, - /* 300 */ 81, 135, 136, 90, 1228, 1228, 1063, 1066, 1053, 1053, - /* 310 */ 133, 133, 134, 134, 134, 134, 132, 132, 132, 132, - /* 320 */ 131, 131, 130, 130, 130, 129, 126, 450, 131, 131, - /* 330 */ 130, 130, 130, 129, 126, 450, 556, 1204, 302, 319, - /* 340 */ 567, 121, 568, 480, 4, 555, 1149, 1657, 1628, 1657, - /* 350 */ 45, 128, 
125, 232, 1204, 1205, 1204, 1250, 571, 1169, - /* 360 */ 132, 132, 132, 132, 131, 131, 130, 130, 130, 129, - /* 370 */ 126, 450, 1169, 287, 287, 1169, 1019, 576, 422, 1019, - /* 380 */ 412, 451, 1602, 582, 2, 1259, 573, 44, 561, 95, - /* 390 */ 320, 110, 153, 565, 1204, 1205, 1204, 522, 522, 1341, - /* 400 */ 81, 81, 7, 44, 135, 136, 90, 1228, 1228, 1063, - /* 410 */ 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, 295, - /* 420 */ 1149, 1658, 1040, 1658, 1204, 1147, 319, 567, 119, 119, - /* 430 */ 343, 466, 331, 343, 287, 287, 120, 556, 451, 577, - /* 440 */ 451, 1169, 1169, 1028, 319, 567, 438, 573, 210, 561, - /* 450 */ 1339, 1451, 546, 531, 1169, 1169, 1598, 1169, 1169, 416, - /* 460 */ 319, 567, 243, 132, 132, 132, 132, 131, 131, 130, - /* 470 */ 130, 130, 129, 126, 450, 1028, 1028, 1030, 1031, 35, - /* 480 */ 44, 1204, 1205, 1204, 472, 287, 287, 1328, 412, 1307, - /* 490 */ 372, 1595, 359, 225, 454, 1204, 195, 1328, 573, 1147, - /* 500 */ 561, 1333, 1333, 274, 576, 1188, 576, 340, 46, 196, - /* 510 */ 537, 217, 135, 136, 90, 1228, 1228, 1063, 1066, 1053, - /* 520 */ 1053, 133, 133, 134, 134, 134, 134, 19, 19, 19, - /* 530 */ 19, 412, 581, 1204, 1259, 511, 1204, 319, 567, 320, - /* 540 */ 944, 153, 425, 491, 430, 943, 1204, 488, 1341, 1450, - /* 550 */ 532, 1277, 1204, 1205, 1204, 135, 136, 90, 1228, 1228, - /* 560 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 570 */ 575, 132, 132, 132, 132, 131, 131, 130, 130, 130, - /* 580 */ 129, 126, 450, 287, 287, 528, 287, 287, 372, 1595, - /* 590 */ 1204, 1205, 1204, 1204, 1205, 1204, 573, 486, 561, 573, - /* 600 */ 889, 561, 412, 1204, 1205, 1204, 886, 40, 22, 22, - /* 610 */ 220, 243, 525, 1449, 132, 132, 132, 132, 131, 131, - /* 620 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 630 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 640 */ 134, 412, 180, 454, 1204, 879, 255, 287, 287, 510, - /* 650 */ 507, 506, 372, 1595, 1568, 1331, 1331, 576, 889, 505, - /* 660 */ 573, 44, 561, 559, 1207, 135, 136, 90, 1228, 1228, - /* 670 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 680 */ 81, 81, 422, 576, 377, 132, 132, 132, 132, 131, - /* 690 */ 131, 130, 130, 130, 129, 126, 450, 297, 287, 287, - /* 700 */ 460, 1204, 1205, 1204, 1204, 534, 19, 19, 448, 448, - /* 710 */ 448, 573, 412, 561, 230, 436, 1187, 535, 319, 567, - /* 720 */ 363, 432, 1207, 1435, 132, 132, 132, 132, 131, 131, - /* 730 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 740 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 750 */ 134, 412, 211, 949, 1169, 1041, 1110, 1110, 494, 547, - /* 760 */ 547, 1204, 1205, 1204, 7, 539, 1570, 1169, 376, 576, - /* 770 */ 1169, 5, 1204, 486, 3, 135, 136, 90, 1228, 1228, - /* 780 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 790 */ 576, 513, 19, 19, 427, 132, 132, 132, 132, 131, - /* 800 */ 131, 130, 130, 130, 129, 126, 450, 305, 1204, 433, - /* 810 */ 225, 1204, 385, 19, 19, 273, 290, 371, 516, 366, - /* 820 */ 515, 260, 412, 538, 1568, 549, 1024, 362, 437, 1204, - /* 830 */ 1205, 1204, 902, 1552, 132, 132, 132, 132, 131, 131, - /* 840 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 850 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 860 */ 134, 412, 1435, 514, 1281, 1204, 1205, 1204, 1204, 1205, - /* 870 */ 1204, 903, 48, 342, 1568, 1568, 1279, 1627, 1568, 911, - /* 880 */ 576, 129, 126, 450, 110, 135, 136, 90, 1228, 1228, - /* 890 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 900 */ 265, 576, 459, 19, 19, 132, 
132, 132, 132, 131, - /* 910 */ 131, 130, 130, 130, 129, 126, 450, 1345, 204, 576, - /* 920 */ 459, 458, 50, 47, 19, 19, 49, 434, 1105, 573, - /* 930 */ 497, 561, 412, 428, 108, 1224, 1569, 1554, 376, 205, - /* 940 */ 550, 550, 81, 81, 132, 132, 132, 132, 131, 131, - /* 950 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 960 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 970 */ 134, 480, 576, 1204, 576, 1541, 412, 1435, 969, 315, - /* 980 */ 1659, 398, 284, 497, 969, 893, 1569, 1569, 376, 376, - /* 990 */ 1569, 461, 376, 1224, 459, 80, 80, 81, 81, 497, - /* 1000 */ 374, 114, 90, 1228, 1228, 1063, 1066, 1053, 1053, 133, - /* 1010 */ 133, 134, 134, 134, 134, 132, 132, 132, 132, 131, - /* 1020 */ 131, 130, 130, 130, 129, 126, 450, 1204, 1505, 576, - /* 1030 */ 1204, 1205, 1204, 1366, 316, 486, 281, 281, 497, 431, - /* 1040 */ 557, 288, 288, 402, 1340, 471, 345, 298, 429, 573, - /* 1050 */ 576, 561, 81, 81, 573, 374, 561, 971, 386, 132, - /* 1060 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, - /* 1070 */ 450, 231, 117, 81, 81, 287, 287, 231, 287, 287, - /* 1080 */ 576, 1511, 576, 1336, 1204, 1205, 1204, 139, 573, 556, - /* 1090 */ 561, 573, 412, 561, 441, 456, 969, 213, 558, 1511, - /* 1100 */ 1513, 1550, 969, 143, 143, 145, 145, 1368, 314, 478, - /* 1110 */ 444, 970, 412, 850, 851, 852, 135, 136, 90, 1228, - /* 1120 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1130 */ 134, 357, 412, 397, 1148, 304, 135, 136, 90, 1228, - /* 1140 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1150 */ 134, 1575, 323, 6, 862, 7, 135, 124, 90, 1228, - /* 1160 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1170 */ 134, 409, 408, 1511, 212, 132, 132, 132, 132, 131, - /* 1180 */ 131, 130, 130, 130, 129, 126, 450, 411, 118, 1204, - /* 1190 */ 116, 10, 352, 265, 355, 132, 132, 132, 132, 131, - /* 1200 */ 131, 130, 130, 130, 129, 126, 450, 576, 324, 306, - /* 1210 */ 576, 306, 1250, 469, 158, 132, 132, 132, 132, 131, - /* 1220 */ 131, 130, 130, 130, 129, 126, 450, 207, 1224, 1126, - /* 1230 */ 65, 65, 470, 66, 66, 412, 447, 446, 882, 531, - /* 1240 */ 335, 258, 257, 256, 1127, 1233, 1204, 1205, 1204, 327, - /* 1250 */ 1235, 874, 159, 576, 16, 480, 1085, 1040, 1234, 1128, - /* 1260 */ 136, 90, 1228, 1228, 1063, 1066, 1053, 1053, 133, 133, - /* 1270 */ 134, 134, 134, 134, 1029, 576, 81, 81, 1028, 1040, - /* 1280 */ 922, 576, 463, 1236, 576, 1236, 1224, 502, 107, 1435, - /* 1290 */ 923, 6, 576, 410, 1498, 882, 1029, 480, 21, 21, - /* 1300 */ 1028, 332, 1380, 334, 53, 53, 497, 81, 81, 874, - /* 1310 */ 1028, 1028, 1030, 445, 259, 19, 19, 533, 132, 132, - /* 1320 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 450, - /* 1330 */ 551, 301, 1028, 1028, 1030, 107, 532, 545, 121, 568, - /* 1340 */ 1188, 4, 1126, 1576, 449, 576, 462, 7, 1282, 418, - /* 1350 */ 462, 350, 1435, 576, 518, 571, 544, 1127, 121, 568, - /* 1360 */ 442, 4, 1188, 464, 533, 1180, 1223, 9, 67, 67, - /* 1370 */ 487, 576, 1128, 303, 410, 571, 54, 54, 451, 576, - /* 1380 */ 123, 944, 576, 417, 576, 333, 943, 1379, 576, 236, - /* 1390 */ 565, 576, 1574, 564, 68, 68, 7, 576, 451, 362, - /* 1400 */ 419, 182, 69, 69, 541, 70, 70, 71, 71, 540, - /* 1410 */ 565, 72, 72, 484, 55, 55, 473, 1180, 296, 1040, - /* 1420 */ 56, 56, 296, 493, 541, 119, 119, 410, 1573, 542, - /* 1430 */ 569, 418, 7, 120, 1244, 451, 577, 451, 465, 1040, - /* 1440 */ 1028, 576, 1557, 552, 476, 119, 119, 527, 259, 121, - /* 1450 */ 568, 240, 4, 120, 576, 451, 577, 451, 576, 477, - /* 1460 */ 1028, 576, 156, 576, 
57, 57, 571, 576, 286, 229, - /* 1470 */ 410, 336, 1028, 1028, 1030, 1031, 35, 59, 59, 219, - /* 1480 */ 983, 60, 60, 220, 73, 73, 74, 74, 984, 451, - /* 1490 */ 75, 75, 1028, 1028, 1030, 1031, 35, 96, 216, 291, - /* 1500 */ 552, 565, 1188, 318, 395, 395, 394, 276, 392, 576, - /* 1510 */ 485, 859, 474, 1311, 410, 541, 576, 417, 1530, 1144, - /* 1520 */ 540, 399, 1188, 292, 237, 1153, 326, 38, 23, 576, - /* 1530 */ 1040, 576, 20, 20, 325, 299, 119, 119, 164, 76, - /* 1540 */ 76, 1529, 121, 568, 120, 4, 451, 577, 451, 203, - /* 1550 */ 576, 1028, 141, 141, 142, 142, 576, 322, 39, 571, - /* 1560 */ 341, 1021, 110, 264, 239, 901, 900, 423, 242, 908, - /* 1570 */ 909, 370, 173, 77, 77, 43, 479, 1310, 264, 62, - /* 1580 */ 62, 369, 451, 1028, 1028, 1030, 1031, 35, 1601, 1192, - /* 1590 */ 453, 1092, 238, 291, 565, 163, 1309, 110, 395, 395, - /* 1600 */ 394, 276, 392, 986, 987, 859, 481, 346, 264, 110, - /* 1610 */ 1032, 489, 576, 1188, 503, 1088, 261, 261, 237, 576, - /* 1620 */ 326, 121, 568, 1040, 4, 347, 1376, 413, 325, 119, - /* 1630 */ 119, 948, 319, 567, 351, 78, 78, 120, 571, 451, - /* 1640 */ 577, 451, 79, 79, 1028, 354, 356, 576, 360, 1092, - /* 1650 */ 110, 576, 974, 942, 264, 123, 457, 358, 239, 576, - /* 1660 */ 519, 451, 939, 1104, 123, 1104, 173, 576, 1032, 43, - /* 1670 */ 63, 63, 1324, 565, 168, 168, 1028, 1028, 1030, 1031, - /* 1680 */ 35, 576, 169, 169, 1308, 872, 238, 157, 1589, 576, - /* 1690 */ 86, 86, 365, 89, 568, 375, 4, 1103, 941, 1103, - /* 1700 */ 123, 576, 1040, 1389, 64, 64, 1188, 1434, 119, 119, - /* 1710 */ 571, 576, 82, 82, 563, 576, 120, 165, 451, 577, - /* 1720 */ 451, 413, 1362, 1028, 144, 144, 319, 567, 576, 1374, - /* 1730 */ 562, 498, 279, 451, 83, 83, 1439, 576, 166, 166, - /* 1740 */ 576, 1289, 554, 576, 1280, 565, 576, 12, 576, 1268, - /* 1750 */ 457, 146, 146, 1267, 576, 1028, 1028, 1030, 1031, 35, - /* 1760 */ 140, 140, 1269, 167, 167, 1609, 160, 160, 1359, 150, - /* 1770 */ 150, 149, 149, 311, 1040, 576, 312, 147, 147, 313, - /* 1780 */ 119, 119, 222, 235, 576, 1188, 396, 576, 120, 576, - /* 1790 */ 451, 577, 451, 1192, 453, 1028, 508, 291, 148, 148, - /* 1800 */ 1421, 1612, 395, 395, 394, 276, 392, 85, 85, 859, - /* 1810 */ 87, 87, 84, 84, 553, 576, 294, 576, 1426, 338, - /* 1820 */ 339, 1425, 237, 300, 326, 1416, 1409, 1028, 1028, 1030, - /* 1830 */ 1031, 35, 325, 344, 403, 483, 226, 1307, 52, 52, - /* 1840 */ 58, 58, 368, 1371, 1502, 566, 1501, 121, 568, 221, - /* 1850 */ 4, 208, 268, 209, 390, 1244, 1549, 1188, 1372, 1370, - /* 1860 */ 1369, 1547, 239, 184, 571, 233, 421, 1241, 95, 218, - /* 1870 */ 173, 1507, 193, 43, 91, 94, 178, 186, 467, 188, - /* 1880 */ 468, 1422, 13, 189, 190, 191, 501, 451, 245, 108, - /* 1890 */ 238, 401, 1428, 1427, 1430, 475, 404, 1496, 197, 565, - /* 1900 */ 14, 490, 249, 101, 1518, 496, 349, 280, 251, 201, - /* 1910 */ 353, 499, 252, 406, 1270, 253, 517, 1327, 1326, 435, - /* 1920 */ 1325, 1318, 103, 893, 1296, 413, 227, 407, 1040, 1626, - /* 1930 */ 319, 567, 1625, 1297, 119, 119, 439, 367, 1317, 1295, - /* 1940 */ 1624, 526, 120, 440, 451, 577, 451, 1594, 309, 1028, - /* 1950 */ 310, 373, 266, 267, 457, 1580, 1579, 443, 138, 1394, - /* 1960 */ 552, 1393, 11, 1483, 384, 115, 317, 1350, 109, 536, - /* 1970 */ 42, 579, 382, 214, 1349, 388, 1198, 389, 275, 277, - /* 1980 */ 278, 1028, 1028, 1030, 1031, 35, 580, 1265, 414, 1260, - /* 1990 */ 170, 415, 183, 1534, 1535, 1533, 171, 154, 307, 1532, - /* 2000 */ 846, 223, 224, 88, 452, 215, 172, 321, 234, 1102, - /* 2010 */ 152, 1188, 1100, 329, 185, 174, 1223, 925, 187, 241, 
- /* 2020 */ 337, 244, 1116, 192, 175, 176, 424, 426, 97, 194, - /* 2030 */ 98, 99, 100, 177, 1119, 1115, 246, 247, 161, 24, - /* 2040 */ 248, 348, 1238, 264, 1108, 250, 495, 199, 198, 15, - /* 2050 */ 861, 500, 369, 254, 504, 509, 512, 200, 102, 25, - /* 2060 */ 179, 361, 26, 364, 104, 891, 308, 162, 105, 904, - /* 2070 */ 520, 106, 1185, 1069, 1155, 17, 228, 27, 1154, 283, - /* 2080 */ 285, 263, 978, 202, 972, 123, 28, 1175, 29, 30, - /* 2090 */ 1179, 1171, 31, 1173, 1160, 41, 32, 206, 548, 33, - /* 2100 */ 110, 1178, 1083, 8, 112, 1070, 113, 1068, 1072, 34, - /* 2110 */ 1073, 560, 1125, 269, 1124, 270, 36, 18, 1194, 1033, - /* 2120 */ 873, 151, 122, 37, 393, 271, 272, 572, 181, 1193, - /* 2130 */ 1256, 1256, 1256, 935, 1256, 1256, 1256, 1256, 1256, 1256, - /* 2140 */ 1256, 1617, + /* 0 */ 130, 127, 234, 282, 282, 1328, 576, 1307, 460, 289, + /* 10 */ 289, 576, 1622, 381, 576, 1328, 573, 576, 562, 413, + /* 20 */ 1300, 1542, 573, 481, 562, 524, 460, 459, 558, 82, + /* 30 */ 82, 983, 294, 375, 51, 51, 498, 61, 61, 984, + /* 40 */ 82, 82, 1577, 137, 138, 91, 7, 1228, 1228, 1063, + /* 50 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 413, + /* 60 */ 288, 288, 182, 288, 288, 481, 536, 288, 288, 130, + /* 70 */ 127, 234, 432, 573, 525, 562, 573, 557, 562, 1290, + /* 80 */ 573, 421, 562, 137, 138, 91, 559, 1228, 1228, 1063, + /* 90 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 296, + /* 100 */ 460, 398, 1249, 134, 134, 134, 134, 133, 133, 132, + /* 110 */ 132, 132, 131, 128, 451, 451, 1050, 1050, 1064, 1067, + /* 120 */ 1255, 1, 1, 582, 2, 1259, 581, 1174, 1259, 1174, + /* 130 */ 321, 413, 155, 321, 1584, 155, 379, 112, 481, 1341, + /* 140 */ 456, 299, 1341, 134, 134, 134, 134, 133, 133, 132, + /* 150 */ 132, 132, 131, 128, 451, 137, 138, 91, 498, 1228, + /* 160 */ 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, + /* 170 */ 136, 1204, 862, 1281, 288, 288, 283, 288, 288, 523, + /* 180 */ 523, 1250, 139, 578, 7, 578, 1345, 573, 1169, 562, + /* 190 */ 573, 1054, 562, 136, 136, 136, 136, 129, 573, 547, + /* 200 */ 562, 1169, 245, 1541, 1169, 245, 133, 133, 132, 132, + /* 210 */ 132, 131, 128, 451, 302, 134, 134, 134, 134, 133, + /* 220 */ 133, 132, 132, 132, 131, 128, 451, 1575, 1204, 1205, + /* 230 */ 1204, 7, 470, 550, 455, 413, 550, 455, 130, 127, + /* 240 */ 234, 134, 134, 134, 134, 133, 133, 132, 132, 132, + /* 250 */ 131, 128, 451, 136, 136, 136, 136, 538, 483, 137, + /* 260 */ 138, 91, 1019, 1228, 1228, 1063, 1066, 1053, 1053, 135, + /* 270 */ 135, 136, 136, 136, 136, 1085, 576, 1204, 132, 132, + /* 280 */ 132, 131, 128, 451, 93, 214, 134, 134, 134, 134, + /* 290 */ 133, 133, 132, 132, 132, 131, 128, 451, 401, 19, + /* 300 */ 19, 134, 134, 134, 134, 133, 133, 132, 132, 132, + /* 310 */ 131, 128, 451, 1498, 426, 267, 344, 467, 332, 134, + /* 320 */ 134, 134, 134, 133, 133, 132, 132, 132, 131, 128, + /* 330 */ 451, 1281, 576, 6, 1204, 1205, 1204, 257, 576, 413, + /* 340 */ 511, 508, 507, 1279, 94, 1019, 464, 1204, 551, 551, + /* 350 */ 506, 1224, 1571, 44, 38, 51, 51, 411, 576, 413, + /* 360 */ 45, 51, 51, 137, 138, 91, 530, 1228, 1228, 1063, + /* 370 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 398, + /* 380 */ 1148, 82, 82, 137, 138, 91, 39, 1228, 1228, 1063, + /* 390 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 344, + /* 400 */ 44, 288, 288, 375, 1204, 1205, 1204, 209, 1204, 1224, + /* 410 */ 320, 567, 471, 576, 573, 576, 562, 576, 316, 264, + /* 420 */ 231, 46, 160, 134, 134, 134, 134, 133, 133, 132, + /* 430 */ 132, 132, 131, 128, 451, 303, 82, 82, 82, 82, + 
/* 440 */ 82, 82, 442, 134, 134, 134, 134, 133, 133, 132, + /* 450 */ 132, 132, 131, 128, 451, 1582, 544, 320, 567, 1250, + /* 460 */ 874, 1582, 380, 382, 413, 1204, 1205, 1204, 360, 182, + /* 470 */ 288, 288, 1576, 557, 1339, 557, 7, 557, 1277, 472, + /* 480 */ 346, 526, 531, 573, 556, 562, 439, 1511, 137, 138, + /* 490 */ 91, 219, 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, + /* 500 */ 136, 136, 136, 136, 465, 1511, 1513, 532, 413, 288, + /* 510 */ 288, 423, 512, 288, 288, 411, 288, 288, 874, 130, + /* 520 */ 127, 234, 573, 1107, 562, 1204, 573, 1107, 562, 573, + /* 530 */ 560, 562, 137, 138, 91, 1293, 1228, 1228, 1063, 1066, + /* 540 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 134, 134, + /* 550 */ 134, 134, 133, 133, 132, 132, 132, 131, 128, 451, + /* 560 */ 493, 503, 1292, 1204, 257, 288, 288, 511, 508, 507, + /* 570 */ 1204, 1628, 1169, 123, 568, 275, 4, 506, 573, 1511, + /* 580 */ 562, 331, 1204, 1205, 1204, 1169, 548, 548, 1169, 261, + /* 590 */ 571, 7, 134, 134, 134, 134, 133, 133, 132, 132, + /* 600 */ 132, 131, 128, 451, 108, 533, 130, 127, 234, 1204, + /* 610 */ 448, 447, 413, 1451, 452, 983, 886, 96, 1598, 1233, + /* 620 */ 1204, 1205, 1204, 984, 1235, 1450, 565, 1204, 1205, 1204, + /* 630 */ 229, 522, 1234, 534, 1333, 1333, 137, 138, 91, 1449, + /* 640 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 650 */ 136, 136, 373, 1595, 971, 1040, 413, 1236, 418, 1236, + /* 660 */ 879, 121, 121, 948, 373, 1595, 1204, 1205, 1204, 122, + /* 670 */ 1204, 452, 577, 452, 363, 417, 1028, 882, 373, 1595, + /* 680 */ 137, 138, 91, 462, 1228, 1228, 1063, 1066, 1053, 1053, + /* 690 */ 135, 135, 136, 136, 136, 136, 134, 134, 134, 134, + /* 700 */ 133, 133, 132, 132, 132, 131, 128, 451, 1028, 1028, + /* 710 */ 1030, 1031, 35, 570, 570, 570, 197, 423, 1040, 198, + /* 720 */ 1204, 123, 568, 1204, 4, 320, 567, 1204, 1205, 1204, + /* 730 */ 40, 388, 576, 384, 882, 1029, 423, 1188, 571, 1028, + /* 740 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 750 */ 128, 451, 529, 1568, 1204, 19, 19, 1204, 575, 492, + /* 760 */ 413, 157, 452, 489, 1187, 1331, 1331, 5, 1204, 949, + /* 770 */ 431, 1028, 1028, 1030, 565, 22, 22, 1204, 1205, 1204, + /* 780 */ 1204, 1205, 1204, 477, 137, 138, 91, 212, 1228, 1228, + /* 790 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 800 */ 1188, 48, 111, 1040, 413, 1204, 213, 970, 1041, 121, + /* 810 */ 121, 1204, 1205, 1204, 1204, 1205, 1204, 122, 221, 452, + /* 820 */ 577, 452, 44, 487, 1028, 1204, 1205, 1204, 137, 138, + /* 830 */ 91, 378, 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, + /* 840 */ 136, 136, 136, 136, 134, 134, 134, 134, 133, 133, + /* 850 */ 132, 132, 132, 131, 128, 451, 1028, 1028, 1030, 1031, + /* 860 */ 35, 461, 1204, 1205, 1204, 1569, 1040, 377, 214, 1149, + /* 870 */ 1657, 535, 1657, 437, 902, 320, 567, 1568, 364, 320, + /* 880 */ 567, 412, 329, 1029, 519, 1188, 3, 1028, 134, 134, + /* 890 */ 134, 134, 133, 133, 132, 132, 132, 131, 128, 451, + /* 900 */ 1659, 399, 1169, 307, 893, 307, 515, 576, 413, 214, + /* 910 */ 498, 944, 1024, 540, 903, 1169, 943, 392, 1169, 1028, + /* 920 */ 1028, 1030, 406, 298, 1204, 50, 1149, 1658, 413, 1658, + /* 930 */ 145, 145, 137, 138, 91, 293, 1228, 1228, 1063, 1066, + /* 940 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 1188, 1147, + /* 950 */ 514, 1568, 137, 138, 91, 1505, 1228, 1228, 1063, 1066, + /* 960 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 434, 323, + /* 970 */ 435, 539, 111, 1506, 274, 291, 372, 517, 367, 516, + /* 980 */ 262, 1204, 1205, 1204, 1574, 481, 363, 576, 7, 
1569, + /* 990 */ 1568, 377, 134, 134, 134, 134, 133, 133, 132, 132, + /* 1000 */ 132, 131, 128, 451, 1568, 576, 1147, 576, 232, 576, + /* 1010 */ 19, 19, 134, 134, 134, 134, 133, 133, 132, 132, + /* 1020 */ 132, 131, 128, 451, 1169, 433, 576, 1207, 19, 19, + /* 1030 */ 19, 19, 19, 19, 1627, 576, 911, 1169, 47, 120, + /* 1040 */ 1169, 117, 413, 306, 498, 438, 1125, 206, 336, 19, + /* 1050 */ 19, 1435, 49, 449, 449, 449, 1368, 315, 81, 81, + /* 1060 */ 576, 304, 413, 1570, 207, 377, 137, 138, 91, 115, + /* 1070 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 1080 */ 136, 136, 576, 82, 82, 1207, 137, 138, 91, 1340, + /* 1090 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 1100 */ 136, 136, 1569, 386, 377, 82, 82, 463, 1126, 1552, + /* 1110 */ 333, 463, 335, 131, 128, 451, 1569, 161, 377, 16, + /* 1120 */ 317, 387, 428, 1127, 448, 447, 134, 134, 134, 134, + /* 1130 */ 133, 133, 132, 132, 132, 131, 128, 451, 1128, 576, + /* 1140 */ 1105, 10, 445, 267, 576, 1554, 134, 134, 134, 134, + /* 1150 */ 133, 133, 132, 132, 132, 131, 128, 451, 532, 576, + /* 1160 */ 922, 576, 19, 19, 576, 1573, 576, 147, 147, 7, + /* 1170 */ 923, 1236, 498, 1236, 576, 487, 413, 552, 285, 1224, + /* 1180 */ 969, 215, 82, 82, 66, 66, 1435, 67, 67, 21, + /* 1190 */ 21, 1110, 1110, 495, 334, 297, 413, 53, 53, 297, + /* 1200 */ 137, 138, 91, 119, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1210 */ 135, 135, 136, 136, 136, 136, 413, 1336, 1311, 446, + /* 1220 */ 137, 138, 91, 227, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1230 */ 135, 135, 136, 136, 136, 136, 574, 1224, 936, 936, + /* 1240 */ 137, 126, 91, 141, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1250 */ 135, 135, 136, 136, 136, 136, 533, 429, 472, 346, + /* 1260 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1270 */ 128, 451, 576, 457, 233, 343, 1435, 403, 498, 1550, + /* 1280 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1290 */ 128, 451, 576, 324, 576, 82, 82, 487, 576, 969, + /* 1300 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1310 */ 128, 451, 288, 288, 546, 68, 68, 54, 54, 553, + /* 1320 */ 413, 69, 69, 351, 6, 573, 944, 562, 410, 409, + /* 1330 */ 1435, 943, 450, 545, 260, 259, 258, 576, 158, 576, + /* 1340 */ 413, 222, 1180, 479, 969, 138, 91, 430, 1228, 1228, + /* 1350 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 1360 */ 70, 70, 71, 71, 576, 1126, 91, 576, 1228, 1228, + /* 1370 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 1380 */ 1127, 166, 850, 851, 852, 1282, 419, 72, 72, 108, + /* 1390 */ 73, 73, 1310, 358, 1180, 1128, 576, 305, 576, 123, + /* 1400 */ 568, 494, 4, 488, 134, 134, 134, 134, 133, 133, + /* 1410 */ 132, 132, 132, 131, 128, 451, 571, 564, 534, 55, + /* 1420 */ 55, 56, 56, 576, 134, 134, 134, 134, 133, 133, + /* 1430 */ 132, 132, 132, 131, 128, 451, 576, 1104, 233, 1104, + /* 1440 */ 452, 1602, 582, 2, 1259, 576, 57, 57, 576, 321, + /* 1450 */ 576, 155, 565, 1435, 485, 353, 576, 356, 1341, 59, + /* 1460 */ 59, 576, 44, 969, 569, 419, 576, 238, 60, 60, + /* 1470 */ 261, 74, 74, 75, 75, 287, 231, 576, 1366, 76, + /* 1480 */ 76, 1040, 420, 184, 20, 20, 576, 121, 121, 77, + /* 1490 */ 77, 97, 218, 288, 288, 122, 125, 452, 577, 452, + /* 1500 */ 143, 143, 1028, 576, 520, 576, 573, 576, 562, 144, + /* 1510 */ 144, 474, 227, 1244, 478, 123, 568, 576, 4, 320, + /* 1520 */ 567, 245, 411, 576, 443, 411, 78, 78, 62, 62, + /* 1530 */ 79, 79, 571, 319, 1028, 1028, 1030, 1031, 35, 418, + /* 1540 */ 63, 63, 576, 290, 411, 9, 80, 80, 1144, 576, + /* 1550 */ 400, 576, 
486, 455, 576, 1223, 452, 576, 325, 342, + /* 1560 */ 576, 111, 576, 1188, 242, 64, 64, 473, 565, 576, + /* 1570 */ 23, 576, 170, 170, 171, 171, 576, 87, 87, 328, + /* 1580 */ 65, 65, 542, 83, 83, 146, 146, 541, 123, 568, + /* 1590 */ 341, 4, 84, 84, 168, 168, 576, 1040, 576, 148, + /* 1600 */ 148, 576, 1380, 121, 121, 571, 1021, 576, 266, 576, + /* 1610 */ 424, 122, 576, 452, 577, 452, 576, 553, 1028, 142, + /* 1620 */ 142, 169, 169, 576, 162, 162, 528, 889, 371, 452, + /* 1630 */ 152, 152, 151, 151, 1379, 149, 149, 109, 370, 150, + /* 1640 */ 150, 565, 576, 480, 576, 266, 86, 86, 576, 1092, + /* 1650 */ 1028, 1028, 1030, 1031, 35, 542, 482, 576, 266, 466, + /* 1660 */ 543, 123, 568, 1616, 4, 88, 88, 85, 85, 475, + /* 1670 */ 1040, 52, 52, 222, 901, 900, 121, 121, 571, 1188, + /* 1680 */ 58, 58, 244, 1032, 122, 889, 452, 577, 452, 908, + /* 1690 */ 909, 1028, 300, 347, 504, 111, 263, 361, 165, 111, + /* 1700 */ 111, 1088, 452, 263, 974, 1153, 266, 1092, 986, 987, + /* 1710 */ 942, 939, 125, 125, 565, 1103, 872, 1103, 159, 941, + /* 1720 */ 1309, 125, 1557, 1028, 1028, 1030, 1031, 35, 542, 337, + /* 1730 */ 1530, 205, 1529, 541, 499, 1589, 490, 348, 1376, 352, + /* 1740 */ 355, 1032, 357, 1040, 359, 1324, 1308, 366, 563, 121, + /* 1750 */ 121, 376, 1188, 1389, 1434, 1362, 280, 122, 1374, 452, + /* 1760 */ 577, 452, 167, 1439, 1028, 1289, 1280, 1268, 1267, 1269, + /* 1770 */ 1609, 1359, 312, 313, 314, 397, 12, 237, 224, 1421, + /* 1780 */ 295, 1416, 1409, 1426, 339, 484, 340, 509, 1371, 1612, + /* 1790 */ 1372, 1425, 1244, 404, 301, 228, 1028, 1028, 1030, 1031, + /* 1800 */ 35, 1601, 1192, 454, 345, 1307, 292, 369, 1502, 1501, + /* 1810 */ 270, 396, 396, 395, 277, 393, 1370, 1369, 859, 1549, + /* 1820 */ 186, 123, 568, 235, 4, 1188, 391, 210, 211, 223, + /* 1830 */ 1547, 239, 1241, 327, 422, 96, 220, 195, 571, 180, + /* 1840 */ 188, 326, 468, 469, 190, 191, 502, 192, 193, 566, + /* 1850 */ 247, 109, 1430, 491, 199, 251, 102, 281, 402, 476, + /* 1860 */ 405, 1496, 452, 497, 253, 1422, 13, 1428, 14, 1427, + /* 1870 */ 203, 1507, 241, 500, 565, 354, 407, 92, 95, 1270, + /* 1880 */ 175, 254, 518, 43, 1327, 255, 1326, 1325, 436, 1518, + /* 1890 */ 350, 1318, 104, 229, 893, 1626, 440, 441, 1625, 408, + /* 1900 */ 240, 1296, 268, 1040, 310, 269, 1297, 527, 444, 121, + /* 1910 */ 121, 368, 1295, 1594, 1624, 311, 1394, 122, 1317, 452, + /* 1920 */ 577, 452, 374, 1580, 1028, 1393, 140, 553, 11, 90, + /* 1930 */ 568, 385, 4, 116, 318, 414, 1579, 110, 1483, 537, + /* 1940 */ 320, 567, 1350, 555, 42, 579, 571, 1349, 1198, 383, + /* 1950 */ 276, 390, 216, 389, 278, 279, 1028, 1028, 1030, 1031, + /* 1960 */ 35, 172, 580, 1265, 458, 1260, 415, 416, 185, 156, + /* 1970 */ 452, 1534, 1535, 173, 1533, 1532, 89, 308, 225, 226, + /* 1980 */ 846, 174, 565, 453, 217, 1188, 322, 236, 1102, 154, + /* 1990 */ 1100, 330, 187, 176, 1223, 243, 189, 925, 338, 246, + /* 2000 */ 1116, 194, 177, 425, 178, 427, 98, 196, 99, 100, + /* 2010 */ 101, 1040, 179, 1119, 1115, 248, 249, 121, 121, 163, + /* 2020 */ 24, 250, 349, 1238, 496, 122, 1108, 452, 577, 452, + /* 2030 */ 1192, 454, 1028, 266, 292, 200, 252, 201, 861, 396, + /* 2040 */ 396, 395, 277, 393, 15, 501, 859, 370, 292, 256, + /* 2050 */ 202, 554, 505, 396, 396, 395, 277, 393, 103, 239, + /* 2060 */ 859, 327, 25, 26, 1028, 1028, 1030, 1031, 35, 326, + /* 2070 */ 362, 510, 891, 239, 365, 327, 513, 904, 105, 309, + /* 2080 */ 164, 181, 27, 326, 106, 521, 107, 1185, 1069, 1155, + /* 2090 */ 17, 1154, 230, 1188, 284, 286, 265, 204, 125, 1171, + /* 2100 */ 241, 28, 978, 
972, 29, 41, 1175, 1179, 175, 1173, + /* 2110 */ 30, 43, 31, 8, 241, 1178, 32, 1160, 208, 549, + /* 2120 */ 33, 111, 175, 1083, 1070, 43, 1068, 1072, 240, 113, + /* 2130 */ 114, 34, 561, 118, 1124, 271, 1073, 36, 18, 572, + /* 2140 */ 1033, 873, 240, 124, 37, 935, 272, 273, 1617, 183, + /* 2150 */ 153, 394, 1194, 1193, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2160 */ 1256, 1256, 1256, 414, 1256, 1256, 1256, 1256, 320, 567, + /* 2170 */ 1256, 1256, 1256, 1256, 1256, 1256, 1256, 414, 1256, 1256, + /* 2180 */ 1256, 1256, 320, 567, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2190 */ 1256, 1256, 458, 1256, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2200 */ 1256, 1256, 1256, 1256, 1256, 1256, 458, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 194, 276, 277, 278, 216, 194, 194, 217, 194, 194, - /* 10 */ 194, 194, 224, 194, 194, 276, 277, 278, 204, 19, - /* 20 */ 206, 202, 297, 217, 218, 205, 207, 217, 205, 217, - /* 30 */ 218, 31, 217, 218, 217, 218, 29, 217, 218, 39, - /* 40 */ 33, 217, 220, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 312, 19, - /* 60 */ 240, 241, 316, 240, 241, 194, 46, 47, 48, 49, - /* 70 */ 22, 254, 65, 253, 254, 255, 253, 194, 255, 194, - /* 80 */ 263, 258, 259, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 276, 277, - /* 100 */ 278, 285, 102, 103, 104, 105, 106, 107, 108, 109, - /* 110 */ 110, 111, 112, 113, 59, 186, 187, 188, 189, 190, - /* 120 */ 191, 310, 239, 317, 318, 196, 86, 198, 88, 317, - /* 130 */ 19, 319, 317, 318, 205, 264, 25, 211, 212, 213, - /* 140 */ 205, 121, 102, 103, 104, 105, 106, 107, 108, 109, - /* 150 */ 110, 111, 112, 113, 43, 44, 45, 46, 47, 48, - /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 240, - /* 170 */ 241, 116, 117, 118, 119, 240, 241, 122, 123, 124, - /* 180 */ 69, 298, 253, 194, 255, 106, 107, 132, 253, 141, - /* 190 */ 255, 54, 55, 56, 57, 58, 207, 268, 102, 103, - /* 200 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 210 */ 214, 128, 129, 102, 103, 104, 105, 106, 107, 108, - /* 220 */ 109, 110, 111, 112, 113, 134, 25, 136, 137, 300, - /* 230 */ 165, 166, 153, 19, 155, 54, 55, 56, 57, 102, - /* 240 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 250 */ 113, 108, 109, 110, 111, 112, 113, 43, 44, 45, - /* 260 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - /* 270 */ 56, 57, 276, 277, 278, 113, 194, 19, 22, 23, - /* 280 */ 194, 67, 24, 102, 103, 104, 105, 106, 107, 108, - /* 290 */ 109, 110, 111, 112, 113, 220, 250, 59, 252, 217, - /* 300 */ 218, 43, 44, 45, 46, 47, 48, 49, 50, 51, - /* 310 */ 52, 53, 54, 55, 56, 57, 102, 103, 104, 105, - /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 106, 107, - /* 330 */ 108, 109, 110, 111, 112, 113, 254, 59, 205, 138, - /* 340 */ 139, 19, 20, 194, 22, 263, 22, 23, 231, 25, - /* 350 */ 72, 276, 277, 278, 116, 117, 118, 101, 36, 76, - /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 370 */ 112, 113, 89, 240, 241, 92, 73, 194, 194, 73, - /* 380 */ 19, 59, 188, 189, 190, 191, 253, 81, 255, 151, - /* 390 */ 196, 25, 198, 71, 116, 117, 118, 311, 312, 205, - /* 400 */ 217, 218, 316, 81, 43, 44, 45, 46, 47, 48, - /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 270, - /* 420 */ 22, 23, 100, 25, 59, 101, 138, 139, 106, 107, - /* 430 */ 127, 128, 129, 127, 240, 241, 114, 254, 116, 117, - /* 440 */ 118, 76, 76, 121, 138, 139, 263, 253, 264, 255, - /* 450 */ 205, 275, 87, 19, 89, 89, 194, 92, 92, 199, - /* 460 */ 138, 139, 268, 102, 103, 104, 105, 106, 107, 108, - /* 470 */ 109, 110, 111, 112, 113, 153, 154, 155, 156, 157, - /* 
480 */ 81, 116, 117, 118, 129, 240, 241, 224, 19, 226, - /* 490 */ 314, 315, 23, 25, 300, 59, 22, 234, 253, 101, - /* 500 */ 255, 236, 237, 26, 194, 183, 194, 152, 72, 22, - /* 510 */ 145, 150, 43, 44, 45, 46, 47, 48, 49, 50, - /* 520 */ 51, 52, 53, 54, 55, 56, 57, 217, 218, 217, - /* 530 */ 218, 19, 189, 59, 191, 23, 59, 138, 139, 196, - /* 540 */ 135, 198, 232, 283, 232, 140, 59, 287, 205, 275, - /* 550 */ 116, 205, 116, 117, 118, 43, 44, 45, 46, 47, - /* 560 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 570 */ 194, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 580 */ 111, 112, 113, 240, 241, 194, 240, 241, 314, 315, - /* 590 */ 116, 117, 118, 116, 117, 118, 253, 194, 255, 253, - /* 600 */ 59, 255, 19, 116, 117, 118, 23, 22, 217, 218, - /* 610 */ 142, 268, 205, 275, 102, 103, 104, 105, 106, 107, - /* 620 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 630 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 640 */ 57, 19, 194, 300, 59, 23, 119, 240, 241, 122, - /* 650 */ 123, 124, 314, 315, 194, 236, 237, 194, 117, 132, - /* 660 */ 253, 81, 255, 205, 59, 43, 44, 45, 46, 47, - /* 670 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 680 */ 217, 218, 194, 194, 194, 102, 103, 104, 105, 106, - /* 690 */ 107, 108, 109, 110, 111, 112, 113, 294, 240, 241, - /* 700 */ 120, 116, 117, 118, 59, 194, 217, 218, 211, 212, - /* 710 */ 213, 253, 19, 255, 194, 19, 23, 254, 138, 139, - /* 720 */ 24, 232, 117, 194, 102, 103, 104, 105, 106, 107, - /* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 750 */ 57, 19, 264, 108, 76, 23, 127, 128, 129, 311, - /* 760 */ 312, 116, 117, 118, 316, 87, 306, 89, 308, 194, - /* 770 */ 92, 22, 59, 194, 22, 43, 44, 45, 46, 47, - /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 790 */ 194, 95, 217, 218, 265, 102, 103, 104, 105, 106, - /* 800 */ 107, 108, 109, 110, 111, 112, 113, 232, 59, 113, - /* 810 */ 25, 59, 194, 217, 218, 119, 120, 121, 122, 123, - /* 820 */ 124, 125, 19, 145, 194, 194, 23, 131, 232, 116, - /* 830 */ 117, 118, 35, 194, 102, 103, 104, 105, 106, 107, - /* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 860 */ 57, 19, 194, 66, 194, 116, 117, 118, 116, 117, - /* 870 */ 118, 74, 242, 294, 194, 194, 206, 23, 194, 25, - /* 880 */ 194, 111, 112, 113, 25, 43, 44, 45, 46, 47, - /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 900 */ 24, 194, 194, 217, 218, 102, 103, 104, 105, 106, - /* 910 */ 107, 108, 109, 110, 111, 112, 113, 241, 232, 194, - /* 920 */ 212, 213, 242, 242, 217, 218, 242, 130, 11, 253, - /* 930 */ 194, 255, 19, 265, 149, 59, 306, 194, 308, 232, - /* 940 */ 309, 310, 217, 218, 102, 103, 104, 105, 106, 107, - /* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 970 */ 57, 194, 194, 59, 194, 239, 19, 194, 25, 254, - /* 980 */ 303, 304, 23, 194, 25, 126, 306, 306, 308, 308, - /* 990 */ 306, 271, 308, 117, 286, 217, 218, 217, 218, 194, - /* 1000 */ 194, 159, 45, 46, 47, 48, 49, 50, 51, 52, - /* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106, - /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 59, 239, 194, - /* 1030 */ 116, 117, 118, 260, 254, 194, 240, 241, 194, 233, - /* 1040 */ 205, 240, 241, 205, 239, 128, 129, 270, 265, 253, - /* 1050 */ 194, 255, 217, 218, 253, 194, 255, 143, 280, 102, - /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 1070 */ 113, 118, 159, 217, 218, 240, 241, 118, 240, 241, - /* 1080 */ 194, 194, 194, 239, 116, 
117, 118, 22, 253, 254, - /* 1090 */ 255, 253, 19, 255, 233, 194, 143, 24, 263, 212, - /* 1100 */ 213, 194, 143, 217, 218, 217, 218, 261, 262, 271, - /* 1110 */ 254, 143, 19, 7, 8, 9, 43, 44, 45, 46, - /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1130 */ 57, 16, 19, 22, 23, 294, 43, 44, 45, 46, - /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1150 */ 57, 312, 194, 214, 21, 316, 43, 44, 45, 46, - /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1170 */ 57, 106, 107, 286, 194, 102, 103, 104, 105, 106, - /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 207, 158, 59, - /* 1190 */ 160, 22, 77, 24, 79, 102, 103, 104, 105, 106, - /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 194, 194, 229, - /* 1210 */ 194, 231, 101, 80, 22, 102, 103, 104, 105, 106, - /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 288, 59, 12, - /* 1230 */ 217, 218, 293, 217, 218, 19, 106, 107, 59, 19, - /* 1240 */ 16, 127, 128, 129, 27, 115, 116, 117, 118, 194, - /* 1250 */ 120, 59, 22, 194, 24, 194, 123, 100, 128, 42, - /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 1270 */ 54, 55, 56, 57, 117, 194, 217, 218, 121, 100, - /* 1280 */ 63, 194, 245, 153, 194, 155, 117, 19, 115, 194, - /* 1290 */ 73, 214, 194, 256, 161, 116, 117, 194, 217, 218, - /* 1300 */ 121, 77, 194, 79, 217, 218, 194, 217, 218, 117, - /* 1310 */ 153, 154, 155, 254, 46, 217, 218, 144, 102, 103, - /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 1330 */ 232, 270, 153, 154, 155, 115, 116, 66, 19, 20, - /* 1340 */ 183, 22, 12, 312, 254, 194, 262, 316, 209, 210, - /* 1350 */ 266, 239, 194, 194, 108, 36, 85, 27, 19, 20, - /* 1360 */ 265, 22, 183, 245, 144, 94, 25, 48, 217, 218, - /* 1370 */ 293, 194, 42, 270, 256, 36, 217, 218, 59, 194, - /* 1380 */ 25, 135, 194, 115, 194, 161, 140, 194, 194, 15, - /* 1390 */ 71, 194, 312, 63, 217, 218, 316, 194, 59, 131, - /* 1400 */ 301, 302, 217, 218, 85, 217, 218, 217, 218, 90, - /* 1410 */ 71, 217, 218, 19, 217, 218, 245, 146, 262, 100, - /* 1420 */ 217, 218, 266, 265, 85, 106, 107, 256, 312, 90, - /* 1430 */ 209, 210, 316, 114, 60, 116, 117, 118, 194, 100, - /* 1440 */ 121, 194, 194, 145, 115, 106, 107, 19, 46, 19, - /* 1450 */ 20, 24, 22, 114, 194, 116, 117, 118, 194, 245, - /* 1460 */ 121, 194, 164, 194, 217, 218, 36, 194, 258, 259, - /* 1470 */ 256, 194, 153, 154, 155, 156, 157, 217, 218, 150, - /* 1480 */ 31, 217, 218, 142, 217, 218, 217, 218, 39, 59, - /* 1490 */ 217, 218, 153, 154, 155, 156, 157, 149, 150, 5, - /* 1500 */ 145, 71, 183, 245, 10, 11, 12, 13, 14, 194, - /* 1510 */ 116, 17, 129, 227, 256, 85, 194, 115, 194, 23, - /* 1520 */ 90, 25, 183, 99, 30, 97, 32, 22, 22, 194, - /* 1530 */ 100, 194, 217, 218, 40, 152, 106, 107, 23, 217, - /* 1540 */ 218, 194, 19, 20, 114, 22, 116, 117, 118, 257, - /* 1550 */ 194, 121, 217, 218, 217, 218, 194, 133, 53, 36, - /* 1560 */ 23, 23, 25, 25, 70, 120, 121, 61, 141, 7, - /* 1570 */ 8, 121, 78, 217, 218, 81, 23, 227, 25, 217, - /* 1580 */ 218, 131, 59, 153, 154, 155, 156, 157, 0, 1, - /* 1590 */ 2, 59, 98, 5, 71, 23, 227, 25, 10, 11, - /* 1600 */ 12, 13, 14, 83, 84, 17, 23, 23, 25, 25, - /* 1610 */ 59, 194, 194, 183, 23, 23, 25, 25, 30, 194, - /* 1620 */ 32, 19, 20, 100, 22, 194, 194, 133, 40, 106, - /* 1630 */ 107, 108, 138, 139, 194, 217, 218, 114, 36, 116, - /* 1640 */ 117, 118, 217, 218, 121, 194, 194, 194, 23, 117, - /* 1650 */ 25, 194, 23, 23, 25, 25, 162, 194, 70, 194, - /* 1660 */ 145, 59, 23, 153, 25, 155, 78, 194, 117, 81, - /* 1670 */ 217, 218, 194, 71, 217, 218, 153, 154, 155, 156, - /* 1680 */ 157, 194, 217, 218, 194, 
23, 98, 25, 321, 194, - /* 1690 */ 217, 218, 194, 19, 20, 194, 22, 153, 23, 155, - /* 1700 */ 25, 194, 100, 194, 217, 218, 183, 194, 106, 107, - /* 1710 */ 36, 194, 217, 218, 237, 194, 114, 243, 116, 117, - /* 1720 */ 118, 133, 194, 121, 217, 218, 138, 139, 194, 194, - /* 1730 */ 194, 290, 289, 59, 217, 218, 194, 194, 217, 218, - /* 1740 */ 194, 194, 140, 194, 194, 71, 194, 244, 194, 194, - /* 1750 */ 162, 217, 218, 194, 194, 153, 154, 155, 156, 157, - /* 1760 */ 217, 218, 194, 217, 218, 194, 217, 218, 257, 217, - /* 1770 */ 218, 217, 218, 257, 100, 194, 257, 217, 218, 257, - /* 1780 */ 106, 107, 215, 299, 194, 183, 192, 194, 114, 194, - /* 1790 */ 116, 117, 118, 1, 2, 121, 221, 5, 217, 218, - /* 1800 */ 273, 197, 10, 11, 12, 13, 14, 217, 218, 17, - /* 1810 */ 217, 218, 217, 218, 140, 194, 246, 194, 273, 295, - /* 1820 */ 247, 273, 30, 247, 32, 269, 269, 153, 154, 155, - /* 1830 */ 156, 157, 40, 246, 273, 295, 230, 226, 217, 218, - /* 1840 */ 217, 218, 220, 261, 220, 282, 220, 19, 20, 244, - /* 1850 */ 22, 250, 141, 250, 246, 60, 201, 183, 261, 261, - /* 1860 */ 261, 201, 70, 299, 36, 299, 201, 38, 151, 150, - /* 1870 */ 78, 285, 22, 81, 296, 296, 43, 235, 18, 238, - /* 1880 */ 201, 274, 272, 238, 238, 238, 18, 59, 200, 149, - /* 1890 */ 98, 247, 274, 274, 235, 247, 247, 247, 235, 71, - /* 1900 */ 272, 201, 200, 158, 292, 62, 291, 201, 200, 22, - /* 1910 */ 201, 222, 200, 222, 201, 200, 115, 219, 219, 64, - /* 1920 */ 219, 228, 22, 126, 221, 133, 165, 222, 100, 225, - /* 1930 */ 138, 139, 225, 219, 106, 107, 24, 219, 228, 219, - /* 1940 */ 219, 307, 114, 113, 116, 117, 118, 315, 284, 121, - /* 1950 */ 284, 222, 201, 91, 162, 320, 320, 82, 148, 267, - /* 1960 */ 145, 267, 22, 279, 201, 158, 281, 251, 147, 146, - /* 1970 */ 25, 203, 250, 249, 251, 248, 13, 247, 195, 195, - /* 1980 */ 6, 153, 154, 155, 156, 157, 193, 193, 305, 193, - /* 1990 */ 208, 305, 302, 214, 214, 214, 208, 223, 223, 214, - /* 2000 */ 4, 215, 215, 214, 3, 22, 208, 163, 15, 23, - /* 2010 */ 16, 183, 23, 139, 151, 130, 25, 20, 142, 24, - /* 2020 */ 16, 144, 1, 142, 130, 130, 61, 37, 53, 151, - /* 2030 */ 53, 53, 53, 130, 116, 1, 34, 141, 5, 22, - /* 2040 */ 115, 161, 75, 25, 68, 141, 41, 115, 68, 24, - /* 2050 */ 20, 19, 131, 125, 67, 67, 96, 22, 22, 22, - /* 2060 */ 37, 23, 22, 24, 22, 59, 67, 23, 149, 28, - /* 2070 */ 22, 25, 23, 23, 23, 22, 141, 34, 97, 23, - /* 2080 */ 23, 34, 116, 22, 143, 25, 34, 75, 34, 34, - /* 2090 */ 75, 88, 34, 86, 23, 22, 34, 25, 24, 34, - /* 2100 */ 25, 93, 23, 44, 142, 23, 142, 23, 23, 22, - /* 2110 */ 11, 25, 23, 25, 23, 22, 22, 22, 1, 23, - /* 2120 */ 23, 23, 22, 22, 15, 141, 141, 25, 25, 1, - /* 2130 */ 322, 322, 322, 135, 322, 322, 322, 322, 322, 322, - /* 2140 */ 322, 141, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2150 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2160 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2170 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2180 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2190 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2200 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2210 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2220 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2230 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2240 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2250 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2260 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2270 */ 322, 322, 322, 322, 
322, 322, 322, 322, 322, 322, - /* 2280 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2290 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2300 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2310 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2320 */ 322, 322, 322, 322, 322, 322, 322, 322, + /* 0 */ 277, 278, 279, 241, 242, 225, 195, 227, 195, 241, + /* 10 */ 242, 195, 217, 221, 195, 235, 254, 195, 256, 19, + /* 20 */ 225, 298, 254, 195, 256, 206, 213, 214, 206, 218, + /* 30 */ 219, 31, 206, 195, 218, 219, 195, 218, 219, 39, + /* 40 */ 218, 219, 313, 43, 44, 45, 317, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 19, + /* 60 */ 241, 242, 195, 241, 242, 195, 255, 241, 242, 277, + /* 70 */ 278, 279, 234, 254, 255, 256, 254, 255, 256, 218, + /* 80 */ 254, 240, 256, 43, 44, 45, 264, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 271, + /* 100 */ 287, 22, 23, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 113, 114, 114, 47, 48, 49, 50, + /* 120 */ 187, 188, 189, 190, 191, 192, 190, 87, 192, 89, + /* 130 */ 197, 19, 199, 197, 318, 199, 320, 25, 195, 206, + /* 140 */ 299, 271, 206, 103, 104, 105, 106, 107, 108, 109, + /* 150 */ 110, 111, 112, 113, 114, 43, 44, 45, 195, 47, + /* 160 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 170 */ 58, 60, 21, 195, 241, 242, 215, 241, 242, 312, + /* 180 */ 313, 102, 70, 205, 317, 207, 242, 254, 77, 256, + /* 190 */ 254, 122, 256, 55, 56, 57, 58, 59, 254, 88, + /* 200 */ 256, 90, 269, 240, 93, 269, 107, 108, 109, 110, + /* 210 */ 111, 112, 113, 114, 271, 103, 104, 105, 106, 107, + /* 220 */ 108, 109, 110, 111, 112, 113, 114, 313, 117, 118, + /* 230 */ 119, 317, 81, 195, 301, 19, 195, 301, 277, 278, + /* 240 */ 279, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 250 */ 112, 113, 114, 55, 56, 57, 58, 146, 195, 43, + /* 260 */ 44, 45, 74, 47, 48, 49, 50, 51, 52, 53, + /* 270 */ 54, 55, 56, 57, 58, 124, 195, 60, 109, 110, + /* 280 */ 111, 112, 113, 114, 68, 195, 103, 104, 105, 106, + /* 290 */ 107, 108, 109, 110, 111, 112, 113, 114, 208, 218, + /* 300 */ 219, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 310 */ 112, 113, 114, 162, 233, 24, 128, 129, 130, 103, + /* 320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + /* 330 */ 114, 195, 195, 215, 117, 118, 119, 120, 195, 19, + /* 340 */ 123, 124, 125, 207, 24, 74, 246, 60, 310, 311, + /* 350 */ 133, 60, 311, 82, 22, 218, 219, 257, 195, 19, + /* 360 */ 73, 218, 219, 43, 44, 45, 206, 47, 48, 49, + /* 370 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 22, + /* 380 */ 23, 218, 219, 43, 44, 45, 54, 47, 48, 49, + /* 390 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 128, + /* 400 */ 82, 241, 242, 195, 117, 118, 119, 289, 60, 118, + /* 410 */ 139, 140, 294, 195, 254, 195, 256, 195, 255, 259, + /* 420 */ 260, 73, 22, 103, 104, 105, 106, 107, 108, 109, + /* 430 */ 110, 111, 112, 113, 114, 206, 218, 219, 218, 219, + /* 440 */ 218, 219, 234, 103, 104, 105, 106, 107, 108, 109, + /* 450 */ 110, 111, 112, 113, 114, 318, 319, 139, 140, 102, + /* 460 */ 60, 318, 319, 221, 19, 117, 118, 119, 23, 195, + /* 470 */ 241, 242, 313, 255, 206, 255, 317, 255, 206, 129, + /* 480 */ 130, 206, 264, 254, 264, 256, 264, 195, 43, 44, + /* 490 */ 45, 151, 47, 48, 49, 50, 51, 52, 53, 54, + /* 500 */ 55, 56, 57, 58, 246, 213, 214, 19, 19, 241, + /* 510 */ 242, 195, 23, 241, 242, 257, 241, 242, 118, 277, + /* 520 */ 278, 279, 254, 29, 256, 60, 254, 33, 256, 254, + /* 530 */ 206, 256, 43, 44, 45, 218, 47, 48, 49, 50, + /* 540 */ 51, 52, 53, 54, 55, 56, 57, 58, 103, 104, + 
/* 550 */ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + /* 560 */ 66, 19, 218, 60, 120, 241, 242, 123, 124, 125, + /* 570 */ 60, 232, 77, 19, 20, 26, 22, 133, 254, 287, + /* 580 */ 256, 265, 117, 118, 119, 90, 312, 313, 93, 47, + /* 590 */ 36, 317, 103, 104, 105, 106, 107, 108, 109, 110, + /* 600 */ 111, 112, 113, 114, 116, 117, 277, 278, 279, 60, + /* 610 */ 107, 108, 19, 276, 60, 31, 23, 152, 195, 116, + /* 620 */ 117, 118, 119, 39, 121, 276, 72, 117, 118, 119, + /* 630 */ 166, 167, 129, 145, 237, 238, 43, 44, 45, 276, + /* 640 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 650 */ 57, 58, 315, 316, 144, 101, 19, 154, 116, 156, + /* 660 */ 23, 107, 108, 109, 315, 316, 117, 118, 119, 115, + /* 670 */ 60, 117, 118, 119, 132, 200, 122, 60, 315, 316, + /* 680 */ 43, 44, 45, 272, 47, 48, 49, 50, 51, 52, + /* 690 */ 53, 54, 55, 56, 57, 58, 103, 104, 105, 106, + /* 700 */ 107, 108, 109, 110, 111, 112, 113, 114, 154, 155, + /* 710 */ 156, 157, 158, 212, 213, 214, 22, 195, 101, 22, + /* 720 */ 60, 19, 20, 60, 22, 139, 140, 117, 118, 119, + /* 730 */ 22, 251, 195, 253, 117, 118, 195, 183, 36, 122, + /* 740 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 750 */ 113, 114, 195, 195, 60, 218, 219, 60, 195, 284, + /* 760 */ 19, 25, 60, 288, 23, 237, 238, 22, 60, 109, + /* 770 */ 233, 154, 155, 156, 72, 218, 219, 117, 118, 119, + /* 780 */ 117, 118, 119, 116, 43, 44, 45, 265, 47, 48, + /* 790 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 800 */ 183, 243, 25, 101, 19, 60, 265, 144, 23, 107, + /* 810 */ 108, 117, 118, 119, 117, 118, 119, 115, 151, 117, + /* 820 */ 118, 119, 82, 195, 122, 117, 118, 119, 43, 44, + /* 830 */ 45, 195, 47, 48, 49, 50, 51, 52, 53, 54, + /* 840 */ 55, 56, 57, 58, 103, 104, 105, 106, 107, 108, + /* 850 */ 109, 110, 111, 112, 113, 114, 154, 155, 156, 157, + /* 860 */ 158, 121, 117, 118, 119, 307, 101, 309, 195, 22, + /* 870 */ 23, 195, 25, 19, 35, 139, 140, 195, 24, 139, + /* 880 */ 140, 208, 195, 118, 109, 183, 22, 122, 103, 104, + /* 890 */ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + /* 900 */ 304, 305, 77, 230, 127, 232, 67, 195, 19, 195, + /* 910 */ 195, 136, 23, 88, 75, 90, 141, 203, 93, 154, + /* 920 */ 155, 156, 208, 295, 60, 243, 22, 23, 19, 25, + /* 930 */ 218, 219, 43, 44, 45, 100, 47, 48, 49, 50, + /* 940 */ 51, 52, 53, 54, 55, 56, 57, 58, 183, 102, + /* 950 */ 96, 195, 43, 44, 45, 240, 47, 48, 49, 50, + /* 960 */ 51, 52, 53, 54, 55, 56, 57, 58, 114, 134, + /* 970 */ 131, 146, 25, 286, 120, 121, 122, 123, 124, 125, + /* 980 */ 126, 117, 118, 119, 313, 195, 132, 195, 317, 307, + /* 990 */ 195, 309, 103, 104, 105, 106, 107, 108, 109, 110, + /* 1000 */ 111, 112, 113, 114, 195, 195, 102, 195, 195, 195, + /* 1010 */ 218, 219, 103, 104, 105, 106, 107, 108, 109, 110, + /* 1020 */ 111, 112, 113, 114, 77, 233, 195, 60, 218, 219, + /* 1030 */ 218, 219, 218, 219, 23, 195, 25, 90, 243, 159, + /* 1040 */ 93, 161, 19, 233, 195, 233, 23, 233, 16, 218, + /* 1050 */ 219, 195, 243, 212, 213, 214, 262, 263, 218, 219, + /* 1060 */ 195, 271, 19, 307, 233, 309, 43, 44, 45, 160, + /* 1070 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 1080 */ 57, 58, 195, 218, 219, 118, 43, 44, 45, 240, + /* 1090 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 1100 */ 57, 58, 307, 195, 309, 218, 219, 263, 12, 195, + /* 1110 */ 78, 267, 80, 112, 113, 114, 307, 22, 309, 24, + /* 1120 */ 255, 281, 266, 27, 107, 108, 103, 104, 105, 106, + /* 1130 */ 107, 108, 109, 110, 111, 112, 113, 114, 42, 195, + /* 1140 */ 11, 22, 255, 24, 195, 195, 103, 104, 105, 106, + /* 1150 */ 107, 108, 109, 
110, 111, 112, 113, 114, 19, 195, + /* 1160 */ 64, 195, 218, 219, 195, 313, 195, 218, 219, 317, + /* 1170 */ 74, 154, 195, 156, 195, 195, 19, 233, 23, 60, + /* 1180 */ 25, 24, 218, 219, 218, 219, 195, 218, 219, 218, + /* 1190 */ 219, 128, 129, 130, 162, 263, 19, 218, 219, 267, + /* 1200 */ 43, 44, 45, 160, 47, 48, 49, 50, 51, 52, + /* 1210 */ 53, 54, 55, 56, 57, 58, 19, 240, 228, 255, + /* 1220 */ 43, 44, 45, 25, 47, 48, 49, 50, 51, 52, + /* 1230 */ 53, 54, 55, 56, 57, 58, 135, 118, 137, 138, + /* 1240 */ 43, 44, 45, 22, 47, 48, 49, 50, 51, 52, + /* 1250 */ 53, 54, 55, 56, 57, 58, 117, 266, 129, 130, + /* 1260 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1270 */ 113, 114, 195, 195, 119, 295, 195, 206, 195, 195, + /* 1280 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1290 */ 113, 114, 195, 195, 195, 218, 219, 195, 195, 144, + /* 1300 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1310 */ 113, 114, 241, 242, 67, 218, 219, 218, 219, 146, + /* 1320 */ 19, 218, 219, 240, 215, 254, 136, 256, 107, 108, + /* 1330 */ 195, 141, 255, 86, 128, 129, 130, 195, 165, 195, + /* 1340 */ 19, 143, 95, 272, 25, 44, 45, 266, 47, 48, + /* 1350 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 1360 */ 218, 219, 218, 219, 195, 12, 45, 195, 47, 48, + /* 1370 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 1380 */ 27, 23, 7, 8, 9, 210, 211, 218, 219, 116, + /* 1390 */ 218, 219, 228, 16, 147, 42, 195, 295, 195, 19, + /* 1400 */ 20, 266, 22, 294, 103, 104, 105, 106, 107, 108, + /* 1410 */ 109, 110, 111, 112, 113, 114, 36, 64, 145, 218, + /* 1420 */ 219, 218, 219, 195, 103, 104, 105, 106, 107, 108, + /* 1430 */ 109, 110, 111, 112, 113, 114, 195, 154, 119, 156, + /* 1440 */ 60, 189, 190, 191, 192, 195, 218, 219, 195, 197, + /* 1450 */ 195, 199, 72, 195, 19, 78, 195, 80, 206, 218, + /* 1460 */ 219, 195, 82, 144, 210, 211, 195, 15, 218, 219, + /* 1470 */ 47, 218, 219, 218, 219, 259, 260, 195, 261, 218, + /* 1480 */ 219, 101, 302, 303, 218, 219, 195, 107, 108, 218, + /* 1490 */ 219, 150, 151, 241, 242, 115, 25, 117, 118, 119, + /* 1500 */ 218, 219, 122, 195, 146, 195, 254, 195, 256, 218, + /* 1510 */ 219, 246, 25, 61, 246, 19, 20, 195, 22, 139, + /* 1520 */ 140, 269, 257, 195, 266, 257, 218, 219, 218, 219, + /* 1530 */ 218, 219, 36, 246, 154, 155, 156, 157, 158, 116, + /* 1540 */ 218, 219, 195, 22, 257, 49, 218, 219, 23, 195, + /* 1550 */ 25, 195, 117, 301, 195, 25, 60, 195, 195, 23, + /* 1560 */ 195, 25, 195, 183, 24, 218, 219, 130, 72, 195, + /* 1570 */ 22, 195, 218, 219, 218, 219, 195, 218, 219, 195, + /* 1580 */ 218, 219, 86, 218, 219, 218, 219, 91, 19, 20, + /* 1590 */ 153, 22, 218, 219, 218, 219, 195, 101, 195, 218, + /* 1600 */ 219, 195, 195, 107, 108, 36, 23, 195, 25, 195, + /* 1610 */ 62, 115, 195, 117, 118, 119, 195, 146, 122, 218, + /* 1620 */ 219, 218, 219, 195, 218, 219, 19, 60, 122, 60, + /* 1630 */ 218, 219, 218, 219, 195, 218, 219, 150, 132, 218, + /* 1640 */ 219, 72, 195, 23, 195, 25, 218, 219, 195, 60, + /* 1650 */ 154, 155, 156, 157, 158, 86, 23, 195, 25, 195, + /* 1660 */ 91, 19, 20, 142, 22, 218, 219, 218, 219, 130, + /* 1670 */ 101, 218, 219, 143, 121, 122, 107, 108, 36, 183, + /* 1680 */ 218, 219, 142, 60, 115, 118, 117, 118, 119, 7, + /* 1690 */ 8, 122, 153, 23, 23, 25, 25, 23, 23, 25, + /* 1700 */ 25, 23, 60, 25, 23, 98, 25, 118, 84, 85, + /* 1710 */ 23, 23, 25, 25, 72, 154, 23, 156, 25, 23, + /* 1720 */ 228, 25, 195, 154, 155, 156, 157, 158, 86, 195, + /* 1730 */ 195, 258, 195, 91, 291, 322, 195, 195, 195, 195, + /* 1740 */ 195, 118, 195, 101, 195, 195, 195, 195, 
238, 107, + /* 1750 */ 108, 195, 183, 195, 195, 195, 290, 115, 195, 117, + /* 1760 */ 118, 119, 244, 195, 122, 195, 195, 195, 195, 195, + /* 1770 */ 195, 258, 258, 258, 258, 193, 245, 300, 216, 274, + /* 1780 */ 247, 270, 270, 274, 296, 296, 248, 222, 262, 198, + /* 1790 */ 262, 274, 61, 274, 248, 231, 154, 155, 156, 157, + /* 1800 */ 158, 0, 1, 2, 247, 227, 5, 221, 221, 221, + /* 1810 */ 142, 10, 11, 12, 13, 14, 262, 262, 17, 202, + /* 1820 */ 300, 19, 20, 300, 22, 183, 247, 251, 251, 245, + /* 1830 */ 202, 30, 38, 32, 202, 152, 151, 22, 36, 43, + /* 1840 */ 236, 40, 18, 202, 239, 239, 18, 239, 239, 283, + /* 1850 */ 201, 150, 236, 202, 236, 201, 159, 202, 248, 248, + /* 1860 */ 248, 248, 60, 63, 201, 275, 273, 275, 273, 275, + /* 1870 */ 22, 286, 71, 223, 72, 202, 223, 297, 297, 202, + /* 1880 */ 79, 201, 116, 82, 220, 201, 220, 220, 65, 293, + /* 1890 */ 292, 229, 22, 166, 127, 226, 24, 114, 226, 223, + /* 1900 */ 99, 222, 202, 101, 285, 92, 220, 308, 83, 107, + /* 1910 */ 108, 220, 220, 316, 220, 285, 268, 115, 229, 117, + /* 1920 */ 118, 119, 223, 321, 122, 268, 149, 146, 22, 19, + /* 1930 */ 20, 202, 22, 159, 282, 134, 321, 148, 280, 147, + /* 1940 */ 139, 140, 252, 141, 25, 204, 36, 252, 13, 251, + /* 1950 */ 196, 248, 250, 249, 196, 6, 154, 155, 156, 157, + /* 1960 */ 158, 209, 194, 194, 163, 194, 306, 306, 303, 224, + /* 1970 */ 60, 215, 215, 209, 215, 215, 215, 224, 216, 216, + /* 1980 */ 4, 209, 72, 3, 22, 183, 164, 15, 23, 16, + /* 1990 */ 23, 140, 152, 131, 25, 24, 143, 20, 16, 145, + /* 2000 */ 1, 143, 131, 62, 131, 37, 54, 152, 54, 54, + /* 2010 */ 54, 101, 131, 117, 1, 34, 142, 107, 108, 5, + /* 2020 */ 22, 116, 162, 76, 41, 115, 69, 117, 118, 119, + /* 2030 */ 1, 2, 122, 25, 5, 69, 142, 116, 20, 10, + /* 2040 */ 11, 12, 13, 14, 24, 19, 17, 132, 5, 126, + /* 2050 */ 22, 141, 68, 10, 11, 12, 13, 14, 22, 30, + /* 2060 */ 17, 32, 22, 22, 154, 155, 156, 157, 158, 40, + /* 2070 */ 23, 68, 60, 30, 24, 32, 97, 28, 22, 68, + /* 2080 */ 23, 37, 34, 40, 150, 22, 25, 23, 23, 23, + /* 2090 */ 22, 98, 142, 183, 23, 23, 34, 22, 25, 89, + /* 2100 */ 71, 34, 117, 144, 34, 22, 76, 76, 79, 87, + /* 2110 */ 34, 82, 34, 44, 71, 94, 34, 23, 25, 24, + /* 2120 */ 34, 25, 79, 23, 23, 82, 23, 23, 99, 143, + /* 2130 */ 143, 22, 25, 25, 23, 22, 11, 22, 22, 25, + /* 2140 */ 23, 23, 99, 22, 22, 136, 142, 142, 142, 25, + /* 2150 */ 23, 15, 1, 1, 323, 323, 323, 323, 323, 323, + /* 2160 */ 323, 323, 323, 134, 323, 323, 323, 323, 139, 140, + /* 2170 */ 323, 323, 323, 323, 323, 323, 323, 134, 323, 323, + /* 2180 */ 323, 323, 139, 140, 323, 323, 323, 323, 323, 323, + /* 2190 */ 323, 323, 163, 323, 323, 323, 323, 323, 323, 323, + /* 2200 */ 323, 323, 323, 323, 323, 323, 163, 323, 323, 323, + /* 2210 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2220 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2230 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2240 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2250 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2260 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2270 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2280 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2290 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2300 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2310 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2320 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2330 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2340 */ 
323, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2350 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2360 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2370 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2380 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2390 */ 187, 187, 187, 187, }; #define YY_SHIFT_COUNT (582) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2128) +#define YY_SHIFT_MAX (2152) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1792, 1588, 1494, 322, 322, 399, 306, 1319, 1339, 1430, - /* 10 */ 1828, 1828, 1828, 580, 399, 399, 399, 399, 399, 0, - /* 20 */ 0, 214, 1093, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 30 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1130, 1130, - /* 40 */ 365, 365, 55, 278, 436, 713, 713, 201, 201, 201, - /* 50 */ 201, 40, 111, 258, 361, 469, 512, 583, 622, 693, - /* 60 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093, - /* 70 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, - /* 80 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1523, 1602, - /* 90 */ 1674, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 100 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 110 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 120 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 130 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 140 */ 137, 181, 181, 181, 181, 181, 181, 181, 96, 222, - /* 150 */ 143, 477, 713, 1133, 1268, 713, 713, 79, 79, 713, - /* 160 */ 770, 83, 65, 65, 65, 288, 162, 162, 2142, 2142, - /* 170 */ 696, 696, 696, 238, 474, 474, 474, 474, 1217, 1217, - /* 180 */ 678, 477, 324, 398, 713, 713, 713, 713, 713, 713, - /* 190 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 200 */ 713, 713, 713, 1220, 366, 366, 713, 917, 283, 283, - /* 210 */ 434, 434, 605, 605, 1298, 2142, 2142, 2142, 2142, 2142, - /* 220 */ 2142, 2142, 1179, 1157, 1157, 487, 527, 585, 645, 749, - /* 230 */ 914, 968, 752, 713, 713, 713, 713, 713, 713, 713, - /* 240 */ 713, 713, 713, 303, 713, 713, 713, 713, 713, 713, - /* 250 */ 713, 713, 713, 713, 713, 713, 797, 797, 797, 713, - /* 260 */ 713, 713, 959, 713, 713, 713, 1169, 1271, 713, 713, - /* 270 */ 1330, 713, 713, 713, 713, 713, 713, 713, 713, 629, - /* 280 */ 7, 91, 876, 876, 876, 876, 953, 91, 91, 1246, - /* 290 */ 1065, 1106, 1374, 1329, 1348, 468, 1348, 1394, 785, 1329, - /* 300 */ 1329, 785, 1329, 468, 1394, 859, 854, 1402, 1449, 1449, - /* 310 */ 1449, 1173, 1173, 1173, 1173, 1355, 1355, 1030, 1341, 405, - /* 320 */ 1230, 1795, 1795, 1711, 1711, 1829, 1829, 1711, 1717, 1719, - /* 330 */ 1850, 1833, 1860, 1860, 1860, 1860, 1711, 1868, 1740, 1719, - /* 340 */ 1719, 1740, 1850, 1833, 1740, 1833, 1740, 1711, 1868, 1745, - /* 350 */ 1843, 1711, 1868, 1887, 1711, 1868, 1711, 1868, 1887, 1801, - /* 360 */ 1801, 1801, 1855, 1900, 1900, 1887, 1801, 1797, 1801, 1855, - /* 370 */ 1801, 1801, 1761, 1912, 1830, 1830, 1887, 1711, 1862, 1862, - /* 380 */ 1875, 1875, 1810, 1815, 1940, 1711, 1807, 1810, 1821, 1823, - /* 390 */ 1740, 1945, 1963, 1963, 1974, 1974, 1974, 2142, 2142, 2142, - /* 400 */ 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, - /* 410 */ 2142, 2142, 20, 1224, 256, 1111, 1115, 1114, 1192, 1496, - /* 420 */ 1424, 1505, 1427, 355, 1383, 1537, 1506, 1538, 1553, 1583, - /* 430 */ 1584, 1591, 1625, 541, 1445, 1562, 1450, 1572, 1515, 1428, - /* 440 */ 1532, 1592, 1629, 1520, 1630, 1639, 1510, 1544, 1662, 1675, - /* 450 */ 1551, 
48, 1996, 2001, 1983, 1844, 1993, 1994, 1986, 1989, - /* 460 */ 1874, 1863, 1885, 1991, 1991, 1995, 1876, 1997, 1877, 2004, - /* 470 */ 2021, 1881, 1894, 1991, 1895, 1965, 1990, 1991, 1878, 1975, - /* 480 */ 1977, 1978, 1979, 1903, 1918, 2002, 1896, 2034, 2033, 2017, - /* 490 */ 1925, 1880, 1976, 2018, 1980, 1967, 2005, 1904, 1932, 2025, - /* 500 */ 2030, 2032, 1921, 1928, 2035, 1987, 2036, 2037, 2038, 2040, - /* 510 */ 1988, 2006, 2039, 1960, 2041, 2042, 1999, 2023, 2044, 2043, - /* 520 */ 1919, 2048, 2049, 2050, 2046, 2051, 2053, 1981, 1935, 2056, - /* 530 */ 2057, 1966, 2047, 2061, 1941, 2060, 2052, 2054, 2055, 2058, - /* 540 */ 2003, 2012, 2007, 2059, 2015, 2008, 2062, 2071, 2073, 2074, - /* 550 */ 2072, 2075, 2065, 1962, 1964, 2079, 2060, 2082, 2084, 2085, - /* 560 */ 2087, 2086, 2089, 2088, 2091, 2093, 2099, 2094, 2095, 2096, - /* 570 */ 2097, 2100, 2101, 2102, 1998, 1984, 1985, 2000, 2103, 2098, - /* 580 */ 2109, 2117, 2128, + /* 0 */ 2029, 1801, 2043, 1380, 1380, 318, 271, 1496, 1569, 1642, + /* 10 */ 702, 702, 702, 740, 318, 318, 318, 318, 318, 0, + /* 20 */ 0, 216, 1177, 702, 702, 702, 702, 702, 702, 702, + /* 30 */ 702, 702, 702, 702, 702, 702, 702, 702, 503, 503, + /* 40 */ 111, 111, 217, 287, 348, 610, 610, 736, 736, 736, + /* 50 */ 736, 40, 112, 320, 340, 445, 489, 593, 637, 741, + /* 60 */ 785, 889, 909, 1023, 1043, 1157, 1177, 1177, 1177, 1177, + /* 70 */ 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, + /* 80 */ 1177, 1177, 1177, 1177, 1197, 1177, 1301, 1321, 1321, 554, + /* 90 */ 1802, 1910, 702, 702, 702, 702, 702, 702, 702, 702, + /* 100 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 110 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 120 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 130 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 140 */ 702, 702, 138, 198, 198, 198, 198, 198, 198, 198, + /* 150 */ 183, 99, 169, 549, 610, 151, 542, 610, 610, 1017, + /* 160 */ 1017, 610, 1001, 350, 464, 464, 464, 586, 1, 1, + /* 170 */ 2207, 2207, 854, 854, 854, 465, 694, 694, 694, 694, + /* 180 */ 1096, 1096, 825, 549, 847, 904, 610, 610, 610, 610, + /* 190 */ 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, + /* 200 */ 610, 610, 610, 610, 610, 488, 947, 947, 610, 1129, + /* 210 */ 495, 495, 1139, 1139, 967, 967, 1173, 2207, 2207, 2207, + /* 220 */ 2207, 2207, 2207, 2207, 617, 765, 765, 697, 444, 708, + /* 230 */ 660, 745, 510, 663, 864, 610, 610, 610, 610, 610, + /* 240 */ 610, 610, 610, 610, 610, 188, 610, 610, 610, 610, + /* 250 */ 610, 610, 610, 610, 610, 610, 610, 610, 839, 839, + /* 260 */ 839, 610, 610, 610, 1155, 610, 610, 610, 1119, 1247, + /* 270 */ 610, 1353, 610, 610, 610, 610, 610, 610, 610, 610, + /* 280 */ 1063, 494, 1101, 291, 291, 291, 291, 1319, 1101, 1101, + /* 290 */ 775, 1221, 1375, 1452, 667, 1341, 1198, 1341, 1435, 1487, + /* 300 */ 667, 667, 1487, 667, 1198, 1435, 777, 1011, 1423, 584, + /* 310 */ 584, 584, 1273, 1273, 1273, 1273, 1471, 1471, 880, 1530, + /* 320 */ 1190, 1095, 1731, 1731, 1668, 1668, 1794, 1794, 1668, 1683, + /* 330 */ 1685, 1815, 1796, 1824, 1824, 1824, 1824, 1668, 1828, 1701, + /* 340 */ 1685, 1685, 1701, 1815, 1796, 1701, 1796, 1701, 1668, 1828, + /* 350 */ 1697, 1800, 1668, 1828, 1848, 1668, 1828, 1668, 1828, 1848, + /* 360 */ 1766, 1766, 1766, 1823, 1870, 1870, 1848, 1766, 1767, 1766, + /* 370 */ 1823, 1766, 1766, 1727, 1872, 1783, 1783, 1848, 1668, 1813, + /* 380 */ 1813, 1825, 1825, 1777, 1781, 1906, 1668, 1774, 1777, 1789, + /* 390 */ 1792, 1701, 1919, 1935, 1935, 1949, 1949, 
1949, 2207, 2207, + /* 400 */ 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, + /* 410 */ 2207, 2207, 2207, 69, 1032, 79, 357, 1377, 1206, 400, + /* 420 */ 1525, 835, 332, 1540, 1437, 1539, 1536, 1548, 1583, 1620, + /* 430 */ 1633, 1670, 1671, 1674, 1567, 1553, 1682, 1506, 1675, 1358, + /* 440 */ 1607, 1589, 1678, 1681, 1624, 1687, 1688, 1283, 1561, 1693, + /* 450 */ 1696, 1623, 1521, 1976, 1980, 1962, 1822, 1972, 1973, 1965, + /* 460 */ 1967, 1851, 1840, 1862, 1969, 1969, 1971, 1853, 1977, 1854, + /* 470 */ 1982, 1999, 1858, 1871, 1969, 1873, 1941, 1968, 1969, 1855, + /* 480 */ 1952, 1954, 1955, 1956, 1881, 1896, 1981, 1874, 2013, 2014, + /* 490 */ 1998, 1905, 1860, 1957, 2008, 1966, 1947, 1983, 1894, 1921, + /* 500 */ 2020, 2018, 2026, 1915, 1923, 2028, 1984, 2036, 2040, 2047, + /* 510 */ 2041, 2003, 2012, 2050, 1979, 2049, 2056, 2011, 2044, 2057, + /* 520 */ 2048, 1934, 2063, 2064, 2065, 2061, 2066, 2068, 1993, 1950, + /* 530 */ 2071, 2072, 1985, 2062, 2075, 1959, 2073, 2067, 2070, 2076, + /* 540 */ 2078, 2010, 2030, 2022, 2069, 2031, 2021, 2082, 2094, 2083, + /* 550 */ 2095, 2093, 2096, 2086, 1986, 1987, 2100, 2073, 2101, 2103, + /* 560 */ 2104, 2109, 2107, 2108, 2111, 2113, 2125, 2115, 2116, 2117, + /* 570 */ 2118, 2121, 2122, 2114, 2009, 2004, 2005, 2006, 2124, 2127, + /* 580 */ 2136, 2151, 2152, }; -#define YY_REDUCE_COUNT (411) -#define YY_REDUCE_MIN (-275) -#define YY_REDUCE_MAX (1798) +#define YY_REDUCE_COUNT (412) +#define YY_REDUCE_MIN (-277) +#define YY_REDUCE_MAX (1772) static const short yy_reduce_ofst[] = { - /* 0 */ -71, 194, 343, 835, -180, -177, 838, -194, -188, -185, - /* 10 */ -183, 82, 183, -65, 133, 245, 346, 407, 458, -178, - /* 20 */ 75, -275, -4, 310, 312, 489, 575, 596, 463, 686, - /* 30 */ 707, 725, 780, 1098, 856, 778, 1059, 1090, 708, 887, - /* 40 */ 86, 448, 980, 630, 680, 681, 684, 796, 801, 796, - /* 50 */ 801, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 60 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 70 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 80 */ -261, -261, -261, -261, -261, -261, -261, -261, 391, 886, - /* 90 */ 888, 1013, 1016, 1081, 1087, 1151, 1159, 1177, 1185, 1188, - /* 100 */ 1190, 1194, 1197, 1203, 1247, 1260, 1264, 1267, 1269, 1273, - /* 110 */ 1315, 1322, 1335, 1337, 1356, 1362, 1418, 1425, 1453, 1457, - /* 120 */ 1465, 1473, 1487, 1495, 1507, 1517, 1521, 1534, 1543, 1546, - /* 130 */ 1549, 1552, 1554, 1560, 1581, 1590, 1593, 1595, 1621, 1623, - /* 140 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 150 */ -261, -186, -117, 260, 263, 460, 631, -74, 497, -181, - /* 160 */ -261, 939, 176, 274, 338, 676, -261, -261, -261, -261, - /* 170 */ -212, -212, -212, -184, 149, 777, 1061, 1103, 265, 419, - /* 180 */ -254, 670, 677, 677, -11, -129, 184, 488, 736, 789, - /* 190 */ 805, 844, 403, 529, 579, 668, 783, 841, 1158, 1112, - /* 200 */ 806, 861, 1095, 846, 839, 1031, -189, 1077, 1080, 1116, - /* 210 */ 1084, 1156, 1139, 1221, 46, 1099, 1037, 1118, 1171, 1214, - /* 220 */ 1210, 1258, -210, -190, -176, -115, 117, 262, 376, 490, - /* 230 */ 511, 520, 618, 639, 743, 901, 907, 958, 1014, 1055, - /* 240 */ 1108, 1193, 1244, 720, 1248, 1277, 1324, 1347, 1417, 1431, - /* 250 */ 1432, 1440, 1451, 1452, 1463, 1478, 1286, 1350, 1369, 1490, - /* 260 */ 1498, 1501, 773, 1509, 1513, 1528, 1292, 1367, 1535, 1536, - /* 270 */ 1477, 1542, 376, 1547, 1550, 1555, 1559, 1568, 1571, 1441, - /* 280 */ 1443, 1474, 1511, 1516, 1519, 1522, 773, 1474, 1474, 1503, - /* 290 */ 
1567, 1594, 1484, 1527, 1556, 1570, 1557, 1524, 1573, 1545, - /* 300 */ 1548, 1576, 1561, 1587, 1540, 1575, 1606, 1611, 1622, 1624, - /* 310 */ 1626, 1582, 1597, 1598, 1599, 1601, 1603, 1563, 1608, 1605, - /* 320 */ 1604, 1564, 1566, 1655, 1660, 1578, 1579, 1665, 1586, 1607, - /* 330 */ 1610, 1642, 1641, 1645, 1646, 1647, 1679, 1688, 1644, 1618, - /* 340 */ 1619, 1648, 1628, 1659, 1649, 1663, 1650, 1700, 1702, 1612, - /* 350 */ 1615, 1706, 1708, 1689, 1709, 1712, 1713, 1715, 1691, 1698, - /* 360 */ 1699, 1701, 1693, 1704, 1707, 1705, 1714, 1703, 1718, 1710, - /* 370 */ 1720, 1721, 1632, 1634, 1664, 1666, 1729, 1751, 1635, 1636, - /* 380 */ 1692, 1694, 1716, 1722, 1684, 1763, 1685, 1723, 1724, 1727, - /* 390 */ 1730, 1768, 1783, 1784, 1793, 1794, 1796, 1683, 1686, 1690, - /* 400 */ 1782, 1779, 1780, 1781, 1785, 1788, 1774, 1775, 1786, 1787, - /* 410 */ 1789, 1798, + /* 0 */ -67, 1252, -64, -178, -181, 160, 1071, 143, -184, 137, + /* 10 */ 218, 220, 222, -174, 229, 268, 272, 275, 324, -208, + /* 20 */ 242, -277, -39, 81, 537, 792, 810, 812, -189, 814, + /* 30 */ 831, 163, 865, 944, 887, 840, 964, 1077, -187, 292, + /* 40 */ -133, 274, 673, 558, 682, 795, 809, -238, -232, -238, + /* 50 */ -232, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 60 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 70 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 80 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 557, + /* 90 */ 712, 949, 966, 969, 971, 979, 1097, 1099, 1103, 1142, + /* 100 */ 1144, 1169, 1172, 1201, 1203, 1228, 1241, 1250, 1253, 1255, + /* 110 */ 1261, 1266, 1271, 1282, 1291, 1308, 1310, 1312, 1322, 1328, + /* 120 */ 1347, 1354, 1356, 1359, 1362, 1365, 1367, 1374, 1376, 1381, + /* 130 */ 1401, 1403, 1406, 1412, 1414, 1417, 1421, 1428, 1447, 1449, + /* 140 */ 1453, 1462, 329, 329, 329, 329, 329, 329, 329, 329, + /* 150 */ 329, 329, 329, -22, -159, 475, -220, 756, 38, 501, + /* 160 */ 841, 714, 329, 118, 337, 349, 363, -56, 329, 329, + /* 170 */ 329, 329, -205, -205, -205, 687, -172, -130, -57, 790, + /* 180 */ 397, 528, -271, 136, 596, 596, 90, 316, 522, 541, + /* 190 */ -37, 715, 849, 977, 628, 856, 980, 991, 1081, 1102, + /* 200 */ 1135, 1083, -162, 208, 1258, 794, -86, 159, 41, 1109, + /* 210 */ 671, 852, 844, 932, 1175, 1254, 480, 1180, 100, 258, + /* 220 */ 1265, 1268, 1216, 1287, -139, 317, 344, 63, 339, 423, + /* 230 */ 563, 636, 676, 813, 908, 914, 950, 1078, 1084, 1098, + /* 240 */ 1363, 1384, 1407, 1439, 1464, 411, 1527, 1534, 1535, 1537, + /* 250 */ 1541, 1542, 1543, 1544, 1545, 1547, 1549, 1550, 990, 1164, + /* 260 */ 1492, 1551, 1552, 1556, 1217, 1558, 1559, 1560, 1473, 1413, + /* 270 */ 1563, 1510, 1568, 563, 1570, 1571, 1572, 1573, 1574, 1575, + /* 280 */ 1443, 1466, 1518, 1513, 1514, 1515, 1516, 1217, 1518, 1518, + /* 290 */ 1531, 1562, 1582, 1477, 1505, 1511, 1533, 1512, 1488, 1538, + /* 300 */ 1509, 1517, 1546, 1519, 1557, 1489, 1565, 1564, 1578, 1586, + /* 310 */ 1587, 1588, 1526, 1528, 1554, 1555, 1576, 1577, 1566, 1579, + /* 320 */ 1584, 1591, 1520, 1523, 1617, 1628, 1580, 1581, 1632, 1585, + /* 330 */ 1590, 1593, 1604, 1605, 1606, 1608, 1609, 1641, 1649, 1610, + /* 340 */ 1592, 1594, 1611, 1595, 1616, 1612, 1618, 1613, 1651, 1654, + /* 350 */ 1596, 1598, 1655, 1663, 1650, 1673, 1680, 1677, 1684, 1653, + /* 360 */ 1664, 1666, 1667, 1662, 1669, 1672, 1676, 1686, 1679, 1691, + /* 370 */ 1689, 1692, 1694, 1597, 1599, 1619, 1630, 1699, 1700, 1602, + /* 380 */ 1615, 1648, 1657, 1690, 1698, 1658, 1729, 1652, 1695, 1702, + /* 390 */ 1704, 1703, 1741, 1754, 1758, 
1768, 1769, 1771, 1660, 1661, + /* 400 */ 1665, 1752, 1756, 1757, 1759, 1760, 1764, 1745, 1753, 1762, + /* 410 */ 1763, 1761, 1772, }; static const YYACTIONTYPE yy_default[] = { /* 0 */ 1663, 1663, 1663, 1491, 1254, 1367, 1254, 1254, 1254, 1254, @@ -173648,57 +176491,57 @@ static const YYACTIONTYPE yy_default[] = { /* 30 */ 1254, 1254, 1254, 1254, 1254, 1490, 1254, 1254, 1254, 1254, /* 40 */ 1578, 1578, 1254, 1254, 1254, 1254, 1254, 1563, 1562, 1254, /* 50 */ 1254, 1254, 1406, 1254, 1413, 1254, 1254, 1254, 1254, 1254, - /* 60 */ 1492, 1493, 1254, 1254, 1254, 1543, 1545, 1508, 1420, 1419, - /* 70 */ 1418, 1417, 1526, 1385, 1411, 1404, 1408, 1487, 1488, 1486, - /* 80 */ 1641, 1493, 1492, 1254, 1407, 1455, 1471, 1454, 1254, 1254, + /* 60 */ 1492, 1493, 1254, 1254, 1254, 1254, 1543, 1545, 1508, 1420, + /* 70 */ 1419, 1418, 1417, 1526, 1385, 1411, 1404, 1408, 1487, 1488, + /* 80 */ 1486, 1641, 1493, 1492, 1254, 1407, 1455, 1471, 1454, 1254, /* 90 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 100 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 110 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 120 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 130 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 140 */ 1463, 1470, 1469, 1468, 1477, 1467, 1464, 1457, 1456, 1458, - /* 150 */ 1459, 1278, 1254, 1275, 1329, 1254, 1254, 1254, 1254, 1254, - /* 160 */ 1460, 1287, 1448, 1447, 1446, 1254, 1474, 1461, 1473, 1472, - /* 170 */ 1551, 1615, 1614, 1509, 1254, 1254, 1254, 1254, 1254, 1254, - /* 180 */ 1578, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 140 */ 1254, 1254, 1463, 1470, 1469, 1468, 1477, 1467, 1464, 1457, + /* 150 */ 1456, 1458, 1459, 1278, 1254, 1275, 1329, 1254, 1254, 1254, + /* 160 */ 1254, 1254, 1460, 1287, 1448, 1447, 1446, 1254, 1474, 1461, + /* 170 */ 1473, 1472, 1551, 1615, 1614, 1509, 1254, 1254, 1254, 1254, + /* 180 */ 1254, 1254, 1578, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 190 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 200 */ 1254, 1254, 1254, 1387, 1578, 1578, 1254, 1287, 1578, 1578, - /* 210 */ 1388, 1388, 1283, 1283, 1391, 1558, 1358, 1358, 1358, 1358, - /* 220 */ 1367, 1358, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 230 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1548, 1546, 1254, - /* 240 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 200 */ 1254, 1254, 1254, 1254, 1254, 1387, 1578, 1578, 1254, 1287, + /* 210 */ 1578, 1578, 1388, 1388, 1283, 1283, 1391, 1558, 1358, 1358, + /* 220 */ 1358, 1358, 1367, 1358, 1254, 1254, 1254, 1254, 1254, 1254, + /* 230 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1548, + /* 240 */ 1546, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 250 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 260 */ 1254, 1254, 1254, 1254, 1254, 1254, 1363, 1254, 1254, 1254, - /* 270 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1608, 1254, - /* 280 */ 1521, 1343, 1363, 1363, 1363, 1363, 1365, 1344, 1342, 1357, - /* 290 */ 1288, 1261, 1655, 1423, 1412, 1364, 1412, 1652, 1410, 1423, - /* 300 */ 1423, 1410, 1423, 1364, 1652, 1304, 1630, 1299, 1397, 1397, - /* 310 */ 1397, 1387, 1387, 1387, 1387, 1391, 1391, 1489, 1364, 1357, - /* 320 */ 1254, 1655, 1655, 1373, 1373, 1654, 1654, 1373, 1509, 1638, - /* 330 */ 1432, 1332, 1338, 1338, 1338, 1338, 1373, 1272, 1410, 1638, - /* 340 */ 1638, 1410, 1432, 1332, 1410, 1332, 1410, 1373, 1272, 1525, - /* 350 */ 1649, 1373, 
1272, 1499, 1373, 1272, 1373, 1272, 1499, 1330, - /* 360 */ 1330, 1330, 1319, 1254, 1254, 1499, 1330, 1304, 1330, 1319, - /* 370 */ 1330, 1330, 1596, 1254, 1503, 1503, 1499, 1373, 1588, 1588, - /* 380 */ 1400, 1400, 1405, 1391, 1494, 1373, 1254, 1405, 1403, 1401, - /* 390 */ 1410, 1322, 1611, 1611, 1607, 1607, 1607, 1660, 1660, 1558, - /* 400 */ 1623, 1287, 1287, 1287, 1287, 1623, 1306, 1306, 1288, 1288, - /* 410 */ 1287, 1623, 1254, 1254, 1254, 1254, 1254, 1254, 1618, 1254, - /* 420 */ 1553, 1510, 1377, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 430 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1564, - /* 440 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 450 */ 1254, 1437, 1254, 1257, 1555, 1254, 1254, 1254, 1254, 1254, - /* 460 */ 1254, 1254, 1254, 1414, 1415, 1378, 1254, 1254, 1254, 1254, - /* 470 */ 1254, 1254, 1254, 1429, 1254, 1254, 1254, 1424, 1254, 1254, - /* 480 */ 1254, 1254, 1254, 1254, 1254, 1254, 1651, 1254, 1254, 1254, - /* 490 */ 1254, 1254, 1254, 1524, 1523, 1254, 1254, 1375, 1254, 1254, + /* 260 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1363, 1254, + /* 270 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1608, + /* 280 */ 1254, 1521, 1343, 1363, 1363, 1363, 1363, 1365, 1344, 1342, + /* 290 */ 1357, 1288, 1261, 1655, 1423, 1412, 1364, 1412, 1652, 1410, + /* 300 */ 1423, 1423, 1410, 1423, 1364, 1652, 1304, 1630, 1299, 1397, + /* 310 */ 1397, 1397, 1387, 1387, 1387, 1387, 1391, 1391, 1489, 1364, + /* 320 */ 1357, 1254, 1655, 1655, 1373, 1373, 1654, 1654, 1373, 1509, + /* 330 */ 1638, 1432, 1332, 1338, 1338, 1338, 1338, 1373, 1272, 1410, + /* 340 */ 1638, 1638, 1410, 1432, 1332, 1410, 1332, 1410, 1373, 1272, + /* 350 */ 1525, 1649, 1373, 1272, 1499, 1373, 1272, 1373, 1272, 1499, + /* 360 */ 1330, 1330, 1330, 1319, 1254, 1254, 1499, 1330, 1304, 1330, + /* 370 */ 1319, 1330, 1330, 1596, 1254, 1503, 1503, 1499, 1373, 1588, + /* 380 */ 1588, 1400, 1400, 1405, 1391, 1494, 1373, 1254, 1405, 1403, + /* 390 */ 1401, 1410, 1322, 1611, 1611, 1607, 1607, 1607, 1660, 1660, + /* 400 */ 1558, 1623, 1287, 1287, 1287, 1287, 1623, 1306, 1306, 1288, + /* 410 */ 1288, 1287, 1623, 1254, 1254, 1254, 1254, 1254, 1254, 1618, + /* 420 */ 1254, 1553, 1510, 1377, 1254, 1254, 1254, 1254, 1254, 1254, + /* 430 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 440 */ 1564, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 450 */ 1254, 1254, 1437, 1254, 1257, 1555, 1254, 1254, 1254, 1254, + /* 460 */ 1254, 1254, 1254, 1254, 1414, 1415, 1378, 1254, 1254, 1254, + /* 470 */ 1254, 1254, 1254, 1254, 1429, 1254, 1254, 1254, 1424, 1254, + /* 480 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1651, 1254, 1254, + /* 490 */ 1254, 1254, 1254, 1254, 1524, 1523, 1254, 1254, 1375, 1254, /* 500 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 510 */ 1254, 1302, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 510 */ 1254, 1254, 1302, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 520 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 530 */ 1254, 1254, 1254, 1254, 1254, 1402, 1254, 1254, 1254, 1254, + /* 530 */ 1254, 1254, 1254, 1254, 1254, 1254, 1402, 1254, 1254, 1254, /* 540 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 550 */ 1593, 1392, 1254, 1254, 1254, 1254, 1642, 1254, 1254, 1254, - /* 560 */ 1254, 1352, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 550 */ 1254, 1593, 1392, 1254, 1254, 1254, 1254, 1642, 1254, 1254, + /* 560 */ 1254, 1254, 1352, 1254, 1254, 1254, 1254, 
1254, 1254, 1254, /* 570 */ 1254, 1254, 1254, 1634, 1346, 1438, 1254, 1441, 1276, 1254, /* 580 */ 1266, 1254, 1254, }; @@ -173722,52 +176565,53 @@ static const YYACTIONTYPE yy_default[] = { static const YYCODETYPE yyFallback[] = { 0, /* $ => nothing */ 0, /* SEMI => nothing */ - 59, /* EXPLAIN => ID */ - 59, /* QUERY => ID */ - 59, /* PLAN => ID */ - 59, /* BEGIN => ID */ + 60, /* EXPLAIN => ID */ + 60, /* QUERY => ID */ + 60, /* PLAN => ID */ + 60, /* BEGIN => ID */ 0, /* TRANSACTION => nothing */ - 59, /* DEFERRED => ID */ - 59, /* IMMEDIATE => ID */ - 59, /* EXCLUSIVE => ID */ + 60, /* DEFERRED => ID */ + 60, /* IMMEDIATE => ID */ + 60, /* EXCLUSIVE => ID */ 0, /* COMMIT => nothing */ - 59, /* END => ID */ - 59, /* ROLLBACK => ID */ - 59, /* SAVEPOINT => ID */ - 59, /* RELEASE => ID */ + 60, /* END => ID */ + 60, /* ROLLBACK => ID */ + 60, /* SAVEPOINT => ID */ + 60, /* RELEASE => ID */ 0, /* TO => nothing */ 0, /* TABLE => nothing */ 0, /* CREATE => nothing */ - 59, /* IF => ID */ + 60, /* IF => ID */ 0, /* NOT => nothing */ 0, /* EXISTS => nothing */ - 59, /* TEMP => ID */ + 60, /* TEMP => ID */ 0, /* LP => nothing */ 0, /* RP => nothing */ 0, /* AS => nothing */ 0, /* COMMA => nothing */ - 59, /* WITHOUT => ID */ - 59, /* ABORT => ID */ - 59, /* ACTION => ID */ - 59, /* AFTER => ID */ - 59, /* ANALYZE => ID */ - 59, /* ASC => ID */ - 59, /* ATTACH => ID */ - 59, /* BEFORE => ID */ - 59, /* BY => ID */ - 59, /* CASCADE => ID */ - 59, /* CAST => ID */ - 59, /* CONFLICT => ID */ - 59, /* DATABASE => ID */ - 59, /* DESC => ID */ - 59, /* DETACH => ID */ - 59, /* EACH => ID */ - 59, /* FAIL => ID */ + 60, /* WITHOUT => ID */ + 60, /* ABORT => ID */ + 60, /* ACTION => ID */ + 60, /* AFTER => ID */ + 60, /* ANALYZE => ID */ + 60, /* ASC => ID */ + 60, /* ATTACH => ID */ + 60, /* BEFORE => ID */ + 60, /* BY => ID */ + 60, /* CASCADE => ID */ + 60, /* CAST => ID */ + 60, /* CONFLICT => ID */ + 60, /* DATABASE => ID */ + 60, /* DESC => ID */ + 60, /* DETACH => ID */ + 60, /* EACH => ID */ + 60, /* FAIL => ID */ 0, /* OR => nothing */ 0, /* AND => nothing */ 0, /* IS => nothing */ - 59, /* MATCH => ID */ - 59, /* LIKE_KW => ID */ + 0, /* ISNOT => nothing */ + 60, /* MATCH => ID */ + 60, /* LIKE_KW => ID */ 0, /* BETWEEN => nothing */ 0, /* IN => nothing */ 0, /* ISNULL => nothing */ @@ -173780,47 +176624,47 @@ static const YYCODETYPE yyFallback[] = { 0, /* GE => nothing */ 0, /* ESCAPE => nothing */ 0, /* ID => nothing */ - 59, /* COLUMNKW => ID */ - 59, /* DO => ID */ - 59, /* FOR => ID */ - 59, /* IGNORE => ID */ - 59, /* INITIALLY => ID */ - 59, /* INSTEAD => ID */ - 59, /* NO => ID */ - 59, /* KEY => ID */ - 59, /* OF => ID */ - 59, /* OFFSET => ID */ - 59, /* PRAGMA => ID */ - 59, /* RAISE => ID */ - 59, /* RECURSIVE => ID */ - 59, /* REPLACE => ID */ - 59, /* RESTRICT => ID */ - 59, /* ROW => ID */ - 59, /* ROWS => ID */ - 59, /* TRIGGER => ID */ - 59, /* VACUUM => ID */ - 59, /* VIEW => ID */ - 59, /* VIRTUAL => ID */ - 59, /* WITH => ID */ - 59, /* NULLS => ID */ - 59, /* FIRST => ID */ - 59, /* LAST => ID */ - 59, /* CURRENT => ID */ - 59, /* FOLLOWING => ID */ - 59, /* PARTITION => ID */ - 59, /* PRECEDING => ID */ - 59, /* RANGE => ID */ - 59, /* UNBOUNDED => ID */ - 59, /* EXCLUDE => ID */ - 59, /* GROUPS => ID */ - 59, /* OTHERS => ID */ - 59, /* TIES => ID */ - 59, /* GENERATED => ID */ - 59, /* ALWAYS => ID */ - 59, /* MATERIALIZED => ID */ - 59, /* REINDEX => ID */ - 59, /* RENAME => ID */ - 59, /* CTIME_KW => ID */ + 60, /* COLUMNKW => ID */ + 60, /* DO => ID */ + 60, /* FOR => 
ID */ + 60, /* IGNORE => ID */ + 60, /* INITIALLY => ID */ + 60, /* INSTEAD => ID */ + 60, /* NO => ID */ + 60, /* KEY => ID */ + 60, /* OF => ID */ + 60, /* OFFSET => ID */ + 60, /* PRAGMA => ID */ + 60, /* RAISE => ID */ + 60, /* RECURSIVE => ID */ + 60, /* REPLACE => ID */ + 60, /* RESTRICT => ID */ + 60, /* ROW => ID */ + 60, /* ROWS => ID */ + 60, /* TRIGGER => ID */ + 60, /* VACUUM => ID */ + 60, /* VIEW => ID */ + 60, /* VIRTUAL => ID */ + 60, /* WITH => ID */ + 60, /* NULLS => ID */ + 60, /* FIRST => ID */ + 60, /* LAST => ID */ + 60, /* CURRENT => ID */ + 60, /* FOLLOWING => ID */ + 60, /* PARTITION => ID */ + 60, /* PRECEDING => ID */ + 60, /* RANGE => ID */ + 60, /* UNBOUNDED => ID */ + 60, /* EXCLUDE => ID */ + 60, /* GROUPS => ID */ + 60, /* OTHERS => ID */ + 60, /* TIES => ID */ + 60, /* GENERATED => ID */ + 60, /* ALWAYS => ID */ + 60, /* MATERIALIZED => ID */ + 60, /* REINDEX => ID */ + 60, /* RENAME => ID */ + 60, /* CTIME_KW => ID */ 0, /* ANY => nothing */ 0, /* BITAND => nothing */ 0, /* BITOR => nothing */ @@ -173891,7 +176735,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* AGG_FUNCTION => nothing */ 0, /* AGG_COLUMN => nothing */ 0, /* TRUEFALSE => nothing */ - 0, /* ISNOT => nothing */ 0, /* FUNCTION => nothing */ 0, /* UPLUS => nothing */ 0, /* UMINUS => nothing */ @@ -173905,6 +176748,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* ERROR => nothing */ 0, /* QNUMBER => nothing */ 0, /* SPACE => nothing */ + 0, /* COMMENT => nothing */ 0, /* ILLEGAL => nothing */ }; #endif /* YYFALLBACK */ @@ -174035,132 +176879,132 @@ static const char *const yyTokenName[] = { /* 43 */ "OR", /* 44 */ "AND", /* 45 */ "IS", - /* 46 */ "MATCH", - /* 47 */ "LIKE_KW", - /* 48 */ "BETWEEN", - /* 49 */ "IN", - /* 50 */ "ISNULL", - /* 51 */ "NOTNULL", - /* 52 */ "NE", - /* 53 */ "EQ", - /* 54 */ "GT", - /* 55 */ "LE", - /* 56 */ "LT", - /* 57 */ "GE", - /* 58 */ "ESCAPE", - /* 59 */ "ID", - /* 60 */ "COLUMNKW", - /* 61 */ "DO", - /* 62 */ "FOR", - /* 63 */ "IGNORE", - /* 64 */ "INITIALLY", - /* 65 */ "INSTEAD", - /* 66 */ "NO", - /* 67 */ "KEY", - /* 68 */ "OF", - /* 69 */ "OFFSET", - /* 70 */ "PRAGMA", - /* 71 */ "RAISE", - /* 72 */ "RECURSIVE", - /* 73 */ "REPLACE", - /* 74 */ "RESTRICT", - /* 75 */ "ROW", - /* 76 */ "ROWS", - /* 77 */ "TRIGGER", - /* 78 */ "VACUUM", - /* 79 */ "VIEW", - /* 80 */ "VIRTUAL", - /* 81 */ "WITH", - /* 82 */ "NULLS", - /* 83 */ "FIRST", - /* 84 */ "LAST", - /* 85 */ "CURRENT", - /* 86 */ "FOLLOWING", - /* 87 */ "PARTITION", - /* 88 */ "PRECEDING", - /* 89 */ "RANGE", - /* 90 */ "UNBOUNDED", - /* 91 */ "EXCLUDE", - /* 92 */ "GROUPS", - /* 93 */ "OTHERS", - /* 94 */ "TIES", - /* 95 */ "GENERATED", - /* 96 */ "ALWAYS", - /* 97 */ "MATERIALIZED", - /* 98 */ "REINDEX", - /* 99 */ "RENAME", - /* 100 */ "CTIME_KW", - /* 101 */ "ANY", - /* 102 */ "BITAND", - /* 103 */ "BITOR", - /* 104 */ "LSHIFT", - /* 105 */ "RSHIFT", - /* 106 */ "PLUS", - /* 107 */ "MINUS", - /* 108 */ "STAR", - /* 109 */ "SLASH", - /* 110 */ "REM", - /* 111 */ "CONCAT", - /* 112 */ "PTR", - /* 113 */ "COLLATE", - /* 114 */ "BITNOT", - /* 115 */ "ON", - /* 116 */ "INDEXED", - /* 117 */ "STRING", - /* 118 */ "JOIN_KW", - /* 119 */ "CONSTRAINT", - /* 120 */ "DEFAULT", - /* 121 */ "NULL", - /* 122 */ "PRIMARY", - /* 123 */ "UNIQUE", - /* 124 */ "CHECK", - /* 125 */ "REFERENCES", - /* 126 */ "AUTOINCR", - /* 127 */ "INSERT", - /* 128 */ "DELETE", - /* 129 */ "UPDATE", - /* 130 */ "SET", - /* 131 */ "DEFERRABLE", - /* 132 */ "FOREIGN", - /* 133 */ "DROP", - /* 134 */ "UNION", - /* 135 */ "ALL", 
- /* 136 */ "EXCEPT", - /* 137 */ "INTERSECT", - /* 138 */ "SELECT", - /* 139 */ "VALUES", - /* 140 */ "DISTINCT", - /* 141 */ "DOT", - /* 142 */ "FROM", - /* 143 */ "JOIN", - /* 144 */ "USING", - /* 145 */ "ORDER", - /* 146 */ "GROUP", - /* 147 */ "HAVING", - /* 148 */ "LIMIT", - /* 149 */ "WHERE", - /* 150 */ "RETURNING", - /* 151 */ "INTO", - /* 152 */ "NOTHING", - /* 153 */ "FLOAT", - /* 154 */ "BLOB", - /* 155 */ "INTEGER", - /* 156 */ "VARIABLE", - /* 157 */ "CASE", - /* 158 */ "WHEN", - /* 159 */ "THEN", - /* 160 */ "ELSE", - /* 161 */ "INDEX", - /* 162 */ "ALTER", - /* 163 */ "ADD", - /* 164 */ "WINDOW", - /* 165 */ "OVER", - /* 166 */ "FILTER", - /* 167 */ "COLUMN", - /* 168 */ "AGG_FUNCTION", - /* 169 */ "AGG_COLUMN", - /* 170 */ "TRUEFALSE", - /* 171 */ "ISNOT", + /* 46 */ "ISNOT", + /* 47 */ "MATCH", + /* 48 */ "LIKE_KW", + /* 49 */ "BETWEEN", + /* 50 */ "IN", + /* 51 */ "ISNULL", + /* 52 */ "NOTNULL", + /* 53 */ "NE", + /* 54 */ "EQ", + /* 55 */ "GT", + /* 56 */ "LE", + /* 57 */ "LT", + /* 58 */ "GE", + /* 59 */ "ESCAPE", + /* 60 */ "ID", + /* 61 */ "COLUMNKW", + /* 62 */ "DO", + /* 63 */ "FOR", + /* 64 */ "IGNORE", + /* 65 */ "INITIALLY", + /* 66 */ "INSTEAD", + /* 67 */ "NO", + /* 68 */ "KEY", + /* 69 */ "OF", + /* 70 */ "OFFSET", + /* 71 */ "PRAGMA", + /* 72 */ "RAISE", + /* 73 */ "RECURSIVE", + /* 74 */ "REPLACE", + /* 75 */ "RESTRICT", + /* 76 */ "ROW", + /* 77 */ "ROWS", + /* 78 */ "TRIGGER", + /* 79 */ "VACUUM", + /* 80 */ "VIEW", + /* 81 */ "VIRTUAL", + /* 82 */ "WITH", + /* 83 */ "NULLS", + /* 84 */ "FIRST", + /* 85 */ "LAST", + /* 86 */ "CURRENT", + /* 87 */ "FOLLOWING", + /* 88 */ "PARTITION", + /* 89 */ "PRECEDING", + /* 90 */ "RANGE", + /* 91 */ "UNBOUNDED", + /* 92 */ "EXCLUDE", + /* 93 */ "GROUPS", + /* 94 */ "OTHERS", + /* 95 */ "TIES", + /* 96 */ "GENERATED", + /* 97 */ "ALWAYS", + /* 98 */ "MATERIALIZED", + /* 99 */ "REINDEX", + /* 100 */ "RENAME", + /* 101 */ "CTIME_KW", + /* 102 */ "ANY", + /* 103 */ "BITAND", + /* 104 */ "BITOR", + /* 105 */ "LSHIFT", + /* 106 */ "RSHIFT", + /* 107 */ "PLUS", + /* 108 */ "MINUS", + /* 109 */ "STAR", + /* 110 */ "SLASH", + /* 111 */ "REM", + /* 112 */ "CONCAT", + /* 113 */ "PTR", + /* 114 */ "COLLATE", + /* 115 */ "BITNOT", + /* 116 */ "ON", + /* 117 */ "INDEXED", + /* 118 */ "STRING", + /* 119 */ "JOIN_KW", + /* 120 */ "CONSTRAINT", + /* 121 */ "DEFAULT", + /* 122 */ "NULL", + /* 123 */ "PRIMARY", + /* 124 */ "UNIQUE", + /* 125 */ "CHECK", + /* 126 */ "REFERENCES", + /* 127 */ "AUTOINCR", + /* 128 */ "INSERT", + /* 129 */ "DELETE", + /* 130 */ "UPDATE", + /* 131 */ "SET", + /* 132 */ "DEFERRABLE", + /* 133 */ "FOREIGN", + /* 134 */ "DROP", + /* 135 */ "UNION", + /* 136 */ "ALL", + /* 137 */ "EXCEPT", + /* 138 */ "INTERSECT", + /* 139 */ "SELECT", + /* 140 */ "VALUES", + /* 141 */ "DISTINCT", + /* 142 */ "DOT", + /* 143 */ "FROM", + /* 144 */ "JOIN", + /* 145 */ "USING", + /* 146 */ "ORDER", + /* 147 */ "GROUP", + /* 148 */ "HAVING", + /* 149 */ "LIMIT", + /* 150 */ "WHERE", + /* 151 */ "RETURNING", + /* 152 */ "INTO", + /* 153 */ "NOTHING", + /* 154 */ "FLOAT", + /* 155 */ "BLOB", + /* 156 */ "INTEGER", + /* 157 */ "VARIABLE", + /* 158 */ "CASE", + /* 159 */ "WHEN", + /* 160 */ "THEN", + /* 161 */ "ELSE", + /* 162 */ "INDEX", + /* 163 */ "ALTER", + /* 164 */ "ADD", + /* 165 */ "WINDOW", + /* 166 */ "OVER", + /* 167 */ "FILTER", + /* 168 */ "COLUMN", + /* 169 */ "AGG_FUNCTION", + /* 170 */ "AGG_COLUMN", + /* 171 */ "TRUEFALSE", /* 172 */ "FUNCTION", /* 173 */ "UPLUS", /* 174 */ "UMINUS", @@ -174174,143 +177018,144 @@ static 
const char *const yyTokenName[] = { /* 182 */ "ERROR", /* 183 */ "QNUMBER", /* 184 */ "SPACE", - /* 185 */ "ILLEGAL", - /* 186 */ "input", - /* 187 */ "cmdlist", - /* 188 */ "ecmd", - /* 189 */ "cmdx", - /* 190 */ "explain", - /* 191 */ "cmd", - /* 192 */ "transtype", - /* 193 */ "trans_opt", - /* 194 */ "nm", - /* 195 */ "savepoint_opt", - /* 196 */ "create_table", - /* 197 */ "create_table_args", - /* 198 */ "createkw", - /* 199 */ "temp", - /* 200 */ "ifnotexists", - /* 201 */ "dbnm", - /* 202 */ "columnlist", - /* 203 */ "conslist_opt", - /* 204 */ "table_option_set", - /* 205 */ "select", - /* 206 */ "table_option", - /* 207 */ "columnname", - /* 208 */ "carglist", - /* 209 */ "typetoken", - /* 210 */ "typename", - /* 211 */ "signed", - /* 212 */ "plus_num", - /* 213 */ "minus_num", - /* 214 */ "scanpt", - /* 215 */ "scantok", - /* 216 */ "ccons", - /* 217 */ "term", - /* 218 */ "expr", - /* 219 */ "onconf", - /* 220 */ "sortorder", - /* 221 */ "autoinc", - /* 222 */ "eidlist_opt", - /* 223 */ "refargs", - /* 224 */ "defer_subclause", - /* 225 */ "generated", - /* 226 */ "refarg", - /* 227 */ "refact", - /* 228 */ "init_deferred_pred_opt", - /* 229 */ "conslist", - /* 230 */ "tconscomma", - /* 231 */ "tcons", - /* 232 */ "sortlist", - /* 233 */ "eidlist", - /* 234 */ "defer_subclause_opt", - /* 235 */ "orconf", - /* 236 */ "resolvetype", - /* 237 */ "raisetype", - /* 238 */ "ifexists", - /* 239 */ "fullname", - /* 240 */ "selectnowith", - /* 241 */ "oneselect", - /* 242 */ "wqlist", - /* 243 */ "multiselect_op", - /* 244 */ "distinct", - /* 245 */ "selcollist", - /* 246 */ "from", - /* 247 */ "where_opt", - /* 248 */ "groupby_opt", - /* 249 */ "having_opt", - /* 250 */ "orderby_opt", - /* 251 */ "limit_opt", - /* 252 */ "window_clause", - /* 253 */ "values", - /* 254 */ "nexprlist", - /* 255 */ "mvalues", - /* 256 */ "sclp", - /* 257 */ "as", - /* 258 */ "seltablist", - /* 259 */ "stl_prefix", - /* 260 */ "joinop", - /* 261 */ "on_using", - /* 262 */ "indexed_by", - /* 263 */ "exprlist", - /* 264 */ "xfullname", - /* 265 */ "idlist", - /* 266 */ "indexed_opt", - /* 267 */ "nulls", - /* 268 */ "with", - /* 269 */ "where_opt_ret", - /* 270 */ "setlist", - /* 271 */ "insert_cmd", - /* 272 */ "idlist_opt", - /* 273 */ "upsert", - /* 274 */ "returning", - /* 275 */ "filter_over", - /* 276 */ "likeop", - /* 277 */ "between_op", - /* 278 */ "in_op", - /* 279 */ "paren_exprlist", - /* 280 */ "case_operand", - /* 281 */ "case_exprlist", - /* 282 */ "case_else", - /* 283 */ "uniqueflag", - /* 284 */ "collate", - /* 285 */ "vinto", - /* 286 */ "nmnum", - /* 287 */ "trigger_decl", - /* 288 */ "trigger_cmd_list", - /* 289 */ "trigger_time", - /* 290 */ "trigger_event", - /* 291 */ "foreach_clause", - /* 292 */ "when_clause", - /* 293 */ "trigger_cmd", - /* 294 */ "trnm", - /* 295 */ "tridxby", - /* 296 */ "database_kw_opt", - /* 297 */ "key_opt", - /* 298 */ "add_column_fullname", - /* 299 */ "kwcolumn_opt", - /* 300 */ "create_vtab", - /* 301 */ "vtabarglist", - /* 302 */ "vtabarg", - /* 303 */ "vtabargtoken", - /* 304 */ "lp", - /* 305 */ "anylist", - /* 306 */ "wqitem", - /* 307 */ "wqas", - /* 308 */ "withnm", - /* 309 */ "windowdefn_list", - /* 310 */ "windowdefn", - /* 311 */ "window", - /* 312 */ "frame_opt", - /* 313 */ "part_opt", - /* 314 */ "filter_clause", - /* 315 */ "over_clause", - /* 316 */ "range_or_rows", - /* 317 */ "frame_bound", - /* 318 */ "frame_bound_s", - /* 319 */ "frame_bound_e", - /* 320 */ "frame_exclude_opt", - /* 321 */ "frame_exclude", + /* 185 */ "COMMENT", + /* 
186 */ "ILLEGAL", + /* 187 */ "input", + /* 188 */ "cmdlist", + /* 189 */ "ecmd", + /* 190 */ "cmdx", + /* 191 */ "explain", + /* 192 */ "cmd", + /* 193 */ "transtype", + /* 194 */ "trans_opt", + /* 195 */ "nm", + /* 196 */ "savepoint_opt", + /* 197 */ "create_table", + /* 198 */ "create_table_args", + /* 199 */ "createkw", + /* 200 */ "temp", + /* 201 */ "ifnotexists", + /* 202 */ "dbnm", + /* 203 */ "columnlist", + /* 204 */ "conslist_opt", + /* 205 */ "table_option_set", + /* 206 */ "select", + /* 207 */ "table_option", + /* 208 */ "columnname", + /* 209 */ "carglist", + /* 210 */ "typetoken", + /* 211 */ "typename", + /* 212 */ "signed", + /* 213 */ "plus_num", + /* 214 */ "minus_num", + /* 215 */ "scanpt", + /* 216 */ "scantok", + /* 217 */ "ccons", + /* 218 */ "term", + /* 219 */ "expr", + /* 220 */ "onconf", + /* 221 */ "sortorder", + /* 222 */ "autoinc", + /* 223 */ "eidlist_opt", + /* 224 */ "refargs", + /* 225 */ "defer_subclause", + /* 226 */ "generated", + /* 227 */ "refarg", + /* 228 */ "refact", + /* 229 */ "init_deferred_pred_opt", + /* 230 */ "conslist", + /* 231 */ "tconscomma", + /* 232 */ "tcons", + /* 233 */ "sortlist", + /* 234 */ "eidlist", + /* 235 */ "defer_subclause_opt", + /* 236 */ "orconf", + /* 237 */ "resolvetype", + /* 238 */ "raisetype", + /* 239 */ "ifexists", + /* 240 */ "fullname", + /* 241 */ "selectnowith", + /* 242 */ "oneselect", + /* 243 */ "wqlist", + /* 244 */ "multiselect_op", + /* 245 */ "distinct", + /* 246 */ "selcollist", + /* 247 */ "from", + /* 248 */ "where_opt", + /* 249 */ "groupby_opt", + /* 250 */ "having_opt", + /* 251 */ "orderby_opt", + /* 252 */ "limit_opt", + /* 253 */ "window_clause", + /* 254 */ "values", + /* 255 */ "nexprlist", + /* 256 */ "mvalues", + /* 257 */ "sclp", + /* 258 */ "as", + /* 259 */ "seltablist", + /* 260 */ "stl_prefix", + /* 261 */ "joinop", + /* 262 */ "on_using", + /* 263 */ "indexed_by", + /* 264 */ "exprlist", + /* 265 */ "xfullname", + /* 266 */ "idlist", + /* 267 */ "indexed_opt", + /* 268 */ "nulls", + /* 269 */ "with", + /* 270 */ "where_opt_ret", + /* 271 */ "setlist", + /* 272 */ "insert_cmd", + /* 273 */ "idlist_opt", + /* 274 */ "upsert", + /* 275 */ "returning", + /* 276 */ "filter_over", + /* 277 */ "likeop", + /* 278 */ "between_op", + /* 279 */ "in_op", + /* 280 */ "paren_exprlist", + /* 281 */ "case_operand", + /* 282 */ "case_exprlist", + /* 283 */ "case_else", + /* 284 */ "uniqueflag", + /* 285 */ "collate", + /* 286 */ "vinto", + /* 287 */ "nmnum", + /* 288 */ "trigger_decl", + /* 289 */ "trigger_cmd_list", + /* 290 */ "trigger_time", + /* 291 */ "trigger_event", + /* 292 */ "foreach_clause", + /* 293 */ "when_clause", + /* 294 */ "trigger_cmd", + /* 295 */ "trnm", + /* 296 */ "tridxby", + /* 297 */ "database_kw_opt", + /* 298 */ "key_opt", + /* 299 */ "add_column_fullname", + /* 300 */ "kwcolumn_opt", + /* 301 */ "create_vtab", + /* 302 */ "vtabarglist", + /* 303 */ "vtabarg", + /* 304 */ "vtabargtoken", + /* 305 */ "lp", + /* 306 */ "anylist", + /* 307 */ "wqitem", + /* 308 */ "wqas", + /* 309 */ "withnm", + /* 310 */ "windowdefn_list", + /* 311 */ "windowdefn", + /* 312 */ "window", + /* 313 */ "frame_opt", + /* 314 */ "part_opt", + /* 315 */ "filter_clause", + /* 316 */ "over_clause", + /* 317 */ "range_or_rows", + /* 318 */ "frame_bound", + /* 319 */ "frame_bound_s", + /* 320 */ "frame_bound_e", + /* 321 */ "frame_exclude_opt", + /* 322 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -174598,7 +177443,7 @@ static const char *const yyRuleName[] = { /* 277 
*/ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", /* 278 */ "trigger_cmd ::= scanpt select scanpt", /* 279 */ "expr ::= RAISE LP IGNORE RP", - /* 280 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 280 */ "expr ::= RAISE LP raisetype COMMA expr RP", /* 281 */ "raisetype ::= ROLLBACK", /* 282 */ "raisetype ::= ABORT", /* 283 */ "raisetype ::= FAIL", @@ -174850,98 +177695,98 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 205: /* select */ - case 240: /* selectnowith */ - case 241: /* oneselect */ - case 253: /* values */ - case 255: /* mvalues */ + case 206: /* select */ + case 241: /* selectnowith */ + case 242: /* oneselect */ + case 254: /* values */ + case 256: /* mvalues */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy555)); -} - break; - case 217: /* term */ - case 218: /* expr */ - case 247: /* where_opt */ - case 249: /* having_opt */ - case 269: /* where_opt_ret */ - case 280: /* case_operand */ - case 282: /* case_else */ - case 285: /* vinto */ - case 292: /* when_clause */ - case 297: /* key_opt */ - case 314: /* filter_clause */ +sqlite3SelectDelete(pParse->db, (yypminor->yy637)); +} + break; + case 218: /* term */ + case 219: /* expr */ + case 248: /* where_opt */ + case 250: /* having_opt */ + case 270: /* where_opt_ret */ + case 281: /* case_operand */ + case 283: /* case_else */ + case 286: /* vinto */ + case 293: /* when_clause */ + case 298: /* key_opt */ + case 315: /* filter_clause */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy454)); -} - break; - case 222: /* eidlist_opt */ - case 232: /* sortlist */ - case 233: /* eidlist */ - case 245: /* selcollist */ - case 248: /* groupby_opt */ - case 250: /* orderby_opt */ - case 254: /* nexprlist */ - case 256: /* sclp */ - case 263: /* exprlist */ - case 270: /* setlist */ - case 279: /* paren_exprlist */ - case 281: /* case_exprlist */ - case 313: /* part_opt */ +sqlite3ExprDelete(pParse->db, (yypminor->yy590)); +} + break; + case 223: /* eidlist_opt */ + case 233: /* sortlist */ + case 234: /* eidlist */ + case 246: /* selcollist */ + case 249: /* groupby_opt */ + case 251: /* orderby_opt */ + case 255: /* nexprlist */ + case 257: /* sclp */ + case 264: /* exprlist */ + case 271: /* setlist */ + case 280: /* paren_exprlist */ + case 282: /* case_exprlist */ + case 314: /* part_opt */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy14)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy402)); } break; - case 239: /* fullname */ - case 246: /* from */ - case 258: /* seltablist */ - case 259: /* stl_prefix */ - case 264: /* xfullname */ + case 240: /* fullname */ + case 247: /* from */ + case 259: /* seltablist */ + case 260: /* stl_prefix */ + case 265: /* xfullname */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy203)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy563)); } break; - case 242: /* wqlist */ + case 243: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy59)); +sqlite3WithDelete(pParse->db, (yypminor->yy125)); } break; - case 252: /* window_clause */ - case 309: /* windowdefn_list */ + case 253: /* window_clause */ + case 310: /* windowdefn_list */ { -sqlite3WindowListDelete(pParse->db, (yypminor->yy211)); +sqlite3WindowListDelete(pParse->db, (yypminor->yy483)); } break; - case 265: /* idlist */ - case 272: /* idlist_opt */ + case 266: /* idlist */ + case 273: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy132)); +sqlite3IdListDelete(pParse->db, (yypminor->yy204)); } 
break; - case 275: /* filter_over */ - case 310: /* windowdefn */ - case 311: /* window */ - case 312: /* frame_opt */ - case 315: /* over_clause */ + case 276: /* filter_over */ + case 311: /* windowdefn */ + case 312: /* window */ + case 313: /* frame_opt */ + case 316: /* over_clause */ { -sqlite3WindowDelete(pParse->db, (yypminor->yy211)); +sqlite3WindowDelete(pParse->db, (yypminor->yy483)); } break; - case 288: /* trigger_cmd_list */ - case 293: /* trigger_cmd */ + case 289: /* trigger_cmd_list */ + case 294: /* trigger_cmd */ { -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy427)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy319)); } break; - case 290: /* trigger_event */ + case 291: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy286).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy28).b); } break; - case 317: /* frame_bound */ - case 318: /* frame_bound_s */ - case 319: /* frame_bound_e */ + case 318: /* frame_bound */ + case 319: /* frame_bound_s */ + case 320: /* frame_bound_e */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy509).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy205).pExpr); } break; /********* End destructor definitions *****************************************/ @@ -175243,415 +178088,415 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 190, /* (0) explain ::= EXPLAIN */ - 190, /* (1) explain ::= EXPLAIN QUERY PLAN */ - 189, /* (2) cmdx ::= cmd */ - 191, /* (3) cmd ::= BEGIN transtype trans_opt */ - 192, /* (4) transtype ::= */ - 192, /* (5) transtype ::= DEFERRED */ - 192, /* (6) transtype ::= IMMEDIATE */ - 192, /* (7) transtype ::= EXCLUSIVE */ - 191, /* (8) cmd ::= COMMIT|END trans_opt */ - 191, /* (9) cmd ::= ROLLBACK trans_opt */ - 191, /* (10) cmd ::= SAVEPOINT nm */ - 191, /* (11) cmd ::= RELEASE savepoint_opt nm */ - 191, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ - 196, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ - 198, /* (14) createkw ::= CREATE */ - 200, /* (15) ifnotexists ::= */ - 200, /* (16) ifnotexists ::= IF NOT EXISTS */ - 199, /* (17) temp ::= TEMP */ - 199, /* (18) temp ::= */ - 197, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ - 197, /* (20) create_table_args ::= AS select */ - 204, /* (21) table_option_set ::= */ - 204, /* (22) table_option_set ::= table_option_set COMMA table_option */ - 206, /* (23) table_option ::= WITHOUT nm */ - 206, /* (24) table_option ::= nm */ - 207, /* (25) columnname ::= nm typetoken */ - 209, /* (26) typetoken ::= */ - 209, /* (27) typetoken ::= typename LP signed RP */ - 209, /* (28) typetoken ::= typename LP signed COMMA signed RP */ - 210, /* (29) typename ::= typename ID|STRING */ - 214, /* (30) scanpt ::= */ - 215, /* (31) scantok ::= */ - 216, /* (32) ccons ::= CONSTRAINT nm */ - 216, /* (33) ccons ::= DEFAULT scantok term */ - 216, /* (34) ccons ::= DEFAULT LP expr RP */ - 216, /* (35) ccons ::= DEFAULT PLUS scantok term */ - 216, /* (36) ccons ::= DEFAULT MINUS scantok term */ - 216, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ - 216, /* (38) ccons ::= NOT NULL onconf */ - 216, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - 216, /* (40) ccons ::= UNIQUE onconf */ - 216, /* (41) ccons ::= CHECK LP expr RP */ - 216, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ - 216, /* (43) ccons ::= defer_subclause */ - 216, /* (44) ccons ::= COLLATE ID|STRING */ - 225, /* 
(45) generated ::= LP expr RP */ - 225, /* (46) generated ::= LP expr RP ID */ - 221, /* (47) autoinc ::= */ - 221, /* (48) autoinc ::= AUTOINCR */ - 223, /* (49) refargs ::= */ - 223, /* (50) refargs ::= refargs refarg */ - 226, /* (51) refarg ::= MATCH nm */ - 226, /* (52) refarg ::= ON INSERT refact */ - 226, /* (53) refarg ::= ON DELETE refact */ - 226, /* (54) refarg ::= ON UPDATE refact */ - 227, /* (55) refact ::= SET NULL */ - 227, /* (56) refact ::= SET DEFAULT */ - 227, /* (57) refact ::= CASCADE */ - 227, /* (58) refact ::= RESTRICT */ - 227, /* (59) refact ::= NO ACTION */ - 224, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - 224, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 228, /* (62) init_deferred_pred_opt ::= */ - 228, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - 228, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 203, /* (65) conslist_opt ::= */ - 230, /* (66) tconscomma ::= COMMA */ - 231, /* (67) tcons ::= CONSTRAINT nm */ - 231, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - 231, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ - 231, /* (70) tcons ::= CHECK LP expr RP onconf */ - 231, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 234, /* (72) defer_subclause_opt ::= */ - 219, /* (73) onconf ::= */ - 219, /* (74) onconf ::= ON CONFLICT resolvetype */ - 235, /* (75) orconf ::= */ - 235, /* (76) orconf ::= OR resolvetype */ - 236, /* (77) resolvetype ::= IGNORE */ - 236, /* (78) resolvetype ::= REPLACE */ - 191, /* (79) cmd ::= DROP TABLE ifexists fullname */ - 238, /* (80) ifexists ::= IF EXISTS */ - 238, /* (81) ifexists ::= */ - 191, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - 191, /* (83) cmd ::= DROP VIEW ifexists fullname */ - 191, /* (84) cmd ::= select */ - 205, /* (85) select ::= WITH wqlist selectnowith */ - 205, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ - 205, /* (87) select ::= selectnowith */ - 240, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ - 243, /* (89) multiselect_op ::= UNION */ - 243, /* (90) multiselect_op ::= UNION ALL */ - 243, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ - 241, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - 241, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - 253, /* (94) values ::= VALUES LP nexprlist RP */ - 241, /* (95) oneselect ::= mvalues */ - 255, /* (96) mvalues ::= values COMMA LP nexprlist RP */ - 255, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ - 244, /* (98) distinct ::= DISTINCT */ - 244, /* (99) distinct ::= ALL */ - 244, /* (100) distinct ::= */ - 256, /* (101) sclp ::= */ - 245, /* (102) selcollist ::= sclp scanpt expr scanpt as */ - 245, /* (103) selcollist ::= sclp scanpt STAR */ - 245, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ - 257, /* (105) as ::= AS nm */ - 257, /* (106) as ::= */ - 246, /* (107) from ::= */ - 246, /* (108) from ::= FROM seltablist */ - 259, /* (109) stl_prefix ::= seltablist joinop */ - 259, /* (110) stl_prefix ::= */ - 258, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ - 258, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ - 258, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ - 258, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ - 
258, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ - 201, /* (116) dbnm ::= */ - 201, /* (117) dbnm ::= DOT nm */ - 239, /* (118) fullname ::= nm */ - 239, /* (119) fullname ::= nm DOT nm */ - 264, /* (120) xfullname ::= nm */ - 264, /* (121) xfullname ::= nm DOT nm */ - 264, /* (122) xfullname ::= nm DOT nm AS nm */ - 264, /* (123) xfullname ::= nm AS nm */ - 260, /* (124) joinop ::= COMMA|JOIN */ - 260, /* (125) joinop ::= JOIN_KW JOIN */ - 260, /* (126) joinop ::= JOIN_KW nm JOIN */ - 260, /* (127) joinop ::= JOIN_KW nm nm JOIN */ - 261, /* (128) on_using ::= ON expr */ - 261, /* (129) on_using ::= USING LP idlist RP */ - 261, /* (130) on_using ::= */ - 266, /* (131) indexed_opt ::= */ - 262, /* (132) indexed_by ::= INDEXED BY nm */ - 262, /* (133) indexed_by ::= NOT INDEXED */ - 250, /* (134) orderby_opt ::= */ - 250, /* (135) orderby_opt ::= ORDER BY sortlist */ - 232, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ - 232, /* (137) sortlist ::= expr sortorder nulls */ - 220, /* (138) sortorder ::= ASC */ - 220, /* (139) sortorder ::= DESC */ - 220, /* (140) sortorder ::= */ - 267, /* (141) nulls ::= NULLS FIRST */ - 267, /* (142) nulls ::= NULLS LAST */ - 267, /* (143) nulls ::= */ - 248, /* (144) groupby_opt ::= */ - 248, /* (145) groupby_opt ::= GROUP BY nexprlist */ - 249, /* (146) having_opt ::= */ - 249, /* (147) having_opt ::= HAVING expr */ - 251, /* (148) limit_opt ::= */ - 251, /* (149) limit_opt ::= LIMIT expr */ - 251, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ - 251, /* (151) limit_opt ::= LIMIT expr COMMA expr */ - 191, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ - 247, /* (153) where_opt ::= */ - 247, /* (154) where_opt ::= WHERE expr */ - 269, /* (155) where_opt_ret ::= */ - 269, /* (156) where_opt_ret ::= WHERE expr */ - 269, /* (157) where_opt_ret ::= RETURNING selcollist */ - 269, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ - 191, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ - 270, /* (160) setlist ::= setlist COMMA nm EQ expr */ - 270, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ - 270, /* (162) setlist ::= nm EQ expr */ - 270, /* (163) setlist ::= LP idlist RP EQ expr */ - 191, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - 191, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 273, /* (166) upsert ::= */ - 273, /* (167) upsert ::= RETURNING selcollist */ - 273, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - 273, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - 273, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ - 273, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - 274, /* (172) returning ::= RETURNING selcollist */ - 271, /* (173) insert_cmd ::= INSERT orconf */ - 271, /* (174) insert_cmd ::= REPLACE */ - 272, /* (175) idlist_opt ::= */ - 272, /* (176) idlist_opt ::= LP idlist RP */ - 265, /* (177) idlist ::= idlist COMMA nm */ - 265, /* (178) idlist ::= nm */ - 218, /* (179) expr ::= LP expr RP */ - 218, /* (180) expr ::= ID|INDEXED|JOIN_KW */ - 218, /* (181) expr ::= nm DOT nm */ - 218, /* (182) expr ::= nm DOT nm DOT nm */ - 217, /* (183) term ::= NULL|FLOAT|BLOB */ - 217, /* (184) term ::= STRING */ - 217, /* (185) term ::= INTEGER */ - 218, /* (186) expr ::= VARIABLE */ - 218, /* (187) expr ::= expr COLLATE ID|STRING */ - 
218, /* (188) expr ::= CAST LP expr AS typetoken RP */ - 218, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ - 218, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ - 218, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ - 218, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ - 218, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ - 218, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ - 217, /* (195) term ::= CTIME_KW */ - 218, /* (196) expr ::= LP nexprlist COMMA expr RP */ - 218, /* (197) expr ::= expr AND expr */ - 218, /* (198) expr ::= expr OR expr */ - 218, /* (199) expr ::= expr LT|GT|GE|LE expr */ - 218, /* (200) expr ::= expr EQ|NE expr */ - 218, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 218, /* (202) expr ::= expr PLUS|MINUS expr */ - 218, /* (203) expr ::= expr STAR|SLASH|REM expr */ - 218, /* (204) expr ::= expr CONCAT expr */ - 276, /* (205) likeop ::= NOT LIKE_KW|MATCH */ - 218, /* (206) expr ::= expr likeop expr */ - 218, /* (207) expr ::= expr likeop expr ESCAPE expr */ - 218, /* (208) expr ::= expr ISNULL|NOTNULL */ - 218, /* (209) expr ::= expr NOT NULL */ - 218, /* (210) expr ::= expr IS expr */ - 218, /* (211) expr ::= expr IS NOT expr */ - 218, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ - 218, /* (213) expr ::= expr IS DISTINCT FROM expr */ - 218, /* (214) expr ::= NOT expr */ - 218, /* (215) expr ::= BITNOT expr */ - 218, /* (216) expr ::= PLUS|MINUS expr */ - 218, /* (217) expr ::= expr PTR expr */ - 277, /* (218) between_op ::= BETWEEN */ - 277, /* (219) between_op ::= NOT BETWEEN */ - 218, /* (220) expr ::= expr between_op expr AND expr */ - 278, /* (221) in_op ::= IN */ - 278, /* (222) in_op ::= NOT IN */ - 218, /* (223) expr ::= expr in_op LP exprlist RP */ - 218, /* (224) expr ::= LP select RP */ - 218, /* (225) expr ::= expr in_op LP select RP */ - 218, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ - 218, /* (227) expr ::= EXISTS LP select RP */ - 218, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ - 281, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 281, /* (230) case_exprlist ::= WHEN expr THEN expr */ - 282, /* (231) case_else ::= ELSE expr */ - 282, /* (232) case_else ::= */ - 280, /* (233) case_operand ::= */ - 263, /* (234) exprlist ::= */ - 254, /* (235) nexprlist ::= nexprlist COMMA expr */ - 254, /* (236) nexprlist ::= expr */ - 279, /* (237) paren_exprlist ::= */ - 279, /* (238) paren_exprlist ::= LP exprlist RP */ - 191, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 283, /* (240) uniqueflag ::= UNIQUE */ - 283, /* (241) uniqueflag ::= */ - 222, /* (242) eidlist_opt ::= */ - 222, /* (243) eidlist_opt ::= LP eidlist RP */ - 233, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ - 233, /* (245) eidlist ::= nm collate sortorder */ - 284, /* (246) collate ::= */ - 284, /* (247) collate ::= COLLATE ID|STRING */ - 191, /* (248) cmd ::= DROP INDEX ifexists fullname */ - 191, /* (249) cmd ::= VACUUM vinto */ - 191, /* (250) cmd ::= VACUUM nm vinto */ - 285, /* (251) vinto ::= INTO expr */ - 285, /* (252) vinto ::= */ - 191, /* (253) cmd ::= PRAGMA nm dbnm */ - 191, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 191, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 191, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 191, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 212, /* (258) 
plus_num ::= PLUS INTEGER|FLOAT */ - 213, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ - 191, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 287, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - 289, /* (262) trigger_time ::= BEFORE|AFTER */ - 289, /* (263) trigger_time ::= INSTEAD OF */ - 289, /* (264) trigger_time ::= */ - 290, /* (265) trigger_event ::= DELETE|INSERT */ - 290, /* (266) trigger_event ::= UPDATE */ - 290, /* (267) trigger_event ::= UPDATE OF idlist */ - 292, /* (268) when_clause ::= */ - 292, /* (269) when_clause ::= WHEN expr */ - 288, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 288, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ - 294, /* (272) trnm ::= nm DOT nm */ - 295, /* (273) tridxby ::= INDEXED BY nm */ - 295, /* (274) tridxby ::= NOT INDEXED */ - 293, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 293, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - 293, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 293, /* (278) trigger_cmd ::= scanpt select scanpt */ - 218, /* (279) expr ::= RAISE LP IGNORE RP */ - 218, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ - 237, /* (281) raisetype ::= ROLLBACK */ - 237, /* (282) raisetype ::= ABORT */ - 237, /* (283) raisetype ::= FAIL */ - 191, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ - 191, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 191, /* (286) cmd ::= DETACH database_kw_opt expr */ - 297, /* (287) key_opt ::= */ - 297, /* (288) key_opt ::= KEY expr */ - 191, /* (289) cmd ::= REINDEX */ - 191, /* (290) cmd ::= REINDEX nm dbnm */ - 191, /* (291) cmd ::= ANALYZE */ - 191, /* (292) cmd ::= ANALYZE nm dbnm */ - 191, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 191, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - 191, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - 298, /* (296) add_column_fullname ::= fullname */ - 191, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 191, /* (298) cmd ::= create_vtab */ - 191, /* (299) cmd ::= create_vtab LP vtabarglist RP */ - 300, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 302, /* (301) vtabarg ::= */ - 303, /* (302) vtabargtoken ::= ANY */ - 303, /* (303) vtabargtoken ::= lp anylist RP */ - 304, /* (304) lp ::= LP */ - 268, /* (305) with ::= WITH wqlist */ - 268, /* (306) with ::= WITH RECURSIVE wqlist */ - 307, /* (307) wqas ::= AS */ - 307, /* (308) wqas ::= AS MATERIALIZED */ - 307, /* (309) wqas ::= AS NOT MATERIALIZED */ - 306, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ - 308, /* (311) withnm ::= nm */ - 242, /* (312) wqlist ::= wqitem */ - 242, /* (313) wqlist ::= wqlist COMMA wqitem */ - 309, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 310, /* (315) windowdefn ::= nm AS LP window RP */ - 311, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 311, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 311, /* (318) window ::= ORDER BY sortlist frame_opt */ - 311, /* (319) window ::= nm ORDER BY sortlist frame_opt */ - 311, /* (320) window ::= nm frame_opt */ - 312, /* (321) frame_opt ::= */ - 312, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 312, /* (323) frame_opt ::= 
range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 316, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ - 318, /* (325) frame_bound_s ::= frame_bound */ - 318, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ - 319, /* (327) frame_bound_e ::= frame_bound */ - 319, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 317, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ - 317, /* (330) frame_bound ::= CURRENT ROW */ - 320, /* (331) frame_exclude_opt ::= */ - 320, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 321, /* (333) frame_exclude ::= NO OTHERS */ - 321, /* (334) frame_exclude ::= CURRENT ROW */ - 321, /* (335) frame_exclude ::= GROUP|TIES */ - 252, /* (336) window_clause ::= WINDOW windowdefn_list */ - 275, /* (337) filter_over ::= filter_clause over_clause */ - 275, /* (338) filter_over ::= over_clause */ - 275, /* (339) filter_over ::= filter_clause */ - 315, /* (340) over_clause ::= OVER LP window RP */ - 315, /* (341) over_clause ::= OVER nm */ - 314, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ - 217, /* (343) term ::= QNUMBER */ - 186, /* (344) input ::= cmdlist */ - 187, /* (345) cmdlist ::= cmdlist ecmd */ - 187, /* (346) cmdlist ::= ecmd */ - 188, /* (347) ecmd ::= SEMI */ - 188, /* (348) ecmd ::= cmdx SEMI */ - 188, /* (349) ecmd ::= explain cmdx SEMI */ - 193, /* (350) trans_opt ::= */ - 193, /* (351) trans_opt ::= TRANSACTION */ - 193, /* (352) trans_opt ::= TRANSACTION nm */ - 195, /* (353) savepoint_opt ::= SAVEPOINT */ - 195, /* (354) savepoint_opt ::= */ - 191, /* (355) cmd ::= create_table create_table_args */ - 204, /* (356) table_option_set ::= table_option */ - 202, /* (357) columnlist ::= columnlist COMMA columnname carglist */ - 202, /* (358) columnlist ::= columnname carglist */ - 194, /* (359) nm ::= ID|INDEXED|JOIN_KW */ - 194, /* (360) nm ::= STRING */ - 209, /* (361) typetoken ::= typename */ - 210, /* (362) typename ::= ID|STRING */ - 211, /* (363) signed ::= plus_num */ - 211, /* (364) signed ::= minus_num */ - 208, /* (365) carglist ::= carglist ccons */ - 208, /* (366) carglist ::= */ - 216, /* (367) ccons ::= NULL onconf */ - 216, /* (368) ccons ::= GENERATED ALWAYS AS generated */ - 216, /* (369) ccons ::= AS generated */ - 203, /* (370) conslist_opt ::= COMMA conslist */ - 229, /* (371) conslist ::= conslist tconscomma tcons */ - 229, /* (372) conslist ::= tcons */ - 230, /* (373) tconscomma ::= */ - 234, /* (374) defer_subclause_opt ::= defer_subclause */ - 236, /* (375) resolvetype ::= raisetype */ - 240, /* (376) selectnowith ::= oneselect */ - 241, /* (377) oneselect ::= values */ - 256, /* (378) sclp ::= selcollist COMMA */ - 257, /* (379) as ::= ID|STRING */ - 266, /* (380) indexed_opt ::= indexed_by */ - 274, /* (381) returning ::= */ - 218, /* (382) expr ::= term */ - 276, /* (383) likeop ::= LIKE_KW|MATCH */ - 280, /* (384) case_operand ::= expr */ - 263, /* (385) exprlist ::= nexprlist */ - 286, /* (386) nmnum ::= plus_num */ - 286, /* (387) nmnum ::= nm */ - 286, /* (388) nmnum ::= ON */ - 286, /* (389) nmnum ::= DELETE */ - 286, /* (390) nmnum ::= DEFAULT */ - 212, /* (391) plus_num ::= INTEGER|FLOAT */ - 291, /* (392) foreach_clause ::= */ - 291, /* (393) foreach_clause ::= FOR EACH ROW */ - 294, /* (394) trnm ::= nm */ - 295, /* (395) tridxby ::= */ - 296, /* (396) database_kw_opt ::= DATABASE */ - 296, /* (397) database_kw_opt ::= */ - 299, /* (398) kwcolumn_opt ::= */ - 299, /* (399) kwcolumn_opt ::= COLUMNKW */ - 301, /* (400) vtabarglist ::= vtabarg */ - 301, /* (401) vtabarglist ::= 
vtabarglist COMMA vtabarg */ - 302, /* (402) vtabarg ::= vtabarg vtabargtoken */ - 305, /* (403) anylist ::= */ - 305, /* (404) anylist ::= anylist LP anylist RP */ - 305, /* (405) anylist ::= anylist ANY */ - 268, /* (406) with ::= */ - 309, /* (407) windowdefn_list ::= windowdefn */ - 311, /* (408) window ::= frame_opt */ + 191, /* (0) explain ::= EXPLAIN */ + 191, /* (1) explain ::= EXPLAIN QUERY PLAN */ + 190, /* (2) cmdx ::= cmd */ + 192, /* (3) cmd ::= BEGIN transtype trans_opt */ + 193, /* (4) transtype ::= */ + 193, /* (5) transtype ::= DEFERRED */ + 193, /* (6) transtype ::= IMMEDIATE */ + 193, /* (7) transtype ::= EXCLUSIVE */ + 192, /* (8) cmd ::= COMMIT|END trans_opt */ + 192, /* (9) cmd ::= ROLLBACK trans_opt */ + 192, /* (10) cmd ::= SAVEPOINT nm */ + 192, /* (11) cmd ::= RELEASE savepoint_opt nm */ + 192, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ + 197, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ + 199, /* (14) createkw ::= CREATE */ + 201, /* (15) ifnotexists ::= */ + 201, /* (16) ifnotexists ::= IF NOT EXISTS */ + 200, /* (17) temp ::= TEMP */ + 200, /* (18) temp ::= */ + 198, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ + 198, /* (20) create_table_args ::= AS select */ + 205, /* (21) table_option_set ::= */ + 205, /* (22) table_option_set ::= table_option_set COMMA table_option */ + 207, /* (23) table_option ::= WITHOUT nm */ + 207, /* (24) table_option ::= nm */ + 208, /* (25) columnname ::= nm typetoken */ + 210, /* (26) typetoken ::= */ + 210, /* (27) typetoken ::= typename LP signed RP */ + 210, /* (28) typetoken ::= typename LP signed COMMA signed RP */ + 211, /* (29) typename ::= typename ID|STRING */ + 215, /* (30) scanpt ::= */ + 216, /* (31) scantok ::= */ + 217, /* (32) ccons ::= CONSTRAINT nm */ + 217, /* (33) ccons ::= DEFAULT scantok term */ + 217, /* (34) ccons ::= DEFAULT LP expr RP */ + 217, /* (35) ccons ::= DEFAULT PLUS scantok term */ + 217, /* (36) ccons ::= DEFAULT MINUS scantok term */ + 217, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ + 217, /* (38) ccons ::= NOT NULL onconf */ + 217, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + 217, /* (40) ccons ::= UNIQUE onconf */ + 217, /* (41) ccons ::= CHECK LP expr RP */ + 217, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ + 217, /* (43) ccons ::= defer_subclause */ + 217, /* (44) ccons ::= COLLATE ID|STRING */ + 226, /* (45) generated ::= LP expr RP */ + 226, /* (46) generated ::= LP expr RP ID */ + 222, /* (47) autoinc ::= */ + 222, /* (48) autoinc ::= AUTOINCR */ + 224, /* (49) refargs ::= */ + 224, /* (50) refargs ::= refargs refarg */ + 227, /* (51) refarg ::= MATCH nm */ + 227, /* (52) refarg ::= ON INSERT refact */ + 227, /* (53) refarg ::= ON DELETE refact */ + 227, /* (54) refarg ::= ON UPDATE refact */ + 228, /* (55) refact ::= SET NULL */ + 228, /* (56) refact ::= SET DEFAULT */ + 228, /* (57) refact ::= CASCADE */ + 228, /* (58) refact ::= RESTRICT */ + 228, /* (59) refact ::= NO ACTION */ + 225, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ + 225, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 229, /* (62) init_deferred_pred_opt ::= */ + 229, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + 229, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 204, /* (65) conslist_opt ::= */ + 231, /* (66) tconscomma ::= COMMA */ + 232, /* (67) tcons ::= CONSTRAINT nm */ + 232, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + 232, 
/* (69) tcons ::= UNIQUE LP sortlist RP onconf */ + 232, /* (70) tcons ::= CHECK LP expr RP onconf */ + 232, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 235, /* (72) defer_subclause_opt ::= */ + 220, /* (73) onconf ::= */ + 220, /* (74) onconf ::= ON CONFLICT resolvetype */ + 236, /* (75) orconf ::= */ + 236, /* (76) orconf ::= OR resolvetype */ + 237, /* (77) resolvetype ::= IGNORE */ + 237, /* (78) resolvetype ::= REPLACE */ + 192, /* (79) cmd ::= DROP TABLE ifexists fullname */ + 239, /* (80) ifexists ::= IF EXISTS */ + 239, /* (81) ifexists ::= */ + 192, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + 192, /* (83) cmd ::= DROP VIEW ifexists fullname */ + 192, /* (84) cmd ::= select */ + 206, /* (85) select ::= WITH wqlist selectnowith */ + 206, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ + 206, /* (87) select ::= selectnowith */ + 241, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ + 244, /* (89) multiselect_op ::= UNION */ + 244, /* (90) multiselect_op ::= UNION ALL */ + 244, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ + 242, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + 242, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + 254, /* (94) values ::= VALUES LP nexprlist RP */ + 242, /* (95) oneselect ::= mvalues */ + 256, /* (96) mvalues ::= values COMMA LP nexprlist RP */ + 256, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ + 245, /* (98) distinct ::= DISTINCT */ + 245, /* (99) distinct ::= ALL */ + 245, /* (100) distinct ::= */ + 257, /* (101) sclp ::= */ + 246, /* (102) selcollist ::= sclp scanpt expr scanpt as */ + 246, /* (103) selcollist ::= sclp scanpt STAR */ + 246, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ + 258, /* (105) as ::= AS nm */ + 258, /* (106) as ::= */ + 247, /* (107) from ::= */ + 247, /* (108) from ::= FROM seltablist */ + 260, /* (109) stl_prefix ::= seltablist joinop */ + 260, /* (110) stl_prefix ::= */ + 259, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ + 259, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + 259, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + 259, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ + 259, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ + 202, /* (116) dbnm ::= */ + 202, /* (117) dbnm ::= DOT nm */ + 240, /* (118) fullname ::= nm */ + 240, /* (119) fullname ::= nm DOT nm */ + 265, /* (120) xfullname ::= nm */ + 265, /* (121) xfullname ::= nm DOT nm */ + 265, /* (122) xfullname ::= nm DOT nm AS nm */ + 265, /* (123) xfullname ::= nm AS nm */ + 261, /* (124) joinop ::= COMMA|JOIN */ + 261, /* (125) joinop ::= JOIN_KW JOIN */ + 261, /* (126) joinop ::= JOIN_KW nm JOIN */ + 261, /* (127) joinop ::= JOIN_KW nm nm JOIN */ + 262, /* (128) on_using ::= ON expr */ + 262, /* (129) on_using ::= USING LP idlist RP */ + 262, /* (130) on_using ::= */ + 267, /* (131) indexed_opt ::= */ + 263, /* (132) indexed_by ::= INDEXED BY nm */ + 263, /* (133) indexed_by ::= NOT INDEXED */ + 251, /* (134) orderby_opt ::= */ + 251, /* (135) orderby_opt ::= ORDER BY sortlist */ + 233, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ + 233, /* (137) sortlist ::= expr sortorder nulls */ + 221, /* (138) sortorder ::= ASC */ + 221, /* (139) sortorder ::= DESC */ + 221, /* 
(140) sortorder ::= */ + 268, /* (141) nulls ::= NULLS FIRST */ + 268, /* (142) nulls ::= NULLS LAST */ + 268, /* (143) nulls ::= */ + 249, /* (144) groupby_opt ::= */ + 249, /* (145) groupby_opt ::= GROUP BY nexprlist */ + 250, /* (146) having_opt ::= */ + 250, /* (147) having_opt ::= HAVING expr */ + 252, /* (148) limit_opt ::= */ + 252, /* (149) limit_opt ::= LIMIT expr */ + 252, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ + 252, /* (151) limit_opt ::= LIMIT expr COMMA expr */ + 192, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ + 248, /* (153) where_opt ::= */ + 248, /* (154) where_opt ::= WHERE expr */ + 270, /* (155) where_opt_ret ::= */ + 270, /* (156) where_opt_ret ::= WHERE expr */ + 270, /* (157) where_opt_ret ::= RETURNING selcollist */ + 270, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ + 192, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + 271, /* (160) setlist ::= setlist COMMA nm EQ expr */ + 271, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ + 271, /* (162) setlist ::= nm EQ expr */ + 271, /* (163) setlist ::= LP idlist RP EQ expr */ + 192, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + 192, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 274, /* (166) upsert ::= */ + 274, /* (167) upsert ::= RETURNING selcollist */ + 274, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + 274, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + 274, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ + 274, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + 275, /* (172) returning ::= RETURNING selcollist */ + 272, /* (173) insert_cmd ::= INSERT orconf */ + 272, /* (174) insert_cmd ::= REPLACE */ + 273, /* (175) idlist_opt ::= */ + 273, /* (176) idlist_opt ::= LP idlist RP */ + 266, /* (177) idlist ::= idlist COMMA nm */ + 266, /* (178) idlist ::= nm */ + 219, /* (179) expr ::= LP expr RP */ + 219, /* (180) expr ::= ID|INDEXED|JOIN_KW */ + 219, /* (181) expr ::= nm DOT nm */ + 219, /* (182) expr ::= nm DOT nm DOT nm */ + 218, /* (183) term ::= NULL|FLOAT|BLOB */ + 218, /* (184) term ::= STRING */ + 218, /* (185) term ::= INTEGER */ + 219, /* (186) expr ::= VARIABLE */ + 219, /* (187) expr ::= expr COLLATE ID|STRING */ + 219, /* (188) expr ::= CAST LP expr AS typetoken RP */ + 219, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + 219, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + 219, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + 219, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + 219, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + 219, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + 218, /* (195) term ::= CTIME_KW */ + 219, /* (196) expr ::= LP nexprlist COMMA expr RP */ + 219, /* (197) expr ::= expr AND expr */ + 219, /* (198) expr ::= expr OR expr */ + 219, /* (199) expr ::= expr LT|GT|GE|LE expr */ + 219, /* (200) expr ::= expr EQ|NE expr */ + 219, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 219, /* (202) expr ::= expr PLUS|MINUS expr */ + 219, /* (203) expr ::= expr STAR|SLASH|REM expr */ + 219, /* (204) expr ::= expr CONCAT expr */ + 277, /* (205) likeop ::= NOT LIKE_KW|MATCH */ + 219, /* (206) expr ::= expr likeop expr */ + 
219, /* (207) expr ::= expr likeop expr ESCAPE expr */ + 219, /* (208) expr ::= expr ISNULL|NOTNULL */ + 219, /* (209) expr ::= expr NOT NULL */ + 219, /* (210) expr ::= expr IS expr */ + 219, /* (211) expr ::= expr IS NOT expr */ + 219, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ + 219, /* (213) expr ::= expr IS DISTINCT FROM expr */ + 219, /* (214) expr ::= NOT expr */ + 219, /* (215) expr ::= BITNOT expr */ + 219, /* (216) expr ::= PLUS|MINUS expr */ + 219, /* (217) expr ::= expr PTR expr */ + 278, /* (218) between_op ::= BETWEEN */ + 278, /* (219) between_op ::= NOT BETWEEN */ + 219, /* (220) expr ::= expr between_op expr AND expr */ + 279, /* (221) in_op ::= IN */ + 279, /* (222) in_op ::= NOT IN */ + 219, /* (223) expr ::= expr in_op LP exprlist RP */ + 219, /* (224) expr ::= LP select RP */ + 219, /* (225) expr ::= expr in_op LP select RP */ + 219, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ + 219, /* (227) expr ::= EXISTS LP select RP */ + 219, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ + 282, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 282, /* (230) case_exprlist ::= WHEN expr THEN expr */ + 283, /* (231) case_else ::= ELSE expr */ + 283, /* (232) case_else ::= */ + 281, /* (233) case_operand ::= */ + 264, /* (234) exprlist ::= */ + 255, /* (235) nexprlist ::= nexprlist COMMA expr */ + 255, /* (236) nexprlist ::= expr */ + 280, /* (237) paren_exprlist ::= */ + 280, /* (238) paren_exprlist ::= LP exprlist RP */ + 192, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 284, /* (240) uniqueflag ::= UNIQUE */ + 284, /* (241) uniqueflag ::= */ + 223, /* (242) eidlist_opt ::= */ + 223, /* (243) eidlist_opt ::= LP eidlist RP */ + 234, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ + 234, /* (245) eidlist ::= nm collate sortorder */ + 285, /* (246) collate ::= */ + 285, /* (247) collate ::= COLLATE ID|STRING */ + 192, /* (248) cmd ::= DROP INDEX ifexists fullname */ + 192, /* (249) cmd ::= VACUUM vinto */ + 192, /* (250) cmd ::= VACUUM nm vinto */ + 286, /* (251) vinto ::= INTO expr */ + 286, /* (252) vinto ::= */ + 192, /* (253) cmd ::= PRAGMA nm dbnm */ + 192, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 192, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 192, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 192, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 213, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ + 214, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ + 192, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 288, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 290, /* (262) trigger_time ::= BEFORE|AFTER */ + 290, /* (263) trigger_time ::= INSTEAD OF */ + 290, /* (264) trigger_time ::= */ + 291, /* (265) trigger_event ::= DELETE|INSERT */ + 291, /* (266) trigger_event ::= UPDATE */ + 291, /* (267) trigger_event ::= UPDATE OF idlist */ + 293, /* (268) when_clause ::= */ + 293, /* (269) when_clause ::= WHEN expr */ + 289, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + 289, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ + 295, /* (272) trnm ::= nm DOT nm */ + 296, /* (273) tridxby ::= INDEXED BY nm */ + 296, /* (274) tridxby ::= NOT INDEXED */ + 294, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 294, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert 
scanpt */ + 294, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 294, /* (278) trigger_cmd ::= scanpt select scanpt */ + 219, /* (279) expr ::= RAISE LP IGNORE RP */ + 219, /* (280) expr ::= RAISE LP raisetype COMMA expr RP */ + 238, /* (281) raisetype ::= ROLLBACK */ + 238, /* (282) raisetype ::= ABORT */ + 238, /* (283) raisetype ::= FAIL */ + 192, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ + 192, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 192, /* (286) cmd ::= DETACH database_kw_opt expr */ + 298, /* (287) key_opt ::= */ + 298, /* (288) key_opt ::= KEY expr */ + 192, /* (289) cmd ::= REINDEX */ + 192, /* (290) cmd ::= REINDEX nm dbnm */ + 192, /* (291) cmd ::= ANALYZE */ + 192, /* (292) cmd ::= ANALYZE nm dbnm */ + 192, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 192, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 192, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 299, /* (296) add_column_fullname ::= fullname */ + 192, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 192, /* (298) cmd ::= create_vtab */ + 192, /* (299) cmd ::= create_vtab LP vtabarglist RP */ + 301, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 303, /* (301) vtabarg ::= */ + 304, /* (302) vtabargtoken ::= ANY */ + 304, /* (303) vtabargtoken ::= lp anylist RP */ + 305, /* (304) lp ::= LP */ + 269, /* (305) with ::= WITH wqlist */ + 269, /* (306) with ::= WITH RECURSIVE wqlist */ + 308, /* (307) wqas ::= AS */ + 308, /* (308) wqas ::= AS MATERIALIZED */ + 308, /* (309) wqas ::= AS NOT MATERIALIZED */ + 307, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ + 309, /* (311) withnm ::= nm */ + 243, /* (312) wqlist ::= wqitem */ + 243, /* (313) wqlist ::= wqlist COMMA wqitem */ + 310, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 311, /* (315) windowdefn ::= nm AS LP window RP */ + 312, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 312, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 312, /* (318) window ::= ORDER BY sortlist frame_opt */ + 312, /* (319) window ::= nm ORDER BY sortlist frame_opt */ + 312, /* (320) window ::= nm frame_opt */ + 313, /* (321) frame_opt ::= */ + 313, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 313, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 317, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ + 319, /* (325) frame_bound_s ::= frame_bound */ + 319, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ + 320, /* (327) frame_bound_e ::= frame_bound */ + 320, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 318, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ + 318, /* (330) frame_bound ::= CURRENT ROW */ + 321, /* (331) frame_exclude_opt ::= */ + 321, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 322, /* (333) frame_exclude ::= NO OTHERS */ + 322, /* (334) frame_exclude ::= CURRENT ROW */ + 322, /* (335) frame_exclude ::= GROUP|TIES */ + 253, /* (336) window_clause ::= WINDOW windowdefn_list */ + 276, /* (337) filter_over ::= filter_clause over_clause */ + 276, /* (338) filter_over ::= over_clause */ + 276, /* (339) filter_over ::= filter_clause */ + 316, /* (340) over_clause ::= OVER LP window RP */ + 316, /* (341) over_clause ::= OVER nm */ + 315, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ + 218, /* (343) term ::= QNUMBER 
*/ + 187, /* (344) input ::= cmdlist */ + 188, /* (345) cmdlist ::= cmdlist ecmd */ + 188, /* (346) cmdlist ::= ecmd */ + 189, /* (347) ecmd ::= SEMI */ + 189, /* (348) ecmd ::= cmdx SEMI */ + 189, /* (349) ecmd ::= explain cmdx SEMI */ + 194, /* (350) trans_opt ::= */ + 194, /* (351) trans_opt ::= TRANSACTION */ + 194, /* (352) trans_opt ::= TRANSACTION nm */ + 196, /* (353) savepoint_opt ::= SAVEPOINT */ + 196, /* (354) savepoint_opt ::= */ + 192, /* (355) cmd ::= create_table create_table_args */ + 205, /* (356) table_option_set ::= table_option */ + 203, /* (357) columnlist ::= columnlist COMMA columnname carglist */ + 203, /* (358) columnlist ::= columnname carglist */ + 195, /* (359) nm ::= ID|INDEXED|JOIN_KW */ + 195, /* (360) nm ::= STRING */ + 210, /* (361) typetoken ::= typename */ + 211, /* (362) typename ::= ID|STRING */ + 212, /* (363) signed ::= plus_num */ + 212, /* (364) signed ::= minus_num */ + 209, /* (365) carglist ::= carglist ccons */ + 209, /* (366) carglist ::= */ + 217, /* (367) ccons ::= NULL onconf */ + 217, /* (368) ccons ::= GENERATED ALWAYS AS generated */ + 217, /* (369) ccons ::= AS generated */ + 204, /* (370) conslist_opt ::= COMMA conslist */ + 230, /* (371) conslist ::= conslist tconscomma tcons */ + 230, /* (372) conslist ::= tcons */ + 231, /* (373) tconscomma ::= */ + 235, /* (374) defer_subclause_opt ::= defer_subclause */ + 237, /* (375) resolvetype ::= raisetype */ + 241, /* (376) selectnowith ::= oneselect */ + 242, /* (377) oneselect ::= values */ + 257, /* (378) sclp ::= selcollist COMMA */ + 258, /* (379) as ::= ID|STRING */ + 267, /* (380) indexed_opt ::= indexed_by */ + 275, /* (381) returning ::= */ + 219, /* (382) expr ::= term */ + 277, /* (383) likeop ::= LIKE_KW|MATCH */ + 281, /* (384) case_operand ::= expr */ + 264, /* (385) exprlist ::= nexprlist */ + 287, /* (386) nmnum ::= plus_num */ + 287, /* (387) nmnum ::= nm */ + 287, /* (388) nmnum ::= ON */ + 287, /* (389) nmnum ::= DELETE */ + 287, /* (390) nmnum ::= DEFAULT */ + 213, /* (391) plus_num ::= INTEGER|FLOAT */ + 292, /* (392) foreach_clause ::= */ + 292, /* (393) foreach_clause ::= FOR EACH ROW */ + 295, /* (394) trnm ::= nm */ + 296, /* (395) tridxby ::= */ + 297, /* (396) database_kw_opt ::= DATABASE */ + 297, /* (397) database_kw_opt ::= */ + 300, /* (398) kwcolumn_opt ::= */ + 300, /* (399) kwcolumn_opt ::= COLUMNKW */ + 302, /* (400) vtabarglist ::= vtabarg */ + 302, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + 303, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 306, /* (403) anylist ::= */ + 306, /* (404) anylist ::= anylist LP anylist RP */ + 306, /* (405) anylist ::= anylist ANY */ + 269, /* (406) with ::= */ + 310, /* (407) windowdefn_list ::= windowdefn */ + 312, /* (408) window ::= frame_opt */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -175937,7 +178782,7 @@ static const signed char yyRuleInfoNRhs[] = { -6, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -3, /* (278) trigger_cmd ::= scanpt select scanpt */ -4, /* (279) expr ::= RAISE LP IGNORE RP */ - -6, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ + -6, /* (280) expr ::= RAISE LP raisetype COMMA expr RP */ -1, /* (281) raisetype ::= ROLLBACK */ -1, /* (282) raisetype ::= ABORT */ -1, /* (283) raisetype ::= FAIL */ @@ -176117,16 +178962,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy144);} 
+{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy502);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy144 = TK_DEFERRED;} +{yymsp[1].minor.yy502 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); case 324: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==324); -{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/} +{yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -176149,11 +178994,13 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy144,0,0,yymsp[-2].minor.yy144); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy502,0,0,yymsp[-2].minor.yy502); } break; case 14: /* createkw ::= CREATE */ -{disableLookaside(pParse);} +{ + disableLookaside(pParse); +} break; case 15: /* ifnotexists ::= */ case 18: /* temp ::= */ yytestcase(yyruleno==18); @@ -176163,38 +179010,38 @@ static YYACTIONTYPE yy_reduce( case 81: /* ifexists ::= */ yytestcase(yyruleno==81); case 100: /* distinct ::= */ yytestcase(yyruleno==100); case 246: /* collate ::= */ yytestcase(yyruleno==246); -{yymsp[1].minor.yy144 = 0;} +{yymsp[1].minor.yy502 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy144 = 1;} +{yymsp[-2].minor.yy502 = 1;} break; case 17: /* temp ::= TEMP */ -{yymsp[0].minor.yy144 = pParse->db->init.busy==0;} +{yymsp[0].minor.yy502 = pParse->db->init.busy==0;} break; case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_option_set */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy391,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy9,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy555); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy637); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy637); } break; case 21: /* table_option_set ::= */ -{yymsp[1].minor.yy391 = 0;} +{yymsp[1].minor.yy9 = 0;} break; case 22: /* table_option_set ::= table_option_set COMMA table_option */ -{yylhsminor.yy391 = yymsp[-2].minor.yy391|yymsp[0].minor.yy391;} - yymsp[-2].minor.yy391 = yylhsminor.yy391; +{yylhsminor.yy9 = yymsp[-2].minor.yy9|yymsp[0].minor.yy9;} + yymsp[-2].minor.yy9 = yylhsminor.yy9; break; case 23: /* table_option ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy391 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy9 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy391 = 0; + yymsp[-1].minor.yy9 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -176202,13 +179049,13 @@ static YYACTIONTYPE yy_reduce( case 24: /* table_option ::= nm */ { if( yymsp[0].minor.yy0.n==6 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"strict",6)==0 ){ - yylhsminor.yy391 = TF_Strict; + yylhsminor.yy9 = TF_Strict; }else{ - yylhsminor.yy391 = 0; + yylhsminor.yy9 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } - 
yymsp[0].minor.yy391 = yylhsminor.yy391; + yymsp[0].minor.yy9 = yylhsminor.yy9; break; case 25: /* columnname ::= nm typetoken */ {sqlite3AddColumn(pParse,yymsp[-1].minor.yy0,yymsp[0].minor.yy0);} @@ -176234,7 +179081,7 @@ static YYACTIONTYPE yy_reduce( case 30: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy168 = yyLookaheadToken.z; + yymsp[1].minor.yy342 = yyLookaheadToken.z; } break; case 31: /* scantok ::= */ @@ -176245,20 +179092,20 @@ static YYACTIONTYPE yy_reduce( break; case 32: /* ccons ::= CONSTRAINT nm */ case 67: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==67); -{pParse->constraintName = yymsp[0].minor.yy0;} +{ASSERT_IS_CREATE; pParse->u1.cr.constraintName = yymsp[0].minor.yy0;} break; case 33: /* ccons ::= DEFAULT scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy590,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 34: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} +{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy590,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; case 35: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy590,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 36: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy454, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy590, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; @@ -176273,151 +179120,155 @@ static YYACTIONTYPE yy_reduce( } break; case 38: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy144);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy502);} break; case 39: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy144,yymsp[0].minor.yy144,yymsp[-2].minor.yy144);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy502,yymsp[0].minor.yy502,yymsp[-2].minor.yy502);} break; case 40: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy144,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy502,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 41: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy590,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; case 42: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy144);} +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy402,yymsp[0].minor.yy502);} break; case 43: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy144);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy502);} break; case 44: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 45: /* generated ::= LP expr RP */ -{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy454,0);} +{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy590,0);} break; case 46: 
/* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy454,&yymsp[0].minor.yy0);} +{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy590,&yymsp[0].minor.yy0);} break; case 48: /* autoinc ::= AUTOINCR */ -{yymsp[0].minor.yy144 = 1;} +{yymsp[0].minor.yy502 = 1;} break; case 49: /* refargs ::= */ -{ yymsp[1].minor.yy144 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy502 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 50: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy144 = (yymsp[-1].minor.yy144 & ~yymsp[0].minor.yy383.mask) | yymsp[0].minor.yy383.value; } +{ yymsp[-1].minor.yy502 = (yymsp[-1].minor.yy502 & ~yymsp[0].minor.yy481.mask) | yymsp[0].minor.yy481.value; } break; case 51: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy383.value = 0; yymsp[-1].minor.yy383.mask = 0x000000; } +{ yymsp[-1].minor.yy481.value = 0; yymsp[-1].minor.yy481.mask = 0x000000; } break; case 52: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy383.value = 0; yymsp[-2].minor.yy383.mask = 0x000000; } +{ yymsp[-2].minor.yy481.value = 0; yymsp[-2].minor.yy481.mask = 0x000000; } break; case 53: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144; yymsp[-2].minor.yy383.mask = 0x0000ff; } +{ yymsp[-2].minor.yy481.value = yymsp[0].minor.yy502; yymsp[-2].minor.yy481.mask = 0x0000ff; } break; case 54: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144<<8; yymsp[-2].minor.yy383.mask = 0x00ff00; } +{ yymsp[-2].minor.yy481.value = yymsp[0].minor.yy502<<8; yymsp[-2].minor.yy481.mask = 0x00ff00; } break; case 55: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy144 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_SetNull; /* EV: R-33326-45252 */} break; case 56: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy144 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 57: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy144 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy502 = OE_Cascade; /* EV: R-33326-45252 */} break; case 58: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy144 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy502 = OE_Restrict; /* EV: R-33326-45252 */} break; case 59: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy144 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_None; /* EV: R-33326-45252 */} break; case 60: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy144 = 0;} +{yymsp[-2].minor.yy502 = 0;} break; case 61: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 76: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==76); case 173: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==173); -{yymsp[-1].minor.yy144 = yymsp[0].minor.yy144;} +{yymsp[-1].minor.yy502 = yymsp[0].minor.yy502;} break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); case 219: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==219); case 222: /* in_op ::= NOT IN */ yytestcase(yyruleno==222); case 247: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==247); -{yymsp[-1].minor.yy144 = 1;} +{yymsp[-1].minor.yy502 = 1;} break; case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy144 = 0;} +{yymsp[-1].minor.yy502 = 0;} break; case 66: /* tconscomma ::= COMMA */ -{pParse->constraintName.n = 0;} +{ASSERT_IS_CREATE; 
pParse->u1.cr.constraintName.n = 0;} break; case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy144,yymsp[-2].minor.yy144,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy402,yymsp[0].minor.yy502,yymsp[-2].minor.yy502,0);} break; case 69: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy144,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy402,yymsp[0].minor.yy502,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 70: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy454,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy590,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; case 71: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy144); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy144); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy402, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy402, yymsp[-1].minor.yy502); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy502); } break; case 73: /* onconf ::= */ case 75: /* orconf ::= */ yytestcase(yyruleno==75); -{yymsp[1].minor.yy144 = OE_Default;} +{yymsp[1].minor.yy502 = OE_Default;} break; case 74: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy144 = yymsp[0].minor.yy144;} +{yymsp[-2].minor.yy502 = yymsp[0].minor.yy502;} break; case 77: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy144 = OE_Ignore;} +{yymsp[0].minor.yy502 = OE_Ignore;} break; case 78: /* resolvetype ::= REPLACE */ case 174: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==174); -{yymsp[0].minor.yy144 = OE_Replace;} +{yymsp[0].minor.yy502 = OE_Replace;} break; case 79: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy203, 0, yymsp[-1].minor.yy144); + sqlite3DropTable(pParse, yymsp[0].minor.yy563, 0, yymsp[-1].minor.yy502); } break; case 82: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[0].minor.yy555, yymsp[-7].minor.yy144, yymsp[-5].minor.yy144); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy402, yymsp[0].minor.yy637, yymsp[-7].minor.yy502, yymsp[-5].minor.yy502); } break; case 83: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy203, 1, yymsp[-1].minor.yy144); + sqlite3DropTable(pParse, yymsp[0].minor.yy563, 1, yymsp[-1].minor.yy502); } break; case 84: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy555, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); + if( (pParse->db->mDbFlags & DBFLAG_EncodingFixed)!=0 + || sqlite3ReadSchema(pParse)==SQLITE_OK + ){ + sqlite3Select(pParse, yymsp[0].minor.yy637, &dest); + } + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy637); } break; case 85: /* select ::= WITH wqlist selectnowith */ -{yymsp[-2].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} +{yymsp[-2].minor.yy637 = attachWithToSelect(pParse,yymsp[0].minor.yy637,yymsp[-1].minor.yy125);} break; case 86: /* select ::= WITH RECURSIVE wqlist 
selectnowith */ -{yymsp[-3].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} +{yymsp[-3].minor.yy637 = attachWithToSelect(pParse,yymsp[0].minor.yy637,yymsp[-1].minor.yy125);} break; case 87: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy555; + Select *p = yymsp[0].minor.yy637; if( p ){ parserDoubleLinkSelect(pParse, p); } @@ -176425,8 +179276,8 @@ static YYACTIONTYPE yy_reduce( break; case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy555; - Select *pLhs = yymsp[-2].minor.yy555; + Select *pRhs = yymsp[0].minor.yy637; + Select *pLhs = yymsp[-2].minor.yy637; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -176436,60 +179287,60 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy144; + pRhs->op = (u8)yymsp[-1].minor.yy502; pRhs->pPrior = pLhs; - if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; - pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy144!=TK_ALL ) pParse->hasCompound = 1; + if( ALWAYS(pLhs) ) pLhs->selFlags &= ~(u32)SF_MultiValue; + pRhs->selFlags &= ~(u32)SF_MultiValue; + if( yymsp[-1].minor.yy502!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy555 = pRhs; + yymsp[-2].minor.yy637 = pRhs; } break; case 89: /* multiselect_op ::= UNION */ case 91: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==91); -{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-OP*/} break; case 90: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy144 = TK_ALL;} +{yymsp[-1].minor.yy502 = TK_ALL;} break; case 92: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy203,yymsp[-4].minor.yy454,yymsp[-3].minor.yy14,yymsp[-2].minor.yy454,yymsp[-1].minor.yy14,yymsp[-7].minor.yy144,yymsp[0].minor.yy454); + yymsp[-8].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy402,yymsp[-5].minor.yy563,yymsp[-4].minor.yy590,yymsp[-3].minor.yy402,yymsp[-2].minor.yy590,yymsp[-1].minor.yy402,yymsp[-7].minor.yy502,yymsp[0].minor.yy590); } break; case 93: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy14,yymsp[-6].minor.yy203,yymsp[-5].minor.yy454,yymsp[-4].minor.yy14,yymsp[-3].minor.yy454,yymsp[-1].minor.yy14,yymsp[-8].minor.yy144,yymsp[0].minor.yy454); - if( yymsp[-9].minor.yy555 ){ - yymsp[-9].minor.yy555->pWinDefn = yymsp[-2].minor.yy211; + yymsp[-9].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy402,yymsp[-6].minor.yy563,yymsp[-5].minor.yy590,yymsp[-4].minor.yy402,yymsp[-3].minor.yy590,yymsp[-1].minor.yy402,yymsp[-8].minor.yy502,yymsp[0].minor.yy590); + if( yymsp[-9].minor.yy637 ){ + yymsp[-9].minor.yy637->pWinDefn = yymsp[-2].minor.yy483; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy211); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy483); } } break; case 94: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0); + yymsp[-3].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy402,0,0,0,0,0,SF_Values,0); } break; case 95: /* oneselect ::= mvalues */ { - 
sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy555); + sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy637); } break; case 96: /* mvalues ::= values COMMA LP nexprlist RP */ case 97: /* mvalues ::= mvalues COMMA LP nexprlist RP */ yytestcase(yyruleno==97); { - yymsp[-4].minor.yy555 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy555, yymsp[-1].minor.yy14); + yymsp[-4].minor.yy637 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy637, yymsp[-1].minor.yy402); } break; case 98: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy144 = SF_Distinct;} +{yymsp[0].minor.yy502 = SF_Distinct;} break; case 99: /* distinct ::= ALL */ -{yymsp[0].minor.yy144 = SF_All;} +{yymsp[0].minor.yy502 = SF_All;} break; case 101: /* sclp ::= */ case 134: /* orderby_opt ::= */ yytestcase(yyruleno==134); @@ -176497,20 +179348,20 @@ static YYACTIONTYPE yy_reduce( case 234: /* exprlist ::= */ yytestcase(yyruleno==234); case 237: /* paren_exprlist ::= */ yytestcase(yyruleno==237); case 242: /* eidlist_opt ::= */ yytestcase(yyruleno==242); -{yymsp[1].minor.yy14 = 0;} +{yymsp[1].minor.yy402 = 0;} break; case 102: /* selcollist ::= sclp scanpt expr scanpt as */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy14,yymsp[-3].minor.yy168,yymsp[-1].minor.yy168); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy402, yymsp[-2].minor.yy590); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy402, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy402,yymsp[-3].minor.yy342,yymsp[-1].minor.yy342); } break; case 103: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); - yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, p); + yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy402, p); } break; case 104: /* selcollist ::= sclp scanpt nm DOT STAR */ @@ -176520,7 +179371,7 @@ static YYACTIONTYPE yy_reduce( sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, pDot); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, pDot); } break; case 105: /* as ::= AS nm */ @@ -176531,55 +179382,65 @@ static YYACTIONTYPE yy_reduce( break; case 107: /* from ::= */ case 110: /* stl_prefix ::= */ yytestcase(yyruleno==110); -{yymsp[1].minor.yy203 = 0;} +{yymsp[1].minor.yy563 = 0;} break; case 108: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy203 = yymsp[0].minor.yy203; - sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy203); + yymsp[-1].minor.yy563 = yymsp[0].minor.yy563; + sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy563); } break; case 109: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy203 && yymsp[-1].minor.yy203->nSrc>0) ) yymsp[-1].minor.yy203->a[yymsp[-1].minor.yy203->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy144; + if( ALWAYS(yymsp[-1].minor.yy563 && yymsp[-1].minor.yy563->nSrc>0) ) yymsp[-1].minor.yy563->a[yymsp[-1].minor.yy563->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy502; } break; case 111: /* seltablist ::= stl_prefix nm dbnm as 
on_using */ { - yymsp[-4].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy203,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); + yymsp[-4].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy563,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); } break; case 112: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ { - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy269); - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-1].minor.yy0); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy421); + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy563, &yymsp[-1].minor.yy0); } break; case 113: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ { - yymsp[-7].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy203,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); - sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy203, yymsp[-3].minor.yy14); + yymsp[-7].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy563,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); + sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy563, yymsp[-3].minor.yy402); } break; case 114: /* seltablist ::= stl_prefix LP select RP as on_using */ { - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy555,&yymsp[0].minor.yy269); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy637,&yymsp[0].minor.yy421); } break; case 115: /* seltablist ::= stl_prefix LP seltablist RP as on_using */ { - if( yymsp[-5].minor.yy203==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy269.pOn==0 && yymsp[0].minor.yy269.pUsing==0 ){ - yymsp[-5].minor.yy203 = yymsp[-3].minor.yy203; - }else if( ALWAYS(yymsp[-3].minor.yy203!=0) && yymsp[-3].minor.yy203->nSrc==1 ){ - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); - if( yymsp[-5].minor.yy203 ){ - SrcItem *pNew = &yymsp[-5].minor.yy203->a[yymsp[-5].minor.yy203->nSrc-1]; - SrcItem *pOld = yymsp[-3].minor.yy203->a; + if( yymsp[-5].minor.yy563==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy421.pOn==0 && yymsp[0].minor.yy421.pUsing==0 ){ + yymsp[-5].minor.yy563 = yymsp[-3].minor.yy563; + }else if( ALWAYS(yymsp[-3].minor.yy563!=0) && yymsp[-3].minor.yy563->nSrc==1 ){ + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); + if( yymsp[-5].minor.yy563 ){ + SrcItem *pNew = &yymsp[-5].minor.yy563->a[yymsp[-5].minor.yy563->nSrc-1]; + SrcItem *pOld = yymsp[-3].minor.yy563->a; + assert( pOld->fg.fixedSchema==0 ); pNew->zName = pOld->zName; - pNew->zDatabase = pOld->zDatabase; - pNew->pSelect = pOld->pSelect; - if( pNew->pSelect && (pNew->pSelect->selFlags & SF_NestedFrom)!=0 ){ - pNew->fg.isNestedFrom = 1; + assert( pOld->fg.fixedSchema==0 ); + if( pOld->fg.isSubquery ){ + pNew->fg.isSubquery = 1; + pNew->u4.pSubq = pOld->u4.pSubq; + pOld->u4.pSubq = 0; + pOld->fg.isSubquery = 0; + assert( pNew->u4.pSubq!=0 && 
pNew->u4.pSubq->pSelect!=0 ); + if( (pNew->u4.pSubq->pSelect->selFlags & SF_NestedFrom)!=0 ){ + pNew->fg.isNestedFrom = 1; + } + }else{ + pNew->u4.zDatabase = pOld->u4.zDatabase; + pOld->u4.zDatabase = 0; } if( pOld->fg.isTabFunc ){ pNew->u1.pFuncArg = pOld->u1.pFuncArg; @@ -176587,15 +179448,14 @@ static YYACTIONTYPE yy_reduce( pOld->fg.isTabFunc = 0; pNew->fg.isTabFunc = 1; } - pOld->zName = pOld->zDatabase = 0; - pOld->pSelect = 0; + pOld->zName = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy203); + sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy563); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy203); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy203,0,0,0,0,SF_NestedFrom,0); - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy269); + sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy563); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy563,0,0,0,0,SF_NestedFrom,0); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy421); } } break; @@ -176605,56 +179465,56 @@ static YYACTIONTYPE yy_reduce( break; case 118: /* fullname ::= nm */ { - yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); - if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); + if( IN_RENAME_OBJECT && yylhsminor.yy563 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy563->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy203 = yylhsminor.yy203; + yymsp[0].minor.yy563 = yylhsminor.yy563; break; case 119: /* fullname ::= nm DOT nm */ { - yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); - if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); + if( IN_RENAME_OBJECT && yylhsminor.yy563 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy563->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy203 = yylhsminor.yy203; + yymsp[-2].minor.yy563 = yylhsminor.yy563; break; case 120: /* xfullname ::= nm */ -{yymsp[0].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} +{yymsp[0].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} break; case 121: /* xfullname ::= nm DOT nm */ -{yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[-2].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 122: /* xfullname ::= nm DOT nm AS nm */ { - yymsp[-4].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ - if( yymsp[-4].minor.yy203 ) yymsp[-4].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-4].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ + if( yymsp[-4].minor.yy563 ) yymsp[-4].minor.yy563->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 123: /* xfullname ::= nm AS nm */ { - yymsp[-2].minor.yy203 = 
sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ - if( yymsp[-2].minor.yy203 ) yymsp[-2].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-2].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ + if( yymsp[-2].minor.yy563 ) yymsp[-2].minor.yy563->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 124: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy144 = JT_INNER; } +{ yymsp[0].minor.yy502 = JT_INNER; } break; case 125: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} +{yymsp[-1].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; case 126: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} +{yymsp[-2].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; case 127: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} +{yymsp[-3].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; case 128: /* on_using ::= ON expr */ -{yymsp[-1].minor.yy269.pOn = yymsp[0].minor.yy454; yymsp[-1].minor.yy269.pUsing = 0;} +{yymsp[-1].minor.yy421.pOn = yymsp[0].minor.yy590; yymsp[-1].minor.yy421.pUsing = 0;} break; case 129: /* on_using ::= USING LP idlist RP */ -{yymsp[-3].minor.yy269.pOn = 0; yymsp[-3].minor.yy269.pUsing = yymsp[-1].minor.yy132;} +{yymsp[-3].minor.yy421.pOn = 0; yymsp[-3].minor.yy421.pUsing = yymsp[-1].minor.yy204;} break; case 130: /* on_using ::= */ -{yymsp[1].minor.yy269.pOn = 0; yymsp[1].minor.yy269.pUsing = 0;} +{yymsp[1].minor.yy421.pOn = 0; yymsp[1].minor.yy421.pUsing = 0;} break; case 132: /* indexed_by ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} @@ -176664,35 +179524,35 @@ static YYACTIONTYPE yy_reduce( break; case 135: /* orderby_opt ::= ORDER BY sortlist */ case 145: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==145); -{yymsp[-2].minor.yy14 = yymsp[0].minor.yy14;} +{yymsp[-2].minor.yy402 = yymsp[0].minor.yy402;} break; case 136: /* sortlist ::= sortlist COMMA expr sortorder nulls */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14,yymsp[-2].minor.yy454); - sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402,yymsp[-2].minor.yy590); + sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy402,yymsp[-1].minor.yy502,yymsp[0].minor.yy502); } break; case 137: /* sortlist ::= expr sortorder nulls */ { - yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy454); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); + yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy590); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy402,yymsp[-1].minor.yy502,yymsp[0].minor.yy502); } break; case 138: /* sortorder ::= ASC */ -{yymsp[0].minor.yy144 = SQLITE_SO_ASC;} +{yymsp[0].minor.yy502 = SQLITE_SO_ASC;} break; case 139: /* sortorder ::= DESC */ -{yymsp[0].minor.yy144 = SQLITE_SO_DESC;} +{yymsp[0].minor.yy502 = SQLITE_SO_DESC;} 
break; case 140: /* sortorder ::= */ case 143: /* nulls ::= */ yytestcase(yyruleno==143); -{yymsp[1].minor.yy144 = SQLITE_SO_UNDEFINED;} +{yymsp[1].minor.yy502 = SQLITE_SO_UNDEFINED;} break; case 141: /* nulls ::= NULLS FIRST */ -{yymsp[-1].minor.yy144 = SQLITE_SO_ASC;} +{yymsp[-1].minor.yy502 = SQLITE_SO_ASC;} break; case 142: /* nulls ::= NULLS LAST */ -{yymsp[-1].minor.yy144 = SQLITE_SO_DESC;} +{yymsp[-1].minor.yy502 = SQLITE_SO_DESC;} break; case 146: /* having_opt ::= */ case 148: /* limit_opt ::= */ yytestcase(yyruleno==148); @@ -176701,42 +179561,42 @@ static YYACTIONTYPE yy_reduce( case 232: /* case_else ::= */ yytestcase(yyruleno==232); case 233: /* case_operand ::= */ yytestcase(yyruleno==233); case 252: /* vinto ::= */ yytestcase(yyruleno==252); -{yymsp[1].minor.yy454 = 0;} +{yymsp[1].minor.yy590 = 0;} break; case 147: /* having_opt ::= HAVING expr */ case 154: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==154); case 156: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==156); case 231: /* case_else ::= ELSE expr */ yytestcase(yyruleno==231); case 251: /* vinto ::= INTO expr */ yytestcase(yyruleno==251); -{yymsp[-1].minor.yy454 = yymsp[0].minor.yy454;} +{yymsp[-1].minor.yy590 = yymsp[0].minor.yy590;} break; case 149: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,0);} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy590,0);} break; case 150: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 151: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,yymsp[-2].minor.yy454);} +{yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy590,yymsp[-2].minor.yy590);} break; case 152: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy203, &yymsp[-1].minor.yy0); - sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy203,yymsp[0].minor.yy454,0,0); + sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy563, &yymsp[-1].minor.yy0); + sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy563,yymsp[0].minor.yy590,0,0); } break; case 157: /* where_opt_ret ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-1].minor.yy454 = 0;} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402); yymsp[-1].minor.yy590 = 0;} break; case 158: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-3].minor.yy454 = yymsp[-2].minor.yy454;} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402); yymsp[-3].minor.yy590 = yymsp[-2].minor.yy590;} break; case 159: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-4].minor.yy0); - sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy14,"set list"); - if( yymsp[-1].minor.yy203 ){ - SrcList *pFromClause = yymsp[-1].minor.yy203; + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy563, &yymsp[-4].minor.yy0); + sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy402,"set list"); + if( yymsp[-1].minor.yy563 ){ + SrcList *pFromClause = yymsp[-1].minor.yy563; if( pFromClause->nSrc>1 ){ Select *pSubquery; Token as; @@ -176745,90 +179605,90 @@ static YYACTIONTYPE yy_reduce( as.z = 
0; pFromClause = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&as,pSubquery,0); } - yymsp[-5].minor.yy203 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy203, pFromClause); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy563, pFromClause); } - sqlite3Update(pParse,yymsp[-5].minor.yy203,yymsp[-2].minor.yy14,yymsp[0].minor.yy454,yymsp[-6].minor.yy144,0,0,0); + sqlite3Update(pParse,yymsp[-5].minor.yy563,yymsp[-2].minor.yy402,yymsp[0].minor.yy590,yymsp[-6].minor.yy502,0,0,0); } break; case 160: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy454); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy402, yymsp[0].minor.yy590); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy402, &yymsp[-2].minor.yy0, 1); } break; case 161: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ { - yymsp[-6].minor.yy14 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy14, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); + yymsp[-6].minor.yy402 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy402, yymsp[-3].minor.yy204, yymsp[0].minor.yy590); } break; case 162: /* setlist ::= nm EQ expr */ { - yylhsminor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy454); - sqlite3ExprListSetName(pParse, yylhsminor.yy14, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy402 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy590); + sqlite3ExprListSetName(pParse, yylhsminor.yy402, &yymsp[-2].minor.yy0, 1); } - yymsp[-2].minor.yy14 = yylhsminor.yy14; + yymsp[-2].minor.yy402 = yylhsminor.yy402; break; case 163: /* setlist ::= LP idlist RP EQ expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); + yymsp[-4].minor.yy402 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy204, yymsp[0].minor.yy590); } break; case 164: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy203, yymsp[-1].minor.yy555, yymsp[-2].minor.yy132, yymsp[-5].minor.yy144, yymsp[0].minor.yy122); + sqlite3Insert(pParse, yymsp[-3].minor.yy563, yymsp[-1].minor.yy637, yymsp[-2].minor.yy204, yymsp[-5].minor.yy502, yymsp[0].minor.yy403); } break; case 165: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ { - sqlite3Insert(pParse, yymsp[-4].minor.yy203, 0, yymsp[-3].minor.yy132, yymsp[-6].minor.yy144, 0); + sqlite3Insert(pParse, yymsp[-4].minor.yy563, 0, yymsp[-3].minor.yy204, yymsp[-6].minor.yy502, 0); } break; case 166: /* upsert ::= */ -{ yymsp[1].minor.yy122 = 0; } +{ yymsp[1].minor.yy403 = 0; } break; case 167: /* upsert ::= RETURNING selcollist */ -{ yymsp[-1].minor.yy122 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy14); } +{ yymsp[-1].minor.yy403 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy402); } break; case 168: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ -{ yymsp[-11].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy14,yymsp[-6].minor.yy454,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,yymsp[0].minor.yy122);} +{ yymsp[-11].minor.yy403 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy402,yymsp[-6].minor.yy590,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590,yymsp[0].minor.yy403);} break; case 169: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ -{ yymsp[-8].minor.yy122 = 
sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy14,yymsp[-3].minor.yy454,0,0,yymsp[0].minor.yy122); } +{ yymsp[-8].minor.yy403 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy402,yymsp[-3].minor.yy590,0,0,yymsp[0].minor.yy403); } break; case 170: /* upsert ::= ON CONFLICT DO NOTHING returning */ -{ yymsp[-4].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } +{ yymsp[-4].minor.yy403 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } break; case 171: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ -{ yymsp[-7].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,0);} +{ yymsp[-7].minor.yy403 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590,0);} break; case 172: /* returning ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14);} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402);} break; case 175: /* idlist_opt ::= */ -{yymsp[1].minor.yy132 = 0;} +{yymsp[1].minor.yy204 = 0;} break; case 176: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy132 = yymsp[-1].minor.yy132;} +{yymsp[-2].minor.yy204 = yymsp[-1].minor.yy204;} break; case 177: /* idlist ::= idlist COMMA nm */ -{yymsp[-2].minor.yy132 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy132,&yymsp[0].minor.yy0);} +{yymsp[-2].minor.yy204 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy204,&yymsp[0].minor.yy0);} break; case 178: /* idlist ::= nm */ -{yymsp[0].minor.yy132 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} +{yymsp[0].minor.yy204 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} break; case 179: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy454 = yymsp[-1].minor.yy454;} +{yymsp[-2].minor.yy590 = yymsp[-1].minor.yy590;} break; case 180: /* expr ::= ID|INDEXED|JOIN_KW */ -{yymsp[0].minor.yy454=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[0].minor.yy590=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 181: /* expr ::= nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); - yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); + yylhsminor.yy590 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } - yymsp[-2].minor.yy454 = yylhsminor.yy454; + yymsp[-2].minor.yy590 = yylhsminor.yy590; break; case 182: /* expr ::= nm DOT nm DOT nm */ { @@ -176839,27 +179699,27 @@ static YYACTIONTYPE yy_reduce( if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, 0, temp1); } - yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); + yylhsminor.yy590 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 183: /* term ::= NULL|FLOAT|BLOB */ case 184: /* term ::= STRING */ yytestcase(yyruleno==184); -{yymsp[0].minor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[0].minor.yy590=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 185: /* term ::= INTEGER */ { - yylhsminor.yy454 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); - if( yylhsminor.yy454 ) yylhsminor.yy454->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); + yylhsminor.yy590 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + if( yylhsminor.yy590 ) yylhsminor.yy590->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = 
yylhsminor.yy590; break; case 186: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; - yymsp[0].minor.yy454 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy454, n); + yymsp[0].minor.yy590 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy590, n); }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... These terms refer to registers @@ -176867,81 +179727,81 @@ static YYACTIONTYPE yy_reduce( Token t = yymsp[0].minor.yy0; /*A-overwrites-X*/ assert( t.n>=2 ); if( pParse->nested==0 ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy454 = 0; + parserSyntaxError(pParse, &t); + yymsp[0].minor.yy590 = 0; }else{ - yymsp[0].minor.yy454 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); - if( yymsp[0].minor.yy454 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy454->iTable); + yymsp[0].minor.yy590 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy590 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy590->iTable); } } } break; case 187: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy454 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy454, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy590 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy590, &yymsp[0].minor.yy0, 1); } break; case 188: /* expr ::= CAST LP expr AS typetoken RP */ { - yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); - sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy454, yymsp[-3].minor.yy454, 0); + yymsp[-5].minor.yy590 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy590, yymsp[-3].minor.yy590, 0); } break; case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy144); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy402, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy502); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy14, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy144); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-1].minor.yy14); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy402, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy502); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy590, yymsp[-1].minor.yy402); } - yymsp[-7].minor.yy454 = yylhsminor.yy454; + yymsp[-7].minor.yy590 = yylhsminor.yy590; break; case 191: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } - yymsp[-3].minor.yy454 = yylhsminor.yy454; + yymsp[-3].minor.yy590 = yylhsminor.yy590; break; case 192: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy14, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy144); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 
yymsp[-2].minor.yy402, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy502); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); } - yymsp[-5].minor.yy454 = yylhsminor.yy454; + yymsp[-5].minor.yy590 = yylhsminor.yy590; break; case 193: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy14, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy144); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-2].minor.yy14); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy402, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy502); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy590, yymsp[-2].minor.yy402); } - yymsp[-8].minor.yy454 = yylhsminor.yy454; + yymsp[-8].minor.yy590 = yylhsminor.yy590; break; case 194: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 195: /* term ::= CTIME_KW */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = yylhsminor.yy590; break; case 196: /* expr ::= LP nexprlist COMMA expr RP */ { - ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy402, yymsp[-1].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = pList; if( ALWAYS(pList->nExpr) ){ - yymsp[-4].minor.yy454->flags |= pList->a[0].pExpr->flags & EP_Propagate; + yymsp[-4].minor.yy590->flags |= pList->a[0].pExpr->flags & EP_Propagate; } }else{ sqlite3ExprListDelete(pParse->db, pList); @@ -176949,7 +179809,7 @@ static YYACTIONTYPE yy_reduce( } break; case 197: /* expr ::= expr AND expr */ -{yymsp[-2].minor.yy454=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy590=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 198: /* expr ::= expr OR expr */ case 199: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==199); @@ -176958,7 +179818,7 @@ static YYACTIONTYPE yy_reduce( case 202: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==202); case 203: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==203); case 204: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==204); -{yymsp[-2].minor.yy454=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy590=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 205: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} 
@@ -176968,11 +179828,11 @@ static YYACTIONTYPE yy_reduce( ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; yymsp[-1].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy454); - yymsp[-2].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); - if( bNot ) yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy454, 0); - if( yymsp[-2].minor.yy454 ) yymsp[-2].minor.yy454->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy590); + yymsp[-2].minor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + if( bNot ) yymsp[-2].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy590, 0); + if( yymsp[-2].minor.yy590 ) yymsp[-2].minor.yy590->flags |= EP_InfixFunc; } break; case 207: /* expr ::= expr likeop expr ESCAPE expr */ @@ -176980,203 +179840,212 @@ static YYACTIONTYPE yy_reduce( ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; yymsp[-3].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); - if( bNot ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ) yymsp[-4].minor.yy454->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); + if( bNot ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ) yymsp[-4].minor.yy590->flags |= EP_InfixFunc; } break; case 208: /* expr ::= expr ISNULL|NOTNULL */ -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy454,0);} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy590,0);} break; case 209: /* expr ::= expr NOT NULL */ -{yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy454,0);} +{yymsp[-2].minor.yy590 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy590,0);} break; case 210: /* expr ::= expr IS expr */ { - yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-2].minor.yy454, TK_ISNULL); + yymsp[-2].minor.yy590 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-2].minor.yy590, TK_ISNULL); } break; case 211: /* expr ::= expr IS NOT expr */ { - yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-3].minor.yy454, TK_NOTNULL); + yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-3].minor.yy590, TK_NOTNULL); } break; case 212: /* expr ::= expr IS NOT DISTINCT FROM expr */ { - yymsp[-5].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy454,yymsp[0].minor.yy454); - 
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-5].minor.yy454, TK_ISNULL); + yymsp[-5].minor.yy590 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-5].minor.yy590, TK_ISNULL); } break; case 213: /* expr ::= expr IS DISTINCT FROM expr */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-4].minor.yy454, TK_NOTNULL); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-4].minor.yy590, TK_NOTNULL); } break; case 214: /* expr ::= NOT expr */ case 215: /* expr ::= BITNOT expr */ yytestcase(yyruleno==215); -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy454, 0);/*A-overwrites-B*/} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy590, 0);/*A-overwrites-B*/} break; case 216: /* expr ::= PLUS|MINUS expr */ { - Expr *p = yymsp[0].minor.yy454; + Expr *p = yymsp[0].minor.yy590; u8 op = yymsp[-1].major + (TK_UPLUS-TK_PLUS); assert( TK_UPLUS>TK_PLUS ); assert( TK_UMINUS == TK_MINUS + (TK_UPLUS - TK_PLUS) ); if( p && p->op==TK_UPLUS ){ p->op = op; - yymsp[-1].minor.yy454 = p; + yymsp[-1].minor.yy590 = p; }else{ - yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, op, p, 0); + yymsp[-1].minor.yy590 = sqlite3PExpr(pParse, op, p, 0); /*A-overwrites-B*/ } } break; case 217: /* expr ::= expr PTR expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy454); - yylhsminor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy590); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); } - yymsp[-2].minor.yy454 = yylhsminor.yy454; + yymsp[-2].minor.yy590 = yylhsminor.yy590; break; case 218: /* between_op ::= BETWEEN */ case 221: /* in_op ::= IN */ yytestcase(yyruleno==221); -{yymsp[0].minor.yy144 = 0;} +{yymsp[0].minor.yy502 = 0;} break; case 220: /* expr ::= expr between_op expr AND expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 223: /* expr ::= expr in_op LP exprlist RP */ { - if( yymsp[-1].minor.yy14==0 ){ + if( yymsp[-1].minor.yy402==0 ){ /* Expressions of the form ** ** expr1 IN () ** expr1 NOT IN () ** - ** simplify to constants 0 (false) and 1 (true), respectively, - ** regardless of the value of expr1. 
+ ** simplify to constants 0 (false) and 1 (true), respectively. + ** + ** Except, do not apply this optimization if expr1 contains a function + ** because that function might be an aggregate (we don't know yet whether + ** it is or not) and if it is an aggregate, that could change the meaning + ** of the whole query. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy144 ? "true" : "false"); - if( yymsp[-4].minor.yy454 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy454); - }else{ - Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr; - if( yymsp[-1].minor.yy14->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy454->op!=TK_VECTOR ){ - yymsp[-1].minor.yy14->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); + Expr *pB = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy502 ? "true" : "false"); + if( pB ) sqlite3ExprIdToTrueFalse(pB); + if( !ExprHasProperty(yymsp[-4].minor.yy590, EP_HasFunc) ){ + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy590); + yymsp[-4].minor.yy590 = pB; + }else{ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, yymsp[-3].minor.yy502 ? TK_OR : TK_AND, pB, yymsp[-4].minor.yy590); + } + }else{ + Expr *pRHS = yymsp[-1].minor.yy402->a[0].pExpr; + if( yymsp[-1].minor.yy402->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy590->op!=TK_VECTOR ){ + yymsp[-1].minor.yy402->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy454, pRHS); - }else if( yymsp[-1].minor.yy14->nExpr==1 && pRHS->op==TK_SELECT ){ - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pRHS->x.pSelect); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy590, pRHS); + }else if( yymsp[-1].minor.yy402->nExpr==1 && pRHS->op==TK_SELECT ){ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pRHS->x.pSelect); pRHS->x.pSelect = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); - }else{ - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454==0 ){ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); - }else if( yymsp[-4].minor.yy454->pLeft->op==TK_VECTOR ){ - int nExpr = yymsp[-4].minor.yy454->pLeft->x.pList->nExpr; - Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy14); + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); + }else{ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590==0 ){ + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); + }else if( yymsp[-4].minor.yy590->pLeft->op==TK_VECTOR ){ + int nExpr = yymsp[-4].minor.yy590->pLeft->x.pList->nExpr; + Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy402); if( pSelectRHS ){ parserDoubleLinkSelect(pParse, pSelectRHS); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelectRHS); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pSelectRHS); } }else{ - yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy14; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); + yymsp[-4].minor.yy590->x.pList = yymsp[-1].minor.yy402; + sqlite3ExprSetHeightAndFlags(pParse, 
yymsp[-4].minor.yy590); } } - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } } break; case 224: /* expr ::= LP select RP */ { - yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); - sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy454, yymsp[-1].minor.yy555); + yymsp[-2].minor.yy590 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy590, yymsp[-1].minor.yy637); } break; case 225: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, yymsp[-1].minor.yy555); - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, yymsp[-1].minor.yy637); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 226: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0); - if( yymsp[0].minor.yy14 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy14); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelect); - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[0].minor.yy402 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy402); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pSelect); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 227: /* expr ::= EXISTS LP select RP */ { Expr *p; - p = yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); - sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy555); + p = yymsp[-3].minor.yy590 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy637); } break; case 228: /* expr ::= CASE case_operand case_exprlist case_else END */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy454 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454) : yymsp[-2].minor.yy14; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = yymsp[-1].minor.yy590 ? 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590) : yymsp[-2].minor.yy402; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy590); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy402); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy590); } } break; case 229: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[0].minor.yy454); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, yymsp[-2].minor.yy590); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, yymsp[0].minor.yy590); } break; case 230: /* case_exprlist ::= WHEN expr THEN expr */ { - yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, yymsp[0].minor.yy454); + yymsp[-3].minor.yy402 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + yymsp[-3].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy402, yymsp[0].minor.yy590); } break; case 235: /* nexprlist ::= nexprlist COMMA expr */ -{yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy402,yymsp[0].minor.yy590);} break; case 236: /* nexprlist ::= expr */ -{yymsp[0].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy454); /*A-overwrites-Y*/} +{yymsp[0].minor.yy402 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy590); /*A-overwrites-Y*/} break; case 238: /* paren_exprlist ::= LP exprlist RP */ case 243: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==243); -{yymsp[-2].minor.yy14 = yymsp[-1].minor.yy14;} +{yymsp[-2].minor.yy402 = yymsp[-1].minor.yy402;} break; case 239: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy144, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy454, SQLITE_SO_ASC, yymsp[-8].minor.yy144, SQLITE_IDXTYPE_APPDEF); + sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy402, yymsp[-10].minor.yy502, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy590, SQLITE_SO_ASC, yymsp[-8].minor.yy502, SQLITE_IDXTYPE_APPDEF); if( IN_RENAME_OBJECT && pParse->pNewIndex ){ sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0); } @@ -177184,29 +180053,29 @@ static YYACTIONTYPE yy_reduce( break; case 240: /* uniqueflag ::= UNIQUE */ case 282: /* raisetype ::= ABORT */ yytestcase(yyruleno==282); -{yymsp[0].minor.yy144 = OE_Abort;} +{yymsp[0].minor.yy502 = OE_Abort;} break; case 241: /* uniqueflag ::= */ -{yymsp[1].minor.yy144 = OE_None;} +{yymsp[1].minor.yy502 = OE_None;} break; case 244: /* eidlist ::= eidlist COMMA nm collate sortorder */ { - yymsp[-4].minor.yy14 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); + yymsp[-4].minor.yy402 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy402, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy502, yymsp[0].minor.yy502); } break; case 245: /* eidlist ::= nm collate sortorder */ { - 
yymsp[-2].minor.yy14 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); /*A-overwrites-Y*/ + yymsp[-2].minor.yy402 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy502, yymsp[0].minor.yy502); /*A-overwrites-Y*/ } break; case 248: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy203, yymsp[-1].minor.yy144);} +{sqlite3DropIndex(pParse, yymsp[0].minor.yy563, yymsp[-1].minor.yy502);} break; case 249: /* cmd ::= VACUUM vinto */ -{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy454);} +{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy590);} break; case 250: /* cmd ::= VACUUM nm vinto */ -{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy454);} +{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy590);} break; case 253: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} @@ -177228,50 +180097,54 @@ static YYACTIONTYPE yy_reduce( Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy427, &all); + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy319, &all); } break; case 261: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy144, yymsp[-4].minor.yy286.a, yymsp[-4].minor.yy286.b, yymsp[-2].minor.yy203, yymsp[0].minor.yy454, yymsp[-10].minor.yy144, yymsp[-8].minor.yy144); + sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy502, yymsp[-4].minor.yy28.a, yymsp[-4].minor.yy28.b, yymsp[-2].minor.yy563, yymsp[0].minor.yy590, yymsp[-10].minor.yy502, yymsp[-8].minor.yy502); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ +#ifdef SQLITE_DEBUG + assert( pParse->isCreate ); /* Set by createkw reduce action */ + pParse->isCreate = 0; /* But, should not be set for CREATE TRIGGER */ +#endif } break; case 262: /* trigger_time ::= BEFORE|AFTER */ -{ yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/ } +{ yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-X*/ } break; case 263: /* trigger_time ::= INSTEAD OF */ -{ yymsp[-1].minor.yy144 = TK_INSTEAD;} +{ yymsp[-1].minor.yy502 = TK_INSTEAD;} break; case 264: /* trigger_time ::= */ -{ yymsp[1].minor.yy144 = TK_BEFORE; } +{ yymsp[1].minor.yy502 = TK_BEFORE; } break; case 265: /* trigger_event ::= DELETE|INSERT */ case 266: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==266); -{yymsp[0].minor.yy286.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy286.b = 0;} +{yymsp[0].minor.yy28.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy28.b = 0;} break; case 267: /* trigger_event ::= UPDATE OF idlist */ -{yymsp[-2].minor.yy286.a = TK_UPDATE; yymsp[-2].minor.yy286.b = yymsp[0].minor.yy132;} +{yymsp[-2].minor.yy28.a = TK_UPDATE; yymsp[-2].minor.yy28.b = yymsp[0].minor.yy204;} break; case 268: /* when_clause ::= */ case 287: /* key_opt ::= */ yytestcase(yyruleno==287); -{ yymsp[1].minor.yy454 = 0; } +{ yymsp[1].minor.yy590 = 0; } break; case 269: /* when_clause ::= WHEN expr */ case 288: /* key_opt ::= KEY expr */ yytestcase(yyruleno==288); -{ yymsp[-1].minor.yy454 = yymsp[0].minor.yy454; } +{ yymsp[-1].minor.yy590 = yymsp[0].minor.yy590; } break; case 270: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd 
SEMI */ { - assert( yymsp[-2].minor.yy427!=0 ); - yymsp[-2].minor.yy427->pLast->pNext = yymsp[-1].minor.yy427; - yymsp[-2].minor.yy427->pLast = yymsp[-1].minor.yy427; + assert( yymsp[-2].minor.yy319!=0 ); + yymsp[-2].minor.yy319->pLast->pNext = yymsp[-1].minor.yy319; + yymsp[-2].minor.yy319->pLast = yymsp[-1].minor.yy319; } break; case 271: /* trigger_cmd_list ::= trigger_cmd SEMI */ { - assert( yymsp[-1].minor.yy427!=0 ); - yymsp[-1].minor.yy427->pLast = yymsp[-1].minor.yy427; + assert( yymsp[-1].minor.yy319!=0 ); + yymsp[-1].minor.yy319->pLast = yymsp[-1].minor.yy319; } break; case 272: /* trnm ::= nm DOT nm */ @@ -177297,58 +180170,58 @@ static YYACTIONTYPE yy_reduce( } break; case 275: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ -{yylhsminor.yy427 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy203, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454, yymsp[-7].minor.yy144, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy168);} - yymsp[-8].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy563, yymsp[-3].minor.yy402, yymsp[-1].minor.yy590, yymsp[-7].minor.yy502, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy342);} + yymsp[-8].minor.yy319 = yylhsminor.yy319; break; case 276: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ { - yylhsminor.yy427 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy132,yymsp[-2].minor.yy555,yymsp[-6].minor.yy144,yymsp[-1].minor.yy122,yymsp[-7].minor.yy168,yymsp[0].minor.yy168);/*yylhsminor.yy427-overwrites-yymsp[-6].minor.yy144*/ + yylhsminor.yy319 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy204,yymsp[-2].minor.yy637,yymsp[-6].minor.yy502,yymsp[-1].minor.yy403,yymsp[-7].minor.yy342,yymsp[0].minor.yy342);/*yylhsminor.yy319-overwrites-yymsp[-6].minor.yy502*/ } - yymsp[-7].minor.yy427 = yylhsminor.yy427; + yymsp[-7].minor.yy319 = yylhsminor.yy319; break; case 277: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -{yylhsminor.yy427 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy454, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy168);} - yymsp[-5].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy590, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy342);} + yymsp[-5].minor.yy319 = yylhsminor.yy319; break; case 278: /* trigger_cmd ::= scanpt select scanpt */ -{yylhsminor.yy427 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy555, yymsp[-2].minor.yy168, yymsp[0].minor.yy168); /*yylhsminor.yy427-overwrites-yymsp[-1].minor.yy555*/} - yymsp[-2].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy637, yymsp[-2].minor.yy342, yymsp[0].minor.yy342); /*yylhsminor.yy319-overwrites-yymsp[-1].minor.yy637*/} + yymsp[-2].minor.yy319 = yylhsminor.yy319; break; case 279: /* expr ::= RAISE LP IGNORE RP */ { - yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); - if( yymsp[-3].minor.yy454 ){ - yymsp[-3].minor.yy454->affExpr = OE_Ignore; + yymsp[-3].minor.yy590 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); + if( yymsp[-3].minor.yy590 ){ + yymsp[-3].minor.yy590->affExpr = OE_Ignore; } } break; - case 280: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 280: /* expr ::= RAISE LP raisetype COMMA expr RP */ { - yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); - if( 
yymsp[-5].minor.yy454 ) { - yymsp[-5].minor.yy454->affExpr = (char)yymsp[-3].minor.yy144; + yymsp[-5].minor.yy590 = sqlite3PExpr(pParse, TK_RAISE, yymsp[-1].minor.yy590, 0); + if( yymsp[-5].minor.yy590 ) { + yymsp[-5].minor.yy590->affExpr = (char)yymsp[-3].minor.yy502; } } break; case 281: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy144 = OE_Rollback;} +{yymsp[0].minor.yy502 = OE_Rollback;} break; case 283: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy144 = OE_Fail;} +{yymsp[0].minor.yy502 = OE_Fail;} break; case 284: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy203,yymsp[-1].minor.yy144); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy563,yymsp[-1].minor.yy502); } break; case 285: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy454, yymsp[-1].minor.yy454, yymsp[0].minor.yy454); + sqlite3Attach(pParse, yymsp[-3].minor.yy590, yymsp[-1].minor.yy590, yymsp[0].minor.yy590); } break; case 286: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy454); + sqlite3Detach(pParse, yymsp[0].minor.yy590); } break; case 289: /* cmd ::= REINDEX */ @@ -177365,7 +180238,7 @@ static YYACTIONTYPE yy_reduce( break; case 293: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy203,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy563,&yymsp[0].minor.yy0); } break; case 294: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ @@ -177376,18 +180249,18 @@ static YYACTIONTYPE yy_reduce( break; case 295: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ { - sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy203, &yymsp[0].minor.yy0); + sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy563, &yymsp[0].minor.yy0); } break; case 296: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy203); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy563); } break; case 297: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { - sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy203, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy563, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; case 298: /* cmd ::= create_vtab */ @@ -177398,7 +180271,7 @@ static YYACTIONTYPE yy_reduce( break; case 300: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy144); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy502); } break; case 301: /* vtabarg ::= */ @@ -177411,20 +180284,20 @@ static YYACTIONTYPE yy_reduce( break; case 305: /* with ::= WITH wqlist */ case 306: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==306); -{ sqlite3WithPush(pParse, yymsp[0].minor.yy59, 1); } +{ sqlite3WithPush(pParse, yymsp[0].minor.yy125, 1); } break; case 307: /* wqas ::= AS */ -{yymsp[0].minor.yy462 = M10d_Any;} +{yymsp[0].minor.yy444 = M10d_Any;} break; case 308: /* wqas ::= AS MATERIALIZED */ -{yymsp[-1].minor.yy462 = M10d_Yes;} +{yymsp[-1].minor.yy444 = M10d_Yes;} break; case 309: /* wqas ::= AS NOT MATERIALIZED */ -{yymsp[-2].minor.yy462 = M10d_No;} +{yymsp[-2].minor.yy444 = M10d_No;} break; case 310: /* wqitem ::= withnm eidlist_opt wqas LP select RP */ { - 
yymsp[-5].minor.yy67 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy555, yymsp[-3].minor.yy462); /*A-overwrites-X*/ + yymsp[-5].minor.yy361 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy402, yymsp[-1].minor.yy637, yymsp[-3].minor.yy444); /*A-overwrites-X*/ } break; case 311: /* withnm ::= nm */ @@ -177432,160 +180305,160 @@ static YYACTIONTYPE yy_reduce( break; case 312: /* wqlist ::= wqitem */ { - yymsp[0].minor.yy59 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy67); /*A-overwrites-X*/ + yymsp[0].minor.yy125 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy361); /*A-overwrites-X*/ } break; case 313: /* wqlist ::= wqlist COMMA wqitem */ { - yymsp[-2].minor.yy59 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy59, yymsp[0].minor.yy67); + yymsp[-2].minor.yy125 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy125, yymsp[0].minor.yy361); } break; case 314: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { - assert( yymsp[0].minor.yy211!=0 ); - sqlite3WindowChain(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy211); - yymsp[0].minor.yy211->pNextWin = yymsp[-2].minor.yy211; - yylhsminor.yy211 = yymsp[0].minor.yy211; + assert( yymsp[0].minor.yy483!=0 ); + sqlite3WindowChain(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy483); + yymsp[0].minor.yy483->pNextWin = yymsp[-2].minor.yy483; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[-2].minor.yy211 = yylhsminor.yy211; + yymsp[-2].minor.yy483 = yylhsminor.yy483; break; case 315: /* windowdefn ::= nm AS LP window RP */ { - if( ALWAYS(yymsp[-1].minor.yy211) ){ - yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); + if( ALWAYS(yymsp[-1].minor.yy483) ){ + yymsp[-1].minor.yy483->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); } - yylhsminor.yy211 = yymsp[-1].minor.yy211; + yylhsminor.yy483 = yymsp[-1].minor.yy483; } - yymsp[-4].minor.yy211 = yylhsminor.yy211; + yymsp[-4].minor.yy483 = yylhsminor.yy483; break; case 316: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { - yymsp[-4].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, 0); + yymsp[-4].minor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy402, yymsp[-1].minor.yy402, 0); } break; case 317: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { - yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, &yymsp[-5].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy402, yymsp[-1].minor.yy402, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy211 = yylhsminor.yy211; + yymsp[-5].minor.yy483 = yylhsminor.yy483; break; case 318: /* window ::= ORDER BY sortlist frame_opt */ { - yymsp[-3].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, 0); + yymsp[-3].minor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, yymsp[-1].minor.yy402, 0); } break; case 319: /* window ::= nm ORDER BY sortlist frame_opt */ { - yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, yymsp[-1].minor.yy402, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy211 = yylhsminor.yy211; + yymsp[-4].minor.yy483 = yylhsminor.yy483; break; case 320: /* window ::= nm frame_opt */ { - 
yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, 0, &yymsp[-1].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, 0, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy211 = yylhsminor.yy211; + yymsp[-1].minor.yy483 = yylhsminor.yy483; break; case 321: /* frame_opt ::= */ { - yymsp[1].minor.yy211 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); + yymsp[1].minor.yy483 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; case 322: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { - yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy144, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy462); + yylhsminor.yy483 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy502, yymsp[-1].minor.yy205.eType, yymsp[-1].minor.yy205.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy444); } - yymsp[-2].minor.yy211 = yylhsminor.yy211; + yymsp[-2].minor.yy483 = yylhsminor.yy483; break; case 323: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { - yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy144, yymsp[-3].minor.yy509.eType, yymsp[-3].minor.yy509.pExpr, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, yymsp[0].minor.yy462); + yylhsminor.yy483 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy502, yymsp[-3].minor.yy205.eType, yymsp[-3].minor.yy205.pExpr, yymsp[-1].minor.yy205.eType, yymsp[-1].minor.yy205.pExpr, yymsp[0].minor.yy444); } - yymsp[-5].minor.yy211 = yylhsminor.yy211; + yymsp[-5].minor.yy483 = yylhsminor.yy483; break; case 325: /* frame_bound_s ::= frame_bound */ case 327: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==327); -{yylhsminor.yy509 = yymsp[0].minor.yy509;} - yymsp[0].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205 = yymsp[0].minor.yy205;} + yymsp[0].minor.yy205 = yylhsminor.yy205; break; case 326: /* frame_bound_s ::= UNBOUNDED PRECEDING */ case 328: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==328); case 330: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==330); -{yylhsminor.yy509.eType = yymsp[-1].major; yylhsminor.yy509.pExpr = 0;} - yymsp[-1].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205.eType = yymsp[-1].major; yylhsminor.yy205.pExpr = 0;} + yymsp[-1].minor.yy205 = yylhsminor.yy205; break; case 329: /* frame_bound ::= expr PRECEDING|FOLLOWING */ -{yylhsminor.yy509.eType = yymsp[0].major; yylhsminor.yy509.pExpr = yymsp[-1].minor.yy454;} - yymsp[-1].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205.eType = yymsp[0].major; yylhsminor.yy205.pExpr = yymsp[-1].minor.yy590;} + yymsp[-1].minor.yy205 = yylhsminor.yy205; break; case 331: /* frame_exclude_opt ::= */ -{yymsp[1].minor.yy462 = 0;} +{yymsp[1].minor.yy444 = 0;} break; case 332: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ -{yymsp[-1].minor.yy462 = yymsp[0].minor.yy462;} +{yymsp[-1].minor.yy444 = yymsp[0].minor.yy444;} break; case 333: /* frame_exclude ::= NO OTHERS */ case 334: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==334); -{yymsp[-1].minor.yy462 = yymsp[-1].major; /*A-overwrites-X*/} +{yymsp[-1].minor.yy444 = yymsp[-1].major; /*A-overwrites-X*/} break; case 335: /* frame_exclude ::= GROUP|TIES */ -{yymsp[0].minor.yy462 = yymsp[0].major; /*A-overwrites-X*/} +{yymsp[0].minor.yy444 = yymsp[0].major; /*A-overwrites-X*/} break; case 336: /* window_clause ::= WINDOW windowdefn_list */ -{ yymsp[-1].minor.yy211 = yymsp[0].minor.yy211; } +{ 
yymsp[-1].minor.yy483 = yymsp[0].minor.yy483; } break; case 337: /* filter_over ::= filter_clause over_clause */ { - if( yymsp[0].minor.yy211 ){ - yymsp[0].minor.yy211->pFilter = yymsp[-1].minor.yy454; + if( yymsp[0].minor.yy483 ){ + yymsp[0].minor.yy483->pFilter = yymsp[-1].minor.yy590; }else{ - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy590); } - yylhsminor.yy211 = yymsp[0].minor.yy211; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[-1].minor.yy211 = yylhsminor.yy211; + yymsp[-1].minor.yy483 = yylhsminor.yy483; break; case 338: /* filter_over ::= over_clause */ { - yylhsminor.yy211 = yymsp[0].minor.yy211; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[0].minor.yy211 = yylhsminor.yy211; + yymsp[0].minor.yy483 = yylhsminor.yy483; break; case 339: /* filter_over ::= filter_clause */ { - yylhsminor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yylhsminor.yy211 ){ - yylhsminor.yy211->eFrmType = TK_FILTER; - yylhsminor.yy211->pFilter = yymsp[0].minor.yy454; + yylhsminor.yy483 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yylhsminor.yy483 ){ + yylhsminor.yy483->eFrmType = TK_FILTER; + yylhsminor.yy483->pFilter = yymsp[0].minor.yy590; }else{ - sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy454); + sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy590); } } - yymsp[0].minor.yy211 = yylhsminor.yy211; + yymsp[0].minor.yy483 = yylhsminor.yy483; break; case 340: /* over_clause ::= OVER LP window RP */ { - yymsp[-3].minor.yy211 = yymsp[-1].minor.yy211; - assert( yymsp[-3].minor.yy211!=0 ); + yymsp[-3].minor.yy483 = yymsp[-1].minor.yy483; + assert( yymsp[-3].minor.yy483!=0 ); } break; case 341: /* over_clause ::= OVER nm */ { - yymsp[-1].minor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yymsp[-1].minor.yy211 ){ - yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); + yymsp[-1].minor.yy483 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yymsp[-1].minor.yy483 ){ + yymsp[-1].minor.yy483->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); } } break; case 342: /* filter_clause ::= FILTER LP WHERE expr RP */ -{ yymsp[-4].minor.yy454 = yymsp[-1].minor.yy454; } +{ yymsp[-4].minor.yy590 = yymsp[-1].minor.yy590; } break; case 343: /* term ::= QNUMBER */ { - yylhsminor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); - sqlite3DequoteNumber(pParse, yylhsminor.yy454); + yylhsminor.yy590=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); + sqlite3DequoteNumber(pParse, yylhsminor.yy590); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = yylhsminor.yy590; break; default: /* (344) input ::= cmdlist */ yytestcase(yyruleno==344); @@ -177715,7 +180588,7 @@ static void yy_syntax_error( UNUSED_PARAMETER(yymajor); /* Silence some compiler warnings */ if( TOKEN.z[0] ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); + parserSyntaxError(pParse, &TOKEN); }else{ sqlite3ErrorMsg(pParse, "incomplete input"); } @@ -178677,7 +181550,7 @@ static int getToken(const unsigned char **pz){ int t; /* Token type to return */ do { z += sqlite3GetToken(z, &t); - }while( t==TK_SPACE ); + }while( t==TK_SPACE || t==TK_COMMENT ); if( t==TK_ID || t==TK_STRING || t==TK_JOIN_KW @@ -178766,7 +181639,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ case CC_MINUS: { if( z[1]=='-' ){ for(i=2; (c=z[i])!=0 && c!='\n'; i++){} - 
*tokenType = TK_SPACE; /* IMP: R-22934-25134 */ + *tokenType = TK_COMMENT; return i; }else if( z[1]=='>' ){ *tokenType = TK_PTR; @@ -178802,7 +181675,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ } for(i=3, c=z[2]; (c!='*' || z[i]!='/') && (c=z[i])!=0; i++){} if( c ) i++; - *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ + *tokenType = TK_COMMENT; return i; } case CC_PERCENT: { @@ -179131,12 +182004,12 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( tokenType>=TK_WINDOW ){ assert( tokenType==TK_SPACE || tokenType==TK_OVER || tokenType==TK_FILTER || tokenType==TK_ILLEGAL || tokenType==TK_WINDOW - || tokenType==TK_QNUMBER + || tokenType==TK_QNUMBER || tokenType==TK_COMMENT ); #else if( tokenType>=TK_SPACE ){ assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL - || tokenType==TK_QNUMBER + || tokenType==TK_QNUMBER || tokenType==TK_COMMENT ); #endif /* SQLITE_OMIT_WINDOWFUNC */ if( AtomicLoad(&db->u1.isInterrupted) ){ @@ -179170,6 +182043,13 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ assert( n==6 ); tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed); #endif /* SQLITE_OMIT_WINDOWFUNC */ + }else if( tokenType==TK_COMMENT + && (db->init.busy || (db->flags & SQLITE_Comments)!=0) + ){ + /* Ignore SQL comments if either (1) we are reparsing the schema or + ** (2) SQLITE_DBCONFIG_ENABLE_COMMENTS is turned on (the default). */ + zSql += n; + continue; }else if( tokenType!=TK_QNUMBER ){ Token x; x.z = zSql; @@ -179206,7 +182086,9 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( pParse->zErrMsg==0 ){ pParse->zErrMsg = sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc)); } - sqlite3_log(pParse->rc, "%s in \"%s\"", pParse->zErrMsg, pParse->zTail); + if( (pParse->prepFlags & SQLITE_PREPARE_DONT_LOG)==0 ){ + sqlite3_log(pParse->rc, "%s in \"%s\"", pParse->zErrMsg, pParse->zTail); + } nErr++; } pParse->zTail = zSql; @@ -179274,6 +182156,7 @@ SQLITE_PRIVATE char *sqlite3Normalize( n = sqlite3GetToken((unsigned char*)zSql+i, &tokenType); if( NEVER(n<=0) ) break; switch( tokenType ){ + case TK_COMMENT: case TK_SPACE: { break; } @@ -179915,32 +182798,6 @@ SQLITE_API char *sqlite3_temp_directory = 0; */ SQLITE_API char *sqlite3_data_directory = 0; -/* -** Determine whether or not high-precision (long double) floating point -** math works correctly on CPU currently running. -*/ -static SQLITE_NOINLINE int hasHighPrecisionDouble(int rc){ - if( sizeof(LONGDOUBLE_TYPE)<=8 ){ - /* If the size of "long double" is not more than 8, then - ** high-precision math is not possible. */ - return 0; - }else{ - /* Just because sizeof(long double)>8 does not mean that the underlying - ** hardware actually supports high-precision floating point. For example, - ** clearing the 0x100 bit in the floating-point control word on Intel - ** processors will make long double work like double, even though long - ** double takes up more space. The only way to determine if long double - ** actually works is to run an experiment. */ - LONGDOUBLE_TYPE a, b, c; - rc++; - a = 1.0+rc*0.1; - b = 1.0e+18+rc*25.0; - c = a+b; - return b!=c; - } -} - - /* ** Initialize SQLite. 
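
The hunks above retag '--' and '/* */' comments as TK_COMMENT and have sqlite3RunParser() swallow them only while reparsing the schema or while the SQLITE_Comments flag is set. A minimal caller-side sketch of the new toggle, assuming the SQLITE_DBCONFIG_ENABLE_COMMENTS opcode registered later in this patch; the demo_* wrapper is illustrative only:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Sketch: clear the comments flag on one connection, then show that a
    ** commented statement no longer prepares. Error handling elided. */
    static void demo_comment_toggle(sqlite3 *db){
      int enabled = -1;
      sqlite3_stmt *pStmt = 0;
      /* Third argument 0 clears the flag; &enabled receives the new state */
      sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_COMMENTS, 0, &enabled);
      int rc = sqlite3_prepare_v2(db, "SELECT 1 /* now a syntax error */",
                                  -1, &pStmt, 0);
      printf("comments enabled=%d, prepare rc=%d\n", enabled, rc);
      sqlite3_finalize(pStmt);
    }
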
** @@ -180085,6 +182942,14 @@ SQLITE_API int sqlite3_initialize(void){ if( rc==SQLITE_OK ){ sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage, sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage); +#ifdef SQLITE_EXTRA_INIT_MUTEXED + { + int SQLITE_EXTRA_INIT_MUTEXED(const char*); + rc = SQLITE_EXTRA_INIT_MUTEXED(0); + } +#endif + } + if( rc==SQLITE_OK ){ sqlite3MemoryBarrier(); sqlite3GlobalConfig.isInit = 1; #ifdef SQLITE_EXTRA_INIT @@ -180135,13 +183000,6 @@ SQLITE_API int sqlite3_initialize(void){ rc = SQLITE_EXTRA_INIT(0); } #endif - - /* Experimentally determine if high-precision floating point is - ** available. */ -#ifndef SQLITE_OMIT_WSD - sqlite3Config.bUseLongDouble = hasHighPrecisionDouble(rc); -#endif - return rc; } @@ -180548,17 +183406,22 @@ SQLITE_API int sqlite3_config(int op, ...){ ** If lookaside is already active, return SQLITE_BUSY. ** ** The sz parameter is the number of bytes in each lookaside slot. -** The cnt parameter is the number of slots. If pStart is NULL the -** space for the lookaside memory is obtained from sqlite3_malloc(). -** If pStart is not NULL then it is sz*cnt bytes of memory to use for -** the lookaside memory. +** The cnt parameter is the number of slots. If pBuf is NULL the +** space for the lookaside memory is obtained from sqlite3_malloc() +** or similar. If pBuf is not NULL then it is sz*cnt bytes of memory +** to use for the lookaside memory. */ -static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ +static int setupLookaside( + sqlite3 *db, /* Database connection being configured */ + void *pBuf, /* Memory to use for lookaside. May be NULL */ + int sz, /* Desired size of each lookaside memory slot */ + int cnt /* Number of slots to allocate */ +){ #ifndef SQLITE_OMIT_LOOKASIDE - void *pStart; - sqlite3_int64 szAlloc = sz*(sqlite3_int64)cnt; - int nBig; /* Number of full-size slots */ - int nSm; /* Number smaller LOOKASIDE_SMALL-byte slots */ + void *pStart; /* Start of the lookaside buffer */ + sqlite3_int64 szAlloc; /* Total space set aside for lookaside memory */ + int nBig; /* Number of full-size slots */ + int nSm; /* Number smaller LOOKASIDE_SMALL-byte slots */ if( sqlite3LookasideUsed(db,0)>0 ){ return SQLITE_BUSY; @@ -180571,17 +183434,22 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ sqlite3_free(db->lookaside.pStart); } /* The size of a lookaside slot after ROUNDDOWN8 needs to be larger - ** than a pointer to be useful. + ** than a pointer and small enough to fit in a u16. */ - sz = ROUNDDOWN8(sz); /* IMP: R-33038-09382 */ + sz = ROUNDDOWN8(sz); if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0; - if( cnt<0 ) cnt = 0; - if( sz==0 || cnt==0 ){ + if( sz>65528 ) sz = 65528; + /* Count must be at least 1 to be useful, but not so large as to use + ** more than 0x7fff0000 total bytes for lookaside. 
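
setupLookaside() now rounds the slot size down to a multiple of 8, clamps it to at most 65528 so it fits in a u16, and caps the total buffer below 0x7fff0000 bytes. A hedged usage sketch via the public SQLITE_DBCONFIG_LOOKASIDE opcode, which routes into this function:

    #include <sqlite3.h>

    /* Sketch: request 512-byte slots x 128 slots, drawn from
    ** sqlite3_malloc() because pBuf is NULL. Out-of-range values are
    ** clamped by setupLookaside(), e.g. sz=100000 becomes 65528. */
    static int demo_lookaside(sqlite3 *db){
      return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE,
                               (void*)0, /* pBuf: 0 => allocate internally */
                               512,      /* sz: bytes per slot */
                               128);     /* cnt: number of slots */
    }
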
*/ + if( cnt<1 ) cnt = 0; + if( sz>0 && cnt>(0x7fff0000/sz) ) cnt = 0x7fff0000/sz; + szAlloc = (i64)sz*(i64)cnt; + if( szAlloc==0 ){ sz = 0; pStart = 0; }else if( pBuf==0 ){ sqlite3BeginBenignMalloc(); - pStart = sqlite3Malloc( szAlloc ); /* IMP: R-61949-35727 */ + pStart = sqlite3Malloc( szAlloc ); sqlite3EndBenignMalloc(); if( pStart ) szAlloc = sqlite3MallocSize(pStart); }else{ @@ -180590,10 +183458,10 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE if( sz>=LOOKASIDE_SMALL*3 ){ nBig = szAlloc/(3*LOOKASIDE_SMALL+sz); - nSm = (szAlloc - sz*nBig)/LOOKASIDE_SMALL; + nSm = (szAlloc - (i64)sz*(i64)nBig)/LOOKASIDE_SMALL; }else if( sz>=LOOKASIDE_SMALL*2 ){ nBig = szAlloc/(LOOKASIDE_SMALL+sz); - nSm = (szAlloc - sz*nBig)/LOOKASIDE_SMALL; + nSm = (szAlloc - (i64)sz*(i64)nBig)/LOOKASIDE_SMALL; }else #endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */ if( sz>0 ){ @@ -180748,7 +183616,7 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ default: { static const struct { int op; /* The opcode */ - u32 mask; /* Mask of the bit in sqlite3.flags to set/clear */ + u64 mask; /* Mask of the bit in sqlite3.flags to set/clear */ } aFlagOp[] = { { SQLITE_DBCONFIG_ENABLE_FKEY, SQLITE_ForeignKeys }, { SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger }, @@ -180769,6 +183637,9 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ { SQLITE_DBCONFIG_TRUSTED_SCHEMA, SQLITE_TrustedSchema }, { SQLITE_DBCONFIG_STMT_SCANSTATUS, SQLITE_StmtScanStatus }, { SQLITE_DBCONFIG_REVERSE_SCANORDER, SQLITE_ReverseOrder }, + { SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE, SQLITE_AttachCreate }, + { SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE, SQLITE_AttachWrite }, + { SQLITE_DBCONFIG_ENABLE_COMMENTS, SQLITE_Comments }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ @@ -181212,10 +184083,6 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ sqlite3Error(db, SQLITE_OK); /* Deallocates any cached error strings. */ sqlite3ValueFree(db->pErr); sqlite3CloseExtensions(db); -#if SQLITE_USER_AUTHENTICATION - sqlite3_free(db->auth.zAuthUser); - sqlite3_free(db->auth.zAuthPW); -#endif db->eOpenState = SQLITE_STATE_ERROR; @@ -181559,6 +184426,9 @@ SQLITE_API int sqlite3_busy_handler( db->busyHandler.pBusyArg = pArg; db->busyHandler.nBusy = 0; db->busyTimeout = 0; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + db->setlkTimeout = 0; +#endif sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } @@ -181608,12 +184478,49 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){ sqlite3_busy_handler(db, (int(*)(void*,int))sqliteDefaultBusyCallback, (void*)db); db->busyTimeout = ms; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + db->setlkTimeout = ms; +#endif }else{ sqlite3_busy_handler(db, 0, 0); } return SQLITE_OK; } +/* +** Set the setlk timeout value. +*/ +SQLITE_API int sqlite3_setlk_timeout(sqlite3 *db, int ms, int flags){ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + int iDb; + int bBOC = ((flags & SQLITE_SETLK_BLOCK_ON_CONNECT) ? 
1 : 0); +#endif +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + if( ms<-1 ) return SQLITE_RANGE; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + sqlite3_mutex_enter(db->mutex); + db->setlkTimeout = ms; + db->setlkFlags = flags; + sqlite3BtreeEnterAll(db); + for(iDb=0; iDb<db->nDb; iDb++){ + Btree *pBt = db->aDb[iDb].pBt; + if( pBt ){ + sqlite3_file *fd = sqlite3PagerFile(sqlite3BtreePager(pBt)); + sqlite3OsFileControlHint(fd, SQLITE_FCNTL_BLOCK_ON_CONNECT, (void*)&bBOC); + } + } + sqlite3BtreeLeaveAll(db); + sqlite3_mutex_leave(db->mutex); +#endif +#if !defined(SQLITE_ENABLE_API_ARMOR) && !defined(SQLITE_ENABLE_SETLK_TIMEOUT) + UNUSED_PARAMETER(db); + UNUSED_PARAMETER(flags); +#endif + return SQLITE_OK; +} + /* ** Cause any pending operation to stop at its earliest opportunity. */ @@ -181682,7 +184589,8 @@ SQLITE_PRIVATE int sqlite3CreateFunc( assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY ); extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY| - SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE); + SQLITE_SUBTYPE|SQLITE_INNOCUOUS| + SQLITE_RESULT_SUBTYPE|SQLITE_SELFORDER1); enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); /* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But @@ -182649,8 +185557,8 @@ static const int aHardLimit[] = { #if SQLITE_MAX_VDBE_OP<40 # error SQLITE_MAX_VDBE_OP must be at least 40 #endif -#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>127 -# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 127 +#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>32767 +# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 32767 #endif #if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>125 # error SQLITE_MAX_ATTACHED must be between 0 and 125 @@ -182717,8 +185625,8 @@ SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ if( newLimit>=0 ){ /* IMP: R-52476-28732 */ if( newLimit>aHardLimit[limitId] ){ newLimit = aHardLimit[limitId]; /* IMP: R-51463-25634 */ - }else if( newLimit<1 && limitId==SQLITE_LIMIT_LENGTH ){ - newLimit = 1; + }else if( newLimitaLimit[limitId] = newLimit; } @@ -183113,6 +186021,9 @@ static int openDatabase( | SQLITE_EnableTrigger | SQLITE_EnableView | SQLITE_CacheSpill + | SQLITE_AttachCreate + | SQLITE_AttachWrite + | SQLITE_Comments #if !defined(SQLITE_TRUSTED_SCHEMA) || SQLITE_TRUSTED_SCHEMA+0!=0 | SQLITE_TrustedSchema #endif @@ -183237,6 +186148,7 @@ static int openDatabase( if( ((1<<(flags&7)) & 0x46)==0 ){ rc = SQLITE_MISUSE_BKPT; /* IMP: R-18321-05872 */ }else{ + if( zFilename==0 ) zFilename = ":memory:"; rc = sqlite3ParseUri(zVfs, zFilename, &flags, &db->pVfs, &zOpen, &zErrMsg); } if( rc!=SQLITE_OK ){ @@ -183574,7 +186486,7 @@ SQLITE_API int sqlite3_set_clientdata( return SQLITE_OK; }else{ size_t n = strlen(zName); - p = sqlite3_malloc64( sizeof(DbClientData)+n+1 ); + p = sqlite3_malloc64( SZ_DBCLIENTDATA(n+1) ); if( p==0 ){ if( xDestructor ) xDestructor(pData); sqlite3_mutex_leave(db->mutex); @@ -183728,13 +186640,10 @@ SQLITE_API int sqlite3_table_column_metadata( if( zColumnName==0 ){ /* Query for existence of table only */ }else{ - for(iCol=0; iCol<pTab->nCol; iCol++){ + iCol = sqlite3ColumnIndex(pTab, zColumnName); + if( iCol>=0 ){ pCol = &pTab->aCol[iCol]; - if( 0==sqlite3StrICmp(pCol->zCnName, zColumnName) ){ - break; - } - } - if( iCol==pTab->nCol ){ + }else{ if( HasRowid(pTab) && sqlite3IsRowid(zColumnName) ){ iCol = pTab->iPKey; pCol = iCol>=0 ? 
&pTab->aCol[iCol] : 0; @@ -183943,8 +186852,8 @@ SQLITE_API int sqlite3_test_control(int op, ...){ /* sqlite3_test_control(SQLITE_TESTCTRL_FK_NO_ACTION, sqlite3 *db, int b); ** ** If b is true, then activate the SQLITE_FkNoAction setting. If b is - ** false then clearn that setting. If the SQLITE_FkNoAction setting is - ** abled, all foreign key ON DELETE and ON UPDATE actions behave as if + ** false then clear that setting. If the SQLITE_FkNoAction setting is + ** enabled, all foreign key ON DELETE and ON UPDATE actions behave as if ** they were NO ACTION, regardless of how they are defined. ** ** NB: One must usually run "PRAGMA writable_schema=RESET" after @@ -184061,7 +186970,6 @@ SQLITE_API int sqlite3_test_control(int op, ...){ /* Invoke these debugging routines so that the compiler does not ** issue "defined but not used" warnings. */ if( x==9999 ){ - sqlite3ShowExpr(0); sqlite3ShowExpr(0); sqlite3ShowExprList(0); sqlite3ShowIdList(0); @@ -184149,6 +187057,18 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } + /* sqlite3_test_control(SQLITE_TESTCTRL_GETOPT, sqlite3 *db, int *N) + ** + ** Write the current optimization settings into *N. A zero bit means that + ** the optimization is on, and a 1 bit means that the optimization is off. + */ + case SQLITE_TESTCTRL_GETOPT: { + sqlite3 *db = va_arg(ap, sqlite3*); + int *pN = va_arg(ap, int*); + *pN = db->dbOptFlags; + break; + } + /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, onoff, xAlt); ** ** If parameter onoff is 1, subsequent calls to localtime() fail. @@ -184380,24 +187300,6 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } -#if !defined(SQLITE_OMIT_WSD) - /* sqlite3_test_control(SQLITE_TESTCTRL_USELONGDOUBLE, int X); - ** - ** X<0 Make no changes to the bUseLongDouble. Just report value. - ** X==0 Disable bUseLongDouble - ** X==1 Enable bUseLongDouble - ** X>=2 Set bUseLongDouble to its default value for this platform - */ - case SQLITE_TESTCTRL_USELONGDOUBLE: { - int b = va_arg(ap, int); - if( b>=2 ) b = hasHighPrecisionDouble(b); - if( b>=0 ) sqlite3Config.bUseLongDouble = b>0; - rc = sqlite3Config.bUseLongDouble!=0; - break; - } -#endif - - #if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) /* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue) ** @@ -184705,7 +187607,11 @@ SQLITE_API int sqlite3_snapshot_get( if( iDb==0 || iDb>1 ){ Btree *pBt = db->aDb[iDb].pBt; if( SQLITE_TXN_WRITE!=sqlite3BtreeTxnState(pBt) ){ + Pager *pPager = sqlite3BtreePager(pBt); + i64 dummy = 0; + sqlite3PagerSnapshotOpen(pPager, (sqlite3_snapshot*)&dummy); rc = sqlite3BtreeBeginTrans(pBt, 0, 0); + sqlite3PagerSnapshotOpen(pPager, 0); if( rc==SQLITE_OK ){ rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot); } @@ -185294,7 +188200,7 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ ** Here, array { X } means zero or more occurrences of X, adjacent in ** memory. A "position" is an index of a token in the token stream ** generated by the tokenizer. Note that POS_END and POS_COLUMN occur -** in the same logical place as the position element, and act as sentinals +** in the same logical place as the position element, and act as sentinels ** ending a position list array. POS_END is 0. POS_COLUMN is 1. 
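
The new SQLITE_TESTCTRL_GETOPT case above reads back the per-connection optimization-disable mask that SQLITE_TESTCTRL_OPTIMIZATIONS sets. A small sketch of the round trip; note that test controls are not part of the stable API, and the 0x0001 bit below is only a placeholder:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Sketch: disable one optimization bit, then read the mask back.
    ** Real bit assignments live in sqliteInt.h, not the public headers. */
    static void demo_getopt(sqlite3 *db){
      int mask = 0;
      sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, db, 0x0001);
      sqlite3_test_control(SQLITE_TESTCTRL_GETOPT, db, &mask);
      printf("dbOptFlags = 0x%x (1 bits mean disabled)\n", mask);
    }
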
** The positions numbers are not stored literally but rather as two more ** than the difference from the prior position, or the just the position plus @@ -185513,6 +188419,13 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ #ifndef _FTSINT_H #define _FTSINT_H +/* #include */ +/* #include */ +/* #include */ +/* #include */ +/* #include */ +/* #include */ + #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # define NDEBUG 1 #endif @@ -185982,6 +188895,19 @@ typedef sqlite3_int64 i64; /* 8-byte signed integer */ #define deliberate_fall_through +/* +** Macros needed to provide flexible arrays in a portable way +*/ +#ifndef offsetof +# define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif + + #endif /* SQLITE_AMALGAMATION */ #ifdef SQLITE_DEBUG @@ -186086,7 +189012,7 @@ struct Fts3Table { #endif #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - /* True to disable the incremental doclist optimization. This is controled + /* True to disable the incremental doclist optimization. This is controlled ** by special insert command 'test-no-incr-doclist'. */ int bNoIncrDoclist; @@ -186138,7 +189064,7 @@ struct Fts3Cursor { /* ** The Fts3Cursor.eSearch member is always set to one of the following. -** Actualy, Fts3Cursor.eSearch can be greater than or equal to +** Actually, Fts3Cursor.eSearch can be greater than or equal to ** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index ** of the column to be searched. For example, in ** @@ -186211,9 +189137,13 @@ struct Fts3Phrase { */ int nToken; /* Number of tokens in the phrase */ int iColumn; /* Index of column this phrase must match */ - Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */ + Fts3PhraseToken aToken[FLEXARRAY]; /* One for each token in the phrase */ }; +/* Size (in bytes) of an Fts3Phrase object large enough to hold N tokens */ +#define SZ_FTS3PHRASE(N) \ + (offsetof(Fts3Phrase,aToken)+(N)*sizeof(Fts3PhraseToken)) + /* ** A tree of these objects forms the RHS of a MATCH operator. ** @@ -186420,6 +189350,7 @@ SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext( SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int iCol, char **); SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *); SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr); +SQLITE_PRIVATE int sqlite3Fts3MsrCancel(Fts3Cursor*, Fts3Expr*); /* fts3_tokenize_vtab.c */ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *, void(*xDestroy)(void*)); @@ -186446,12 +189377,6 @@ SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk); # define SQLITE_CORE 1 #endif -/* #include */ -/* #include */ -/* #include */ -/* #include */ -/* #include */ -/* #include */ /* #include "fts3.h" */ #ifndef SQLITE_CORE @@ -188495,10 +191420,15 @@ static int fts3PoslistPhraseMerge( if( *p1==POS_COLUMN ){ p1++; p1 += fts3GetVarint32(p1, &iCol1); + /* iCol1==0 indicates corruption. Column 0 does not have a POS_COLUMN + ** entry, so this is actually end-of-doclist. */ + if( iCol1==0 ) return 0; } if( *p2==POS_COLUMN ){ p2++; p2 += fts3GetVarint32(p2, &iCol2); + /* As above, iCol2==0 indicates corruption. */ + if( iCol2==0 ) return 0; } while( 1 ){ @@ -188785,7 +191715,7 @@ static int fts3DoclistOrMerge( ** sizes of the two inputs, plus enough space for exactly one of the input ** docids to grow. 
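
The FLEXARRAY and SZ_FTS3PHRASE additions above move Fts3Phrase from the old aToken[1] over-allocation trick to a C99 flexible array member sized with offsetof(). The same idiom in isolation, using hypothetical Item/Container stand-ins rather than the real FTS3 types:

    #include <stddef.h>
    #include <stdlib.h>

    typedef struct Item { int v; } Item;
    typedef struct Container {
      int n;      /* number of valid entries in a[] */
      Item a[];   /* flexible array member, as FLEXARRAY expands on C99 */
    } Container;

    /* Mirrors SZ_FTS3PHRASE(N): header bytes plus N trailing elements,
    ** with no wasted [1] element hidden in the struct definition. */
    #define SZ_CONTAINER(N) (offsetof(Container, a) + (N)*sizeof(Item))

    static Container *containerNew(int n){
      Container *p = (Container*)malloc( SZ_CONTAINER(n) );
      if( p ) p->n = n;
      return p;
    }

On pre-C99 compilers FLEXARRAY falls back to 1, restoring the legacy declare-one-element layout while the offsetof()-based size macros keep computing the same allocation.
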
** - ** A symetric argument may be made if the doclists are in descending + ** A symmetric argument may be made if the doclists are in descending ** order. */ aOut = sqlite3_malloc64((i64)n1+n2+FTS3_VARINT_MAX-1+FTS3_BUFFER_PADDING); @@ -190584,7 +193514,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ nDistance = iPrev - nMaxUndeferred; } - aOut = (char *)sqlite3Fts3MallocZero(nPoslist+FTS3_BUFFER_PADDING); + aOut = (char *)sqlite3Fts3MallocZero(((i64)nPoslist)+FTS3_BUFFER_PADDING); if( !aOut ){ sqlite3_free(aPoslist); return SQLITE_NOMEM; @@ -190883,7 +193813,7 @@ static int incrPhraseTokenNext( ** ** * does not contain any deferred tokens. ** -** Advance it to the next matching documnent in the database and populate +** Advance it to the next matching document in the database and populate ** the Fts3Doclist.pList and nList fields. ** ** If there is no "next" entry and no error occurs, then *pbEof is set to @@ -191669,7 +194599,7 @@ static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){ nTmp += p->pRight->pPhrase->doclist.nList; } nTmp += p->pPhrase->doclist.nList; - aTmp = sqlite3_malloc64(nTmp*2); + aTmp = sqlite3_malloc64(nTmp*2 + FTS3_VARINT_MAX); if( !aTmp ){ *pRc = SQLITE_NOMEM; res = 0; @@ -191890,7 +194820,7 @@ static int fts3EvalNext(Fts3Cursor *pCsr){ } /* -** Restart interation for expression pExpr so that the next call to +** Restart iteration for expression pExpr so that the next call to ** fts3EvalNext() visits the first row. Do not allow incremental ** loading or merging of phrase doclists for this iteration. ** @@ -191933,6 +194863,24 @@ static void fts3EvalRestart( } } +/* +** Expression node pExpr is an MSR phrase. This function restarts pExpr +** so that it is a regular phrase query, not an MSR. SQLITE_OK is returned +** if successful, or an SQLite error code otherwise. +*/ +SQLITE_PRIVATE int sqlite3Fts3MsrCancel(Fts3Cursor *pCsr, Fts3Expr *pExpr){ + int rc = SQLITE_OK; + if( pExpr->bEof==0 ){ + i64 iDocid = pExpr->iDocid; + fts3EvalRestart(pCsr, pExpr, &rc); + while( rc==SQLITE_OK && pExpr->iDocid!=iDocid ){ + fts3EvalNextRow(pCsr, pExpr, &rc); + if( pExpr->bEof ) rc = FTS_CORRUPT_VTAB; + } + } + return rc; +} + /* ** After allocating the Fts3Expr.aMI[] array for each phrase in the ** expression rooted at pExpr, the cursor iterates through all rows matched @@ -192320,7 +195268,7 @@ SQLITE_PRIVATE int sqlite3Fts3Corrupt(){ } #endif -#if !SQLITE_CORE +#if !defined(SQLITE_CORE) /* ** Initialize API pointer table, if required. */ @@ -193064,6 +196012,23 @@ SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer( */ static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *); +/* +** Search buffer z[], size n, for a '"' character. Or, if enable_parenthesis +** is defined, search for '(' and ')' as well. Return the index of the first +** such character in the buffer. If there is no such character, return -1. +*/ +static int findBarredChar(const char *z, int n){ + int ii; + for(ii=0; iiiLangid, z, i, &pCursor); + *pnConsumed = n; + rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, n, &pCursor); if( rc==SQLITE_OK ){ const char *zToken; int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0; @@ -193105,7 +196063,18 @@ static int getNextToken( rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition); if( rc==SQLITE_OK ){ - nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken; + /* Check that this tokenization did not gobble up any " characters. 
Or, + ** if enable_parenthesis is true, that it did not gobble up any + ** open or close parenthesis characters either. If it did, call + ** getNextToken() again, but pass only that part of the input buffer + ** up to the first such character. */ + int iBarred = findBarredChar(z, iEnd); + if( iBarred>=0 ){ + pModule->xClose(pCursor); + return getNextToken(pParse, iCol, z, iBarred, ppExpr, pnConsumed); + } + + nByte = sizeof(Fts3Expr) + SZ_FTS3PHRASE(1) + nToken; pRet = (Fts3Expr *)sqlite3Fts3MallocZero(nByte); if( !pRet ){ rc = SQLITE_NOMEM; @@ -193115,7 +196084,7 @@ static int getNextToken( pRet->pPhrase->nToken = 1; pRet->pPhrase->iColumn = iCol; pRet->pPhrase->aToken[0].n = nToken; - pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1]; + pRet->pPhrase->aToken[0].z = (char*)&pRet->pPhrase->aToken[1]; memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken); if( iEnd=0 ){ + *pnConsumed = iBarred; + } rc = SQLITE_OK; } @@ -193186,9 +196159,9 @@ static int getNextString( Fts3Expr *p = 0; sqlite3_tokenizer_cursor *pCursor = 0; char *zTemp = 0; - int nTemp = 0; + i64 nTemp = 0; - const int nSpace = sizeof(Fts3Expr) + sizeof(Fts3Phrase); + const int nSpace = sizeof(Fts3Expr) + SZ_FTS3PHRASE(1); int nToken = 0; /* The final Fts3Expr data structure, including the Fts3Phrase, @@ -193222,10 +196195,11 @@ static int getNextString( Fts3PhraseToken *pToken; p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken)); - if( !p ) goto no_mem; - zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte); - if( !zTemp ) goto no_mem; + if( !zTemp || !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } assert( nToken==ii ); pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii]; @@ -193240,9 +196214,6 @@ static int getNextString( nToken = ii+1; } } - - pModule->xClose(pCursor); - pCursor = 0; } if( rc==SQLITE_DONE ){ int jj; char *zBuf = 0; p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp); - if( !p ) goto no_mem; + if( !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p); p->eType = FTSQUERY_PHRASE; p->pPhrase = (Fts3Phrase *)&p[1]; @@ -193258,11 +196232,9 @@ static int getNextString( p->pPhrase->nToken = nToken; zBuf = (char *)&p->pPhrase->aToken[nToken]; + assert( nTemp==0 || zTemp ); if( zTemp ){ memcpy(zBuf, zTemp, nTemp); - sqlite3_free(zTemp); - }else{ - assert( nTemp==0 ); } for(jj=0; jj<p->pPhrase->nToken; jj++){ @@ -193272,17 +196244,17 @@ static int getNextString( rc = SQLITE_OK; } - *ppExpr = p; - return rc; -no_mem: - + getnextstring_out: if( pCursor ){ pModule->xClose(pCursor); } sqlite3_free(zTemp); - sqlite3_free(p); - *ppExpr = 0; - return SQLITE_NOMEM; + if( rc!=SQLITE_OK ){ + sqlite3_free(p); + p = 0; + } + *ppExpr = p; + return rc; } /* @@ -193561,7 +196533,7 @@ static int fts3ExprParse( /* The isRequirePhrase variable is set to true if a phrase or ** an expression contained in parenthesis is required. If a - ** binary operator (AND, OR, NOT or NEAR) is encounted when + ** binary operator (AND, OR, NOT or NEAR) is encountered when isRequirePhrase is set, this is a syntax error. 
*/ if( !isPhrase && isRequirePhrase ){ @@ -194143,7 +197115,6 @@ static void fts3ExprTestCommon( } if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){ - sqlite3Fts3ExprFree(pExpr); sqlite3_result_error(context, "Error parsing expression", -1); }else if( rc==SQLITE_NOMEM || !(zBuf = exprToString(pExpr, 0)) ){ sqlite3_result_error_nomem(context); @@ -194386,7 +197357,7 @@ static void fts3HashInsertElement( } -/* Resize the hash table so that it cantains "new_size" buckets. +/* Resize the hash table so that it contains "new_size" buckets. ** "new_size" must be a power of 2. The hash table might fail ** to resize if sqliteMalloc() fails. ** @@ -194841,7 +197812,7 @@ static int star_oh(const char *z){ /* ** If the word ends with zFrom and xCond() is true for the stem -** of the word that preceeds the zFrom ending, then change the +** of the word that precedes the zFrom ending, then change the ** ending to zTo. ** ** The input word *pz and zFrom are both in reverse order. zTo @@ -195476,11 +198447,7 @@ SQLITE_PRIVATE int sqlite3Fts3InitTokenizer( #ifdef SQLITE_TEST -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -#endif +#include "tclsqlite.h" /* #include */ /* @@ -196356,7 +199323,7 @@ static int fts3tokFilterMethod( fts3tokResetCursor(pCsr); if( idxNum==1 ){ const char *zByte = (const char *)sqlite3_value_text(apVal[0]); - int nByte = sqlite3_value_bytes(apVal[0]); + sqlite3_int64 nByte = sqlite3_value_bytes(apVal[0]); pCsr->zInput = sqlite3_malloc64(nByte+1); if( pCsr->zInput==0 ){ rc = SQLITE_NOMEM; @@ -200428,7 +203395,7 @@ static int fts3IncrmergePush( ** ** It is assumed that the buffer associated with pNode is already large ** enough to accommodate the new entry. The buffer associated with pPrev -** is extended by this function if requrired. +** is extended by this function if required. ** ** If an error (i.e. OOM condition) occurs, an SQLite error code is ** returned. Otherwise, SQLITE_OK. @@ -202091,7 +205058,7 @@ SQLITE_PRIVATE int sqlite3Fts3DeferToken( /* ** SQLite value pRowid contains the rowid of a row that may or may not be ** present in the FTS3 table. If it is, delete it and adjust the contents -** of subsiduary data structures accordingly. +** of subsidiary data structures accordingly. */ static int fts3DeleteByRowid( Fts3Table *p, @@ -202417,9 +205384,13 @@ struct MatchinfoBuffer { int nElem; int bGlobal; /* Set if global data is loaded */ char *zMatchinfo; - u32 aMatchinfo[1]; + u32 aMI[FLEXARRAY]; }; +/* Size (in bytes) of a MatchinfoBuffer sufficient for N elements */ +#define SZ_MATCHINFOBUFFER(N) \ + (offsetof(MatchinfoBuffer,aMI)+(((N)+1)/2)*sizeof(u64)) + /* ** The snippet() and offsets() functions both return text values. 
An instance @@ -202444,13 +205415,13 @@ struct StrBuffer { static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ MatchinfoBuffer *pRet; sqlite3_int64 nByte = sizeof(u32) * (2*(sqlite3_int64)nElem + 1) - + sizeof(MatchinfoBuffer); + + SZ_MATCHINFOBUFFER(1); sqlite3_int64 nStr = strlen(zMatchinfo); pRet = sqlite3Fts3MallocZero(nByte + nStr+1); if( pRet ){ - pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet; - pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0] + pRet->aMI[0] = (u8*)(&pRet->aMI[1]) - (u8*)pRet; + pRet->aMI[1+nElem] = pRet->aMI[0] + sizeof(u32)*((int)nElem+1); pRet->nElem = (int)nElem; pRet->zMatchinfo = ((char*)pRet) + nByte; @@ -202464,10 +205435,10 @@ static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ static void fts3MIBufferFree(void *p){ MatchinfoBuffer *pBuf = (MatchinfoBuffer*)((u8*)p - ((u32*)p)[-1]); - assert( (u32*)p==&pBuf->aMatchinfo[1] - || (u32*)p==&pBuf->aMatchinfo[pBuf->nElem+2] + assert( (u32*)p==&pBuf->aMI[1] + || (u32*)p==&pBuf->aMI[pBuf->nElem+2] ); - if( (u32*)p==&pBuf->aMatchinfo[1] ){ + if( (u32*)p==&pBuf->aMI[1] ){ pBuf->aRef[1] = 0; }else{ pBuf->aRef[2] = 0; @@ -202484,18 +205455,18 @@ static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ if( p->aRef[1]==0 ){ p->aRef[1] = 1; - aOut = &p->aMatchinfo[1]; + aOut = &p->aMI[1]; xRet = fts3MIBufferFree; } else if( p->aRef[2]==0 ){ p->aRef[2] = 1; - aOut = &p->aMatchinfo[p->nElem+2]; + aOut = &p->aMI[p->nElem+2]; xRet = fts3MIBufferFree; }else{ aOut = (u32*)sqlite3_malloc64(p->nElem * sizeof(u32)); if( aOut ){ xRet = sqlite3_free; - if( p->bGlobal ) memcpy(aOut, &p->aMatchinfo[1], p->nElem*sizeof(u32)); + if( p->bGlobal ) memcpy(aOut, &p->aMI[1], p->nElem*sizeof(u32)); } } @@ -202505,7 +205476,7 @@ static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ static void fts3MIBufferSetGlobal(MatchinfoBuffer *p){ p->bGlobal = 1; - memcpy(&p->aMatchinfo[2+p->nElem], &p->aMatchinfo[1], p->nElem*sizeof(u32)); + memcpy(&p->aMI[2+p->nElem], &p->aMI[1], p->nElem*sizeof(u32)); } /* @@ -202707,6 +205678,7 @@ static int fts3SnippetNextCandidate(SnippetIter *pIter){ return 1; } + assert( pIter->nSnippet>=0 ); pIter->iCurrent = iStart = iEnd - pIter->nSnippet + 1; for(i=0; i<pIter->nPhrase; i++){ SnippetPhrase *pPhrase = &pIter->aPhrase[i]; @@ -202919,7 +205891,7 @@ static int fts3StringAppend( } /* If there is insufficient space allocated at StrBuffer.z, use realloc() - ** to grow the buffer until so that it is big enough to accomadate the + ** to grow the buffer until so that it is big enough to accommodate the ** appended data. */ if( pStr->n+nAppend+1>=pStr->nAlloc ){ @@ -203331,16 +206303,16 @@ static size_t fts3MatchinfoSize(MatchInfo *pInfo, char cArg){ break; case FTS3_MATCHINFO_LHITS: - nVal = pInfo->nCol * pInfo->nPhrase; + nVal = (size_t)pInfo->nCol * pInfo->nPhrase; break; case FTS3_MATCHINFO_LHITS_BM: - nVal = pInfo->nPhrase * ((pInfo->nCol + 31) / 32); + nVal = (size_t)pInfo->nPhrase * ((pInfo->nCol + 31) / 32); break; default: assert( cArg==FTS3_MATCHINFO_HITS ); - nVal = pInfo->nCol * pInfo->nPhrase * 3; + nVal = (size_t)pInfo->nCol * pInfo->nPhrase * 3; break; } @@ -203894,6 +206866,22 @@ static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){ return rc; } +/* +** If expression pExpr is a phrase expression that uses an MSR query, +** restart it as a regular, non-incremental query. Return SQLITE_OK +** if successful, or an SQLite error code otherwise. 
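
The SZ_MATCHINFOBUFFER macro used above reserves N 32-bit slots past the header but rounds the tail up to whole 64-bit words, (((N)+1)/2)*sizeof(u64), so the total stays 8-byte aligned. A worked check of that rounding under the usual 4-byte u32 / 8-byte u64 assumption, with a pared-down stand-in struct:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Pared-down stand-in for MatchinfoBuffer: header then u32 tail. */
    struct MIB { int aRef[3]; int nElem; char *zMatchinfo; u32 aMI[]; };
    #define SZ_MIB(N) (offsetof(struct MIB, aMI) + (((N)+1)/2)*sizeof(u64))

    int main(void){
      assert( SZ_MIB(1)==SZ_MIB(2) );             /* 1 or 2 slots: one u64  */
      assert( SZ_MIB(3)==SZ_MIB(1)+sizeof(u64) ); /* 3 or 4 slots: two u64s */
      assert( SZ_MIB(4)==SZ_MIB(3) );
      return 0;
    }
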
+*/ +static int fts3ExprRestartIfCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ + TermOffsetCtx *p = (TermOffsetCtx*)ctx; + int rc = SQLITE_OK; + UNUSED_PARAMETER(iPhrase); + if( pExpr->pPhrase && pExpr->pPhrase->bIncr ){ + rc = sqlite3Fts3MsrCancel(p->pCsr, pExpr); + pExpr->pPhrase->bIncr = 0; + } + return rc; +} + /* ** Implementation of offsets() function. */ @@ -203930,6 +206918,12 @@ SQLITE_PRIVATE void sqlite3Fts3Offsets( sCtx.iDocid = pCsr->iPrevId; sCtx.pCsr = pCsr; + /* If a query restart will be required, do it here, rather than later of + ** after pointers to poslist buffers that may be invalidated by a restart + ** have been saved. */ + rc = sqlite3Fts3ExprIterate(pCsr->pExpr, fts3ExprRestartIfCb, (void*)&sCtx); + if( rc!=SQLITE_OK ) goto offsets_out; + /* Loop through the table columns, appending offset information to ** string-buffer res for each column. */ @@ -204876,8 +207870,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** Beginning with version 3.45.0 (circa 2024-01-01), these routines also ** accept BLOB values that have JSON encoded using a binary representation ** called "JSONB". The name JSONB comes from PostgreSQL, however the on-disk -** format SQLite JSONB is completely different and incompatible with -** PostgreSQL JSONB. +** format for SQLite-JSONB is completely different and incompatible with +** PostgreSQL-JSONB. ** ** Decoding and interpreting JSONB is still O(N) where N is the size of ** the input, the same as text JSON. However, the constant of proportionality @@ -204934,7 +207928,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** ** The payload size need not be expressed in its minimal form. For example, ** if the payload size is 10, the size can be expressed in any of 5 different -** ways: (1) (X>>4)==10, (2) (X>>4)==12 following by on 0x0a byte, +** ways: (1) (X>>4)==10, (2) (X>>4)==12 following by one 0x0a byte, ** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by ** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and ** a single byte of 0x0a. The shorter forms are preferred, of course, but @@ -204944,7 +207938,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** the size when it becomes known, resulting in a non-minimal encoding. ** ** The value (X>>4)==15 is not actually used in the current implementation -** (as SQLite is currently unable handle BLOBs larger than about 2GB) +** (as SQLite is currently unable to handle BLOBs larger than about 2GB) ** but is included in the design to allow for future enhancements. ** ** The payload follows the header. NULL, TRUE, and FALSE have no payload and @@ -205004,23 +207998,47 @@ static const char * const jsonbType[] = { ** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). 
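
All five encodings enumerated above share one header layout: the low nibble of the first byte is the element type, and the upper nibble is either a literal payload size (0..11) or the code 12, 13, or 14 selecting a 1-, 2-, or 4-byte big-endian size that follows. A minimal sketch of an encoder emitting the preferred (shortest) form; jsonbHeader() is a hypothetical helper, not a routine from this file:

    #include <stdint.h>

    /* Write a header for element type t (low nibble) and payload size sz
    ** into a[]; returns the header length: 1, 2, 3, or 5 bytes. */
    static int jsonbHeader(uint8_t *a, uint8_t t, uint32_t sz){
      if( sz<=11 ){
        a[0] = (uint8_t)(t | (sz<<4));            /* size in upper nibble */
        return 1;
      }
      if( sz<=0xff ){
        a[0] = (uint8_t)(t | (12<<4));  a[1] = (uint8_t)sz;
        return 2;
      }
      if( sz<=0xffff ){
        a[0] = (uint8_t)(t | (13<<4));
        a[1] = (uint8_t)(sz>>8);  a[2] = (uint8_t)(sz&0xff);
        return 3;
      }
      a[0] = (uint8_t)(t | (14<<4));              /* 4-byte big-endian size */
      a[1] = (uint8_t)(sz>>24);       a[2] = (uint8_t)((sz>>16)&0xff);
      a[3] = (uint8_t)((sz>>8)&0xff); a[4] = (uint8_t)(sz&0xff);
      return 5;
    }
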
*/ static const char jsonIsSpace[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +#ifdef SQLITE_ASCII +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */ + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* e */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* f */ +#endif +#ifdef SQLITE_EBCDIC +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */ + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* e */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* f */ +#endif + }; #define jsonIsspace(x) (jsonIsSpace[(unsigned char)x]) @@ -205028,7 +208046,13 @@ static const char jsonIsSpace[] = { ** The set of all space characters recognized by jsonIsspace(). ** Useful as the second argument to strspn(). */ +#ifdef SQLITE_ASCII static const char jsonSpaces[] = "\011\012\015\040"; +#endif +#ifdef SQLITE_EBCDIC +static const char jsonSpaces[] = "\005\045\015\100"; +#endif + /* ** Characters that are special to JSON. Control characters, @@ -205037,23 +208061,46 @@ static const char jsonSpaces[] = "\011\012\015\040"; ** it in the set of special characters. 
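
As the comment above notes, jsonSpaces exists so a run of JSON whitespace can be skipped with a single strspn() call; the EBCDIC variant simply lists the same four characters in that code page. A one-function usage sketch under the ASCII build:

    #include <string.h>

    static const char jsonSpaces[] = "\011\012\015\040"; /* tab \n \r space */

    /* Advance *pi past any run of JSON whitespace in z. */
    static void skipJsonSpace(const char *z, size_t *pi){
      *pi += strspn(&z[*pi], jsonSpaces);
    }
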
*/ static const char jsonIsOk[256] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 +#ifdef SQLITE_ASCII +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, /* 2 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 3 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, /* 5 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */ + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 8 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 9 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* a */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* b */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* c */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* d */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 /* f */ +#endif +#ifdef SQLITE_EBCDIC +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, /* 3 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 5 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, /* 7 */ + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 8 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 9 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* a */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* b */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* c */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* d */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 /* f */ +#endif }; /* Objects */ @@ -205198,7 +208245,7 @@ struct JsonParse { ** Forward references **************************************************************************/ static void jsonReturnStringAsBlob(JsonString*); -static int jsonFuncArgMightBeBinary(sqlite3_value *pJson); +static int jsonArgIsJsonb(sqlite3_value *pJson, JsonParse *p); static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*); static void jsonReturnParse(sqlite3_context*,JsonParse*); static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32); @@ -205272,7 +208319,7 @@ static int jsonCacheInsert( ** most-recently used entry if it isn't so already. 
** ** The JsonParse object returned still belongs to the Cache and might -** be deleted at any moment. If the caller whants the JsonParse to +** be deleted at any moment. If the caller wants the JsonParse to ** linger, it needs to increment the nPJRef reference counter. */ static JsonParse *jsonCacheSearch( @@ -205616,11 +208663,9 @@ static void jsonAppendSqlValue( break; } default: { - if( jsonFuncArgMightBeBinary(pValue) ){ - JsonParse px; - memset(&px, 0, sizeof(px)); - px.aBlob = (u8*)sqlite3_value_blob(pValue); - px.nBlob = sqlite3_value_bytes(pValue); + JsonParse px; + memset(&px, 0, sizeof(px)); + if( jsonArgIsJsonb(pValue, &px) ){ jsonTranslateBlobToText(&px, 0, p); }else if( p->eErr==0 ){ sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1); @@ -205939,7 +208984,7 @@ static void jsonWrongNumArgs( */ static int jsonBlobExpand(JsonParse *pParse, u32 N){ u8 *aNew; - u32 t; + u64 t; assert( N>pParse->nBlobAlloc ); if( pParse->nBlobAlloc==0 ){ t = 100; @@ -205949,8 +208994,9 @@ static int jsonBlobExpand(JsonParse *pParse, u32 N){ if( tdb, pParse->aBlob, t); if( aNew==0 ){ pParse->oom = 1; return 1; } + assert( t<0x7fffffff ); pParse->aBlob = aNew; - pParse->nBlobAlloc = t; + pParse->nBlobAlloc = (u32)t; return 0; } @@ -206017,7 +209063,7 @@ static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode( } -/* Append an node type byte together with the payload size and +/* Append a node type byte together with the payload size and ** possibly also the payload. ** ** If aPayload is not NULL, then it is a pointer to the payload which @@ -206086,8 +209132,10 @@ static int jsonBlobChangePayloadSize( nExtra = 1; }else if( szType==13 ){ nExtra = 2; - }else{ + }else if( szType==14 ){ nExtra = 4; + }else{ + nExtra = 8; } if( szPayload<=11 ){ nNeeded = 0; @@ -206557,7 +209605,12 @@ static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ || c=='n' || c=='r' || c=='t' || (c=='u' && jsonIs4Hex(&z[j+1])) ){ if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ; - }else if( c=='\'' || c=='0' || c=='v' || c=='\n' + }else if( c=='\'' || c=='v' || c=='\n' +#ifdef SQLITE_BUG_COMPATIBLE_20250510 + || (c=='0') /* Legacy bug compatible */ +#else + || (c=='0' && !sqlite3Isdigit(z[j+1])) /* Correct implementation */ +#endif || (0xe2==(u8)c && 0x80==(u8)z[j+1] && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2])) || (c=='x' && jsonIs2Hex(&z[j+1])) ){ @@ -206907,10 +209960,7 @@ static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ u8 x; u32 sz; u32 n; - if( NEVER(i>pParse->nBlob) ){ - *pSz = 0; - return 0; - } + assert( i<=pParse->nBlob ); x = pParse->aBlob[i]>>4; if( x<=11 ){ sz = x; @@ -206947,15 +209997,15 @@ static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ *pSz = 0; return 0; } - sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + + sz = ((u32)pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8]; n = 9; } if( (i64)i+sz+n > pParse->nBlob && (i64)i+sz+n > pParse->nBlob-pParse->delta ){ - sz = 0; - n = 0; + *pSz = 0; + return 0; } *pSz = sz; return n; @@ -207052,9 +210102,12 @@ static u32 jsonTranslateBlobToText( } case JSONB_TEXT: case JSONB_TEXTJ: { - jsonAppendChar(pOut, '"'); - jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz); - jsonAppendChar(pOut, '"'); + if( pOut->nUsed+sz+2<=pOut->nAlloc || jsonStringGrow(pOut, sz+2)==0 ){ + pOut->zBuf[pOut->nUsed] = '"'; + memcpy(pOut->zBuf+pOut->nUsed+1,(const char*)&pParse->aBlob[i+n],sz); + pOut->zBuf[pOut->nUsed+sz+1] = '"'; + pOut->nUsed += sz+2; + } break; } case 
JSONB_TEXT5: { @@ -207293,33 +210346,6 @@ static u32 jsonTranslateBlobToPrettyText( return i; } - -/* Return true if the input pJson -** -** For performance reasons, this routine does not do a detailed check of the -** input BLOB to ensure that it is well-formed. Hence, false positives are -** possible. False negatives should never occur, however. -*/ -static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){ - u32 sz, n; - const u8 *aBlob; - int nBlob; - JsonParse s; - if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0; - aBlob = sqlite3_value_blob(pJson); - nBlob = sqlite3_value_bytes(pJson); - if( nBlob<1 ) return 0; - if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0; - memset(&s, 0, sizeof(s)); - s.aBlob = (u8*)aBlob; - s.nBlob = nBlob; - n = jsonbPayloadSize(&s, 0, &sz); - if( n==0 ) return 0; - if( sz+n!=(u32)nBlob ) return 0; - if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0; - return sz+n==(u32)nBlob; -} - /* ** Given that a JSONB_ARRAY object starts at offset i, return ** the number of entries in that array. @@ -207352,6 +210378,82 @@ static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){ pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz); } +/* +** If the JSONB at aIns[0..nIns-1] can be expanded (by denormalizing the +** size field) by d bytes, then write the expansion into aOut[] and +** return true. In this way, an overwrite happens without changing the +** size of the JSONB, which reduces memcpy() operations and also make it +** faster and easier to update the B-Tree entry that contains the JSONB +** in the database. +** +** If the expansion of aIns[] by d bytes cannot be (easily) accomplished +** then return false. +** +** The d parameter is guaranteed to be between 1 and 8. +** +** This routine is an optimization. A correct answer is obtained if it +** always leaves the output unchanged and returns false. 
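jsonBlobOverwrite() relies on the payload size having several legal encodings: the upper nibble of an element's first byte either holds the size directly (0 through 11) or announces a trailing 1-, 2-, 4- or 8-byte big-endian size field (the 0xc0, 0xd0, 0xe0 and 0xf0 values in aType[] below). Re-encoding the same size in a wider field pads an element to exactly the length needed for an in-place overwrite. A sketch of that encoding, inferred from the surrounding comments rather than copied from the library:

  #include <stdio.h>
  typedef unsigned char u8;
  typedef unsigned int u32;

  /* Encode a JSONB-style header at a[]: the low nibble of a[0] is the
  ** node type; the upper nibble either holds sz itself (header size 1)
  ** or selects a 1-, 2- or 4-byte big-endian size field that follows.
  ** Returns the number of header bytes written. Sketch only. */
  static int writeHeader(u8 *a, u8 eType, u32 sz, int nHdr){
    switch( nHdr ){
      case 1:                      /* sz must be 0..11 */
        a[0] = (u8)(eType | (sz<<4));
        return 1;
      case 2:                      /* 0xc0: one size byte follows */
        a[0] = eType | 0xc0;  a[1] = (u8)sz;
        return 2;
      case 3:                      /* 0xd0: two size bytes follow */
        a[0] = eType | 0xd0;  a[1] = (u8)(sz>>8);  a[2] = (u8)sz;
        return 3;
      default:                     /* 0xe0: four size bytes follow */
        a[0] = eType | 0xe0;
        a[1] = (u8)(sz>>24);  a[2] = (u8)(sz>>16);
        a[3] = (u8)(sz>>8);   a[4] = (u8)sz;
        return 5;
    }
  }

  int main(void){
    u8 a[5], b[5];
    /* The same 7-byte payload announced two ways: widening the header
    ** from 1 byte to 2 grows the element by exactly 1 byte, which is
    ** how an overwrite can be padded to fit the hole left by an edit. */
    int n1 = writeHeader(a, 0x07, 7, 1);
    int n2 = writeHeader(b, 0x07, 7, 2);
    printf("hdr1: %02x (n=%d)  hdr2: %02x %02x (n=%d)\n",
           a[0], n1, b[0], b[1], n2);
    return 0;
  }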
+*/ +static int jsonBlobOverwrite( + u8 *aOut, /* Overwrite here */ + const u8 *aIns, /* New content */ + u32 nIns, /* Bytes of new content */ + u32 d /* Need to expand new content by this much */ +){ + u32 szPayload; /* Bytes of payload */ + u32 i; /* New header size, after expansion & a loop counter */ + u8 szHdr; /* Size of header before expansion */ + + /* Lookup table for finding the upper 4 bits of the first byte of the + ** expanded aIns[], based on the size of the expanded aIns[] header: + ** + ** 2 3 4 5 6 7 8 9 */ + static const u8 aType[] = { 0xc0, 0xd0, 0, 0xe0, 0, 0, 0, 0xf0 }; + + if( (aIns[0]&0x0f)<=2 ) return 0; /* Cannot enlarge NULL, true, false */ + switch( aIns[0]>>4 ){ + default: { /* aIns[] header size 1 */ + if( ((1<=2 && i<=9 && aType[i-2]!=0 ); + aOut[0] = (aIns[0] & 0x0f) | aType[i-2]; + memcpy(&aOut[i], &aIns[szHdr], nIns-szHdr); + szPayload = nIns - szHdr; + while( 1/*edit-by-break*/ ){ + i--; + aOut[i] = szPayload & 0xff; + if( i==1 ) break; + szPayload >>= 8; + } + assert( (szPayload>>8)==0 ); + return 1; +} + /* ** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of ** content beginning at iDel, and replacing them with nIns bytes of @@ -207373,6 +210475,11 @@ static void jsonBlobEdit( u32 nIns /* Bytes of content to insert */ ){ i64 d = (i64)nIns - (i64)nDel; + if( d<0 && d>=(-8) && aIns!=0 + && jsonBlobOverwrite(&pParse->aBlob[iDel], aIns, nIns, (int)-d) + ){ + return; + } if( d!=0 ){ if( pParse->nBlob + d > pParse->nBlobAlloc ){ jsonBlobExpand(pParse, pParse->nBlob+d); @@ -207384,7 +210491,9 @@ static void jsonBlobEdit( pParse->nBlob += d; pParse->delta += d; } - if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns); + if( nIns && aIns ){ + memcpy(&pParse->aBlob[iDel], aIns, nIns); + } } /* @@ -207469,7 +210578,21 @@ static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){ case 'r': { *piOut = '\r'; return 2; } case 't': { *piOut = '\t'; return 2; } case 'v': { *piOut = '\v'; return 2; } - case '0': { *piOut = 0; return 2; } + case '0': { + /* JSON5 requires that the \0 escape not be followed by a digit. + ** But SQLite did not enforce this restriction in versions 3.42.0 + ** through 3.49.2. That was a bug. But some applications might have + ** come to depend on that bug. Use the SQLITE_BUG_COMPATIBLE_20250510 + ** option to restore the old buggy behavior. */ +#ifdef SQLITE_BUG_COMPATIBLE_20250510 + /* Legacy bug-compatible behavior */ + *piOut = 0; +#else + /* Correct behavior */ + *piOut = (n>2 && sqlite3Isdigit(z[2])) ? JSON_INVALID_CHAR : 0; +#endif + return 2; + } case '\'': case '"': case '/': @@ -207700,7 +210823,9 @@ static u32 jsonLookupStep( zPath++; if( zPath[0]=='"' ){ zKey = zPath + 1; - for(i=1; zPath[i] && zPath[i]!='"'; i++){} + for(i=1; zPath[i] && zPath[i]!='"'; i++){ + if( zPath[i]=='\\' && zPath[i+1]!=0 ) i++; + } nKey = i-1; if( zPath[i] ){ i++; @@ -207967,7 +211092,7 @@ static void jsonReturnFromBlob( char *zOut; u32 nOut = sz; z = (const char*)&pParse->aBlob[i+n]; - zOut = sqlite3DbMallocRaw(db, nOut+1); + zOut = sqlite3DbMallocRaw(db, ((u64)nOut)+1); if( zOut==0 ) goto returnfromblob_oom; for(iIn=iOut=0; iInaBlob = (u8*)sqlite3_value_blob(pArg); - pParse->nBlob = sqlite3_value_bytes(pArg); - }else{ + if( !jsonArgIsJsonb(pArg, pParse) ){ sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1); return 1; } @@ -208145,7 +211267,7 @@ static char *jsonBadPathError( } /* argv[0] is a BLOB that seems likely to be a JSONB. 
Subsequent -** arguments come in parse where each pair contains a JSON path and +** arguments come in pairs where each pair contains a JSON path and ** content to insert or set at that patch. Do the updates ** and return the result. ** @@ -208216,27 +211338,46 @@ static void jsonInsertIntoBlob( /* ** If pArg is a blob that seems like a JSONB blob, then initialize ** p to point to that JSONB and return TRUE. If pArg does not seem like -** a JSONB blob, then return FALSE; -** -** This routine is only called if it is already known that pArg is a -** blob. The only open question is whether or not the blob appears -** to be a JSONB blob. +** a JSONB blob, then return FALSE. +** +** For small BLOBs (having no more than 7 bytes of payload) a full +** validity check is done. So for small BLOBs this routine only returns +** true if the value is guaranteed to be a valid JSONB. For larger BLOBs +** (8 byte or more of payload) only the size of the outermost element is +** checked to verify that the BLOB is superficially valid JSONB. +** +** A full JSONB validation is done on smaller BLOBs because those BLOBs might +** also be text JSON that has been incorrectly cast into a BLOB. +** (See tag-20240123-a and https://sqlite.org/forum/forumpost/012136abd5) +** If the BLOB is 9 bytes are larger, then it is not possible for the +** superficial size check done here to pass if the input is really text +** JSON so we do not need to look deeper in that case. +** +** Why we only need to do full JSONB validation for smaller BLOBs: +** +** The first byte of valid JSON text must be one of: '{', '[', '"', ' ', '\n', +** '\r', '\t', '-', or a digit '0' through '9'. Of these, only a subset +** can also be the first byte of JSONB: '{', '[', and digits '3' +** through '9'. In every one of those cases, the payload size is 7 bytes +** or less. So if we do full JSONB validation for every BLOB where the +** payload is less than 7 bytes, we will never get a false positive for +** JSONB on an input that is really text JSON. */ static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){ u32 n, sz = 0; + u8 c; + if( sqlite3_value_type(pArg)!=SQLITE_BLOB ) return 0; p->aBlob = (u8*)sqlite3_value_blob(pArg); p->nBlob = (u32)sqlite3_value_bytes(pArg); - if( p->nBlob==0 ){ - p->aBlob = 0; - return 0; - } - if( NEVER(p->aBlob==0) ){ - return 0; - } - if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT + if( p->nBlob>0 + && ALWAYS(p->aBlob!=0) + && ((c = p->aBlob[0]) & 0x0f)<=JSONB_OBJECT && (n = jsonbPayloadSize(p, 0, &sz))>0 && sz+n==p->nBlob - && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0) + && ((c & 0x0f)>JSONB_FALSE || sz==0) + && (sz>7 + || (c!=0x7b && c!=0x5b && !sqlite3Isdigit(c)) + || jsonbValidityCheck(p, 0, p->nBlob, 1)==0) ){ return 1; } @@ -208314,7 +211455,7 @@ static JsonParse *jsonParseFuncArg( ** JSON functions were suppose to work. From the beginning, blob was ** reserved for expansion and a blob value should have raised an error. ** But it did not, due to a bug. And many applications came to depend - ** upon this buggy behavior, espeically when using the CLI and reading + ** upon this buggy behavior, especially when using the CLI and reading ** JSON text using readfile(), which returns a blob. For this reason ** we will continue to support the bug moving forward. ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d @@ -208710,10 +211851,16 @@ static void jsonExtractFunc( ** NUMBER ==> $[NUMBER] // PG compatible ** LABEL ==> $.LABEL // PG compatible ** [NUMBER] ==> $[NUMBER] // Not PG. 
Purely for convenience + ** + ** Updated 2024-05-27: If the NUMBER is negative, then PG counts from + ** the right of the array. Hence for negative NUMBER: + ** + ** NUMBER ==> $[#NUMBER] // PG compatible */ jsonStringInit(&jx, ctx); if( sqlite3_value_type(argv[i])==SQLITE_INTEGER ){ jsonAppendRawNZ(&jx, "[", 1); + if( zPath[0]=='-' ) jsonAppendRawNZ(&jx,"#",1); jsonAppendRaw(&jx, zPath, nPath); jsonAppendRawNZ(&jx, "]", 2); }else if( jsonAllAlphanum(zPath, nPath) ){ @@ -209323,21 +212470,17 @@ static void jsonValidFunc( return; } case SQLITE_BLOB: { - if( jsonFuncArgMightBeBinary(argv[0]) ){ + JsonParse py; + memset(&py, 0, sizeof(py)); + if( jsonArgIsJsonb(argv[0], &py) ){ if( flags & 0x04 ){ /* Superficial checking only - accomplished by the - ** jsonFuncArgMightBeBinary() call above. */ + ** jsonArgIsJsonb() call above. */ res = 1; }else if( flags & 0x08 ){ /* Strict checking. Check by translating BLOB->TEXT->BLOB. If ** no errors occur, call that a "strict check". */ - JsonParse px; - u32 iErr; - memset(&px, 0, sizeof(px)); - px.aBlob = (u8*)sqlite3_value_blob(argv[0]); - px.nBlob = sqlite3_value_bytes(argv[0]); - iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1); - res = iErr==0; + res = 0==jsonbValidityCheck(&py, 0, py.nBlob, 1); } break; } @@ -209395,9 +212538,7 @@ static void jsonErrorFunc( UNUSED_PARAMETER(argc); memset(&s, 0, sizeof(s)); s.db = sqlite3_context_db_handle(ctx); - if( jsonFuncArgMightBeBinary(argv[0]) ){ - s.aBlob = (u8*)sqlite3_value_blob(argv[0]); - s.nBlob = sqlite3_value_bytes(argv[0]); + if( jsonArgIsJsonb(argv[0], &s) ){ iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1); }else{ s.zJson = (char*)sqlite3_value_text(argv[0]); @@ -209558,18 +212699,20 @@ static void jsonObjectStep( UNUSED_PARAMETER(argc); pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ + z = (const char*)sqlite3_value_text(argv[0]); + n = sqlite3Strlen30(z); if( pStr->zBuf==0 ){ jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '{'); - }else if( pStr->nUsed>1 ){ + }else if( pStr->nUsed>1 && z!=0 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; - z = (const char*)sqlite3_value_text(argv[0]); - n = sqlite3Strlen30(z); - jsonAppendString(pStr, z, n); - jsonAppendChar(pStr, ':'); - jsonAppendSqlValue(pStr, argv[1]); + if( z!=0 ){ + jsonAppendString(pStr, z, n); + jsonAppendChar(pStr, ':'); + jsonAppendSqlValue(pStr, argv[1]); + } } } static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){ @@ -210082,9 +213225,8 @@ static int jsonEachFilter( memset(&p->sParse, 0, sizeof(p->sParse)); p->sParse.nJPRef = 1; p->sParse.db = p->db; - if( jsonFuncArgMightBeBinary(argv[0]) ){ - p->sParse.nBlob = sqlite3_value_bytes(argv[0]); - p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]); + if( jsonArgIsJsonb(argv[0], &p->sParse) ){ + /* We have JSONB */ }else{ p->sParse.zJson = (char*)sqlite3_value_text(argv[0]); p->sParse.nJson = sqlite3_value_bytes(argv[0]); @@ -210378,6 +213520,8 @@ SQLITE_PRIVATE int sqlite3JsonTableFunctions(sqlite3 *db){ #endif SQLITE_PRIVATE int sqlite3GetToken(const unsigned char*,int*); /* In the SQLite core */ +/* #include */ + /* ** If building separately, we will need some setup that is normally ** found in sqliteInt.h @@ -210408,6 +213552,14 @@ typedef unsigned int u32; # define ALWAYS(X) (X) # define NEVER(X) (X) #endif +#ifndef offsetof +#define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif #endif 
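The -> and ->> right-operand rewriting described above (a NUMBER becomes $[NUMBER], a negative NUMBER becomes $[#NUMBER] so that PG-style counting from the end of the array works, and a bare LABEL becomes $.LABEL) is easy to reproduce in isolation. A hypothetical sketch of the rewriting rules, not the library routine:

  #include <stdio.h>
  #include <string.h>
  #include <ctype.h>

  static int looksLikeInt(const char *z){
    if( *z=='-' ) z++;
    if( !isdigit((unsigned char)*z) ) return 0;
    while( isdigit((unsigned char)*z) ) z++;
    return *z=='\0';
  }
  static int looksLikeLabel(const char *z){
    if( *z=='\0' ) return 0;
    for(; *z; z++) if( !isalnum((unsigned char)*z) ) return 0;
    return 1;
  }

  /* Rewrite a PG-style -> / ->> right operand into a JSON path. */
  static void pgPath(const char *z, char *zOut, size_t nOut){
    if( looksLikeInt(z) ){
      /* "#" makes a negative index count from the end of the array */
      snprintf(zOut, nOut, z[0]=='-' ? "$[#%s]" : "$[%s]", z);
    }else if( looksLikeLabel(z) ){
      snprintf(zOut, nOut, "$.%s", z);
    }else{
      snprintf(zOut, nOut, "%s", z);  /* assume it is already a path */
    }
  }

  int main(void){
    char buf[64];
    const char *az[] = { "2", "-1", "name", "$.x[0]" };
    size_t i;
    for(i=0; i<sizeof(az)/sizeof(az[0]); i++){
      pgPath(az[i], buf, sizeof(buf));
      printf("%-8s -> %s\n", az[i], buf);   /* $[2], $[#-1], $.name, $.x[0] */
    }
    return 0;
  }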
/* !defined(SQLITE_AMALGAMATION) */ /* Macro to check for 4-byte alignment. Only used inside of assert() */ @@ -210728,9 +213880,13 @@ struct RtreeMatchArg { RtreeGeomCallback cb; /* Info about the callback functions */ int nParam; /* Number of parameters to the SQL function */ sqlite3_value **apSqlParam; /* Original SQL parameter values */ - RtreeDValue aParam[1]; /* Values for parameters to the SQL function */ + RtreeDValue aParam[FLEXARRAY]; /* Values for parameters to the SQL function */ }; +/* Size of an RtreeMatchArg object with N parameters */ +#define SZ_RTREEMATCHARG(N) \ + (offsetof(RtreeMatchArg,aParam)+(N)*sizeof(RtreeDValue)) + #ifndef MAX # define MAX(x,y) ((x) < (y) ? (y) : (x)) #endif @@ -212419,7 +215575,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ } /* -** Return the N-dimensional volumn of the cell stored in *p. +** Return the N-dimensional volume of the cell stored in *p. */ static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){ RtreeDValue area = (RtreeDValue)1; @@ -214089,8 +217245,8 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ sqlite3_str_append(pOut, "}", 1); } errCode = sqlite3_str_errcode(pOut); - sqlite3_result_text(ctx, sqlite3_str_finish(pOut), -1, sqlite3_free); sqlite3_result_error_code(ctx, errCode); + sqlite3_result_text(ctx, sqlite3_str_finish(pOut), -1, sqlite3_free); } /* This routine implements an SQL function that returns the "depth" parameter @@ -214185,7 +217341,7 @@ static sqlite3_stmt *rtreeCheckPrepare( /* ** The second and subsequent arguments to this function are a printf() ** style format string and arguments. This function formats the string and -** appends it to the report being accumuated in pCheck. +** appends it to the report being accumulated in pCheck. */ static void rtreeCheckAppendMsg(RtreeCheck *pCheck, const char *zFmt, ...){ va_list ap; @@ -215373,7 +218529,7 @@ static void geopolyBBoxFinal( ** Determine if point (x0,y0) is beneath line segment (x1,y1)->(x2,y2). ** Returns: ** -** +2 x0,y0 is on the line segement +** +2 x0,y0 is on the line segment ** ** +1 x0,y0 is beneath line segment ** @@ -215479,7 +218635,7 @@ static void geopolyWithinFunc( sqlite3_free(p2); } -/* Objects used by the overlap algorihm. */ +/* Objects used by the overlap algorithm. */ typedef struct GeoEvent GeoEvent; typedef struct GeoSegment GeoSegment; typedef struct GeoOverlap GeoOverlap; @@ -216526,8 +219682,7 @@ static void geomCallback(sqlite3_context *ctx, int nArg, sqlite3_value **aArg){ sqlite3_int64 nBlob; int memErr = 0; - nBlob = sizeof(RtreeMatchArg) + (nArg-1)*sizeof(RtreeDValue) - + nArg*sizeof(sqlite3_value*); + nBlob = SZ_RTREEMATCHARG(nArg) + nArg*sizeof(sqlite3_value*); pBlob = (RtreeMatchArg *)sqlite3_malloc64(nBlob); if( !pBlob ){ sqlite3_result_error_nomem(ctx); @@ -216606,7 +219761,7 @@ SQLITE_API int sqlite3_rtree_query_callback( ); } -#if !SQLITE_CORE +#ifndef SQLITE_CORE #ifdef _WIN32 __declspec(dllexport) #endif @@ -217197,7 +220352,7 @@ SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){ return rc; } -#if !SQLITE_CORE +#ifndef SQLITE_CORE #ifdef _WIN32 __declspec(dllexport) #endif @@ -217622,7 +220777,7 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule( ** ** "RBU" stands for "Resumable Bulk Update". As in a large database update ** transmitted via a wireless network to a mobile device. A transaction -** applied using this extension is hence refered to as an "RBU update". +** applied using this extension is hence referred to as an "RBU update". 
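The aParam[1]-to-aParam[FLEXARRAY] change above is a pattern that recurs throughout this patch: replace the old one-element-array trick with a genuine C99 flexible array member where the compiler supports it, and size allocations with offsetof() so that trailing padding is never counted twice. A stand-alone sketch with a hypothetical struct:

  #include <stdio.h>
  #include <stdlib.h>
  #include <stddef.h>

  #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
  # define FLEXARRAY          /* real C99 flexible array member */
  #else
  # define FLEXARRAY 1        /* pre-C99 fallback: one-element array */
  #endif

  typedef struct Match Match;
  struct Match {
    int nParam;               /* Number of parameters */
    double aParam[FLEXARRAY]; /* The parameters themselves */
  };

  /* Size of a Match carrying N parameters: offsetof() stops at the
  ** start of the flexible member, so padding is never double-counted. */
  #define SZ_MATCH(N) (offsetof(Match,aParam)+(N)*sizeof(double))

  int main(void){
    int i, n = 4;
    Match *p = malloc( SZ_MATCH(n) );
    if( p==0 ) return 1;
    p->nParam = n;
    for(i=0; i<n; i++) p->aParam[i] = 0.5*i;
    printf("alloc=%zu bytes, last=%g\n", SZ_MATCH(n), p->aParam[n-1]);
    free(p);
    return 0;
  }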
** ** ** LIMITATIONS @@ -217919,7 +221074,7 @@ SQLITE_API sqlite3rbu *sqlite3rbu_open( ** the next call to sqlite3rbu_vacuum() opens a handle that starts a ** new RBU vacuum operation. ** -** As with sqlite3rbu_open(), Zipvfs users should rever to the comment +** As with sqlite3rbu_open(), Zipvfs users should refer to the comment ** describing the sqlite3rbu_create_vfs() API function below for ** a description of the complications associated with using RBU with ** zipvfs databases. @@ -218015,7 +221170,7 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *pRbu); ** ** If the RBU update has been completely applied, mark the RBU database ** as fully applied. Otherwise, assuming no error has occurred, save the -** current state of the RBU update appliation to the RBU database. +** current state of the RBU update application to the RBU database. ** ** If an error has already occurred as part of an sqlite3rbu_step() ** or sqlite3rbu_open() call, or if one occurs within this function, an @@ -218455,6 +221610,27 @@ struct RbuFrame { u32 iWalFrame; }; +#ifndef UNUSED_PARAMETER +/* +** The following macros are used to suppress compiler warnings and to +** make it clear to human readers when a function parameter is deliberately +** left unused within the body of a function. This usually happens when +** a function is called via a function pointer. For example the +** implementation of an SQL aggregate step callback may not use the +** parameter indicating the number of arguments passed to the aggregate, +** if it knows that this is enforced elsewhere. +** +** When a function parameter is not used at all within the body of a function, +** it is generally named "NotUsed" or "NotUsed2" to make things even clearer. +** However, these macros may also be used to suppress warnings related to +** parameters that may or may not be used depending on compilation options. +** For example those parameters only used in assert() statements. In these +** cases the parameters are named as per the usual conventions. +*/ +#define UNUSED_PARAMETER(x) (void)(x) +#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y) +#endif + /* ** RBU handle. 
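The UNUSED_PARAMETER macros above exist because callbacks must match fixed signatures even when they ignore some arguments; the cast to void silences -Wunused-parameter without generating any code. A minimal example with a hypothetical sqlite3_exec-style callback:

  #include <stdio.h>

  #define UNUSED_PARAMETER(x)    (void)(x)
  #define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y)

  /* A callback that only cares about the column count but must still
  ** accept the full four-argument signature its caller expects. */
  static int countRows(void *pUnused, int nCol, char **azVal, char **azCol){
    UNUSED_PARAMETER(pUnused);
    UNUSED_PARAMETER2(azVal, azCol);
    return nCol>=0 ? 0 : 1;
  }

  int main(void){
    printf("%d\n", countRows(0, 3, 0, 0));   /* prints 0 */
    return 0;
  }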
** @@ -218506,7 +221682,7 @@ struct sqlite3rbu { int rc; /* Value returned by last rbu_step() call */ char *zErrmsg; /* Error message if rc!=SQLITE_OK */ int nStep; /* Rows processed for current object */ - int nProgress; /* Rows processed for all objects */ + sqlite3_int64 nProgress; /* Rows processed for all objects */ RbuObjIter objiter; /* Iterator for skipping through tbl/idx */ const char *zVfsName; /* Name of automatically created rbu vfs */ rbu_file *pTargetFd; /* File handle open on target db */ @@ -218623,7 +221799,7 @@ static unsigned int rbuDeltaGetInt(const char **pz, int *pLen){ v = (v<<6) + c; } z--; - *pLen -= z - zStart; + *pLen -= (int)(z - zStart); *pz = (char*)z; return v; } @@ -218808,6 +221984,7 @@ static void rbuFossilDeltaFunc( char *aOut; assert( argc==2 ); + UNUSED_PARAMETER(argc); nOrig = sqlite3_value_bytes(argv[0]); aOrig = (const char*)sqlite3_value_blob(argv[0]); @@ -220387,13 +223564,13 @@ static char *rbuObjIterGetIndexWhere(sqlite3rbu *p, RbuObjIter *pIter){ else if( c==')' ){ nParen--; if( nParen==0 ){ - int nSpan = &zSql[i] - pIter->aIdxCol[iIdxCol].zSpan; + int nSpan = (int)(&zSql[i] - pIter->aIdxCol[iIdxCol].zSpan); pIter->aIdxCol[iIdxCol++].nSpan = nSpan; i++; break; } }else if( c==',' && nParen==1 ){ - int nSpan = &zSql[i] - pIter->aIdxCol[iIdxCol].zSpan; + int nSpan = (int)(&zSql[i] - pIter->aIdxCol[iIdxCol].zSpan); pIter->aIdxCol[iIdxCol++].nSpan = nSpan; pIter->aIdxCol[iIdxCol].zSpan = &zSql[i+1]; }else if( c=='"' || c=='\'' || c=='`' ){ @@ -221083,6 +224260,8 @@ static void rbuFileSuffix3(const char *zBase, char *z){ for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} if( z[i]=='.' && sz>i+4 ) memmove(&z[i+1], &z[sz-3], 4); } +#else + UNUSED_PARAMETER2(zBase,z); #endif } @@ -221667,7 +224846,7 @@ static void rbuSaveState(sqlite3rbu *p, int eStage){ "(%d, %Q), " "(%d, %Q), " "(%d, %d), " - "(%d, %d), " + "(%d, %lld), " "(%d, %lld), " "(%d, %lld), " "(%d, %lld), " @@ -222025,6 +225204,7 @@ static void rbuIndexCntFunc( sqlite3 *db = (rbuIsVacuum(p) ? p->dbRbu : p->dbMain); assert( nVal==1 ); + UNUSED_PARAMETER(nVal); rc = prepareFreeAndCollectError(db, &pStmt, &zErrmsg, sqlite3_mprintf("SELECT count(*) FROM sqlite_schema " @@ -222300,7 +225480,7 @@ SQLITE_API sqlite3rbu *sqlite3rbu_vacuum( ){ if( zTarget==0 ){ return rbuMisuseError(); } if( zState ){ - int n = strlen(zState); + size_t n = strlen(zState); if( n>=7 && 0==memcmp("-vactmp", &zState[n-7], 7) ){ return rbuMisuseError(); } @@ -222517,6 +225697,7 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *p){ */ static int xDefaultRename(void *pArg, const char *zOld, const char *zNew){ int rc = SQLITE_OK; + UNUSED_PARAMETER(pArg); #if defined(_WIN32_WCE) { LPWSTR zWideOld; @@ -222915,7 +226096,7 @@ static int rbuVfsFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ /* If this is an RBU vacuum operation and this is the target database, ** pretend that it has at least one page. Otherwise, SQLite will not - ** check for the existance of a *-wal file. rbuVfsRead() contains + ** check for the existence of a *-wal file. rbuVfsRead() contains ** similar logic. */ if( rc==SQLITE_OK && *pSize==0 && p->pRbu && rbuIsVacuum(p->pRbu) @@ -223421,6 +226602,9 @@ static int rbuVfsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ ** No-op. 
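Widening nProgress to sqlite3_int64 above is only safe because the matching "(%d, %d)" slot in the rbuSaveState() format string is widened to "(%d, %lld)" in the same hunk; pulling a 64-bit value through a %d conversion reads the wrong number of bytes from the va_list and is undefined behavior. A minimal illustration, with long long standing in for sqlite3_int64:

  #include <stdio.h>

  int main(void){
    long long nProgress = 5000000000LL;   /* > 2^32: overflows an int */
    char zBuf[64];
    /* Correct: %lld consumes a long long from the argument list */
    snprintf(zBuf, sizeof(zBuf), "(%d, %lld)", 7, nProgress);
    printf("%s\n", zBuf);                 /* (7, 5000000000) */
    /* Passing nProgress to a "%d" conversion instead would be UB. */
    return 0;
  }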
*/ static int rbuVfsGetLastError(sqlite3_vfs *pVfs, int a, char *b){ + UNUSED_PARAMETER(pVfs); + UNUSED_PARAMETER(a); + UNUSED_PARAMETER(b); return 0; } @@ -223819,6 +227003,7 @@ static int statBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ pIdxInfo->orderByConsumed = 1; pIdxInfo->idxNum |= 0x08; } + pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_HEX; return SQLITE_OK; } @@ -224476,7 +227661,13 @@ SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){ return SQLITE_OK; } ** ** The data field of sqlite_dbpage table can be updated. The new ** value must be a BLOB which is the correct page size, otherwise the -** update fails. Rows may not be deleted or inserted. +** update fails. INSERT operations also work, and operate as if they +** where REPLACE. The size of the database can be extended by INSERT-ing +** new pages on the end. +** +** Rows may not be deleted. However, doing an INSERT to page number N +** with NULL page data causes the N-th page and all subsequent pages to be +** deleted and the database to be truncated. */ /* #include "sqliteInt.h" ** Requires access to internal data structures ** */ @@ -224499,6 +227690,8 @@ struct DbpageCursor { struct DbpageTable { sqlite3_vtab base; /* Base class. Must be first */ sqlite3 *db; /* The database */ + int iDbTrunc; /* Database to truncate */ + Pgno pgnoTrunc; /* Size to truncate to */ }; /* Columns */ @@ -224507,7 +227700,6 @@ struct DbpageTable { #define DBPAGE_COLUMN_SCHEMA 2 - /* ** Connect to or create a dbpagevfs virtual table. */ @@ -224758,6 +227950,24 @@ static int dbpageRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ return SQLITE_OK; } +/* +** Open write transactions. Since we do not know in advance which database +** files will be written by the sqlite_dbpage virtual table, start a write +** transaction on them all. +** +** Return SQLITE_OK if successful, or an SQLite error code otherwise. +*/ +static int dbpageBeginTrans(DbpageTable *pTab){ + sqlite3 *db = pTab->db; + int rc = SQLITE_OK; + int i; + for(i=0; rc==SQLITE_OK && inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ) rc = sqlite3BtreeBeginTrans(pBt, 1, 0); + } + return rc; +} + static int dbpageUpdate( sqlite3_vtab *pVtab, int argc, @@ -224769,11 +227979,11 @@ static int dbpageUpdate( DbPage *pDbPage = 0; int rc = SQLITE_OK; char *zErr = 0; - const char *zSchema; int iDb; Btree *pBt; Pager *pPager; int szPage; + int isInsert; (void)pRowid; if( pTab->db->flags & SQLITE_Defensive ){ @@ -224784,21 +227994,29 @@ static int dbpageUpdate( zErr = "cannot delete"; goto update_fail; } - pgno = sqlite3_value_int(argv[0]); - if( sqlite3_value_type(argv[0])==SQLITE_NULL - || (Pgno)sqlite3_value_int(argv[1])!=pgno - ){ - zErr = "cannot insert"; - goto update_fail; + if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ + pgno = (Pgno)sqlite3_value_int(argv[2]); + isInsert = 1; + }else{ + pgno = sqlite3_value_int(argv[0]); + if( (Pgno)sqlite3_value_int(argv[1])!=pgno ){ + zErr = "cannot insert"; + goto update_fail; + } + isInsert = 0; } - zSchema = (const char*)sqlite3_value_text(argv[4]); - iDb = ALWAYS(zSchema) ? 
sqlite3FindDbName(pTab->db, zSchema) : -1; - if( NEVER(iDb<0) ){ - zErr = "no such schema"; - goto update_fail; + if( sqlite3_value_type(argv[4])==SQLITE_NULL ){ + iDb = 0; + }else{ + const char *zSchema = (const char*)sqlite3_value_text(argv[4]); + iDb = sqlite3FindDbName(pTab->db, zSchema); + if( iDb<0 ){ + zErr = "no such schema"; + goto update_fail; + } } pBt = pTab->db->aDb[iDb].pBt; - if( NEVER(pgno<1) || NEVER(pBt==0) || NEVER(pgno>sqlite3BtreeLastPage(pBt)) ){ + if( pgno<1 || NEVER(pBt==0) ){ zErr = "bad page number"; goto update_fail; } @@ -224806,51 +228024,83 @@ static int dbpageUpdate( if( sqlite3_value_type(argv[3])!=SQLITE_BLOB || sqlite3_value_bytes(argv[3])!=szPage ){ - zErr = "bad page value"; + if( sqlite3_value_type(argv[3])==SQLITE_NULL && isInsert && pgno>1 ){ + /* "INSERT INTO dbpage($PGNO,NULL)" causes page number $PGNO and + ** all subsequent pages to be deleted. */ + pTab->iDbTrunc = iDb; + pTab->pgnoTrunc = pgno-1; + pgno = 1; + }else{ + zErr = "bad page value"; + goto update_fail; + } + } + + if( dbpageBeginTrans(pTab)!=SQLITE_OK ){ + zErr = "failed to open transaction"; goto update_fail; } + pPager = sqlite3BtreePager(pBt); rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pDbPage, 0); if( rc==SQLITE_OK ){ const void *pData = sqlite3_value_blob(argv[3]); - assert( pData!=0 || pTab->db->mallocFailed ); - if( pData - && (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK - ){ - memcpy(sqlite3PagerGetData(pDbPage), pData, szPage); + if( (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK && pData ){ + unsigned char *aPage = sqlite3PagerGetData(pDbPage); + memcpy(aPage, pData, szPage); + pTab->pgnoTrunc = 0; } + }else{ + pTab->pgnoTrunc = 0; } sqlite3PagerUnref(pDbPage); return rc; update_fail: + pTab->pgnoTrunc = 0; sqlite3_free(pVtab->zErrMsg); pVtab->zErrMsg = sqlite3_mprintf("%s", zErr); return SQLITE_ERROR; } -/* Since we do not know in advance which database files will be -** written by the sqlite_dbpage virtual table, start a write transaction -** on them all. -*/ static int dbpageBegin(sqlite3_vtab *pVtab){ DbpageTable *pTab = (DbpageTable *)pVtab; - sqlite3 *db = pTab->db; - int i; - for(i=0; inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ) (void)sqlite3BtreeBeginTrans(pBt, 1, 0); + pTab->pgnoTrunc = 0; + return SQLITE_OK; +} + +/* Invoke sqlite3PagerTruncate() as necessary, just prior to COMMIT +*/ +static int dbpageSync(sqlite3_vtab *pVtab){ + DbpageTable *pTab = (DbpageTable *)pVtab; + if( pTab->pgnoTrunc>0 ){ + Btree *pBt = pTab->db->aDb[pTab->iDbTrunc].pBt; + Pager *pPager = sqlite3BtreePager(pBt); + sqlite3BtreeEnter(pBt); + if( pTab->pgnoTruncpgnoTrunc); + } + sqlite3BtreeLeave(pBt); } + pTab->pgnoTrunc = 0; return SQLITE_OK; } +/* Cancel any pending truncate. 
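Putting the dbpage pieces together: dbpageUpdate() records the truncation point and dbpageSync() applies it just before commit, so the documented way to shrink a database through sqlite_dbpage is a single INSERT with NULL data. A hedged usage sketch; it assumes a build with SQLITE_ENABLE_DBPAGE_VTAB and a connection not in defensive mode, and only prints the result code rather than asserting success:

  #include <stdio.h>
  #include "sqlite3.h"

  int main(void){
    sqlite3 *db;
    int rc = sqlite3_open(":memory:", &db);
    if( rc ) return 1;
    /* Truncate the main database to 16 pages: inserting page 17 with
    ** NULL data deletes page 17 and everything after it at commit. */
    rc = sqlite3_exec(db,
        "INSERT INTO sqlite_dbpage(pgno,data) VALUES(17,NULL)", 0, 0, 0);
    printf("rc=%d (%s)\n", rc, sqlite3_errmsg(db));
    sqlite3_close(db);
    return 0;
  }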
+*/ +static int dbpageRollbackTo(sqlite3_vtab *pVtab, int notUsed1){ + DbpageTable *pTab = (DbpageTable *)pVtab; + pTab->pgnoTrunc = 0; + (void)notUsed1; + return SQLITE_OK; +} /* ** Invoke this routine to register the "dbpage" virtual table module */ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ static sqlite3_module dbpage_module = { - 0, /* iVersion */ + 2, /* iVersion */ dbpageConnect, /* xCreate */ dbpageConnect, /* xConnect */ dbpageBestIndex, /* xBestIndex */ @@ -224865,14 +228115,14 @@ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ dbpageRowid, /* xRowid - read data */ dbpageUpdate, /* xUpdate */ dbpageBegin, /* xBegin */ - 0, /* xSync */ + dbpageSync, /* xSync */ 0, /* xCommit */ 0, /* xRollback */ 0, /* xFindMethod */ 0, /* xRename */ 0, /* xSavepoint */ 0, /* xRelease */ - 0, /* xRollbackTo */ + dbpageRollbackTo, /* xRollbackTo */ 0, /* xShadowName */ 0 /* xIntegrity */ }; @@ -224960,6 +228210,10 @@ struct SessionBuffer { ** input data. Input data may be supplied either as a single large buffer ** (e.g. sqlite3changeset_start()) or using a stream function (e.g. ** sqlite3changeset_start_strm()). +** +** bNoDiscard: +** If true, then the only time data is discarded is as a result of explicit +** sessionDiscardData() calls. Not within every sessionInputBuffer() call. */ struct SessionInput { int bNoDiscard; /* If true, do not discard in InputBuffer() */ @@ -225021,11 +228275,13 @@ struct sqlite3_changeset_iter { struct SessionTable { SessionTable *pNext; char *zName; /* Local name of table */ - int nCol; /* Number of columns in table zName */ + int nCol; /* Number of non-hidden columns */ + int nTotalCol; /* Number of columns including hidden */ int bStat1; /* True if this is sqlite_stat1 */ int bRowid; /* True if this table uses rowid for PK */ const char **azCol; /* Column names */ const char **azDflt; /* Default value expressions */ + int *aiIdx; /* Index to pass to xNew/xOld */ u8 *abPK; /* Array of primary key flags */ int nEntry; /* Total number of entries in hash table */ int nChange; /* Size of apChange[] array */ @@ -225428,22 +228684,22 @@ static int sessionPreupdateHash( unsigned int h = 0; /* Hash value to return */ int i; /* Used to iterate through columns */ + assert( pTab->nTotalCol==pSession->hook.xCount(pSession->hook.pCtx) ); if( pTab->bRowid ){ - assert( pTab->nCol-1==pSession->hook.xCount(pSession->hook.pCtx) ); h = sessionHashAppendI64(h, iRowid); }else{ assert( *pbNullPK==0 ); - assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); for(i=0; inCol; i++){ if( pTab->abPK[i] ){ int rc; int eType; sqlite3_value *pVal; + int iIdx = pTab->aiIdx[i]; if( bNew ){ - rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); + rc = pSession->hook.xNew(pSession->hook.pCtx, iIdx, &pVal); }else{ - rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); + rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &pVal); } if( rc!=SQLITE_OK ) return rc; @@ -225780,6 +229036,7 @@ static int sessionPreupdateEqual( sqlite3_value *pVal; /* Value returned by preupdate_new/old */ int rc; /* Error code from preupdate_new/old */ int eType = *a++; /* Type of value from change record */ + int iIdx = pTab->aiIdx[iCol]; /* The following calls to preupdate_new() and preupdate_old() can not ** fail. This is because they cache their return values, and by the @@ -225788,10 +229045,10 @@ static int sessionPreupdateEqual( ** this (that the method has already been called). 
*/ if( op==SQLITE_INSERT ){ /* assert( db->pPreUpdate->pNewUnpacked || db->pPreUpdate->aNew ); */ - rc = pSession->hook.xNew(pSession->hook.pCtx, iCol, &pVal); + rc = pSession->hook.xNew(pSession->hook.pCtx, iIdx, &pVal); }else{ /* assert( db->pPreUpdate->pUnpacked ); */ - rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal); + rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &pVal); } assert( rc==SQLITE_OK ); (void)rc; /* Suppress warning about unused variable */ @@ -225916,9 +229173,11 @@ static int sessionTableInfo( const char *zDb, /* Name of attached database (e.g. "main") */ const char *zThis, /* Table name */ int *pnCol, /* OUT: number of columns */ + int *pnTotalCol, /* OUT: number of hidden columns */ const char **pzTab, /* OUT: Copy of zThis */ const char ***pazCol, /* OUT: Array of column names for table */ const char ***pazDflt, /* OUT: Array of default value expressions */ + int **paiIdx, /* OUT: Array of xNew/xOld indexes */ u8 **pabPK, /* OUT: Array of booleans - true for PK col */ int *pbRowid /* OUT: True if only PK is a rowid */ ){ @@ -225933,6 +229192,7 @@ static int sessionTableInfo( char **azCol = 0; char **azDflt = 0; u8 *abPK = 0; + int *aiIdx = 0; int bRowid = 0; /* Set to true to use rowid as PK */ assert( pazCol && pabPK ); @@ -225940,6 +229200,8 @@ static int sessionTableInfo( *pazCol = 0; *pabPK = 0; *pnCol = 0; + if( pnTotalCol ) *pnTotalCol = 0; + if( paiIdx ) *paiIdx = 0; if( pzTab ) *pzTab = 0; if( pazDflt ) *pazDflt = 0; @@ -225949,9 +229211,9 @@ static int sessionTableInfo( if( rc==SQLITE_OK ){ /* For sqlite_stat1, pretend that (tbl,idx) is the PRIMARY KEY. */ zPragma = sqlite3_mprintf( - "SELECT 0, 'tbl', '', 0, '', 1 UNION ALL " - "SELECT 1, 'idx', '', 0, '', 2 UNION ALL " - "SELECT 2, 'stat', '', 0, '', 0" + "SELECT 0, 'tbl', '', 0, '', 1, 0 UNION ALL " + "SELECT 1, 'idx', '', 0, '', 2, 0 UNION ALL " + "SELECT 2, 'stat', '', 0, '', 0, 0" ); }else if( rc==SQLITE_ERROR ){ zPragma = sqlite3_mprintf(""); @@ -225959,7 +229221,7 @@ static int sessionTableInfo( return rc; } }else{ - zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis); + zPragma = sqlite3_mprintf("PRAGMA '%q'.table_xinfo('%q')", zDb, zThis); } if( !zPragma ){ return SQLITE_NOMEM; @@ -225976,7 +229238,9 @@ static int sessionTableInfo( while( SQLITE_ROW==sqlite3_step(pStmt) ){ nByte += sqlite3_column_bytes(pStmt, 1); /* name */ nByte += sqlite3_column_bytes(pStmt, 4); /* dflt_value */ - nDbCol++; + if( sqlite3_column_int(pStmt, 6)==0 ){ /* !hidden */ + nDbCol++; + } if( sqlite3_column_int(pStmt, 5) ) bRowid = 0; /* pk */ } if( nDbCol==0 ) bRowid = 0; @@ -225985,7 +229249,7 @@ static int sessionTableInfo( rc = sqlite3_reset(pStmt); if( rc==SQLITE_OK ){ - nByte += nDbCol * (sizeof(const char *)*2 + sizeof(u8) + 1 + 1); + nByte += nDbCol * (sizeof(const char *)*2 +sizeof(int)+sizeof(u8) + 1 + 1); pAlloc = sessionMalloc64(pSession, nByte); if( pAlloc==0 ){ rc = SQLITE_NOMEM; @@ -225996,8 +229260,8 @@ static int sessionTableInfo( if( rc==SQLITE_OK ){ azCol = (char **)pAlloc; azDflt = (char**)&azCol[nDbCol]; - pAlloc = (u8 *)&azDflt[nDbCol]; - abPK = (u8 *)pAlloc; + aiIdx = (int*)&azDflt[nDbCol]; + abPK = (u8 *)&aiIdx[nDbCol]; pAlloc = &abPK[nDbCol]; if( pzTab ){ memcpy(pAlloc, zThis, nThis+1); @@ -226012,27 +229276,32 @@ static int sessionTableInfo( azCol[i] = (char*)pAlloc; pAlloc += nName+1; abPK[i] = 1; + aiIdx[i] = -1; i++; } while( SQLITE_ROW==sqlite3_step(pStmt) ){ - int nName = sqlite3_column_bytes(pStmt, 1); - int nDflt = sqlite3_column_bytes(pStmt, 4); - const 
unsigned char *zName = sqlite3_column_text(pStmt, 1); - const unsigned char *zDflt = sqlite3_column_text(pStmt, 4); - - if( zName==0 ) break; - memcpy(pAlloc, zName, nName+1); - azCol[i] = (char *)pAlloc; - pAlloc += nName+1; - if( zDflt ){ - memcpy(pAlloc, zDflt, nDflt+1); - azDflt[i] = (char *)pAlloc; - pAlloc += nDflt+1; - }else{ - azDflt[i] = 0; + if( sqlite3_column_int(pStmt, 6)==0 ){ /* !hidden */ + int nName = sqlite3_column_bytes(pStmt, 1); + int nDflt = sqlite3_column_bytes(pStmt, 4); + const unsigned char *zName = sqlite3_column_text(pStmt, 1); + const unsigned char *zDflt = sqlite3_column_text(pStmt, 4); + + if( zName==0 ) break; + memcpy(pAlloc, zName, nName+1); + azCol[i] = (char *)pAlloc; + pAlloc += nName+1; + if( zDflt ){ + memcpy(pAlloc, zDflt, nDflt+1); + azDflt[i] = (char *)pAlloc; + pAlloc += nDflt+1; + }else{ + azDflt[i] = 0; + } + abPK[i] = sqlite3_column_int(pStmt, 5); + aiIdx[i] = sqlite3_column_int(pStmt, 0); + i++; } - abPK[i] = sqlite3_column_int(pStmt, 5); - i++; + if( pnTotalCol ) (*pnTotalCol)++; } rc = sqlite3_reset(pStmt); } @@ -226045,6 +229314,7 @@ static int sessionTableInfo( if( pazDflt ) *pazDflt = (const char**)azDflt; *pabPK = abPK; *pnCol = nDbCol; + if( paiIdx ) *paiIdx = aiIdx; }else{ sessionFree(pSession, azCol); } @@ -226056,7 +229326,7 @@ static int sessionTableInfo( /* ** This function is called to initialize the SessionTable.nCol, azCol[] ** abPK[] and azDflt[] members of SessionTable object pTab. If these -** fields are already initilialized, this function is a no-op. +** fields are already initialized, this function is a no-op. ** ** If an error occurs, an error code is stored in sqlite3_session.rc and ** non-zero returned. Or, if no error occurs but the table has no primary @@ -226075,8 +229345,11 @@ static int sessionInitTable( if( pTab->nCol==0 ){ u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); + sqlite3_free(pTab->azCol); + pTab->abPK = 0; rc = sessionTableInfo(pSession, db, zDb, - pTab->zName, &pTab->nCol, 0, &pTab->azCol, &pTab->azDflt, &abPK, + pTab->zName, &pTab->nCol, &pTab->nTotalCol, 0, &pTab->azCol, + &pTab->azDflt, &pTab->aiIdx, &abPK, ((pSession==0 || pSession->bImplicitPK) ? &pTab->bRowid : 0) ); if( rc==SQLITE_OK ){ @@ -226111,15 +229384,17 @@ static int sessionInitTable( */ static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){ int nCol = 0; + int nTotalCol = 0; const char **azCol = 0; const char **azDflt = 0; + int *aiIdx = 0; u8 *abPK = 0; int bRowid = 0; assert( pSession->rc==SQLITE_OK ); pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, - pTab->zName, &nCol, 0, &azCol, &azDflt, &abPK, + pTab->zName, &nCol, &nTotalCol, 0, &azCol, &azDflt, &aiIdx, &abPK, (pSession->bImplicitPK ? 
&bRowid : 0) ); if( pSession->rc==SQLITE_OK ){ @@ -226142,8 +229417,10 @@ static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){ const char **a = pTab->azCol; pTab->azCol = azCol; pTab->nCol = nCol; + pTab->nTotalCol = nTotalCol; pTab->azDflt = azDflt; pTab->abPK = abPK; + pTab->aiIdx = aiIdx; azCol = a; } if( pSession->bEnableSize ){ @@ -226461,7 +229738,7 @@ static int sessionUpdateMaxSize( int ii; for(ii=0; iinCol; ii++){ sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii, &p); + pSession->hook.xNew(pSession->hook.pCtx, pTab->aiIdx[ii], &p); sessionSerializeValue(0, p, &nNew); } } @@ -226481,8 +229758,9 @@ static int sessionUpdateMaxSize( int bChanged = 1; int nOld = 0; int eType; + int iIdx = pTab->aiIdx[ii]; sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii-pTab->bRowid, &p); + pSession->hook.xNew(pSession->hook.pCtx, iIdx, &p); if( p==0 ){ return SQLITE_NOMEM; } @@ -226579,11 +229857,11 @@ static void sessionPreupdateOneChange( /* Check the number of columns in this xPreUpdate call matches the ** number of columns in the table. */ nExpect = pSession->hook.xCount(pSession->hook.pCtx); - if( (pTab->nCol-pTab->bRowid)nTotalColnCol-pTab->bRowid)!=nExpect ){ + if( pTab->nTotalCol!=nExpect ){ pSession->rc = SQLITE_SCHEMA; return; } @@ -226640,19 +229918,23 @@ static void sessionPreupdateOneChange( /* Figure out how large an allocation is required */ nByte = sizeof(SessionChange); - for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ + for(i=pTab->bRowid; inCol; i++){ + int iIdx = pTab->aiIdx[i]; sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ - TESTONLY(int trc = ) pSession->hook.xOld(pSession->hook.pCtx, i, &p); - assert( trc==SQLITE_OK ); + /* This may fail if the column has a non-NULL default and was added + ** using ALTER TABLE ADD COLUMN after this record was created. */ + rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &p); }else if( pTab->abPK[i] ){ - TESTONLY(int trc = ) pSession->hook.xNew(pSession->hook.pCtx, i, &p); + TESTONLY(int trc = ) pSession->hook.xNew(pSession->hook.pCtx,iIdx,&p); assert( trc==SQLITE_OK ); } - /* This may fail if SQLite value p contains a utf-16 string that must - ** be converted to utf-8 and an OOM error occurs while doing so. */ - rc = sessionSerializeValue(0, p, &nByte); + if( rc==SQLITE_OK ){ + /* This may fail if SQLite value p contains a utf-16 string that must + ** be converted to utf-8 and an OOM error occurs while doing so. 
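The new aiIdx[] array exists because PRAGMA table_xinfo reports hidden columns and the preupdate hook numbers columns with the hidden ones included, while the session object stores only the visible columns; each visible column must therefore remember its original position. The mapping itself is small; a sketch with a hypothetical helper:

  #include <stdio.h>

  /* Given per-column hidden flags as reported by a table_xinfo-style
  ** scan, build aiIdx[]: for each visible column, the index to pass to
  ** the preupdate xOld()/xNew() accessors. Returns the visible count. */
  static int buildIdxMap(const int *aHidden, int nTotalCol, int *aiIdx){
    int i, nVis = 0;
    for(i=0; i<nTotalCol; i++){
      if( aHidden[i]==0 ) aiIdx[nVis++] = i;
    }
    return nVis;
  }

  int main(void){
    /* Columns 1 and 3 hidden (e.g. virtual-table hidden columns) */
    const int aHidden[5] = {0, 1, 0, 1, 0};
    int aiIdx[5], i, nCol;
    nCol = buildIdxMap(aHidden, 5, aiIdx);
    for(i=0; i<nCol; i++){
      printf("visible %d -> xNew/xOld index %d\n", i, aiIdx[i]);
    }
    return 0;   /* 0->0, 1->2, 2->4 */
  }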
*/ + rc = sessionSerializeValue(0, p, &nByte); + } if( rc!=SQLITE_OK ) goto error_out; } if( pTab->bRowid ){ @@ -226679,12 +229961,13 @@ static void sessionPreupdateOneChange( sessionPutI64(&pC->aRecord[1], iRowid); nByte = 9; } - for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ + for(i=pTab->bRowid; inCol; i++){ sqlite3_value *p = 0; + int iIdx = pTab->aiIdx[i]; if( op!=SQLITE_INSERT ){ - pSession->hook.xOld(pSession->hook.pCtx, i, &p); + pSession->hook.xOld(pSession->hook.pCtx, iIdx, &p); }else if( pTab->abPK[i] ){ - pSession->hook.xNew(pSession->hook.pCtx, i, &p); + pSession->hook.xNew(pSession->hook.pCtx, iIdx, &p); } sessionSerializeValue(&pC->aRecord[nByte], p, &nByte); } @@ -227071,7 +230354,9 @@ SQLITE_API int sqlite3session_diff( SessionTable *pTo; /* Table zTbl */ /* Locate and if necessary initialize the target table object */ + pSession->bAutoAttach++; rc = sessionFindTable(pSession, zTbl, &pTo); + pSession->bAutoAttach--; if( pTo==0 ) goto diff_out; if( sessionInitTable(pSession, pTo, pSession->db, pSession->zDb) ){ rc = pSession->rc; @@ -227082,16 +230367,43 @@ SQLITE_API int sqlite3session_diff( if( rc==SQLITE_OK ){ int bHasPk = 0; int bMismatch = 0; - int nCol; /* Columns in zFrom.zTbl */ + int nCol = 0; /* Columns in zFrom.zTbl */ int bRowid = 0; - u8 *abPK; + u8 *abPK = 0; const char **azCol = 0; - rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, 0, &abPK, - pSession->bImplicitPK ? &bRowid : 0 - ); + char *zDbExists = 0; + + /* Check that database zFrom is attached. */ + zDbExists = sqlite3_mprintf("SELECT * FROM %Q.sqlite_schema", zFrom); + if( zDbExists==0 ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3_stmt *pDbExists = 0; + rc = sqlite3_prepare_v2(db, zDbExists, -1, &pDbExists, 0); + if( rc==SQLITE_ERROR ){ + rc = SQLITE_OK; + nCol = -1; + } + sqlite3_finalize(pDbExists); + sqlite3_free(zDbExists); + } + + if( rc==SQLITE_OK && nCol==0 ){ + rc = sessionTableInfo(0, db, zFrom, zTbl, + &nCol, 0, 0, &azCol, 0, 0, &abPK, + pSession->bImplicitPK ? &bRowid : 0 + ); + } if( rc==SQLITE_OK ){ if( pTo->nCol!=nCol ){ - bMismatch = 1; + if( nCol<=0 ){ + rc = SQLITE_SCHEMA; + if( pzErrMsg ){ + *pzErrMsg = sqlite3_mprintf("no such table: %s.%s", zFrom, zTbl); + } + }else{ + bMismatch = 1; + } }else{ int i; for(i=0; iaBuf[p->nBuf]; const char *zIn = zStr; *zOut++ = '"'; - while( *zIn ){ - if( *zIn=='"' ) *zOut++ = '"'; - *zOut++ = *(zIn++); + if( zIn!=0 ){ + while( *zIn ){ + if( *zIn=='"' ) *zOut++ = '"'; + *zOut++ = *(zIn++); + } } *zOut++ = '"'; p->nBuf = (int)((u8 *)zOut - p->aBuf); @@ -227663,10 +230977,10 @@ static int sessionSelectStmt( int rc = SQLITE_OK; char *zSql = 0; const char *zSep = ""; - const char *zCols = bRowid ? 
SESSIONS_ROWID ", *" : "*"; int nSql = -1; int i; + SessionBuffer cols = {0, 0, 0}; SessionBuffer nooptest = {0, 0, 0}; SessionBuffer pkfield = {0, 0, 0}; SessionBuffer pkvar = {0, 0, 0}; @@ -227679,9 +230993,16 @@ static int sessionSelectStmt( sessionAppendStr(&pkvar, "?1, (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", &rc ); - zCols = "tbl, ?2, stat"; + sessionAppendStr(&cols, "tbl, ?2, stat", &rc); }else{ + #if 0 + if( bRowid ){ + sessionAppendStr(&cols, SESSIONS_ROWID, &rc); + } + #endif for(i=0; idb; /* Source database handle */ SessionTable *pTab; /* Used to iterate through attached tables */ - SessionBuffer buf = {0,0,0}; /* Buffer in which to accumlate changeset */ + SessionBuffer buf = {0,0,0}; /* Buffer in which to accumulate changeset */ int rc; /* Return code */ assert( xOutput==0 || (pnChangeset==0 && ppChangeset==0) ); @@ -228210,14 +231532,15 @@ SQLITE_API int sqlite3changeset_start_v2_strm( ** object and the buffer is full, discard some data to free up space. */ static void sessionDiscardData(SessionInput *pIn){ - if( pIn->xInput && pIn->iNext>=sessions_strm_chunk_size ){ - int nMove = pIn->buf.nBuf - pIn->iNext; + if( pIn->xInput && pIn->iCurrent>=sessions_strm_chunk_size ){ + int nMove = pIn->buf.nBuf - pIn->iCurrent; assert( nMove>=0 ); if( nMove>0 ){ - memmove(pIn->buf.aBuf, &pIn->buf.aBuf[pIn->iNext], nMove); + memmove(pIn->buf.aBuf, &pIn->buf.aBuf[pIn->iCurrent], nMove); } - pIn->buf.nBuf -= pIn->iNext; - pIn->iNext = 0; + pIn->buf.nBuf -= pIn->iCurrent; + pIn->iNext -= pIn->iCurrent; + pIn->iCurrent = 0; pIn->nData = pIn->buf.nBuf; } } @@ -228571,8 +231894,8 @@ static int sessionChangesetNextOne( p->rc = sessionInputBuffer(&p->in, 2); if( p->rc!=SQLITE_OK ) return p->rc; - sessionDiscardData(&p->in); p->in.iCurrent = p->in.iNext; + sessionDiscardData(&p->in); /* If the iterator is already at the end of the changeset, return DONE. 
*/ if( p->in.iNext>=p->in.nData ){ @@ -230010,15 +233333,21 @@ static int sessionChangesetApply( int nTab = 0; /* Result of sqlite3Strlen30(zTab) */ SessionApplyCtx sApply; /* changeset_apply() context object */ int bPatchset; + u64 savedFlag = db->flags & SQLITE_FkNoAction; assert( xConflict!=0 ); + sqlite3_mutex_enter(sqlite3_db_mutex(db)); + if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){ + db->flags |= ((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } + pIter->in.bNoDiscard = 1; memset(&sApply, 0, sizeof(sApply)); sApply.bRebase = (ppRebase && pnRebase); sApply.bInvertConstraints = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); sApply.bIgnoreNoop = !!(flags & SQLITE_CHANGESETAPPLY_IGNORENOOP); - sqlite3_mutex_enter(sqlite3_db_mutex(db)); if( (flags & SQLITE_CHANGESETAPPLY_NOSAVEPOINT)==0 ){ rc = sqlite3_exec(db, "SAVEPOINT changeset_apply", 0, 0, 0); } @@ -230076,7 +233405,8 @@ static int sessionChangesetApply( sqlite3changeset_pk(pIter, &abPK, 0); rc = sessionTableInfo(0, db, "main", zNew, - &sApply.nCol, &zTab, &sApply.azCol, 0, &sApply.abPK, &sApply.bRowid + &sApply.nCol, 0, &zTab, &sApply.azCol, 0, 0, + &sApply.abPK, &sApply.bRowid ); if( rc!=SQLITE_OK ) break; for(i=0; iflags & SQLITE_FkNoAction ); + db->flags &= ~((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } sqlite3_mutex_leave(sqlite3_db_mutex(db)); return rc; } @@ -230208,12 +233549,6 @@ SQLITE_API int sqlite3changeset_apply_v2( sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1); - u64 savedFlag = db->flags & SQLITE_FkNoAction; - - if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){ - db->flags |= ((u64)SQLITE_FkNoAction); - db->aDb[0].pSchema->schema_cookie -= 32; - } if( rc==SQLITE_OK ){ rc = sessionChangesetApply( @@ -230221,11 +233556,6 @@ SQLITE_API int sqlite3changeset_apply_v2( ); } - if( (flags & SQLITE_CHANGESETAPPLY_FKNOACTION) && savedFlag==0 ){ - assert( db->flags & SQLITE_FkNoAction ); - db->flags &= ~((u64)SQLITE_FkNoAction); - db->aDb[0].pSchema->schema_cookie -= 32; - } return rc; } @@ -230546,6 +233876,9 @@ static int sessionChangesetExtendRecord( sessionAppendBlob(pOut, aRec, nRec, &rc); if( rc==SQLITE_OK && pTab->pDfltStmt==0 ){ rc = sessionPrepareDfltStmt(pGrp->db, pTab, &pTab->pDfltStmt); + if( rc==SQLITE_OK && SQLITE_ROW!=sqlite3_step(pTab->pDfltStmt) ){ + rc = sqlite3_errcode(pGrp->db); + } } for(ii=nCol; rc==SQLITE_OK && iinCol; ii++){ int eType = sqlite3_column_type(pTab->pDfltStmt, ii); @@ -230562,6 +233895,7 @@ static int sessionChangesetExtendRecord( } if( SQLITE_OK==sessionBufferGrow(pOut, 8, &rc) ){ sessionPutI64(&pOut->aBuf[pOut->nBuf], iVal); + pOut->nBuf += 8; } break; } @@ -230701,6 +234035,8 @@ static int sessionOneChangeToHash( u8 *aRec = &pIter->in.aData[pIter->in.iCurrent + 2]; int nRec = (pIter->in.iNext - pIter->in.iCurrent) - 2; + assert( nRec>0 ); + /* Ensure that only changesets, or only patchsets, but not a mixture ** of both, are being combined. It is an error to try to combine a ** changeset and a patchset. 
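The iCurrent/iNext bookkeeping above only matters for streamed changesets, where sessionInputBuffer() pulls data in chunks through an application callback and sessionDiscardData() releases bytes the iterator has fully consumed. The callback contract is small; a sketch of a file-backed input callback for the documented _strm interfaces, assuming a build with SQLITE_ENABLE_SESSION:

  #include <stdio.h>
  #include "sqlite3.h"

  /* Input callback for sqlite3changeset_start_strm() and friends:
  ** on entry *pnData holds the buffer capacity; copy up to that many
  ** bytes into pData and set *pnData to the count actually copied.
  ** Setting *pnData to 0 signals end-of-input. */
  static int fileInput(void *pIn, void *pData, int *pnData){
    FILE *f = (FILE*)pIn;
    size_t n = fread(pData, 1, (size_t)*pnData, f);
    if( n==0 && ferror(f) ) return SQLITE_IOERR_READ;
    *pnData = (int)n;
    return SQLITE_OK;
  }

  /* Usage (sketch): iterate a changeset stored in zFile */
  static int iterateFile(const char *zFile){
    sqlite3_changeset_iter *pIter = 0;
    FILE *f = fopen(zFile, "rb");
    int rc;
    if( f==0 ) return SQLITE_CANTOPEN;
    rc = sqlite3changeset_start_strm(&pIter, fileInput, f);
    while( rc==SQLITE_OK && sqlite3changeset_next(pIter)==SQLITE_ROW ){
      /* ... inspect the change via sqlite3changeset_op() etc ... */
    }
    sqlite3changeset_finalize(pIter);
    fclose(f);
    return rc;
  }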
*/ @@ -230778,6 +234114,7 @@ static int sessionChangesetToHash( int nRec; int rc = SQLITE_OK; + pIter->in.bNoDiscard = 1; while( SQLITE_ROW==(sessionChangesetNext(pIter, &aRec, &nRec, 0)) ){ rc = sessionOneChangeToHash(pGrp, pIter, bRebase); if( rc!=SQLITE_OK ) break; @@ -230917,14 +234254,19 @@ SQLITE_API int sqlite3changegroup_add_change( sqlite3_changegroup *pGrp, sqlite3_changeset_iter *pIter ){ + int rc = SQLITE_OK; + if( pIter->in.iCurrent==pIter->in.iNext || pIter->rc!=SQLITE_OK || pIter->bInvert ){ /* Iterator does not point to any valid entry or is an INVERT iterator. */ - return SQLITE_ERROR; + rc = SQLITE_ERROR; + }else{ + pIter->in.bNoDiscard = 1; + rc = sessionOneChangeToHash(pGrp, pIter, 0); } - return sessionOneChangeToHash(pGrp, pIter, 0); + return rc; } /* @@ -231409,7 +234751,27 @@ SQLITE_API int sqlite3session_config(int op, void *pArg){ /************** End of sqlite3session.c **************************************/ /************** Begin file fts5.c ********************************************/ - +/* +** This, the "fts5.c" source file, is a composite file that is itself +** assembled from the following files: +** +** fts5.h +** fts5Int.h +** fts5parse.h <--- Generated from fts5parse.y by Lemon +** fts5parse.c <--- Generated from fts5parse.y by Lemon +** fts5_aux.c +** fts5_buffer.c +** fts5_config.c +** fts5_expr.c +** fts5_hash.c +** fts5_index.c +** fts5_main.c +** fts5_storage.c +** fts5_tokenize.c +** fts5_unicode2.c +** fts5_varint.c +** fts5_vocab.c +*/ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) @@ -231419,6 +234781,12 @@ SQLITE_API int sqlite3session_config(int op, void *pArg){ # undef NDEBUG #endif +#ifdef HAVE_STDINT_H +/* #include */ +#endif +#ifdef HAVE_INTTYPES_H +/* #include */ +#endif /* ** 2014 May 31 ** @@ -231659,6 +235027,10 @@ struct Fts5PhraseIter { ** (i.e. if it is a contentless table), then this API always iterates ** through an empty set (all calls to xPhraseFirst() set iCol to -1). ** +** In all cases, matches are visited in (column ASC, offset ASC) order. +** i.e. all those in column 0, sorted by offset, followed by those in +** column 1, etc. +** ** xPhraseNext() ** See xPhraseFirst above. ** @@ -231715,19 +235087,57 @@ struct Fts5PhraseIter { ** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, ** output variable (*ppToken) is set to point to a buffer containing the ** matching document token, and (*pnToken) to the size of that buffer in -** bytes. This API is not available if the specified token matches a -** prefix query term. In that case both output variables are always set -** to 0. +** bytes. ** ** The output text is not a copy of the document text that was tokenized. ** It is the output of the tokenizer module. For tokendata=1 tables, this ** includes any embedded 0x00 and trailing data. ** +** This API may be slow in some cases if the token identified by parameters +** iIdx and iToken matched a prefix token in the query. In most cases, the +** first call to this API for each prefix token in the query is forced +** to scan the portion of the full-text index that matches the prefix +** token to collect the extra data required by this API. If the prefix +** token matches a large number of token instances in the document set, +** this may be a performance problem. 
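The "(column ASC, offset ASC)" note added above pins down the only ordering an auxiliary function may rely on when walking phrase hits. The documented iteration idiom, shown here counting hits per column inside a hypothetical auxiliary function:

  #include "fts5.h"   /* Fts5ExtensionApi, Fts5Context, Fts5PhraseIter */

  /* Count the hits for phrase iPhrase of the current query, per column.
  ** pApi and pFts are the values FTS5 passes to an auxiliary function. */
  static void countHits(const Fts5ExtensionApi *pApi, Fts5Context *pFts,
                        int iPhrase, int *aCnt, int nCol){
    Fts5PhraseIter iter;
    int iCol, iOff;
    for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
        iCol>=0;                      /* iCol<0 signals end of iteration */
        pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)){
      if( iCol<nCol ) aCnt[iCol]++;   /* hits arrive column-ascending */
    }
  }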
+** +** If the user knows in advance that a query may use this API for a +** prefix token, FTS5 may be configured to collect all required data as part +** of the initial querying of the full-text index, avoiding the second scan +** entirely. This also causes prefix queries that do not use this API to +** run more slowly and use more memory. FTS5 may be configured in this way +** either on a per-table basis using the [FTS5 insttoken | 'insttoken'] +** option, or on a per-query basis using the +** [fts5_insttoken | fts5_insttoken()] user function. +** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. +** +** xColumnLocale(pFts5, iIdx, pzLocale, pnLocale) +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the locale associated +** with column iCol of the current row. Usually, there is no associated +** locale, and output parameters (*pzLocale) and (*pnLocale) are set +** to NULL and 0, respectively. However, if the fts5_locale() function +** was used to associate a locale with the value when it was inserted +** into the fts5 table, then (*pzLocale) is set to point to a nul-terminated +** buffer containing the name of the locale in utf-8 encoding. (*pnLocale) +** is set to the size in bytes of the buffer, not including the +** nul-terminator. +** +** If successful, SQLITE_OK is returned. Or, if an error occurs, an +** SQLite error code is returned. The final value of the output parameters +** is undefined in this case. +** +** xTokenize_v2: +** Tokenize text using the tokenizer belonging to the FTS5 table. This +** API is the same as the xTokenize() API, except that it allows a tokenizer +** locale to be specified. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 4 */ void *(*xUserData)(Fts5Context*); @@ -231769,6 +235179,15 @@ struct Fts5ExtensionApi { const char **ppToken, int *pnToken ); int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); + + /* Below this point are iVersion>=4 only */ + int (*xColumnLocale)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xTokenize_v2)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + const char *pLocale, int nLocale, /* Locale to pass to tokenizer */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); }; /* @@ -231789,7 +235208,7 @@ struct Fts5ExtensionApi { ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) -** pointer provided by the application when the fts5_tokenizer object +** pointer provided by the application when the fts5_tokenizer_v2 object ** was registered with FTS5 (the third argument to xCreateTokenizer()). ** The second and third arguments are an array of nul-terminated strings ** containing the tokenizer arguments, if any, specified following the @@ -231813,7 +235232,7 @@ struct Fts5ExtensionApi { ** argument passed to this function is a pointer to an Fts5Tokenizer object ** returned by an earlier call to xCreate(). ** -** The second argument indicates the reason that FTS5 is requesting +** The third argument indicates the reason that FTS5 is requesting ** tokenization of the supplied text. 
This is always one of the following ** four values: ** @@ -231837,6 +235256,13 @@ struct Fts5ExtensionApi { ** on a columnsize=0 database. ** ** +** The sixth and seventh arguments passed to xTokenize() - pLocale and +** nLocale - are a pointer to a buffer containing the locale to use for +** tokenization (e.g. "en_US") and its size in bytes, respectively. The +** pLocale buffer is not nul-terminated. pLocale may be passed NULL (in +** which case nLocale is always 0) to indicate that the tokenizer should +** use its default locale. +** ** For each token in the input string, the supplied callback xToken() must ** be invoked. The first argument to it should be a copy of the pointer ** passed as the second argument to xTokenize(). The third and fourth @@ -231860,6 +235286,30 @@ struct Fts5ExtensionApi { ** may abandon the tokenization and return any error code other than ** SQLITE_OK or SQLITE_DONE. ** +** If the tokenizer is registered using an fts5_tokenizer_v2 object, +** then the xTokenize() method has two additional arguments - pLocale +** and nLocale. These specify the locale that the tokenizer should use +** for the current request. If pLocale and nLocale are both 0, then the +** tokenizer should use its default locale. Otherwise, pLocale points to +** an nLocale byte buffer containing the name of the locale to use as utf-8 +** text. pLocale is not nul-terminated. +** +** FTS5_TOKENIZER +** +** There is also an fts5_tokenizer object. This is an older, deprecated, +** version of fts5_tokenizer_v2. It is similar except that: +** +**
+**   • There is no "iVersion" field, and
+**   • The xTokenize() method does not take a locale argument.
+** +** Legacy fts5_tokenizer tokenizers must be registered using the +** legacy xCreateTokenizer() function, instead of xCreateTokenizer_v2(). +** +** Tokenizer implementations registered using either API may be retrieved +** using both xFindTokenizer() and xFindTokenizer_v2(). +** ** SYNONYM SUPPORT ** ** Custom tokenizers may also support synonyms. Consider a case in which a @@ -231968,6 +235418,33 @@ struct Fts5ExtensionApi { ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer_v2 fts5_tokenizer_v2; +struct fts5_tokenizer_v2 { + int iVersion; /* Currently always 2 */ + + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* +** New code should use the fts5_tokenizer_v2 type to define tokenizer +** implementations. The following type is included for legacy applications +** that still use it. +*/ typedef struct fts5_tokenizer fts5_tokenizer; struct fts5_tokenizer { int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); @@ -231987,6 +235464,7 @@ struct fts5_tokenizer { ); }; + /* Flags that may be passed as the third argument to xTokenize() */ #define FTS5_TOKENIZE_QUERY 0x0001 #define FTS5_TOKENIZE_PREFIX 0x0002 @@ -232006,7 +235484,7 @@ struct fts5_tokenizer { */ typedef struct fts5_api fts5_api; struct fts5_api { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ /* Create a new tokenizer */ int (*xCreateTokenizer)( @@ -232033,6 +235511,25 @@ struct fts5_api { fts5_extension_function xFunction, void (*xDestroy)(void*) ); + + /* APIs below this point are only available if iVersion>=3 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer_v2 *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer + ); }; /* @@ -232067,6 +235564,7 @@ SQLITE_EXTENSION_INIT1 /* #include */ /* #include */ +/* #include */ #ifndef SQLITE_AMALGAMATION @@ -232106,6 +235604,34 @@ typedef sqlite3_uint64 u64; # define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) # define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) +/* The uptr type is an unsigned integer large enough to hold a pointer +*/ +#if defined(HAVE_STDINT_H) + typedef uintptr_t uptr; +#elif SQLITE_PTRSIZE==4 + typedef u32 uptr; +#else + typedef u64 uptr; +#endif + +#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&3)==0) +#else +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) +#endif + +/* +** Macros needed to provide flexible arrays in a portable way +*/ +#ifndef offsetof +# define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif + 
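/*
** A minimal sketch (not from the upstream sources) of registering a
** locale-aware tokenizer via the xCreateTokenizer_v2() member of the
** fts5_api object described above. fts5_api_from_db() follows the
** bind-pointer pattern documented for obtaining the fts5_api pointer;
** the "mytok" callbacks are hypothetical placeholders that emit the
** whole input as a single token and ignore the locale. It assumes
** sqlite3.h and the fts5 definitions above are in scope.
*/
static int fts5_api_from_db(sqlite3 *db, fts5_api **ppApi){
  sqlite3_stmt *pStmt = 0;
  int rc;
  *ppApi = 0;
  rc = sqlite3_prepare_v2(db, "SELECT fts5(?1)", -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    sqlite3_bind_pointer(pStmt, 1, (void*)ppApi, "fts5_api_ptr", 0);
    sqlite3_step(pStmt);
    rc = sqlite3_finalize(pStmt);
  }
  return rc;
}

static int mytokCreate(void *pCtx, const char **azArg, int nArg, Fts5Tokenizer **ppOut){
  (void)pCtx; (void)azArg; (void)nArg;
  *ppOut = (Fts5Tokenizer*)sqlite3_malloc(1);  /* opaque handle, no state */
  return (*ppOut) ? SQLITE_OK : SQLITE_NOMEM;
}
static void mytokDelete(Fts5Tokenizer *p){ sqlite3_free(p); }
static int mytokTokenize(
  Fts5Tokenizer *p, void *pCtx, int flags,
  const char *pText, int nText,
  const char *pLocale, int nLocale,
  int (*xToken)(void*, int, const char*, int, int, int)
){
  /* A real implementation would split pText using pLocale/nLocale. */
  (void)p; (void)flags; (void)pLocale; (void)nLocale;
  return xToken(pCtx, 0, pText, nText, 0, nText);
}

static int registerMytok(sqlite3 *db){
  static fts5_tokenizer_v2 tok = {2, mytokCreate, mytokDelete, mytokTokenize};
  fts5_api *pApi = 0;
  int rc = fts5_api_from_db(db, &pApi);
  if( rc==SQLITE_OK && (pApi==0 || pApi->iVersion<3) ){
    rc = SQLITE_ERROR;  /* this fts5 build predates the v2 tokenizer API */
  }
  if( rc==SQLITE_OK ){
    rc = pApi->xCreateTokenizer_v2(pApi, "mytok", 0, &tok, 0);
  }
  return rc;
}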
#endif /* Truncate very long tokens to this many bytes. Hard limit is @@ -232178,10 +235704,11 @@ typedef struct Fts5Colset Fts5Colset; */ struct Fts5Colset { int nCol; - int aiCol[1]; + int aiCol[FLEXARRAY]; }; - +/* Size (int bytes) of a complete Fts5Colset object with N columns. */ +#define SZ_FTS5COLSET(N) (sizeof(i64)*((N+2)/2)) /************************************************************************** ** Interface to code in fts5_config.c. fts5_config.c contains contains code @@ -232189,6 +235716,18 @@ struct Fts5Colset { */ typedef struct Fts5Config Fts5Config; +typedef struct Fts5TokenizerConfig Fts5TokenizerConfig; + +struct Fts5TokenizerConfig { + Fts5Tokenizer *pTok; + fts5_tokenizer_v2 *pApi2; + fts5_tokenizer *pApi1; + const char **azArg; + int nArg; + int ePattern; /* FTS_PATTERN_XXX constant */ + const char *pLocale; /* Current locale to use */ + int nLocale; /* Size of pLocale in bytes */ +}; /* ** An instance of the following structure encodes all information that can @@ -232228,9 +235767,12 @@ typedef struct Fts5Config Fts5Config; ** ** INSERT INTO tbl(tbl, rank) VALUES('prefix-index', $bPrefixIndex); ** +** bLocale: +** Set to true if locale=1 was specified when the table was created. */ struct Fts5Config { sqlite3 *db; /* Database handle */ + Fts5Global *pGlobal; /* Global fts5 object for handle db */ char *zDb; /* Database holding FTS index (e.g. "main") */ char *zName; /* Name of FTS index */ int nCol; /* Number of columns */ @@ -232240,16 +235782,17 @@ struct Fts5Config { int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */ int eContent; /* An FTS5_CONTENT value */ int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */ + int bContentlessUnindexed; /* "contentless_unindexed=" option (dflt=0) */ char *zContent; /* content table */ char *zContentRowid; /* "content_rowid=" option value */ int bColumnsize; /* "columnsize=" option value (dflt==1) */ int bTokendata; /* "tokendata=" option value (dflt==0) */ + int bLocale; /* "locale=" option value (dflt==0) */ int eDetail; /* FTS5_DETAIL_XXX value */ char *zContentExprlist; - Fts5Tokenizer *pTok; - fts5_tokenizer *pTokApi; + Fts5TokenizerConfig t; int bLock; /* True when table is preparing statement */ - int ePattern; /* FTS_PATTERN_XXX constant */ + /* Values loaded from the %_config table */ int iVersion; /* fts5 file format 'version' */ @@ -232262,7 +235805,8 @@ struct Fts5Config { char *zRank; /* Name of rank function */ char *zRankArgs; /* Arguments to rank function */ int bSecureDelete; /* 'secure-delete' */ - int nDeleteMerge; /* 'deletemerge' */ + int nDeleteMerge; /* 'deletemerge' */ + int bPrefixInsttoken; /* 'prefix-insttoken' */ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */ char **pzErrmsg; @@ -232278,9 +235822,10 @@ struct Fts5Config { #define FTS5_CURRENT_VERSION 4 #define FTS5_CURRENT_VERSION_SECUREDELETE 5 -#define FTS5_CONTENT_NORMAL 0 -#define FTS5_CONTENT_NONE 1 -#define FTS5_CONTENT_EXTERNAL 2 +#define FTS5_CONTENT_NORMAL 0 +#define FTS5_CONTENT_NONE 1 +#define FTS5_CONTENT_EXTERNAL 2 +#define FTS5_CONTENT_UNINDEXED 3 #define FTS5_DETAIL_FULL 0 #define FTS5_DETAIL_NONE 1 @@ -232315,6 +235860,8 @@ static int sqlite3Fts5ConfigSetValue(Fts5Config*, const char*, sqlite3_value*, i static int sqlite3Fts5ConfigParseRank(const char*, char**, char**); +static void sqlite3Fts5ConfigErrmsg(Fts5Config *pConfig, const char *zFmt, ...); + /* ** End of interface to code in fts5_config.c. 
**************************************************************************/ @@ -232359,7 +235906,7 @@ static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...); static void sqlite3Fts5Put32(u8*, int); static int sqlite3Fts5Get32(const u8*); -#define FTS5_POS2COLUMN(iPos) (int)(iPos >> 32) +#define FTS5_POS2COLUMN(iPos) (int)((iPos >> 32) & 0x7FFFFFFF) #define FTS5_POS2OFFSET(iPos) (int)(iPos & 0x7FFFFFFF) typedef struct Fts5PoslistReader Fts5PoslistReader; @@ -232516,7 +236063,14 @@ static int sqlite3Fts5StructureTest(Fts5Index*, void*); /* ** Used by xInstToken(): */ -static int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*); +static int sqlite3Fts5IterToken( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, + int iCol, + int iOff, + const char **ppOut, int *pnOut +); /* ** Insert or remove data to or from the index. Each time a document is @@ -232644,18 +236198,20 @@ struct Fts5Table { Fts5Index *pIndex; /* Full-text index */ }; -static int sqlite3Fts5GetTokenizer( - Fts5Global*, - const char **azArg, - int nArg, - Fts5Config*, - char **pzErr -); +static int sqlite3Fts5LoadTokenizer(Fts5Config *pConfig); static Fts5Table *sqlite3Fts5TableFromCsrid(Fts5Global*, i64); static int sqlite3Fts5FlushToDisk(Fts5Table*); +static void sqlite3Fts5ClearLocale(Fts5Config *pConfig); +static void sqlite3Fts5SetLocale(Fts5Config *pConfig, const char *pLoc, int nLoc); + +static int sqlite3Fts5IsLocaleValue(Fts5Config *pConfig, sqlite3_value *pVal); +static int sqlite3Fts5DecodeLocaleValue(sqlite3_value *pVal, + const char **ppText, int *pnText, const char **ppLoc, int *pnLoc +); + /* ** End of interface to code in fts5.c. **************************************************************************/ @@ -232735,8 +236291,8 @@ static int sqlite3Fts5StorageRename(Fts5Storage*, const char *zName); static int sqlite3Fts5DropAll(Fts5Config*); static int sqlite3Fts5CreateTable(Fts5Config*, const char*, const char*, int, char **); -static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**); -static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, sqlite3_value**, i64*); +static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**, int); +static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, int, sqlite3_value**, i64*); static int sqlite3Fts5StorageIndexInsert(Fts5Storage *p, sqlite3_value**, i64); static int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg); @@ -232761,6 +236317,9 @@ static int sqlite3Fts5StorageOptimize(Fts5Storage *p); static int sqlite3Fts5StorageMerge(Fts5Storage *p, int nMerge); static int sqlite3Fts5StorageReset(Fts5Storage *p); +static void sqlite3Fts5StorageReleaseDeleteRow(Fts5Storage*); +static int sqlite3Fts5StorageFindDeleteRow(Fts5Storage *p, i64 iDel); + /* ** End of interface to code in fts5_storage.c. **************************************************************************/ @@ -232913,6 +236472,7 @@ static int sqlite3Fts5TokenizerPattern( int (*xCreate)(void*, const char**, int, Fts5Tokenizer**), Fts5Tokenizer *pTok ); +static int sqlite3Fts5TokenizerPreload(Fts5TokenizerConfig*); /* ** End of interface to code in fts5_tokenizer.c. **************************************************************************/ @@ -232977,7 +236537,7 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** ** The "lemon" program processes an LALR(1) input grammar file, then uses ** this template to construct a parser. The "lemon" program inserts text -** at each "%%" line. 
Also, any "P-a-r-s-e" identifer prefix (without the +** at each "%%" line. Also, any "P-a-r-s-e" identifier prefix (without the ** interstitial "-" characters) contained in this template is changed into ** the value of the %name directive from the grammar. Otherwise, the content ** of this template is copied straight through into the generate parser @@ -234690,6 +238250,7 @@ static int fts5HighlightCb( return rc; } + /* ** Implementation of highlight() function. */ @@ -234720,12 +238281,19 @@ static void fts5HighlightFunction( sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC); rc = SQLITE_OK; }else if( ctx.zIn ){ + const char *pLoc = 0; /* Locale of column iCol */ + int nLoc = 0; /* Size of pLoc in bytes */ if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter); } if( rc==SQLITE_OK ){ - rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + rc = pApi->xColumnLocale(pFts, iCol, &pLoc, &nLoc); + } + if( rc==SQLITE_OK ){ + rc = pApi->xTokenize_v2( + pFts, ctx.zIn, ctx.nIn, pLoc, nLoc, (void*)&ctx, fts5HighlightCb + ); } if( ctx.bOpen ){ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); @@ -234922,6 +238490,8 @@ static void fts5SnippetFunction( memset(&sFinder, 0, sizeof(Fts5SFinder)); for(i=0; ixColumnText(pFts, i, &sFinder.zDoc, &nDoc); if( rc!=SQLITE_OK ) break; - rc = pApi->xTokenize(pFts, - sFinder.zDoc, nDoc, (void*)&sFinder,fts5SentenceFinderCb + rc = pApi->xColumnLocale(pFts, i, &pLoc, &nLoc); + if( rc!=SQLITE_OK ) break; + rc = pApi->xTokenize_v2(pFts, + sFinder.zDoc, nDoc, pLoc, nLoc, (void*)&sFinder, fts5SentenceFinderCb ); if( rc!=SQLITE_OK ) break; rc = pApi->xColumnSize(pFts, i, &nDocsize); @@ -234988,6 +238560,9 @@ static void fts5SnippetFunction( rc = pApi->xColumnSize(pFts, iBestCol, &nColSize); } if( ctx.zIn ){ + const char *pLoc = 0; /* Locale of column iBestCol */ + int nLoc = 0; /* Bytes in pLoc */ + if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iBestCol, &ctx.iter); } @@ -235006,7 +238581,12 @@ static void fts5SnippetFunction( } if( rc==SQLITE_OK ){ - rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + rc = pApi->xColumnLocale(pFts, iBestCol, &pLoc, &nLoc); + } + if( rc==SQLITE_OK ){ + rc = pApi->xTokenize_v2( + pFts, ctx.zIn, ctx.nIn, pLoc, nLoc, (void*)&ctx,fts5HighlightCb + ); } if( ctx.bOpen ){ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); @@ -235111,7 +238691,7 @@ static int fts5Bm25GetData( ** under consideration. ** ** The problem with this is that if (N < 2*nHit), the IDF is - ** negative. Which is undesirable. So the mimimum allowable IDF is + ** negative. Which is undesirable. So the minimum allowable IDF is ** (1e-6) - roughly the same as a term that appears in just over ** half of set of 5,000,000 documents. */ double idf = log( (nRow - nHit + 0.5) / (nHit + 0.5) ); @@ -235190,6 +238770,53 @@ static void fts5Bm25Function( } } +/* +** Implementation of fts5_get_locale() function. 
+*/ +static void fts5GetLocaleFunction( + const Fts5ExtensionApi *pApi, /* API offered by current FTS version */ + Fts5Context *pFts, /* First arg to pass to pApi functions */ + sqlite3_context *pCtx, /* Context for returning result/error */ + int nVal, /* Number of values in apVal[] array */ + sqlite3_value **apVal /* Array of trailing arguments */ +){ + int iCol = 0; + int eType = 0; + int rc = SQLITE_OK; + const char *zLocale = 0; + int nLocale = 0; + + /* xColumnLocale() must be available */ + assert( pApi->iVersion>=4 ); + + if( nVal!=1 ){ + const char *z = "wrong number of arguments to function fts5_get_locale()"; + sqlite3_result_error(pCtx, z, -1); + return; + } + + eType = sqlite3_value_numeric_type(apVal[0]); + if( eType!=SQLITE_INTEGER ){ + const char *z = "non-integer argument passed to function fts5_get_locale()"; + sqlite3_result_error(pCtx, z, -1); + return; + } + + iCol = sqlite3_value_int(apVal[0]); + if( iCol<0 || iCol>=pApi->xColumnCount(pFts) ){ + sqlite3_result_error_code(pCtx, SQLITE_RANGE); + return; + } + + rc = pApi->xColumnLocale(pFts, iCol, &zLocale, &nLocale); + if( rc!=SQLITE_OK ){ + sqlite3_result_error_code(pCtx, rc); + return; + } + + sqlite3_result_text(pCtx, zLocale, nLocale, SQLITE_TRANSIENT); +} + static int sqlite3Fts5AuxInit(fts5_api *pApi){ struct Builtin { const char *zFunc; /* Function name (nul-terminated) */ @@ -235197,9 +238824,10 @@ static int sqlite3Fts5AuxInit(fts5_api *pApi){ fts5_extension_function xFunc;/* Callback function */ void (*xDestroy)(void*); /* Destructor function */ } aBuiltin [] = { - { "snippet", 0, fts5SnippetFunction, 0 }, - { "highlight", 0, fts5HighlightFunction, 0 }, - { "bm25", 0, fts5Bm25Function, 0 }, + { "snippet", 0, fts5SnippetFunction, 0 }, + { "highlight", 0, fts5HighlightFunction, 0 }, + { "bm25", 0, fts5Bm25Function, 0 }, + { "fts5_get_locale", 0, fts5GetLocaleFunction, 0 }, }; int rc = SQLITE_OK; /* Return code */ int i; /* To iterate through builtin functions */ @@ -235526,7 +239154,7 @@ static char *sqlite3Fts5Strndup(int *pRc, const char *pIn, int nIn){ ** * The 52 upper and lower case ASCII characters, and ** * The 10 integer ASCII characters. ** * The underscore character "_" (0x5F). -** * The unicode "subsitute" character (0x1A). +** * The unicode "substitute" character (0x1A). */ static int sqlite3Fts5IsBareword(char t){ u8 aBareword[128] = { @@ -235864,7 +239492,6 @@ static int fts5ConfigSetEnum( ** eventually free any such error message using sqlite3_free(). */ static int fts5ConfigParseSpecial( - Fts5Global *pGlobal, Fts5Config *pConfig, /* Configuration object to update */ const char *zCmd, /* Special command to parse */ const char *zArg, /* Argument to parse */ @@ -235872,6 +239499,7 @@ static int fts5ConfigParseSpecial( ){ int rc = SQLITE_OK; int nCmd = (int)strlen(zCmd); + if( sqlite3_strnicmp("prefix", zCmd, nCmd)==0 ){ const int nByte = sizeof(int) * FTS5_MAX_PREFIX_INDEXES; const char *p; @@ -235928,12 +239556,11 @@ static int fts5ConfigParseSpecial( if( sqlite3_strnicmp("tokenize", zCmd, nCmd)==0 ){ const char *p = (const char*)zArg; sqlite3_int64 nArg = strlen(zArg) + 1; - char **azArg = sqlite3Fts5MallocZero(&rc, sizeof(char*) * nArg); - char *pDel = sqlite3Fts5MallocZero(&rc, nArg * 2); - char *pSpace = pDel; + char **azArg = sqlite3Fts5MallocZero(&rc, (sizeof(char*) + 2) * nArg); - if( azArg && pSpace ){ - if( pConfig->pTok ){ + if( azArg ){ + char *pSpace = (char*)&azArg[nArg]; + if( pConfig->t.azArg ){ *pzErr = sqlite3_mprintf("multiple tokenize=... 
directives"); rc = SQLITE_ERROR; }else{ @@ -235956,16 +239583,14 @@ static int fts5ConfigParseSpecial( *pzErr = sqlite3_mprintf("parse error in tokenize directive"); rc = SQLITE_ERROR; }else{ - rc = sqlite3Fts5GetTokenizer(pGlobal, - (const char**)azArg, (int)nArg, pConfig, - pzErr - ); + pConfig->t.azArg = (const char**)azArg; + pConfig->t.nArg = nArg; + azArg = 0; } } } - sqlite3_free(azArg); - sqlite3_free(pDel); + return rc; } @@ -235994,6 +239619,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("contentless_unindexed", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bContentlessUnindexed = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){ if( pConfig->zContentRowid ){ *pzErr = sqlite3_mprintf("multiple content_rowid=... directives"); @@ -236014,6 +239649,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("locale", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed locale=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bLocale = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("detail", zCmd, nCmd)==0 ){ const Fts5Enum aDetail[] = { { "none", FTS5_DETAIL_NONE }, @@ -236042,16 +239687,6 @@ static int fts5ConfigParseSpecial( return SQLITE_ERROR; } -/* -** Allocate an instance of the default tokenizer ("simple") at -** Fts5Config.pTokenizer. Return SQLITE_OK if successful, or an SQLite error -** code if an error occurs. -*/ -static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){ - assert( pConfig->pTok==0 && pConfig->pTokApi==0 ); - return sqlite3Fts5GetTokenizer(pGlobal, 0, 0, pConfig, 0); -} - /* ** Gobble up the first bareword or quoted word from the input buffer zIn. 
** Return a pointer to the character immediately following the last in @@ -236111,7 +239746,8 @@ static int fts5ConfigParseColumn( Fts5Config *p, char *zCol, char *zArg, - char **pzErr + char **pzErr, + int *pbUnindexed ){ int rc = SQLITE_OK; if( 0==sqlite3_stricmp(zCol, FTS5_RANK_NAME) @@ -236122,6 +239758,7 @@ static int fts5ConfigParseColumn( }else if( zArg ){ if( 0==sqlite3_stricmp(zArg, "unindexed") ){ p->abUnindexed[p->nCol] = 1; + *pbUnindexed = 1; }else{ *pzErr = sqlite3_mprintf("unrecognized column option: %s", zArg); rc = SQLITE_ERROR; @@ -236142,11 +239779,26 @@ static int fts5ConfigMakeExprlist(Fts5Config *p){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, "T.%Q", p->zContentRowid); if( p->eContent!=FTS5_CONTENT_NONE ){ + assert( p->eContent==FTS5_CONTENT_EXTERNAL + || p->eContent==FTS5_CONTENT_NORMAL + || p->eContent==FTS5_CONTENT_UNINDEXED + ); for(i=0; inCol; i++){ if( p->eContent==FTS5_CONTENT_EXTERNAL ){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.%Q", p->azCol[i]); - }else{ + }else if( p->eContent==FTS5_CONTENT_NORMAL || p->abUnindexed[i] ){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.c%d", i); + }else{ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", NULL"); + } + } + } + if( p->eContent==FTS5_CONTENT_NORMAL && p->bLocale ){ + for(i=0; inCol; i++){ + if( p->abUnindexed[i]==0 ){ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.l%d", i); + }else{ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", NULL"); } } } @@ -236180,10 +239832,12 @@ static int sqlite3Fts5ConfigParse( Fts5Config *pRet; /* New object to return */ int i; sqlite3_int64 nByte; + int bUnindexed = 0; /* True if there are one or more UNINDEXED */ *ppOut = pRet = (Fts5Config*)sqlite3_malloc(sizeof(Fts5Config)); if( pRet==0 ) return SQLITE_NOMEM; memset(pRet, 0, sizeof(Fts5Config)); + pRet->pGlobal = pGlobal; pRet->db = db; pRet->iCookie = -1; @@ -236232,13 +239886,13 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; }else{ if( bOption ){ - rc = fts5ConfigParseSpecial(pGlobal, pRet, + rc = fts5ConfigParseSpecial(pRet, ALWAYS(zOne)?zOne:"", zTwo?zTwo:"", pzErr ); }else{ - rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr); + rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr, &bUnindexed); zOne = 0; } } @@ -236270,11 +239924,17 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; } - /* If a tokenizer= option was successfully parsed, the tokenizer has - ** already been allocated. Otherwise, allocate an instance of the default - ** tokenizer (unicode61) now. */ - if( rc==SQLITE_OK && pRet->pTok==0 ){ - rc = fts5ConfigDefaultTokenizer(pGlobal, pRet); + /* We only allow contentless_unindexed=1 if the table is actually a + ** contentless one. + */ + if( rc==SQLITE_OK + && pRet->bContentlessUnindexed + && pRet->eContent!=FTS5_CONTENT_NONE + ){ + *pzErr = sqlite3_mprintf( + "contentless_unindexed=1 requires a contentless table" + ); + rc = SQLITE_ERROR; } /* If no zContent option was specified, fill in the default values. 
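  ** For example (names illustrative): for "CREATE VIRTUAL TABLE ft USING
  ** fts5(a)" the content defaults to the ft_content shadow table, while a
  ** contentless table keeps only ft_docsize (when columnsize=1).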
*/ @@ -236285,6 +239945,9 @@ static int sqlite3Fts5ConfigParse( ); if( pRet->eContent==FTS5_CONTENT_NORMAL ){ zTail = "content"; + }else if( bUnindexed && pRet->bContentlessUnindexed ){ + pRet->eContent = FTS5_CONTENT_UNINDEXED; + zTail = "content"; }else if( pRet->bColumnsize ){ zTail = "docsize"; } @@ -236318,9 +239981,14 @@ static int sqlite3Fts5ConfigParse( static void sqlite3Fts5ConfigFree(Fts5Config *pConfig){ if( pConfig ){ int i; - if( pConfig->pTok ){ - pConfig->pTokApi->xDelete(pConfig->pTok); + if( pConfig->t.pTok ){ + if( pConfig->t.pApi1 ){ + pConfig->t.pApi1->xDelete(pConfig->t.pTok); + }else{ + pConfig->t.pApi2->xDelete(pConfig->t.pTok); + } } + sqlite3_free((char*)pConfig->t.azArg); sqlite3_free(pConfig->zDb); sqlite3_free(pConfig->zName); for(i=0; inCol; i++){ @@ -236395,10 +240063,24 @@ static int sqlite3Fts5Tokenize( void *pCtx, /* Context passed to xToken() */ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ ){ - if( pText==0 ) return SQLITE_OK; - return pConfig->pTokApi->xTokenize( - pConfig->pTok, pCtx, flags, pText, nText, xToken - ); + int rc = SQLITE_OK; + if( pText ){ + if( pConfig->t.pTok==0 ){ + rc = sqlite3Fts5LoadTokenizer(pConfig); + } + if( rc==SQLITE_OK ){ + if( pConfig->t.pApi1 ){ + rc = pConfig->t.pApi1->xTokenize( + pConfig->t.pTok, pCtx, flags, pText, nText, xToken + ); + }else{ + rc = pConfig->t.pApi2->xTokenize(pConfig->t.pTok, pCtx, flags, + pText, nText, pConfig->t.pLocale, pConfig->t.nLocale, xToken + ); + } + } + } + return rc; } /* @@ -236602,6 +240284,19 @@ static int sqlite3Fts5ConfigSetValue( }else{ pConfig->bSecureDelete = (bVal ? 1 : 0); } + } + + else if( 0==sqlite3_stricmp(zKey, "insttoken") ){ + int bVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + bVal = sqlite3_value_int(pVal); + } + if( bVal<0 ){ + *pbBadkey = 1; + }else{ + pConfig->bPrefixInsttoken = (bVal ? 1 : 0); + } + }else{ *pbBadkey = 1; } @@ -236652,13 +240347,10 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ && iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE ){ rc = SQLITE_ERROR; - if( pConfig->pzErrmsg ){ - assert( 0==*pConfig->pzErrmsg ); - *pConfig->pzErrmsg = sqlite3_mprintf("invalid fts5 file format " - "(found %d, expected %d or %d) - run 'rebuild'", - iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE - ); - } + sqlite3Fts5ConfigErrmsg(pConfig, "invalid fts5 file format " + "(found %d, expected %d or %d) - run 'rebuild'", + iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE + ); }else{ pConfig->iVersion = iVersion; } @@ -236669,6 +240361,29 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ return rc; } +/* +** Set (*pConfig->pzErrmsg) to point to an sqlite3_malloc()ed buffer +** containing the error message created using printf() style formatting +** string zFmt and its trailing arguments. +*/ +static void sqlite3Fts5ConfigErrmsg(Fts5Config *pConfig, const char *zFmt, ...){ + va_list ap; /* ... printf arguments */ + char *zMsg = 0; + + va_start(ap, zFmt); + zMsg = sqlite3_vmprintf(zFmt, ap); + if( pConfig->pzErrmsg ){ + assert( *pConfig->pzErrmsg==0 ); + *pConfig->pzErrmsg = zMsg; + }else{ + sqlite3_free(zMsg); + } + + va_end(ap); +} + + + /* ** 2014 May 31 ** @@ -236725,7 +240440,7 @@ struct Fts5Expr { /* ** eType: -** Expression node type. Always one of: +** Expression node type. 
Usually one of: ** ** FTS5_AND (nChild, apChild valid) ** FTS5_OR (nChild, apChild valid) @@ -236733,6 +240448,10 @@ struct Fts5Expr { ** FTS5_STRING (pNear valid) ** FTS5_TERM (pNear valid) ** +** An expression node with eType==0 may also exist. It always matches zero +** rows. This is created when a phrase containing no tokens is parsed. +** e.g. "". +** ** iHeight: ** Distance from this node to furthest leaf. This is always 0 for nodes ** of type FTS5_STRING and FTS5_TERM. For all other nodes it is one @@ -236753,9 +240472,13 @@ struct Fts5ExprNode { /* Child nodes. For a NOT node, this array always contains 2 entries. For ** AND or OR nodes, it contains 2 or more entries. */ int nChild; /* Number of child nodes */ - Fts5ExprNode *apChild[1]; /* Array of child nodes */ + Fts5ExprNode *apChild[FLEXARRAY]; /* Array of child nodes */ }; +/* Size (in bytes) of an Fts5ExprNode object that holds up to N children */ +#define SZ_FTS5EXPRNODE(N) \ + (offsetof(Fts5ExprNode,apChild) + (N)*sizeof(Fts5ExprNode*)) + #define Fts5NodeIsString(p) ((p)->eType==FTS5_TERM || (p)->eType==FTS5_STRING) /* @@ -236786,9 +240509,13 @@ struct Fts5ExprPhrase { Fts5ExprNode *pNode; /* FTS5_STRING node this phrase is part of */ Fts5Buffer poslist; /* Current position list */ int nTerm; /* Number of entries in aTerm[] */ - Fts5ExprTerm aTerm[1]; /* Terms that make up this phrase */ + Fts5ExprTerm aTerm[FLEXARRAY]; /* Terms that make up this phrase */ }; +/* Size (in bytes) of an Fts5ExprPhrase object that holds up to N terms */ +#define SZ_FTS5EXPRPHRASE(N) \ + (offsetof(Fts5ExprPhrase,aTerm) + (N)*sizeof(Fts5ExprTerm)) + /* ** One or more phrases that must appear within a certain token distance of ** each other within each matching document. @@ -236797,9 +240524,12 @@ struct Fts5ExprNearset { int nNear; /* NEAR parameter */ Fts5Colset *pColset; /* Columns to search (NULL -> all columns) */ int nPhrase; /* Number of entries in aPhrase[] array */ - Fts5ExprPhrase *apPhrase[1]; /* Array of phrase pointers */ + Fts5ExprPhrase *apPhrase[FLEXARRAY]; /* Array of phrase pointers */ }; +/* Size (in bytes) of an Fts5ExprNearset object covering up to N phrases */ +#define SZ_FTS5EXPRNEARSET(N) \ + (offsetof(Fts5ExprNearset,apPhrase)+(N)*sizeof(Fts5ExprPhrase*)) /* ** Parse context. @@ -236953,12 +240683,13 @@ static int sqlite3Fts5ExprNew( }while( sParse.rc==SQLITE_OK && t!=FTS5_EOF ); sqlite3Fts5ParserFree(pEngine, fts5ParseFree); + assert( sParse.pExpr || sParse.rc!=SQLITE_OK ); assert_expr_depth_ok(sParse.rc, sParse.pExpr); /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. 
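  ** For example, given a table fts(a, b), the expression "a MATCH 'term'"
  ** is evaluated as if it were "fts MATCH 'a : term'".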
*/ - if( iColnCol && sParse.pExpr && sParse.rc==SQLITE_OK ){ - int n = sizeof(Fts5Colset); + if( sParse.rc==SQLITE_OK && iColnCol ){ + int n = SZ_FTS5COLSET(1); Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&sParse.rc, n); if( pColset ){ pColset->nCol = 1; @@ -236974,15 +240705,7 @@ static int sqlite3Fts5ExprNew( sParse.rc = SQLITE_NOMEM; sqlite3Fts5ParseNodeFree(sParse.pExpr); }else{ - if( !sParse.pExpr ){ - const int nByte = sizeof(Fts5ExprNode); - pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&sParse.rc, nByte); - if( pNew->pRoot ){ - pNew->pRoot->bEof = 1; - } - }else{ - pNew->pRoot = sParse.pExpr; - } + pNew->pRoot = sParse.pExpr; pNew->pIndex = 0; pNew->pConfig = pConfig; pNew->apExprPhrase = sParse.apPhrase; @@ -237800,7 +241523,7 @@ static int fts5ExprNodeTest_STRING( } }else{ Fts5IndexIter *pIter = pPhrase->aTerm[j].pIter; - if( pIter->iRowid==iLast || pIter->bEof ) continue; + if( pIter->iRowid==iLast ) continue; bMatch = 0; if( fts5ExprAdvanceto(pIter, bDesc, &iLast, &rc, &pNode->bEof) ){ return rc; @@ -238322,12 +242045,9 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( Fts5ExprNearset *pRet = 0; if( pParse->rc==SQLITE_OK ){ - if( pPhrase==0 ){ - return pNear; - } if( pNear==0 ){ sqlite3_int64 nByte; - nByte = sizeof(Fts5ExprNearset) + SZALLOC * sizeof(Fts5ExprPhrase*); + nByte = SZ_FTS5EXPRNEARSET(SZALLOC+1); pRet = sqlite3_malloc64(nByte); if( pRet==0 ){ pParse->rc = SQLITE_NOMEM; @@ -238338,7 +242058,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( int nNew = pNear->nPhrase + SZALLOC; sqlite3_int64 nByte; - nByte = sizeof(Fts5ExprNearset) + nNew * sizeof(Fts5ExprPhrase*); + nByte = SZ_FTS5EXPRNEARSET(nNew+1); pRet = (Fts5ExprNearset*)sqlite3_realloc64(pNear, nByte); if( pRet==0 ){ pParse->rc = SQLITE_NOMEM; @@ -238429,12 +242149,12 @@ static int fts5ParseTokenize( int nNew = SZALLOC + (pPhrase ? pPhrase->nTerm : 0); pNew = (Fts5ExprPhrase*)sqlite3_realloc64(pPhrase, - sizeof(Fts5ExprPhrase) + sizeof(Fts5ExprTerm) * nNew + SZ_FTS5EXPRPHRASE(nNew+1) ); if( pNew==0 ){ rc = SQLITE_NOMEM; }else{ - if( pPhrase==0 ) memset(pNew, 0, sizeof(Fts5ExprPhrase)); + if( pPhrase==0 ) memset(pNew, 0, SZ_FTS5EXPRPHRASE(1)); pCtx->pPhrase = pPhrase = pNew; pNew->nTerm = nNew - SZALLOC; } @@ -238542,10 +242262,11 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( if( sCtx.pPhrase==0 ){ /* This happens when parsing a token or quoted phrase that contains ** no token characters at all. (e.g ... MATCH '""'). 
*/ - sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, sizeof(Fts5ExprPhrase)); + sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, SZ_FTS5EXPRPHRASE(1)); }else if( sCtx.pPhrase->nTerm ){ sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = (u8)bPrefix; } + assert( pParse->apPhrase!=0 ); pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase; } @@ -238565,7 +242286,7 @@ static int sqlite3Fts5ExprClonePhrase( Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */ Fts5Expr *pNew = 0; /* Expression to return via *ppNew */ TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */ - if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + if( !pExpr || iPhrase<0 || iPhrase>=pExpr->nPhrase ){ rc = SQLITE_RANGE; }else{ pOrig = pExpr->apExprPhrase[iPhrase]; @@ -238576,19 +242297,18 @@ static int sqlite3Fts5ExprClonePhrase( sizeof(Fts5ExprPhrase*)); } if( rc==SQLITE_OK ){ - pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, - sizeof(Fts5ExprNode)); + pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRNODE(1)); } if( rc==SQLITE_OK ){ pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, - sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*)); + SZ_FTS5EXPRNEARSET(2)); } if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){ Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset; if( pColsetOrig ){ sqlite3_int64 nByte; Fts5Colset *pColset; - nByte = sizeof(Fts5Colset) + (pColsetOrig->nCol-1) * sizeof(int); + nByte = SZ_FTS5COLSET(pColsetOrig->nCol); pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&rc, nByte); if( pColset ){ memcpy(pColset, pColsetOrig, (size_t)nByte); @@ -238616,7 +242336,7 @@ static int sqlite3Fts5ExprClonePhrase( }else{ /* This happens when parsing a token or quoted phrase that contains ** no token characters at all. (e.g ... MATCH '""'). */ - sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); + sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRPHRASE(1)); } } @@ -238681,7 +242401,8 @@ static void sqlite3Fts5ParseSetDistance( ); return; } - nNear = nNear * 10 + (p->p[i] - '0'); + if( nNear<214748363 ) nNear = nNear * 10 + (p->p[i] - '0'); + /* ^^^^^^^^^^^^^^^--- Prevent integer overflow */ } }else{ nNear = FTS5_DEFAULT_NEARDIST; @@ -238710,7 +242431,7 @@ static Fts5Colset *fts5ParseColset( assert( pParse->rc==SQLITE_OK ); assert( iCol>=0 && iColpConfig->nCol ); - pNew = sqlite3_realloc64(p, sizeof(Fts5Colset) + sizeof(int)*nCol); + pNew = sqlite3_realloc64(p, SZ_FTS5COLSET(nCol+1)); if( pNew==0 ){ pParse->rc = SQLITE_NOMEM; }else{ @@ -238745,7 +242466,7 @@ static Fts5Colset *sqlite3Fts5ParseColsetInvert(Fts5Parse *pParse, Fts5Colset *p int nCol = pParse->pConfig->nCol; pRet = (Fts5Colset*)sqlite3Fts5MallocZero(&pParse->rc, - sizeof(Fts5Colset) + sizeof(int)*nCol + SZ_FTS5COLSET(nCol+1) ); if( pRet ){ int i; @@ -238806,7 +242527,7 @@ static Fts5Colset *sqlite3Fts5ParseColset( static Fts5Colset *fts5CloneColset(int *pRc, Fts5Colset *pOrig){ Fts5Colset *pRet; if( pOrig ){ - sqlite3_int64 nByte = sizeof(Fts5Colset) + (pOrig->nCol-1) * sizeof(int); + sqlite3_int64 nByte = SZ_FTS5COLSET(pOrig->nCol); pRet = (Fts5Colset*)sqlite3Fts5MallocZero(pRc, nByte); if( pRet ){ memcpy(pRet, pOrig, (size_t)nByte); @@ -238933,6 +242654,9 @@ static void fts5ExprAssignXNext(Fts5ExprNode *pNode){ } } +/* +** Add pSub as a child of p. 
+*/ static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ int ii = p->nChild; if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){ @@ -238971,7 +242695,7 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( assert( pNear->nPhrase==1 ); assert( pParse->bPhraseToAnd ); - nByte = sizeof(Fts5ExprNode) + nTerm*sizeof(Fts5ExprNode*); + nByte = SZ_FTS5EXPRNODE(nTerm+1); pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); if( pRet ){ pRet->eType = FTS5_AND; @@ -238981,7 +242705,7 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( pParse->nPhrase--; for(ii=0; iirc, sizeof(Fts5ExprPhrase) + &pParse->rc, SZ_FTS5EXPRPHRASE(1) ); if( pPhrase ){ if( parseGrowPhraseArray(pParse) ){ @@ -239050,7 +242774,7 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( if( pRight->eType==eType ) nChild += pRight->nChild-1; } - nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1); + nByte = SZ_FTS5EXPRNODE(nChild); pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); if( pRet ){ @@ -239077,19 +242801,23 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( "fts5: %s queries are not supported (detail!=full)", pNear->nPhrase==1 ? "phrase": "NEAR" ); - sqlite3_free(pRet); + sqlite3Fts5ParseNodeFree(pRet); pRet = 0; + pNear = 0; + assert( pLeft==0 && pRight==0 ); } } }else{ + assert( pNear==0 ); fts5ExprAddChildren(pRet, pLeft); fts5ExprAddChildren(pRet, pRight); + pLeft = pRight = 0; if( pRet->iHeight>SQLITE_FTS5_MAX_EXPR_DEPTH ){ sqlite3Fts5ParseError(pParse, "fts5 expression tree is too large (maximum depth %d)", SQLITE_FTS5_MAX_EXPR_DEPTH ); - sqlite3_free(pRet); + sqlite3Fts5ParseNodeFree(pRet); pRet = 0; } } @@ -239141,6 +242869,8 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( ); if( pRight->eType==FTS5_EOF ){ + assert( pParse->apPhrase!=0 ); + assert( pParse->nPhrase>0 ); assert( pParse->apPhrase[pParse->nPhrase-1]==pRight->pNear->apPhrase[0] ); sqlite3Fts5ParseNodeFree(pRight); pRet = pLeft; @@ -239713,7 +243443,7 @@ static int fts5ExprPopulatePoslistsCb( int rc = sqlite3Fts5PoslistWriterAppend( &pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff ); - if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){ + if( rc==SQLITE_OK && (pExpr->pConfig->bTokendata || pT->bPrefix) ){ int iCol = p->iOff>>32; int iTokOff = p->iOff & 0x7FFFFFFF; rc = sqlite3Fts5IndexIterWriteTokendata( @@ -239773,6 +243503,7 @@ static int fts5ExprCheckPoslists(Fts5ExprNode *pNode, i64 iRowid){ pNode->iRowid = iRowid; pNode->bEof = 0; switch( pNode->eType ){ + case 0: case FTS5_TERM: case FTS5_STRING: return (pNode->pNear->apPhrase[0]->poslist.n>0); @@ -239905,21 +243636,20 @@ static int sqlite3Fts5ExprInstToken( return SQLITE_RANGE; } pTerm = &pPhrase->aTerm[iToken]; - if( pTerm->bPrefix==0 ){ - if( pExpr->pConfig->bTokendata ){ - rc = sqlite3Fts5IterToken( - pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut - ); - }else{ - *ppOut = pTerm->pTerm; - *pnOut = pTerm->nFullTerm; - } + if( pExpr->pConfig->bTokendata || pTerm->bPrefix ){ + rc = sqlite3Fts5IterToken( + pTerm->pIter, pTerm->pTerm, pTerm->nQueryTerm, + iRowid, iCol, iOff+iToken, ppOut, pnOut + ); + }else{ + *ppOut = pTerm->pTerm; + *pnOut = pTerm->nFullTerm; } return rc; } /* -** Clear the token mappings for all Fts5IndexIter objects mannaged by +** Clear the token mappings for all Fts5IndexIter objects managed by ** the expression passed as the only argument. 
*/ static void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){ @@ -239954,7 +243684,7 @@ typedef struct Fts5HashEntry Fts5HashEntry; /* ** This file contains the implementation of an in-memory hash table used -** to accumuluate "term -> doclist" content before it is flused to a level-0 +** to accumulate "term -> doclist" content before it is flushed to a level-0 ** segment. */ @@ -240011,7 +243741,7 @@ struct Fts5HashEntry { }; /* -** Eqivalent to: +** Equivalent to: ** ** char *fts5EntryKey(Fts5HashEntry *pEntry){ return zKey; } */ @@ -240947,9 +244677,13 @@ struct Fts5Structure { u64 nOriginCntr; /* Origin value for next top-level segment */ int nSegment; /* Total segments in this structure */ int nLevel; /* Number of levels in this index */ - Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */ + Fts5StructureLevel aLevel[FLEXARRAY]; /* Array of nLevel level objects */ }; +/* Size (in bytes) of an Fts5Structure object holding up to N levels */ +#define SZ_FTS5STRUCTURE(N) \ + (offsetof(Fts5Structure,aLevel) + (N)*sizeof(Fts5StructureLevel)) + /* ** An object of type Fts5SegWriter is used to write to segments. */ @@ -241079,11 +244813,15 @@ struct Fts5SegIter { ** Array of tombstone pages. Reference counted. */ struct Fts5TombstoneArray { - int nRef; /* Number of pointers to this object */ + int nRef; /* Number of pointers to this object */ int nTombstone; - Fts5Data *apTombstone[1]; /* Array of tombstone pages */ + Fts5Data *apTombstone[FLEXARRAY]; /* Array of tombstone pages */ }; +/* Size (in bytes) of an Fts5TombstoneArray holding up to N tombstones */ +#define SZ_FTS5TOMBSTONEARRAY(N) \ + (offsetof(Fts5TombstoneArray,apTombstone)+(N)*sizeof(Fts5Data*)) + /* ** Argument is a pointer to an Fts5Data structure that contains a ** leaf page. @@ -241152,9 +244890,12 @@ struct Fts5Iter { i64 iSwitchRowid; /* Firstest rowid of other than aFirst[1] */ Fts5CResult *aFirst; /* Current merge state (see above) */ - Fts5SegIter aSeg[1]; /* Array of segment iterators */ + Fts5SegIter aSeg[FLEXARRAY]; /* Array of segment iterators */ }; +/* Size (in bytes) of an Fts5Iter object holding up to N segment iterators */ +#define SZ_FTS5ITER(N) (offsetof(Fts5Iter,aSeg)+(N)*sizeof(Fts5SegIter)) + /* ** An instance of the following type is used to iterate through the contents ** of a doclist-index record. @@ -241181,9 +244922,13 @@ struct Fts5DlidxLvl { struct Fts5DlidxIter { int nLvl; int iSegid; - Fts5DlidxLvl aLvl[1]; + Fts5DlidxLvl aLvl[FLEXARRAY]; }; +/* Size (in bytes) of an Fts5DlidxIter object with up to N levels */ +#define SZ_FTS5DLIDXITER(N) \ + (offsetof(Fts5DlidxIter,aLvl)+(N)*sizeof(Fts5DlidxLvl)) + static void fts5PutU16(u8 *aOut, u16 iVal){ aOut[0] = (iVal>>8); aOut[1] = (iVal&0xFF); @@ -241303,11 +245048,13 @@ static int fts5LeafFirstTermOff(Fts5Data *pLeaf){ /* ** Close the read-only blob handle, if it is open. 
*/ -static void sqlite3Fts5IndexCloseReader(Fts5Index *p){ +static void fts5IndexCloseReader(Fts5Index *p){ if( p->pReader ){ + int rc; sqlite3_blob *pReader = p->pReader; p->pReader = 0; - sqlite3_blob_close(pReader); + rc = sqlite3_blob_close(pReader); + if( p->rc==SQLITE_OK ) p->rc = rc; } } @@ -241332,7 +245079,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ assert( p->pReader==0 ); p->pReader = pBlob; if( rc!=SQLITE_OK ){ - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); } if( rc==SQLITE_ABORT ) rc = SQLITE_OK; } @@ -241356,11 +245103,12 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ if( rc==SQLITE_OK ){ u8 *aOut = 0; /* Read blob data into this buffer */ int nByte = sqlite3_blob_bytes(p->pReader); - sqlite3_int64 nAlloc = sizeof(Fts5Data) + nByte + FTS5_DATA_PADDING; + int szData = (sizeof(Fts5Data) + 7) & ~7; + sqlite3_int64 nAlloc = szData + nByte + FTS5_DATA_PADDING; pRet = (Fts5Data*)sqlite3_malloc64(nAlloc); if( pRet ){ pRet->nn = nByte; - aOut = pRet->p = (u8*)&pRet[1]; + aOut = pRet->p = (u8*)pRet + szData; }else{ rc = SQLITE_NOMEM; } @@ -241383,6 +245131,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ } assert( (pRet==0)==(p->rc!=SQLITE_OK) ); + assert( pRet==0 || EIGHT_BYTE_ALIGNMENT( pRet->p ) ); return pRet; } @@ -241414,9 +245163,13 @@ static int fts5IndexPrepareStmt( ){ if( p->rc==SQLITE_OK ){ if( zSql ){ - p->rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1, + int rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1, SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_NO_VTAB, ppStmt, 0); + /* If this prepare() call fails with SQLITE_ERROR, then one of the + ** %_idx or %_data tables has been removed or modified. Call this + ** corruption. */ + p->rc = (rc==SQLITE_ERROR ? SQLITE_CORRUPT : rc); }else{ p->rc = SQLITE_NOMEM; } @@ -241543,7 +245296,7 @@ static int sqlite3Fts5StructureTest(Fts5Index *p, void *pStruct){ static void fts5StructureMakeWritable(int *pRc, Fts5Structure **pp){ Fts5Structure *p = *pp; if( *pRc==SQLITE_OK && p->nRef>1 ){ - i64 nByte = sizeof(Fts5Structure)+(p->nLevel-1)*sizeof(Fts5StructureLevel); + i64 nByte = SZ_FTS5STRUCTURE(p->nLevel); Fts5Structure *pNew; pNew = (Fts5Structure*)sqlite3Fts5MallocZero(pRc, nByte); if( pNew ){ @@ -241617,10 +245370,7 @@ static int fts5StructureDecode( ){ return FTS5_CORRUPT; } - nByte = ( - sizeof(Fts5Structure) + /* Main structure */ - sizeof(Fts5StructureLevel) * (nLevel-1) /* aLevel[] array */ - ); + nByte = SZ_FTS5STRUCTURE(nLevel); pRet = (Fts5Structure*)sqlite3Fts5MallocZero(&rc, nByte); if( pRet ){ @@ -241700,10 +245450,7 @@ static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){ if( *pRc==SQLITE_OK ){ Fts5Structure *pStruct = *ppStruct; int nLevel = pStruct->nLevel; - sqlite3_int64 nByte = ( - sizeof(Fts5Structure) + /* Main structure */ - sizeof(Fts5StructureLevel) * (nLevel+1) /* aLevel[] array */ - ); + sqlite3_int64 nByte = SZ_FTS5STRUCTURE(nLevel+2); pStruct = sqlite3_realloc64(pStruct, nByte); if( pStruct ){ @@ -242242,7 +245989,7 @@ static Fts5DlidxIter *fts5DlidxIterInit( int bDone = 0; for(i=0; p->rc==SQLITE_OK && bDone==0; i++){ - sqlite3_int64 nByte = sizeof(Fts5DlidxIter) + i * sizeof(Fts5DlidxLvl); + sqlite3_int64 nByte = SZ_FTS5DLIDXITER(i+1); Fts5DlidxIter *pNew; pNew = (Fts5DlidxIter*)sqlite3_realloc64(pIter, nByte); @@ -242458,9 +246205,9 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){ ** leave an error in the Fts5Index object. 
*/ static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){ - const int nTomb = pIter->pSeg->nPgTombstone; + const i64 nTomb = (i64)pIter->pSeg->nPgTombstone; if( nTomb>0 ){ - int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray); + i64 nByte = SZ_FTS5TOMBSTONEARRAY(nTomb+1); Fts5TombstoneArray *pNew; pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ @@ -242708,7 +246455,7 @@ static void fts5SegIterNext_None( if( iOffiEndofDoclist ){ /* Next entry is on the current page */ - i64 iDelta; + u64 iDelta; iOff += sqlite3Fts5GetVarint(&pIter->pLeaf->p[iOff], (u64*)&iDelta); pIter->iLeafOffset = iOff; pIter->iRowid += iDelta; @@ -243921,8 +247668,7 @@ static Fts5Iter *fts5MultiIterAlloc( for(nSlot=2; nSlotaSeg[] */ + SZ_FTS5ITER(nSlot) + /* pNew + pNew->aSeg[] */ sizeof(Fts5CResult) * nSlot /* pNew->aFirst[] */ ); if( pNew ){ @@ -245412,6 +249158,11 @@ static int fts5IndexFindDeleteMerge(Fts5Index *p, Fts5Structure *pStruct){ nBest = nPercent; } } + + /* If pLvl is already the input level to an ongoing merge, look no + ** further for a merge candidate. The caller should be allowed to + ** continue merging from pLvl first. */ + if( pLvl->nMerge ) break; } } return iRet; @@ -245523,6 +249274,14 @@ static int fts5IndexReturn(Fts5Index *p){ return rc; } +/* +** Close the read-only blob handle, if it is open. +*/ +static void sqlite3Fts5IndexCloseReader(Fts5Index *p){ + fts5IndexCloseReader(p); + fts5IndexReturn(p); +} + typedef struct Fts5FlushCtx Fts5FlushCtx; struct Fts5FlushCtx { Fts5Index *pIdx; @@ -245710,7 +249469,7 @@ static void fts5DoSecureDelete( int iDelKeyOff = 0; /* Offset of deleted key, if any */ nIdx = nPg-iPgIdx; - aIdx = sqlite3Fts5MallocZero(&p->rc, nIdx+16); + aIdx = sqlite3Fts5MallocZero(&p->rc, ((i64)nIdx)+16); if( p->rc ) return; memcpy(aIdx, &aPg[iPgIdx], nIdx); @@ -245980,8 +249739,11 @@ static void fts5DoSecureDelete( ** This is called as part of flushing a delete to disk in 'secure-delete' ** mode. It edits the segments within the database described by argument ** pStruct to remove the entries for term zTerm, rowid iRowid. +** +** Return SQLITE_OK if successful, or an SQLite error code if an error +** has occurred. Any error code is also stored in the Fts5Index handle. */ -static void fts5FlushSecureDelete( +static int fts5FlushSecureDelete( Fts5Index *p, Fts5Structure *pStruct, const char *zTerm, @@ -245991,6 +249753,24 @@ static void fts5FlushSecureDelete( const int f = FTS5INDEX_QUERY_SKIPHASH; Fts5Iter *pIter = 0; /* Used to find term instance */ + /* If the version number has not been set to SECUREDELETE, do so now. */ + if( p->pConfig->iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE ){ + Fts5Config *pConfig = p->pConfig; + sqlite3_stmt *pStmt = 0; + fts5IndexPrepareStmt(p, &pStmt, sqlite3_mprintf( + "REPLACE INTO %Q.'%q_config' VALUES ('version', %d)", + pConfig->zDb, pConfig->zName, FTS5_CURRENT_VERSION_SECUREDELETE + )); + if( p->rc==SQLITE_OK ){ + int rc; + sqlite3_step(pStmt); + rc = sqlite3_finalize(pStmt); + if( p->rc==SQLITE_OK ) p->rc = rc; + pConfig->iCookie++; + pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; + } + } + fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter); if( fts5MultiIterEof(p, pIter)==0 ){ i64 iThis = fts5MultiIterRowid(pIter); @@ -246008,6 +249788,7 @@ static void fts5FlushSecureDelete( } fts5MultiIterFree(pIter); + return p->rc; } @@ -246091,8 +249872,9 @@ static void fts5FlushOneHash(Fts5Index *p){ ** using fts5FlushSecureDelete(). 
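** (The option itself is enabled on a per-table basis with, for example,
** "INSERT INTO tbl(tbl, rank) VALUES('secure-delete', 1)", where the
** name "tbl" stands for the fts5 table and is illustrative.)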
*/ if( bSecureDelete ){ if( eDetail==FTS5_DETAIL_NONE ){ - if( iOffrc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ iOff++; continue; @@ -246251,7 +250034,7 @@ static Fts5Structure *fts5IndexOptimizeStruct( Fts5Structure *pStruct ){ Fts5Structure *pNew = 0; - sqlite3_int64 nByte = sizeof(Fts5Structure); + sqlite3_int64 nByte = SZ_FTS5STRUCTURE(1); int nSeg = pStruct->nSegment; int i; @@ -246280,7 +250063,8 @@ static Fts5Structure *fts5IndexOptimizeStruct( assert( pStruct->aLevel[i].nMerge<=nThis ); } - nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel); + nByte += (((i64)pStruct->nLevel)+1) * sizeof(Fts5StructureLevel); + assert( nByte==SZ_FTS5STRUCTURE(pStruct->nLevel+2) ); pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ @@ -246721,6 +250505,387 @@ static void fts5MergePrefixLists( *p1 = out; } + +/* +** Iterate through a range of entries in the FTS index, invoking the xVisit +** callback for each of them. +** +** Parameter pToken points to an nToken buffer containing an FTS index term +** (i.e. a document term with the preceding 1 byte index identifier - +** FTS5_MAIN_PREFIX or similar). If bPrefix is true, then the call visits +** all entries for terms that have pToken/nToken as a prefix. If bPrefix +** is false, then only entries with pToken/nToken as the entire key are +** visited. +** +** If the current table is a tokendata=1 table, then if bPrefix is true then +** each index term is treated separately. However, if bPrefix is false, then +** all index terms corresponding to pToken/nToken are collapsed into a single +** term before the callback is invoked. +** +** The callback invoked for each entry visited is specified by paramter xVisit. +** Each time it is invoked, it is passed a pointer to the Fts5Index object, +** a copy of the 7th paramter to this function (pCtx) and a pointer to the +** iterator that indicates the current entry. If the current entry is the +** first with a new term (i.e. different from that of the previous entry, +** including the very first term), then the final two parameters are passed +** a pointer to the term and its size in bytes, respectively. If the current +** entry is not the first associated with its term, these two parameters +** are passed 0. +** +** If parameter pColset is not NULL, then it is used to filter entries before +** the callback is invoked. +*/ +static int fts5VisitEntries( + Fts5Index *p, /* Fts5 index object */ + Fts5Colset *pColset, /* Columns filter to apply, or NULL */ + u8 *pToken, /* Buffer containing token */ + int nToken, /* Size of buffer pToken in bytes */ + int bPrefix, /* True for a prefix scan */ + void (*xVisit)(Fts5Index*, void *pCtx, Fts5Iter *pIter, const u8*, int), + void *pCtx /* Passed as second argument to xVisit() */ +){ + const int flags = (bPrefix ? 
FTS5INDEX_QUERY_SCAN : 0) + | FTS5INDEX_QUERY_SKIPEMPTY + | FTS5INDEX_QUERY_NOOUTPUT; + Fts5Iter *p1 = 0; /* Iterator used to gather data from index */ + int bNewTerm = 1; + Fts5Structure *pStruct = fts5StructureRead(p); + + fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); + fts5IterSetOutputCb(&p->rc, p1); + for( /* no-op */ ; + fts5MultiIterEof(p, p1)==0; + fts5MultiIterNext2(p, p1, &bNewTerm) + ){ + Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; + int nNew = 0; + const u8 *pNew = 0; + + p1->xSetOutputs(p1, pSeg); + if( p->rc ) break; + + if( bNewTerm ){ + nNew = pSeg->term.n; + pNew = pSeg->term.p; + if( nNewrc; +} + + +/* +** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an +** array of these for each row it visits (so all iRowid fields are the same). +** Or, for an iterator used by an "ORDER BY rank" query, it accumulates an +** array of these for the entire query (in which case iRowid fields may take +** a variety of values). +** +** Each instance in the array indicates the iterator (and therefore term) +** associated with position iPos of rowid iRowid. This is used by the +** xInstToken() API. +** +** iRowid: +** Rowid for the current entry. +** +** iPos: +** Position of current entry within row. In the usual ((iCol<<32)+iOff) +** format (e.g. see macros FTS5_POS2COLUMN() and FTS5_POS2OFFSET()). +** +** iIter: +** If the Fts5TokenDataIter iterator that the entry is part of is +** actually an iterator (i.e. with nIter>0, not just a container for +** Fts5TokenDataMap structures), then this variable is an index into +** the apIter[] array. The corresponding term is that which the iterator +** at apIter[iIter] currently points to. +** +** Or, if the Fts5TokenDataIter iterator is just a container object +** (nIter==0), then iIter is an index into the term.p[] buffer where +** the term is stored. +** +** nByte: +** In the case where iIter is an index into term.p[], this variable +** is the size of the term in bytes. If iIter is an index into apIter[], +** this variable is unused. +*/ +struct Fts5TokenDataMap { + i64 iRowid; /* Row this token is located in */ + i64 iPos; /* Position of token */ + int iIter; /* Iterator token was read from */ + int nByte; /* Length of token in bytes (or 0) */ +}; + +/* +** An object used to supplement Fts5Iter for tokendata=1 iterators. +** +** This object serves two purposes. The first is as a container for an array +** of Fts5TokenDataMap structures, which are used to find the token required +** when the xInstToken() API is used. This is done by the nMapAlloc, nMap and +** aMap[] variables. +*/ +struct Fts5TokenDataIter { + int nMapAlloc; /* Allocated size of aMap[] in entries */ + int nMap; /* Number of valid entries in aMap[] */ + Fts5TokenDataMap *aMap; /* Array of (rowid+pos -> token) mappings */ + + /* The following are used for prefix-queries only. */ + Fts5Buffer terms; + + /* The following are used for other full-token tokendata queries only. */ + int nIter; + int nIterAlloc; + Fts5PoslistReader *aPoslistReader; + int *aPoslistToIter; + Fts5Iter *apIter[FLEXARRAY]; +}; + +/* Size in bytes of an Fts5TokenDataIter object holding up to N iterators */ +#define SZ_FTS5TOKENDATAITER(N) \ + (offsetof(Fts5TokenDataIter,apIter) + (N)*sizeof(Fts5Iter)) + +/* +** The two input arrays - a1[] and a2[] - are in sorted order. This function +** merges the two arrays together and writes the result to output array +** aOut[]. aOut[] is guaranteed to be large enough to hold the result. 
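**
** For example (illustrative values, comparing on rowid only): merging
** a1 = {1, 3} with a2 = {2, 3} produces aOut = {1, 2, 3, 3}.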
+** +** Duplicate entries are copied into the output. So the size of the output +** array is always (n1+n2) entries. +*/ +static void fts5TokendataMerge( + Fts5TokenDataMap *a1, int n1, /* Input array 1 */ + Fts5TokenDataMap *a2, int n2, /* Input array 2 */ + Fts5TokenDataMap *aOut /* Output array */ +){ + int i1 = 0; + int i2 = 0; + + assert( n1>=0 && n2>=0 ); + while( i1=n2 || (i1rc==SQLITE_OK ){ + if( pT->nMap==pT->nMapAlloc ){ + int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; + int nAlloc = nNew * sizeof(Fts5TokenDataMap); + Fts5TokenDataMap *aNew; + + aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nAlloc); + if( aNew==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pT->aMap[pT->nMap].iRowid = iRowid; + pT->aMap[pT->nMap].iPos = iPos; + pT->aMap[pT->nMap].iIter = iIter; + pT->aMap[pT->nMap].nByte = nByte; + pT->nMap++; + } +} + +/* +** Sort the contents of the pT->aMap[] array. +** +** The sorting algorithm requires a malloc(). If this fails, an error code +** is left in Fts5Index.rc before returning. +*/ +static void fts5TokendataIterSortMap(Fts5Index *p, Fts5TokenDataIter *pT){ + Fts5TokenDataMap *aTmp = 0; + int nByte = pT->nMap * sizeof(Fts5TokenDataMap); + + aTmp = (Fts5TokenDataMap*)sqlite3Fts5MallocZero(&p->rc, nByte); + if( aTmp ){ + Fts5TokenDataMap *a1 = pT->aMap; + Fts5TokenDataMap *a2 = aTmp; + i64 nHalf; + + for(nHalf=1; nHalfnMap; nHalf=nHalf*2){ + int i1; + for(i1=0; i1nMap; i1+=(nHalf*2)){ + int n1 = MIN(nHalf, pT->nMap-i1); + int n2 = MIN(nHalf, pT->nMap-i1-n1); + fts5TokendataMerge(&a1[i1], n1, &a1[i1+n1], n2, &a2[i1]); + } + SWAPVAL(Fts5TokenDataMap*, a1, a2); + } + + if( a1!=pT->aMap ){ + memcpy(pT->aMap, a1, pT->nMap*sizeof(Fts5TokenDataMap)); + } + sqlite3_free(aTmp); + +#ifdef SQLITE_DEBUG + { + int ii; + for(ii=1; iinMap; ii++){ + Fts5TokenDataMap *p1 = &pT->aMap[ii-1]; + Fts5TokenDataMap *p2 = &pT->aMap[ii]; + assert( p1->iRowidiRowid + || (p1->iRowid==p2->iRowid && p1->iPos<=p2->iPos) + ); + } + } +#endif + } +} + +/* +** Delete an Fts5TokenDataIter structure and its contents. +*/ +static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ + if( pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + fts5MultiIterFree(pSet->apIter[ii]); + } + fts5BufferFree(&pSet->terms); + sqlite3_free(pSet->aPoslistReader); + sqlite3_free(pSet->aMap); + sqlite3_free(pSet); + } +} + + +/* +** fts5VisitEntries() context object used by fts5SetupPrefixIterTokendata() +** to pass data to prefixIterSetupTokendataCb(). +*/ +typedef struct TokendataSetupCtx TokendataSetupCtx; +struct TokendataSetupCtx { + Fts5TokenDataIter *pT; /* Object being populated with mappings */ + int iTermOff; /* Offset of current term in terms.p[] */ + int nTermByte; /* Size of current term in bytes */ +}; + +/* +** fts5VisitEntries() callback used by fts5SetupPrefixIterTokendata(). This +** callback adds an entry to the Fts5TokenDataIter.aMap[] array for each +** position in the current position-list. It doesn't matter that some of +** these may be out of order - they will be sorted later. 
+/*
+** fts5VisitEntries() callback used by fts5SetupPrefixIterTokendata(). This
+** callback adds an entry to the Fts5TokenDataIter.aMap[] array for each
+** position in the current position-list. It doesn't matter that some of
+** these may be out of order - they will be sorted later.
+*/
+static void prefixIterSetupTokendataCb(
+  Fts5Index *p,
+  void *pCtx,
+  Fts5Iter *p1,
+  const u8 *pNew,
+  int nNew
+){
+  TokendataSetupCtx *pSetup = (TokendataSetupCtx*)pCtx;
+  int iPosOff = 0;
+  i64 iPos = 0;
+
+  if( pNew ){
+    pSetup->nTermByte = nNew-1;
+    pSetup->iTermOff = pSetup->pT->terms.n;
+    fts5BufferAppendBlob(&p->rc, &pSetup->pT->terms, nNew-1, pNew+1);
+  }
+
+  while( 0==sqlite3Fts5PoslistNext64(
+      p1->base.pData, p1->base.nData, &iPosOff, &iPos
+  ) ){
+    fts5TokendataIterAppendMap(p,
+        pSetup->pT, pSetup->iTermOff, pSetup->nTermByte, p1->base.iRowid, iPos
+    );
+  }
+}
+
+
+/*
+** Context object passed by fts5SetupPrefixIter() to fts5VisitEntries().
+*/
+typedef struct PrefixSetupCtx PrefixSetupCtx;
+struct PrefixSetupCtx {
+  void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*);
+  void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*);
+  i64 iLastRowid;
+  int nMerge;
+  Fts5Buffer *aBuf;
+  int nBuf;
+  Fts5Buffer doclist;
+  TokendataSetupCtx *pTokendata;
+};
+
+/*
+** fts5VisitEntries() callback used by fts5SetupPrefixIter()
+*/
+static void prefixIterSetupCb(
+  Fts5Index *p,
+  void *pCtx,
+  Fts5Iter *p1,
+  const u8 *pNew,
+  int nNew
+){
+  PrefixSetupCtx *pSetup = (PrefixSetupCtx*)pCtx;
+  const int nMerge = pSetup->nMerge;
+
+  if( p1->base.nData>0 ){
+    if( p1->base.iRowid<=pSetup->iLastRowid && pSetup->doclist.n>0 ){
+      int i;
+      for(i=0; p->rc==SQLITE_OK && pSetup->doclist.n; i++){
+        int i1 = i*nMerge;
+        int iStore;
+        assert( i1+nMerge<=pSetup->nBuf );
+        for(iStore=i1; iStore<i1+nMerge; iStore++){
+          if( pSetup->aBuf[iStore].n==0 ){
+            fts5BufferSwap(&pSetup->doclist, &pSetup->aBuf[iStore]);
+            fts5BufferZero(&pSetup->doclist);
+            break;
+          }
+        }
+        if( iStore==i1+nMerge ){
+          pSetup->xMerge(p, &pSetup->doclist, nMerge, &pSetup->aBuf[i1]);
+          for(iStore=i1; iStore<i1+nMerge; iStore++){
+            fts5BufferZero(&pSetup->aBuf[iStore]);
+          }
+        }
+      }
+      pSetup->iLastRowid = 0;
+    }
+
+    pSetup->xAppend(
+        p, (u64)p1->base.iRowid-(u64)pSetup->iLastRowid, p1, &pSetup->doclist
+    );
+    pSetup->iLastRowid = p1->base.iRowid;
+  }
+
+  if( pSetup->pTokendata ){
+    prefixIterSetupTokendataCb(p, (void*)pSetup->pTokendata, p1, pNew, nNew);
+  }
+}
 static void fts5SetupPrefixIter(
   Fts5Index *p,                   /* Index to read from */
   int bDesc,                      /* True for "ORDER BY rowid DESC" */
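
The refactor in the hunk that follows replaces two open-coded
segment-iterator loops with a single visitor: fts5SetupPrefixIter() now
packs its loop state into a PrefixSetupCtx and hands prefixIterSetupCb to
fts5VisitEntries(), which invokes the callback once per matching index
entry. A minimal standalone sketch of that callback-plus-context shape,
not part of the patch and using simplified illustrative types (Entry,
SetupCtx, visitEntries):

    #include <stdio.h>

    typedef struct Entry { int iRowid; } Entry;
    typedef void (*VisitCb)(void *pCtx, const Entry *pEntry);

    /* Stand-in for fts5VisitEntries(): drive the callback per entry */
    static void visitEntries(const Entry *aEntry, int nEntry,
                             VisitCb xCallback, void *pCtx){
      int i;
      for(i=0; i<nEntry; i++) xCallback(pCtx, &aEntry[i]);
    }

    /* Stand-in for PrefixSetupCtx: state shared across invocations */
    typedef struct SetupCtx { int iLastRowid; int nSeen; } SetupCtx;

    static void setupCb(void *pCtx, const Entry *pE){
      SetupCtx *p = (SetupCtx*)pCtx;
      p->nSeen++;
      p->iLastRowid = pE->iRowid;  /* real code delta-encodes against this */
    }

    int main(void){
      Entry a[] = {{10},{15},{23}};
      SetupCtx s = {0, 0};
      visitEntries(a, 3, setupCb, (void*)&s);  /* both passes share one ctx */
      printf("%d entries, last rowid %d\n", s.nSeen, s.iLastRowid);
      return 0;
    }

Because the context survives across calls, the same callback can be driven
once over the exact-term index and once over the prefix index while
accumulating into a single doclist, which is exactly how the two
fts5VisitEntries() calls below are used.
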
@@ -246731,38 +250896,41 @@ static void fts5SetupPrefixIter(
   Fts5Iter **ppIter               /* OUT: New iterator */
 ){
   Fts5Structure *pStruct;
-  Fts5Buffer *aBuf;
-  int nBuf = 32;
-  int nMerge = 1;
+  PrefixSetupCtx s;
+  TokendataSetupCtx s2;
+
+  memset(&s, 0, sizeof(s));
+  memset(&s2, 0, sizeof(s2));
+
+  s.nMerge = 1;
+  s.iLastRowid = 0;
+  s.nBuf = 32;
+  if( iIdx==0
+   && p->pConfig->eDetail==FTS5_DETAIL_FULL
+   && p->pConfig->bPrefixInsttoken
+  ){
+    s.pTokendata = &s2;
+    s2.pT = (Fts5TokenDataIter*)fts5IdxMalloc(p, SZ_FTS5TOKENDATAITER(1));
+  }
 
-  void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*);
-  void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*);
   if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){
-    xMerge = fts5MergeRowidLists;
-    xAppend = fts5AppendRowid;
+    s.xMerge = fts5MergeRowidLists;
+    s.xAppend = fts5AppendRowid;
   }else{
-    nMerge = FTS5_MERGE_NLIST-1;
-    nBuf = nMerge*8;   /* Sufficient to merge (16^8)==(2^32) lists */
-    xMerge = fts5MergePrefixLists;
-    xAppend = fts5AppendPoslist;
+    s.nMerge = FTS5_MERGE_NLIST-1;
+    s.nBuf = s.nMerge*8;   /* Sufficient to merge (16^8)==(2^32) lists */
+    s.xMerge = fts5MergePrefixLists;
+    s.xAppend = fts5AppendPoslist;
   }
 
-  aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf);
+  s.aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*s.nBuf);
   pStruct = fts5StructureRead(p);
-  assert( p->rc!=SQLITE_OK || (aBuf && pStruct) );
+  assert( p->rc!=SQLITE_OK || (s.aBuf && pStruct) );
 
   if( p->rc==SQLITE_OK ){
-    const int flags = FTS5INDEX_QUERY_SCAN
-                    | FTS5INDEX_QUERY_SKIPEMPTY
-                    | FTS5INDEX_QUERY_NOOUTPUT;
+    void *pCtx = (void*)&s;
     int i;
-    i64 iLastRowid = 0;
-    Fts5Iter *p1 = 0;     /* Iterator used to gather data from index */
     Fts5Data *pData;
-    Fts5Buffer doclist;
-    int bNewTerm = 1;
-
-    memset(&doclist, 0, sizeof(doclist));
 
     /* If iIdx is non-zero, then it is the number of a prefix-index for
     ** prefixes 1 character longer than the prefix being queried for. That
@@ -246770,94 +250938,46 @@ static void fts5SetupPrefixIter(
     ** corresponding to the prefix itself. That one is extracted from the
     ** main term index here. */
     if( iIdx!=0 ){
-      int dummy = 0;
-      const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT;
       pToken[0] = FTS5_MAIN_PREFIX;
-      fts5MultiIterNew(p, pStruct, f2, pColset, pToken, nToken, -1, 0, &p1);
-      fts5IterSetOutputCb(&p->rc, p1);
-      for(;
-          fts5MultiIterEof(p, p1)==0;
-          fts5MultiIterNext2(p, p1, &dummy)
-      ){
-        Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ];
-        p1->xSetOutputs(p1, pSeg);
-        if( p1->base.nData ){
-          xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist);
-          iLastRowid = p1->base.iRowid;
-        }
-      }
-      fts5MultiIterFree(p1);
+      fts5VisitEntries(p, pColset, pToken, nToken, 0, prefixIterSetupCb, pCtx);
     }
 
     pToken[0] = FTS5_MAIN_PREFIX + iIdx;
-    fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1);
-    fts5IterSetOutputCb(&p->rc, p1);
+    fts5VisitEntries(p, pColset, pToken, nToken, 1, prefixIterSetupCb, pCtx);
 
-    for( /* no-op */ ;
-        fts5MultiIterEof(p, p1)==0;
-        fts5MultiIterNext2(p, p1, &bNewTerm)
-    ){
-      Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ];
-      int nTerm = pSeg->term.n;
-      const u8 *pTerm = pSeg->term.p;
-      p1->xSetOutputs(p1, pSeg);
-
-      assert_nc( memcmp(pToken, pTerm, MIN(nToken, nTerm))<=0 );
-      if( bNewTerm ){
-        if( nTerm<nToken || memcmp(pToken, pTerm, nToken) ) break;
-      }
-
-      if( p1->base.nData==0 ) continue;
-      if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){
-        for(i=0; p->rc==SQLITE_OK && doclist.n; i++){
-          int i1 = i*nMerge;
-          int iStore;
-          assert( i1+nMerge<=nBuf );
-          for(iStore=i1; iStore<i1+nMerge; iStore++){
-            if( aBuf[iStore].n==0 ){
-              fts5BufferSwap(&doclist, &aBuf[iStore]);
-              fts5BufferZero(&doclist);
-              break;
-            }
-          }
-          if( iStore==i1+nMerge ){
-            xMerge(p, &doclist, nMerge, &aBuf[i1]);
-            for(iStore=i1; iStore<i1+nMerge; iStore++){
-              fts5BufferZero(&aBuf[iStore]);
-            }
-          }
-        }
-        iLastRowid = 0;
-      }
-
-      xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist);
-      iLastRowid = p1->base.iRowid;
-    }
-
-    assert( (nBuf%nMerge)==0 );
-    for(i=0; i<nBuf; i+=nMerge){
+    assert( (s.nBuf%s.nMerge)==0 );
+    for(i=0; i<s.nBuf; i+=s.nMerge){
       int iFree;
       if( p->rc==SQLITE_OK ){
-        xMerge(p, &doclist, nMerge, &aBuf[i]);
+        s.xMerge(p, &s.doclist, s.nMerge, &s.aBuf[i]);
       }
-      for(iFree=i; iFree<i+nMerge; iFree++){
-        fts5BufferFree(&aBuf[iFree]);
+      for(iFree=i; iFree<i+s.nMerge; iFree++){
+        fts5BufferFree(&s.aBuf[iFree]);
       }
     }
-    fts5MultiIterFree(p1);
 
-    pData = fts5IdxMalloc(p, sizeof(Fts5Data)+doclist.n+FTS5_DATA_ZERO_PADDING);
+    pData = fts5IdxMalloc(p, sizeof(Fts5Data)+s.doclist.n+FTS5_DATA_ZERO_PADDING);
     assert( pData!=0 || p->rc!=SQLITE_OK );
     if( pData ){
       pData->p = (u8*)&pData[1];
-      pData->nn = pData->szLeaf = doclist.n;
-      if( doclist.n ) memcpy(pData->p, doclist.p, doclist.n);
+      pData->nn = pData->szLeaf = s.doclist.n;
+      if( s.doclist.n ) memcpy(pData->p, s.doclist.p, s.doclist.n);
       fts5MultiIterNew2(p, pData, bDesc, ppIter);
     }
-    fts5BufferFree(&doclist);
+
+    assert( (*ppIter)!=0 || p->rc!=SQLITE_OK );
+    if( p->rc==SQLITE_OK && s.pTokendata ){
+      fts5TokendataIterSortMap(p, s2.pT);
+      (*ppIter)->pTokenDataIter = s2.pT;
+      s2.pT = 0;
+    }
   }
 
+  fts5TokendataIterDelete(s2.pT);
+  fts5BufferFree(&s.doclist);
   fts5StructureRelease(pStruct);
-  sqlite3_free(aBuf);
+  sqlite3_free(s.aBuf);
 }
 
@@ -246895,7 +251015,7 @@ static int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){
 static int sqlite3Fts5IndexSync(Fts5Index *p){
   assert( p->rc==SQLITE_OK );
   fts5IndexFlush(p);
-  sqlite3Fts5IndexCloseReader(p);
+  fts5IndexCloseReader(p);
   return fts5IndexReturn(p);
 }
 
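
The xAppend() calls in the code above pass each rowid as a delta against
the previous one (iRowid - iLastRowid), which is what keeps the varints in
an ascending doclist short. A standalone sketch of why delta coding helps,
not part of the patch; putVarint64 is an illustrative stand-in that uses a
simple little-endian continuation-bit scheme, whereas SQLite's own varint
format differs in byte layout:

    #include <stdio.h>
    typedef unsigned long long u64;
    typedef unsigned char u8;

    /* Write v as a varint into p[]; return the number of bytes used */
    static int putVarint64(u8 *p, u64 v){
      int n = 0;
      do{
        p[n++] = (u8)((v & 0x7f) | (v>0x7f ? 0x80 : 0));
        v >>= 7;
      }while( v );
      return n;
    }

    int main(void){
      u64 aRowid[] = {1000000, 1000003, 1000010};
      u64 iLast = 0;
      u8 buf[10];
      int i, j, n;
      for(i=0; i<3; i++){
        n = putVarint64(buf, aRowid[i]-iLast);  /* delta, not absolute */
        iLast = aRowid[i];
        printf("rowid %llu -> %d byte(s):", aRowid[i], n);
        for(j=0; j<n; j++) printf(" %02x", buf[j]);
        printf("\n");
      }
      return 0;
    }

After the first entry, each neighbouring rowid costs a single byte instead
of three, and the decoder reverses the process by keeping its own running
iLastRowid.
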
@@ -246906,11 +251026,10 @@ static int sqlite3Fts5IndexSync(Fts5Index *p){
 ** records must be invalidated.
 */
 static int sqlite3Fts5IndexRollback(Fts5Index *p){
-  sqlite3Fts5IndexCloseReader(p);
+  fts5IndexCloseReader(p);
   fts5IndexDiscardData(p);
   fts5StructureInvalidate(p);
-  /* assert( p->rc==SQLITE_OK ); */
-  return SQLITE_OK;
+  return fts5IndexReturn(p);
 }
 
 /*
@@ -246919,15 +251038,17 @@ static int sqlite3Fts5IndexRollback(Fts5Index *p){
 ** and the initial version of the "averages" record (a zero-byte blob).
 */
 static int sqlite3Fts5IndexReinit(Fts5Index *p){
-  Fts5Structure s;
+  Fts5Structure *pTmp;
+  u8 tmpSpace[SZ_FTS5STRUCTURE(1)];
   fts5StructureInvalidate(p);
   fts5IndexDiscardData(p);
-  memset(&s, 0, sizeof(Fts5Structure));
+  pTmp = (Fts5Structure*)tmpSpace;
+  memset(pTmp, 0, SZ_FTS5STRUCTURE(1));
   if( p->pConfig->bContentlessDelete ){
-    s.nOriginCntr = 1;
+    pTmp->nOriginCntr = 1;
   }
   fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0);
-  fts5StructureWrite(p, &s);
+  fts5StructureWrite(p, pTmp);
   return fts5IndexReturn(p);
 }
 
@@ -247111,37 +251232,15 @@ static void fts5SegIterSetEOF(Fts5SegIter *pSeg){
   pSeg->pLeaf = 0;
 }
 
-/*
-** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an
-** array of these for each row it visits. Or, for an iterator used by an
-** "ORDER BY rank" query, it accumulates an array of these for the entire
-** query.
-**
-** Each instance in the array indicates the iterator (and therefore term)
-** associated with position iPos of rowid iRowid. This is used by the
-** xInstToken() API.
-*/
-struct Fts5TokenDataMap {
-  i64 iRowid;         /* Row this token is located in */
-  i64 iPos;           /* Position of token */
-  int iIter;          /* Iterator token was read from */
-};
-
-/*
-** An object used to supplement Fts5Iter for tokendata=1 iterators.
-*/
-struct Fts5TokenDataIter {
-  int nIter;
-  int nIterAlloc;
-
-  int nMap;
-  int nMapAlloc;
-  Fts5TokenDataMap *aMap;
-
-  Fts5PoslistReader *aPoslistReader;
-  int *aPoslistToIter;
-  Fts5Iter *apIter[1];
-};
+static void fts5IterClose(Fts5IndexIter *pIndexIter){
+  if( pIndexIter ){
+    Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+    Fts5Index *pIndex = pIter->pIndex;
+    fts5TokendataIterDelete(pIter->pTokenDataIter);
+    fts5MultiIterFree(pIter);
+    fts5IndexCloseReader(pIndex);
+  }
+}
 
 /*
 ** This function appends iterator pAppend to Fts5TokenDataIter pIn and
@@ -247157,7 +251256,7 @@ static Fts5TokenDataIter *fts5AppendTokendataIter(
   if( p->rc==SQLITE_OK ){
     if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){
       int nAlloc = pIn ? pIn->nIterAlloc*2 : 16;
-      int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter);
+      int nByte = SZ_FTS5TOKENDATAITER(nAlloc+1);
       Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte);
 
       if( pNew==0 ){
@@ -247170,7 +251269,7 @@ static Fts5TokenDataIter *fts5AppendTokendataIter(
     }
   }
   if( p->rc ){
-    sqlite3Fts5IterClose((Fts5IndexIter*)pAppend);
+    fts5IterClose((Fts5IndexIter*)pAppend);
   }else{
     pRet->apIter[pRet->nIter++] = pAppend;
   }
@@ -247179,54 +251278,6 @@ static Fts5TokenDataIter *fts5AppendTokendataIter(
   return pRet;
 }
 
-/*
-** Delete an Fts5TokenDataIter structure and its contents.
-*/
-static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){
-  if( pSet ){
-    int ii;
-    for(ii=0; ii<pSet->nIter; ii++){
-      fts5MultiIterFree(pSet->apIter[ii]);
-    }
-    sqlite3_free(pSet->aPoslistReader);
-    sqlite3_free(pSet->aMap);
-    sqlite3_free(pSet);
-  }
-}
-
-/*
-** Append a mapping to the token-map belonging to object pT.
-*/ -static void fts5TokendataIterAppendMap( - Fts5Index *p, - Fts5TokenDataIter *pT, - int iIter, - i64 iRowid, - i64 iPos -){ - if( p->rc==SQLITE_OK ){ - if( pT->nMap==pT->nMapAlloc ){ - int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; - int nByte = nNew * sizeof(Fts5TokenDataMap); - Fts5TokenDataMap *aNew; - - aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte); - if( aNew==0 ){ - p->rc = SQLITE_NOMEM; - return; - } - - pT->aMap = aNew; - pT->nMapAlloc = nNew; - } - - pT->aMap[pT->nMap].iRowid = iRowid; - pT->aMap[pT->nMap].iPos = iPos; - pT->aMap[pT->nMap].iIter = iIter; - pT->nMap++; - } -} - /* ** The iterator passed as the only argument must be a tokendata=1 iterator ** (pIter->pTokenDataIter!=0). This function sets the iterator output @@ -247267,7 +251318,7 @@ static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){ pIter->base.iRowid = iRowid; if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){ - fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1); + fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, 0, iRowid, -1); }else if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){ int nReader = 0; @@ -247431,7 +251482,7 @@ static Fts5Iter *fts5SetupTokendataIter( fts5BufferSet(&p->rc, &bSeek, nToken, pToken); } if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + fts5IterClose((Fts5IndexIter*)pNew); break; } @@ -247496,7 +251547,7 @@ static Fts5Iter *fts5SetupTokendataIter( ** not point to any terms that match the query. So delete it and break ** out of the loop - all required iterators have been collected. */ if( pSmall==0 ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + fts5IterClose((Fts5IndexIter*)pNew); break; } @@ -247520,6 +251571,7 @@ static Fts5Iter *fts5SetupTokendataIter( pRet = fts5MultiIterAlloc(p, 0); } if( pRet ){ + pRet->nSeg = 0; pRet->pTokenDataIter = pSet; if( pSet ){ fts5IterSetOutputsTokendata(pRet); @@ -247535,7 +251587,6 @@ static Fts5Iter *fts5SetupTokendataIter( return pRet; } - /* ** Open a new iterator to iterate though all rowid that match the ** specified token or token prefix. @@ -247558,8 +251609,14 @@ static int sqlite3Fts5IndexQuery( int iIdx = 0; /* Index to search */ int iPrefixIdx = 0; /* +1 prefix index */ int bTokendata = pConfig->bTokendata; + assert( buf.p!=0 ); if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken); + /* The NOTOKENDATA flag is set when each token in a tokendata=1 table + ** should be treated individually, instead of merging all those with + ** a common prefix into a single entry. This is used, for example, by + ** queries performed as part of an integrity-check, or by the fts5vocab + ** module. */ if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){ bTokendata = 0; } @@ -247590,7 +251647,7 @@ static int sqlite3Fts5IndexQuery( } if( bTokendata && iIdx==0 ){ - buf.p[0] = '0'; + buf.p[0] = FTS5_MAIN_PREFIX; pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset); }else if( iIdx<=pConfig->nPrefix ){ /* Straight index lookup */ @@ -247603,7 +251660,7 @@ static int sqlite3Fts5IndexQuery( fts5StructureRelease(pStruct); } }else{ - /* Scan multiple terms in the main index */ + /* Scan multiple terms in the main index for a prefix query. 
*/ int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0; fts5SetupPrefixIter(p, bDesc, iPrefixIdx, buf.p, nToken+1, pColset,&pRet); if( pRet==0 ){ @@ -247619,9 +251676,9 @@ static int sqlite3Fts5IndexQuery( } if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pRet); + fts5IterClose((Fts5IndexIter*)pRet); pRet = 0; - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); } *ppIter = (Fts5IndexIter*)pRet; @@ -247639,7 +251696,8 @@ static int sqlite3Fts5IndexQuery( static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; assert( pIter->pIndex->rc==SQLITE_OK ); - if( pIter->pTokenDataIter ){ + if( pIter->nSeg==0 ){ + assert( pIter->pTokenDataIter ); fts5TokendataIterNext(pIter, 0, 0); }else{ fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); @@ -247676,7 +251734,8 @@ static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){ */ static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - if( pIter->pTokenDataIter ){ + if( pIter->nSeg==0 ){ + assert( pIter->pTokenDataIter ); fts5TokendataIterNext(pIter, 1, iMatch); }else{ fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); @@ -247695,14 +251754,62 @@ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ return (z ? &z[1] : 0); } +/* +** pIter is a prefix query. This function populates pIter->pTokenDataIter +** with an Fts5TokenDataIter object containing mappings for all rows +** matched by the query. +*/ +static int fts5SetupPrefixIterTokendata( + Fts5Iter *pIter, + const char *pToken, /* Token prefix to search for */ + int nToken /* Size of pToken in bytes */ +){ + Fts5Index *p = pIter->pIndex; + Fts5Buffer token = {0, 0, 0}; + TokendataSetupCtx ctx; + + memset(&ctx, 0, sizeof(ctx)); + + fts5BufferGrow(&p->rc, &token, nToken+1); + assert( token.p!=0 || p->rc!=SQLITE_OK ); + ctx.pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, + SZ_FTS5TOKENDATAITER(1)); + + if( p->rc==SQLITE_OK ){ + + /* Fill in the token prefix to search for */ + token.p[0] = FTS5_MAIN_PREFIX; + memcpy(&token.p[1], pToken, nToken); + token.n = nToken+1; + + fts5VisitEntries( + p, 0, token.p, token.n, 1, prefixIterSetupTokendataCb, (void*)&ctx + ); + + fts5TokendataIterSortMap(p, ctx.pT); + } + + if( p->rc==SQLITE_OK ){ + pIter->pTokenDataIter = ctx.pT; + }else{ + fts5TokendataIterDelete(ctx.pT); + } + fts5BufferFree(&token); + + return fts5IndexReturn(p); +} + /* ** This is used by xInstToken() to access the token at offset iOff, column ** iCol of row iRowid. The token is returned via output variables *ppOut ** and *pnOut. The iterator passed as the first argument must be a tokendata=1 ** iterator (pIter->pTokenDataIter!=0). 
+**
+** pToken/nToken:
 */
 static int sqlite3Fts5IterToken(
   Fts5IndexIter *pIndexIter,
+  const char *pToken, int nToken,
   i64 iRowid, int iCol, int iOff,
@@ -247710,13 +251817,22 @@ static int sqlite3Fts5IterToken(
 ){
   Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
   Fts5TokenDataIter *pT = pIter->pTokenDataIter;
-  Fts5TokenDataMap *aMap = pT->aMap;
   i64 iPos = (((i64)iCol)<<32) + iOff;
-
+  Fts5TokenDataMap *aMap = 0;
   int i1 = 0;
-  int i2 = pT->nMap;
+  int i2 = 0;
   int iTest = 0;
 
+  assert( pT || (pToken && pIter->nSeg>0) );
+  if( pT==0 ){
+    int rc = fts5SetupPrefixIterTokendata(pIter, pToken, nToken);
+    if( rc!=SQLITE_OK ) return rc;
+    pT = pIter->pTokenDataIter;
+  }
+
+  i2 = pT->nMap;
+  aMap = pT->aMap;
+
   while( i2>i1 ){
     iTest = (i1 + i2) / 2;
@@ -247739,9 +251855,15 @@ static int sqlite3Fts5IterToken(
   }
 
   if( i2>i1 ){
-    Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter];
-    *ppOut = (const char*)pMap->aSeg[0].term.p+1;
-    *pnOut = pMap->aSeg[0].term.n-1;
+    if( pIter->nSeg==0 ){
+      Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter];
+      *ppOut = (const char*)pMap->aSeg[0].term.p+1;
+      *pnOut = pMap->aSeg[0].term.n-1;
+    }else{
+      Fts5TokenDataMap *p = &aMap[iTest];
+      *ppOut = (const char*)&pT->terms.p[p->iIter];
+      *pnOut = aMap[iTest].nByte;
+    }
   }
 
   return SQLITE_OK;
@@ -247753,7 +251875,9 @@ static int sqlite3Fts5IterToken(
 */
 static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){
   Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
-  if( pIter && pIter->pTokenDataIter ){
+  if( pIter && pIter->pTokenDataIter
+   && (pIter->nSeg==0 || pIter->pIndex->pConfig->eDetail!=FTS5_DETAIL_FULL)
+  ){
     pIter->pTokenDataIter->nMap = 0;
   }
 }
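
sqlite3Fts5IterToken() above locates a token with a binary search over the
aMap[] array, which fts5TokendataIterSortMap() has left sorted by
(iRowid, iPos); in the real code iPos packs the column and offset together
as ((iCol<<32)+iOff). A minimal standalone sketch of that two-key search,
not part of the patch (Map and mapLookup are illustrative names):

    #include <stdio.h>
    typedef long long i64;
    typedef struct Map { i64 iRowid; i64 iPos; } Map;

    /* Return the index of the entry matching (iRowid, iPos), or -1 */
    static int mapLookup(const Map *aMap, int nMap, i64 iRowid, i64 iPos){
      int i1 = 0;
      int i2 = nMap;
      while( i2>i1 ){
        int iTest = (i1 + i2) / 2;
        if( iRowid<aMap[iTest].iRowid ){
          i2 = iTest;
        }else if( iRowid>aMap[iTest].iRowid ){
          i1 = iTest+1;
        }else if( iPos<aMap[iTest].iPos ){
          i2 = iTest;
        }else if( iPos>aMap[iTest].iPos ){
          i1 = iTest+1;
        }else{
          return iTest;           /* exact (rowid, pos) hit */
        }
      }
      return -1;
    }

    int main(void){
      Map aMap[] = { {5,1}, {5,9}, {7,2}, {9,4} };
      printf("found at %d\n", mapLookup(aMap, 4, 7, 2));
      return 0;
    }

Comparing rowid first and position second matches the sort order asserted
in the SQLITE_DEBUG block of fts5TokendataIterSortMap(), so the search is
O(log n) per xInstToken() call.
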
@@ -247773,17 +251897,30 @@ static int sqlite3Fts5IndexIterWriteTokendata(
   Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
   Fts5TokenDataIter *pT = pIter->pTokenDataIter;
   Fts5Index *p = pIter->pIndex;
-  int ii;
+  i64 iPos = (((i64)iCol)<<32) + iOff;
 
   assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL );
-  assert( pIter->pTokenDataIter );
-
-  for(ii=0; ii<pT->nIter; ii++){
-    Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term;
-    if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break;
-  }
-  if( ii<pT->nIter ){
-    fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff);
+  assert( pIter->pTokenDataIter || pIter->nSeg>0 );
+  if( pIter->nSeg>0 ){
+    /* This is a prefix term iterator. */
+    if( pT==0 ){
+      pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc,
+          SZ_FTS5TOKENDATAITER(1));
+      pIter->pTokenDataIter = pT;
+    }
+    if( pT ){
+      fts5TokendataIterAppendMap(p, pT, pT->terms.n, nToken, iRowid, iPos);
+      fts5BufferAppendBlob(&p->rc, &pT->terms, nToken, (const u8*)pToken);
+    }
+  }else{
+    int ii;
+    for(ii=0; ii<pT->nIter; ii++){
+      Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term;
+      if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break;
+    }
+    if( ii<pT->nIter ){
+      fts5TokendataIterAppendMap(p, pT, ii, 0, iRowid, iPos);
+    }
   }
   return fts5IndexReturn(p);
 }
@@ -247793,11 +251930,9 @@
 */
 static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){
   if( pIndexIter ){
-    Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
-    Fts5Index *pIndex = pIter->pIndex;
-    fts5TokendataIterDelete(pIter->pTokenDataIter);
-    fts5MultiIterFree(pIter);
-    sqlite3Fts5IndexCloseReader(pIndex);
+    Fts5Index *pIndex = ((Fts5Iter*)pIndexIter)->pIndex;
+    fts5IterClose(pIndexIter);
+    fts5IndexReturn(pIndex);
   }
 }
 
@@ -248327,7 +252462,7 @@ static int fts5QueryCksum(
       rc = sqlite3Fts5IterNext(pIter);
     }
   }
-  sqlite3Fts5IterClose(pIter);
+  fts5IterClose(pIter);
 
   *pCksum = cksum;
   return rc;
@@ -248804,7 +252939,7 @@ static void fts5DecodeRowid(
 #if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
 static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){
-  int iSegid, iHeight, iPgno, bDlidx, bTomb;       /* Rowid compenents */
+  int iSegid, iHeight, iPgno, bDlidx, bTomb;       /* Rowid components */
   fts5DecodeRowid(iKey, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno);
 
   if( iSegid==0 ){
@@ -249050,7 +253185,7 @@ static void fts5DecodeFunction(
   ** buffer overreads even if the record is corrupt. */
   n = sqlite3_value_bytes(apVal[1]);
   aBlob = sqlite3_value_blob(apVal[1]);
-  nSpace = n + FTS5_DATA_ZERO_PADDING;
+  nSpace = ((i64)n) + FTS5_DATA_ZERO_PADDING;
   a = (u8*)sqlite3Fts5MallocZero(&rc, nSpace);
   if( a==0 ) goto decode_out;
   if( n>0 ) memcpy(a, aBlob, n);
@@ -249336,7 +253471,7 @@ static int fts5structConnectMethod(
 
 /*
 ** We must have a single struct=? constraint that will be passed through
-** into the xFilter method. If there is no valid stmt=? constraint,
+** into the xFilter method. If there is no valid struct=? constraint,
 ** then return an SQLITE_CONSTRAINT error.
 */
 static int fts5structBestIndexMethod(
@@ -249678,8 +253813,18 @@ struct Fts5Global {
   Fts5TokenizerModule *pTok;      /* First in list of all tokenizer modules */
   Fts5TokenizerModule *pDfltTok;  /* Default tokenizer module */
   Fts5Cursor *pCsr;               /* First in list of all open cursors */
+  u32 aLocaleHdr[4];
 };
 
+/*
+** Size of header on fts5_locale() values. And macro to access a buffer
+** containing a copy of the header from an Fts5Config pointer.
+*/
+#define FTS5_LOCALE_HDR_SIZE ((int)sizeof( ((Fts5Global*)0)->aLocaleHdr ))
+#define FTS5_LOCALE_HDR(pConfig) ((const u8*)(pConfig->pGlobal->aLocaleHdr))
+
+#define FTS5_INSTTOKEN_SUBTYPE 73
+
 /*
 ** Each auxiliary function registered with the FTS5 module is represented
 ** by an object of the following type. All such objects are stored as part
@@ -249698,11 +253843,28 @@ struct Fts5Auxiliary {
 ** Each tokenizer module registered with the FTS5 module is represented
 ** by an object of the following type. All such objects are stored as part
 ** of the Fts5Global.pTok list.
+**
+** bV2Native:
+**   True if the tokenizer was registered using xCreateTokenizer_v2(), false
+**   for xCreateTokenizer().
If this variable is true, then x2 is populated +** with the routines as supplied by the caller and x1 contains synthesized +** wrapper routines. In this case the user-data pointer passed to +** x1.xCreate should be a pointer to the Fts5TokenizerModule structure, +** not a copy of pUserData. +** +** Of course, if bV2Native is false, then x1 contains the real routines and +** x2 the synthesized ones. In this case a pointer to the Fts5TokenizerModule +** object should be passed to x2.xCreate. +** +** The synthesized wrapper routines are necessary for xFindTokenizer(_v2) +** calls. */ struct Fts5TokenizerModule { char *zName; /* Name of tokenizer */ void *pUserData; /* User pointer passed to xCreate() */ - fts5_tokenizer x; /* Tokenizer functions */ + int bV2Native; /* True if v2 native tokenizer */ + fts5_tokenizer x1; /* Tokenizer functions */ + fts5_tokenizer_v2 x2; /* V2 tokenizer functions */ void (*xDestroy)(void*); /* Destructor function */ Fts5TokenizerModule *pNext; /* Next registered tokenizer module */ }; @@ -249738,9 +253900,11 @@ struct Fts5Sorter { i64 iRowid; /* Current rowid */ const u8 *aPoslist; /* Position lists for current row */ int nIdx; /* Number of entries in aIdx[] */ - int aIdx[1]; /* Offsets into aPoslist for current row */ + int aIdx[FLEXARRAY]; /* Offsets into aPoslist for current row */ }; +/* Size (int bytes) of an Fts5Sorter object with N indexes */ +#define SZ_FTS5SORTER(N) (offsetof(Fts5Sorter,nIdx)+((N+2)/2)*sizeof(i64)) /* ** Virtual-table cursor object. @@ -249790,7 +253954,7 @@ struct Fts5Cursor { Fts5Auxiliary *pAux; /* Currently executing extension function */ Fts5Auxdata *pAuxdata; /* First in linked list of saved aux-data */ - /* Cache used by auxiliary functions xInst() and xInstCount() */ + /* Cache used by auxiliary API functions xInst() and xInstCount() */ Fts5PoslistReader *aInstIter; /* One for each phrase */ int nInstAlloc; /* Size of aInst[] array (entries / 3) */ int nInstCount; /* Number of phrase instances */ @@ -249901,10 +254065,16 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){ #endif /* -** Return true if pTab is a contentless table. +** Return true if pTab is a contentless table. If parameter bIncludeUnindexed +** is true, this includes contentless tables that store UNINDEXED columns +** only. 
*/ -static int fts5IsContentless(Fts5FullTable *pTab){ - return pTab->p.pConfig->eContent==FTS5_CONTENT_NONE; +static int fts5IsContentless(Fts5FullTable *pTab, int bIncludeUnindexed){ + int eContent = pTab->p.pConfig->eContent; + return ( + eContent==FTS5_CONTENT_NONE + || (bIncludeUnindexed && eContent==FTS5_CONTENT_UNINDEXED) + ); } /* @@ -249972,8 +254142,12 @@ static int fts5InitVtab( assert( (rc==SQLITE_OK && *pzErr==0) || pConfig==0 ); } if( rc==SQLITE_OK ){ + pConfig->pzErrmsg = pzErr; pTab->p.pConfig = pConfig; pTab->pGlobal = pGlobal; + if( bCreate || sqlite3Fts5TokenizerPreload(&pConfig->t) ){ + rc = sqlite3Fts5LoadTokenizer(pConfig); + } } /* Open the index sub-system */ @@ -249995,11 +254169,7 @@ static int fts5InitVtab( /* Load the initial configuration */ if( rc==SQLITE_OK ){ - assert( pConfig->pzErrmsg==0 ); - pConfig->pzErrmsg = pzErr; - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); - sqlite3Fts5IndexRollback(pTab->p.pIndex); - pConfig->pzErrmsg = 0; + rc = sqlite3Fts5ConfigLoad(pTab->p.pConfig, pTab->p.pConfig->iCookie-1); } if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){ @@ -250009,6 +254179,7 @@ static int fts5InitVtab( rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); } + if( pConfig ) pConfig->pzErrmsg = 0; if( rc!=SQLITE_OK ){ fts5FreeVtab(pTab); pTab = 0; @@ -250076,10 +254247,10 @@ static int fts5UsePatternMatch( ){ assert( FTS5_PATTERN_GLOB==SQLITE_INDEX_CONSTRAINT_GLOB ); assert( FTS5_PATTERN_LIKE==SQLITE_INDEX_CONSTRAINT_LIKE ); - if( pConfig->ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ + if( pConfig->t.ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ return 1; } - if( pConfig->ePattern==FTS5_PATTERN_LIKE + if( pConfig->t.ePattern==FTS5_PATTERN_LIKE && (p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB) ){ return 1; @@ -250126,10 +254297,10 @@ static int fts5UsePatternMatch( ** This function ensures that there is at most one "r" or "=". And that if ** there exists an "=" then there is no "<" or ">". ** -** Costs are assigned as follows: +** If an unusable MATCH operator is present in the WHERE clause, then +** SQLITE_CONSTRAINT is returned. ** -** a) If an unusable MATCH operator is present in the WHERE clause, the -** cost is unconditionally set to 1e50 (a really big number). +** Costs are assigned as follows: ** ** a) If a MATCH operator is present, the cost depends on the other ** constraints also present. As follows: @@ -250162,7 +254333,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ int bSeenEq = 0; int bSeenGt = 0; int bSeenLt = 0; - int bSeenMatch = 0; + int nSeenMatch = 0; int bSeenRank = 0; @@ -250193,18 +254364,16 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ /* A MATCH operator or equivalent */ if( p->usable==0 || iCol<0 ){ /* As there exists an unusable MATCH constraint this is an - ** unusable plan. Set a prohibitively high cost. */ - pInfo->estimatedCost = 1e50; - assert( iIdxStr < pInfo->nConstraint*6 + 1 ); + ** unusable plan. Return SQLITE_CONSTRAINT. 
*/
         idxStr[iIdxStr] = 0;
-        return SQLITE_OK;
+        return SQLITE_CONSTRAINT;
       }else{
         if( iCol==nCol+1 ){
           if( bSeenRank ) continue;
           idxStr[iIdxStr++] = 'r';
           bSeenRank = 1;
-        }else if( iCol>=0 ){
-          bSeenMatch = 1;
+        }else{
+          nSeenMatch++;
           idxStr[iIdxStr++] = 'M';
           sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol);
           idxStr += strlen(&idxStr[iIdxStr]);
@@ -250221,6 +254390,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
         idxStr += strlen(&idxStr[iIdxStr]);
         pInfo->aConstraintUsage[i].argvIndex = ++iCons;
         assert( idxStr[iIdxStr]=='\0' );
+        nSeenMatch++;
       }else if( bSeenEq==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0 ){
         idxStr[iIdxStr++] = '=';
         bSeenEq = 1;
@@ -250257,7 +254427,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
   */
   if( pInfo->nOrderBy==1 ){
     int iSort = pInfo->aOrderBy[0].iColumn;
-    if( iSort==(pConfig->nCol+1) && bSeenMatch ){
+    if( iSort==(pConfig->nCol+1) && nSeenMatch>0 ){
       idxFlags |= FTS5_BI_ORDER_RANK;
     }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){
       idxFlags |= FTS5_BI_ORDER_ROWID;
@@ -250272,14 +254442,17 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
   /* Calculate the estimated cost based on the flags set in idxFlags. */
   if( bSeenEq ){
-    pInfo->estimatedCost = bSeenMatch ? 100.0 : 10.0;
-    if( bSeenMatch==0 ) fts5SetUniqueFlag(pInfo);
+    pInfo->estimatedCost = nSeenMatch ? 1000.0 : 10.0;
+    if( nSeenMatch==0 ) fts5SetUniqueFlag(pInfo);
   }else if( bSeenLt && bSeenGt ){
-    pInfo->estimatedCost = bSeenMatch ? 500.0 : 250000.0;
+    pInfo->estimatedCost = nSeenMatch ? 5000.0 : 250000.0;
   }else if( bSeenLt || bSeenGt ){
-    pInfo->estimatedCost = bSeenMatch ? 750.0 : 750000.0;
+    pInfo->estimatedCost = nSeenMatch ? 7500.0 : 750000.0;
   }else{
-    pInfo->estimatedCost = bSeenMatch ? 1000.0 : 1000000.0;
+    pInfo->estimatedCost = nSeenMatch ? 10000.0 : 1000000.0;
+  }
+  for(i=1; i<nSeenMatch; i++){
+    pInfo->estimatedCost *= 0.4;
   }
 
   pInfo->idxNum = idxFlags;
@@ -250555,6 +254728,7 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){
         }
       }else{
         rc = SQLITE_OK;
+        CsrFlagSet(pCsr, FTS5CSR_REQUIRE_DOCSIZE);
       }
       break;
     }
@@ -250584,7 +254758,7 @@ static int fts5PrepareStatement(
     rc = sqlite3_prepare_v3(pConfig->db, zSql, -1,
                             SQLITE_PREPARE_PERSISTENT, &pRet, 0);
     if( rc!=SQLITE_OK ){
-      *pConfig->pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(pConfig->db));
+      sqlite3Fts5ConfigErrmsg(pConfig, "%s", sqlite3_errmsg(pConfig->db));
    }
    sqlite3_free(zSql);
  }
@@ -250608,7 +254782,7 @@ static int fts5CursorFirstSorted(
   const char *zRankArgs = pCsr->zRankArgs;
 
   nPhrase = sqlite3Fts5ExprPhraseCount(pCsr->pExpr);
-  nByte = sizeof(Fts5Sorter) + sizeof(int) * (nPhrase-1);
+  nByte = SZ_FTS5SORTER(nPhrase);
   pSorter = (Fts5Sorter*)sqlite3_malloc64(nByte);
   if( pSorter==0 ) return SQLITE_NOMEM;
   memset(pSorter, 0, (size_t)nByte);
@@ -250808,6 +254982,145 @@ static i64 fts5GetRowidLimit(sqlite3_value *pVal, i64 iDefault){
   return iDefault;
 }
 
+/*
+** Set the error message on the virtual table passed as the first argument.
+*/
+static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){
+  va_list ap;                     /* ... printf arguments */
+  va_start(ap, zFormat);
+  sqlite3_free(p->p.base.zErrMsg);
+  p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap);
+  va_end(ap);
+}
+
+/*
+** Arrange for subsequent calls to sqlite3Fts5Tokenize() to use the locale
+** specified by pLocale/nLocale. The buffer indicated by pLocale must remain
+** valid until after the final call to sqlite3Fts5Tokenize() that will use
+** the locale.
+*/ +static void sqlite3Fts5SetLocale( + Fts5Config *pConfig, + const char *zLocale, + int nLocale +){ + Fts5TokenizerConfig *pT = &pConfig->t; + pT->pLocale = zLocale; + pT->nLocale = nLocale; +} + +/* +** Clear any locale configured by an earlier call to sqlite3Fts5SetLocale(). +*/ +static void sqlite3Fts5ClearLocale(Fts5Config *pConfig){ + sqlite3Fts5SetLocale(pConfig, 0, 0); +} + +/* +** Return true if the value passed as the only argument is an +** fts5_locale() value. +*/ +static int sqlite3Fts5IsLocaleValue(Fts5Config *pConfig, sqlite3_value *pVal){ + int ret = 0; + if( sqlite3_value_type(pVal)==SQLITE_BLOB ){ + /* Call sqlite3_value_bytes() after sqlite3_value_blob() in this case. + ** If the blob was created using zeroblob(), then sqlite3_value_blob() + ** may call malloc(). If this malloc() fails, then the values returned + ** by both value_blob() and value_bytes() will be 0. If value_bytes() were + ** called first, then the NULL pointer returned by value_blob() might + ** be dereferenced. */ + const u8 *pBlob = sqlite3_value_blob(pVal); + int nBlob = sqlite3_value_bytes(pVal); + if( nBlob>FTS5_LOCALE_HDR_SIZE + && 0==memcmp(pBlob, FTS5_LOCALE_HDR(pConfig), FTS5_LOCALE_HDR_SIZE) + ){ + ret = 1; + } + } + return ret; +} + +/* +** Value pVal is guaranteed to be an fts5_locale() value, according to +** sqlite3Fts5IsLocaleValue(). This function extracts the text and locale +** from the value and returns them separately. +** +** If successful, SQLITE_OK is returned and (*ppText) and (*ppLoc) set +** to point to buffers containing the text and locale, as utf-8, +** respectively. In this case output parameters (*pnText) and (*pnLoc) are +** set to the sizes in bytes of these two buffers. +** +** Or, if an error occurs, then an SQLite error code is returned. The final +** value of the four output parameters is undefined in this case. +*/ +static int sqlite3Fts5DecodeLocaleValue( + sqlite3_value *pVal, + const char **ppText, + int *pnText, + const char **ppLoc, + int *pnLoc +){ + const char *p = sqlite3_value_blob(pVal); + int n = sqlite3_value_bytes(pVal); + int nLoc = 0; + + assert( sqlite3_value_type(pVal)==SQLITE_BLOB ); + assert( n>FTS5_LOCALE_HDR_SIZE ); + + for(nLoc=FTS5_LOCALE_HDR_SIZE; p[nLoc]; nLoc++){ + if( nLoc==(n-1) ){ + return SQLITE_MISMATCH; + } + } + *ppLoc = &p[FTS5_LOCALE_HDR_SIZE]; + *pnLoc = nLoc - FTS5_LOCALE_HDR_SIZE; + + *ppText = &p[nLoc+1]; + *pnText = n - nLoc - 1; + return SQLITE_OK; +} + +/* +** Argument pVal is the text of a full-text search expression. It may or +** may not have been wrapped by fts5_locale(). This function extracts +** the text of the expression, and sets output variable (*pzText) to +** point to a nul-terminated buffer containing the expression. +** +** If pVal was an fts5_locale() value, then sqlite3Fts5SetLocale() is called +** to set the tokenizer to use the specified locale. +** +** If output variable (*pbFreeAndReset) is set to true, then the caller +** is required to (a) call sqlite3Fts5ClearLocale() to reset the tokenizer +** locale, and (b) call sqlite3_free() to free (*pzText). 
+*/ +static int fts5ExtractExprText( + Fts5Config *pConfig, /* Fts5 configuration */ + sqlite3_value *pVal, /* Value to extract expression text from */ + char **pzText, /* OUT: nul-terminated buffer of text */ + int *pbFreeAndReset /* OUT: Free (*pzText) and clear locale */ +){ + int rc = SQLITE_OK; + + if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + *pzText = sqlite3Fts5Mprintf(&rc, "%.*s", nText, pText); + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + } + *pbFreeAndReset = 1; + }else{ + *pzText = (char*)sqlite3_value_text(pVal); + *pbFreeAndReset = 0; + } + + return rc; +} + + /* ** This is the xFilter interface for the virtual table. See ** the virtual table xFilter method documentation for additional @@ -250838,17 +255151,12 @@ static int fts5FilterMethod( sqlite3_value *pRowidGe = 0; /* rowid >= ? expression (or NULL) */ int iCol; /* Column on LHS of MATCH operator */ char **pzErrmsg = pConfig->pzErrmsg; + int bPrefixInsttoken = pConfig->bPrefixInsttoken; int i; int iIdxStr = 0; Fts5Expr *pExpr = 0; - if( pConfig->bLock ){ - pTab->p.base.zErrMsg = sqlite3_mprintf( - "recursively defined fts5 content table" - ); - return SQLITE_ERROR; - } - + assert( pConfig->bLock==0 ); if( pCsr->ePlan ){ fts5FreeCursorComponents(pCsr); memset(&pCsr->ePlan, 0, sizeof(Fts5Cursor) - ((u8*)&pCsr->ePlan-(u8*)pCsr)); @@ -250872,8 +255180,17 @@ static int fts5FilterMethod( pRank = apVal[i]; break; case 'M': { - const char *zText = (const char*)sqlite3_value_text(apVal[i]); + char *zText = 0; + int bFreeAndReset = 0; + int bInternal = 0; + + rc = fts5ExtractExprText(pConfig, apVal[i], &zText, &bFreeAndReset); + if( rc!=SQLITE_OK ) goto filter_out; if( zText==0 ) zText = ""; + if( sqlite3_value_subtype(apVal[i])==FTS5_INSTTOKEN_SUBTYPE ){ + pConfig->bPrefixInsttoken = 1; + } + iCol = 0; do{ iCol = iCol*10 + (idxStr[iIdxStr]-'0'); @@ -250885,7 +255202,7 @@ static int fts5FilterMethod( ** indicates that the MATCH expression is not a full text query, ** but a request for an internal parameter. 
*/ rc = fts5SpecialMatch(pTab, pCsr, &zText[1]); - goto filter_out; + bInternal = 1; }else{ char **pzErr = &pTab->p.base.zErrMsg; rc = sqlite3Fts5ExprNew(pConfig, 0, iCol, zText, &pExpr, pzErr); @@ -250893,9 +255210,15 @@ static int fts5FilterMethod( rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr); pExpr = 0; } - if( rc!=SQLITE_OK ) goto filter_out; } + if( bFreeAndReset ){ + sqlite3_free(zText); + sqlite3Fts5ClearLocale(pConfig); + } + + if( bInternal || rc!=SQLITE_OK ) goto filter_out; + break; } case 'L': @@ -250983,9 +255306,7 @@ static int fts5FilterMethod( } } }else if( pConfig->zContent==0 ){ - *pConfig->pzErrmsg = sqlite3_mprintf( - "%s: table does not support scanning", pConfig->zName - ); + fts5SetVtabError(pTab,"%s: table does not support scanning",pConfig->zName); rc = SQLITE_ERROR; }else{ /* This is either a full-table scan (ePlan==FTS5_PLAN_SCAN) or a lookup @@ -251009,6 +255330,7 @@ static int fts5FilterMethod( filter_out: sqlite3Fts5ExprFree(pExpr); pConfig->pzErrmsg = pzErrmsg; + pConfig->bPrefixInsttoken = bPrefixInsttoken; return rc; } @@ -251028,9 +255350,13 @@ static i64 fts5CursorRowid(Fts5Cursor *pCsr){ assert( pCsr->ePlan==FTS5_PLAN_MATCH || pCsr->ePlan==FTS5_PLAN_SORTED_MATCH || pCsr->ePlan==FTS5_PLAN_SOURCE + || pCsr->ePlan==FTS5_PLAN_SCAN + || pCsr->ePlan==FTS5_PLAN_ROWID ); if( pCsr->pSorter ){ return pCsr->pSorter->iRowid; + }else if( pCsr->ePlan>=FTS5_PLAN_SCAN ){ + return sqlite3_column_int64(pCsr->pStmt, 0); }else{ return sqlite3Fts5ExprRowid(pCsr->pExpr); } @@ -251047,25 +255373,16 @@ static int fts5RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ int ePlan = pCsr->ePlan; assert( CsrFlagTest(pCsr, FTS5CSR_EOF)==0 ); - switch( ePlan ){ - case FTS5_PLAN_SPECIAL: - *pRowid = 0; - break; - - case FTS5_PLAN_SOURCE: - case FTS5_PLAN_MATCH: - case FTS5_PLAN_SORTED_MATCH: - *pRowid = fts5CursorRowid(pCsr); - break; - - default: - *pRowid = sqlite3_column_int64(pCsr->pStmt, 0); - break; + if( ePlan==FTS5_PLAN_SPECIAL ){ + *pRowid = 0; + }else{ + *pRowid = fts5CursorRowid(pCsr); } return SQLITE_OK; } + /* ** If the cursor requires seeking (bSeekRequired flag is set), seek it. ** Return SQLITE_OK if no error occurs, or an SQLite error code otherwise. @@ -251102,8 +255419,13 @@ static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){ rc = sqlite3_reset(pCsr->pStmt); if( rc==SQLITE_OK ){ rc = FTS5_CORRUPT; + fts5SetVtabError((Fts5FullTable*)pTab, + "fts5: missing row %lld from content table %s", + fts5CursorRowid(pCsr), + pTab->pConfig->zContent + ); }else if( pTab->pConfig->pzErrmsg ){ - *pTab->pConfig->pzErrmsg = sqlite3_mprintf( + fts5SetVtabError((Fts5FullTable*)pTab, "%s", sqlite3_errmsg(pTab->pConfig->db) ); } @@ -251112,14 +255434,6 @@ static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){ return rc; } -static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){ - va_list ap; /* ... printf arguments */ - va_start(ap, zFormat); - assert( p->p.base.zErrMsg==0 ); - p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap); - va_end(ap); -} - /* ** This function is called to handle an FTS INSERT command. 
In other words,
 ** an INSERT statement of the form:
@@ -251157,7 +255471,7 @@ static int fts5SpecialInsert(
     }
     bLoadConfig = 1;
   }else if( 0==sqlite3_stricmp("rebuild", zCmd) ){
-    if( pConfig->eContent==FTS5_CONTENT_NONE ){
+    if( fts5IsContentless(pTab, 1) ){
       fts5SetVtabError(pTab,
           "'rebuild' may not be used with a contentless fts5 table"
       );
@@ -251213,7 +255527,7 @@ static int fts5SpecialDelete(
   int eType1 = sqlite3_value_type(apVal[1]);
   if( eType1==SQLITE_INTEGER ){
     sqlite3_int64 iDel = sqlite3_value_int64(apVal[1]);
-    rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2]);
+    rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2], 0);
   }
   return rc;
 }
@@ -251226,7 +255540,7 @@ static void fts5StorageInsert(
 ){
   int rc = *pRc;
   if( rc==SQLITE_OK ){
-    rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, piRowid);
+    rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, 0, apVal, piRowid);
   }
   if( rc==SQLITE_OK ){
     rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal, *piRowid);
@@ -251234,6 +255548,67 @@ static void fts5StorageInsert(
   *pRc = rc;
 }
 
+/*
+**
+** This function is called when the user attempts an UPDATE on a contentless
+** table. Parameter bRowidModified is true if the UPDATE statement modifies
+** the rowid value. Parameter apVal[] contains the new values for each user
+** defined column of the fts5 table. pConfig is the configuration object of the
+** table being updated (guaranteed to be contentless). The contentless_delete=1
+** and contentless_unindexed=1 options may or may not be set.
+**
+** This function returns SQLITE_OK if the UPDATE can go ahead, or an SQLite
+** error code if it cannot. In this case an error message is also loaded into
+** pConfig. Output parameter (*pbContent) is set to true if the caller should
+** update the %_content table only - not the FTS index or any other shadow
+** table. This occurs when an UPDATE modifies only UNINDEXED columns of the
+** table.
+**
+** An UPDATE may proceed if:
+**
+**   * The only columns modified are UNINDEXED columns, or
+**
+**   * The contentless_delete=1 option was specified and all of the indexed
+**     columns (not a subset) have been modified.
+*/
+static int fts5ContentlessUpdate(
+  Fts5Config *pConfig,
+  sqlite3_value **apVal,
+  int bRowidModified,
+  int *pbContent
+){
+  int ii;
+  int bSeenIndex = 0;             /* Have seen modified indexed column */
+  int bSeenIndexNC = 0;           /* Have seen unmodified indexed column */
+  int rc = SQLITE_OK;
+
+  for(ii=0; ii<pConfig->nCol; ii++){
+    if( pConfig->abUnindexed[ii]==0 ){
+      if( sqlite3_value_nochange(apVal[ii]) ){
+        bSeenIndexNC++;
+      }else{
+        bSeenIndex++;
+      }
+    }
+  }
+
+  if( bSeenIndex==0 && bRowidModified==0 ){
+    *pbContent = 1;
+  }else{
+    if( bSeenIndexNC || pConfig->bContentlessDelete==0 ){
+      rc = SQLITE_ERROR;
+      sqlite3Fts5ConfigErrmsg(pConfig,
+          (pConfig->bContentlessDelete ?
+             "%s a subset of columns on fts5 contentless-delete table: %s" :
+             "%s contentless fts5 table: %s")
+          , "cannot UPDATE", pConfig->zName
+      );
+    }
+  }
+
+  return rc;
+}
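
fts5ContentlessUpdate() above boils down to counting modified and
unmodified indexed columns: the UPDATE may proceed if no indexed column
(and not the rowid) changed, in which case only the %_content table is
written, or if every indexed column changed and contentless_delete=1 is
set. A standalone sketch of just that decision, not part of the patch,
with sqlite3_value handles replaced by plain flags (contentlessUpdateOk is
an illustrative name):

    #include <stdio.h>

    /* Returns 1 if the UPDATE may proceed, 0 otherwise. On success,
    ** *pbContentOnly is set if only the %_content table needs writing. */
    static int contentlessUpdateOk(
      const int *abUnindexed,     /* Per-column UNINDEXED flags */
      const int *abChanged,       /* Per-column "value modified" flags */
      int nCol,
      int bRowidModified,
      int bContentlessDelete,     /* contentless_delete=1 option */
      int *pbContentOnly
    ){
      int ii, nIndexChanged = 0, nIndexSame = 0;
      for(ii=0; ii<nCol; ii++){
        if( abUnindexed[ii]==0 ){
          if( abChanged[ii] ) nIndexChanged++; else nIndexSame++;
        }
      }
      *pbContentOnly = 0;
      if( nIndexChanged==0 && bRowidModified==0 ){
        *pbContentOnly = 1;       /* only UNINDEXED columns touched */
        return 1;
      }
      /* otherwise every indexed column must change, and only with
      ** contentless_delete=1 set */
      return (nIndexSame==0 && bContentlessDelete);
    }

    int main(void){
      int abUnindexed[] = {0, 1}; /* col0 indexed, col1 UNINDEXED */
      int abChanged[]   = {0, 1}; /* UPDATE touches col1 only */
      int bContentOnly = 0;
      int ok = contentlessUpdateOk(abUnindexed, abChanged, 2, 0, 0,
                                   &bContentOnly);
      printf("ok=%d contentOnly=%d\n", ok, bContentOnly);
      return 0;
    }

The partial case is rejected because without stored content the index
cannot recompute the unmodified indexed columns' old tokens, so a
half-changed row could not be removed from the inverted index correctly.
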
 
 /*
 ** This function is the implementation of the xUpdate callback used by
 ** FTS3 virtual tables. It is invoked by SQLite each time a row is to be
@@ -251258,7 +255633,6 @@ static int fts5UpdateMethod(
   Fts5Config *pConfig = pTab->p.pConfig;
   int eType0;                     /* value_type() of apVal[0] */
   int rc = SQLITE_OK;             /* Return code */
-  int bUpdateOrDelete = 0;
 
   /* A transaction must be open when this is called. */
   assert( pTab->ts.eState==1 || pTab->ts.eState==2 );
 );
   assert( pTab->p.pConfig->pzErrmsg==0 );
   if( pConfig->pgsz==0 ){
-    rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+    rc = sqlite3Fts5ConfigLoad(pTab->p.pConfig, pTab->p.pConfig->iCookie);
     if( rc!=SQLITE_OK ) return rc;
   }
 
@@ -251295,7 +255669,6 @@ static int fts5UpdateMethod(
         rc = SQLITE_ERROR;
       }else{
         rc = fts5SpecialDelete(pTab, apVal);
-        bUpdateOrDelete = 1;
       }
     }else{
       rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]);
@@ -251320,88 +255693,104 @@ static int fts5UpdateMethod(
     assert( eType0==SQLITE_INTEGER || eType0==SQLITE_NULL );
     assert( nArg!=1 || eType0==SQLITE_INTEGER );
 
-    /* Filter out attempts to run UPDATE or DELETE on contentless tables.
-    ** This is not suported. Except - they are both supported if the CREATE
-    ** VIRTUAL TABLE statement contained "contentless_delete=1". */
-    if( eType0==SQLITE_INTEGER
-     && pConfig->eContent==FTS5_CONTENT_NONE
-     && pConfig->bContentlessDelete==0
-    ){
-      pTab->p.base.zErrMsg = sqlite3_mprintf(
-          "cannot %s contentless fts5 table: %s",
-          (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName
-      );
-      rc = SQLITE_ERROR;
-    }
-
     /* DELETE */
-    else if( nArg==1 ){
-      i64 iDel = sqlite3_value_int64(apVal[0]);  /* Rowid to delete */
-      rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0);
-      bUpdateOrDelete = 1;
+    if( nArg==1 ){
+      /* It is only possible to DELETE from a contentless table if the
+      ** contentless_delete=1 flag is set. */
+      if( fts5IsContentless(pTab, 1) && pConfig->bContentlessDelete==0 ){
+        fts5SetVtabError(pTab,
+            "cannot DELETE from contentless fts5 table: %s", pConfig->zName
+        );
+        rc = SQLITE_ERROR;
+      }else{
+        i64 iDel = sqlite3_value_int64(apVal[0]);  /* Rowid to delete */
+        rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0, 0);
+      }
    }
 
    /* INSERT or UPDATE */
    else{
      int eType1 = sqlite3_value_numeric_type(apVal[1]);
 
-      if( eType1!=SQLITE_INTEGER && eType1!=SQLITE_NULL ){
-        rc = SQLITE_MISMATCH;
+      /* It is an error to write an fts5_locale() value to a table without
+      ** the locale=1 option. */
+      if( pConfig->bLocale==0 ){
+        int ii;
+        for(ii=0; ii<pConfig->nCol; ii++){
+          sqlite3_value *pVal = apVal[ii+2];
+          if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){
+            fts5SetVtabError(pTab, "fts5_locale() requires locale=1");
+            rc = SQLITE_MISMATCH;
+            goto update_out;
+          }
+        }
      }
 
-      else if( eType0!=SQLITE_INTEGER ){
+      if( eType0!=SQLITE_INTEGER ){
        /* An INSERT statement. If the conflict-mode is REPLACE, first remove
        ** the current entry (if any). */
        if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){
          i64 iNew = sqlite3_value_int64(apVal[1]);  /* Rowid to delete */
-          rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0);
-          bUpdateOrDelete = 1;
+          rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0, 0);
        }
        fts5StorageInsert(&rc, pTab, apVal, pRowid);
      }
 
      /* UPDATE */
      else{
+        Fts5Storage *pStorage = pTab->pStorage;
        i64 iOld = sqlite3_value_int64(apVal[0]);  /* Old rowid */
        i64 iNew = sqlite3_value_int64(apVal[1]);  /* New rowid */
-        if( eType1==SQLITE_INTEGER && iOld!=iNew ){
+        int bContent = 0;                          /* Content only update */
+
+        /* If this is a contentless table (including contentless_unindexed=1
+        ** tables), check if the UPDATE may proceed.
*/ + if( fts5IsContentless(pTab, 1) ){ + rc = fts5ContentlessUpdate(pConfig, &apVal[2], iOld!=iNew, &bContent); + if( rc!=SQLITE_OK ) goto update_out; + } + + if( eType1!=SQLITE_INTEGER ){ + rc = SQLITE_MISMATCH; + }else if( iOld!=iNew ){ + assert( bContent==0 ); if( eConflict==SQLITE_REPLACE ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 1); if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iNew, 0, 0); } fts5StorageInsert(&rc, pTab, apVal, pRowid); }else{ - rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, pRowid); + rc = sqlite3Fts5StorageFindDeleteRow(pStorage, iOld); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5StorageContentInsert(pStorage, 0, apVal, pRowid); + } if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 0); } if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal,*pRowid); + rc = sqlite3Fts5StorageIndexInsert(pStorage, apVal, *pRowid); } } + }else if( bContent ){ + /* This occurs when an UPDATE on a contentless table affects *only* + ** UNINDEXED columns. This is a no-op for contentless_unindexed=0 + ** tables, or a write to the %_content table only for =1 tables. */ + assert( fts5IsContentless(pTab, 1) ); + rc = sqlite3Fts5StorageFindDeleteRow(pStorage, iOld); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5StorageContentInsert(pStorage, 1, apVal, pRowid); + } }else{ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 1); fts5StorageInsert(&rc, pTab, apVal, pRowid); } - bUpdateOrDelete = 1; + sqlite3Fts5StorageReleaseDeleteRow(pStorage); } } } - if( rc==SQLITE_OK - && bUpdateOrDelete - && pConfig->bSecureDelete - && pConfig->iVersion==FTS5_CURRENT_VERSION - ){ - rc = sqlite3Fts5StorageConfigValue( - pTab->pStorage, "version", 0, FTS5_CURRENT_VERSION_SECUREDELETE - ); - if( rc==SQLITE_OK ){ - pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; - } - } - + update_out: pTab->p.pConfig->pzErrmsg = 0; return rc; } @@ -251423,9 +255812,11 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){ ** Implementation of xBegin() method. */ static int fts5BeginMethod(sqlite3_vtab *pVtab){ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0); - fts5NewTransaction((Fts5FullTable*)pVtab); - return SQLITE_OK; + int rc = fts5NewTransaction((Fts5FullTable*)pVtab); + if( rc==SQLITE_OK ){ + fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0); + } + return rc; } /* @@ -251448,6 +255839,7 @@ static int fts5RollbackMethod(sqlite3_vtab *pVtab){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; fts5CheckTransactionState(pTab, FTS5_ROLLBACK, 0); rc = sqlite3Fts5StorageRollback(pTab->pStorage); + pTab->p.pConfig->pgsz = 0; return rc; } @@ -251479,17 +255871,40 @@ static int fts5ApiRowCount(Fts5Context *pCtx, i64 *pnRow){ return sqlite3Fts5StorageRowCount(pTab->pStorage, pnRow); } -static int fts5ApiTokenize( +/* +** Implementation of xTokenize_v2() API. 
+*/ +static int fts5ApiTokenize_v2( Fts5Context *pCtx, const char *pText, int nText, + const char *pLoc, int nLoc, void *pUserData, int (*xToken)(void*, int, const char*, int, int, int) ){ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); - return sqlite3Fts5Tokenize( - pTab->pConfig, FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken + int rc = SQLITE_OK; + + sqlite3Fts5SetLocale(pTab->pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pTab->pConfig, + FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken ); + sqlite3Fts5SetLocale(pTab->pConfig, 0, 0); + + return rc; +} + +/* +** Implementation of xTokenize() API. This is just xTokenize_v2() with NULL/0 +** passed as the locale. +*/ +static int fts5ApiTokenize( + Fts5Context *pCtx, + const char *pText, int nText, + void *pUserData, + int (*xToken)(void*, int, const char*, int, int, int) +){ + return fts5ApiTokenize_v2(pCtx, pText, nText, 0, 0, pUserData, xToken); } static int fts5ApiPhraseCount(Fts5Context *pCtx){ @@ -251502,6 +255917,49 @@ static int fts5ApiPhraseSize(Fts5Context *pCtx, int iPhrase){ return sqlite3Fts5ExprPhraseSize(pCsr->pExpr, iPhrase); } +/* +** Argument pStmt is an SQL statement of the type used by Fts5Cursor. This +** function extracts the text value of column iCol of the current row. +** Additionally, if there is an associated locale, it invokes +** sqlite3Fts5SetLocale() to configure the tokenizer. In all cases the caller +** should invoke sqlite3Fts5ClearLocale() to clear the locale at some point +** after this function returns. +** +** If successful, (*ppText) is set to point to a buffer containing the text +** value as utf-8 and SQLITE_OK returned. (*pnText) is set to the size of that +** buffer in bytes. It is not guaranteed to be nul-terminated. If an error +** occurs, an SQLite error code is returned. The final values of the two +** output parameters are undefined in this case. 
+*/ +static int fts5TextFromStmt( + Fts5Config *pConfig, + sqlite3_stmt *pStmt, + int iCol, + const char **ppText, + int *pnText +){ + sqlite3_value *pVal = sqlite3_column_value(pStmt, iCol+1); + const char *pLoc = 0; + int nLoc = 0; + int rc = SQLITE_OK; + + if( pConfig->bLocale + && pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, ppText, pnText, &pLoc, &nLoc); + }else{ + *ppText = (const char*)sqlite3_value_text(pVal); + *pnText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pConfig->eContent==FTS5_CONTENT_NORMAL ){ + pLoc = (const char*)sqlite3_column_text(pStmt, iCol+1+pConfig->nCol); + nLoc = sqlite3_column_bytes(pStmt, iCol+1+pConfig->nCol); + } + } + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + return rc; +} + static int fts5ApiColumnText( Fts5Context *pCtx, int iCol, @@ -251511,28 +255969,35 @@ static int fts5ApiColumnText( int rc = SQLITE_OK; Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); + + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); if( iCol<0 || iCol>=pTab->pConfig->nCol ){ rc = SQLITE_RANGE; - }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) - || pCsr->ePlan==FTS5_PLAN_SPECIAL - ){ + }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab), 0) ){ *pz = 0; *pn = 0; }else{ rc = fts5SeekCursor(pCsr, 0); if( rc==SQLITE_OK ){ - *pz = (const char*)sqlite3_column_text(pCsr->pStmt, iCol+1); - *pn = sqlite3_column_bytes(pCsr->pStmt, iCol+1); + rc = fts5TextFromStmt(pTab->pConfig, pCsr->pStmt, iCol, pz, pn); + sqlite3Fts5ClearLocale(pTab->pConfig); } } return rc; } +/* +** This is called by various API functions - xInst, xPhraseFirst, +** xPhraseFirstColumn etc. - to obtain the position list for phrase iPhrase +** of the current row. This function works for both detail=full tables (in +** which case the position-list was read from the fts index) or for other +** detail= modes if the row content is available. 
+*/
 static int fts5CsrPoslist(
-  Fts5Cursor *pCsr,
-  int iPhrase,
-  const u8 **pa,
-  int *pn
+  Fts5Cursor *pCsr,               /* Fts5 cursor object */
+  int iPhrase,                    /* Phrase to find position list for */
+  const u8 **pa,                  /* OUT: Pointer to position list buffer */
+  int *pn                         /* OUT: Size of (*pa) in bytes */
 ){
   Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig;
   int rc = SQLITE_OK;
@@ -251540,20 +256005,32 @@ static int fts5CsrPoslist(
 
   if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){
     rc = SQLITE_RANGE;
+  }else if( pConfig->eDetail!=FTS5_DETAIL_FULL
+         && fts5IsContentless((Fts5FullTable*)pCsr->base.pVtab, 1)
+  ){
+    *pa = 0;
+    *pn = 0;
+    return SQLITE_OK;
   }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){
     if( pConfig->eDetail!=FTS5_DETAIL_FULL ){
       Fts5PoslistPopulator *aPopulator;
       int i;
+
       aPopulator = sqlite3Fts5ExprClearPoslists(pCsr->pExpr, bLive);
       if( aPopulator==0 ) rc = SQLITE_NOMEM;
+      if( rc==SQLITE_OK ){
+        rc = fts5SeekCursor(pCsr, 0);
+      }
       for(i=0; i<pConfig->nCol && rc==SQLITE_OK; i++){
-        int n; const char *z;
-        rc = fts5ApiColumnText((Fts5Context*)pCsr, i, &z, &n);
+        const char *z = 0;
+        int n = 0;
+        rc = fts5TextFromStmt(pConfig, pCsr->pStmt, i, &z, &n);
        if( rc==SQLITE_OK ){
          rc = sqlite3Fts5ExprPopulatePoslists(
              pConfig, pCsr->pExpr, aPopulator, i, z, n
          );
        }
+        sqlite3Fts5ClearLocale(pConfig);
      }
      sqlite3_free(aPopulator);
 
@@ -251578,7 +256055,6 @@ static int fts5CsrPoslist(
     *pn = 0;
   }
 
-
   return rc;
 }
 
@@ -251647,7 +256123,8 @@ static int fts5CacheInstArray(Fts5Cursor *pCsr){
       aInst[0] = iBest;
       aInst[1] = FTS5_POS2COLUMN(aIter[iBest].iPos);
       aInst[2] = FTS5_POS2OFFSET(aIter[iBest].iPos);
-      if( aInst[1]<0 || aInst[1]>=nCol ){
+      assert( aInst[1]>=0 );
+      if( aInst[1]>=nCol ){
        rc = FTS5_CORRUPT;
        break;
      }
@@ -251725,7 +256202,7 @@ static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){
     if( pConfig->bColumnsize ){
       i64 iRowid = fts5CursorRowid(pCsr);
       rc = sqlite3Fts5StorageDocsize(pTab->pStorage, iRowid, pCsr->aColumnSize);
-    }else if( pConfig->zContent==0 ){
+    }else if( !pConfig->zContent || pConfig->eContent==FTS5_CONTENT_UNINDEXED ){
      int i;
      for(i=0; i<pConfig->nCol; i++){
        if( pConfig->abUnindexed[i]==0 ){
          pCsr->aColumnSize[i] = -1;
        }
      }
    }else{
      int i;
+      rc = fts5SeekCursor(pCsr, 0);
      for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){
        if( pConfig->abUnindexed[i]==0 ){
-          const char *z; int n;
-          void *p = (void*)(&pCsr->aColumnSize[i]);
+          const char *z = 0;
+          int n = 0;
          pCsr->aColumnSize[i] = 0;
-          rc = fts5ApiColumnText(pCtx, i, &z, &n);
+          rc = fts5TextFromStmt(pConfig, pCsr->pStmt, i, &z, &n);
          if( rc==SQLITE_OK ){
-            rc = sqlite3Fts5Tokenize(
-                pConfig, FTS5_TOKENIZE_AUX, z, n, p, fts5ColumnSizeCb
+            rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_AUX,
+                z, n, (void*)&pCsr->aColumnSize[i], fts5ColumnSizeCb
            );
          }
+          sqlite3Fts5ClearLocale(pConfig);
        }
      }
    }
@@ -251824,11 +256303,10 @@ static void *fts5ApiGetAuxdata(Fts5Context *pCtx, int bClear){
 }
 
 static void fts5ApiPhraseNext(
-  Fts5Context *pUnused,
+  Fts5Context *pCtx,
   Fts5PhraseIter *pIter,
   int *piCol, int *piOff
 ){
-  UNUSED_PARAM(pUnused);
   if( pIter->a>=pIter->b ){
     *piCol = -1;
     *piOff = -1;
   }else{
     int iVal;
     pIter->a += fts5GetVarint32(pIter->a, iVal);
     if( iVal==1 ){
+      /* Avoid returning a (*piCol) value that is too large for the table,
+      ** even if the position-list is corrupt. The caller might not be
+      ** expecting it.
*/ + int nCol = ((Fts5Table*)(((Fts5Cursor*)pCtx)->base.pVtab))->pConfig->nCol; pIter->a += fts5GetVarint32(pIter->a, iVal); - *piCol = iVal; + *piCol = (iVal>=nCol ? nCol-1 : iVal); *piOff = 0; pIter->a += fts5GetVarint32(pIter->a, iVal); } @@ -251987,8 +256469,48 @@ static int fts5ApiQueryPhrase(Fts5Context*, int, void*, int(*)(const Fts5ExtensionApi*, Fts5Context*, void*) ); +/* +** The xColumnLocale() API. +*/ +static int fts5ApiColumnLocale( + Fts5Context *pCtx, + int iCol, + const char **pzLocale, + int *pnLocale +){ + int rc = SQLITE_OK; + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig; + + *pzLocale = 0; + *pnLocale = 0; + + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); + if( iCol<0 || iCol>=pConfig->nCol ){ + rc = SQLITE_RANGE; + }else if( + pConfig->abUnindexed[iCol]==0 + && 0==fts5IsContentless((Fts5FullTable*)pCsr->base.pVtab, 1) + && pConfig->bLocale + ){ + rc = fts5SeekCursor(pCsr, 0); + if( rc==SQLITE_OK ){ + const char *zDummy = 0; + int nDummy = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, iCol, &zDummy, &nDummy); + if( rc==SQLITE_OK ){ + *pzLocale = pConfig->t.pLocale; + *pnLocale = pConfig->t.nLocale; + } + sqlite3Fts5ClearLocale(pConfig); + } + } + + return rc; +} + static const Fts5ExtensionApi sFts5Api = { - 3, /* iVersion */ + 4, /* iVersion */ fts5ApiUserData, fts5ApiColumnCount, fts5ApiRowCount, @@ -252009,7 +256531,9 @@ static const Fts5ExtensionApi sFts5Api = { fts5ApiPhraseFirstColumn, fts5ApiPhraseNextColumn, fts5ApiQueryToken, - fts5ApiInstToken + fts5ApiInstToken, + fts5ApiColumnLocale, + fts5ApiTokenize_v2 }; /* @@ -252060,6 +256584,7 @@ static void fts5ApiInvoke( sqlite3_value **argv ){ assert( pCsr->pAux==0 ); + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); pCsr->pAux = pAux; pAux->xFunc(&sFts5Api, (Fts5Context*)pCsr, context, argc, argv); pCsr->pAux = 0; @@ -252073,6 +256598,21 @@ static Fts5Cursor *fts5CursorFromCsrid(Fts5Global *pGlobal, i64 iCsrId){ return pCsr; } +/* +** Parameter zFmt is a printf() style formatting string. This function +** formats it using the trailing arguments and returns the result as +** an error message to the context passed as the first argument. +*/ +static void fts5ResultError(sqlite3_context *pCtx, const char *zFmt, ...){ + char *zErr = 0; + va_list ap; + va_start(ap, zFmt); + zErr = sqlite3_vmprintf(zFmt, ap); + sqlite3_result_error(pCtx, zErr, -1); + sqlite3_free(zErr); + va_end(ap); +} + static void fts5ApiCallback( sqlite3_context *context, int argc, @@ -252088,12 +256628,13 @@ static void fts5ApiCallback( iCsrId = sqlite3_value_int64(argv[0]); pCsr = fts5CursorFromCsrid(pAux->pGlobal, iCsrId); - if( pCsr==0 || pCsr->ePlan==0 ){ - char *zErr = sqlite3_mprintf("no such cursor: %lld", iCsrId); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); + if( pCsr==0 || (pCsr->ePlan==0 || pCsr->ePlan==FTS5_PLAN_SPECIAL) ){ + fts5ResultError(context, "no such cursor: %lld", iCsrId); }else{ + sqlite3_vtab *pTab = pCsr->base.pVtab; fts5ApiInvoke(pAux, pCsr, context, argc-1, &argv[1]); + sqlite3_free(pTab->zErrMsg); + pTab->zErrMsg = 0; } } @@ -252211,8 +256752,8 @@ static int fts5ColumnMethod( ** auxiliary function. */ sqlite3_result_int64(pCtx, pCsr->iCsrId); }else if( iCol==pConfig->nCol+1 ){ - /* The value of the "rank" column. 
*/ + if( pCsr->ePlan==FTS5_PLAN_SOURCE ){ fts5PoslistBlob(pCtx, pCsr); }else if( @@ -252223,20 +256764,32 @@ static int fts5ColumnMethod( fts5ApiInvoke(pCsr->pRank, pCsr, pCtx, pCsr->nRankArg, pCsr->apRankArg); } } - }else if( !fts5IsContentless(pTab) ){ - pConfig->pzErrmsg = &pTab->p.base.zErrMsg; - rc = fts5SeekCursor(pCsr, 1); - if( rc==SQLITE_OK ){ - sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); + }else{ + if( !sqlite3_vtab_nochange(pCtx) && pConfig->eContent!=FTS5_CONTENT_NONE ){ + pConfig->pzErrmsg = &pTab->p.base.zErrMsg; + rc = fts5SeekCursor(pCsr, 1); + if( rc==SQLITE_OK ){ + sqlite3_value *pVal = sqlite3_column_value(pCsr->pStmt, iCol+1); + if( pConfig->bLocale + && pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + const char *z = 0; + int n = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, iCol, &z, &n); + if( rc==SQLITE_OK ){ + sqlite3_result_text(pCtx, z, n, SQLITE_TRANSIENT); + } + sqlite3Fts5ClearLocale(pConfig); + }else{ + sqlite3_result_value(pCtx, pVal); + } + } + + pConfig->pzErrmsg = 0; } - pConfig->pzErrmsg = 0; - }else if( pConfig->bContentlessDelete && sqlite3_vtab_nochange(pCtx) ){ - char *zErr = sqlite3_mprintf("cannot UPDATE a subset of " - "columns on fts5 contentless-delete table: %s", pConfig->zName - ); - sqlite3_result_error(pCtx, zErr, -1); - sqlite3_free(zErr); } + return rc; } @@ -252376,47 +256929,210 @@ static int fts5CreateAux( } /* -** Register a new tokenizer. This is the implementation of the -** fts5_api.xCreateTokenizer() method. +** This function is used by xCreateTokenizer_v2() and xCreateTokenizer(). +** It allocates and partially populates a new Fts5TokenizerModule object. +** The new object is already linked into the Fts5Global context before +** returning. +** +** If successful, SQLITE_OK is returned and a pointer to the new +** Fts5TokenizerModule object returned via output parameter (*ppNew). All +** that is required is for the caller to fill in the methods in +** Fts5TokenizerModule.x1 and x2, and to set Fts5TokenizerModule.bV2Native +** as appropriate. +** +** If an error occurs, an SQLite error code is returned and the final value +** of (*ppNew) undefined. */ -static int fts5CreateTokenizer( - fts5_api *pApi, /* Global context (one per db handle) */ +static int fts5NewTokenizerModule( + Fts5Global *pGlobal, /* Global context (one per db handle) */ const char *zName, /* Name of new function */ void *pUserData, /* User data for aux. 
function */ - fts5_tokenizer *pTokenizer, /* Tokenizer implementation */ - void(*xDestroy)(void*) /* Destructor for pUserData */ + void(*xDestroy)(void*), /* Destructor for pUserData */ + Fts5TokenizerModule **ppNew ){ - Fts5Global *pGlobal = (Fts5Global*)pApi; - Fts5TokenizerModule *pNew; - sqlite3_int64 nName; /* Size of zName and its \0 terminator */ - sqlite3_int64 nByte; /* Bytes of space to allocate */ int rc = SQLITE_OK; + Fts5TokenizerModule *pNew; + sqlite3_int64 nName; /* Size of zName and its \0 terminator */ + sqlite3_int64 nByte; /* Bytes of space to allocate */ nName = strlen(zName) + 1; nByte = sizeof(Fts5TokenizerModule) + nName; - pNew = (Fts5TokenizerModule*)sqlite3_malloc64(nByte); + *ppNew = pNew = (Fts5TokenizerModule*)sqlite3Fts5MallocZero(&rc, nByte); if( pNew ){ - memset(pNew, 0, (size_t)nByte); pNew->zName = (char*)&pNew[1]; memcpy(pNew->zName, zName, nName); pNew->pUserData = pUserData; - pNew->x = *pTokenizer; pNew->xDestroy = xDestroy; pNew->pNext = pGlobal->pTok; pGlobal->pTok = pNew; if( pNew->pNext==0 ){ pGlobal->pDfltTok = pNew; } + } + + return rc; +} + +/* +** An instance of this type is used as the Fts5Tokenizer object for +** wrapper tokenizers - those that provide access to a v1 tokenizer via +** the fts5_tokenizer_v2 API, and those that provide access to a v2 tokenizer +** via the fts5_tokenizer API. +*/ +typedef struct Fts5VtoVTokenizer Fts5VtoVTokenizer; +struct Fts5VtoVTokenizer { + int bV2Native; /* True if v2 native tokenizer */ + fts5_tokenizer x1; /* Tokenizer functions */ + fts5_tokenizer_v2 x2; /* V2 tokenizer functions */ + Fts5Tokenizer *pReal; +}; + +/* +** Create a wrapper tokenizer. The context argument pCtx points to the +** Fts5TokenizerModule object. +*/ +static int fts5VtoVCreate( + void *pCtx, + const char **azArg, + int nArg, + Fts5Tokenizer **ppOut +){ + Fts5TokenizerModule *pMod = (Fts5TokenizerModule*)pCtx; + Fts5VtoVTokenizer *pNew = 0; + int rc = SQLITE_OK; + + pNew = (Fts5VtoVTokenizer*)sqlite3Fts5MallocZero(&rc, sizeof(*pNew)); + if( rc==SQLITE_OK ){ + pNew->x1 = pMod->x1; + pNew->x2 = pMod->x2; + pNew->bV2Native = pMod->bV2Native; + if( pMod->bV2Native ){ + rc = pMod->x2.xCreate(pMod->pUserData, azArg, nArg, &pNew->pReal); + }else{ + rc = pMod->x1.xCreate(pMod->pUserData, azArg, nArg, &pNew->pReal); + } + if( rc!=SQLITE_OK ){ + sqlite3_free(pNew); + pNew = 0; + } + } + + *ppOut = (Fts5Tokenizer*)pNew; + return rc; +} + +/* +** Delete an Fts5VtoVTokenizer wrapper tokenizer. +*/ +static void fts5VtoVDelete(Fts5Tokenizer *pTok){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + if( p ){ + if( p->bV2Native ){ + p->x2.xDelete(p->pReal); + }else{ + p->x1.xDelete(p->pReal); + } + sqlite3_free(p); + } +} + + +/* +** xTokenizer method for a wrapper tokenizer that offers the v1 interface +** (no support for locales). +*/ +static int fts5V1toV2Tokenize( + Fts5Tokenizer *pTok, + void *pCtx, int flags, + const char *pText, int nText, + int (*xToken)(void*, int, const char*, int, int, int) +){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + assert( p->bV2Native ); + return p->x2.xTokenize(p->pReal, pCtx, flags, pText, nText, 0, 0, xToken); +} + +/* +** xTokenizer method for a wrapper tokenizer that offers the v2 interface +** (with locale support). 
+*/ +static int fts5V2toV1Tokenize( + Fts5Tokenizer *pTok, + void *pCtx, int flags, + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)(void*, int, const char*, int, int, int) +){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + assert( p->bV2Native==0 ); + UNUSED_PARAM2(pLocale,nLocale); + return p->x1.xTokenize(p->pReal, pCtx, flags, pText, nText, xToken); +} + +/* +** Register a new tokenizer. This is the implementation of the +** fts5_api.xCreateTokenizer_v2() method. +*/ +static int fts5CreateTokenizer_v2( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of new function */ + void *pUserData, /* User data for aux. function */ + fts5_tokenizer_v2 *pTokenizer, /* Tokenizer implementation */ + void(*xDestroy)(void*) /* Destructor for pUserData */ +){ + Fts5Global *pGlobal = (Fts5Global*)pApi; + int rc = SQLITE_OK; + + if( pTokenizer->iVersion>2 ){ + rc = SQLITE_ERROR; }else{ - rc = SQLITE_NOMEM; + Fts5TokenizerModule *pNew = 0; + rc = fts5NewTokenizerModule(pGlobal, zName, pUserData, xDestroy, &pNew); + if( pNew ){ + pNew->x2 = *pTokenizer; + pNew->bV2Native = 1; + pNew->x1.xCreate = fts5VtoVCreate; + pNew->x1.xTokenize = fts5V1toV2Tokenize; + pNew->x1.xDelete = fts5VtoVDelete; + } } return rc; } +/* +** The fts5_api.xCreateTokenizer() method. +*/ +static int fts5CreateTokenizer( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of new function */ + void *pUserData, /* User data for aux. function */ + fts5_tokenizer *pTokenizer, /* Tokenizer implementation */ + void(*xDestroy)(void*) /* Destructor for pUserData */ +){ + Fts5TokenizerModule *pNew = 0; + int rc = SQLITE_OK; + + rc = fts5NewTokenizerModule( + (Fts5Global*)pApi, zName, pUserData, xDestroy, &pNew + ); + if( pNew ){ + pNew->x1 = *pTokenizer; + pNew->x2.xCreate = fts5VtoVCreate; + pNew->x2.xTokenize = fts5V2toV1Tokenize; + pNew->x2.xDelete = fts5VtoVDelete; + } + return rc; +} + +/* +** Search the global context passed as the first argument for a tokenizer +** module named zName. If found, return a pointer to the Fts5TokenizerModule +** object. Otherwise, return NULL. +*/ static Fts5TokenizerModule *fts5LocateTokenizer( - Fts5Global *pGlobal, - const char *zName + Fts5Global *pGlobal, /* Global (one per db handle) object */ + const char *zName /* Name of tokenizer module to find */ ){ Fts5TokenizerModule *pMod = 0; @@ -252431,6 +257147,36 @@ static Fts5TokenizerModule *fts5LocateTokenizer( return pMod; } +/* +** Find a tokenizer. This is the implementation of the +** fts5_api.xFindTokenizer_v2() method. +*/ +static int fts5FindTokenizer_v2( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of tokenizer */ + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer /* Populate this object */ +){ + int rc = SQLITE_OK; + Fts5TokenizerModule *pMod; + + pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName); + if( pMod ){ + if( pMod->bV2Native ){ + *ppUserData = pMod->pUserData; + }else{ + *ppUserData = (void*)pMod; + } + *ppTokenizer = &pMod->x2; + }else{ + *ppTokenizer = 0; + *ppUserData = 0; + rc = SQLITE_ERROR; + } + + return rc; +} + /* ** Find a tokenizer. This is the implementation of the ** fts5_api.xFindTokenizer() method. 
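**
** Taken together, the v2 registration and lookup methods above let a
** third-party extension register and later locate a locale-aware
** tokenizer. A sketch, with the my* names standing in for user-supplied
** callbacks:
**
**   fts5_tokenizer_v2 tok = { 2, myCreate, myDelete, myTokenize };
**   rc = pApi->xCreateTokenizer_v2(pApi, "mytok", pUserData, &tok, 0);
**   ...
**   fts5_tokenizer_v2 *pTok = 0;
**   void *pCtx = 0;
**   rc = pApi->xFindTokenizer_v2(pApi, "mytok", &pCtx, &pTok);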
@@ -252446,55 +257192,75 @@ static int fts5FindTokenizer( pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName); if( pMod ){ - *pTokenizer = pMod->x; - *ppUserData = pMod->pUserData; + if( pMod->bV2Native==0 ){ + *ppUserData = pMod->pUserData; + }else{ + *ppUserData = (void*)pMod; + } + *pTokenizer = pMod->x1; }else{ - memset(pTokenizer, 0, sizeof(fts5_tokenizer)); + memset(pTokenizer, 0, sizeof(*pTokenizer)); + *ppUserData = 0; rc = SQLITE_ERROR; } return rc; } -static int sqlite3Fts5GetTokenizer( - Fts5Global *pGlobal, - const char **azArg, - int nArg, - Fts5Config *pConfig, - char **pzErr -){ - Fts5TokenizerModule *pMod; +/* +** Attempt to instantiate the tokenizer. +*/ +static int sqlite3Fts5LoadTokenizer(Fts5Config *pConfig){ + const char **azArg = pConfig->t.azArg; + const int nArg = pConfig->t.nArg; + Fts5TokenizerModule *pMod = 0; int rc = SQLITE_OK; - pMod = fts5LocateTokenizer(pGlobal, nArg==0 ? 0 : azArg[0]); + pMod = fts5LocateTokenizer(pConfig->pGlobal, nArg==0 ? 0 : azArg[0]); if( pMod==0 ){ assert( nArg>0 ); rc = SQLITE_ERROR; - if( pzErr ) *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); + sqlite3Fts5ConfigErrmsg(pConfig, "no such tokenizer: %s", azArg[0]); }else{ - rc = pMod->x.xCreate( - pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok + int (*xCreate)(void*, const char**, int, Fts5Tokenizer**) = 0; + if( pMod->bV2Native ){ + xCreate = pMod->x2.xCreate; + pConfig->t.pApi2 = &pMod->x2; + }else{ + pConfig->t.pApi1 = &pMod->x1; + xCreate = pMod->x1.xCreate; + } + + rc = xCreate(pMod->pUserData, + (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->t.pTok ); - pConfig->pTokApi = &pMod->x; + if( rc!=SQLITE_OK ){ - if( pzErr && rc!=SQLITE_NOMEM ){ - *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + if( rc!=SQLITE_NOMEM ){ + sqlite3Fts5ConfigErrmsg(pConfig, "error in tokenizer constructor"); } - }else{ - pConfig->ePattern = sqlite3Fts5TokenizerPattern( - pMod->x.xCreate, pConfig->pTok + }else if( pMod->bV2Native==0 ){ + pConfig->t.ePattern = sqlite3Fts5TokenizerPattern( + pMod->x1.xCreate, pConfig->t.pTok ); } } if( rc!=SQLITE_OK ){ - pConfig->pTokApi = 0; - pConfig->pTok = 0; + pConfig->t.pApi1 = 0; + pConfig->t.pApi2 = 0; + pConfig->t.pTok = 0; } return rc; } + +/* +** xDestroy callback passed to sqlite3_create_module(). This is invoked +** when the db handle is being closed. Free memory associated with +** tokenizers and aux functions registered with this db handle. +*/ static void fts5ModuleDestroy(void *pCtx){ Fts5TokenizerModule *pTok, *pNextTok; Fts5Auxiliary *pAux, *pNextAux; @@ -252515,6 +257281,10 @@ static void fts5ModuleDestroy(void *pCtx){ sqlite3_free(pGlobal); } +/* +** Implementation of the fts5() function used by clients to obtain the +** API pointer. +*/ static void fts5Fts5Func( sqlite3_context *pCtx, /* Function call context */ int nArg, /* Number of args */ @@ -252538,7 +257308,82 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3", -1, SQLITE_TRANSIENT); +} + +/* +** Implementation of fts5_locale(LOCALE, TEXT) function. +** +** If parameter LOCALE is NULL, or a zero-length string, then a copy of +** TEXT is returned. 
Otherwise, both LOCALE and TEXT are interpreted as +** text, and the value returned is a blob consisting of: +** +** * The 4 bytes 0x00, 0xE0, 0xB2, 0xEb (FTS5_LOCALE_HEADER). +** * The LOCALE, as utf-8 text, followed by +** * 0x00, followed by +** * The TEXT, as utf-8 text. +** +** There is no final nul-terminator following the TEXT value. +*/ +static void fts5LocaleFunc( + sqlite3_context *pCtx, /* Function call context */ + int nArg, /* Number of args */ + sqlite3_value **apArg /* Function arguments */ +){ + const char *zLocale = 0; + int nLocale = 0; + const char *zText = 0; + int nText = 0; + + assert( nArg==2 ); + UNUSED_PARAM(nArg); + + zLocale = (const char*)sqlite3_value_text(apArg[0]); + nLocale = sqlite3_value_bytes(apArg[0]); + + zText = (const char*)sqlite3_value_text(apArg[1]); + nText = sqlite3_value_bytes(apArg[1]); + + if( zLocale==0 || zLocale[0]=='\0' ){ + sqlite3_result_text(pCtx, zText, nText, SQLITE_TRANSIENT); + }else{ + Fts5Global *p = (Fts5Global*)sqlite3_user_data(pCtx); + u8 *pBlob = 0; + u8 *pCsr = 0; + int nBlob = 0; + + nBlob = FTS5_LOCALE_HDR_SIZE + nLocale + 1 + nText; + pBlob = (u8*)sqlite3_malloc(nBlob); + if( pBlob==0 ){ + sqlite3_result_error_nomem(pCtx); + return; + } + + pCsr = pBlob; + memcpy(pCsr, (const u8*)p->aLocaleHdr, FTS5_LOCALE_HDR_SIZE); + pCsr += FTS5_LOCALE_HDR_SIZE; + memcpy(pCsr, zLocale, nLocale); + pCsr += nLocale; + (*pCsr++) = 0x00; + if( zText ) memcpy(pCsr, zText, nText); + assert( &pCsr[nText]==&pBlob[nBlob] ); + + sqlite3_result_blob(pCtx, pBlob, nBlob, sqlite3_free); + } +} + +/* +** Implementation of fts5_insttoken() function. +*/ +static void fts5InsttokenFunc( + sqlite3_context *pCtx, /* Function call context */ + int nArg, /* Number of args */ + sqlite3_value **apArg /* Function arguments */ +){ + assert( nArg==1 ); + (void)nArg; + sqlite3_result_value(pCtx, apArg[0]); + sqlite3_result_subtype(pCtx, FTS5_INSTTOKEN_SUBTYPE); } /* @@ -252633,10 +257478,22 @@ static int fts5Init(sqlite3 *db){ void *p = (void*)pGlobal; memset(pGlobal, 0, sizeof(Fts5Global)); pGlobal->db = db; - pGlobal->api.iVersion = 2; + pGlobal->api.iVersion = 3; pGlobal->api.xCreateFunction = fts5CreateAux; pGlobal->api.xCreateTokenizer = fts5CreateTokenizer; pGlobal->api.xFindTokenizer = fts5FindTokenizer; + pGlobal->api.xCreateTokenizer_v2 = fts5CreateTokenizer_v2; + pGlobal->api.xFindTokenizer_v2 = fts5FindTokenizer_v2; + + /* Initialize pGlobal->aLocaleHdr[] to a 128-bit pseudo-random vector. + ** The constants below were generated randomly. 
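**
** This header is what later allows values constructed by fts5_locale()
** to be recognized as locale/text pairs. A minimal usage sketch,
** assuming db is an open database handle and "ft" is a hypothetical
** locale=1 table with one column "x":
**
**   rc = sqlite3_exec(db,
**     "INSERT INTO ft(rowid, x) VALUES(1, fts5_locale('en_US','hello'))",
**     0, 0, 0
**   );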
*/ + sqlite3_randomness(sizeof(pGlobal->aLocaleHdr), pGlobal->aLocaleHdr); + pGlobal->aLocaleHdr[0] ^= 0xF924976D; + pGlobal->aLocaleHdr[1] ^= 0x16596E13; + pGlobal->aLocaleHdr[2] ^= 0x7C80BEAA; + pGlobal->aLocaleHdr[3] ^= 0x9B03A67F; + assert( sizeof(pGlobal->aLocaleHdr)==16 ); + rc = sqlite3_create_module_v2(db, "fts5", &fts5Mod, p, fts5ModuleDestroy); if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexInit(db); if( rc==SQLITE_OK ) rc = sqlite3Fts5ExprInit(pGlobal, db); @@ -252655,6 +257512,20 @@ static int fts5Init(sqlite3 *db){ p, fts5SourceIdFunc, 0, 0 ); } + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function( + db, "fts5_locale", 2, + SQLITE_UTF8|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE|SQLITE_SUBTYPE, + p, fts5LocaleFunc, 0, 0 + ); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function( + db, "fts5_insttoken", 1, + SQLITE_UTF8|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE, + p, fts5InsttokenFunc, 0, 0 + ); + } } /* If SQLITE_FTS5_ENABLE_TEST_MI is defined, assume that the file @@ -252662,8 +257533,8 @@ static int fts5Init(sqlite3 *db){ ** its entry point to enable the matchinfo() demo. */ #ifdef SQLITE_FTS5_ENABLE_TEST_MI if( rc==SQLITE_OK ){ - extern int sqlite3Fts5TestRegisterMatchinfo(sqlite3*); - rc = sqlite3Fts5TestRegisterMatchinfo(db); + extern int sqlite3Fts5TestRegisterMatchinfoAPI(fts5_api*); + rc = sqlite3Fts5TestRegisterMatchinfoAPI(&pGlobal->api); } #endif @@ -252729,13 +257600,40 @@ SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3 *db){ /* #include "fts5Int.h" */ +/* +** pSavedRow: +** SQL statement FTS5_STMT_LOOKUP2 is a copy of FTS5_STMT_LOOKUP, it +** does a by-rowid lookup to retrieve a single row from the %_content +** table or equivalent external-content table/view. +** +** However, FTS5_STMT_LOOKUP2 is only used when retrieving the original +** values for a row being UPDATEd. In that case, the SQL statement is +** not reset and pSavedRow is set to point at it. This is so that the +** insert operation that follows the delete may access the original +** row values for any new values for which sqlite3_value_nochange() returns +** true. i.e. if the user executes: +** +** CREATE VIRTUAL TABLE ft USING fts5(a, b, c, locale=1); +** ... +** UPDATE fts SET a=?, b=? WHERE rowid=?; +** +** then the value passed to the xUpdate() method of this table as the +** new.c value is an sqlite3_value_nochange() value. So in this case it +** must be read from the saved row stored in Fts5Storage.pSavedRow. +** +** This is necessary - using sqlite3_value_nochange() instead of just having +** SQLite pass the original value back via xUpdate() - so as not to discard +** any locale information associated with such values. 
+** +*/ struct Fts5Storage { Fts5Config *pConfig; Fts5Index *pIndex; int bTotalsValid; /* True if nTotalRow/aTotalSize[] are valid */ i64 nTotalRow; /* Total number of rows in FTS table */ i64 *aTotalSize; /* Total sizes of each column */ - sqlite3_stmt *aStmt[11]; + sqlite3_stmt *pSavedRow; + sqlite3_stmt *aStmt[12]; }; @@ -252749,14 +257647,15 @@ struct Fts5Storage { # error "FTS5_STMT_LOOKUP mismatch" #endif -#define FTS5_STMT_INSERT_CONTENT 3 -#define FTS5_STMT_REPLACE_CONTENT 4 -#define FTS5_STMT_DELETE_CONTENT 5 -#define FTS5_STMT_REPLACE_DOCSIZE 6 -#define FTS5_STMT_DELETE_DOCSIZE 7 -#define FTS5_STMT_LOOKUP_DOCSIZE 8 -#define FTS5_STMT_REPLACE_CONFIG 9 -#define FTS5_STMT_SCAN 10 +#define FTS5_STMT_LOOKUP2 3 +#define FTS5_STMT_INSERT_CONTENT 4 +#define FTS5_STMT_REPLACE_CONTENT 5 +#define FTS5_STMT_DELETE_CONTENT 6 +#define FTS5_STMT_REPLACE_DOCSIZE 7 +#define FTS5_STMT_DELETE_DOCSIZE 8 +#define FTS5_STMT_LOOKUP_DOCSIZE 9 +#define FTS5_STMT_REPLACE_CONFIG 10 +#define FTS5_STMT_SCAN 11 /* ** Prepare the two insert statements - Fts5Storage.pInsertContent and @@ -252786,6 +257685,7 @@ static int fts5StorageGetStmt( "SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC", "SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? ORDER BY T.%Q DESC", "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP */ + "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP2 */ "INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */ "REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */ @@ -252801,6 +257701,8 @@ static int fts5StorageGetStmt( Fts5Config *pC = p->pConfig; char *zSql = 0; + assert( ArraySize(azStmt)==ArraySize(p->aStmt) ); + switch( eStmt ){ case FTS5_STMT_SCAN: zSql = sqlite3_mprintf(azStmt[eStmt], @@ -252817,6 +257719,7 @@ static int fts5StorageGetStmt( break; case FTS5_STMT_LOOKUP: + case FTS5_STMT_LOOKUP2: zSql = sqlite3_mprintf(azStmt[eStmt], pC->zContentExprlist, pC->zContent, pC->zContentRowid ); @@ -252824,20 +257727,35 @@ static int fts5StorageGetStmt( case FTS5_STMT_INSERT_CONTENT: case FTS5_STMT_REPLACE_CONTENT: { - int nCol = pC->nCol + 1; - char *zBind; + char *zBind = 0; int i; - zBind = sqlite3_malloc64(1 + nCol*2); - if( zBind ){ - for(i=0; ieContent==FTS5_CONTENT_NORMAL + || pC->eContent==FTS5_CONTENT_UNINDEXED + ); + + /* Add bindings for the "c*" columns - those that store the actual + ** table content. If eContent==NORMAL, then there is one binding + ** for each column. Or, if eContent==UNINDEXED, then there are only + ** bindings for the UNINDEXED columns. */ + for(i=0; rc==SQLITE_OK && i<(pC->nCol+1); i++){ + if( !i || pC->eContent==FTS5_CONTENT_NORMAL || pC->abUnindexed[i-1] ){ + zBind = sqlite3Fts5Mprintf(&rc, "%z%s?%d", zBind, zBind?",":"",i+1); } - zBind[i*2-1] = '\0'; - zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, zBind); - sqlite3_free(zBind); } + + /* Add bindings for any "l*" columns. Only non-UNINDEXED columns + ** require these. 
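**
** For example (a sketch), a table declared as:
**
**   CREATE VIRTUAL TABLE ft USING fts5(a, b UNINDEXED, locale=1);
**
** is backed by a shadow table of the form:
**
**   CREATE TABLE 'ft_content'(id INTEGER PRIMARY KEY, c0, c1, l0);
**
** so the statement built here binds ?1 (id), ?2 (c0), ?3 (c1) and
** ?4 (l0).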
*/ + if( pC->bLocale && pC->eContent==FTS5_CONTENT_NORMAL ){ + for(i=0; rc==SQLITE_OK && inCol; i++){ + if( pC->abUnindexed[i]==0 ){ + zBind = sqlite3Fts5Mprintf(&rc, "%z,?%d", zBind, pC->nCol+i+2); + } + } + } + + zSql = sqlite3Fts5Mprintf(&rc, azStmt[eStmt], pC->zDb, pC->zName,zBind); + sqlite3_free(zBind); break; } @@ -252863,7 +257781,7 @@ static int fts5StorageGetStmt( rc = SQLITE_NOMEM; }else{ int f = SQLITE_PREPARE_PERSISTENT; - if( eStmt>FTS5_STMT_LOOKUP ) f |= SQLITE_PREPARE_NO_VTAB; + if( eStmt>FTS5_STMT_LOOKUP2 ) f |= SQLITE_PREPARE_NO_VTAB; p->pConfig->bLock++; rc = sqlite3_prepare_v3(pC->db, zSql, -1, f, &p->aStmt[eStmt], 0); p->pConfig->bLock--; @@ -252871,6 +257789,11 @@ static int fts5StorageGetStmt( if( rc!=SQLITE_OK && pzErrMsg ){ *pzErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pC->db)); } + if( rc==SQLITE_ERROR && eStmt>FTS5_STMT_LOOKUP2 && eStmtpIndex = pIndex; if( bCreate ){ - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ int nDefn = 32 + pConfig->nCol*10; - char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 10); + char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 20); if( zDefn==0 ){ rc = SQLITE_NOMEM; }else{ @@ -253034,8 +257959,20 @@ static int sqlite3Fts5StorageOpen( sqlite3_snprintf(nDefn, zDefn, "id INTEGER PRIMARY KEY"); iOff = (int)strlen(zDefn); for(i=0; inCol; i++){ - sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i); - iOff += (int)strlen(&zDefn[iOff]); + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->abUnindexed[i] + ){ + sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i); + iOff += (int)strlen(&zDefn[iOff]); + } + } + if( pConfig->bLocale ){ + for(i=0; inCol; i++){ + if( pConfig->abUnindexed[i]==0 ){ + sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", l%d", i); + iOff += (int)strlen(&zDefn[iOff]); + } + } } rc = sqlite3Fts5CreateTable(pConfig, "content", zDefn, 0, pzErr); } @@ -253112,15 +258049,49 @@ static int fts5StorageInsertCallback( return sqlite3Fts5IndexWrite(pIdx, pCtx->iCol, pCtx->szCol-1, pToken, nToken); } +/* +** This function is used as part of an UPDATE statement that modifies the +** rowid of a row. In that case, this function is called first to set +** Fts5Storage.pSavedRow to point to a statement that may be used to +** access the original values of the row being deleted - iDel. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +** It is not considered an error if row iDel does not exist. In this case +** pSavedRow is not set and SQLITE_OK returned. +*/ +static int sqlite3Fts5StorageFindDeleteRow(Fts5Storage *p, i64 iDel){ + int rc = SQLITE_OK; + sqlite3_stmt *pSeek = 0; + + assert( p->pSavedRow==0 ); + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP+1, &pSeek, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pSeek, 1, iDel); + if( sqlite3_step(pSeek)!=SQLITE_ROW ){ + rc = sqlite3_reset(pSeek); + }else{ + p->pSavedRow = pSeek; + } + } + + return rc; +} + /* ** If a row with rowid iDel is present in the %_content table, add the ** delete-markers to the FTS index necessary to delete it. Do not actually ** remove the %_content row at this time though. +** +** If parameter bSaveRow is true, then Fts5Storage.pSavedRow is left +** pointing to a statement (FTS5_STMT_LOOKUP2) that may be used to access +** the original values of the row being deleted. This is used by UPDATE +** statements. 
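**
** For example (a sketch), executing:
**
**   UPDATE ft SET rowid=rowid+1 WHERE rowid=10;
**
** deletes row 10 and re-inserts it as row 11. Each column of the new
** row is then an sqlite3_value_nochange() value, so its text and any
** locale must be read back from the saved row.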
*/ static int fts5StorageDeleteFromIndex( Fts5Storage *p, i64 iDel, - sqlite3_value **apVal + sqlite3_value **apVal, + int bSaveRow /* True to set pSavedRow */ ){ Fts5Config *pConfig = p->pConfig; sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */ @@ -253129,12 +258100,21 @@ static int fts5StorageDeleteFromIndex( int iCol; Fts5InsertCtx ctx; + assert( bSaveRow==0 || apVal==0 ); + assert( bSaveRow==0 || bSaveRow==1 ); + assert( FTS5_STMT_LOOKUP2==FTS5_STMT_LOOKUP+1 ); + if( apVal==0 ){ - rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP, &pSeek, 0); - if( rc!=SQLITE_OK ) return rc; - sqlite3_bind_int64(pSeek, 1, iDel); - if( sqlite3_step(pSeek)!=SQLITE_ROW ){ - return sqlite3_reset(pSeek); + if( p->pSavedRow && bSaveRow ){ + pSeek = p->pSavedRow; + p->pSavedRow = 0; + }else{ + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP+bSaveRow, &pSeek, 0); + if( rc!=SQLITE_OK ) return rc; + sqlite3_bind_int64(pSeek, 1, iDel); + if( sqlite3_step(pSeek)!=SQLITE_ROW ){ + return sqlite3_reset(pSeek); + } } } @@ -253142,27 +258122,56 @@ static int fts5StorageDeleteFromIndex( ctx.iCol = -1; for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){ if( pConfig->abUnindexed[iCol-1]==0 ){ - const char *zText; - int nText; + sqlite3_value *pVal = 0; + sqlite3_value *pFree = 0; + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + assert( pSeek==0 || apVal==0 ); assert( pSeek!=0 || apVal!=0 ); if( pSeek ){ - zText = (const char*)sqlite3_column_text(pSeek, iCol); - nText = sqlite3_column_bytes(pSeek, iCol); - }else if( ALWAYS(apVal) ){ - zText = (const char*)sqlite3_value_text(apVal[iCol-1]); - nText = sqlite3_value_bytes(apVal[iCol-1]); + pVal = sqlite3_column_value(pSeek, iCol); }else{ - continue; + pVal = apVal[iCol-1]; } - ctx.szCol = 0; - rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, - zText, nText, (void*)&ctx, fts5StorageInsertCallback - ); - p->aTotalSize[iCol-1] -= (i64)ctx.szCol; - if( p->aTotalSize[iCol-1]<0 ){ - rc = FTS5_CORRUPT; + + if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + if( sqlite3_value_type(pVal)!=SQLITE_TEXT ){ + /* Make a copy of the value to work with. This is because the call + ** to sqlite3_value_text() below forces the type of the value to + ** SQLITE_TEXT, and we may need to use it again later. 
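**
** In other words, the code below follows the generic pattern for
** reading a value as text without mutating the caller's copy:
**
**   sqlite3_value *pCopy = sqlite3_value_dup(pVal);
**   const char *z = pCopy ? (const char*)sqlite3_value_text(pCopy) : 0;
**   ...
**   sqlite3_value_free(pCopy);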
*/ + pFree = pVal = sqlite3_value_dup(pVal); + if( pVal==0 ){ + rc = SQLITE_NOMEM; + } + } + if( rc==SQLITE_OK ){ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pSeek ){ + pLoc = (const char*)sqlite3_column_text(pSeek, iCol+pConfig->nCol); + nLoc = sqlite3_column_bytes(pSeek, iCol + pConfig->nCol); + } + } } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + ctx.szCol = 0; + rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, + pText, nText, (void*)&ctx, fts5StorageInsertCallback + ); + p->aTotalSize[iCol-1] -= (i64)ctx.szCol; + if( rc==SQLITE_OK && p->aTotalSize[iCol-1]<0 ){ + rc = FTS5_CORRUPT; + } + sqlite3Fts5ClearLocale(pConfig); + } + sqlite3_value_free(pFree); } } if( rc==SQLITE_OK && p->nTotalRow<1 ){ @@ -253171,11 +258180,29 @@ static int fts5StorageDeleteFromIndex( p->nTotalRow--; } - rc2 = sqlite3_reset(pSeek); - if( rc==SQLITE_OK ) rc = rc2; + if( rc==SQLITE_OK && bSaveRow ){ + assert( p->pSavedRow==0 ); + p->pSavedRow = pSeek; + }else{ + rc2 = sqlite3_reset(pSeek); + if( rc==SQLITE_OK ) rc = rc2; + } return rc; } +/* +** Reset any saved statement pSavedRow. Zero pSavedRow as well. This +** should be called by the xUpdate() method of the fts5 table before +** returning from any operation that may have set Fts5Storage.pSavedRow. +*/ +static void sqlite3Fts5StorageReleaseDeleteRow(Fts5Storage *pStorage){ + assert( pStorage->pSavedRow==0 + || pStorage->pSavedRow==pStorage->aStmt[FTS5_STMT_LOOKUP2] + ); + sqlite3_reset(pStorage->pSavedRow); + pStorage->pSavedRow = 0; +} + /* ** This function is called to process a DELETE on a contentless_delete=1 ** table. It adds the tombstone required to delete the entry with rowid @@ -253188,7 +258215,9 @@ static int fts5StorageContentlessDelete(Fts5Storage *p, i64 iDel){ int rc = SQLITE_OK; assert( p->pConfig->bContentlessDelete ); - assert( p->pConfig->eContent==FTS5_CONTENT_NONE ); + assert( p->pConfig->eContent==FTS5_CONTENT_NONE + || p->pConfig->eContent==FTS5_CONTENT_UNINDEXED + ); /* Look up the origin of the document in the %_docsize table. Store ** this in stack variable iOrigin. */ @@ -253232,12 +258261,12 @@ static int fts5StorageInsertDocsize( rc = sqlite3Fts5IndexGetOrigin(p->pIndex, &iOrigin); sqlite3_bind_int64(pReplace, 3, iOrigin); } - if( rc==SQLITE_OK ){ - sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); - sqlite3_step(pReplace); - rc = sqlite3_reset(pReplace); - sqlite3_bind_null(pReplace, 2); - } + } + if( rc==SQLITE_OK ){ + sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); + sqlite3_step(pReplace); + rc = sqlite3_reset(pReplace); + sqlite3_bind_null(pReplace, 2); } } return rc; @@ -253291,7 +258320,12 @@ static int fts5StorageSaveTotals(Fts5Storage *p){ /* ** Remove a row from the FTS table. 
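**
** For an external-content table this path is reached via the fts5
** 'delete' command, which supplies the old column values in apVal[].
** For example, for a hypothetical table ft(a, b):
**
**   INSERT INTO ft(ft, rowid, a, b) VALUES('delete', 10, $old_a, $old_b);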
*/ -static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **apVal){ +static int sqlite3Fts5StorageDelete( + Fts5Storage *p, /* Storage object */ + i64 iDel, /* Rowid to delete from table */ + sqlite3_value **apVal, /* Optional - values to remove from index */ + int bSaveRow /* If true, set pSavedRow for deleted row */ +){ Fts5Config *pConfig = p->pConfig; int rc; sqlite3_stmt *pDel = 0; @@ -253307,8 +258341,14 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap if( rc==SQLITE_OK ){ if( p->pConfig->bContentlessDelete ){ rc = fts5StorageContentlessDelete(p, iDel); + if( rc==SQLITE_OK + && bSaveRow + && p->pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ + rc = sqlite3Fts5StorageFindDeleteRow(p, iDel); + } }else{ - rc = fts5StorageDeleteFromIndex(p, iDel, apVal); + rc = fts5StorageDeleteFromIndex(p, iDel, apVal, bSaveRow); } } @@ -253323,7 +258363,9 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap } /* Delete the %_content record */ - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ if( rc==SQLITE_OK ){ rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_CONTENT, &pDel, 0); } @@ -253355,8 +258397,13 @@ static int sqlite3Fts5StorageDeleteAll(Fts5Storage *p){ ); if( rc==SQLITE_OK && pConfig->bColumnsize ){ rc = fts5ExecPrintf(pConfig->db, 0, - "DELETE FROM %Q.'%q_docsize';", - pConfig->zDb, pConfig->zName + "DELETE FROM %Q.'%q_docsize';", pConfig->zDb, pConfig->zName + ); + } + + if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_UNINDEXED ){ + rc = fts5ExecPrintf(pConfig->db, 0, + "DELETE FROM %Q.'%q_content';", pConfig->zDb, pConfig->zName ); } @@ -253397,14 +258444,36 @@ static int sqlite3Fts5StorageRebuild(Fts5Storage *p){ for(ctx.iCol=0; rc==SQLITE_OK && ctx.iColnCol; ctx.iCol++){ ctx.szCol = 0; if( pConfig->abUnindexed[ctx.iCol]==0 ){ - const char *zText = (const char*)sqlite3_column_text(pScan, ctx.iCol+1); - int nText = sqlite3_column_bytes(pScan, ctx.iCol+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageInsertCallback - ); + int nText = 0; /* Size of pText in bytes */ + const char *pText = 0; /* Pointer to buffer containing text value */ + int nLoc = 0; /* Size of pLoc in bytes */ + const char *pLoc = 0; /* Pointer to buffer containing text value */ + + sqlite3_value *pVal = sqlite3_column_value(pScan, ctx.iCol+1); + if( pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale ){ + int iCol = ctx.iCol + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(pScan, iCol); + nLoc = sqlite3_column_bytes(pScan, iCol); + } + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + pText, nText, + (void*)&ctx, + fts5StorageInsertCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } } sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol); p->aTotalSize[ctx.iCol] += (i64)ctx.szCol; @@ -253470,6 +258539,7 @@ static int fts5StorageNewRowid(Fts5Storage *p, i64 *piRowid){ */ static int sqlite3Fts5StorageContentInsert( Fts5Storage *p, + int bReplace, /* True to use REPLACE instead of INSERT */ sqlite3_value **apVal, i64 *piRowid ){ @@ -253477,7 +258547,9 @@ static 
int sqlite3Fts5StorageContentInsert( int rc = SQLITE_OK; /* Insert the new row into the %_content table. */ - if( pConfig->eContent!=FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent!=FTS5_CONTENT_NORMAL + && pConfig->eContent!=FTS5_CONTENT_UNINDEXED + ){ if( sqlite3_value_type(apVal[1])==SQLITE_INTEGER ){ *piRowid = sqlite3_value_int64(apVal[1]); }else{ @@ -253486,9 +258558,52 @@ static int sqlite3Fts5StorageContentInsert( }else{ sqlite3_stmt *pInsert = 0; /* Statement to write %_content table */ int i; /* Counter variable */ - rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT, &pInsert, 0); - for(i=1; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){ - rc = sqlite3_bind_value(pInsert, i, apVal[i]); + + assert( FTS5_STMT_INSERT_CONTENT+1==FTS5_STMT_REPLACE_CONTENT ); + assert( bReplace==0 || bReplace==1 ); + rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT+bReplace, &pInsert, 0); + if( pInsert ) sqlite3_clear_bindings(pInsert); + + /* Bind the rowid value */ + sqlite3_bind_value(pInsert, 1, apVal[1]); + + /* Loop through values for user-defined columns. i=2 is the leftmost + ** user-defined column. As is column 1 of pSavedRow. */ + for(i=2; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){ + int bUnindexed = pConfig->abUnindexed[i-2]; + if( pConfig->eContent==FTS5_CONTENT_NORMAL || bUnindexed ){ + sqlite3_value *pVal = apVal[i]; + + if( sqlite3_value_nochange(pVal) && p->pSavedRow ){ + /* This is an UPDATE statement, and user-defined column (i-2) was not + ** modified. Retrieve the value from Fts5Storage.pSavedRow. */ + pVal = sqlite3_column_value(p->pSavedRow, i-1); + if( pConfig->bLocale && bUnindexed==0 ){ + sqlite3_bind_value(pInsert, pConfig->nCol + i, + sqlite3_column_value(p->pSavedRow, pConfig->nCol + i - 1) + ); + } + }else if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + const char *pText = 0; + const char *pLoc = 0; + int nText = 0; + int nLoc = 0; + assert( pConfig->bLocale ); + + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + if( rc==SQLITE_OK ){ + sqlite3_bind_text(pInsert, i, pText, nText, SQLITE_TRANSIENT); + if( bUnindexed==0 ){ + int iLoc = pConfig->nCol + i; + sqlite3_bind_text(pInsert, iLoc, pLoc, nLoc, SQLITE_TRANSIENT); + } + } + + continue; + } + + rc = sqlite3_bind_value(pInsert, i, pVal); + } } if( rc==SQLITE_OK ){ sqlite3_step(pInsert); @@ -253523,14 +258638,38 @@ static int sqlite3Fts5StorageIndexInsert( for(ctx.iCol=0; rc==SQLITE_OK && ctx.iColnCol; ctx.iCol++){ ctx.szCol = 0; if( pConfig->abUnindexed[ctx.iCol]==0 ){ - const char *zText = (const char*)sqlite3_value_text(apVal[ctx.iCol+2]); - int nText = sqlite3_value_bytes(apVal[ctx.iCol+2]); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageInsertCallback - ); + int nText = 0; /* Size of pText in bytes */ + const char *pText = 0; /* Pointer to buffer containing text value */ + int nLoc = 0; /* Size of pText in bytes */ + const char *pLoc = 0; /* Pointer to buffer containing text value */ + + sqlite3_value *pVal = apVal[ctx.iCol+2]; + if( p->pSavedRow && sqlite3_value_nochange(pVal) ){ + pVal = sqlite3_column_value(p->pSavedRow, ctx.iCol+1); + if( pConfig->eContent==FTS5_CONTENT_NORMAL && pConfig->bLocale ){ + int iCol = ctx.iCol + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(p->pSavedRow, iCol); + nLoc = sqlite3_column_bytes(p->pSavedRow, iCol); + } + }else{ + pVal = apVal[ctx.iCol+2]; + } + + if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, 
&nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, pText, nText, (void*)&ctx, + fts5StorageInsertCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } } sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol); p->aTotalSize[ctx.iCol] += (i64)ctx.szCol; @@ -253694,29 +258833,61 @@ static int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg){ rc = sqlite3Fts5TermsetNew(&ctx.pTermset); } for(i=0; rc==SQLITE_OK && inCol; i++){ - if( pConfig->abUnindexed[i] ) continue; - ctx.iCol = i; - ctx.szCol = 0; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - rc = sqlite3Fts5TermsetNew(&ctx.pTermset); - } - if( rc==SQLITE_OK ){ - const char *zText = (const char*)sqlite3_column_text(pScan, i+1); - int nText = sqlite3_column_bytes(pScan, i+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageIntegrityCallback - ); - } - if( rc==SQLITE_OK && pConfig->bColumnsize && ctx.szCol!=aColSize[i] ){ - rc = FTS5_CORRUPT; - } - aTotalSize[i] += ctx.szCol; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - sqlite3Fts5TermsetFree(ctx.pTermset); - ctx.pTermset = 0; + if( pConfig->abUnindexed[i]==0 ){ + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + sqlite3_value *pVal = sqlite3_column_value(pScan, i+1); + + if( pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue( + pVal, &pText, &nText, &pLoc, &nLoc + ); + }else{ + if( pConfig->eContent==FTS5_CONTENT_NORMAL && pConfig->bLocale ){ + int iCol = i + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(pScan, iCol); + nLoc = sqlite3_column_bytes(pScan, iCol); + } + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + } + + ctx.iCol = i; + ctx.szCol = 0; + + if( rc==SQLITE_OK && pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + rc = sqlite3Fts5TermsetNew(&ctx.pTermset); + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + pText, nText, + (void*)&ctx, + fts5StorageIntegrityCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } + + /* If this is not a columnsize=0 database, check that the number + ** of tokens in the value matches the aColSize[] value read from + ** the %_docsize table. 
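**
** The surrounding scan is what runs when the user issues the documented
** integrity-check command, e.g. (db being an open database handle):
**
**   rc = sqlite3_exec(db,
**     "INSERT INTO ft(ft) VALUES('integrity-check')", 0, 0, 0
**   );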
*/ + if( rc==SQLITE_OK + && pConfig->bColumnsize + && ctx.szCol!=aColSize[i] + ){ + rc = FTS5_CORRUPT; + } + aTotalSize[i] += ctx.szCol; + if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + sqlite3Fts5TermsetFree(ctx.pTermset); + ctx.pTermset = 0; + } } } sqlite3Fts5TermsetFree(ctx.pTermset); @@ -254023,7 +259194,7 @@ static int fts5AsciiCreate( int i; memset(p, 0, sizeof(AsciiTokenizer)); memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar)); - for(i=0; rc==SQLITE_OK && i=0xc0 ){ \ c = sqlite3Utf8Trans1[c-0xc0]; \ - while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ + while( zInpTokenizer ){ - p->tokenizer.xDelete(p->pTokenizer); + p->tokenizer_v2.xDelete(p->pTokenizer); } sqlite3_free(p); } @@ -254531,6 +259699,7 @@ static int fts5PorterCreate( PorterTokenizer *pRet; void *pUserdata = 0; const char *zBase = "unicode61"; + fts5_tokenizer_v2 *pV2 = 0; if( nArg>0 ){ zBase = azArg[0]; @@ -254539,14 +259708,15 @@ static int fts5PorterCreate( pRet = (PorterTokenizer*)sqlite3_malloc(sizeof(PorterTokenizer)); if( pRet ){ memset(pRet, 0, sizeof(PorterTokenizer)); - rc = pApi->xFindTokenizer(pApi, zBase, &pUserdata, &pRet->tokenizer); + rc = pApi->xFindTokenizer_v2(pApi, zBase, &pUserdata, &pV2); }else{ rc = SQLITE_NOMEM; } if( rc==SQLITE_OK ){ int nArg2 = (nArg>0 ? nArg-1 : 0); - const char **azArg2 = (nArg2 ? &azArg[1] : 0); - rc = pRet->tokenizer.xCreate(pUserdata, azArg2, nArg2, &pRet->pTokenizer); + const char **az2 = (nArg2 ? &azArg[1] : 0); + memcpy(&pRet->tokenizer_v2, pV2, sizeof(fts5_tokenizer_v2)); + rc = pRet->tokenizer_v2.xCreate(pUserdata, az2, nArg2, &pRet->pTokenizer); } if( rc!=SQLITE_OK ){ @@ -255197,6 +260367,7 @@ static int fts5PorterTokenize( void *pCtx, int flags, const char *pText, int nText, + const char *pLoc, int nLoc, int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd) ){ PorterTokenizer *p = (PorterTokenizer*)pTokenizer; @@ -255204,8 +260375,8 @@ static int fts5PorterTokenize( sCtx.xToken = xToken; sCtx.pCtx = pCtx; sCtx.aBuf = p->aBuf; - return p->tokenizer.xTokenize( - p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb + return p->tokenizer_v2.xTokenize( + p->pTokenizer, (void*)&sCtx, flags, pText, nText, pLoc, nLoc, fts5PorterCb ); } @@ -255235,41 +260406,46 @@ static int fts5TriCreate( Fts5Tokenizer **ppOut ){ int rc = SQLITE_OK; - TrigramTokenizer *pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew)); + TrigramTokenizer *pNew = 0; UNUSED_PARAM(pUnused); - if( pNew==0 ){ - rc = SQLITE_NOMEM; + if( nArg%2 ){ + rc = SQLITE_ERROR; }else{ int i; - pNew->bFold = 1; - pNew->iFoldParam = 0; - for(i=0; rc==SQLITE_OK && ibFold = 1; + pNew->iFoldParam = 0; + + for(i=0; rc==SQLITE_OK && ibFold = (zArg[0]=='0'); + } + }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ + if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ + rc = SQLITE_ERROR; + }else{ + pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0; + } }else{ - pNew->bFold = (zArg[0]=='0'); - } - }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ - if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ rc = SQLITE_ERROR; - }else{ - pNew->iFoldParam = (zArg[0]!='0') ? 
2 : 0; } - }else{ - rc = SQLITE_ERROR; } - } - if( iiFoldParam!=0 && pNew->bFold==0 ){ - rc = SQLITE_ERROR; - } + if( pNew->iFoldParam!=0 && pNew->bFold==0 ){ + rc = SQLITE_ERROR; + } - if( rc!=SQLITE_OK ){ - fts5TriDelete((Fts5Tokenizer*)pNew); - pNew = 0; + if( rc!=SQLITE_OK ){ + fts5TriDelete((Fts5Tokenizer*)pNew); + pNew = 0; + } } } *ppOut = (Fts5Tokenizer*)pNew; @@ -255292,8 +260468,8 @@ static int fts5TriTokenize( char *zOut = aBuf; int ii; const unsigned char *zIn = (const unsigned char*)pText; - const unsigned char *zEof = &zIn[nText]; - u32 iCode; + const unsigned char *zEof = (zIn ? &zIn[nText] : 0); + u32 iCode = 0; int aStart[3]; /* Input offset of each character in aBuf[] */ UNUSED_PARAM(unusedFlags); @@ -255302,8 +260478,8 @@ static int fts5TriTokenize( for(ii=0; ii<3; ii++){ do { aStart[ii] = zIn - (const unsigned char*)pText; + if( zIn>=zEof ) return SQLITE_OK; READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) return SQLITE_OK; if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); }while( iCode==0 ); WRITE_UTF8(zOut, iCode); @@ -255324,8 +260500,11 @@ static int fts5TriTokenize( /* Read characters from the input up until the first non-diacritic */ do { iNext = zIn - (const unsigned char*)pText; + if( zIn>=zEof ){ + iCode = 0; + break; + } READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); }while( iCode==0 ); @@ -255374,6 +260553,16 @@ static int sqlite3Fts5TokenizerPattern( return FTS5_PATTERN_NONE; } +/* +** Return true if the tokenizer described by p->azArg[] is the trigram +** tokenizer. This tokenizer needs to be loaded before xBestIndex is +** called for the first time in order to correctly handle LIKE/GLOB. +*/ +static int sqlite3Fts5TokenizerPreload(Fts5TokenizerConfig *p){ + return (p->nArg>=1 && 0==sqlite3_stricmp(p->azArg[0], "trigram")); +} + + /* ** Register all built-in tokenizers with FTS5. */ @@ -255384,7 +260573,6 @@ static int sqlite3Fts5TokenizerInit(fts5_api *pApi){ } aBuiltin[] = { { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}}, { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }}, - { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }}, { "trigram", {fts5TriCreate, fts5TriDelete, fts5TriTokenize}}, }; @@ -255399,7 +260587,20 @@ static int sqlite3Fts5TokenizerInit(fts5_api *pApi){ 0 ); } - + if( rc==SQLITE_OK ){ + fts5_tokenizer_v2 sPorter = { + 2, + fts5PorterCreate, + fts5PorterDelete, + fts5PorterTokenize + }; + rc = pApi->xCreateTokenizer_v2(pApi, + "porter", + (void*)pApi, + &sPorter, + 0 + ); + } return rc; } @@ -255769,6 +260970,9 @@ static int sqlite3Fts5UnicodeCatParse(const char *zCat, u8 *aArray){ default: return 1; } break; + + default: + return 1; } return 0; } @@ -256181,7 +261385,6 @@ static void sqlite3Fts5UnicodeAscii(u8 *aArray, u8 *aAscii){ aAscii[0] = 0; /* 0x00 is never a token character */ } - /* ** 2015 May 30 ** @@ -256593,6 +261796,7 @@ struct Fts5VocabCursor { int nLeTerm; /* Size of zLeTerm in bytes */ char *zLeTerm; /* (term <= $zLeTerm) paramater, or NULL */ + int colUsed; /* Copy of sqlite3_index_info.colUsed */ /* These are used by 'col' tables only */ int iCol; @@ -256619,9 +261823,11 @@ struct Fts5VocabCursor { /* ** Bits for the mask used as the idxNum value by xBestIndex/xFilter. 
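**
** The low 8 bits of idxNum carry a copy of sqlite3_index_info.colUsed.
** For example, on a fts5vocab 'row' table an idxNum of 0x0104 encodes a
** "term = ?" constraint with only the cnt column (colUsed bit 0x04)
** read by the query.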
*/ -#define FTS5_VOCAB_TERM_EQ 0x01 -#define FTS5_VOCAB_TERM_GE 0x02 -#define FTS5_VOCAB_TERM_LE 0x04 +#define FTS5_VOCAB_TERM_EQ 0x0100 +#define FTS5_VOCAB_TERM_GE 0x0200 +#define FTS5_VOCAB_TERM_LE 0x0400 + +#define FTS5_VOCAB_COLUSED_MASK 0xFF /* @@ -256719,12 +261925,12 @@ static int fts5VocabInitVtab( *pzErr = sqlite3_mprintf("wrong number of vtable arguments"); rc = SQLITE_ERROR; }else{ - int nByte; /* Bytes of space to allocate */ + i64 nByte; /* Bytes of space to allocate */ const char *zDb = bDb ? argv[3] : argv[1]; const char *zTab = bDb ? argv[4] : argv[3]; const char *zType = bDb ? argv[5] : argv[4]; - int nDb = (int)strlen(zDb)+1; - int nTab = (int)strlen(zTab)+1; + i64 nDb = strlen(zDb)+1; + i64 nTab = strlen(zTab)+1; int eType = 0; rc = fts5VocabTableType(zType, pzErr, &eType); @@ -256798,11 +262004,13 @@ static int fts5VocabBestIndexMethod( int iTermEq = -1; int iTermGe = -1; int iTermLe = -1; - int idxNum = 0; + int idxNum = (int)pInfo->colUsed; int nArg = 0; UNUSED_PARAM(pUnused); + assert( (pInfo->colUsed & FTS5_VOCAB_COLUSED_MASK)==pInfo->colUsed ); + for(i=0; inConstraint; i++){ struct sqlite3_index_constraint *p = &pInfo->aConstraint[i]; if( p->usable==0 ) continue; @@ -256894,7 +262102,7 @@ static int fts5VocabOpenMethod( if( rc==SQLITE_OK ){ pVTab->zErrMsg = sqlite3_mprintf( "no such fts5 table: %s.%s", pTab->zFts5Db, pTab->zFts5Tbl - ); + ); rc = SQLITE_ERROR; } }else{ @@ -257054,9 +262262,19 @@ static int fts5VocabNextMethod(sqlite3_vtab_cursor *pCursor){ switch( pTab->eType ){ case FTS5_VOCAB_ROW: - if( eDetail==FTS5_DETAIL_FULL ){ - while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff, &iPos) ){ - pCsr->aCnt[0]++; + /* Do not bother counting the number of instances if the "cnt" + ** column is not being read (according to colUsed). */ + if( eDetail==FTS5_DETAIL_FULL && (pCsr->colUsed & 0x04) ){ + while( iPosaCnt[] */ + pCsr->aCnt[0]++; + } } } pCsr->aDoc[0]++; @@ -257154,6 +262372,7 @@ static int fts5VocabFilterMethod( if( idxNum & FTS5_VOCAB_TERM_EQ ) pEq = apVal[iVal++]; if( idxNum & FTS5_VOCAB_TERM_GE ) pGe = apVal[iVal++]; if( idxNum & FTS5_VOCAB_TERM_LE ) pLe = apVal[iVal++]; + pCsr->colUsed = (idxNum & FTS5_VOCAB_COLUSED_MASK); if( pEq ){ zTerm = (const char *)sqlite3_value_text(pEq); @@ -257321,7 +262540,7 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){ } - +/* Here ends the fts5.c composite file. */ #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */ /************** End of fts5.c ************************************************/ @@ -257677,363 +262896,9 @@ SQLITE_API int sqlite3_stmt_init( /************** End of stmt.c ************************************************/ /* Return the source-id for this library */ SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } +#endif /* SQLITE_AMALGAMATION */ /************************** End of sqlite3.c ******************************/ #else // USE_LIBSQLITE3 // If users really want to link against the system sqlite3 we // need to make this file a noop. - #endif -/* -** 2014-09-08 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the bulk of the implementation of the -** user-authentication extension feature. 
Some parts of the user- -** authentication code are contained within the SQLite core (in the -** src/ subdirectory of the main source code tree) but those parts -** that could reasonable be separated out are moved into this file. -** -** To compile with the user-authentication feature, append this file to -** end of an SQLite amalgamation, then add the SQLITE_USER_AUTHENTICATION -** compile-time option. See the user-auth.txt file in the same source -** directory as this file for additional information. -*/ -#ifdef SQLITE_USER_AUTHENTICATION -#ifndef SQLITEINT_H -# include "sqliteInt.h" -#endif - -/* -** Prepare an SQL statement for use by the user authentication logic. -** Return a pointer to the prepared statement on success. Return a -** NULL pointer if there is an error of any kind. -*/ -static sqlite3_stmt *sqlite3UserAuthPrepare( - sqlite3 *db, - const char *zFormat, - ... -){ - sqlite3_stmt *pStmt; - char *zSql; - int rc; - va_list ap; - u64 savedFlags = db->flags; - - va_start(ap, zFormat); - zSql = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - if( zSql==0 ) return 0; - db->flags |= SQLITE_WriteSchema; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - db->flags = savedFlags; - sqlite3_free(zSql); - if( rc ){ - sqlite3_finalize(pStmt); - pStmt = 0; - } - return pStmt; -} - -/* -** Check to see if the sqlite_user table exists in database zDb. -*/ -static int userTableExists(sqlite3 *db, const char *zDb){ - int rc; - sqlite3_mutex_enter(db->mutex); - sqlite3BtreeEnterAll(db); - if( db->init.busy==0 ){ - char *zErr = 0; - sqlite3Init(db, &zErr); - sqlite3DbFree(db, zErr); - } - rc = sqlite3FindTable(db, "sqlite_user", zDb)!=0; - sqlite3BtreeLeaveAll(db); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Check to see if database zDb has a "sqlite_user" table and if it does -** whether that table can authenticate zUser with nPw,zPw. Write one of -** the UAUTH_* user authorization level codes into *peAuth and return a -** result code. -*/ -static int userAuthCheckLogin( - sqlite3 *db, /* The database connection to check */ - const char *zDb, /* Name of specific database to check */ - u8 *peAuth /* OUT: One of UAUTH_* constants */ -){ - sqlite3_stmt *pStmt; - int rc; - - *peAuth = UAUTH_Unknown; - if( !userTableExists(db, "main") ){ - *peAuth = UAUTH_Admin; /* No sqlite_user table. Everybody is admin. 
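**
** For reference, a client logs in roughly as follows (a sketch):
**
**   sqlite3 *db = 0;
**   int rc = sqlite3_open("app.db", &db);
**   if( rc==SQLITE_OK ){
**     rc = sqlite3_user_authenticate(db, "alice", "secret", 6);
**   }
**
** On an authentication-required database, other operations fail with
** SQLITE_AUTH until this call succeeds.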
*/ - return SQLITE_OK; - } - if( db->auth.zAuthUser==0 ){ - *peAuth = UAUTH_Fail; - return SQLITE_OK; - } - pStmt = sqlite3UserAuthPrepare(db, - "SELECT pw=sqlite_crypt(?1,pw), isAdmin FROM \"%w\".sqlite_user" - " WHERE uname=?2", zDb); - if( pStmt==0 ) return SQLITE_NOMEM; - sqlite3_bind_blob(pStmt, 1, db->auth.zAuthPW, db->auth.nAuthPW,SQLITE_STATIC); - sqlite3_bind_text(pStmt, 2, db->auth.zAuthUser, -1, SQLITE_STATIC); - rc = sqlite3_step(pStmt); - if( rc==SQLITE_ROW && sqlite3_column_int(pStmt,0) ){ - *peAuth = sqlite3_column_int(pStmt, 1) + UAUTH_User; - }else{ - *peAuth = UAUTH_Fail; - } - return sqlite3_finalize(pStmt); -} -int sqlite3UserAuthCheckLogin( - sqlite3 *db, /* The database connection to check */ - const char *zDb, /* Name of specific database to check */ - u8 *peAuth /* OUT: One of UAUTH_* constants */ -){ - int rc; - u8 savedAuthLevel; - assert( zDb!=0 ); - assert( peAuth!=0 ); - savedAuthLevel = db->auth.authLevel; - db->auth.authLevel = UAUTH_Admin; - rc = userAuthCheckLogin(db, zDb, peAuth); - db->auth.authLevel = savedAuthLevel; - return rc; -} - -/* -** If the current authLevel is UAUTH_Unknown, the take actions to figure -** out what authLevel should be -*/ -void sqlite3UserAuthInit(sqlite3 *db){ - if( db->auth.authLevel==UAUTH_Unknown ){ - u8 authLevel = UAUTH_Fail; - sqlite3UserAuthCheckLogin(db, "main", &authLevel); - db->auth.authLevel = authLevel; - if( authLevelflags &= ~SQLITE_WriteSchema; - } -} - -/* -** Implementation of the sqlite_crypt(X,Y) function. -** -** If Y is NULL then generate a new hash for password X and return that -** hash. If Y is not null, then generate a hash for password X using the -** same salt as the previous hash Y and return the new hash. -*/ -void sqlite3CryptFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - const char *zIn; - int nIn, ii; - u8 *zOut; - char zSalt[8]; - zIn = sqlite3_value_blob(argv[0]); - nIn = sqlite3_value_bytes(argv[0]); - if( sqlite3_value_type(argv[1])==SQLITE_BLOB - && sqlite3_value_bytes(argv[1])==nIn+sizeof(zSalt) - ){ - memcpy(zSalt, sqlite3_value_blob(argv[1]), sizeof(zSalt)); - }else{ - sqlite3_randomness(sizeof(zSalt), zSalt); - } - zOut = sqlite3_malloc( nIn+sizeof(zSalt) ); - if( zOut==0 ){ - sqlite3_result_error_nomem(context); - }else{ - memcpy(zOut, zSalt, sizeof(zSalt)); - for(ii=0; iiauth.authLevel = UAUTH_Unknown; - sqlite3_free(db->auth.zAuthUser); - sqlite3_free(db->auth.zAuthPW); - memset(&db->auth, 0, sizeof(db->auth)); - db->auth.zAuthUser = sqlite3_mprintf("%s", zUsername); - if( db->auth.zAuthUser==0 ) return SQLITE_NOMEM; - db->auth.zAuthPW = sqlite3_malloc( nPW+1 ); - if( db->auth.zAuthPW==0 ) return SQLITE_NOMEM; - memcpy(db->auth.zAuthPW,zPW,nPW); - db->auth.nAuthPW = nPW; - rc = sqlite3UserAuthCheckLogin(db, "main", &authLevel); - db->auth.authLevel = authLevel; - sqlite3ExpirePreparedStatements(db, 0); - if( rc ){ - return rc; /* OOM error, I/O error, etc. */ - } - if( authLevelauth.authLevelauth.zAuthUser==0 ){ - assert( isAdmin!=0 ); - sqlite3_user_authenticate(db, zUsername, aPW, nPW); - } - return SQLITE_OK; -} - -/* -** The sqlite3_user_change() interface can be used to change a users -** login credentials or admin privilege. Any user can change their own -** login credentials. Only an admin user can change another users login -** credentials or admin privilege setting. No user may change their own -** admin privilege setting. 
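**
** For example (a sketch), an admin resetting user "bob" to a normal,
** non-admin account with a new password:
**
**   rc = sqlite3_user_change(db, "bob", "newpw", 5, 0);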
-*/ -int sqlite3_user_change( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to change */ - const char *aPW, /* Modified password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* Modified admin privilege for the user */ -){ - sqlite3_stmt *pStmt; - int rc; - u8 authLevel; - - authLevel = db->auth.authLevel; - if( authLevelauth.zAuthUser, zUsername)!=0 ){ - if( db->auth.authLevelauth.authLevel = UAUTH_Admin; - if( !userTableExists(db, "main") ){ - /* This routine is a no-op if the user to be modified does not exist */ - }else{ - pStmt = sqlite3UserAuthPrepare(db, - "UPDATE sqlite_user SET isAdmin=%d, pw=sqlite_crypt(?1,NULL)" - " WHERE uname=%Q", isAdmin, zUsername); - if( pStmt==0 ){ - rc = SQLITE_NOMEM; - }else{ - sqlite3_bind_blob(pStmt, 1, aPW, nPW, SQLITE_STATIC); - sqlite3_step(pStmt); - rc = sqlite3_finalize(pStmt); - } - } - db->auth.authLevel = authLevel; - return rc; -} - -/* -** The sqlite3_user_delete() interface can be used (by an admin user only) -** to delete a user. The currently logged-in user cannot be deleted, -** which guarantees that there is always an admin user and hence that -** the database cannot be converted into a no-authentication-required -** database. -*/ -int sqlite3_user_delete( - sqlite3 *db, /* Database connection */ - const char *zUsername /* Username to remove */ -){ - sqlite3_stmt *pStmt; - if( db->auth.authLevelauth.zAuthUser, zUsername)==0 ){ - /* Cannot delete self */ - return SQLITE_AUTH; - } - if( !userTableExists(db, "main") ){ - /* This routine is a no-op if the user to be deleted does not exist */ - return SQLITE_OK; - } - pStmt = sqlite3UserAuthPrepare(db, - "DELETE FROM sqlite_user WHERE uname=%Q", zUsername); - if( pStmt==0 ) return SQLITE_NOMEM; - sqlite3_step(pStmt); - return sqlite3_finalize(pStmt); -} - -#endif /* SQLITE_USER_AUTHENTICATION */ + #endif \ No newline at end of file diff --git a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index d67a4adb6..c34235d84 100644 --- a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -134,7 +134,7 @@ extern "C" { ** ** Since [version 3.6.18] ([dateof:3.6.18]), ** SQLite source code has been stored in the -** Fossil configuration management +** Fossil configuration management ** system. ^The SQLITE_SOURCE_ID macro evaluates to ** a string which identifies a particular check-in of SQLite ** within its configuration management system. ^The SQLITE_SOURCE_ID @@ -147,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.46.1" -#define SQLITE_VERSION_NUMBER 3046001 -#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" +#define SQLITE_VERSION "3.50.4" +#define SQLITE_VERSION_NUMBER 3050004 +#define SQLITE_SOURCE_ID "2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -653,6 +653,13 @@ SQLITE_API int sqlite3_exec( ** filesystem supports doing multiple write operations atomically when those ** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and ** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. 
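**
** A VFS client can test for these capabilities directly. A sketch,
** where pFile is an open sqlite3_file:
**
**   int dc = pFile->pMethods->xDeviceCharacteristics(pFile);
**   if( dc & SQLITE_IOCAP_BATCH_ATOMIC ){
**     ... the file supports atomic write batches ...
**   }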
+**
+** The SQLITE_IOCAP_SUBPAGE_READ property means that it is ok to read
+** from the database file in amounts that are not a multiple of the
+** page size and that do not begin at a page boundary. Without this
+** property, SQLite is careful to only do full-page reads and writes
+** on aligned pages, with the one exception that it will do a sub-page
+** read of the first page to access the database header.
 */
 #define SQLITE_IOCAP_ATOMIC 0x00000001
 #define SQLITE_IOCAP_ATOMIC512 0x00000002
@@ -669,6 +676,7 @@ SQLITE_API int sqlite3_exec(
 #define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000
 #define SQLITE_IOCAP_IMMUTABLE 0x00002000
 #define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000
+#define SQLITE_IOCAP_SUBPAGE_READ 0x00008000
 
 /*
 ** CAPI3REF: File Locking Levels
@@ -773,8 +781,8 @@ struct sqlite3_file {
 ** to xUnlock() is a no-op.
 ** The xCheckReservedLock() method checks whether any database connection,
 ** either in this process or in some other process, is holding a RESERVED,
-** PENDING, or EXCLUSIVE lock on the file. It returns true
-** if such a lock exists and false otherwise.
+** PENDING, or EXCLUSIVE lock on the file. It returns, via its output
+** pointer parameter, true if such a lock exists and false otherwise.
 **
 ** The xFileControl() method is a generic interface that allows custom
 ** VFS implementations to directly control an open file using the
@@ -815,6 +823,7 @@ struct sqlite3_file {
 **
  • [SQLITE_IOCAP_POWERSAFE_OVERWRITE] **
  • [SQLITE_IOCAP_IMMUTABLE] **
  • [SQLITE_IOCAP_BATCH_ATOMIC] +**
  • [SQLITE_IOCAP_SUBPAGE_READ] ** ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of @@ -1092,6 +1101,11 @@ struct sqlite3_io_methods { ** pointed to by the pArg argument. This capability is used during testing ** and only needs to be supported when SQLITE_TEST is defined. ** +**
  • [[SQLITE_FCNTL_NULL_IO]] +** The [SQLITE_FCNTL_NULL_IO] opcode sets the low-level file descriptor +** or file handle for the [sqlite3_file] object such that it will no longer +** read or write to the database file. +** **
  • [[SQLITE_FCNTL_WAL_BLOCK]] ** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might ** be advantageous to block on the next WAL lock if the lock is not immediately @@ -1150,6 +1164,12 @@ struct sqlite3_io_methods { ** the value that M is to be set to. Before returning, the 32-bit signed ** integer is overwritten with the previous value of M. ** +**
  • [[SQLITE_FCNTL_BLOCK_ON_CONNECT]] +** The [SQLITE_FCNTL_BLOCK_ON_CONNECT] opcode is used to configure the +** VFS to block when taking a SHARED lock to connect to a wal mode database. +** This is used to implement the functionality associated with +** SQLITE_SETLK_BLOCK_ON_CONNECT. +** **
  • [[SQLITE_FCNTL_DATA_VERSION]] ** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to ** a database file. The argument is a pointer to a 32-bit unsigned integer. @@ -1245,6 +1265,8 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 #define SQLITE_FCNTL_RESET_CACHE 42 +#define SQLITE_FCNTL_NULL_IO 43 +#define SQLITE_FCNTL_BLOCK_ON_CONNECT 44 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -1975,13 +1997,16 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_CONFIG_LOOKASIDE]]
    SQLITE_CONFIG_LOOKASIDE
    **
    ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine -** the default size of lookaside memory on each [database connection]. +** the default size of [lookaside memory] on each [database connection]. ** The first argument is the -** size of each lookaside buffer slot and the second is the number of -** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE -** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] -** option to [sqlite3_db_config()] can be used to change the lookaside -** configuration on individual connections.)^
    +** size of each lookaside buffer slot ("sz") and the second is the number of +** slots allocated to each database connection ("cnt").)^ +** ^(SQLITE_CONFIG_LOOKASIDE sets the default lookaside size. +** The [SQLITE_DBCONFIG_LOOKASIDE] option to [sqlite3_db_config()] can +** be used to change the lookaside configuration on individual connections.)^ +** The [-DSQLITE_DEFAULT_LOOKASIDE] option can be used to change the +** default lookaside configuration at compile-time. +** ** ** [[SQLITE_CONFIG_PCACHE2]]
    SQLITE_CONFIG_PCACHE2
    **
^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is
@@ -2197,7 +2222,15 @@ struct sqlite3_mem_methods {
 ** CAPI3REF: Database Connection Configuration Options
 **
 ** These constants are the available integer configuration options that
-** can be passed as the second argument to the [sqlite3_db_config()] interface.
+** can be passed as the second parameter to the [sqlite3_db_config()] interface.
+**
+** The [sqlite3_db_config()] interface is a var-args function. It takes a
+** variable number of parameters, though always at least two. The number of
+** parameters passed into sqlite3_db_config() depends on which of these
+** constants is given as the second parameter. This documentation page
+** refers to parameters beyond the second as "arguments". Thus, when this
+** page says "the N-th argument" it means "the N-th parameter past the
+** configuration option" or "the (N+2)-th parameter to sqlite3_db_config()".
 **
 ** New configuration options may be added in future releases of SQLite.
 ** Existing configuration options might be discontinued. Applications
@@ -2209,31 +2242,57 @@ struct sqlite3_mem_methods {
 **
    ** [[SQLITE_DBCONFIG_LOOKASIDE]] **
    SQLITE_DBCONFIG_LOOKASIDE
    -**
    ^This option takes three additional arguments that determine the -** [lookaside memory allocator] configuration for the [database connection]. -** ^The first argument (the third parameter to [sqlite3_db_config()] is a +**
    The SQLITE_DBCONFIG_LOOKASIDE option is used to adjust the +** configuration of the [lookaside memory allocator] within a database +** connection. +** The arguments to the SQLITE_DBCONFIG_LOOKASIDE option are not +** in the [DBCONFIG arguments|usual format]. +** The SQLITE_DBCONFIG_LOOKASIDE option takes three arguments, not two, +** so that a call to [sqlite3_db_config()] that uses SQLITE_DBCONFIG_LOOKASIDE +** should have a total of five parameters. +**
      +**
    1. The first argument ("buf") is a ** pointer to a memory buffer to use for lookaside memory. -** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb -** may be NULL in which case SQLite will allocate the -** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the -** size of each lookaside buffer slot. ^The third argument is the number of -** slots. The size of the buffer in the first argument must be greater than -** or equal to the product of the second and third arguments. The buffer -** must be aligned to an 8-byte boundary. ^If the second argument to -** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally -** rounded down to the next smaller multiple of 8. ^(The lookaside memory +** The first argument may be NULL in which case SQLite will allocate the +** lookaside buffer itself using [sqlite3_malloc()]. +**

    2. The second argument ("sz") is the +** size of each lookaside buffer slot. Lookaside is disabled if "sz" +** is less than 8. The "sz" argument should be a multiple of 8 less than +** 65536. If "sz" does not meet this constraint, it is reduced in size until +** it does. +**

3. The third argument ("cnt") is the number of slots. Lookaside is disabled
+** if "cnt" is less than 1. The "cnt" value will be reduced, if necessary, so
+** that the product of "sz" and "cnt" does not exceed 2,147,418,112. The "cnt"
+** parameter is usually chosen so that the product of "sz" and "cnt" is less
+** than 1,000,000.
+**

    +**

    If the "buf" argument is not NULL, then it must +** point to a memory buffer with a size that is greater than +** or equal to the product of "sz" and "cnt". +** The buffer must be aligned to an 8-byte boundary. +** The lookaside memory ** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words -** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. +** when the value returned by [SQLITE_DBSTATUS_LOOKASIDE_USED] is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns -** [SQLITE_BUSY].)^

    +** [SQLITE_BUSY]. +** If the "buf" argument is NULL and an attempt +** to allocate memory based on "sz" and "cnt" fails, then +** lookaside is silently disabled. +**

    +** The [SQLITE_CONFIG_LOOKASIDE] configuration option can be used to set the +** default lookaside configuration at initialization. The +** [-DSQLITE_DEFAULT_LOOKASIDE] option can be used to set the default lookaside +** configuration at compile-time. Typical values for lookaside are 1200 for +** "sz" and 40 to 100 for "cnt". +**
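**
** For illustration only (an editorial sketch, not part of the upstream
** change): given the rules above, a connection "db" (assumed to be an
** open handle) could be given forty 1200-byte lookaside slots, letting
** SQLite allocate the buffer itself by passing a NULL "buf":
**
**    int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE,
**                               (void*)0, 1200, 40);
**
** Per the documentation above, rc is SQLITE_BUSY if lookaside is
** currently in use on the connection, in which case nothing changes.
**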

    ** ** [[SQLITE_DBCONFIG_ENABLE_FKEY]] **
    SQLITE_DBCONFIG_ENABLE_FKEY
    **
    ^This option is used to enable or disable the enforcement of -** [foreign key constraints]. There should be two additional arguments. +** [foreign key constraints]. This is the same setting that is +** enabled or disabled by the [PRAGMA foreign_keys] statement. ** The first argument is an integer which is 0 to disable FK enforcement, ** positive to enable FK enforcement or negative to leave FK enforcement ** unchanged. The second parameter is a pointer to an integer into which @@ -2255,13 +2314,13 @@ struct sqlite3_mem_methods { **

    Originally this option disabled all triggers. ^(However, since ** SQLite version 3.35.0, TEMP triggers are still allowed even if ** this option is off. So, in other words, this option now only disables -** triggers in the main database schema or in the schemas of ATTACH-ed +** triggers in the main database schema or in the schemas of [ATTACH]-ed ** databases.)^

    ** ** [[SQLITE_DBCONFIG_ENABLE_VIEW]] **
    SQLITE_DBCONFIG_ENABLE_VIEW
    **
    ^This option is used to enable or disable [CREATE VIEW | views]. -** There should be two additional arguments. +** There must be two additional arguments. ** The first argument is an integer which is 0 to disable views, ** positive to enable views or negative to leave the setting unchanged. ** The second parameter is a pointer to an integer into which @@ -2280,7 +2339,7 @@ struct sqlite3_mem_methods { **
    ^This option is used to enable or disable the ** [fts3_tokenizer()] function which is part of the ** [FTS3] full-text search engine extension. -** There should be two additional arguments. +** There must be two additional arguments. ** The first argument is an integer which is 0 to disable fts3_tokenizer() or ** positive to enable fts3_tokenizer() or negative to leave the setting ** unchanged. @@ -2295,7 +2354,7 @@ struct sqlite3_mem_methods { ** interface independently of the [load_extension()] SQL function. ** The [sqlite3_enable_load_extension()] API enables or disables both the ** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. -** There should be two additional arguments. +** There must be two additional arguments. ** When the first argument to this interface is 1, then only the C-API is ** enabled and the SQL function remains disabled. If the first argument to ** this interface is 0, then both the C-API and the SQL function are disabled. @@ -2309,23 +2368,30 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_DBCONFIG_MAINDBNAME]]
    SQLITE_DBCONFIG_MAINDBNAME
    **
^This option is used to change the name of the "main" database
-** schema. ^The sole argument is a pointer to a constant UTF8 string
-** which will become the new schema name in place of "main". ^SQLite
-** does not make a copy of the new main schema name string, so the application
-** must ensure that the argument passed into this DBCONFIG option is unchanged
-** until after the database connection closes.
+** schema. This option does not follow the
+** [DBCONFIG arguments|usual SQLITE_DBCONFIG argument format].
+** This option takes exactly one additional argument so that the
+** [sqlite3_db_config()] call has a total of three parameters. The
+** extra argument must be a pointer to a constant UTF8 string which
+** will become the new schema name in place of "main". ^SQLite does
+** not make a copy of the new main schema name string, so the application
+** must ensure that the argument passed into SQLITE_DBCONFIG_MAINDBNAME
+** is unchanged until after the database connection closes.
 **
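**
** For illustration only (an editorial sketch, not part of the upstream
** change): because SQLite keeps only the pointer, the new schema name
** should be a string whose lifetime covers the whole connection, such as
** a string literal. The name "primary" and the handle "db" are
** hypothetical:
**
**    sqlite3_db_config(db, SQLITE_DBCONFIG_MAINDBNAME, "primary");
**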
    ** ** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]] **
    SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE
    -**
    Usually, when a database in wal mode is closed or detached from a -** database handle, SQLite checks if this will mean that there are now no -** connections at all to the database. If so, it performs a checkpoint -** operation before closing the connection. This option may be used to -** override this behavior. The first parameter passed to this operation -** is an integer - positive to disable checkpoints-on-close, or zero (the -** default) to enable them, and negative to leave the setting unchanged. -** The second parameter is a pointer to an integer +**
Usually, when a database in [WAL mode] is closed or detached from a
+** database handle, SQLite checks if there are other connections to the
+** same database, and if there are no other database connections (if the
+** connection being closed is the last open connection to the database),
+** then SQLite performs a [checkpoint] before closing the connection and
+** deletes the WAL file. The SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE option can
+** be used to override that behavior. The first argument passed to this
+** operation (the third parameter to [sqlite3_db_config()]) is an integer
+** which is positive to disable checkpoints-on-close, or zero (the default)
+** to enable them, and negative to leave the setting unchanged.
+** The second argument (the fourth parameter) is a pointer to an integer
+** into which is written 0 or 1 to indicate whether checkpoints-on-close
+** have been disabled - 0 if they are not disabled, 1 if they are.
 **
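**
** For illustration only (an editorial sketch, not part of the upstream
** change): disabling the closing checkpoint on an assumed open handle
** "db" and confirming the new state through the second argument, which
** should afterwards hold 1:
**
**    int ckptDisabled = -1;
**    sqlite3_db_config(db, SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE,
**                      1, &ckptDisabled);
**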
    @@ -2486,7 +2552,7 @@ struct sqlite3_mem_methods { ** statistics. For statistics to be collected, the flag must be set on ** the database handle both when the SQL statement is prepared and when it ** is stepped. The flag is set (collection of statistics is enabled) -** by default. This option takes two arguments: an integer and a pointer to +** by default.

    This option takes two arguments: an integer and a pointer to ** an integer.. The first argument is 1, 0, or -1 to enable, disable, or ** leave unchanged the statement scanstatus option. If the second argument ** is not NULL, then the value of the statement scanstatus setting after @@ -2500,7 +2566,7 @@ struct sqlite3_mem_methods { ** in which tables and indexes are scanned so that the scans start at the end ** and work toward the beginning rather than starting at the beginning and ** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the -** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** same as setting [PRAGMA reverse_unordered_selects].

    This option takes ** two arguments which are an integer and a pointer to an integer. The first ** argument is 1, 0, or -1 to enable, disable, or leave unchanged the ** reverse scan order flag, respectively. If the second argument is not NULL, @@ -2509,7 +2575,76 @@ struct sqlite3_mem_methods { ** first argument. ** ** +** [[SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE]] +**

    SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE
    +**
The SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE option enables or disables
+** the ability of the [ATTACH DATABASE] SQL command to create a new database
+** file if the database file named in the ATTACH command does not already
+** exist. This ability of ATTACH to create a new database is enabled by
+** default. Applications can disable or reenable the ability for ATTACH to
+** create new database files using this DBCONFIG option.

    +** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the attach-create flag, respectively. If the second +** argument is not NULL, then 0 or 1 is written into the integer that the +** second argument points to depending on if the attach-create flag is set +** after processing the first argument. +**
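**
** For illustration only (an editorial sketch, not part of the upstream
** change): a hardened application might forbid ATTACH from creating new
** files on an assumed open handle "db", reading back the resulting state
** through the second argument (0 confirms the flag is now off):
**
**    int attachCreate = -1;
**    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE,
**                      0, &attachCreate);
**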

    +** +** [[SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE]] +**
    SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE
    +**
The SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE option enables or disables the
+** ability of the [ATTACH DATABASE] SQL command to open a database for writing.
+** This capability is enabled by default. Applications can disable or
+** reenable this capability using the current DBCONFIG option. If
+** this capability is disabled, the [ATTACH] command will still work,
+** but the database will be opened read-only. If this option is disabled,
+** then the ability to create a new database using [ATTACH] is also disabled,
+** regardless of the value of the [SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE]
+** option.

    +** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the ability to ATTACH another database for writing, +** respectively. If the second argument is not NULL, then 0 or 1 is written +** into the integer to which the second argument points, depending on whether +** the ability to ATTACH a read/write database is enabled or disabled +** after processing the first argument. +**

    +** +** [[SQLITE_DBCONFIG_ENABLE_COMMENTS]] +**
    SQLITE_DBCONFIG_ENABLE_COMMENTS
    +**
    The SQLITE_DBCONFIG_ENABLE_COMMENTS option enables or disables the +** ability to include comments in SQL text. Comments are enabled by default. +** An application can disable or reenable comments in SQL text using this +** DBCONFIG option.

    +** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the ability to use comments in SQL text, +** respectively. If the second argument is not NULL, then 0 or 1 is written +** into the integer that the second argument points to depending on if +** comments are allowed in SQL text after processing the first argument. +**
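**
** For illustration only (an editorial sketch, not part of the upstream
** change): a connection "db" (assumed open) that evaluates untrusted SQL
** might turn comments off without querying the resulting state, since the
** second argument may be NULL:
**
**    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_COMMENTS, 0, (int*)0);
**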

    +** ** +** +** [[DBCONFIG arguments]]

    Arguments To SQLITE_DBCONFIG Options

    +** +**

Most of the SQLITE_DBCONFIG options take two arguments, so that the
+** overall call to [sqlite3_db_config()] has a total of four parameters.
+** The first argument (the third parameter to sqlite3_db_config()) is an integer.
+** The second argument is a pointer to an integer. If the first argument is 1,
+** then the option becomes enabled. If the first integer argument is 0, then the
+** option is disabled. If the first argument is -1, then the option setting
+** is unchanged. The second argument, the pointer to an integer, may be NULL.
+** If the second argument is not NULL, then a value of 0 or 1 is written into
+** the integer to which the second argument points, depending on whether the
+** setting is disabled or enabled after applying any changes specified by
+** the first argument.
+**
+**
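**
** For illustration only (an editorial sketch, not part of the upstream
** change): the usual two-argument pattern also supports a pure query.
** Passing -1 leaves the setting unchanged while its current value is
** written out; "db" is an assumed open handle:
**
**    int fkEnabled = -1;
**    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, -1, &fkEnabled);
**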

While most SQLITE_DBCONFIG options use the argument format
+** described in the previous paragraph, the [SQLITE_DBCONFIG_MAINDBNAME]
+** and [SQLITE_DBCONFIG_LOOKASIDE] options are different. See the
+** documentation of those exceptional options for details.
 */
 #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */
 #define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */
@@ -2531,7 +2666,10 @@ struct sqlite3_mem_methods {
 #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */
 #define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */
 #define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */
-#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */
+#define SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE 1020 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE 1021 /* int int* */
+#define SQLITE_DBCONFIG_ENABLE_COMMENTS 1022 /* int int* */
+#define SQLITE_DBCONFIG_MAX 1022 /* Largest DBCONFIG */
 
 /*
 ** CAPI3REF: Enable Or Disable Extended Result Codes
@@ -2623,10 +2761,14 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64);
 ** deleted by the most recently completed INSERT, UPDATE or DELETE
 ** statement on the database connection specified by the only parameter.
 ** The two functions are identical except for the type of the return value
-** and that if the number of rows modified by the most recent INSERT, UPDATE
+** and that if the number of rows modified by the most recent INSERT, UPDATE,
 ** or DELETE is greater than the maximum value supported by type "int", then
 ** the return value of sqlite3_changes() is undefined. ^Executing any other
 ** type of SQL statement does not modify the value returned by these functions.
+** For the purposes of this interface, a CREATE TABLE AS SELECT statement
+** does not count as an INSERT, UPDATE or DELETE statement and hence the rows
+** added to the new table by the CREATE TABLE AS SELECT statement are not
+** counted.
 **
 ** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are
 ** considered - auxiliary changes caused by [CREATE TRIGGER | triggers],
@@ -2881,6 +3023,44 @@ SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
 */
 SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms);
 
+/*
+** CAPI3REF: Set the Setlk Timeout
+** METHOD: sqlite3
+**
+** This routine is only useful in SQLITE_ENABLE_SETLK_TIMEOUT builds. If
+** the VFS supports blocking locks, it sets the timeout in ms used by
+** eligible locks taken on wal mode databases by the specified database
+** handle. In non-SQLITE_ENABLE_SETLK_TIMEOUT builds, or if the VFS does
+** not support blocking locks, this function is a no-op.
+**
+** Passing 0 to this function disables blocking locks altogether. Passing
+** -1 to this function requests that the VFS block for a long time -
+** indefinitely if possible. The results of passing any other negative value
+** are undefined.
+**
+** Internally, each SQLite database handle stores two timeout values - the
+** busy-timeout (used for rollback mode databases, or if the VFS does not
+** support blocking locks) and the setlk-timeout (used for blocking locks
+** on wal-mode databases). The sqlite3_busy_timeout() method sets both
+** values; this function sets only the setlk-timeout value. Therefore,
+** to configure separate busy-timeout and setlk-timeout values for a single
+** database handle, call sqlite3_busy_timeout() followed by this function.
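**
** For illustration only (an editorial sketch, not part of the upstream
** change): assuming an SQLITE_ENABLE_SETLK_TIMEOUT build and an open
** handle "db", the two timeouts can be configured independently by
** calling sqlite3_busy_timeout() first and this function second:
**
**    sqlite3_busy_timeout(db, 1000);
**    sqlite3_setlk_timeout(db, 5000, SQLITE_SETLK_BLOCK_ON_CONNECT);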
+** +** Whenever the number of connections to a wal mode database falls from +** 1 to 0, the last connection takes an exclusive lock on the database, +** then checkpoints and deletes the wal file. While it is doing this, any +** new connection that tries to read from the database fails with an +** SQLITE_BUSY error. Or, if the SQLITE_SETLK_BLOCK_ON_CONNECT flag is +** passed to this API, the new connection blocks until the exclusive lock +** has been released. +*/ +SQLITE_API int sqlite3_setlk_timeout(sqlite3*, int ms, int flags); + +/* +** CAPI3REF: Flags for sqlite3_setlk_timeout() +*/ +#define SQLITE_SETLK_BLOCK_ON_CONNECT 0x01 + /* ** CAPI3REF: Convenience Routines For Running Queries ** METHOD: sqlite3 @@ -3571,8 +3751,8 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** ** [[OPEN_EXRESCODE]] ^(

    [SQLITE_OPEN_EXRESCODE]
    **
    The database connection comes up in "extended result code mode". -** In other words, the database behaves has if -** [sqlite3_extended_result_codes(db,1)] where called on the database +** In other words, the database behaves as if +** [sqlite3_extended_result_codes(db,1)] were called on the database ** connection as soon as the connection is created. In addition to setting ** the extended result code mode, this flag also causes [sqlite3_open_v2()] ** to return an extended result code.
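**
** For illustration only (an editorial sketch, not part of the upstream
** change): opening a connection directly in extended result code mode.
** The filename "app.db" is hypothetical:
**
**    sqlite3 *db = 0;
**    int rc = sqlite3_open_v2("app.db", &db,
**                             SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
**                             SQLITE_OPEN_EXRESCODE, 0);
**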
    @@ -3900,7 +4080,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of ** database filename D with corresponding journal file J and WAL file W and -** with N URI parameters key/values pairs in the array P. The result from +** an array P of N URI Key/Value pairs. The result from ** sqlite3_create_filename(D,J,W,N,P) is a pointer to a database filename that ** is safe to pass to routines like: **
      @@ -4186,11 +4366,22 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); **
      The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler ** to return an error (error code SQLITE_ERROR) if the statement uses ** any virtual tables. +** +** [[SQLITE_PREPARE_DONT_LOG]]
      SQLITE_PREPARE_DONT_LOG
      +**
The SQLITE_PREPARE_DONT_LOG flag prevents SQL compiler
+** errors from being sent to the error log defined by
+** [SQLITE_CONFIG_LOG]. This can be used, for example, to do test
+** compiles to see if some SQL syntax is well-formed, without generating
+** messages on the global error log when it is not. If the test compile
+** fails, the sqlite3_prepare_v3() call returns the same error indications
+** with or without this flag; it just omits the call to [sqlite3_log()] that
+** logs the error.
 **
 */
 #define SQLITE_PREPARE_PERSISTENT 0x01
 #define SQLITE_PREPARE_NORMALIZE 0x02
 #define SQLITE_PREPARE_NO_VTAB 0x04
+#define SQLITE_PREPARE_DONT_LOG 0x10
 
 /*
 ** CAPI3REF: Compiling An SQL Statement
@@ -4223,13 +4414,17 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
 ** and sqlite3_prepare16_v3() use UTF-16.
 **
 ** ^If the nByte argument is negative, then zSql is read up to the
-** first zero terminator. ^If nByte is positive, then it is the
-** number of bytes read from zSql. ^If nByte is zero, then no prepared
+** first zero terminator. ^If nByte is positive, then it is the maximum
+** number of bytes read from zSql. When nByte is positive, zSql is read
+** up to the first zero terminator or until the nByte bytes have been read,
+** whichever comes first. ^If nByte is zero, then no prepared
 ** statement is generated.
 ** If the caller knows that the supplied string is nul-terminated, then
 ** there is a small performance advantage to passing an nByte parameter that
 ** is the number of bytes in the input string including
 ** the nul-terminator.
+** Note that nByte measures the length of the input in bytes, not
+** characters, even for the UTF-16 interfaces.
 **
 ** ^If pzTail is not NULL then *pzTail is made to point to the first byte
 ** past the end of the first SQL statement in zSql. These routines only
@@ -4566,7 +4761,7 @@ typedef struct sqlite3_context sqlite3_context;
 ** METHOD: sqlite3_stmt
 **
 ** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants,
-** literals may be replaced by a [parameter] that matches one of following
+** literals may be replaced by a [parameter] that matches one of the following
 ** templates:
 **
 **
        @@ -4611,7 +4806,7 @@ typedef struct sqlite3_context sqlite3_context; ** ** [[byte-order determination rules]] ^The byte-order of ** UTF16 input text is determined by the byte-order mark (BOM, U+FEFF) -** found in first character, which is removed, or in the absence of a BOM +** found in the first character, which is removed, or in the absence of a BOM ** the byte order is the native byte order of the host ** machine for sqlite3_bind_text16() or the byte order specified in ** the 6th parameter for sqlite3_bind_text64().)^ @@ -4631,7 +4826,7 @@ typedef struct sqlite3_context sqlite3_context; ** or sqlite3_bind_text16() or sqlite3_bind_text64() then ** that parameter must be the byte offset ** where the NUL terminator would occur assuming the string were NUL -** terminated. If any NUL characters occurs at byte offsets less than +** terminated. If any NUL characters occur at byte offsets less than ** the value of the fourth parameter then the resulting string value will ** contain embedded NULs. The result of expressions involving strings ** with embedded NULs is undefined. @@ -4843,7 +5038,7 @@ SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); ** METHOD: sqlite3_stmt ** ** ^These routines provide a means to determine the database, table, and -** table column that is the origin of a particular result column in +** table column that is the origin of a particular result column in a ** [SELECT] statement. ** ^The name of the database or table or column can be returned as ** either a UTF-8 or UTF-16 string. ^The _database_ routines return @@ -4981,7 +5176,7 @@ SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); ** other than [SQLITE_ROW] before any subsequent invocation of ** sqlite3_step(). Failure to reset the prepared statement using ** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from -** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1], +** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]), ** sqlite3_step() began ** calling [sqlite3_reset()] automatically in this circumstance rather ** than returning [SQLITE_MISUSE]. This is not considered a compatibility @@ -5412,8 +5607,8 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** ** For best security, the [SQLITE_DIRECTONLY] flag is recommended for ** all application-defined SQL functions that do not need to be -** used inside of triggers, view, CHECK constraints, or other elements of -** the database schema. This flags is especially recommended for SQL +** used inside of triggers, views, CHECK constraints, or other elements of +** the database schema. This flag is especially recommended for SQL ** functions that have side effects or reveal internal application state. ** Without this flag, an attacker might be able to modify the schema of ** a database file to include invocations of the function with parameters @@ -5444,7 +5639,7 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** [user-defined window functions|available here]. ** ** ^(If the final parameter to sqlite3_create_function_v2() or -** sqlite3_create_window_function() is not NULL, then it is destructor for +** sqlite3_create_window_function() is not NULL, then it is the destructor for ** the application data pointer. 
The destructor is invoked when the function ** is deleted, either by being overloaded or when the database connection ** closes.)^ ^The destructor is also invoked if the call to @@ -5600,7 +5795,7 @@ SQLITE_API int sqlite3_create_window_function( ** This flag instructs SQLite to omit some corner-case optimizations that ** might disrupt the operation of the [sqlite3_value_subtype()] function, ** causing it to return zero rather than the correct subtype(). -** SQL functions that invokes [sqlite3_value_subtype()] should have this +** All SQL functions that invoke [sqlite3_value_subtype()] should have this ** property. If the SQLITE_SUBTYPE property is omitted, then the return ** value from [sqlite3_value_subtype()] might sometimes be zero even though ** a non-zero subtype was specified by the function argument expression. @@ -5616,6 +5811,15 @@ SQLITE_API int sqlite3_create_window_function( ** [sqlite3_result_subtype()] should avoid setting this property, as the ** purpose of this property is to disable certain optimizations that are ** incompatible with subtypes. +** +** [[SQLITE_SELFORDER1]]
        SQLITE_SELFORDER1
        +** The SQLITE_SELFORDER1 flag indicates that the function is an aggregate +** that internally orders the values provided to the first argument. The +** ordered-set aggregate SQL notation with a single ORDER BY term can be +** used to invoke this function. If the ordered-set aggregate notation is +** used on a function that lacks this flag, then an error is raised. Note +** that the ordered-set aggregate syntax is only available if SQLite is +** built using the -DSQLITE_ENABLE_ORDERED_SET_AGGREGATES compile-time option. **
        ** */ @@ -5624,6 +5828,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 #define SQLITE_RESULT_SUBTYPE 0x001000000 +#define SQLITE_SELFORDER1 0x002000000 /* ** CAPI3REF: Deprecated Functions @@ -5821,7 +6026,7 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. ** -** Every [application-defined SQL function] that invoke this interface +** Every [application-defined SQL function] that invokes this interface ** should include the [SQLITE_SUBTYPE] property in the text ** encoding argument when the function is [sqlite3_create_function|registered]. ** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() @@ -5834,7 +6039,7 @@ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); ** METHOD: sqlite3_value ** ** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value] -** object D and returns a pointer to that copy. ^The [sqlite3_value] returned +** object V and returns a pointer to that copy. ^The [sqlite3_value] returned ** is a [protected sqlite3_value] object even if the input is not. ** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a ** memory allocation fails. ^If V is a [pointer value], then the result @@ -5872,7 +6077,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value*); ** allocation error occurs. ** ** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is -** determined by the N parameter on first successful call. Changing the +** determined by the N parameter on the first successful call. Changing the ** value of N in any subsequent call to sqlite3_aggregate_context() within ** the same aggregate function instance will not resize the memory ** allocation.)^ Within the xFinal callback, it is customary to set @@ -6034,7 +6239,7 @@ SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(voi ** ** Security Warning: These interfaces should not be exposed in scripting ** languages or in other circumstances where it might be possible for an -** an attacker to invoke them. Any agent that can invoke these interfaces +** attacker to invoke them. Any agent that can invoke these interfaces ** can probably also take control of the process. ** ** Database connection client data is only available for SQLite @@ -6148,7 +6353,7 @@ typedef void (*sqlite3_destructor_type)(void*); ** pointed to by the 2nd parameter are taken as the application-defined ** function result. If the 3rd parameter is non-negative, then it ** must be the byte offset into the string where the NUL terminator would -** appear if the string where NUL terminated. If any NUL characters occur +** appear if the string were NUL terminated. If any NUL characters occur ** in the string at a byte offset that is less than the value of the 3rd ** parameter, then the resulting string will contain embedded NULs and the ** result of expressions operating on strings with embedded NULs is undefined. @@ -6206,7 +6411,7 @@ typedef void (*sqlite3_destructor_type)(void*); ** string and preferably a string literal. The sqlite3_result_pointer() ** routine is part of the [pointer passing interface] added for SQLite 3.20.0. 
** -** If these routines are called from within the different thread +** If these routines are called from within a different thread ** than the one containing the application-defined function that received ** the [sqlite3_context] pointer, the results are undefined. */ @@ -6612,7 +6817,7 @@ SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*); ** METHOD: sqlite3 ** ** ^The sqlite3_db_name(D,N) interface returns a pointer to the schema name -** for the N-th database on database connection D, or a NULL pointer of N is +** for the N-th database on database connection D, or a NULL pointer if N is ** out of range. An N value of 0 means the main database file. An N of 1 is ** the "temp" schema. Larger values of N correspond to various ATTACH-ed ** databases. @@ -6707,7 +6912,7 @@ SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); **
        The SQLITE_TXN_READ state means that the database is currently ** in a read transaction. Content has been read from the database file ** but nothing in the database file has changed. The transaction state -** will advanced to SQLITE_TXN_WRITE if any changes occur and there are +** will be advanced to SQLITE_TXN_WRITE if any changes occur and there are ** no other conflicting concurrent write transactions. The transaction ** state will revert to SQLITE_TXN_NONE following a [ROLLBACK] or ** [COMMIT].
        @@ -6716,7 +6921,7 @@ SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); **
        The SQLITE_TXN_WRITE state means that the database is currently ** in a write transaction. Content has been written to the database file ** but has not yet committed. The transaction state will change to -** to SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT].
        +** SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT].
      */ #define SQLITE_TXN_NONE 0 #define SQLITE_TXN_READ 1 @@ -6867,6 +7072,8 @@ SQLITE_API int sqlite3_autovacuum_pages( ** ** ^The second argument is a pointer to the function to invoke when a ** row is updated, inserted or deleted in a rowid table. +** ^The update hook is disabled by invoking sqlite3_update_hook() +** with a NULL pointer as the second parameter. ** ^The first argument to the callback is a copy of the third argument ** to sqlite3_update_hook(). ** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], @@ -6995,7 +7202,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); ** CAPI3REF: Impose A Limit On Heap Size ** ** These interfaces impose limits on the amount of heap memory that will be -** by all database connections within a single process. +** used by all database connections within a single process. ** ** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the ** soft limit on the amount of heap memory that may be allocated by SQLite. @@ -7053,7 +7260,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); **
    )^ ** ** The circumstances under which SQLite will enforce the heap limits may -** changes in future releases of SQLite. +** change in future releases of SQLite. */ SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N); SQLITE_API sqlite3_int64 sqlite3_hard_heap_limit64(sqlite3_int64 N); @@ -7168,8 +7375,8 @@ SQLITE_API int sqlite3_table_column_metadata( ** ^The entry point is zProc. ** ^(zProc may be 0, in which case SQLite will try to come up with an ** entry point name on its own. It first tries "sqlite3_extension_init". -** If that does not work, it constructs a name "sqlite3_X_init" where the -** X is consists of the lower-case equivalent of all ASCII alphabetic +** If that does not work, it constructs a name "sqlite3_X_init" where +** X consists of the lower-case equivalent of all ASCII alphabetic ** characters in the filename from the last "/" to the first following ** "." and omitting any initial "lib".)^ ** ^The sqlite3_load_extension() interface returns @@ -7240,7 +7447,7 @@ SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff); ** ^(Even though the function prototype shows that xEntryPoint() takes ** no arguments and returns void, SQLite invokes xEntryPoint() with three ** arguments and expects an integer result as if the signature of the -** entry point where as follows: +** entry point were as follows: ** **
     **    int xEntryPoint(
    @@ -7404,7 +7611,7 @@ struct sqlite3_module {
     ** virtual table and might not be checked again by the byte code.)^ ^(The
     ** aConstraintUsage[].omit flag is an optimization hint. When the omit flag
     ** is left in its default setting of false, the constraint will always be
    -** checked separately in byte code.  If the omit flag is change to true, then
    +** checked separately in byte code.  If the omit flag is changed to true, then
     ** the constraint may or may not be checked in byte code.  In other words,
     ** when the omit flag is true there is no guarantee that the constraint will
     ** not be checked again using byte code.)^
    @@ -7428,9 +7635,11 @@ struct sqlite3_module {
     ** will be returned by the strategy.
     **
     ** The xBestIndex method may optionally populate the idxFlags field with a
    -** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
    -** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
    -** assumes that the strategy may visit at most one row.
    +** mask of SQLITE_INDEX_SCAN_* flags. One such flag is
    +** [SQLITE_INDEX_SCAN_HEX], which if set causes the [EXPLAIN QUERY PLAN]
    +** output to show the idxNum as hex instead of as decimal.  Another flag is
    +** SQLITE_INDEX_SCAN_UNIQUE, which if set indicates that the query plan will
    +** return at most one row.
     **
     ** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
     ** SQLite also assumes that if a call to the xUpdate() method is made as
    @@ -7494,7 +7703,9 @@ struct sqlite3_index_info {
     ** [sqlite3_index_info].idxFlags field to some combination of
     ** these bits.
     */
    -#define SQLITE_INDEX_SCAN_UNIQUE      1     /* Scan visits at most 1 row */
    +#define SQLITE_INDEX_SCAN_UNIQUE 0x00000001 /* Scan visits at most 1 row */
    +#define SQLITE_INDEX_SCAN_HEX    0x00000002 /* Display idxNum as hex */
    +                                            /* in EXPLAIN QUERY PLAN */
     
     /*
     ** CAPI3REF: Virtual Table Constraint Operator Codes
    @@ -7567,7 +7778,7 @@ struct sqlite3_index_info {
     ** the implementation of the [virtual table module].   ^The fourth
     ** parameter is an arbitrary client data pointer that is passed through
     ** into the [xCreate] and [xConnect] methods of the virtual table module
    -** when a new virtual table is be being created or reinitialized.
    +** when a new virtual table is being created or reinitialized.
     **
     ** ^The sqlite3_create_module_v2() interface has a fifth parameter which
     ** is a pointer to a destructor for the pClientData.  ^SQLite will
    @@ -7732,7 +7943,7 @@ typedef struct sqlite3_blob sqlite3_blob;
     ** in *ppBlob. Otherwise an [error code] is returned and, unless the error
     ** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
     ** the API is not misused, it is always safe to call [sqlite3_blob_close()]
    -** on *ppBlob after this function it returns.
    +** on *ppBlob after this function returns.
     **
     ** This function fails with SQLITE_ERROR if any of the following are true:
     ** 
      @@ -7852,7 +8063,7 @@ SQLITE_API int sqlite3_blob_close(sqlite3_blob *); ** ** ^Returns the size in bytes of the BLOB accessible via the ** successfully opened [BLOB handle] in its only argument. ^The -** incremental blob I/O routines can only read or overwriting existing +** incremental blob I/O routines can only read or overwrite existing ** blob content; they cannot change the size of a blob. ** ** This routine only works on a [BLOB handle] which has been created @@ -8002,7 +8213,7 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ^The sqlite3_mutex_alloc() routine allocates a new ** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc() ** routine returns NULL if it is unable to allocate the requested -** mutex. The argument to sqlite3_mutex_alloc() must one of these +** mutex. The argument to sqlite3_mutex_alloc() must be one of these ** integer constants: ** **
        @@ -8235,7 +8446,7 @@ SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*); ** CAPI3REF: Retrieve the mutex for a database connection ** METHOD: sqlite3 ** -** ^This interface returns a pointer the [sqlite3_mutex] object that +** ^This interface returns a pointer to the [sqlite3_mutex] object that ** serializes access to the [database connection] given in the argument ** when the [threading mode] is Serialized. ** ^If the [threading mode] is Single-thread or Multi-thread then this @@ -8331,6 +8542,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ +#define SQLITE_TESTCTRL_GETOPT 16 #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ #define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17 #define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 @@ -8350,14 +8562,14 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 /* NOT USED */ #define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking ** ** These routines provide access to the set of SQL language keywords -** recognized by SQLite. Applications can uses these routines to determine +** recognized by SQLite. Applications can use these routines to determine ** whether or not a specific identifier needs to be escaped (for example, ** by enclosing in double-quotes) so as not to confuse the parser. ** @@ -8525,7 +8737,7 @@ SQLITE_API void sqlite3_str_reset(sqlite3_str*); ** content of the dynamic string under construction in X. The value ** returned by [sqlite3_str_value(X)] is managed by the sqlite3_str object X ** and might be freed or altered by any subsequent method on the same -** [sqlite3_str] object. Applications must not used the pointer returned +** [sqlite3_str] object. Applications must not use the pointer returned by ** [sqlite3_str_value(X)] after any subsequent method call on the same ** object. ^Applications may change the content of the string returned ** by [sqlite3_str_value(X)] as long as they do not write into any bytes @@ -8611,7 +8823,7 @@ SQLITE_API int sqlite3_status64( ** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE] ** buffer and where forced to overflow to [sqlite3_malloc()]. The ** returned value includes allocations that overflowed because they -** where too large (they were larger than the "sz" parameter to +** were too large (they were larger than the "sz" parameter to ** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because ** no space was left in the page cache.)^ ** @@ -8695,28 +8907,29 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(
        SQLITE_DBSTATUS_LOOKASIDE_HIT
        **
        This parameter returns the number of malloc attempts that were ** satisfied using lookaside memory. Only the high-water value is meaningful; -** the current value is always zero.)^ +** the current value is always zero.
        )^ ** ** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]] ** ^(
        SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE
        -**
        This parameter returns the number malloc attempts that might have +**
        This parameter returns the number of malloc attempts that might have ** been satisfied using lookaside memory but failed due to the amount of ** memory requested being larger than the lookaside slot size. ** Only the high-water value is meaningful; -** the current value is always zero.)^ +** the current value is always zero.
        )^ ** ** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]] ** ^(
        SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL
        -**
        This parameter returns the number malloc attempts that might have +**
        This parameter returns the number of malloc attempts that might have ** been satisfied using lookaside memory but failed due to all lookaside ** memory already being in use. ** Only the high-water value is meaningful; -** the current value is always zero.)^ +** the current value is always zero.
        )^ ** ** [[SQLITE_DBSTATUS_CACHE_USED]] ^(
        SQLITE_DBSTATUS_CACHE_USED
        **
        This parameter returns the approximate number of bytes of heap ** memory used by all pager caches associated with the database connection.)^ ** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0. +**
        ** ** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]] ** ^(
        SQLITE_DBSTATUS_CACHE_USED_SHARED
        @@ -8725,10 +8938,10 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** memory used by that pager cache is divided evenly between the attached ** connections.)^ In other words, if none of the pager caches associated ** with the database connection are shared, this request returns the same -** value as DBSTATUS_CACHE_USED. Or, if one or more or the pager caches are +** value as DBSTATUS_CACHE_USED. Or, if one or more of the pager caches are ** shared, the value returned by this call will be smaller than that returned ** by DBSTATUS_CACHE_USED. ^The highwater mark associated with -** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. +** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. ** ** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(
        SQLITE_DBSTATUS_SCHEMA_USED
        **
        This parameter returns the approximate number of bytes of heap @@ -8738,6 +8951,7 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** schema memory is shared with other database connections due to ** [shared cache mode] being enabled. ** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0. +**
        ** ** [[SQLITE_DBSTATUS_STMT_USED]] ^(
        SQLITE_DBSTATUS_STMT_USED
        **
        This parameter returns the approximate number of bytes of heap @@ -8774,7 +8988,7 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** been written to disk in the middle of a transaction due to the page ** cache overflowing. Transactions are more efficient if they are written ** to disk all at once. When pages spill mid-transaction, that introduces -** additional overhead. This parameter can be used help identify +** additional overhead. This parameter can be used to help identify ** inefficiencies that can be resolved by increasing the cache size. **
        ** @@ -8845,13 +9059,13 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** [[SQLITE_STMTSTATUS_SORT]]
        SQLITE_STMTSTATUS_SORT
        **
        ^This is the number of sort operations that have occurred. ** A non-zero value in this counter may indicate an opportunity to -** improvement performance through careful use of indices.
        +** improve performance through careful use of indices. ** ** [[SQLITE_STMTSTATUS_AUTOINDEX]]
        SQLITE_STMTSTATUS_AUTOINDEX
        **
        ^This is the number of rows inserted into transient indices that ** were created automatically in order to help joins run faster. ** A non-zero value in this counter may indicate an opportunity to -** improvement performance by adding permanent indices that do not +** improve performance by adding permanent indices that do not ** need to be reinitialized each time the statement is run.
        ** ** [[SQLITE_STMTSTATUS_VM_STEP]]
        SQLITE_STMTSTATUS_VM_STEP
        @@ -8860,19 +9074,19 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** to 2147483647. The number of virtual machine operations can be ** used as a proxy for the total work done by the prepared statement. ** If the number of virtual machine operations exceeds 2147483647 -** then the value returned by this statement status code is undefined. +** then the value returned by this statement status code is undefined. ** ** [[SQLITE_STMTSTATUS_REPREPARE]]
        SQLITE_STMTSTATUS_REPREPARE
        **
        ^This is the number of times that the prepare statement has been ** automatically regenerated due to schema changes or changes to -** [bound parameters] that might affect the query plan. +** [bound parameters] that might affect the query plan.
        ** ** [[SQLITE_STMTSTATUS_RUN]]
        SQLITE_STMTSTATUS_RUN
        **
        ^This is the number of times that the prepared statement has ** been run. A single "run" for the purposes of this counter is one ** or more calls to [sqlite3_step()] followed by a call to [sqlite3_reset()]. ** The counter is incremented on the first [sqlite3_step()] call of each -** cycle. +** cycle.
        ** ** [[SQLITE_STMTSTATUS_FILTER_MISS]] ** [[SQLITE_STMTSTATUS_FILTER HIT]] @@ -8882,7 +9096,7 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** step was bypassed because a Bloom filter returned not-found. The ** corresponding SQLITE_STMTSTATUS_FILTER_MISS value is the number of ** times that the Bloom filter returned a find, and thus the join step -** had to be processed as normal. +** had to be processed as normal. ** ** [[SQLITE_STMTSTATUS_MEMUSED]]
        SQLITE_STMTSTATUS_MEMUSED
        **
        ^This is the approximate number of bytes of heap memory @@ -8987,9 +9201,9 @@ struct sqlite3_pcache_page { ** SQLite will typically create one cache instance for each open database file, ** though this is not guaranteed. ^The ** first parameter, szPage, is the size in bytes of the pages that must -** be allocated by the cache. ^szPage will always a power of two. ^The +** be allocated by the cache. ^szPage will always be a power of two. ^The ** second parameter szExtra is a number of bytes of extra storage -** associated with each page cache entry. ^The szExtra parameter will +** associated with each page cache entry. ^The szExtra parameter will be ** a number less than 250. SQLite will use the ** extra szExtra bytes on each page to store metadata about the underlying ** database page on disk. The value passed into szExtra depends @@ -8997,17 +9211,17 @@ struct sqlite3_pcache_page { ** ^The third argument to xCreate(), bPurgeable, is true if the cache being ** created will be used to cache database pages of a file stored on disk, or ** false if it is used for an in-memory database. The cache implementation -** does not have to do anything special based with the value of bPurgeable; +** does not have to do anything special based upon the value of bPurgeable; ** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will ** never invoke xUnpin() except to deliberately delete a page. ** ^In other words, calls to xUnpin() on a cache with bPurgeable set to ** false will always have the "discard" flag set to true. -** ^Hence, a cache created with bPurgeable false will +** ^Hence, a cache created with bPurgeable set to false will ** never contain any unpinned pages. ** ** [[the xCachesize() page cache method]] ** ^(The xCachesize() method may be called at any time by SQLite to set the -** suggested maximum cache-size (number of pages stored by) the cache +** suggested maximum cache-size (number of pages stored) for the cache ** instance passed as the first argument. This is the value configured using ** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable ** parameter, the implementation is not required to do anything with this @@ -9034,12 +9248,12 @@ struct sqlite3_pcache_page { ** implementation must return a pointer to the page buffer with its content ** intact. If the requested page is not already in the cache, then the ** cache implementation should use the value of the createFlag -** parameter to help it determined what action to take: +** parameter to help it determine what action to take: ** **
  • **
    createFlag Behavior when page is not already in cache **
    0 Do not allocate a new page. Return NULL. -**
    1 Allocate a new page if it easy and convenient to do so. +**
    1 Allocate a new page if it is easy and convenient to do so. ** Otherwise return NULL. **
    2 Make every effort to allocate a new page. Only return ** NULL if allocating a new page is effectively impossible. @@ -9056,7 +9270,7 @@ struct sqlite3_pcache_page { ** as its second argument. If the third parameter, discard, is non-zero, ** then the page must be evicted from the cache. ** ^If the discard parameter is -** zero, then the page may be discarded or retained at the discretion of +** zero, then the page may be discarded or retained at the discretion of the ** page cache implementation. ^The page cache implementation ** may choose to evict unpinned pages at any time. ** @@ -9074,7 +9288,7 @@ struct sqlite3_pcache_page { ** When SQLite calls the xTruncate() method, the cache must discard all ** existing cache entries with page numbers (keys) greater than or equal ** to the value of the iLimit parameter passed to xTruncate(). If any -** of these pages are pinned, they are implicitly unpinned, meaning that +** of these pages are pinned, they become implicitly unpinned, meaning that ** they can be safely discarded. ** ** [[the xDestroy() page cache method]] @@ -9254,7 +9468,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** external process or via a database connection other than the one being ** used by the backup operation, then the backup will be automatically ** restarted by the next call to sqlite3_backup_step(). ^If the source -** database is modified by the using the same database connection as is used +** database is modified by using the same database connection as is used ** by the backup operation, then the backup database is automatically ** updated at the same time. ** @@ -9271,7 +9485,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** and may not be used following a call to sqlite3_backup_finish(). ** ** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no -** sqlite3_backup_step() errors occurred, regardless or whether or not +** sqlite3_backup_step() errors occurred, regardless of whether or not ** sqlite3_backup_step() completed. ** ^If an out-of-memory condition or IO error occurred during any prior ** sqlite3_backup_step() call on the same [sqlite3_backup] object, then @@ -9326,6 +9540,16 @@ typedef struct sqlite3_backup sqlite3_backup; ** APIs are not strictly speaking threadsafe. If they are invoked at the ** same time as another thread is invoking sqlite3_backup_step() it is ** possible that they return invalid values. +** +** Alternatives To Using The Backup API +** +** Other techniques for safely creating a consistent backup of an SQLite +** database include: +** +**
+** <ul>
+** <li> The [VACUUM INTO] command.
+** <li> The [sqlite3_rsync] utility program.
+** </ul>
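Of the two, [VACUUM INTO] needs no dedicated driver support because it is plain SQL; a minimal sketch through database/sql and this driver (file names are illustrative, not from the patch):

~~~ go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "app.db") // hypothetical source database
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// VACUUM INTO writes a transactionally consistent copy of the whole
	// database into a brand-new file, with no Step()/Finish() loop.
	if _, err := db.Exec(`VACUUM INTO 'backup.db'`); err != nil {
		log.Fatal(err)
	}
}
~~~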
    */ SQLITE_API sqlite3_backup *sqlite3_backup_init( sqlite3 *pDest, /* Destination database handle */ @@ -9363,7 +9587,7 @@ SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); ** application receives an SQLITE_LOCKED error, it may call the ** sqlite3_unlock_notify() method with the blocked connection handle as ** the first argument to register for a callback that will be invoked -** when the blocking connections current transaction is concluded. ^The +** when the blocking connection's current transaction is concluded. ^The ** callback is invoked from within the [sqlite3_step] or [sqlite3_close] ** call that concludes the blocking connection's transaction. ** @@ -9383,7 +9607,7 @@ SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); ** blocked connection already has a registered unlock-notify callback, ** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is ** called with a NULL pointer as its second argument, then any existing -** unlock-notify callback is canceled. ^The blocked connections +** unlock-notify callback is canceled. ^The blocked connection's ** unlock-notify callback may also be canceled by closing the blocked ** connection using [sqlite3_close()]. ** @@ -9781,7 +10005,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** support constraints. In this configuration (which is the default) if ** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire ** statement is rolled back as if [ON CONFLICT | OR ABORT] had been -** specified as part of the users SQL statement, regardless of the actual +** specified as part of the user's SQL statement, regardless of the actual ** ON CONFLICT mode specified. ** ** If X is non-zero, then the virtual table implementation guarantees @@ -9815,7 +10039,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_INNOCUOUS]]
<dt>SQLITE_VTAB_INNOCUOUS</dt>
** <dd>
    Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a @@ -9983,7 +10207,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); **
    ** ** ^For the purposes of comparing virtual table output values to see if the -** values are same value for sorting purposes, two NULL values are considered +** values are the same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" ** (or "IS NOT DISTINCT FROM") and not "==". ** @@ -9993,7 +10217,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); ** ** ^A virtual table implementation is always free to return rows in any order ** it wants, as long as the "orderByConsumed" flag is not set. ^When the -** the "orderByConsumed" flag is unset, the query planner will add extra +** "orderByConsumed" flag is unset, the query planner will add extra ** [bytecode] to ensure that the final results returned by the SQL query are ** ordered correctly. The use of the "orderByConsumed" flag and the ** sqlite3_vtab_distinct() interface is merely an optimization. ^Careful @@ -10090,7 +10314,7 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); ** sqlite3_vtab_in_next(X,P) should be one of the parameters to the ** xFilter method which invokes these routines, and specifically ** a parameter that was previously selected for all-at-once IN constraint -** processing use the [sqlite3_vtab_in()] interface in the +** processing using the [sqlite3_vtab_in()] interface in the ** [xBestIndex|xBestIndex method]. ^(If the X parameter is not ** an xFilter argument that was selected for all-at-once IN constraint ** processing, then these routines return [SQLITE_ERROR].)^ @@ -10145,7 +10369,7 @@ SQLITE_API int sqlite3_vtab_in_next(sqlite3_value *pVal, sqlite3_value **ppOut); ** and only if *V is set to a value. ^The sqlite3_vtab_rhs_value(P,J,V) ** inteface returns SQLITE_NOTFOUND if the right-hand side of the J-th ** constraint is not available. ^The sqlite3_vtab_rhs_value() interface -** can return an result code other than SQLITE_OK or SQLITE_NOTFOUND if +** can return a result code other than SQLITE_OK or SQLITE_NOTFOUND if ** something goes wrong. ** ** The sqlite3_vtab_rhs_value() interface is usually only successful if @@ -10173,8 +10397,8 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** KEYWORDS: {conflict resolution mode} ** ** These constants are returned by [sqlite3_vtab_on_conflict()] to -** inform a [virtual table] implementation what the [ON CONFLICT] mode -** is for the SQL statement being evaluated. +** inform a [virtual table] implementation of the [ON CONFLICT] mode +** for the SQL statement being evaluated. ** ** Note that the [SQLITE_IGNORE] constant is also used as a potential ** return value from the [sqlite3_set_authorizer()] callback and that @@ -10214,39 +10438,39 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** [[SQLITE_SCANSTAT_EST]]

<dt>SQLITE_SCANSTAT_EST</dt>
** <dd>^The "double" variable pointed to by the V parameter will be set to the
** query planner's estimate for the average number of rows output from each
-** iteration of the X-th loop. If the query planner's estimates was accurate,
+** iteration of the X-th loop. If the query planner's estimate was accurate,
** then this value will approximate the quotient NVISIT/NLOOP and the
** product of this value for all prior loops with the same SELECTID will
-** be the NLOOP value for the current loop.
+** be the NLOOP value for the current loop.</dd>
**
** [[SQLITE_SCANSTAT_NAME]] <dt>SQLITE_SCANSTAT_NAME</dt>
** <dd>^The "const char *" variable pointed to by the V parameter will be set
** to a zero-terminated UTF-8 string containing the name of the index or table
-** used for the X-th loop.
+** used for the X-th loop.</dd>
**
** [[SQLITE_SCANSTAT_EXPLAIN]] <dt>SQLITE_SCANSTAT_EXPLAIN</dt>
** <dd>^The "const char *" variable pointed to by the V parameter will be set
** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
-** description for the X-th loop.
+** description for the X-th loop.</dd>
**
** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt>
** <dd>^The "int" variable pointed to by the V parameter will be set to the
** id for the X-th query plan element. The id value is unique within the
** statement. The select-id is the same value as is output in the first
-** column of an [EXPLAIN QUERY PLAN] query.
+** column of an [EXPLAIN QUERY PLAN] query.</dd>
**
** [[SQLITE_SCANSTAT_PARENTID]] <dt>SQLITE_SCANSTAT_PARENTID</dt>
** <dd>The "int" variable pointed to by the V parameter will be set to the
-** the id of the parent of the current query element, if applicable, or
+** id of the parent of the current query element, if applicable, or
** to zero if the query element has no parent. This is the same value as
-** returned in the second column of an [EXPLAIN QUERY PLAN] query.
+** returned in the second column of an [EXPLAIN QUERY PLAN] query.</dd>
**
** [[SQLITE_SCANSTAT_NCYCLE]] <dt>SQLITE_SCANSTAT_NCYCLE</dt>
** <dd>The sqlite3_int64 output value is set to the number of cycles,
** according to the processor time-stamp counter, that elapsed while the
** query element was being processed. This value is not available for
** all query elements - if it is unavailable the output variable is
-** set to -1.
+** set to -1.</dd>
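Because SQLITE_SCANSTAT_SELECTID and SQLITE_SCANSTAT_PARENTID mirror the first two columns of [EXPLAIN QUERY PLAN], the same ids can be inspected from Go without the C-level scanstats interface; a sketch assuming database/sql and fmt are imported (the query passed in is hypothetical):

~~~ go
// printPlan lists each query-plan element with its id and parent id,
// i.e. the values SQLITE_SCANSTAT_SELECTID/PARENTID would report.
func printPlan(db *sql.DB, query string) error {
	rows, err := db.Query("EXPLAIN QUERY PLAN " + query)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var id, parent, notused int
		var detail string
		if err := rows.Scan(&id, &parent, &notused, &detail); err != nil {
			return err
		}
		fmt.Printf("id=%d parent=%d %s\n", id, parent, detail)
	}
	return rows.Err()
}
~~~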
    ** */ #define SQLITE_SCANSTAT_NLOOP 0 @@ -10287,8 +10511,8 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. ** ** Parameter "idx" identifies the specific query element to retrieve statistics -** for. Query elements are numbered starting from zero. A value of -1 may be -** to query for statistics regarding the entire query. ^If idx is out of range +** for. Query elements are numbered starting from zero. A value of -1 may +** retrieve statistics for the entire query. ^If idx is out of range ** - less than -1 or greater than or equal to the total number of query ** elements used to implement the statement - a non-zero value is returned and ** the variable that pOut points to is unchanged. @@ -10331,7 +10555,7 @@ SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*); ** METHOD: sqlite3 ** ** ^If a write-transaction is open on [database connection] D when the -** [sqlite3_db_cacheflush(D)] interface invoked, any dirty +** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty ** pages in the pager-cache that are not currently in use are written out ** to disk. A dirty page may be in use if a database cursor created by an ** active SQL statement is reading from it, or if it is page 1 of a database @@ -10445,8 +10669,8 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** triggers; and so forth. ** ** When the [sqlite3_blob_write()] API is used to update a blob column, -** the pre-update hook is invoked with SQLITE_DELETE. This is because the -** in this case the new values are not available. In this case, when a +** the pre-update hook is invoked with SQLITE_DELETE, because +** the new values are not yet available. In this case, when a ** callback made with op==SQLITE_DELETE is actually a write using the ** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns ** the index of the column being written. In other cases, where the @@ -10525,6 +10749,14 @@ typedef struct sqlite3_snapshot { ** If there is not already a read-transaction open on schema S when ** this function is called, one is opened automatically. ** +** If a read-transaction is opened by this function, then it is guaranteed +** that the returned snapshot object may not be invalidated by a database +** writer or checkpointer until after the read-transaction is closed. This +** is not guaranteed if a read-transaction is already open when this +** function is called. In that case, any subsequent write or checkpoint +** operation on the database may invalidate the returned snapshot handle, +** even while the read-transaction remains open. +** ** The following must be true for this function to succeed. If any of ** the following statements are false when sqlite3_snapshot_get() is ** called, SQLITE_ERROR is returned. The final value of *P is undefined @@ -10682,15 +10914,16 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c /* ** CAPI3REF: Serialize a database ** -** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory -** that is a serialization of the S database on [database connection] D. +** The sqlite3_serialize(D,S,P,F) interface returns a pointer to +** memory that is a serialization of the S database on +** [database connection] D. If S is a NULL pointer, the main database is used. ** If P is not a NULL pointer, then the size of the database in bytes ** is written into *P. 
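go-sqlite3 surfaces this interface as SQLiteConn.Serialize (and its inverse as SQLiteConn.Deserialize) on builds that include the serialization API; a sketch of snapshotting the "main" schema, assuming those methods are available and conn is a *sql.Conn:

~~~ go
var buf []byte
// Conn.Raw exposes the underlying driver connection.
err := conn.Raw(func(driverConn interface{}) error {
	src, ok := driverConn.(*sqlite3.SQLiteConn)
	if !ok {
		return fmt.Errorf("not a go-sqlite3 connection")
	}
	var serr error
	buf, serr = src.Serialize("main") // in-memory image of the "main" schema
	return serr
})
if err != nil {
	log.Fatal(err)
}
// buf can later be loaded into another *sqlite3.SQLiteConn with
// dst.Deserialize(buf, "main").
~~~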
** ** For an ordinary on-disk database file, the serialization is just a ** copy of the disk file. For an in-memory database or a "TEMP" database, ** the serialization is the same sequence of bytes which would be written -** to disk if that database where backed up to disk. +** to disk if that database were backed up to disk. ** ** The usual case is that sqlite3_serialize() copies the serialization of ** the database into memory obtained from [sqlite3_malloc64()] and returns @@ -10699,7 +10932,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c ** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations ** are made, and the sqlite3_serialize() function will return a pointer ** to the contiguous memory representation of the database that SQLite -** is currently using for that database, or NULL if the no such contiguous +** is currently using for that database, or NULL if no such contiguous ** memory representation of the database exists. A contiguous memory ** representation of the database will usually only exist if there has ** been a prior call to [sqlite3_deserialize(D,S,...)] with the same @@ -10770,7 +11003,7 @@ SQLITE_API unsigned char *sqlite3_serialize( ** database is currently in a read transaction or is involved in a backup ** operation. ** -** It is not possible to deserialized into the TEMP database. If the +** It is not possible to deserialize into the TEMP database. If the ** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the ** function returns SQLITE_ERROR. ** @@ -10792,7 +11025,7 @@ SQLITE_API int sqlite3_deserialize( sqlite3 *db, /* The database connection */ const char *zSchema, /* Which DB to reopen with the deserialization */ unsigned char *pData, /* The serialized database content */ - sqlite3_int64 szDb, /* Number bytes in the deserialization */ + sqlite3_int64 szDb, /* Number of bytes in the deserialization */ sqlite3_int64 szBuf, /* Total size of buffer pData[] */ unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */ ); @@ -10800,7 +11033,7 @@ SQLITE_API int sqlite3_deserialize( /* ** CAPI3REF: Flags for sqlite3_deserialize() ** -** The following are allowed values for 6th argument (the F argument) to +** The following are allowed values for the 6th argument (the F argument) to ** the [sqlite3_deserialize(D,S,P,N,M,F)] interface. ** ** The SQLITE_DESERIALIZE_FREEONCLOSE means that the database serialization @@ -10833,8 +11066,6 @@ SQLITE_API int sqlite3_deserialize( #if defined(__wasi__) # undef SQLITE_WASI # define SQLITE_WASI 1 -# undef SQLITE_OMIT_WAL -# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ # ifndef SQLITE_OMIT_LOAD_EXTENSION # define SQLITE_OMIT_LOAD_EXTENSION # endif @@ -10846,7 +11077,7 @@ SQLITE_API int sqlite3_deserialize( #ifdef __cplusplus } /* End of the 'extern "C"' block */ #endif -#endif /* SQLITE3_H */ +/* #endif for SQLITE3_H will be added by mksqlite3.tcl */ /******** Begin file sqlite3rtree.h *********/ /* @@ -11327,9 +11558,10 @@ SQLITE_API void sqlite3session_table_filter( ** is inserted while a session object is enabled, then later deleted while ** the same session object is disabled, no INSERT record will appear in the ** changeset, even though the delete took place while the session was disabled. -** Or, if one field of a row is updated while a session is disabled, and -** another field of the same row is updated while the session is enabled, the -** resulting changeset will contain an UPDATE change that updates both fields. 
+** Or, if one field of a row is updated while a session is enabled, and +** then another field of the same row is updated while the session is disabled, +** the resulting changeset will contain an UPDATE change that updates both +** fields. */ SQLITE_API int sqlite3session_changeset( sqlite3_session *pSession, /* Session object */ @@ -11401,8 +11633,9 @@ SQLITE_API sqlite3_int64 sqlite3session_changeset_size(sqlite3_session *pSession ** database zFrom the contents of the two compatible tables would be ** identical. ** -** It an error if database zFrom does not exist or does not contain the -** required compatible table. +** Unless the call to this function is a no-op as described above, it is an +** error if database zFrom does not exist or does not contain the required +** compatible table. ** ** If the operation is successful, SQLITE_OK is returned. Otherwise, an SQLite ** error code. In this case, if argument pzErrMsg is not NULL, *pzErrMsg @@ -11537,7 +11770,7 @@ SQLITE_API int sqlite3changeset_start_v2( ** The following flags may passed via the 4th parameter to ** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]: ** -**
<dt>SQLITE_CHANGESETAPPLY_INVERT <dd>
+** <dt>SQLITE_CHANGESETSTART_INVERT <dd>
    ** Invert the changeset while iterating through it. This is equivalent to ** inverting a changeset using sqlite3changeset_invert() before applying it. ** It is an error to specify this flag with a patchset. @@ -11852,19 +12085,6 @@ SQLITE_API int sqlite3changeset_concat( void **ppOut /* OUT: Buffer containing output changeset */ ); - -/* -** CAPI3REF: Upgrade the Schema of a Changeset/Patchset -*/ -SQLITE_API int sqlite3changeset_upgrade( - sqlite3 *db, - const char *zDb, - int nIn, const void *pIn, /* Input changeset */ - int *pnOut, void **ppOut /* OUT: Inverse of input */ -); - - - /* ** CAPI3REF: Changegroup Handle ** @@ -13037,6 +13257,10 @@ struct Fts5PhraseIter { ** (i.e. if it is a contentless table), then this API always iterates ** through an empty set (all calls to xPhraseFirst() set iCol to -1). ** +** In all cases, matches are visited in (column ASC, offset ASC) order. +** i.e. all those in column 0, sorted by offset, followed by those in +** column 1, etc. +** ** xPhraseNext() ** See xPhraseFirst above. ** @@ -13093,19 +13317,57 @@ struct Fts5PhraseIter { ** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, ** output variable (*ppToken) is set to point to a buffer containing the ** matching document token, and (*pnToken) to the size of that buffer in -** bytes. This API is not available if the specified token matches a -** prefix query term. In that case both output variables are always set -** to 0. +** bytes. ** ** The output text is not a copy of the document text that was tokenized. ** It is the output of the tokenizer module. For tokendata=1 tables, this ** includes any embedded 0x00 and trailing data. ** +** This API may be slow in some cases if the token identified by parameters +** iIdx and iToken matched a prefix token in the query. In most cases, the +** first call to this API for each prefix token in the query is forced +** to scan the portion of the full-text index that matches the prefix +** token to collect the extra data required by this API. If the prefix +** token matches a large number of token instances in the document set, +** this may be a performance problem. +** +** If the user knows in advance that a query may use this API for a +** prefix token, FTS5 may be configured to collect all required data as part +** of the initial querying of the full-text index, avoiding the second scan +** entirely. This also causes prefix queries that do not use this API to +** run more slowly and use more memory. FTS5 may be configured in this way +** either on a per-table basis using the [FTS5 insttoken | 'insttoken'] +** option, or on a per-query basis using the +** [fts5_insttoken | fts5_insttoken()] user function. +** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. +** +** xColumnLocale(pFts5, iIdx, pzLocale, pnLocale) +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the locale associated +** with column iCol of the current row. Usually, there is no associated +** locale, and output parameters (*pzLocale) and (*pnLocale) are set +** to NULL and 0, respectively. However, if the fts5_locale() function +** was used to associate a locale with the value when it was inserted +** into the fts5 table, then (*pzLocale) is set to point to a nul-terminated +** buffer containing the name of the locale in utf-8 encoding. 
(*pnLocale) +** is set to the size in bytes of the buffer, not including the +** nul-terminator. +** +** If successful, SQLITE_OK is returned. Or, if an error occurs, an +** SQLite error code is returned. The final value of the output parameters +** is undefined in this case. +** +** xTokenize_v2: +** Tokenize text using the tokenizer belonging to the FTS5 table. This +** API is the same as the xTokenize() API, except that it allows a tokenizer +** locale to be specified. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 4 */ void *(*xUserData)(Fts5Context*); @@ -13147,6 +13409,15 @@ struct Fts5ExtensionApi { const char **ppToken, int *pnToken ); int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); + + /* Below this point are iVersion>=4 only */ + int (*xColumnLocale)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xTokenize_v2)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + const char *pLocale, int nLocale, /* Locale to pass to tokenizer */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); }; /* @@ -13167,7 +13438,7 @@ struct Fts5ExtensionApi { ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) -** pointer provided by the application when the fts5_tokenizer object +** pointer provided by the application when the fts5_tokenizer_v2 object ** was registered with FTS5 (the third argument to xCreateTokenizer()). ** The second and third arguments are an array of nul-terminated strings ** containing the tokenizer arguments, if any, specified following the @@ -13191,7 +13462,7 @@ struct Fts5ExtensionApi { ** argument passed to this function is a pointer to an Fts5Tokenizer object ** returned by an earlier call to xCreate(). ** -** The second argument indicates the reason that FTS5 is requesting +** The third argument indicates the reason that FTS5 is requesting ** tokenization of the supplied text. This is always one of the following ** four values: ** @@ -13215,6 +13486,13 @@ struct Fts5ExtensionApi { ** on a columnsize=0 database. ** ** +** The sixth and seventh arguments passed to xTokenize() - pLocale and +** nLocale - are a pointer to a buffer containing the locale to use for +** tokenization (e.g. "en_US") and its size in bytes, respectively. The +** pLocale buffer is not nul-terminated. pLocale may be passed NULL (in +** which case nLocale is always 0) to indicate that the tokenizer should +** use its default locale. +** ** For each token in the input string, the supplied callback xToken() must ** be invoked. The first argument to it should be a copy of the pointer ** passed as the second argument to xTokenize(). The third and fourth @@ -13238,6 +13516,30 @@ struct Fts5ExtensionApi { ** may abandon the tokenization and return any error code other than ** SQLITE_OK or SQLITE_DONE. ** +** If the tokenizer is registered using an fts5_tokenizer_v2 object, +** then the xTokenize() method has two additional arguments - pLocale +** and nLocale. These specify the locale that the tokenizer should use +** for the current request. If pLocale and nLocale are both 0, then the +** tokenizer should use its default locale. Otherwise, pLocale points to +** an nLocale byte buffer containing the name of the locale to use as utf-8 +** text. pLocale is not nul-terminated. 
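On the SQL side, the locale that eventually reaches a v2 tokenizer is attached with the fts5_locale() function on a table created with the locale=1 option; a sketch, assuming an FTS5 build with locale support (table and column names are hypothetical):

~~~ go
// A locale=1 table stores a locale alongside each value.
if _, err := db.Exec(`CREATE VIRTUAL TABLE notes USING fts5(body, locale=1)`); err != nil {
	log.Fatal(err)
}
// fts5_locale() tags the inserted text, so xTokenize_v2() later
// receives "en_US" in its pLocale/nLocale arguments.
if _, err := db.Exec(`INSERT INTO notes(body) VALUES (fts5_locale('en_US', 'some text'))`); err != nil {
	log.Fatal(err)
}
~~~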
+** +** FTS5_TOKENIZER +** +** There is also an fts5_tokenizer object. This is an older, deprecated, +** version of fts5_tokenizer_v2. It is similar except that: +** +**
+** <ul>
+** <li> There is no "iVersion" field, and
+** <li> The xTokenize() method does not take a locale argument.
+** </ul>
    +** +** Legacy fts5_tokenizer tokenizers must be registered using the +** legacy xCreateTokenizer() function, instead of xCreateTokenizer_v2(). +** +** Tokenizer implementations registered using either API may be retrieved +** using both xFindTokenizer() and xFindTokenizer_v2(). +** ** SYNONYM SUPPORT ** ** Custom tokenizers may also support synonyms. Consider a case in which a @@ -13346,6 +13648,33 @@ struct Fts5ExtensionApi { ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer_v2 fts5_tokenizer_v2; +struct fts5_tokenizer_v2 { + int iVersion; /* Currently always 2 */ + + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* +** New code should use the fts5_tokenizer_v2 type to define tokenizer +** implementations. The following type is included for legacy applications +** that still use it. +*/ typedef struct fts5_tokenizer fts5_tokenizer; struct fts5_tokenizer { int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); @@ -13365,6 +13694,7 @@ struct fts5_tokenizer { ); }; + /* Flags that may be passed as the third argument to xTokenize() */ #define FTS5_TOKENIZE_QUERY 0x0001 #define FTS5_TOKENIZE_PREFIX 0x0002 @@ -13384,7 +13714,7 @@ struct fts5_tokenizer { */ typedef struct fts5_api fts5_api; struct fts5_api { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ /* Create a new tokenizer */ int (*xCreateTokenizer)( @@ -13411,6 +13741,25 @@ struct fts5_api { fts5_extension_function xFunction, void (*xDestroy)(void*) ); + + /* APIs below this point are only available if iVersion>=3 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer_v2 *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer + ); }; /* @@ -13424,103 +13773,8 @@ struct fts5_api { #endif /* _FTS5_H */ /******** End of fts5.h *********/ +#endif /* SQLITE3_H */ #else // USE_LIBSQLITE3 // If users really want to link against the system sqlite3 we // need to make this file a noop. - #endif -/* -** 2014-09-08 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the application interface definitions for the -** user-authentication extension feature. -** -** To compile with the user-authentication feature, append this file to -** end of an SQLite amalgamation header file ("sqlite3.h"), then add -** the SQLITE_USER_AUTHENTICATION compile-time option. 
See the -** user-auth.txt file in the same source directory as this file for -** additional information. -*/ -#ifdef SQLITE_USER_AUTHENTICATION - -#ifdef __cplusplus -extern "C" { -#endif - -/* -** If a database contains the SQLITE_USER table, then the -** sqlite3_user_authenticate() interface must be invoked with an -** appropriate username and password prior to enable read and write -** access to the database. -** -** Return SQLITE_OK on success or SQLITE_ERROR if the username/password -** combination is incorrect or unknown. -** -** If the SQLITE_USER table is not present in the database file, then -** this interface is a harmless no-op returnning SQLITE_OK. -*/ -int sqlite3_user_authenticate( - sqlite3 *db, /* The database connection */ - const char *zUsername, /* Username */ - const char *aPW, /* Password or credentials */ - int nPW /* Number of bytes in aPW[] */ -); - -/* -** The sqlite3_user_add() interface can be used (by an admin user only) -** to create a new user. When called on a no-authentication-required -** database, this routine converts the database into an authentication- -** required database, automatically makes the added user an -** administrator, and logs in the current connection as that user. -** The sqlite3_user_add() interface only works for the "main" database, not -** for any ATTACH-ed databases. Any call to sqlite3_user_add() by a -** non-admin user results in an error. -*/ -int sqlite3_user_add( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to be added */ - const char *aPW, /* Password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* True to give new user admin privilege */ -); - -/* -** The sqlite3_user_change() interface can be used to change a users -** login credentials or admin privilege. Any user can change their own -** login credentials. Only an admin user can change another users login -** credentials or admin privilege setting. No user may change their own -** admin privilege setting. -*/ -int sqlite3_user_change( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to change */ - const char *aPW, /* New password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* Modified admin privilege for the user */ -); - -/* -** The sqlite3_user_delete() interface can be used (by an admin user only) -** to delete a user. The currently logged-in user cannot be deleted, -** which guarantees that there is always an admin user and hence that -** the database cannot be converted into a no-authentication-required -** database. -*/ -int sqlite3_user_delete( - sqlite3 *db, /* Database connection */ - const char *zUsername /* Username to remove */ -); - -#ifdef __cplusplus -} /* end of the 'extern "C"' block */ -#endif - -#endif /* SQLITE_USER_AUTHENTICATION */ + #endif \ No newline at end of file diff --git a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3.go index ed2a9e2a3..a967cab09 100644 --- a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -381,7 +381,7 @@ type SQLiteStmt struct { s *C.sqlite3_stmt t string closed bool - cls bool + cls bool // True if the statement was created by SQLiteConn.Query } // SQLiteResult implements sql.Result. @@ -393,12 +393,12 @@ type SQLiteResult struct { // SQLiteRows implements driver.Rows. 
type SQLiteRows struct { s *SQLiteStmt - nc int + nc int32 // Number of columns + cls bool // True if we need to close the parent statement in Close cols []string decltype []string - cls bool - closed bool ctx context.Context // no better alternative to pass context into Next() method + closemu sync.Mutex } type functionInfo struct { @@ -929,6 +929,7 @@ func (c *SQLiteConn) query(ctx context.Context, query string, args []driver.Name s.(*SQLiteStmt).cls = true na := s.NumInput() if len(args)-start < na { + s.Close() return nil, fmt.Errorf("not enough args to execute query: want %d got %d", na, len(args)-start) } // consume the number of arguments used in the current @@ -1875,6 +1876,9 @@ func (c *SQLiteConn) SetLimit(id int, newVal int) int { // This method is not thread-safe as the returned error code can be changed by // another call if invoked concurrently. // +// Use SetFileControlInt64 instead if the argument for the opcode is documented +// as a pointer to a sqlite3_int64. +// // See: sqlite3_file_control, https://www.sqlite.org/c3ref/file_control.html func (c *SQLiteConn) SetFileControlInt(dbName string, op int, arg int) error { if dbName == "" { @@ -1892,6 +1896,34 @@ func (c *SQLiteConn) SetFileControlInt(dbName string, op int, arg int) error { return nil } +// SetFileControlInt64 invokes the xFileControl method on a given database. The +// dbName is the name of the database. It will default to "main" if left blank. +// The op is one of the opcodes prefixed by "SQLITE_FCNTL_". The arg argument +// and return code are both opcode-specific. Please see the SQLite documentation. +// +// This method is not thread-safe as the returned error code can be changed by +// another call if invoked concurrently. +// +// Only use this method if the argument for the opcode is documented as a pointer +// to a sqlite3_int64. +// +// See: sqlite3_file_control, https://www.sqlite.org/c3ref/file_control.html +func (c *SQLiteConn) SetFileControlInt64(dbName string, op int, arg int64) error { + if dbName == "" { + dbName = "main" + } + + cDBName := C.CString(dbName) + defer C.free(unsafe.Pointer(cDBName)) + + cArg := C.sqlite3_int64(arg) + rv := C.sqlite3_file_control(c.db, cDBName, C.int(op), unsafe.Pointer(&cArg)) + if rv != C.SQLITE_OK { + return c.lastError() + } + return nil +} + // Close the statement. func (s *SQLiteStmt) Close() error { s.mu.Lock() @@ -2007,14 +2039,12 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive rows := &SQLiteRows{ s: s, - nc: int(C.sqlite3_column_count(s.s)), + nc: int32(C.sqlite3_column_count(s.s)), + cls: s.cls, cols: nil, decltype: nil, - cls: s.cls, - closed: false, ctx: ctx, } - runtime.SetFinalizer(rows, (*SQLiteRows).Close) return rows, nil } @@ -2111,24 +2141,28 @@ func (s *SQLiteStmt) Readonly() bool { // Close the rows. 
func (rc *SQLiteRows) Close() error { - rc.s.mu.Lock() - if rc.s.closed || rc.closed { - rc.s.mu.Unlock() + rc.closemu.Lock() + defer rc.closemu.Unlock() + s := rc.s + if s == nil { + return nil + } + rc.s = nil // remove reference to SQLiteStmt + s.mu.Lock() + if s.closed { + s.mu.Unlock() return nil } - rc.closed = true if rc.cls { - rc.s.mu.Unlock() - return rc.s.Close() + s.mu.Unlock() + return s.Close() } - rv := C.sqlite3_reset(rc.s.s) + rv := C.sqlite3_reset(s.s) if rv != C.SQLITE_OK { - rc.s.mu.Unlock() - return rc.s.c.lastError() + s.mu.Unlock() + return s.c.lastError() } - rc.s.mu.Unlock() - rc.s = nil - runtime.SetFinalizer(rc, nil) + s.mu.Unlock() return nil } @@ -2136,9 +2170,9 @@ func (rc *SQLiteRows) Close() error { func (rc *SQLiteRows) Columns() []string { rc.s.mu.Lock() defer rc.s.mu.Unlock() - if rc.s.s != nil && rc.nc != len(rc.cols) { + if rc.s.s != nil && int(rc.nc) != len(rc.cols) { rc.cols = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { + for i := 0; i < int(rc.nc); i++ { rc.cols[i] = C.GoString(C.sqlite3_column_name(rc.s.s, C.int(i))) } } @@ -2148,7 +2182,7 @@ func (rc *SQLiteRows) Columns() []string { func (rc *SQLiteRows) declTypes() []string { if rc.s.s != nil && rc.decltype == nil { rc.decltype = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { + for i := 0; i < int(rc.nc); i++ { rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))) } } diff --git a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c index fc37b336c..3a00f43de 100644 --- a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c +++ b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c @@ -5,7 +5,11 @@ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY #include +#ifndef USE_LIBSQLITE3 #include "sqlite3-binding.h" +#else +#include +#endif extern int unlock_notify_wait(sqlite3 *db); diff --git a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go index 76f7bbfb6..3ac8050a4 100644 --- a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go +++ b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go @@ -12,7 +12,11 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY #include +#ifndef USE_LIBSQLITE3 #include "sqlite3-binding.h" +#else +#include +#endif extern void unlock_notify_callback(void *arg, int argc); */ diff --git a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go index 76d840164..5a4927665 100644 --- a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go +++ b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go @@ -16,53 +16,10 @@ package sqlite3 #else #include #endif -#include - -static int -_sqlite3_user_authenticate(sqlite3* db, const char* zUsername, const char* aPW, int nPW) -{ - return sqlite3_user_authenticate(db, zUsername, aPW, nPW); -} - -static int -_sqlite3_user_add(sqlite3* db, const char* zUsername, const char* aPW, int nPW, int isAdmin) -{ - return sqlite3_user_add(db, zUsername, aPW, nPW, isAdmin); -} - -static int -_sqlite3_user_change(sqlite3* db, const char* zUsername, const char* aPW, int nPW, int isAdmin) -{ - return sqlite3_user_change(db, zUsername, aPW, nPW, isAdmin); -} - -static int -_sqlite3_user_delete(sqlite3* db, const char* zUsername) -{ - return sqlite3_user_delete(db, 
zUsername); -} - -static int -_sqlite3_auth_enabled(sqlite3* db) -{ - int exists = -1; - - sqlite3_stmt *stmt; - sqlite3_prepare_v2(db, "select count(type) from sqlite_master WHERE type='table' and name='sqlite_user';", -1, &stmt, NULL); - - while ( sqlite3_step(stmt) == SQLITE_ROW) { - exists = sqlite3_column_int(stmt, 0); - } - - sqlite3_finalize(stmt); - - return exists; -} */ import "C" import ( "errors" - "unsafe" ) const ( @@ -70,8 +27,9 @@ const ( ) var ( - ErrUnauthorized = errors.New("SQLITE_AUTH: Unauthorized") - ErrAdminRequired = errors.New("SQLITE_AUTH: Unauthorized; Admin Privileges Required") + ErrUnauthorized = errors.New("SQLITE_AUTH: Unauthorized") + ErrAdminRequired = errors.New("SQLITE_AUTH: Unauthorized; Admin Privileges Required") + errUserAuthNoLongerSupported = errors.New("sqlite3: the sqlite_userauth tag is no longer supported as the userauth extension is no longer supported by the SQLite authors, see https://github.com/mattn/go-sqlite3/issues/1341") ) // Authenticate will perform an authentication of the provided username @@ -88,15 +46,7 @@ var ( // If the SQLITE_USER table is not present in the database file, then // this interface is a harmless no-op returning SQLITE_OK. func (c *SQLiteConn) Authenticate(username, password string) error { - rv := c.authenticate(username, password) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrUnauthorized - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authenticate provides the actual authentication to SQLite. @@ -109,17 +59,7 @@ func (c *SQLiteConn) Authenticate(username, password string) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authenticate(username, password string) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_authenticate(c.db, cuser, cpass, C.int(len(password)))) + return 1 } // AuthUserAdd can be used (by an admin user only) @@ -131,20 +71,7 @@ func (c *SQLiteConn) authenticate(username, password string) int { // for any ATTACH-ed databases. Any call to AuthUserAdd by a // non-admin user results in an error. func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { - isAdmin := 0 - if admin { - isAdmin = 1 - } - - rv := c.authUserAdd(username, password, isAdmin) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserAdd enables the User Authentication if not enabled. @@ -162,17 +89,7 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_add(c.db, cuser, cpass, C.int(len(password)), C.int(admin))) + return 1 } // AuthUserChange can be used to change a users @@ -181,20 +98,7 @@ func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { // credentials or admin privilege setting. No user may change their own // admin privilege setting. 
func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error { - isAdmin := 0 - if admin { - isAdmin = 1 - } - - rv := c.authUserChange(username, password, isAdmin) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserChange allows to modify a user. @@ -215,17 +119,7 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserChange(username, password string, admin int) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_change(c.db, cuser, cpass, C.int(len(password)), C.int(admin))) + return 1 } // AuthUserDelete can be used (by an admin user only) @@ -234,15 +128,7 @@ func (c *SQLiteConn) authUserChange(username, password string, admin int) int { // the database cannot be converted into a no-authentication-required // database. func (c *SQLiteConn) AuthUserDelete(username string) error { - rv := c.authUserDelete(username) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserDelete can be used to delete a user. @@ -258,25 +144,12 @@ func (c *SQLiteConn) AuthUserDelete(username string) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserDelete(username string) int { - // Allocate C Variables - cuser := C.CString(username) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - }() - - return int(C._sqlite3_user_delete(c.db, cuser)) + return 1 } // AuthEnabled checks if the database is protected by user authentication func (c *SQLiteConn) AuthEnabled() (exists bool) { - rv := c.authEnabled() - if rv == 1 { - exists = true - } - - return + return false } // authEnabled perform the actual check for user authentication. 
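After this change the userauth wrappers are permanent stubs: the C shims are deleted and every entry point reports the same sentinel error. Roughly what a caller built with the sqlite_userauth tag now observes (conn is a *sqlite3.SQLiteConn obtained elsewhere):

~~~ go
if err := conn.Authenticate("admin", "secret"); err != nil {
	// Always non-nil now; the message points at the upstream removal
	// of the userauth extension (go-sqlite3 issue #1341).
	log.Println("user authentication unavailable:", err)
}
// AuthEnabled likewise answers false unconditionally.
fmt.Println(conn.AuthEnabled()) // false
~~~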
@@ -289,7 +162,7 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) { // 0 - Disabled // 1 - Enabled func (c *SQLiteConn) authEnabled() int { - return int(C._sqlite3_auth_enabled(c.db)) + return 0 } // EOF diff --git a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h index 935437bb6..3a5e0a4ed 100644 --- a/tools/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h +++ b/tools/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h @@ -371,6 +371,8 @@ struct sqlite3_api_routines { /* Version 3.44.0 and later */ void *(*get_clientdata)(sqlite3*,const char*); int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); + /* Version 3.50.0 and later */ + int (*setlk_timeout)(sqlite3*,int,int); }; /* @@ -704,6 +706,8 @@ typedef int (*sqlite3_loadext_entry)( /* Version 3.44.0 and later */ #define sqlite3_get_clientdata sqlite3_api->get_clientdata #define sqlite3_set_clientdata sqlite3_api->set_clientdata +/* Version 3.50.0 and later */ +#define sqlite3_setlk_timeout sqlite3_api->setlk_timeout #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) diff --git a/tools/vendor/github.com/miekg/pkcs11/.gitignore b/tools/vendor/github.com/miekg/pkcs11/.gitignore new file mode 100644 index 000000000..5fde17f99 --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/.gitignore @@ -0,0 +1,3 @@ +tags +test_db/*/generation +test_db/*/*.lock diff --git a/tools/vendor/github.com/syndtr/gocapability/LICENSE b/tools/vendor/github.com/miekg/pkcs11/LICENSE similarity index 60% rename from tools/vendor/github.com/syndtr/gocapability/LICENSE rename to tools/vendor/github.com/miekg/pkcs11/LICENSE index 80dd96de7..ce25d13ab 100644 --- a/tools/vendor/github.com/syndtr/gocapability/LICENSE +++ b/tools/vendor/github.com/miekg/pkcs11/LICENSE @@ -1,21 +1,24 @@ -Copyright 2013 Suryandaru Triandana -All rights reserved. +Copyright (c) 2013 Miek Gieben. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Miek Gieben nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY diff --git a/tools/vendor/github.com/miekg/pkcs11/Makefile.release b/tools/vendor/github.com/miekg/pkcs11/Makefile.release new file mode 100644 index 000000000..4f58165f9 --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/Makefile.release @@ -0,0 +1,57 @@ +# Makefile for releasing. +# +# The release is controlled from version.go. The version found there is +# used to tag the git repo, we're not building any artifects so there is nothing +# to upload to github. +# +# * Up the version in version.go +# * Run: make -f Makefile.release release +# * will *commit* your change with 'Release $VERSION' +# * push to github +# + +define GO +//+build ignore + +package main + +import ( + "fmt" + + "github.com/miekg/pkcs11" +) + +func main() { + fmt.Println(pkcs11.Release.String()) +} +endef + +$(file > version_release.go,$(GO)) +VERSION:=$(shell go run -tags release version_release.go) +TAG="v$(VERSION)" + +all: + rm -f version_release.go + @echo Use the \'release\' target to start a release $(VERSION) + +.PHONY: run +run: + rm -f version_release.go + @echo $(VERSION) + +.PHONY: release +release: commit push + @echo Released $(VERSION) + +.PHONY: commit +commit: + rm -f version_release.go + @echo Committing release $(VERSION) + git commit -am"Release $(VERSION)" + git tag $(TAG) + +.PHONY: push +push: + @echo Pushing release $(VERSION) to master + git push --tags + git push diff --git a/tools/vendor/github.com/miekg/pkcs11/README.md b/tools/vendor/github.com/miekg/pkcs11/README.md new file mode 100644 index 000000000..18a361a99 --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/README.md @@ -0,0 +1,68 @@ +# PKCS#11 + +This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom where +it makes sense. It has been tested with SoftHSM. + +## SoftHSM + + * Make it use a custom configuration file `export SOFTHSM_CONF=$PWD/softhsm.conf` + + * Then use `softhsm` to init it + + ~~~ + softhsm --init-token --slot 0 --label test --pin 1234 + ~~~ + + * Then use `libsofthsm2.so` as the pkcs11 module: + + ~~~ go + p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so") + ~~~ + +## Examples + +A skeleton program would look somewhat like this (yes, pkcs#11 is verbose): + +~~~ go +p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so") +err := p.Initialize() +if err != nil { + panic(err) +} + +defer p.Destroy() +defer p.Finalize() + +slots, err := p.GetSlotList(true) +if err != nil { + panic(err) +} + +session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) +if err != nil { + panic(err) +} +defer p.CloseSession(session) + +err = p.Login(session, pkcs11.CKU_USER, "1234") +if err != nil { + panic(err) +} +defer p.Logout(session) + +p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)}) +hash, err := p.Digest(session, []byte("this is a string")) +if err != nil { + panic(err) +} + +for _, d := range hash { + fmt.Printf("%x", d) +} +fmt.Println() +~~~ + +Further examples are included in the tests. 
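A signing flow looks much like the digest example; a sketch in the same vein (mechanisms, attributes and sizes are illustrative, and some tokens require extra attributes such as CKA_PUBLIC_EXPONENT):

~~~ go
pub, priv, err := p.GenerateKeyPair(session,
	[]*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_KEY_PAIR_GEN, nil)},
	[]*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),
		pkcs11.NewAttribute(pkcs11.CKA_MODULUS_BITS, 2048),
	},
	[]*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_SIGN, true),
	},
)
if err != nil {
	panic(err)
}
_ = pub // the public half can be read back with GetAttributeValue

err = p.SignInit(session,
	[]*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA256_RSA_PKCS, nil)}, priv)
if err != nil {
	panic(err)
}
sig, err := p.Sign(session, []byte("message"))
if err != nil {
	panic(err)
}
fmt.Printf("%x\n", sig)
~~~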
+ +To expose PKCS#11 keys using the [crypto.Signer interface](https://golang.org/pkg/crypto/#Signer), +please see [github.com/thalesignite/crypto11](https://github.com/thalesignite/crypto11). diff --git a/tools/vendor/github.com/miekg/pkcs11/error.go b/tools/vendor/github.com/miekg/pkcs11/error.go new file mode 100644 index 000000000..7df0e93a6 --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/error.go @@ -0,0 +1,98 @@ +// Copyright 2013 Miek Gieben. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs11 + +// awk '/#define CKR_/{ print $3":\""$2"\"," }' pkcs11t.h + +var strerror = map[uint]string{ + 0x00000000: "CKR_OK", + 0x00000001: "CKR_CANCEL", + 0x00000002: "CKR_HOST_MEMORY", + 0x00000003: "CKR_SLOT_ID_INVALID", + 0x00000005: "CKR_GENERAL_ERROR", + 0x00000006: "CKR_FUNCTION_FAILED", + 0x00000007: "CKR_ARGUMENTS_BAD", + 0x00000008: "CKR_NO_EVENT", + 0x00000009: "CKR_NEED_TO_CREATE_THREADS", + 0x0000000A: "CKR_CANT_LOCK", + 0x00000010: "CKR_ATTRIBUTE_READ_ONLY", + 0x00000011: "CKR_ATTRIBUTE_SENSITIVE", + 0x00000012: "CKR_ATTRIBUTE_TYPE_INVALID", + 0x00000013: "CKR_ATTRIBUTE_VALUE_INVALID", + 0x00000020: "CKR_DATA_INVALID", + 0x00000021: "CKR_DATA_LEN_RANGE", + 0x00000030: "CKR_DEVICE_ERROR", + 0x00000031: "CKR_DEVICE_MEMORY", + 0x00000032: "CKR_DEVICE_REMOVED", + 0x00000040: "CKR_ENCRYPTED_DATA_INVALID", + 0x00000041: "CKR_ENCRYPTED_DATA_LEN_RANGE", + 0x00000050: "CKR_FUNCTION_CANCELED", + 0x00000051: "CKR_FUNCTION_NOT_PARALLEL", + 0x00000054: "CKR_FUNCTION_NOT_SUPPORTED", + 0x00000060: "CKR_KEY_HANDLE_INVALID", + 0x00000062: "CKR_KEY_SIZE_RANGE", + 0x00000063: "CKR_KEY_TYPE_INCONSISTENT", + 0x00000064: "CKR_KEY_NOT_NEEDED", + 0x00000065: "CKR_KEY_CHANGED", + 0x00000066: "CKR_KEY_NEEDED", + 0x00000067: "CKR_KEY_INDIGESTIBLE", + 0x00000068: "CKR_KEY_FUNCTION_NOT_PERMITTED", + 0x00000069: "CKR_KEY_NOT_WRAPPABLE", + 0x0000006A: "CKR_KEY_UNEXTRACTABLE", + 0x00000070: "CKR_MECHANISM_INVALID", + 0x00000071: "CKR_MECHANISM_PARAM_INVALID", + 0x00000082: "CKR_OBJECT_HANDLE_INVALID", + 0x00000090: "CKR_OPERATION_ACTIVE", + 0x00000091: "CKR_OPERATION_NOT_INITIALIZED", + 0x000000A0: "CKR_PIN_INCORRECT", + 0x000000A1: "CKR_PIN_INVALID", + 0x000000A2: "CKR_PIN_LEN_RANGE", + 0x000000A3: "CKR_PIN_EXPIRED", + 0x000000A4: "CKR_PIN_LOCKED", + 0x000000B0: "CKR_SESSION_CLOSED", + 0x000000B1: "CKR_SESSION_COUNT", + 0x000000B3: "CKR_SESSION_HANDLE_INVALID", + 0x000000B4: "CKR_SESSION_PARALLEL_NOT_SUPPORTED", + 0x000000B5: "CKR_SESSION_READ_ONLY", + 0x000000B6: "CKR_SESSION_EXISTS", + 0x000000B7: "CKR_SESSION_READ_ONLY_EXISTS", + 0x000000B8: "CKR_SESSION_READ_WRITE_SO_EXISTS", + 0x000000C0: "CKR_SIGNATURE_INVALID", + 0x000000C1: "CKR_SIGNATURE_LEN_RANGE", + 0x000000D0: "CKR_TEMPLATE_INCOMPLETE", + 0x000000D1: "CKR_TEMPLATE_INCONSISTENT", + 0x000000E0: "CKR_TOKEN_NOT_PRESENT", + 0x000000E1: "CKR_TOKEN_NOT_RECOGNIZED", + 0x000000E2: "CKR_TOKEN_WRITE_PROTECTED", + 0x000000F0: "CKR_UNWRAPPING_KEY_HANDLE_INVALID", + 0x000000F1: "CKR_UNWRAPPING_KEY_SIZE_RANGE", + 0x000000F2: "CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT", + 0x00000100: "CKR_USER_ALREADY_LOGGED_IN", + 0x00000101: "CKR_USER_NOT_LOGGED_IN", + 0x00000102: "CKR_USER_PIN_NOT_INITIALIZED", + 0x00000103: "CKR_USER_TYPE_INVALID", + 0x00000104: "CKR_USER_ANOTHER_ALREADY_LOGGED_IN", + 0x00000105: "CKR_USER_TOO_MANY_TYPES", + 0x00000110: "CKR_WRAPPED_KEY_INVALID", + 0x00000112: "CKR_WRAPPED_KEY_LEN_RANGE", + 0x00000113: "CKR_WRAPPING_KEY_HANDLE_INVALID", + 0x00000114: 
"CKR_WRAPPING_KEY_SIZE_RANGE", + 0x00000115: "CKR_WRAPPING_KEY_TYPE_INCONSISTENT", + 0x00000120: "CKR_RANDOM_SEED_NOT_SUPPORTED", + 0x00000121: "CKR_RANDOM_NO_RNG", + 0x00000130: "CKR_DOMAIN_PARAMS_INVALID", + 0x00000150: "CKR_BUFFER_TOO_SMALL", + 0x00000160: "CKR_SAVED_STATE_INVALID", + 0x00000170: "CKR_INFORMATION_SENSITIVE", + 0x00000180: "CKR_STATE_UNSAVEABLE", + 0x00000190: "CKR_CRYPTOKI_NOT_INITIALIZED", + 0x00000191: "CKR_CRYPTOKI_ALREADY_INITIALIZED", + 0x000001A0: "CKR_MUTEX_BAD", + 0x000001A1: "CKR_MUTEX_NOT_LOCKED", + 0x000001B0: "CKR_NEW_PIN_MODE", + 0x000001B1: "CKR_NEXT_OTP", + 0x00000200: "CKR_FUNCTION_REJECTED", + 0x80000000: "CKR_VENDOR_DEFINED", +} diff --git a/tools/vendor/github.com/miekg/pkcs11/hsm.db b/tools/vendor/github.com/miekg/pkcs11/hsm.db new file mode 100644 index 0000000000000000000000000000000000000000..eb3f10dadcdf6af2416c79bc91280e9f13f41c72 GIT binary patch literal 10240 zcmeHN4RBP|6@KTOx4Z9cHk;ifu;d5w@<$Q~klk!TQV`i>HyHA>5Eeq1N}HceSmh^# zg%Ht^#evp#6s&a+hejx-TI-aSs@Ph^LPc$-N}&Y-8Emam`LVPr6ouM*_r5Iwwa!=_ zEbg1x_kHKQbI;y$-rjS6k{Q$ML*by-xu|DBYgj9lWC=i;tZ9-YC9;p1ePWl15jiF> z1or6|B!3i5l*atIfIBk6B#Aas1AYTe!FncK6<4#-=*Z88-q6C1;POz%@<3mAa8~R5 zr9n?P+!JbB8V)WoRyX-P0iRae==IIk2HR-~Fgv z*dD%UZMfX4Fu2_1qG+>PZ?VRm7eyShDWf1miS)AGb#(=MI)d|q;b1@r!o?EZbD*hq z$`oIdc8Q5r<*V^G`82Pu-p68M0qBj`gDJI*F?~&wzhQ7_&DZ%@qgZFOYczkocWg|Y zp!piTk!mtIva?~;^l)q2{6Xov7_uDen5U}VH`se&Yb@EdUY6#7k9T`oQ*DE%sYR>v zwG_v`=|2y>z8cTW`heCrv%bEVXXc}yL|=tgRqwB2femUgKe(_fJnutoN{SlL=*Y-` zCD9o4%;-Ru^j=qLg&*qLpney%fyv=;KqNQnFR8Wu$%WJAC+>BSU$~wNr>vmSTyd_@vEWG4OBsGMC%fs4# zx%l`;`lm$a*)4v>GZ4?fmm&j*C?TL^V9^4-&tp~ud?~_@2N};mJOh#`lW*|Luu7tL z=>)w_n`tZUrX%z?Jxvi>Of3|mR@y)wsw5{BQ8uNKou!Z$u8B2 z-K7(oQxIuQiLNn@(@DZuotVeyL@w5eWwcQS6>yX;cNdvt06;9%6;PlP94VeqzOHD` z(@AHpPNW>>OaaU`%fLFmlFfdaMFs=l2Ols&$}-6+0_2Ynm^WNtZl=JT41rp@Kzo`% z+c1G04uNe$1)7EkR8s{iDFO}20)uveWQ!tVl0chP=q-r`SyjOzr0xWxtg^_>LIhPH zDp56=gxGEr*l7?b$;^iWq{^xcJYY1-szDX;B8wU5G`mY?0i4FKcn0DbxJomiCdily zkj)SCOZomk6V^-YO#Wfm14rN_ypKjq$2=U16L^yTN2(RAp36)O8j~wF-(hP?L0ul4{cp2voaHgP(Wwd4p*n4oREJJGKpDoVMFE?VrZamaA>G83t1JlG^E0+&y*Ou!J#c*`xk7IQHR5SF>=1?RdGuE+OW%lovJ+vWHd*bMU1I5Rr z+%#|c@H1~KnEc?_=B$&;4yLrs2|eJy(~#)aiss&tcyq=R%Ci^hew_G*Hp}vdGe>@Y z=7u%f54FR}6?d&#-G8>#9BJ-9_s!n4;XjEyeB&(#UbruGeAnLIcYgD7ZSEVER@>-x z6MV1#z_P9hLYr5gtLtB#A+6-&KMndN=!fH2j#F_l-imASe*6(WgTKZ7_$NGtXNf3@ zGN^z`=~|jfGbu>#M1NE9+vl2rsl)kCC1L@{JF<7@wOQZWxbApI>4~?x{|1NKt6CR- zWwP(-)29|4>TUemU!24Ib4HactlR%$?VJ0)o3wpb!uZGTbqty0o}=CFo^^2Y)DbaX z`Ny2nJp&`Yz3I6}ZLh4@;r}4(-Ha`ni+?rHdtlxE((A2#`%;XfhVHy|Tl%;=mhKvO z>b+b1Db8sP2XD%*q4JFrcmI8Ia+Xm7R-W`lbF@C?YfQ)LQ*=%J@de6~&CFPVV&_k+ z23d2M#3~f(CCZu`lSPd(=W&@AoOVdbXPgx&99*T;k=#I1*cL=Bh*3}rxhZRgU@qcX z?x7o$QH--n#k8YiT8m^Zj&T{xV`AJ4=CLuJ0OoNqZUl3Q&ZRYy>g3$QzTT#~V$^Oe z<&;Mion+;R#cg5=R*Sw6#mHQy2LL%_#`~l^n8*GGfXSGHKAeX=Y^J}N?cKkFn_(Vq z!zb}M+=qwpH9UcD<9RZWl^mq8(*PGuATKr0ESgK5w2+oT-{(3U{*Rs82tkw0nCfuC%msWKn)>;+~=8DE|d+qGH&MhReB zT(Ghc!}hjOEMyk^KTa-BoJo<<3T)_=h-EA@VRd8>&oRl!20z~%N3lR4DmspgIsjse z1@4KR0uD4Hkham2^lREfudq|Wx9Ds%&GB12 z1D`enVr=9PJ0|3#C5JL%gW!s +#include +#include "pkcs11go.h" + +static inline void putOAEPParams(CK_RSA_PKCS_OAEP_PARAMS_PTR params, CK_VOID_PTR pSourceData, CK_ULONG ulSourceDataLen) +{ + params->pSourceData = pSourceData; + params->ulSourceDataLen = ulSourceDataLen; +} + +static inline void putECDH1SharedParams(CK_ECDH1_DERIVE_PARAMS_PTR params, CK_VOID_PTR pSharedData, CK_ULONG ulSharedDataLen) +{ + params->pSharedData = pSharedData; + params->ulSharedDataLen = ulSharedDataLen; +} + +static inline void 
putECDH1PublicParams(CK_ECDH1_DERIVE_PARAMS_PTR params, CK_VOID_PTR pPublicData, CK_ULONG ulPublicDataLen) +{ + params->pPublicData = pPublicData; + params->ulPublicDataLen = ulPublicDataLen; +} +*/ +import "C" +import "unsafe" + +// GCMParams represents the parameters for the AES-GCM mechanism. +type GCMParams struct { + arena + params *C.CK_GCM_PARAMS + iv []byte + aad []byte + tagSize int +} + +// NewGCMParams returns a pointer to AES-GCM parameters that can be used with the CKM_AES_GCM mechanism. +// The Free() method must be called after the operation is complete. +// +// Note that some HSMs, like CloudHSM, will ignore the IV you pass in and write their +// own. As a result, to support all libraries, memory is not freed +// automatically, so that after the EncryptInit/Encrypt operation the HSM's IV +// can be read back out. It is up to the caller to ensure that Free() is called +// on the GCMParams object at an appropriate time, which is after +// +// Encrypt/Decrypt. As an example: +// +// gcmParams := pkcs11.NewGCMParams(make([]byte, 12), nil, 128) +// p.ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_GCM, gcmParams)}, +// aesObjHandle) +// ct, _ := p.ctx.Encrypt(session, pt) +// iv := gcmParams.IV() +// gcmParams.Free() +// +func NewGCMParams(iv, aad []byte, tagSize int) *GCMParams { + return &GCMParams{ + iv: iv, + aad: aad, + tagSize: tagSize, + } +} + +func cGCMParams(p *GCMParams) []byte { + params := C.CK_GCM_PARAMS{ + ulTagBits: C.CK_ULONG(p.tagSize), + } + var arena arena + if len(p.iv) > 0 { + iv, ivLen := arena.Allocate(p.iv) + params.pIv = C.CK_BYTE_PTR(iv) + params.ulIvLen = ivLen + params.ulIvBits = ivLen * 8 + } + if len(p.aad) > 0 { + aad, aadLen := arena.Allocate(p.aad) + params.pAAD = C.CK_BYTE_PTR(aad) + params.ulAADLen = aadLen + } + p.Free() + p.arena = arena + p.params = ¶ms + return C.GoBytes(unsafe.Pointer(¶ms), C.int(unsafe.Sizeof(params))) +} + +// IV returns a copy of the actual IV used for the operation. +// +// Some HSMs may ignore the user-specified IV and write their own at the end of +// the encryption operation; this method allows you to retrieve it. +func (p *GCMParams) IV() []byte { + if p == nil || p.params == nil { + return nil + } + newIv := C.GoBytes(unsafe.Pointer(p.params.pIv), C.int(p.params.ulIvLen)) + iv := make([]byte, len(newIv)) + copy(iv, newIv) + return iv +} + +// Free deallocates the memory reserved for the HSM to write back the actual IV. +// +// This must be called after the entire operation is complete, i.e. after +// Encrypt or EncryptFinal. It is safe to call Free multiple times. +func (p *GCMParams) Free() { + if p == nil || p.arena == nil { + return + } + p.arena.Free() + p.params = nil + p.arena = nil +} + +// NewPSSParams creates a CK_RSA_PKCS_PSS_PARAMS structure and returns it as a byte array for use with the CKM_RSA_PKCS_PSS mechanism. +func NewPSSParams(hashAlg, mgf, saltLength uint) []byte { + p := C.CK_RSA_PKCS_PSS_PARAMS{ + hashAlg: C.CK_MECHANISM_TYPE(hashAlg), + mgf: C.CK_RSA_PKCS_MGF_TYPE(mgf), + sLen: C.CK_ULONG(saltLength), + } + return C.GoBytes(unsafe.Pointer(&p), C.int(unsafe.Sizeof(p))) +} + +// OAEPParams can be passed to NewMechanism to implement CKM_RSA_PKCS_OAEP. +type OAEPParams struct { + HashAlg uint + MGF uint + SourceType uint + SourceData []byte +} + +// NewOAEPParams creates a CK_RSA_PKCS_OAEP_PARAMS structure suitable for use with the CKM_RSA_PKCS_OAEP mechanism. 
+func NewOAEPParams(hashAlg, mgf, sourceType uint, sourceData []byte) *OAEPParams { + return &OAEPParams{ + HashAlg: hashAlg, + MGF: mgf, + SourceType: sourceType, + SourceData: sourceData, + } +} + +func cOAEPParams(p *OAEPParams, arena arena) ([]byte, arena) { + params := C.CK_RSA_PKCS_OAEP_PARAMS{ + hashAlg: C.CK_MECHANISM_TYPE(p.HashAlg), + mgf: C.CK_RSA_PKCS_MGF_TYPE(p.MGF), + source: C.CK_RSA_PKCS_OAEP_SOURCE_TYPE(p.SourceType), + } + if len(p.SourceData) != 0 { + buf, len := arena.Allocate(p.SourceData) + // field is unaligned on windows so this has to call into C + C.putOAEPParams(&params, buf, len) + } + return C.GoBytes(unsafe.Pointer(&params), C.int(unsafe.Sizeof(params))), arena +} + +// ECDH1DeriveParams can be passed to NewMechanism to implement CK_ECDH1_DERIVE_PARAMS. +type ECDH1DeriveParams struct { + KDF uint + SharedData []byte + PublicKeyData []byte +} + +// NewECDH1DeriveParams creates a CK_ECDH1_DERIVE_PARAMS structure suitable for use with the CKM_ECDH1_DERIVE mechanism. +func NewECDH1DeriveParams(kdf uint, sharedData []byte, publicKeyData []byte) *ECDH1DeriveParams { + return &ECDH1DeriveParams{ + KDF: kdf, + SharedData: sharedData, + PublicKeyData: publicKeyData, + } +} + +func cECDH1DeriveParams(p *ECDH1DeriveParams, arena arena) ([]byte, arena) { + params := C.CK_ECDH1_DERIVE_PARAMS{ + kdf: C.CK_EC_KDF_TYPE(p.KDF), + } + + // SharedData MUST be null if key derivation function (KDF) is CKD_NULL + if len(p.SharedData) != 0 { + sharedData, sharedDataLen := arena.Allocate(p.SharedData) + C.putECDH1SharedParams(&params, sharedData, sharedDataLen) + } + + publicKeyData, publicKeyDataLen := arena.Allocate(p.PublicKeyData) + C.putECDH1PublicParams(&params, publicKeyData, publicKeyDataLen) + + return C.GoBytes(unsafe.Pointer(&params), C.int(unsafe.Sizeof(params))), arena +} diff --git a/tools/vendor/github.com/miekg/pkcs11/pkcs11.go b/tools/vendor/github.com/miekg/pkcs11/pkcs11.go new file mode 100644 index 000000000..e1b5824ec --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/pkcs11.go @@ -0,0 +1,1609 @@ +// Copyright 2013 Miek Gieben. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run const_generate.go + +// Package pkcs11 is a wrapper around the PKCS#11 cryptographic library. +package pkcs11 + +// It is *assumed*, that: +// +// * Go's uint size == PKCS11's CK_ULONG size +// * CK_ULONG never overflows an Go int + +/* +#cgo windows CFLAGS: -DPACKED_STRUCTURES +#cgo linux LDFLAGS: -ldl +#cgo darwin LDFLAGS: -ldl +#cgo openbsd LDFLAGS: +#cgo freebsd LDFLAGS: -ldl + +#include <stdlib.h> +#include <string.h> +#include <stdio.h> +#include <unistd.h> + +#include "pkcs11go.h" + +#ifdef _WIN32 +#include <windows.h> + +struct ctx { + HMODULE handle; + CK_FUNCTION_LIST_PTR sym; +}; + +// New initializes a ctx and fills the symbol table. +struct ctx *New(const char *module) +{ + CK_C_GetFunctionList list; + struct ctx *c = calloc(1, sizeof(struct ctx)); + c->handle = LoadLibrary(module); + if (c->handle == NULL) { + free(c); + return NULL; + } + list = (CK_C_GetFunctionList) GetProcAddress(c->handle, "C_GetFunctionList"); + if (list == NULL) { + free(c); + return NULL; + } + list(&c->sym); + return c; +} + +// Destroy cleans up a ctx. +void Destroy(struct ctx *c) +{ + if (!c) { + return; + } + free(c); +} +#else +#include <dlfcn.h> + +struct ctx { + void *handle; + CK_FUNCTION_LIST_PTR sym; +}; + +// New initializes a ctx and fills the symbol table.
+struct ctx *New(const char *module) +{ + CK_C_GetFunctionList list; + struct ctx *c = calloc(1, sizeof(struct ctx)); + c->handle = dlopen(module, RTLD_LAZY); + if (c->handle == NULL) { + free(c); + return NULL; + } + list = (CK_C_GetFunctionList) dlsym(c->handle, "C_GetFunctionList"); + if (list == NULL) { + free(c); + return NULL; + } + list(&c->sym); + return c; +} + +// Destroy cleans up a ctx. +void Destroy(struct ctx *c) +{ + if (!c) { + return; + } + if (c->handle == NULL) { + return; + } + if (dlclose(c->handle) < 0) { + return; + } + free(c); +} +#endif + +CK_RV Initialize(struct ctx * c) +{ + CK_C_INITIALIZE_ARGS args; + memset(&args, 0, sizeof(args)); + args.flags = CKF_OS_LOCKING_OK; + return c->sym->C_Initialize(&args); +} + +CK_RV Finalize(struct ctx * c) +{ + return c->sym->C_Finalize(NULL); +} + +CK_RV GetInfo(struct ctx * c, ckInfoPtr info) +{ + CK_INFO p; + CK_RV e = c->sym->C_GetInfo(&p); + if (e != CKR_OK) { + return e; + } + info->cryptokiVersion = p.cryptokiVersion; + memcpy(info->manufacturerID, p.manufacturerID, sizeof(p.manufacturerID)); + info->flags = p.flags; + memcpy(info->libraryDescription, p.libraryDescription, sizeof(p.libraryDescription)); + info->libraryVersion = p.libraryVersion; + return e; +} + +CK_RV GetSlotList(struct ctx * c, CK_BBOOL tokenPresent, + CK_ULONG_PTR * slotList, CK_ULONG_PTR ulCount) +{ + CK_RV e = c->sym->C_GetSlotList(tokenPresent, NULL, ulCount); + if (e != CKR_OK) { + return e; + } + *slotList = calloc(*ulCount, sizeof(CK_SLOT_ID)); + e = c->sym->C_GetSlotList(tokenPresent, *slotList, ulCount); + return e; +} + +CK_RV GetSlotInfo(struct ctx * c, CK_ULONG slotID, CK_SLOT_INFO_PTR info) +{ + CK_RV e = c->sym->C_GetSlotInfo((CK_SLOT_ID) slotID, info); + return e; +} + +CK_RV GetTokenInfo(struct ctx * c, CK_ULONG slotID, CK_TOKEN_INFO_PTR info) +{ + CK_RV e = c->sym->C_GetTokenInfo((CK_SLOT_ID) slotID, info); + return e; +} + +CK_RV GetMechanismList(struct ctx * c, CK_ULONG slotID, + CK_ULONG_PTR * mech, CK_ULONG_PTR mechlen) +{ + CK_RV e = + c->sym->C_GetMechanismList((CK_SLOT_ID) slotID, NULL, mechlen); + // Gemaltos PKCS11 implementation returns CKR_BUFFER_TOO_SMALL on a NULL ptr instad of CKR_OK as the spec states. 
+ if (e != CKR_OK && e != CKR_BUFFER_TOO_SMALL) { + return e; + } + *mech = calloc(*mechlen, sizeof(CK_MECHANISM_TYPE)); + e = c->sym->C_GetMechanismList((CK_SLOT_ID) slotID, + (CK_MECHANISM_TYPE_PTR) * mech, mechlen); + return e; +} + +CK_RV GetMechanismInfo(struct ctx * c, CK_ULONG slotID, CK_MECHANISM_TYPE mech, + CK_MECHANISM_INFO_PTR info) +{ + CK_RV e = c->sym->C_GetMechanismInfo((CK_SLOT_ID) slotID, mech, info); + return e; +} + +CK_RV InitToken(struct ctx * c, CK_ULONG slotID, char *pin, CK_ULONG pinlen, + char *label) +{ + CK_RV e = + c->sym->C_InitToken((CK_SLOT_ID) slotID, (CK_UTF8CHAR_PTR) pin, + pinlen, (CK_UTF8CHAR_PTR) label); + return e; +} + +CK_RV InitPIN(struct ctx * c, CK_SESSION_HANDLE sh, char *pin, CK_ULONG pinlen) +{ + CK_RV e = c->sym->C_InitPIN(sh, (CK_UTF8CHAR_PTR) pin, pinlen); + return e; +} + +CK_RV SetPIN(struct ctx * c, CK_SESSION_HANDLE sh, char *oldpin, + CK_ULONG oldpinlen, char *newpin, CK_ULONG newpinlen) +{ + CK_RV e = c->sym->C_SetPIN(sh, (CK_UTF8CHAR_PTR) oldpin, oldpinlen, + (CK_UTF8CHAR_PTR) newpin, newpinlen); + return e; +} + +CK_RV OpenSession(struct ctx * c, CK_ULONG slotID, CK_ULONG flags, + CK_SESSION_HANDLE_PTR session) +{ + CK_RV e = + c->sym->C_OpenSession((CK_SLOT_ID) slotID, (CK_FLAGS) flags, NULL, + NULL, session); + return e; +} + +CK_RV CloseSession(struct ctx * c, CK_SESSION_HANDLE session) +{ + CK_RV e = c->sym->C_CloseSession(session); + return e; +} + +CK_RV CloseAllSessions(struct ctx * c, CK_ULONG slotID) +{ + CK_RV e = c->sym->C_CloseAllSessions(slotID); + return e; +} + +CK_RV GetSessionInfo(struct ctx * c, CK_SESSION_HANDLE session, + CK_SESSION_INFO_PTR info) +{ + CK_RV e = c->sym->C_GetSessionInfo(session, info); + return e; +} + +CK_RV GetOperationState(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR * state, CK_ULONG_PTR statelen) +{ + CK_RV rv = c->sym->C_GetOperationState(session, NULL, statelen); + if (rv != CKR_OK) { + return rv; + } + *state = calloc(*statelen, sizeof(CK_BYTE)); + if (*state == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_GetOperationState(session, *state, statelen); + return rv; +} + +CK_RV SetOperationState(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR state, CK_ULONG statelen, + CK_OBJECT_HANDLE encryptkey, CK_OBJECT_HANDLE authkey) +{ + return c->sym->C_SetOperationState(session, state, statelen, encryptkey, + authkey); +} + +CK_RV Login(struct ctx *c, CK_SESSION_HANDLE session, CK_USER_TYPE userType, + char *pin, CK_ULONG pinLen) +{ + if (pinLen == 0) { + pin = NULL; + } + CK_RV e = + c->sym->C_Login(session, userType, (CK_UTF8CHAR_PTR) pin, pinLen); + return e; +} + +CK_RV Logout(struct ctx * c, CK_SESSION_HANDLE session) +{ + CK_RV e = c->sym->C_Logout(session); + return e; +} + +CK_RV CreateObject(struct ctx * c, CK_SESSION_HANDLE session, + CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount, + CK_OBJECT_HANDLE_PTR obj) +{ + return c->sym->C_CreateObject(session, temp, tempCount, obj); +} + +CK_RV CopyObject(struct ctx * c, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE o, + CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount, + CK_OBJECT_HANDLE_PTR obj) +{ + return c->sym->C_CopyObject(session, o, temp, tempCount, obj); +} + +CK_RV DestroyObject(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE object) +{ + CK_RV e = c->sym->C_DestroyObject(session, object); + return e; +} + +CK_RV GetObjectSize(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE object, CK_ULONG_PTR size) +{ + CK_RV e = c->sym->C_GetObjectSize(session, object, size); + return e; +} + +CK_RV 
GetAttributeValue(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE object, CK_ATTRIBUTE_PTR temp, + CK_ULONG templen) +{ + // Call for the first time, check the returned ulValue in the attributes, then + // allocate enough space and try again. + CK_RV e = c->sym->C_GetAttributeValue(session, object, temp, templen); + if (e != CKR_OK) { + return e; + } + CK_ULONG i; + for (i = 0; i < templen; i++) { + if ((CK_LONG) temp[i].ulValueLen == -1) { + // either access denied or no such object + continue; + } + temp[i].pValue = calloc(temp[i].ulValueLen, sizeof(CK_BYTE)); + } + return c->sym->C_GetAttributeValue(session, object, temp, templen); +} + +CK_RV SetAttributeValue(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE object, CK_ATTRIBUTE_PTR temp, + CK_ULONG templen) +{ + return c->sym->C_SetAttributeValue(session, object, temp, templen); +} + +CK_RV FindObjectsInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount) +{ + return c->sym->C_FindObjectsInit(session, temp, tempCount); +} + +CK_RV FindObjects(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE_PTR * obj, CK_ULONG max, + CK_ULONG_PTR objCount) +{ + *obj = calloc(max, sizeof(CK_OBJECT_HANDLE)); + CK_RV e = c->sym->C_FindObjects(session, *obj, max, objCount); + return e; +} + +CK_RV FindObjectsFinal(struct ctx * c, CK_SESSION_HANDLE session) +{ + CK_RV e = c->sym->C_FindObjectsFinal(session); + return e; +} + +CK_RV EncryptInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_EncryptInit(session, mechanism, key); +} + +CK_RV Encrypt(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR message, + CK_ULONG mlen, CK_BYTE_PTR * enc, CK_ULONG_PTR enclen) +{ + CK_RV rv = c->sym->C_Encrypt(session, message, mlen, NULL, enclen); + if (rv != CKR_OK) { + return rv; + } + *enc = calloc(*enclen, sizeof(CK_BYTE)); + if (*enc == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_Encrypt(session, message, mlen, *enc, enclen); + return rv; +} + +CK_RV EncryptUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR plain, CK_ULONG plainlen, CK_BYTE_PTR * cipher, + CK_ULONG_PTR cipherlen) +{ + CK_RV rv = + c->sym->C_EncryptUpdate(session, plain, plainlen, NULL, cipherlen); + if (rv != CKR_OK) { + return rv; + } + *cipher = calloc(*cipherlen, sizeof(CK_BYTE)); + if (*cipher == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_EncryptUpdate(session, plain, plainlen, *cipher, + cipherlen); + return rv; +} + +CK_RV EncryptFinal(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR * cipher, CK_ULONG_PTR cipherlen) +{ + CK_RV rv = c->sym->C_EncryptFinal(session, NULL, cipherlen); + if (rv != CKR_OK) { + return rv; + } + *cipher = calloc(*cipherlen, sizeof(CK_BYTE)); + if (*cipher == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_EncryptFinal(session, *cipher, cipherlen); + return rv; +} + +CK_RV DecryptInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_DecryptInit(session, mechanism, key); +} + +CK_RV Decrypt(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR cipher, + CK_ULONG clen, CK_BYTE_PTR * plain, CK_ULONG_PTR plainlen) +{ + CK_RV e = c->sym->C_Decrypt(session, cipher, clen, NULL, plainlen); + if (e != CKR_OK) { + return e; + } + *plain = calloc(*plainlen, sizeof(CK_BYTE)); + if (*plain == NULL) { + return CKR_HOST_MEMORY; + } + e = c->sym->C_Decrypt(session, cipher, clen, *plain, plainlen); + return e; +} + 
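+// Note: the buffer-returning wrappers in this file follow the usual PKCS#11 +// two-call convention: a first call with a NULL output buffer asks the token +// for the required length, a buffer of that size is calloc'ed, and a second +// call fills it in. The Go side then copies the result out with C.GoBytes +// and frees the C buffer. +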
+CK_RV DecryptUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR cipher, CK_ULONG cipherlen, CK_BYTE_PTR * part, + CK_ULONG_PTR partlen) +{ + CK_RV rv = + c->sym->C_DecryptUpdate(session, cipher, cipherlen, NULL, partlen); + if (rv != CKR_OK) { + return rv; + } + *part = calloc(*partlen, sizeof(CK_BYTE)); + if (*part == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DecryptUpdate(session, cipher, cipherlen, *part, + partlen); + return rv; +} + +CK_RV DecryptFinal(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR * plain, CK_ULONG_PTR plainlen) +{ + CK_RV rv = c->sym->C_DecryptFinal(session, NULL, plainlen); + if (rv != CKR_OK) { + return rv; + } + *plain = calloc(*plainlen, sizeof(CK_BYTE)); + if (*plain == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DecryptFinal(session, *plain, plainlen); + return rv; +} + +CK_RV DigestInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism) +{ + return c->sym->C_DigestInit(session, mechanism); +} + +CK_RV Digest(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR message, + CK_ULONG mlen, CK_BYTE_PTR * hash, CK_ULONG_PTR hashlen) +{ + CK_RV rv = c->sym->C_Digest(session, message, mlen, NULL, hashlen); + if (rv != CKR_OK) { + return rv; + } + *hash = calloc(*hashlen, sizeof(CK_BYTE)); + if (*hash == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_Digest(session, message, mlen, *hash, hashlen); + return rv; +} + +CK_RV DigestUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR message, CK_ULONG mlen) +{ + CK_RV rv = c->sym->C_DigestUpdate(session, message, mlen); + return rv; +} + +CK_RV DigestKey(struct ctx * c, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE key) +{ + CK_RV rv = c->sym->C_DigestKey(session, key); + return rv; +} + +CK_RV DigestFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR * hash, + CK_ULONG_PTR hashlen) +{ + CK_RV rv = c->sym->C_DigestFinal(session, NULL, hashlen); + if (rv != CKR_OK) { + return rv; + } + *hash = calloc(*hashlen, sizeof(CK_BYTE)); + if (*hash == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DigestFinal(session, *hash, hashlen); + return rv; +} + +CK_RV SignInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_SignInit(session, mechanism, key); +} + +CK_RV Sign(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR message, + CK_ULONG mlen, CK_BYTE_PTR * sig, CK_ULONG_PTR siglen) +{ + CK_RV rv = c->sym->C_Sign(session, message, mlen, NULL, siglen); + if (rv != CKR_OK) { + return rv; + } + *sig = calloc(*siglen, sizeof(CK_BYTE)); + if (*sig == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_Sign(session, message, mlen, *sig, siglen); + return rv; +} + +CK_RV SignUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR message, CK_ULONG mlen) +{ + CK_RV rv = c->sym->C_SignUpdate(session, message, mlen); + return rv; +} + +CK_RV SignFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR * sig, + CK_ULONG_PTR siglen) +{ + CK_RV rv = c->sym->C_SignFinal(session, NULL, siglen); + if (rv != CKR_OK) { + return rv; + } + *sig = calloc(*siglen, sizeof(CK_BYTE)); + if (*sig == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_SignFinal(session, *sig, siglen); + return rv; +} + +CK_RV SignRecoverInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_SignRecoverInit(session, mechanism, key); +} + +CK_RV SignRecover(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR data, + 
CK_ULONG datalen, CK_BYTE_PTR * sig, CK_ULONG_PTR siglen) +{ + CK_RV rv = c->sym->C_SignRecover(session, data, datalen, NULL, siglen); + if (rv != CKR_OK) { + return rv; + } + *sig = calloc(*siglen, sizeof(CK_BYTE)); + if (*sig == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_SignRecover(session, data, datalen, *sig, siglen); + return rv; +} + +CK_RV VerifyInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_VerifyInit(session, mechanism, key); +} + +CK_RV Verify(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR message, + CK_ULONG mesglen, CK_BYTE_PTR sig, CK_ULONG siglen) +{ + CK_RV rv = c->sym->C_Verify(session, message, mesglen, sig, siglen); + return rv; +} + +CK_RV VerifyUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR part, CK_ULONG partlen) +{ + CK_RV rv = c->sym->C_VerifyUpdate(session, part, partlen); + return rv; +} + +CK_RV VerifyFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR sig, + CK_ULONG siglen) +{ + CK_RV rv = c->sym->C_VerifyFinal(session, sig, siglen); + return rv; +} + +CK_RV VerifyRecoverInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_VerifyRecoverInit(session, mechanism, key); +} + +CK_RV VerifyRecover(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR sig, + CK_ULONG siglen, CK_BYTE_PTR * data, CK_ULONG_PTR datalen) +{ + CK_RV rv = c->sym->C_VerifyRecover(session, sig, siglen, NULL, datalen); + if (rv != CKR_OK) { + return rv; + } + *data = calloc(*datalen, sizeof(CK_BYTE)); + if (*data == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_VerifyRecover(session, sig, siglen, *data, datalen); + return rv; +} + +CK_RV DigestEncryptUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR part, CK_ULONG partlen, CK_BYTE_PTR * enc, + CK_ULONG_PTR enclen) +{ + CK_RV rv = + c->sym->C_DigestEncryptUpdate(session, part, partlen, NULL, enclen); + if (rv != CKR_OK) { + return rv; + } + *enc = calloc(*enclen, sizeof(CK_BYTE)); + if (*enc == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DigestEncryptUpdate(session, part, partlen, *enc, + enclen); + return rv; +} + +CK_RV DecryptDigestUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR cipher, CK_ULONG cipherlen, + CK_BYTE_PTR * part, CK_ULONG_PTR partlen) +{ + CK_RV rv = + c->sym->C_DecryptDigestUpdate(session, cipher, cipherlen, NULL, + partlen); + if (rv != CKR_OK) { + return rv; + } + *part = calloc(*partlen, sizeof(CK_BYTE)); + if (*part == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DecryptDigestUpdate(session, cipher, cipherlen, *part, + partlen); + return rv; +} + +CK_RV SignEncryptUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR part, CK_ULONG partlen, CK_BYTE_PTR * enc, + CK_ULONG_PTR enclen) +{ + CK_RV rv = + c->sym->C_SignEncryptUpdate(session, part, partlen, NULL, enclen); + if (rv != CKR_OK) { + return rv; + } + *enc = calloc(*enclen, sizeof(CK_BYTE)); + if (*enc == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_SignEncryptUpdate(session, part, partlen, *enc, enclen); + return rv; +} + +CK_RV DecryptVerifyUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR cipher, CK_ULONG cipherlen, + CK_BYTE_PTR * part, CK_ULONG_PTR partlen) +{ + CK_RV rv = + c->sym->C_DecryptVerifyUpdate(session, cipher, cipherlen, NULL, + partlen); + if (rv != CKR_OK) { + return rv; + } + *part = calloc(*partlen, sizeof(CK_BYTE)); + if (*part == NULL) { + return CKR_HOST_MEMORY; + } 
+ rv = c->sym->C_DecryptVerifyUpdate(session, cipher, cipherlen, *part, + partlen); + return rv; +} + +CK_RV GenerateKey(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_ATTRIBUTE_PTR temp, + CK_ULONG tempCount, CK_OBJECT_HANDLE_PTR key) +{ + return c->sym->C_GenerateKey(session, mechanism, temp, tempCount, key); +} + +CK_RV GenerateKeyPair(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_ATTRIBUTE_PTR pub, + CK_ULONG pubCount, CK_ATTRIBUTE_PTR priv, + CK_ULONG privCount, CK_OBJECT_HANDLE_PTR pubkey, + CK_OBJECT_HANDLE_PTR privkey) +{ + return c->sym->C_GenerateKeyPair(session, mechanism, pub, pubCount, + priv, privCount, pubkey, privkey); +} + +CK_RV WrapKey(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE wrappingkey, + CK_OBJECT_HANDLE key, CK_BYTE_PTR * wrapped, + CK_ULONG_PTR wrappedlen) +{ + CK_RV rv = c->sym->C_WrapKey(session, mechanism, wrappingkey, key, NULL, + wrappedlen); + if (rv != CKR_OK) { + return rv; + } + *wrapped = calloc(*wrappedlen, sizeof(CK_BYTE)); + if (*wrapped == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_WrapKey(session, mechanism, wrappingkey, key, *wrapped, + wrappedlen); + return rv; +} + +CK_RV DeriveKey(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE basekey, + CK_ATTRIBUTE_PTR a, CK_ULONG alen, CK_OBJECT_HANDLE_PTR key) +{ + return c->sym->C_DeriveKey(session, mechanism, basekey, a, alen, key); +} + +CK_RV UnwrapKey(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE unwrappingkey, + CK_BYTE_PTR wrappedkey, CK_ULONG wrappedkeylen, + CK_ATTRIBUTE_PTR a, CK_ULONG alen, CK_OBJECT_HANDLE_PTR key) +{ + return c->sym->C_UnwrapKey(session, mechanism, unwrappingkey, wrappedkey, + wrappedkeylen, a, alen, key); +} + +CK_RV SeedRandom(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR seed, + CK_ULONG seedlen) +{ + CK_RV e = c->sym->C_SeedRandom(session, seed, seedlen); + return e; +} + +CK_RV GenerateRandom(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR * rand, CK_ULONG length) +{ + *rand = calloc(length, sizeof(CK_BYTE)); + if (*rand == NULL) { + return CKR_HOST_MEMORY; + } + CK_RV e = c->sym->C_GenerateRandom(session, *rand, length); + return e; +} + +CK_RV WaitForSlotEvent(struct ctx * c, CK_FLAGS flags, CK_ULONG_PTR slot) +{ + CK_RV e = + c->sym->C_WaitForSlotEvent(flags, (CK_SLOT_ID_PTR) slot, NULL); + return e; +} + +static inline CK_VOID_PTR getAttributePval(CK_ATTRIBUTE_PTR a) +{ + return a->pValue; +} + +*/ +import "C" +import ( + "strings" + "unsafe" +) + +// Ctx contains the current pkcs11 context. +type Ctx struct { + ctx *C.struct_ctx +} + +// New creates a new context and initializes the module/library for use. +func New(module string) *Ctx { + c := new(Ctx) + mod := C.CString(module) + defer C.free(unsafe.Pointer(mod)) + c.ctx = C.New(mod) + if c.ctx == nil { + return nil + } + return c +} + +// Destroy unloads the module/library and frees any remaining memory. +func (c *Ctx) Destroy() { + if c == nil || c.ctx == nil { + return + } + C.Destroy(c.ctx) + c.ctx = nil +} + +// Initialize initializes the Cryptoki library. +func (c *Ctx) Initialize() error { + e := C.Initialize(c.ctx) + return toError(e) +} + +// Finalize indicates that an application is done with the Cryptoki library. 
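+// +// A typical teardown sequence might look like this (a sketch; session is +// assumed to be the handle obtained from OpenSession): +// +// ctx.Logout(session) +// ctx.CloseSession(session) +// ctx.Finalize() +// ctx.Destroy()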
+func (c *Ctx) Finalize() error { + if c.ctx == nil { + return toError(CKR_CRYPTOKI_NOT_INITIALIZED) + } + e := C.Finalize(c.ctx) + return toError(e) +} + +// GetInfo returns general information about Cryptoki. +func (c *Ctx) GetInfo() (Info, error) { + var p C.ckInfo + e := C.GetInfo(c.ctx, &p) + i := Info{ + CryptokiVersion: toVersion(p.cryptokiVersion), + ManufacturerID: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&p.manufacturerID[0]), 32)), " "), + Flags: uint(p.flags), + LibraryDescription: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&p.libraryDescription[0]), 32)), " "), + LibraryVersion: toVersion(p.libraryVersion), + } + return i, toError(e) +} + +// GetSlotList obtains a list of slots in the system. +func (c *Ctx) GetSlotList(tokenPresent bool) ([]uint, error) { + var ( + slotList C.CK_ULONG_PTR + ulCount C.CK_ULONG + ) + e := C.GetSlotList(c.ctx, cBBool(tokenPresent), &slotList, &ulCount) + if toError(e) != nil { + return nil, toError(e) + } + l := toList(slotList, ulCount) + return l, nil +} + +// GetSlotInfo obtains information about a particular slot in the system. +func (c *Ctx) GetSlotInfo(slotID uint) (SlotInfo, error) { + var csi C.CK_SLOT_INFO + e := C.GetSlotInfo(c.ctx, C.CK_ULONG(slotID), &csi) + s := SlotInfo{ + SlotDescription: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&csi.slotDescription[0]), 64)), " "), + ManufacturerID: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&csi.manufacturerID[0]), 32)), " "), + Flags: uint(csi.flags), + HardwareVersion: toVersion(csi.hardwareVersion), + FirmwareVersion: toVersion(csi.firmwareVersion), + } + return s, toError(e) +} + +// GetTokenInfo obtains information about a particular token +// in the system. +func (c *Ctx) GetTokenInfo(slotID uint) (TokenInfo, error) { + var cti C.CK_TOKEN_INFO + e := C.GetTokenInfo(c.ctx, C.CK_ULONG(slotID), &cti) + s := TokenInfo{ + Label: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.label[0]), 32)), " "), + ManufacturerID: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.manufacturerID[0]), 32)), " "), + Model: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.model[0]), 16)), " "), + SerialNumber: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.serialNumber[0]), 16)), " "), + Flags: uint(cti.flags), + MaxSessionCount: uint(cti.ulMaxSessionCount), + SessionCount: uint(cti.ulSessionCount), + MaxRwSessionCount: uint(cti.ulMaxRwSessionCount), + RwSessionCount: uint(cti.ulRwSessionCount), + MaxPinLen: uint(cti.ulMaxPinLen), + MinPinLen: uint(cti.ulMinPinLen), + TotalPublicMemory: uint(cti.ulTotalPublicMemory), + FreePublicMemory: uint(cti.ulFreePublicMemory), + TotalPrivateMemory: uint(cti.ulTotalPrivateMemory), + FreePrivateMemory: uint(cti.ulFreePrivateMemory), + HardwareVersion: toVersion(cti.hardwareVersion), + FirmwareVersion: toVersion(cti.firmwareVersion), + UTCTime: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.utcTime[0]), 16)), " "), + } + return s, toError(e) +} + +// GetMechanismList obtains a list of mechanism types supported by a token. +func (c *Ctx) GetMechanismList(slotID uint) ([]*Mechanism, error) { + var ( + mech C.CK_ULONG_PTR // in pkcs#11 we're all CK_ULONGs \o/ + mechlen C.CK_ULONG + ) + e := C.GetMechanismList(c.ctx, C.CK_ULONG(slotID), &mech, &mechlen) + if toError(e) != nil { + return nil, toError(e) + } + // Although the function returns only type, cast them back into real + // attributes as this is used in other functions. 
+ m := make([]*Mechanism, int(mechlen)) + for i, typ := range toList(mech, mechlen) { + m[i] = NewMechanism(typ, nil) + } + return m, nil +} + +// GetMechanismInfo obtains information about a particular +// mechanism possibly supported by a token. +func (c *Ctx) GetMechanismInfo(slotID uint, m []*Mechanism) (MechanismInfo, error) { + var cm C.CK_MECHANISM_INFO + e := C.GetMechanismInfo(c.ctx, C.CK_ULONG(slotID), C.CK_MECHANISM_TYPE(m[0].Mechanism), + C.CK_MECHANISM_INFO_PTR(&cm)) + mi := MechanismInfo{ + MinKeySize: uint(cm.ulMinKeySize), + MaxKeySize: uint(cm.ulMaxKeySize), + Flags: uint(cm.flags), + } + return mi, toError(e) +} + +// InitToken initializes a token. The label must be 32 characters +// long, it is blank padded if it is not. If it is longer it is capped +// to 32 characters. +func (c *Ctx) InitToken(slotID uint, pin string, label string) error { + p := C.CString(pin) + defer C.free(unsafe.Pointer(p)) + ll := len(label) + for ll < 32 { + label += " " + ll++ + } + l := C.CString(label[:32]) + defer C.free(unsafe.Pointer(l)) + e := C.InitToken(c.ctx, C.CK_ULONG(slotID), p, C.CK_ULONG(len(pin)), l) + return toError(e) +} + +// InitPIN initializes the normal user's PIN. +func (c *Ctx) InitPIN(sh SessionHandle, pin string) error { + p := C.CString(pin) + defer C.free(unsafe.Pointer(p)) + e := C.InitPIN(c.ctx, C.CK_SESSION_HANDLE(sh), p, C.CK_ULONG(len(pin))) + return toError(e) +} + +// SetPIN modifies the PIN of the user who is logged in. +func (c *Ctx) SetPIN(sh SessionHandle, oldpin string, newpin string) error { + old := C.CString(oldpin) + defer C.free(unsafe.Pointer(old)) + new := C.CString(newpin) + defer C.free(unsafe.Pointer(new)) + e := C.SetPIN(c.ctx, C.CK_SESSION_HANDLE(sh), old, C.CK_ULONG(len(oldpin)), new, C.CK_ULONG(len(newpin))) + return toError(e) +} + +// OpenSession opens a session between an application and a token. +func (c *Ctx) OpenSession(slotID uint, flags uint) (SessionHandle, error) { + var s C.CK_SESSION_HANDLE + e := C.OpenSession(c.ctx, C.CK_ULONG(slotID), C.CK_ULONG(flags), C.CK_SESSION_HANDLE_PTR(&s)) + return SessionHandle(s), toError(e) +} + +// CloseSession closes a session between an application and a token. +func (c *Ctx) CloseSession(sh SessionHandle) error { + if c.ctx == nil { + return toError(CKR_CRYPTOKI_NOT_INITIALIZED) + } + e := C.CloseSession(c.ctx, C.CK_SESSION_HANDLE(sh)) + return toError(e) +} + +// CloseAllSessions closes all sessions with a token. +func (c *Ctx) CloseAllSessions(slotID uint) error { + if c.ctx == nil { + return toError(CKR_CRYPTOKI_NOT_INITIALIZED) + } + e := C.CloseAllSessions(c.ctx, C.CK_ULONG(slotID)) + return toError(e) +} + +// GetSessionInfo obtains information about the session. +func (c *Ctx) GetSessionInfo(sh SessionHandle) (SessionInfo, error) { + var csi C.CK_SESSION_INFO + e := C.GetSessionInfo(c.ctx, C.CK_SESSION_HANDLE(sh), &csi) + s := SessionInfo{SlotID: uint(csi.slotID), + State: uint(csi.state), + Flags: uint(csi.flags), + DeviceError: uint(csi.ulDeviceError), + } + return s, toError(e) +} + +// GetOperationState obtains the state of the cryptographic operation in a session. 
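+// +// Paired with SetOperationState, this can checkpoint an operation and resume +// it later (a sketch; both sessions are assumed to be open on the same +// token): +// +// state, err := ctx.GetOperationState(session) +// if err == nil { +// err = ctx.SetOperationState(otherSession, state, 0, 0) +// }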
+func (c *Ctx) GetOperationState(sh SessionHandle) ([]byte, error) { + var ( + state C.CK_BYTE_PTR + statelen C.CK_ULONG + ) + e := C.GetOperationState(c.ctx, C.CK_SESSION_HANDLE(sh), &state, &statelen) + defer C.free(unsafe.Pointer(state)) + if toError(e) != nil { + return nil, toError(e) + } + b := C.GoBytes(unsafe.Pointer(state), C.int(statelen)) + return b, nil +} + +// SetOperationState restores the state of the cryptographic operation in a session. +func (c *Ctx) SetOperationState(sh SessionHandle, state []byte, encryptKey, authKey ObjectHandle) error { + e := C.SetOperationState(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_BYTE_PTR(unsafe.Pointer(&state[0])), + C.CK_ULONG(len(state)), C.CK_OBJECT_HANDLE(encryptKey), C.CK_OBJECT_HANDLE(authKey)) + return toError(e) +} + +// Login logs a user into a token. +func (c *Ctx) Login(sh SessionHandle, userType uint, pin string) error { + p := C.CString(pin) + defer C.free(unsafe.Pointer(p)) + e := C.Login(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_USER_TYPE(userType), p, C.CK_ULONG(len(pin))) + return toError(e) +} + +// Logout logs a user out from a token. +func (c *Ctx) Logout(sh SessionHandle) error { + if c.ctx == nil { + return toError(CKR_CRYPTOKI_NOT_INITIALIZED) + } + e := C.Logout(c.ctx, C.CK_SESSION_HANDLE(sh)) + return toError(e) +} + +// CreateObject creates a new object. +func (c *Ctx) CreateObject(sh SessionHandle, temp []*Attribute) (ObjectHandle, error) { + var obj C.CK_OBJECT_HANDLE + arena, t, tcount := cAttributeList(temp) + defer arena.Free() + e := C.CreateObject(c.ctx, C.CK_SESSION_HANDLE(sh), t, tcount, C.CK_OBJECT_HANDLE_PTR(&obj)) + e1 := toError(e) + if e1 == nil { + return ObjectHandle(obj), nil + } + return 0, e1 +} + +// CopyObject copies an object, creating a new object for the copy. +func (c *Ctx) CopyObject(sh SessionHandle, o ObjectHandle, temp []*Attribute) (ObjectHandle, error) { + var obj C.CK_OBJECT_HANDLE + arena, t, tcount := cAttributeList(temp) + defer arena.Free() + + e := C.CopyObject(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(o), t, tcount, C.CK_OBJECT_HANDLE_PTR(&obj)) + e1 := toError(e) + if e1 == nil { + return ObjectHandle(obj), nil + } + return 0, e1 +} + +// DestroyObject destroys an object. +func (c *Ctx) DestroyObject(sh SessionHandle, oh ObjectHandle) error { + e := C.DestroyObject(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(oh)) + return toError(e) +} + +// GetObjectSize gets the size of an object in bytes. +func (c *Ctx) GetObjectSize(sh SessionHandle, oh ObjectHandle) (uint, error) { + var size C.CK_ULONG + e := C.GetObjectSize(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(oh), &size) + return uint(size), toError(e) +} + +// GetAttributeValue obtains the value of one or more object attributes. 
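+// +// Pass nil values in the template; the token fills them in. A minimal +// sketch, assuming obj is a valid object handle: +// +// attrs, err := ctx.GetAttributeValue(session, obj, []*pkcs11.Attribute{ +// pkcs11.NewAttribute(pkcs11.CKA_LABEL, nil), +// pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, nil), +// })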
+func (c *Ctx) GetAttributeValue(sh SessionHandle, o ObjectHandle, a []*Attribute) ([]*Attribute, error) { + // copy the attribute list and make all the values nil, so that + // the C function can (allocate) fill them in + pa := make([]C.CK_ATTRIBUTE, len(a)) + for i := 0; i < len(a); i++ { + pa[i]._type = C.CK_ATTRIBUTE_TYPE(a[i].Type) + } + e := C.GetAttributeValue(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(o), &pa[0], C.CK_ULONG(len(a))) + if err := toError(e); err != nil { + return nil, err + } + a1 := make([]*Attribute, len(a)) + for i, c := range pa { + x := new(Attribute) + x.Type = uint(c._type) + if int(c.ulValueLen) != -1 { + buf := unsafe.Pointer(C.getAttributePval(&c)) + x.Value = C.GoBytes(buf, C.int(c.ulValueLen)) + C.free(buf) + } + a1[i] = x + } + return a1, nil +} + +// SetAttributeValue modifies the value of one or more object attributes +func (c *Ctx) SetAttributeValue(sh SessionHandle, o ObjectHandle, a []*Attribute) error { + arena, pa, palen := cAttributeList(a) + defer arena.Free() + e := C.SetAttributeValue(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(o), pa, palen) + return toError(e) +} + +// FindObjectsInit initializes a search for token and session +// objects that match a template. +func (c *Ctx) FindObjectsInit(sh SessionHandle, temp []*Attribute) error { + arena, t, tcount := cAttributeList(temp) + defer arena.Free() + e := C.FindObjectsInit(c.ctx, C.CK_SESSION_HANDLE(sh), t, tcount) + return toError(e) +} + +// FindObjects continues a search for token and session +// objects that match a template, obtaining additional object +// handles. Calling the function repeatedly may yield additional results until +// an empty slice is returned. +// +// The returned boolean value is deprecated and should be ignored. +func (c *Ctx) FindObjects(sh SessionHandle, max int) ([]ObjectHandle, bool, error) { + var ( + objectList C.CK_OBJECT_HANDLE_PTR + ulCount C.CK_ULONG + ) + e := C.FindObjects(c.ctx, C.CK_SESSION_HANDLE(sh), &objectList, C.CK_ULONG(max), &ulCount) + if toError(e) != nil { + return nil, false, toError(e) + } + l := toList(C.CK_ULONG_PTR(unsafe.Pointer(objectList)), ulCount) + // Make again a new list of the correct type. + // This is copying data, but this is not an often used function. + o := make([]ObjectHandle, len(l)) + for i, v := range l { + o[i] = ObjectHandle(v) + } + return o, ulCount > C.CK_ULONG(max), nil +} + +// FindObjectsFinal finishes a search for token and session objects. +func (c *Ctx) FindObjectsFinal(sh SessionHandle) error { + e := C.FindObjectsFinal(c.ctx, C.CK_SESSION_HANDLE(sh)) + return toError(e) +} + +// EncryptInit initializes an encryption operation. +func (c *Ctx) EncryptInit(sh SessionHandle, m []*Mechanism, o ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.EncryptInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(o)) + return toError(e) +} + +// Encrypt encrypts single-part data. +func (c *Ctx) Encrypt(sh SessionHandle, message []byte) ([]byte, error) { + var ( + enc C.CK_BYTE_PTR + enclen C.CK_ULONG + ) + e := C.Encrypt(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message)), &enc, &enclen) + if toError(e) != nil { + return nil, toError(e) + } + s := C.GoBytes(unsafe.Pointer(enc), C.int(enclen)) + C.free(unsafe.Pointer(enc)) + return s, nil +} + +// EncryptUpdate continues a multiple-part encryption operation. 
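+// +// A multi-part encryption pairs EncryptInit, one EncryptUpdate per chunk, +// and a closing EncryptFinal (a sketch; mech, key and the chunks are +// assumed): +// +// _ = ctx.EncryptInit(session, mech, key) +// c1, _ := ctx.EncryptUpdate(session, chunk1) +// c2, _ := ctx.EncryptUpdate(session, chunk2) +// tail, _ := ctx.EncryptFinal(session) +// ciphertext := append(append(c1, c2...), tail...)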
+func (c *Ctx) EncryptUpdate(sh SessionHandle, plain []byte) ([]byte, error) { + var ( + part C.CK_BYTE_PTR + partlen C.CK_ULONG + ) + e := C.EncryptUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(plain), C.CK_ULONG(len(plain)), &part, &partlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(part), C.int(partlen)) + C.free(unsafe.Pointer(part)) + return h, nil +} + +// EncryptFinal finishes a multiple-part encryption operation. +func (c *Ctx) EncryptFinal(sh SessionHandle) ([]byte, error) { + var ( + enc C.CK_BYTE_PTR + enclen C.CK_ULONG + ) + e := C.EncryptFinal(c.ctx, C.CK_SESSION_HANDLE(sh), &enc, &enclen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(enc), C.int(enclen)) + C.free(unsafe.Pointer(enc)) + return h, nil +} + +// DecryptInit initializes a decryption operation. +func (c *Ctx) DecryptInit(sh SessionHandle, m []*Mechanism, o ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.DecryptInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(o)) + return toError(e) +} + +// Decrypt decrypts encrypted data in a single part. +func (c *Ctx) Decrypt(sh SessionHandle, cipher []byte) ([]byte, error) { + var ( + plain C.CK_BYTE_PTR + plainlen C.CK_ULONG + ) + e := C.Decrypt(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(cipher), C.CK_ULONG(len(cipher)), &plain, &plainlen) + if toError(e) != nil { + return nil, toError(e) + } + s := C.GoBytes(unsafe.Pointer(plain), C.int(plainlen)) + C.free(unsafe.Pointer(plain)) + return s, nil +} + +// DecryptUpdate continues a multiple-part decryption operation. +func (c *Ctx) DecryptUpdate(sh SessionHandle, cipher []byte) ([]byte, error) { + var ( + part C.CK_BYTE_PTR + partlen C.CK_ULONG + ) + e := C.DecryptUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(cipher), C.CK_ULONG(len(cipher)), &part, &partlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(part), C.int(partlen)) + C.free(unsafe.Pointer(part)) + return h, nil +} + +// DecryptFinal finishes a multiple-part decryption operation. +func (c *Ctx) DecryptFinal(sh SessionHandle) ([]byte, error) { + var ( + plain C.CK_BYTE_PTR + plainlen C.CK_ULONG + ) + e := C.DecryptFinal(c.ctx, C.CK_SESSION_HANDLE(sh), &plain, &plainlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(plain), C.int(plainlen)) + C.free(unsafe.Pointer(plain)) + return h, nil +} + +// DigestInit initializes a message-digesting operation. +func (c *Ctx) DigestInit(sh SessionHandle, m []*Mechanism) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.DigestInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech) + return toError(e) +} + +// Digest digests message in a single part. +func (c *Ctx) Digest(sh SessionHandle, message []byte) ([]byte, error) { + var ( + hash C.CK_BYTE_PTR + hashlen C.CK_ULONG + ) + e := C.Digest(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message)), &hash, &hashlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(hash), C.int(hashlen)) + C.free(unsafe.Pointer(hash)) + return h, nil +} + +// DigestUpdate continues a multiple-part message-digesting operation. 
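+// +// Typical streaming use (a sketch; chunks is an assumed [][]byte): +// +// _ = ctx.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA256, nil)}) +// for _, chunk := range chunks { +// _ = ctx.DigestUpdate(session, chunk) +// } +// sum, _ := ctx.DigestFinal(session)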
+func (c *Ctx) DigestUpdate(sh SessionHandle, message []byte) error { + e := C.DigestUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message))) + if toError(e) != nil { + return toError(e) + } + return nil +} + +// DigestKey continues a multi-part message-digesting +// operation, by digesting the value of a secret key as part of +// the data already digested. +func (c *Ctx) DigestKey(sh SessionHandle, key ObjectHandle) error { + e := C.DigestKey(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(key)) + if toError(e) != nil { + return toError(e) + } + return nil +} + +// DigestFinal finishes a multiple-part message-digesting operation. +func (c *Ctx) DigestFinal(sh SessionHandle) ([]byte, error) { + var ( + hash C.CK_BYTE_PTR + hashlen C.CK_ULONG + ) + e := C.DigestFinal(c.ctx, C.CK_SESSION_HANDLE(sh), &hash, &hashlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(hash), C.int(hashlen)) + C.free(unsafe.Pointer(hash)) + return h, nil +} + +// SignInit initializes a signature (private key encryption) +// operation, where the signature is (will be) an appendix to +// the data, and plaintext cannot be recovered from the signature. +func (c *Ctx) SignInit(sh SessionHandle, m []*Mechanism, o ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.SignInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(o)) + return toError(e) +} + +// Sign signs (encrypts with private key) data in a single part, where the signature +// is (will be) an appendix to the data, and plaintext cannot be recovered from the signature. +func (c *Ctx) Sign(sh SessionHandle, message []byte) ([]byte, error) { + var ( + sig C.CK_BYTE_PTR + siglen C.CK_ULONG + ) + e := C.Sign(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message)), &sig, &siglen) + if toError(e) != nil { + return nil, toError(e) + } + s := C.GoBytes(unsafe.Pointer(sig), C.int(siglen)) + C.free(unsafe.Pointer(sig)) + return s, nil +} + +// SignUpdate continues a multiple-part signature operation, +// where the signature is (will be) an appendix to the data, +// and plaintext cannot be recovered from the signature. +func (c *Ctx) SignUpdate(sh SessionHandle, message []byte) error { + e := C.SignUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message))) + return toError(e) +} + +// SignFinal finishes a multiple-part signature operation returning the signature. +func (c *Ctx) SignFinal(sh SessionHandle) ([]byte, error) { + var ( + sig C.CK_BYTE_PTR + siglen C.CK_ULONG + ) + e := C.SignFinal(c.ctx, C.CK_SESSION_HANDLE(sh), &sig, &siglen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(sig), C.int(siglen)) + C.free(unsafe.Pointer(sig)) + return h, nil +} + +// SignRecoverInit initializes a signature operation, where the data can be recovered from the signature. +func (c *Ctx) SignRecoverInit(sh SessionHandle, m []*Mechanism, key ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.SignRecoverInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(key)) + return toError(e) +} + +// SignRecover signs data in a single operation, where the data can be recovered from the signature. 
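+// +// The mechanism must support signatures with recovery (for RSA, e.g. +// CKM_RSA_X_509); a sketch: +// +// sig, err := ctx.SignRecover(session, data)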
+func (c *Ctx) SignRecover(sh SessionHandle, data []byte) ([]byte, error) { + var ( + sig C.CK_BYTE_PTR + siglen C.CK_ULONG + ) + e := C.SignRecover(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(data), C.CK_ULONG(len(data)), &sig, &siglen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(sig), C.int(siglen)) + C.free(unsafe.Pointer(sig)) + return h, nil +} + +// VerifyInit initializes a verification operation, where the +// signature is an appendix to the data, and plaintext cannot +// be recovered from the signature (e.g. DSA). +func (c *Ctx) VerifyInit(sh SessionHandle, m []*Mechanism, key ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.VerifyInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(key)) + return toError(e) +} + +// Verify verifies a signature in a single-part operation, +// where the signature is an appendix to the data, and plaintext +// cannot be recovered from the signature. +func (c *Ctx) Verify(sh SessionHandle, data []byte, signature []byte) error { + e := C.Verify(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(data), C.CK_ULONG(len(data)), cMessage(signature), C.CK_ULONG(len(signature))) + return toError(e) +} + +// VerifyUpdate continues a multiple-part verification +// operation, where the signature is an appendix to the data, +// and plaintext cannot be recovered from the signature. +func (c *Ctx) VerifyUpdate(sh SessionHandle, part []byte) error { + e := C.VerifyUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(part), C.CK_ULONG(len(part))) + return toError(e) +} + +// VerifyFinal finishes a multiple-part verification +// operation, checking the signature. +func (c *Ctx) VerifyFinal(sh SessionHandle, signature []byte) error { + e := C.VerifyFinal(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(signature), C.CK_ULONG(len(signature))) + return toError(e) +} + +// VerifyRecoverInit initializes a signature verification +// operation, where the data is recovered from the signature. +func (c *Ctx) VerifyRecoverInit(sh SessionHandle, m []*Mechanism, key ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.VerifyRecoverInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(key)) + return toError(e) +} + +// VerifyRecover verifies a signature in a single-part +// operation, where the data is recovered from the signature. +func (c *Ctx) VerifyRecover(sh SessionHandle, signature []byte) ([]byte, error) { + var ( + data C.CK_BYTE_PTR + datalen C.CK_ULONG + ) + e := C.VerifyRecover(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(signature), C.CK_ULONG(len(signature)), &data, &datalen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(data), C.int(datalen)) + C.free(unsafe.Pointer(data)) + return h, nil +} + +// DigestEncryptUpdate continues a multiple-part digesting and encryption operation. +func (c *Ctx) DigestEncryptUpdate(sh SessionHandle, part []byte) ([]byte, error) { + var ( + enc C.CK_BYTE_PTR + enclen C.CK_ULONG + ) + e := C.DigestEncryptUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(part), C.CK_ULONG(len(part)), &enc, &enclen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(enc), C.int(enclen)) + C.free(unsafe.Pointer(enc)) + return h, nil +} + +// DecryptDigestUpdate continues a multiple-part decryption and digesting operation.
+func (c *Ctx) DecryptDigestUpdate(sh SessionHandle, cipher []byte) ([]byte, error) { + var ( + part C.CK_BYTE_PTR + partlen C.CK_ULONG + ) + e := C.DecryptDigestUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(cipher), C.CK_ULONG(len(cipher)), &part, &partlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(part), C.int(partlen)) + C.free(unsafe.Pointer(part)) + return h, nil +} + +// SignEncryptUpdate continues a multiple-part signing and encryption operation. +func (c *Ctx) SignEncryptUpdate(sh SessionHandle, part []byte) ([]byte, error) { + var ( + enc C.CK_BYTE_PTR + enclen C.CK_ULONG + ) + e := C.SignEncryptUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(part), C.CK_ULONG(len(part)), &enc, &enclen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(enc), C.int(enclen)) + C.free(unsafe.Pointer(enc)) + return h, nil +} + +// DecryptVerifyUpdate continues a multiple-part decryption and verify operation. +func (c *Ctx) DecryptVerifyUpdate(sh SessionHandle, cipher []byte) ([]byte, error) { + var ( + part C.CK_BYTE_PTR + partlen C.CK_ULONG + ) + e := C.DecryptVerifyUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(cipher), C.CK_ULONG(len(cipher)), &part, &partlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(part), C.int(partlen)) + C.free(unsafe.Pointer(part)) + return h, nil +} + +// GenerateKey generates a secret key, creating a new key object. +func (c *Ctx) GenerateKey(sh SessionHandle, m []*Mechanism, temp []*Attribute) (ObjectHandle, error) { + var key C.CK_OBJECT_HANDLE + attrarena, t, tcount := cAttributeList(temp) + defer attrarena.Free() + mecharena, mech := cMechanism(m) + defer mecharena.Free() + e := C.GenerateKey(c.ctx, C.CK_SESSION_HANDLE(sh), mech, t, tcount, C.CK_OBJECT_HANDLE_PTR(&key)) + e1 := toError(e) + if e1 == nil { + return ObjectHandle(key), nil + } + return 0, e1 +} + +// GenerateKeyPair generates a public-key/private-key pair creating new key objects. +func (c *Ctx) GenerateKeyPair(sh SessionHandle, m []*Mechanism, public, private []*Attribute) (ObjectHandle, ObjectHandle, error) { + var ( + pubkey C.CK_OBJECT_HANDLE + privkey C.CK_OBJECT_HANDLE + ) + pubarena, pub, pubcount := cAttributeList(public) + defer pubarena.Free() + privarena, priv, privcount := cAttributeList(private) + defer privarena.Free() + mecharena, mech := cMechanism(m) + defer mecharena.Free() + e := C.GenerateKeyPair(c.ctx, C.CK_SESSION_HANDLE(sh), mech, pub, pubcount, priv, privcount, C.CK_OBJECT_HANDLE_PTR(&pubkey), C.CK_OBJECT_HANDLE_PTR(&privkey)) + e1 := toError(e) + if e1 == nil { + return ObjectHandle(pubkey), ObjectHandle(privkey), nil + } + return 0, 0, e1 +} + +// WrapKey wraps (i.e., encrypts) a key. +func (c *Ctx) WrapKey(sh SessionHandle, m []*Mechanism, wrappingkey, key ObjectHandle) ([]byte, error) { + var ( + wrappedkey C.CK_BYTE_PTR + wrappedkeylen C.CK_ULONG + ) + arena, mech := cMechanism(m) + defer arena.Free() + e := C.WrapKey(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(wrappingkey), C.CK_OBJECT_HANDLE(key), &wrappedkey, &wrappedkeylen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(wrappedkey), C.int(wrappedkeylen)) + C.free(unsafe.Pointer(wrappedkey)) + return h, nil +} + +// UnwrapKey unwraps (decrypts) a wrapped key, creating a new key object. 
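+// +// The attribute template describes the key object to create from the wrapped +// bytes. A minimal sketch, assuming kek (the unwrapping key) and wrapped +// already exist: +// +// tmpl := []*pkcs11.Attribute{ +// pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY), +// pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_AES), +// } +// mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_KEY_WRAP, nil)} +// key, err := ctx.UnwrapKey(session, mech, kek, wrapped, tmpl)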
+func (c *Ctx) UnwrapKey(sh SessionHandle, m []*Mechanism, unwrappingkey ObjectHandle, wrappedkey []byte, a []*Attribute) (ObjectHandle, error) { + var key C.CK_OBJECT_HANDLE + attrarena, ac, aclen := cAttributeList(a) + defer attrarena.Free() + mecharena, mech := cMechanism(m) + defer mecharena.Free() + e := C.UnwrapKey(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(unwrappingkey), C.CK_BYTE_PTR(unsafe.Pointer(&wrappedkey[0])), C.CK_ULONG(len(wrappedkey)), ac, aclen, &key) + return ObjectHandle(key), toError(e) +} + +// DeriveKey derives a key from a base key, creating a new key object. +func (c *Ctx) DeriveKey(sh SessionHandle, m []*Mechanism, basekey ObjectHandle, a []*Attribute) (ObjectHandle, error) { + var key C.CK_OBJECT_HANDLE + attrarena, ac, aclen := cAttributeList(a) + defer attrarena.Free() + mecharena, mech := cMechanism(m) + defer mecharena.Free() + e := C.DeriveKey(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(basekey), ac, aclen, &key) + return ObjectHandle(key), toError(e) +} + +// SeedRandom mixes additional seed material into the token's +// random number generator. +func (c *Ctx) SeedRandom(sh SessionHandle, seed []byte) error { + e := C.SeedRandom(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_BYTE_PTR(unsafe.Pointer(&seed[0])), C.CK_ULONG(len(seed))) + return toError(e) +} + +// GenerateRandom generates random data. +func (c *Ctx) GenerateRandom(sh SessionHandle, length int) ([]byte, error) { + var rand C.CK_BYTE_PTR + e := C.GenerateRandom(c.ctx, C.CK_SESSION_HANDLE(sh), &rand, C.CK_ULONG(length)) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(rand), C.int(length)) + C.free(unsafe.Pointer(rand)) + return h, nil +} + +// WaitForSlotEvent returns a channel which returns a slot event +// (token insertion, removal, etc.) when it occurs. +func (c *Ctx) WaitForSlotEvent(flags uint) chan SlotEvent { + sl := make(chan SlotEvent, 1) // hold one element + go c.waitForSlotEventHelper(flags, sl) + return sl +} + +func (c *Ctx) waitForSlotEventHelper(f uint, sl chan SlotEvent) { + var slotID C.CK_ULONG + C.WaitForSlotEvent(c.ctx, C.CK_FLAGS(f), &slotID) + sl <- SlotEvent{uint(slotID)} + close(sl) // TODO(miek): Sending and then closing ...? +} diff --git a/tools/vendor/github.com/miekg/pkcs11/pkcs11.h b/tools/vendor/github.com/miekg/pkcs11/pkcs11.h new file mode 100644 index 000000000..0d78dd711 --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/pkcs11.h @@ -0,0 +1,265 @@ +/* Copyright (c) OASIS Open 2016. All Rights Reserved./ + * /Distributed under the terms of the OASIS IPR Policy, + * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY + * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. + */ + +/* Latest version of the specification: + * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html + */ + +#ifndef _PKCS11_H_ +#define _PKCS11_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* Before including this file (pkcs11.h) (or pkcs11t.h by + * itself), 5 platform-specific macros must be defined. These + * macros are described below, and typical definitions for them + * are also given. Be advised that these definitions can depend + * on both the platform and the compiler used (and possibly also + * on whether a Cryptoki library is linked statically or + * dynamically). 
+ * + * In addition to defining these 5 macros, the packing convention + * for Cryptoki structures should be set. The Cryptoki + * convention on packing is that structures should be 1-byte + * aligned. + * + * If you're using Microsoft Developer Studio 5.0 to produce + * Win32 stuff, this might be done by using the following + * preprocessor directive before including pkcs11.h or pkcs11t.h: + * + * #pragma pack(push, cryptoki, 1) + * + * and using the following preprocessor directive after including + * pkcs11.h or pkcs11t.h: + * + * #pragma pack(pop, cryptoki) + * + * If you're using an earlier version of Microsoft Developer + * Studio to produce Win16 stuff, this might be done by using + * the following preprocessor directive before including + * pkcs11.h or pkcs11t.h: + * + * #pragma pack(1) + * + * In a UNIX environment, you're on your own for this. You might + * not need to do (or be able to do!) anything. + * + * + * Now for the macros: + * + * + * 1. CK_PTR: The indirection string for making a pointer to an + * object. It can be used like this: + * + * typedef CK_BYTE CK_PTR CK_BYTE_PTR; + * + * If you're using Microsoft Developer Studio 5.0 to produce + * Win32 stuff, it might be defined by: + * + * #define CK_PTR * + * + * If you're using an earlier version of Microsoft Developer + * Studio to produce Win16 stuff, it might be defined by: + * + * #define CK_PTR far * + * + * In a typical UNIX environment, it might be defined by: + * + * #define CK_PTR * + * + * + * 2. CK_DECLARE_FUNCTION(returnType, name): A macro which makes + * an importable Cryptoki library function declaration out of a + * return type and a function name. It should be used in the + * following fashion: + * + * extern CK_DECLARE_FUNCTION(CK_RV, C_Initialize)( + * CK_VOID_PTR pReserved + * ); + * + * If you're using Microsoft Developer Studio 5.0 to declare a + * function in a Win32 Cryptoki .dll, it might be defined by: + * + * #define CK_DECLARE_FUNCTION(returnType, name) \ + * returnType __declspec(dllimport) name + * + * If you're using an earlier version of Microsoft Developer + * Studio to declare a function in a Win16 Cryptoki .dll, it + * might be defined by: + * + * #define CK_DECLARE_FUNCTION(returnType, name) \ + * returnType __export _far _pascal name + * + * In a UNIX environment, it might be defined by: + * + * #define CK_DECLARE_FUNCTION(returnType, name) \ + * returnType name + * + * + * 3. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro + * which makes a Cryptoki API function pointer declaration or + * function pointer type declaration out of a return type and a + * function name. It should be used in the following fashion: + * + * // Define funcPtr to be a pointer to a Cryptoki API function + * // taking arguments args and returning CK_RV. + * CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtr)(args); + * + * or + * + * // Define funcPtrType to be the type of a pointer to a + * // Cryptoki API function taking arguments args and returning + * // CK_RV, and then define funcPtr to be a variable of type + * // funcPtrType. 
+ * typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtrType)(args); + * funcPtrType funcPtr; + * + * If you're using Microsoft Developer Studio 5.0 to access + * functions in a Win32 Cryptoki .dll, in might be defined by: + * + * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ + * returnType __declspec(dllimport) (* name) + * + * If you're using an earlier version of Microsoft Developer + * Studio to access functions in a Win16 Cryptoki .dll, it might + * be defined by: + * + * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ + * returnType __export _far _pascal (* name) + * + * In a UNIX environment, it might be defined by: + * + * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ + * returnType (* name) + * + * + * 4. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes + * a function pointer type for an application callback out of + * a return type for the callback and a name for the callback. + * It should be used in the following fashion: + * + * CK_CALLBACK_FUNCTION(CK_RV, myCallback)(args); + * + * to declare a function pointer, myCallback, to a callback + * which takes arguments args and returns a CK_RV. It can also + * be used like this: + * + * typedef CK_CALLBACK_FUNCTION(CK_RV, myCallbackType)(args); + * myCallbackType myCallback; + * + * If you're using Microsoft Developer Studio 5.0 to do Win32 + * Cryptoki development, it might be defined by: + * + * #define CK_CALLBACK_FUNCTION(returnType, name) \ + * returnType (* name) + * + * If you're using an earlier version of Microsoft Developer + * Studio to do Win16 development, it might be defined by: + * + * #define CK_CALLBACK_FUNCTION(returnType, name) \ + * returnType _far _pascal (* name) + * + * In a UNIX environment, it might be defined by: + * + * #define CK_CALLBACK_FUNCTION(returnType, name) \ + * returnType (* name) + * + * + * 5. NULL_PTR: This macro is the value of a NULL pointer. + * + * In any ANSI/ISO C environment (and in many others as well), + * this should best be defined by + * + * #ifndef NULL_PTR + * #define NULL_PTR 0 + * #endif + */ + + +/* All the various Cryptoki types and #define'd values are in the + * file pkcs11t.h. + */ +#include "pkcs11t.h" + +#define __PASTE(x,y) x##y + + +/* ============================================================== + * Define the "extern" form of all the entry points. + * ============================================================== + */ + +#define CK_NEED_ARG_LIST 1 +#define CK_PKCS11_FUNCTION_INFO(name) \ + extern CK_DECLARE_FUNCTION(CK_RV, name) + +/* pkcs11f.h has all the information about the Cryptoki + * function prototypes. + */ +#include "pkcs11f.h" + +#undef CK_NEED_ARG_LIST +#undef CK_PKCS11_FUNCTION_INFO + + +/* ============================================================== + * Define the typedef form of all the entry points. That is, for + * each Cryptoki function C_XXX, define a type CK_C_XXX which is + * a pointer to that kind of function. + * ============================================================== + */ + +#define CK_NEED_ARG_LIST 1 +#define CK_PKCS11_FUNCTION_INFO(name) \ + typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, __PASTE(CK_,name)) + +/* pkcs11f.h has all the information about the Cryptoki + * function prototypes. + */ +#include "pkcs11f.h" + +#undef CK_NEED_ARG_LIST +#undef CK_PKCS11_FUNCTION_INFO + + +/* ============================================================== + * Define structed vector of entry points. 
A CK_FUNCTION_LIST + * contains a CK_VERSION indicating a library's Cryptoki version + * and then a whole slew of function pointers to the routines in + * the library. This type was declared, but not defined, in + * pkcs11t.h. + * ============================================================== + */ + +#define CK_PKCS11_FUNCTION_INFO(name) \ + __PASTE(CK_,name) name; + +struct CK_FUNCTION_LIST { + + CK_VERSION version; /* Cryptoki version */ + +/* Pile all the function pointers into the CK_FUNCTION_LIST. */ +/* pkcs11f.h has all the information about the Cryptoki + * function prototypes. + */ +#include "pkcs11f.h" + +}; + +#undef CK_PKCS11_FUNCTION_INFO + + +#undef __PASTE + +#ifdef __cplusplus +} +#endif + +#endif /* _PKCS11_H_ */ + diff --git a/tools/vendor/github.com/miekg/pkcs11/pkcs11f.h b/tools/vendor/github.com/miekg/pkcs11/pkcs11f.h new file mode 100644 index 000000000..ed90affc5 --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/pkcs11f.h @@ -0,0 +1,939 @@ +/* Copyright (c) OASIS Open 2016. All Rights Reserved./ + * /Distributed under the terms of the OASIS IPR Policy, + * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY + * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. + */ + +/* Latest version of the specification: + * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html + */ + +/* This header file contains pretty much everything about all the + * Cryptoki function prototypes. Because this information is + * used for more than just declaring function prototypes, the + * order of the functions appearing herein is important, and + * should not be altered. + */ + +/* General-purpose */ + +/* C_Initialize initializes the Cryptoki library. */ +CK_PKCS11_FUNCTION_INFO(C_Initialize) +#ifdef CK_NEED_ARG_LIST +( + CK_VOID_PTR pInitArgs /* if this is not NULL_PTR, it gets + * cast to CK_C_INITIALIZE_ARGS_PTR + * and dereferenced + */ +); +#endif + + +/* C_Finalize indicates that an application is done with the + * Cryptoki library. + */ +CK_PKCS11_FUNCTION_INFO(C_Finalize) +#ifdef CK_NEED_ARG_LIST +( + CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */ +); +#endif + + +/* C_GetInfo returns general information about Cryptoki. */ +CK_PKCS11_FUNCTION_INFO(C_GetInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_INFO_PTR pInfo /* location that receives information */ +); +#endif + + +/* C_GetFunctionList returns the function list. */ +CK_PKCS11_FUNCTION_INFO(C_GetFunctionList) +#ifdef CK_NEED_ARG_LIST +( + CK_FUNCTION_LIST_PTR_PTR ppFunctionList /* receives pointer to + * function list + */ +); +#endif + + + +/* Slot and token management */ + +/* C_GetSlotList obtains a list of slots in the system. */ +CK_PKCS11_FUNCTION_INFO(C_GetSlotList) +#ifdef CK_NEED_ARG_LIST +( + CK_BBOOL tokenPresent, /* only slots with tokens */ + CK_SLOT_ID_PTR pSlotList, /* receives array of slot IDs */ + CK_ULONG_PTR pulCount /* receives number of slots */ +); +#endif + + +/* C_GetSlotInfo obtains information about a particular slot in + * the system. + */ +CK_PKCS11_FUNCTION_INFO(C_GetSlotInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* the ID of the slot */ + CK_SLOT_INFO_PTR pInfo /* receives the slot information */ +); +#endif + + +/* C_GetTokenInfo obtains information about a particular token + * in the system. 
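+ *
+ * For illustration only (not part of the specification), a call
+ * might look like this, assuming slotID came from C_GetSlotList:
+ *
+ * CK_TOKEN_INFO info;
+ * CK_RV rv = C_GetTokenInfo(slotID, &info);
+ *
+ * Note that the text fields of CK_TOKEN_INFO are blank padded,
+ * not NUL-terminated.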
+ */ +CK_PKCS11_FUNCTION_INFO(C_GetTokenInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* ID of the token's slot */ + CK_TOKEN_INFO_PTR pInfo /* receives the token information */ +); +#endif + + +/* C_GetMechanismList obtains a list of mechanism types + * supported by a token. + */ +CK_PKCS11_FUNCTION_INFO(C_GetMechanismList) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* ID of token's slot */ + CK_MECHANISM_TYPE_PTR pMechanismList, /* gets mech. array */ + CK_ULONG_PTR pulCount /* gets # of mechs. */ +); +#endif + + +/* C_GetMechanismInfo obtains information about a particular + * mechanism possibly supported by a token. + */ +CK_PKCS11_FUNCTION_INFO(C_GetMechanismInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* ID of the token's slot */ + CK_MECHANISM_TYPE type, /* type of mechanism */ + CK_MECHANISM_INFO_PTR pInfo /* receives mechanism info */ +); +#endif + + +/* C_InitToken initializes a token. */ +CK_PKCS11_FUNCTION_INFO(C_InitToken) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* ID of the token's slot */ + CK_UTF8CHAR_PTR pPin, /* the SO's initial PIN */ + CK_ULONG ulPinLen, /* length in bytes of the PIN */ + CK_UTF8CHAR_PTR pLabel /* 32-byte token label (blank padded) */ +); +#endif + + +/* C_InitPIN initializes the normal user's PIN. */ +CK_PKCS11_FUNCTION_INFO(C_InitPIN) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_UTF8CHAR_PTR pPin, /* the normal user's PIN */ + CK_ULONG ulPinLen /* length in bytes of the PIN */ +); +#endif + + +/* C_SetPIN modifies the PIN of the user who is logged in. */ +CK_PKCS11_FUNCTION_INFO(C_SetPIN) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_UTF8CHAR_PTR pOldPin, /* the old PIN */ + CK_ULONG ulOldLen, /* length of the old PIN */ + CK_UTF8CHAR_PTR pNewPin, /* the new PIN */ + CK_ULONG ulNewLen /* length of the new PIN */ +); +#endif + + + +/* Session management */ + +/* C_OpenSession opens a session between an application and a + * token. + */ +CK_PKCS11_FUNCTION_INFO(C_OpenSession) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* the slot's ID */ + CK_FLAGS flags, /* from CK_SESSION_INFO */ + CK_VOID_PTR pApplication, /* passed to callback */ + CK_NOTIFY Notify, /* callback function */ + CK_SESSION_HANDLE_PTR phSession /* gets session handle */ +); +#endif + + +/* C_CloseSession closes a session between an application and a + * token. + */ +CK_PKCS11_FUNCTION_INFO(C_CloseSession) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession /* the session's handle */ +); +#endif + + +/* C_CloseAllSessions closes all sessions with a token. */ +CK_PKCS11_FUNCTION_INFO(C_CloseAllSessions) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID /* the token's slot */ +); +#endif + + +/* C_GetSessionInfo obtains information about the session. */ +CK_PKCS11_FUNCTION_INFO(C_GetSessionInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_SESSION_INFO_PTR pInfo /* receives session info */ +); +#endif + + +/* C_GetOperationState obtains the state of the cryptographic operation + * in a session. + */ +CK_PKCS11_FUNCTION_INFO(C_GetOperationState) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pOperationState, /* gets state */ + CK_ULONG_PTR pulOperationStateLen /* gets state length */ +); +#endif + + +/* C_SetOperationState restores the state of the cryptographic + * operation in a session. 
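+ *
+ * A sketch of the save/restore round trip (illustration only;
+ * error handling omitted). The buffer length is discovered by
+ * passing NULL_PTR first, per the usual Cryptoki convention:
+ *
+ * CK_BYTE_PTR pState;
+ * CK_ULONG ulStateLen;
+ * C_GetOperationState(hSession, NULL_PTR, &ulStateLen);
+ * pState = malloc(ulStateLen);
+ * C_GetOperationState(hSession, pState, &ulStateLen);
+ * ...
+ * C_SetOperationState(hSession, pState, ulStateLen,
+ *                     CK_INVALID_HANDLE, CK_INVALID_HANDLE);
+ *
+ * The two CK_INVALID_HANDLE arguments mean "no keys are needed
+ * to restore this state"; pass real key handles when they are.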
+ */ +CK_PKCS11_FUNCTION_INFO(C_SetOperationState) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pOperationState, /* holds state */ + CK_ULONG ulOperationStateLen, /* holds state length */ + CK_OBJECT_HANDLE hEncryptionKey, /* en/decryption key */ + CK_OBJECT_HANDLE hAuthenticationKey /* sign/verify key */ +); +#endif + + +/* C_Login logs a user into a token. */ +CK_PKCS11_FUNCTION_INFO(C_Login) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_USER_TYPE userType, /* the user type */ + CK_UTF8CHAR_PTR pPin, /* the user's PIN */ + CK_ULONG ulPinLen /* the length of the PIN */ +); +#endif + + +/* C_Logout logs a user out from a token. */ +CK_PKCS11_FUNCTION_INFO(C_Logout) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession /* the session's handle */ +); +#endif + + + +/* Object management */ + +/* C_CreateObject creates a new object. */ +CK_PKCS11_FUNCTION_INFO(C_CreateObject) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* the object's template */ + CK_ULONG ulCount, /* attributes in template */ + CK_OBJECT_HANDLE_PTR phObject /* gets new object's handle. */ +); +#endif + + +/* C_CopyObject copies an object, creating a new object for the + * copy. + */ +CK_PKCS11_FUNCTION_INFO(C_CopyObject) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject, /* the object's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* template for new object */ + CK_ULONG ulCount, /* attributes in template */ + CK_OBJECT_HANDLE_PTR phNewObject /* receives handle of copy */ +); +#endif + + +/* C_DestroyObject destroys an object. */ +CK_PKCS11_FUNCTION_INFO(C_DestroyObject) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject /* the object's handle */ +); +#endif + + +/* C_GetObjectSize gets the size of an object in bytes. */ +CK_PKCS11_FUNCTION_INFO(C_GetObjectSize) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject, /* the object's handle */ + CK_ULONG_PTR pulSize /* receives size of object */ +); +#endif + + +/* C_GetAttributeValue obtains the value of one or more object + * attributes. + */ +CK_PKCS11_FUNCTION_INFO(C_GetAttributeValue) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject, /* the object's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs; gets vals */ + CK_ULONG ulCount /* attributes in template */ +); +#endif + + +/* C_SetAttributeValue modifies the value of one or more object + * attributes. + */ +CK_PKCS11_FUNCTION_INFO(C_SetAttributeValue) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject, /* the object's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs and values */ + CK_ULONG ulCount /* attributes in template */ +); +#endif + + +/* C_FindObjectsInit initializes a search for token and session + * objects that match a template. + */ +CK_PKCS11_FUNCTION_INFO(C_FindObjectsInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* attribute values to match */ + CK_ULONG ulCount /* attrs in search template */ +); +#endif + + +/* C_FindObjects continues a search for token and session + * objects that match a template, obtaining additional object + * handles. 
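+ *
+ * A typical search loop might look like this (illustration
+ * only; pTemplate and ulCount are assumed to have been set up
+ * by the caller, and error handling is omitted):
+ *
+ * CK_OBJECT_HANDLE hObject;
+ * CK_ULONG ulFound;
+ * C_FindObjectsInit(hSession, pTemplate, ulCount);
+ * for (;;) {
+ *   C_FindObjects(hSession, &hObject, 1, &ulFound);
+ *   if (ulFound == 0) break;
+ *   (use hObject here)
+ * }
+ * C_FindObjectsFinal(hSession);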
+ */ +CK_PKCS11_FUNCTION_INFO(C_FindObjects) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_OBJECT_HANDLE_PTR phObject, /* gets obj. handles */ + CK_ULONG ulMaxObjectCount, /* max handles to get */ + CK_ULONG_PTR pulObjectCount /* actual # returned */ +); +#endif + + +/* C_FindObjectsFinal finishes a search for token and session + * objects. + */ +CK_PKCS11_FUNCTION_INFO(C_FindObjectsFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession /* the session's handle */ +); +#endif + + + +/* Encryption and decryption */ + +/* C_EncryptInit initializes an encryption operation. */ +CK_PKCS11_FUNCTION_INFO(C_EncryptInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* the encryption mechanism */ + CK_OBJECT_HANDLE hKey /* handle of encryption key */ +); +#endif + + +/* C_Encrypt encrypts single-part data. */ +CK_PKCS11_FUNCTION_INFO(C_Encrypt) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pData, /* the plaintext data */ + CK_ULONG ulDataLen, /* bytes of plaintext */ + CK_BYTE_PTR pEncryptedData, /* gets ciphertext */ + CK_ULONG_PTR pulEncryptedDataLen /* gets c-text size */ +); +#endif + + +/* C_EncryptUpdate continues a multiple-part encryption + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_EncryptUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pPart, /* the plaintext data */ + CK_ULONG ulPartLen, /* plaintext data len */ + CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ + CK_ULONG_PTR pulEncryptedPartLen /* gets c-text size */ +); +#endif + + +/* C_EncryptFinal finishes a multiple-part encryption + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_EncryptFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session handle */ + CK_BYTE_PTR pLastEncryptedPart, /* last c-text */ + CK_ULONG_PTR pulLastEncryptedPartLen /* gets last size */ +); +#endif + + +/* C_DecryptInit initializes a decryption operation. */ +CK_PKCS11_FUNCTION_INFO(C_DecryptInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* the decryption mechanism */ + CK_OBJECT_HANDLE hKey /* handle of decryption key */ +); +#endif + + +/* C_Decrypt decrypts encrypted data in a single part. */ +CK_PKCS11_FUNCTION_INFO(C_Decrypt) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pEncryptedData, /* ciphertext */ + CK_ULONG ulEncryptedDataLen, /* ciphertext length */ + CK_BYTE_PTR pData, /* gets plaintext */ + CK_ULONG_PTR pulDataLen /* gets p-text size */ +); +#endif + + +/* C_DecryptUpdate continues a multiple-part decryption + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DecryptUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pEncryptedPart, /* encrypted data */ + CK_ULONG ulEncryptedPartLen, /* input length */ + CK_BYTE_PTR pPart, /* gets plaintext */ + CK_ULONG_PTR pulPartLen /* p-text size */ +); +#endif + + +/* C_DecryptFinal finishes a multiple-part decryption + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DecryptFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pLastPart, /* gets plaintext */ + CK_ULONG_PTR pulLastPartLen /* p-text size */ +); +#endif + + + +/* Message digesting */ + +/* C_DigestInit initializes a message-digesting operation. 
*/ +CK_PKCS11_FUNCTION_INFO(C_DigestInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism /* the digesting mechanism */ +); +#endif + + +/* C_Digest digests data in a single part. */ +CK_PKCS11_FUNCTION_INFO(C_Digest) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pData, /* data to be digested */ + CK_ULONG ulDataLen, /* bytes of data to digest */ + CK_BYTE_PTR pDigest, /* gets the message digest */ + CK_ULONG_PTR pulDigestLen /* gets digest length */ +); +#endif + + +/* C_DigestUpdate continues a multiple-part message-digesting + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DigestUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pPart, /* data to be digested */ + CK_ULONG ulPartLen /* bytes of data to be digested */ +); +#endif + + +/* C_DigestKey continues a multi-part message-digesting + * operation, by digesting the value of a secret key as part of + * the data already digested. + */ +CK_PKCS11_FUNCTION_INFO(C_DigestKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hKey /* secret key to digest */ +); +#endif + + +/* C_DigestFinal finishes a multiple-part message-digesting + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DigestFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pDigest, /* gets the message digest */ + CK_ULONG_PTR pulDigestLen /* gets byte count of digest */ +); +#endif + + + +/* Signing and MACing */ + +/* C_SignInit initializes a signature (private key encryption) + * operation, where the signature is (will be) an appendix to + * the data, and plaintext cannot be recovered from the + * signature. + */ +CK_PKCS11_FUNCTION_INFO(C_SignInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* the signature mechanism */ + CK_OBJECT_HANDLE hKey /* handle of signature key */ +); +#endif + + +/* C_Sign signs (encrypts with private key) data in a single + * part, where the signature is (will be) an appendix to the + * data, and plaintext cannot be recovered from the signature. + */ +CK_PKCS11_FUNCTION_INFO(C_Sign) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pData, /* the data to sign */ + CK_ULONG ulDataLen, /* count of bytes to sign */ + CK_BYTE_PTR pSignature, /* gets the signature */ + CK_ULONG_PTR pulSignatureLen /* gets signature length */ +); +#endif + + +/* C_SignUpdate continues a multiple-part signature operation, + * where the signature is (will be) an appendix to the data, + * and plaintext cannot be recovered from the signature. + */ +CK_PKCS11_FUNCTION_INFO(C_SignUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pPart, /* the data to sign */ + CK_ULONG ulPartLen /* count of bytes to sign */ +); +#endif + + +/* C_SignFinal finishes a multiple-part signature operation, + * returning the signature. + */ +CK_PKCS11_FUNCTION_INFO(C_SignFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pSignature, /* gets the signature */ + CK_ULONG_PTR pulSignatureLen /* gets signature length */ +); +#endif + + +/* C_SignRecoverInit initializes a signature operation, where + * the data can be recovered from the signature. 
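+ *
+ * Unlike the C_Sign family above, signing with recovery is
+ * single-part only; there is no update/final pair. The whole
+ * flow (illustration only) is:
+ *
+ * C_SignRecoverInit(hSession, &mechanism, hKey);
+ * C_SignRecover(hSession, pData, ulDataLen,
+ *               pSignature, &ulSignatureLen);
+ *
+ * with the usual NULL_PTR length query on pSignature first if
+ * the signature size is not known in advance.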
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignRecoverInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the signature mechanism */
+ CK_OBJECT_HANDLE hKey /* handle of the signature key */
+);
+#endif
+
+
+/* C_SignRecover signs data in a single operation, where the
+ * data can be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignRecover)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pData, /* the data to sign */
+ CK_ULONG ulDataLen, /* count of bytes to sign */
+ CK_BYTE_PTR pSignature, /* gets the signature */
+ CK_ULONG_PTR pulSignatureLen /* gets signature length */
+);
+#endif
+
+
+
+/* Verifying signatures and MACs */
+
+/* C_VerifyInit initializes a verification operation, where the
+ * signature is an appendix to the data, and plaintext cannot
+ * be recovered from the signature (e.g. DSA).
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
+ CK_OBJECT_HANDLE hKey /* verification key */
+);
+#endif
+
+
+/* C_Verify verifies a signature in a single-part operation,
+ * where the signature is an appendix to the data, and plaintext
+ * cannot be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_Verify)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pData, /* signed data */
+ CK_ULONG ulDataLen, /* length of signed data */
+ CK_BYTE_PTR pSignature, /* signature */
+ CK_ULONG ulSignatureLen /* signature length */
+);
+#endif
+
+
+/* C_VerifyUpdate continues a multiple-part verification
+ * operation, where the signature is an appendix to the data,
+ * and plaintext cannot be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pPart, /* signed data */
+ CK_ULONG ulPartLen /* length of signed data */
+);
+#endif
+
+
+/* C_VerifyFinal finishes a multiple-part verification
+ * operation, checking the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyFinal)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSignature, /* signature to verify */
+ CK_ULONG ulSignatureLen /* signature length */
+);
+#endif
+
+
+/* C_VerifyRecoverInit initializes a signature verification
+ * operation, where the data is recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyRecoverInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
+ CK_OBJECT_HANDLE hKey /* verification key */
+);
+#endif
+
+
+/* C_VerifyRecover verifies a signature in a single-part
+ * operation, where the data is recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyRecover)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSignature, /* signature to verify */
+ CK_ULONG ulSignatureLen, /* signature length */
+ CK_BYTE_PTR pData, /* gets signed data */
+ CK_ULONG_PTR pulDataLen /* gets signed data len */
+);
+#endif
+
+
+
+/* Dual-function cryptographic operations */
+
+/* C_DigestEncryptUpdate continues a multiple-part digesting
+ * and encryption operation. 
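+ *
+ * The dual-function calls in this section require a token that
+ * sets the CKF_DUAL_CRYPTO_OPERATIONS flag (see pkcs11t.h).
+ * After both C_DigestInit and C_EncryptInit have succeeded on
+ * the same session, each chunk can be pushed through one call
+ * (illustration only):
+ *
+ * C_DigestEncryptUpdate(hSession, pPart, ulPartLen,
+ *                       pEncryptedPart, &ulEncryptedPartLen);
+ *
+ * repeated per chunk and finished with C_EncryptFinal and
+ * C_DigestFinal.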
+ */ +CK_PKCS11_FUNCTION_INFO(C_DigestEncryptUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pPart, /* the plaintext data */ + CK_ULONG ulPartLen, /* plaintext length */ + CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ + CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ +); +#endif + + +/* C_DecryptDigestUpdate continues a multiple-part decryption and + * digesting operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DecryptDigestUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pEncryptedPart, /* ciphertext */ + CK_ULONG ulEncryptedPartLen, /* ciphertext length */ + CK_BYTE_PTR pPart, /* gets plaintext */ + CK_ULONG_PTR pulPartLen /* gets plaintext len */ +); +#endif + + +/* C_SignEncryptUpdate continues a multiple-part signing and + * encryption operation. + */ +CK_PKCS11_FUNCTION_INFO(C_SignEncryptUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pPart, /* the plaintext data */ + CK_ULONG ulPartLen, /* plaintext length */ + CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ + CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ +); +#endif + + +/* C_DecryptVerifyUpdate continues a multiple-part decryption and + * verify operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DecryptVerifyUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pEncryptedPart, /* ciphertext */ + CK_ULONG ulEncryptedPartLen, /* ciphertext length */ + CK_BYTE_PTR pPart, /* gets plaintext */ + CK_ULONG_PTR pulPartLen /* gets p-text length */ +); +#endif + + + +/* Key management */ + +/* C_GenerateKey generates a secret key, creating a new key + * object. + */ +CK_PKCS11_FUNCTION_INFO(C_GenerateKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* key generation mech. */ + CK_ATTRIBUTE_PTR pTemplate, /* template for new key */ + CK_ULONG ulCount, /* # of attrs in template */ + CK_OBJECT_HANDLE_PTR phKey /* gets handle of new key */ +); +#endif + + +/* C_GenerateKeyPair generates a public-key/private-key pair, + * creating new key objects. + */ +CK_PKCS11_FUNCTION_INFO(C_GenerateKeyPair) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session handle */ + CK_MECHANISM_PTR pMechanism, /* key-gen mech. */ + CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template for pub. key */ + CK_ULONG ulPublicKeyAttributeCount, /* # pub. attrs. */ + CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template for priv. key */ + CK_ULONG ulPrivateKeyAttributeCount, /* # priv. attrs. */ + CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub. key handle */ + CK_OBJECT_HANDLE_PTR phPrivateKey /* gets priv. key handle */ +); +#endif + + +/* C_WrapKey wraps (i.e., encrypts) a key. */ +CK_PKCS11_FUNCTION_INFO(C_WrapKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* the wrapping mechanism */ + CK_OBJECT_HANDLE hWrappingKey, /* wrapping key */ + CK_OBJECT_HANDLE hKey, /* key to be wrapped */ + CK_BYTE_PTR pWrappedKey, /* gets wrapped key */ + CK_ULONG_PTR pulWrappedKeyLen /* gets wrapped key size */ +); +#endif + + +/* C_UnwrapKey unwraps (decrypts) a wrapped key, creating a new + * key object. + */ +CK_PKCS11_FUNCTION_INFO(C_UnwrapKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_MECHANISM_PTR pMechanism, /* unwrapping mech. 
*/
+ CK_OBJECT_HANDLE hUnwrappingKey, /* unwrapping key */
+ CK_BYTE_PTR pWrappedKey, /* the wrapped key */
+ CK_ULONG ulWrappedKeyLen, /* wrapped key len */
+ CK_ATTRIBUTE_PTR pTemplate, /* new key template */
+ CK_ULONG ulAttributeCount, /* template length */
+ CK_OBJECT_HANDLE_PTR phKey /* gets new handle */
+);
+#endif
+
+
+/* C_DeriveKey derives a key from a base key, creating a new key
+ * object.
+ */
+CK_PKCS11_FUNCTION_INFO(C_DeriveKey)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* session's handle */
+ CK_MECHANISM_PTR pMechanism, /* key deriv. mech. */
+ CK_OBJECT_HANDLE hBaseKey, /* base key */
+ CK_ATTRIBUTE_PTR pTemplate, /* new key template */
+ CK_ULONG ulAttributeCount, /* template length */
+ CK_OBJECT_HANDLE_PTR phKey /* gets new handle */
+);
+#endif
+
+
+
+/* Random number generation */
+
+/* C_SeedRandom mixes additional seed material into the token's
+ * random number generator.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SeedRandom)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSeed, /* the seed material */
+ CK_ULONG ulSeedLen /* length of seed material */
+);
+#endif
+
+
+/* C_GenerateRandom generates random data. */
+CK_PKCS11_FUNCTION_INFO(C_GenerateRandom)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR RandomData, /* receives the random data */
+ CK_ULONG ulRandomLen /* # of bytes to generate */
+);
+#endif
+
+
+
+/* Parallel function management */
+
+/* C_GetFunctionStatus is a legacy function; it obtains an
+ * updated status of a function running in parallel with an
+ * application.
+ */
+CK_PKCS11_FUNCTION_INFO(C_GetFunctionStatus)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession /* the session's handle */
+);
+#endif
+
+
+/* C_CancelFunction is a legacy function; it cancels a function
+ * running in parallel.
+ */
+CK_PKCS11_FUNCTION_INFO(C_CancelFunction)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession /* the session's handle */
+);
+#endif
+
+
+/* C_WaitForSlotEvent waits for a slot event (token insertion,
+ * removal, etc.) to occur.
+ */
+CK_PKCS11_FUNCTION_INFO(C_WaitForSlotEvent)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_FLAGS flags, /* blocking/nonblocking flag */
+ CK_SLOT_ID_PTR pSlot, /* location that receives the slot ID */
+ CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */
+);
+#endif
+
diff --git a/tools/vendor/github.com/miekg/pkcs11/pkcs11go.h b/tools/vendor/github.com/miekg/pkcs11/pkcs11go.h
new file mode 100644
index 000000000..1b98bad21
--- /dev/null
+++ b/tools/vendor/github.com/miekg/pkcs11/pkcs11go.h
@@ -0,0 +1,33 @@
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+
+#define CK_PTR *
+#ifndef NULL_PTR
+#define NULL_PTR 0
+#endif
+#define CK_DEFINE_FUNCTION(returnType, name) returnType name
+#define CK_DECLARE_FUNCTION(returnType, name) returnType name
+#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name)
+#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name)
+
+#include <unistd.h>
+#ifdef PACKED_STRUCTURES
+# pragma pack(push, 1)
+# include "pkcs11.h"
+# pragma pack(pop)
+#else
+# include "pkcs11.h"
+#endif
+
+// Copy of CK_INFO but with default alignment (not packed). Go hides unaligned
+// struct fields so copying to an aligned struct is necessary to read CK_INFO
+// from Go on Windows where packing is required. 
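+//
+// A sketch of that copy (illustration only; the helper name is
+// hypothetical and not part of this package):
+//
+//   void ckInfoFromPacked(CK_INFO *in, ckInfo *out) {
+//     out->cryptokiVersion = in->cryptokiVersion;
+//     memcpy(out->manufacturerID, in->manufacturerID, 32);
+//     out->flags = in->flags;
+//     memcpy(out->libraryDescription, in->libraryDescription, 32);
+//     out->libraryVersion = in->libraryVersion;
+//   }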
+typedef struct ckInfo { + CK_VERSION cryptokiVersion; + CK_UTF8CHAR manufacturerID[32]; + CK_FLAGS flags; + CK_UTF8CHAR libraryDescription[32]; + CK_VERSION libraryVersion; +} ckInfo, *ckInfoPtr; diff --git a/tools/vendor/github.com/miekg/pkcs11/pkcs11t.h b/tools/vendor/github.com/miekg/pkcs11/pkcs11t.h new file mode 100644 index 000000000..321c3075a --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/pkcs11t.h @@ -0,0 +1,2047 @@ +/* Copyright (c) OASIS Open 2016. All Rights Reserved./ + * /Distributed under the terms of the OASIS IPR Policy, + * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY + * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. + */ + +/* Latest version of the specification: + * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html + */ + +/* See top of pkcs11.h for information about the macros that + * must be defined and the structure-packing conventions that + * must be set before including this file. + */ + +#ifndef _PKCS11T_H_ +#define _PKCS11T_H_ 1 + +#define CRYPTOKI_VERSION_MAJOR 2 +#define CRYPTOKI_VERSION_MINOR 40 +#define CRYPTOKI_VERSION_AMENDMENT 0 + +#define CK_TRUE 1 +#define CK_FALSE 0 + +#ifndef CK_DISABLE_TRUE_FALSE +#ifndef FALSE +#define FALSE CK_FALSE +#endif +#ifndef TRUE +#define TRUE CK_TRUE +#endif +#endif + +/* an unsigned 8-bit value */ +typedef unsigned char CK_BYTE; + +/* an unsigned 8-bit character */ +typedef CK_BYTE CK_CHAR; + +/* an 8-bit UTF-8 character */ +typedef CK_BYTE CK_UTF8CHAR; + +/* a BYTE-sized Boolean flag */ +typedef CK_BYTE CK_BBOOL; + +/* an unsigned value, at least 32 bits long */ +typedef unsigned long int CK_ULONG; + +/* a signed value, the same size as a CK_ULONG */ +typedef long int CK_LONG; + +/* at least 32 bits; each bit is a Boolean flag */ +typedef CK_ULONG CK_FLAGS; + + +/* some special values for certain CK_ULONG variables */ +#define CK_UNAVAILABLE_INFORMATION (~0UL) +#define CK_EFFECTIVELY_INFINITE 0UL + + +typedef CK_BYTE CK_PTR CK_BYTE_PTR; +typedef CK_CHAR CK_PTR CK_CHAR_PTR; +typedef CK_UTF8CHAR CK_PTR CK_UTF8CHAR_PTR; +typedef CK_ULONG CK_PTR CK_ULONG_PTR; +typedef void CK_PTR CK_VOID_PTR; + +/* Pointer to a CK_VOID_PTR-- i.e., pointer to pointer to void */ +typedef CK_VOID_PTR CK_PTR CK_VOID_PTR_PTR; + + +/* The following value is always invalid if used as a session + * handle or object handle + */ +#define CK_INVALID_HANDLE 0UL + + +typedef struct CK_VERSION { + CK_BYTE major; /* integer portion of version number */ + CK_BYTE minor; /* 1/100ths portion of version number */ +} CK_VERSION; + +typedef CK_VERSION CK_PTR CK_VERSION_PTR; + + +typedef struct CK_INFO { + CK_VERSION cryptokiVersion; /* Cryptoki interface ver */ + CK_UTF8CHAR manufacturerID[32]; /* blank padded */ + CK_FLAGS flags; /* must be zero */ + CK_UTF8CHAR libraryDescription[32]; /* blank padded */ + CK_VERSION libraryVersion; /* version of library */ +} CK_INFO; + +typedef CK_INFO CK_PTR CK_INFO_PTR; + + +/* CK_NOTIFICATION enumerates the types of notifications that + * Cryptoki provides to an application + */ +typedef CK_ULONG CK_NOTIFICATION; +#define CKN_SURRENDER 0UL +#define CKN_OTP_CHANGED 1UL + +typedef CK_ULONG CK_SLOT_ID; + +typedef CK_SLOT_ID CK_PTR CK_SLOT_ID_PTR; + + +/* CK_SLOT_INFO provides information about a slot */ +typedef struct CK_SLOT_INFO { + CK_UTF8CHAR slotDescription[64]; /* blank padded */ + CK_UTF8CHAR manufacturerID[32]; /* blank padded */ + CK_FLAGS flags; + + 
CK_VERSION hardwareVersion; /* version of hardware */
+ CK_VERSION firmwareVersion; /* version of firmware */
+} CK_SLOT_INFO;
+
+/* flags: bit flags that provide capabilities of the slot
+ * Bit Flag Mask Meaning
+ */
+#define CKF_TOKEN_PRESENT 0x00000001UL /* a token is there */
+#define CKF_REMOVABLE_DEVICE 0x00000002UL /* removable devices */
+#define CKF_HW_SLOT 0x00000004UL /* hardware slot */
+
+typedef CK_SLOT_INFO CK_PTR CK_SLOT_INFO_PTR;
+
+
+/* CK_TOKEN_INFO provides information about a token */
+typedef struct CK_TOKEN_INFO {
+ CK_UTF8CHAR label[32]; /* blank padded */
+ CK_UTF8CHAR manufacturerID[32]; /* blank padded */
+ CK_UTF8CHAR model[16]; /* blank padded */
+ CK_CHAR serialNumber[16]; /* blank padded */
+ CK_FLAGS flags; /* see below */
+
+ CK_ULONG ulMaxSessionCount; /* max open sessions */
+ CK_ULONG ulSessionCount; /* sess. now open */
+ CK_ULONG ulMaxRwSessionCount; /* max R/W sessions */
+ CK_ULONG ulRwSessionCount; /* R/W sess. now open */
+ CK_ULONG ulMaxPinLen; /* in bytes */
+ CK_ULONG ulMinPinLen; /* in bytes */
+ CK_ULONG ulTotalPublicMemory; /* in bytes */
+ CK_ULONG ulFreePublicMemory; /* in bytes */
+ CK_ULONG ulTotalPrivateMemory; /* in bytes */
+ CK_ULONG ulFreePrivateMemory; /* in bytes */
+ CK_VERSION hardwareVersion; /* version of hardware */
+ CK_VERSION firmwareVersion; /* version of firmware */
+ CK_CHAR utcTime[16]; /* time */
+} CK_TOKEN_INFO;
+
+/* The flags parameter is defined as follows:
+ * Bit Flag Mask Meaning
+ */
+#define CKF_RNG 0x00000001UL /* has random # generator */
+#define CKF_WRITE_PROTECTED 0x00000002UL /* token is write-protected */
+#define CKF_LOGIN_REQUIRED 0x00000004UL /* user must login */
+#define CKF_USER_PIN_INITIALIZED 0x00000008UL /* normal user's PIN is set */
+
+/* CKF_RESTORE_KEY_NOT_NEEDED. If it is set,
+ * that means that *every* time the state of cryptographic
+ * operations of a session is successfully saved, all keys
+ * needed to continue those operations are stored in the state
+ */
+#define CKF_RESTORE_KEY_NOT_NEEDED 0x00000020UL
+
+/* CKF_CLOCK_ON_TOKEN. If it is set, that means
+ * that the token has some sort of clock. The time on that
+ * clock is returned in the token info structure
+ */
+#define CKF_CLOCK_ON_TOKEN 0x00000040UL
+
+/* CKF_PROTECTED_AUTHENTICATION_PATH. If it is
+ * set, that means that there is some way for the user to login
+ * without sending a PIN through the Cryptoki library itself
+ */
+#define CKF_PROTECTED_AUTHENTICATION_PATH 0x00000100UL
+
+/* CKF_DUAL_CRYPTO_OPERATIONS. If it is true,
+ * that means that a single session with the token can perform
+ * dual simultaneous cryptographic operations (digest and
+ * encrypt; decrypt and digest; sign and encrypt; and decrypt
+ * and sign)
+ */
+#define CKF_DUAL_CRYPTO_OPERATIONS 0x00000200UL
+
+/* CKF_TOKEN_INITIALIZED. If it is true, the
+ * token has been initialized using C_InitToken or an
+ * equivalent mechanism outside the scope of PKCS #11.
+ * Calling C_InitToken when this flag is set will cause
+ * the token to be reinitialized.
+ */
+#define CKF_TOKEN_INITIALIZED 0x00000400UL
+
+/* CKF_SECONDARY_AUTHENTICATION. If it is
+ * true, the token supports secondary authentication for
+ * private key objects.
+ */
+#define CKF_SECONDARY_AUTHENTICATION 0x00000800UL
+
+/* CKF_USER_PIN_COUNT_LOW. If it is true, an
+ * incorrect user login PIN has been entered at least once
+ * since the last successful authentication.
+ */
+#define CKF_USER_PIN_COUNT_LOW 0x00010000UL
+
+/* CKF_USER_PIN_FINAL_TRY. 
If it is true,
+ * supplying an incorrect user PIN will cause it to become locked.
+ */
+#define CKF_USER_PIN_FINAL_TRY 0x00020000UL
+
+/* CKF_USER_PIN_LOCKED. If it is true, the
+ * user PIN has been locked. User login to the token is not
+ * possible.
+ */
+#define CKF_USER_PIN_LOCKED 0x00040000UL
+
+/* CKF_USER_PIN_TO_BE_CHANGED. If it is true,
+ * the user PIN value is the default value set by token
+ * initialization or manufacturing, or the PIN has been
+ * expired by the card.
+ */
+#define CKF_USER_PIN_TO_BE_CHANGED 0x00080000UL
+
+/* CKF_SO_PIN_COUNT_LOW. If it is true, an
+ * incorrect SO login PIN has been entered at least once since
+ * the last successful authentication.
+ */
+#define CKF_SO_PIN_COUNT_LOW 0x00100000UL
+
+/* CKF_SO_PIN_FINAL_TRY. If it is true,
+ * supplying an incorrect SO PIN will cause it to become locked.
+ */
+#define CKF_SO_PIN_FINAL_TRY 0x00200000UL
+
+/* CKF_SO_PIN_LOCKED. If it is true, the SO
+ * PIN has been locked. SO login to the token is not possible.
+ */
+#define CKF_SO_PIN_LOCKED 0x00400000UL
+
+/* CKF_SO_PIN_TO_BE_CHANGED. If it is true,
+ * the SO PIN value is the default value set by token
+ * initialization or manufacturing, or the PIN has been
+ * expired by the card.
+ */
+#define CKF_SO_PIN_TO_BE_CHANGED 0x00800000UL
+
+#define CKF_ERROR_STATE 0x01000000UL
+
+typedef CK_TOKEN_INFO CK_PTR CK_TOKEN_INFO_PTR;
+
+
+/* CK_SESSION_HANDLE is a Cryptoki-assigned value that
+ * identifies a session
+ */
+typedef CK_ULONG CK_SESSION_HANDLE;
+
+typedef CK_SESSION_HANDLE CK_PTR CK_SESSION_HANDLE_PTR;
+
+
+/* CK_USER_TYPE enumerates the types of Cryptoki users */
+typedef CK_ULONG CK_USER_TYPE;
+/* Security Officer */
+#define CKU_SO 0UL
+/* Normal user */
+#define CKU_USER 1UL
+/* Context specific */
+#define CKU_CONTEXT_SPECIFIC 2UL
+
+/* CK_STATE enumerates the session states */
+typedef CK_ULONG CK_STATE;
+#define CKS_RO_PUBLIC_SESSION 0UL
+#define CKS_RO_USER_FUNCTIONS 1UL
+#define CKS_RW_PUBLIC_SESSION 2UL
+#define CKS_RW_USER_FUNCTIONS 3UL
+#define CKS_RW_SO_FUNCTIONS 4UL
+
+/* CK_SESSION_INFO provides information about a session */
+typedef struct CK_SESSION_INFO {
+ CK_SLOT_ID slotID;
+ CK_STATE state;
+ CK_FLAGS flags; /* see below */
+ CK_ULONG ulDeviceError; /* device-dependent error code */
+} CK_SESSION_INFO;
+
+/* The flags are defined in the following table:
+ * Bit Flag Mask Meaning
+ */
+#define CKF_RW_SESSION 0x00000002UL /* session is r/w */
+#define CKF_SERIAL_SESSION 0x00000004UL /* no parallel */
+
+typedef CK_SESSION_INFO CK_PTR CK_SESSION_INFO_PTR;
+
+
+/* CK_OBJECT_HANDLE is a token-specific identifier for an
+ * object
+ */
+typedef CK_ULONG CK_OBJECT_HANDLE;
+
+typedef CK_OBJECT_HANDLE CK_PTR CK_OBJECT_HANDLE_PTR;
+
+
+/* CK_OBJECT_CLASS is a value that identifies the classes (or
+ * types) of objects that Cryptoki recognizes. It is defined
+ * as follows:
+ */
+typedef CK_ULONG CK_OBJECT_CLASS;
+
+/* The following classes of objects are defined: */
+#define CKO_DATA 0x00000000UL
+#define CKO_CERTIFICATE 0x00000001UL
+#define CKO_PUBLIC_KEY 0x00000002UL
+#define CKO_PRIVATE_KEY 0x00000003UL
+#define CKO_SECRET_KEY 0x00000004UL
+#define CKO_HW_FEATURE 0x00000005UL
+#define CKO_DOMAIN_PARAMETERS 0x00000006UL
+#define CKO_MECHANISM 0x00000007UL
+#define CKO_OTP_KEY 0x00000008UL
+
+#define CKO_VENDOR_DEFINED 0x80000000UL
+
+typedef CK_OBJECT_CLASS CK_PTR CK_OBJECT_CLASS_PTR;
+
+/* CK_HW_FEATURE_TYPE is a value that identifies the hardware feature type
+ * of an object with CK_OBJECT_CLASS equal to CKO_HW_FEATURE. 
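+ *
+ * For example (illustration only), a search template for the
+ * token's clock object, if it has one, could pair the two
+ * attributes (CK_ATTRIBUTE and the CKA_/CKH_ values are
+ * defined further below):
+ *
+ * CK_OBJECT_CLASS cls = CKO_HW_FEATURE;
+ * CK_HW_FEATURE_TYPE ft = CKH_CLOCK;
+ * CK_ATTRIBUTE tmpl[] = {
+ *   {CKA_CLASS, &cls, sizeof(cls)},
+ *   {CKA_HW_FEATURE_TYPE, &ft, sizeof(ft)}
+ * };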
+ */ +typedef CK_ULONG CK_HW_FEATURE_TYPE; + +/* The following hardware feature types are defined */ +#define CKH_MONOTONIC_COUNTER 0x00000001UL +#define CKH_CLOCK 0x00000002UL +#define CKH_USER_INTERFACE 0x00000003UL +#define CKH_VENDOR_DEFINED 0x80000000UL + +/* CK_KEY_TYPE is a value that identifies a key type */ +typedef CK_ULONG CK_KEY_TYPE; + +/* the following key types are defined: */ +#define CKK_RSA 0x00000000UL +#define CKK_DSA 0x00000001UL +#define CKK_DH 0x00000002UL +#define CKK_ECDSA 0x00000003UL /* Deprecated */ +#define CKK_EC 0x00000003UL +#define CKK_X9_42_DH 0x00000004UL +#define CKK_KEA 0x00000005UL +#define CKK_GENERIC_SECRET 0x00000010UL +#define CKK_RC2 0x00000011UL +#define CKK_RC4 0x00000012UL +#define CKK_DES 0x00000013UL +#define CKK_DES2 0x00000014UL +#define CKK_DES3 0x00000015UL +#define CKK_CAST 0x00000016UL +#define CKK_CAST3 0x00000017UL +#define CKK_CAST5 0x00000018UL /* Deprecated */ +#define CKK_CAST128 0x00000018UL +#define CKK_RC5 0x00000019UL +#define CKK_IDEA 0x0000001AUL +#define CKK_SKIPJACK 0x0000001BUL +#define CKK_BATON 0x0000001CUL +#define CKK_JUNIPER 0x0000001DUL +#define CKK_CDMF 0x0000001EUL +#define CKK_AES 0x0000001FUL +#define CKK_BLOWFISH 0x00000020UL +#define CKK_TWOFISH 0x00000021UL +#define CKK_SECURID 0x00000022UL +#define CKK_HOTP 0x00000023UL +#define CKK_ACTI 0x00000024UL +#define CKK_CAMELLIA 0x00000025UL +#define CKK_ARIA 0x00000026UL + +#define CKK_MD5_HMAC 0x00000027UL +#define CKK_SHA_1_HMAC 0x00000028UL +#define CKK_RIPEMD128_HMAC 0x00000029UL +#define CKK_RIPEMD160_HMAC 0x0000002AUL +#define CKK_SHA256_HMAC 0x0000002BUL +#define CKK_SHA384_HMAC 0x0000002CUL +#define CKK_SHA512_HMAC 0x0000002DUL +#define CKK_SHA224_HMAC 0x0000002EUL + +#define CKK_SEED 0x0000002FUL +#define CKK_GOSTR3410 0x00000030UL +#define CKK_GOSTR3411 0x00000031UL +#define CKK_GOST28147 0x00000032UL + +#define CKK_SHA3_224_HMAC 0x00000033UL +#define CKK_SHA3_256_HMAC 0x00000034UL +#define CKK_SHA3_384_HMAC 0x00000035UL +#define CKK_SHA3_512_HMAC 0x00000036UL + + + +#define CKK_VENDOR_DEFINED 0x80000000UL + + +/* CK_CERTIFICATE_TYPE is a value that identifies a certificate + * type + */ +typedef CK_ULONG CK_CERTIFICATE_TYPE; + +#define CK_CERTIFICATE_CATEGORY_UNSPECIFIED 0UL +#define CK_CERTIFICATE_CATEGORY_TOKEN_USER 1UL +#define CK_CERTIFICATE_CATEGORY_AUTHORITY 2UL +#define CK_CERTIFICATE_CATEGORY_OTHER_ENTITY 3UL + +#define CK_SECURITY_DOMAIN_UNSPECIFIED 0UL +#define CK_SECURITY_DOMAIN_MANUFACTURER 1UL +#define CK_SECURITY_DOMAIN_OPERATOR 2UL +#define CK_SECURITY_DOMAIN_THIRD_PARTY 3UL + + +/* The following certificate types are defined: */ +#define CKC_X_509 0x00000000UL +#define CKC_X_509_ATTR_CERT 0x00000001UL +#define CKC_WTLS 0x00000002UL +#define CKC_VENDOR_DEFINED 0x80000000UL + + +/* CK_ATTRIBUTE_TYPE is a value that identifies an attribute + * type + */ +typedef CK_ULONG CK_ATTRIBUTE_TYPE; + +/* The CKF_ARRAY_ATTRIBUTE flag identifies an attribute which + * consists of an array of values. 
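+ *
+ * For such attributes (CKA_WRAP_TEMPLATE below, for example)
+ * pValue points at an array of CK_ATTRIBUTE and ulValueLen is
+ * that array's size in bytes. A sketch (illustration only):
+ *
+ * CK_BBOOL bTrue = CK_TRUE;
+ * CK_ATTRIBUTE sub[] = {
+ *   {CKA_EXTRACTABLE, &bTrue, sizeof(bTrue)}
+ * };
+ * CK_ATTRIBUTE attr = {CKA_WRAP_TEMPLATE, sub, sizeof(sub)};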
+ */ +#define CKF_ARRAY_ATTRIBUTE 0x40000000UL + +/* The following OTP-related defines relate to the CKA_OTP_FORMAT attribute */ +#define CK_OTP_FORMAT_DECIMAL 0UL +#define CK_OTP_FORMAT_HEXADECIMAL 1UL +#define CK_OTP_FORMAT_ALPHANUMERIC 2UL +#define CK_OTP_FORMAT_BINARY 3UL + +/* The following OTP-related defines relate to the CKA_OTP_..._REQUIREMENT + * attributes + */ +#define CK_OTP_PARAM_IGNORED 0UL +#define CK_OTP_PARAM_OPTIONAL 1UL +#define CK_OTP_PARAM_MANDATORY 2UL + +/* The following attribute types are defined: */ +#define CKA_CLASS 0x00000000UL +#define CKA_TOKEN 0x00000001UL +#define CKA_PRIVATE 0x00000002UL +#define CKA_LABEL 0x00000003UL +#define CKA_APPLICATION 0x00000010UL +#define CKA_VALUE 0x00000011UL +#define CKA_OBJECT_ID 0x00000012UL +#define CKA_CERTIFICATE_TYPE 0x00000080UL +#define CKA_ISSUER 0x00000081UL +#define CKA_SERIAL_NUMBER 0x00000082UL +#define CKA_AC_ISSUER 0x00000083UL +#define CKA_OWNER 0x00000084UL +#define CKA_ATTR_TYPES 0x00000085UL +#define CKA_TRUSTED 0x00000086UL +#define CKA_CERTIFICATE_CATEGORY 0x00000087UL +#define CKA_JAVA_MIDP_SECURITY_DOMAIN 0x00000088UL +#define CKA_URL 0x00000089UL +#define CKA_HASH_OF_SUBJECT_PUBLIC_KEY 0x0000008AUL +#define CKA_HASH_OF_ISSUER_PUBLIC_KEY 0x0000008BUL +#define CKA_NAME_HASH_ALGORITHM 0x0000008CUL +#define CKA_CHECK_VALUE 0x00000090UL + +#define CKA_KEY_TYPE 0x00000100UL +#define CKA_SUBJECT 0x00000101UL +#define CKA_ID 0x00000102UL +#define CKA_SENSITIVE 0x00000103UL +#define CKA_ENCRYPT 0x00000104UL +#define CKA_DECRYPT 0x00000105UL +#define CKA_WRAP 0x00000106UL +#define CKA_UNWRAP 0x00000107UL +#define CKA_SIGN 0x00000108UL +#define CKA_SIGN_RECOVER 0x00000109UL +#define CKA_VERIFY 0x0000010AUL +#define CKA_VERIFY_RECOVER 0x0000010BUL +#define CKA_DERIVE 0x0000010CUL +#define CKA_START_DATE 0x00000110UL +#define CKA_END_DATE 0x00000111UL +#define CKA_MODULUS 0x00000120UL +#define CKA_MODULUS_BITS 0x00000121UL +#define CKA_PUBLIC_EXPONENT 0x00000122UL +#define CKA_PRIVATE_EXPONENT 0x00000123UL +#define CKA_PRIME_1 0x00000124UL +#define CKA_PRIME_2 0x00000125UL +#define CKA_EXPONENT_1 0x00000126UL +#define CKA_EXPONENT_2 0x00000127UL +#define CKA_COEFFICIENT 0x00000128UL +#define CKA_PUBLIC_KEY_INFO 0x00000129UL +#define CKA_PRIME 0x00000130UL +#define CKA_SUBPRIME 0x00000131UL +#define CKA_BASE 0x00000132UL + +#define CKA_PRIME_BITS 0x00000133UL +#define CKA_SUBPRIME_BITS 0x00000134UL +#define CKA_SUB_PRIME_BITS CKA_SUBPRIME_BITS + +#define CKA_VALUE_BITS 0x00000160UL +#define CKA_VALUE_LEN 0x00000161UL +#define CKA_EXTRACTABLE 0x00000162UL +#define CKA_LOCAL 0x00000163UL +#define CKA_NEVER_EXTRACTABLE 0x00000164UL +#define CKA_ALWAYS_SENSITIVE 0x00000165UL +#define CKA_KEY_GEN_MECHANISM 0x00000166UL + +#define CKA_MODIFIABLE 0x00000170UL +#define CKA_COPYABLE 0x00000171UL + +#define CKA_DESTROYABLE 0x00000172UL + +#define CKA_ECDSA_PARAMS 0x00000180UL /* Deprecated */ +#define CKA_EC_PARAMS 0x00000180UL + +#define CKA_EC_POINT 0x00000181UL + +#define CKA_SECONDARY_AUTH 0x00000200UL /* Deprecated */ +#define CKA_AUTH_PIN_FLAGS 0x00000201UL /* Deprecated */ + +#define CKA_ALWAYS_AUTHENTICATE 0x00000202UL + +#define CKA_WRAP_WITH_TRUSTED 0x00000210UL +#define CKA_WRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000211UL) +#define CKA_UNWRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000212UL) +#define CKA_DERIVE_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000213UL) + +#define CKA_OTP_FORMAT 0x00000220UL +#define CKA_OTP_LENGTH 0x00000221UL +#define CKA_OTP_TIME_INTERVAL 0x00000222UL +#define CKA_OTP_USER_FRIENDLY_MODE 0x00000223UL 
+#define CKA_OTP_CHALLENGE_REQUIREMENT 0x00000224UL +#define CKA_OTP_TIME_REQUIREMENT 0x00000225UL +#define CKA_OTP_COUNTER_REQUIREMENT 0x00000226UL +#define CKA_OTP_PIN_REQUIREMENT 0x00000227UL +#define CKA_OTP_COUNTER 0x0000022EUL +#define CKA_OTP_TIME 0x0000022FUL +#define CKA_OTP_USER_IDENTIFIER 0x0000022AUL +#define CKA_OTP_SERVICE_IDENTIFIER 0x0000022BUL +#define CKA_OTP_SERVICE_LOGO 0x0000022CUL +#define CKA_OTP_SERVICE_LOGO_TYPE 0x0000022DUL + +#define CKA_GOSTR3410_PARAMS 0x00000250UL +#define CKA_GOSTR3411_PARAMS 0x00000251UL +#define CKA_GOST28147_PARAMS 0x00000252UL + +#define CKA_HW_FEATURE_TYPE 0x00000300UL +#define CKA_RESET_ON_INIT 0x00000301UL +#define CKA_HAS_RESET 0x00000302UL + +#define CKA_PIXEL_X 0x00000400UL +#define CKA_PIXEL_Y 0x00000401UL +#define CKA_RESOLUTION 0x00000402UL +#define CKA_CHAR_ROWS 0x00000403UL +#define CKA_CHAR_COLUMNS 0x00000404UL +#define CKA_COLOR 0x00000405UL +#define CKA_BITS_PER_PIXEL 0x00000406UL +#define CKA_CHAR_SETS 0x00000480UL +#define CKA_ENCODING_METHODS 0x00000481UL +#define CKA_MIME_TYPES 0x00000482UL +#define CKA_MECHANISM_TYPE 0x00000500UL +#define CKA_REQUIRED_CMS_ATTRIBUTES 0x00000501UL +#define CKA_DEFAULT_CMS_ATTRIBUTES 0x00000502UL +#define CKA_SUPPORTED_CMS_ATTRIBUTES 0x00000503UL +#define CKA_ALLOWED_MECHANISMS (CKF_ARRAY_ATTRIBUTE|0x00000600UL) + +#define CKA_VENDOR_DEFINED 0x80000000UL + +/* CK_ATTRIBUTE is a structure that includes the type, length + * and value of an attribute + */ +typedef struct CK_ATTRIBUTE { + CK_ATTRIBUTE_TYPE type; + CK_VOID_PTR pValue; + CK_ULONG ulValueLen; /* in bytes */ +} CK_ATTRIBUTE; + +typedef CK_ATTRIBUTE CK_PTR CK_ATTRIBUTE_PTR; + +/* CK_DATE is a structure that defines a date */ +typedef struct CK_DATE{ + CK_CHAR year[4]; /* the year ("1900" - "9999") */ + CK_CHAR month[2]; /* the month ("01" - "12") */ + CK_CHAR day[2]; /* the day ("01" - "31") */ +} CK_DATE; + + +/* CK_MECHANISM_TYPE is a value that identifies a mechanism + * type + */ +typedef CK_ULONG CK_MECHANISM_TYPE; + +/* the following mechanism types are defined: */ +#define CKM_RSA_PKCS_KEY_PAIR_GEN 0x00000000UL +#define CKM_RSA_PKCS 0x00000001UL +#define CKM_RSA_9796 0x00000002UL +#define CKM_RSA_X_509 0x00000003UL + +#define CKM_MD2_RSA_PKCS 0x00000004UL +#define CKM_MD5_RSA_PKCS 0x00000005UL +#define CKM_SHA1_RSA_PKCS 0x00000006UL + +#define CKM_RIPEMD128_RSA_PKCS 0x00000007UL +#define CKM_RIPEMD160_RSA_PKCS 0x00000008UL +#define CKM_RSA_PKCS_OAEP 0x00000009UL + +#define CKM_RSA_X9_31_KEY_PAIR_GEN 0x0000000AUL +#define CKM_RSA_X9_31 0x0000000BUL +#define CKM_SHA1_RSA_X9_31 0x0000000CUL +#define CKM_RSA_PKCS_PSS 0x0000000DUL +#define CKM_SHA1_RSA_PKCS_PSS 0x0000000EUL + +#define CKM_DSA_KEY_PAIR_GEN 0x00000010UL +#define CKM_DSA 0x00000011UL +#define CKM_DSA_SHA1 0x00000012UL +#define CKM_DSA_SHA224 0x00000013UL +#define CKM_DSA_SHA256 0x00000014UL +#define CKM_DSA_SHA384 0x00000015UL +#define CKM_DSA_SHA512 0x00000016UL +#define CKM_DSA_SHA3_224 0x00000018UL +#define CKM_DSA_SHA3_256 0x00000019UL +#define CKM_DSA_SHA3_384 0x0000001AUL +#define CKM_DSA_SHA3_512 0x0000001BUL + +#define CKM_DH_PKCS_KEY_PAIR_GEN 0x00000020UL +#define CKM_DH_PKCS_DERIVE 0x00000021UL + +#define CKM_X9_42_DH_KEY_PAIR_GEN 0x00000030UL +#define CKM_X9_42_DH_DERIVE 0x00000031UL +#define CKM_X9_42_DH_HYBRID_DERIVE 0x00000032UL +#define CKM_X9_42_MQV_DERIVE 0x00000033UL + +#define CKM_SHA256_RSA_PKCS 0x00000040UL +#define CKM_SHA384_RSA_PKCS 0x00000041UL +#define CKM_SHA512_RSA_PKCS 0x00000042UL +#define CKM_SHA256_RSA_PKCS_PSS 0x00000043UL +#define 
CKM_SHA384_RSA_PKCS_PSS 0x00000044UL +#define CKM_SHA512_RSA_PKCS_PSS 0x00000045UL + +#define CKM_SHA224_RSA_PKCS 0x00000046UL +#define CKM_SHA224_RSA_PKCS_PSS 0x00000047UL + +#define CKM_SHA512_224 0x00000048UL +#define CKM_SHA512_224_HMAC 0x00000049UL +#define CKM_SHA512_224_HMAC_GENERAL 0x0000004AUL +#define CKM_SHA512_224_KEY_DERIVATION 0x0000004BUL +#define CKM_SHA512_256 0x0000004CUL +#define CKM_SHA512_256_HMAC 0x0000004DUL +#define CKM_SHA512_256_HMAC_GENERAL 0x0000004EUL +#define CKM_SHA512_256_KEY_DERIVATION 0x0000004FUL + +#define CKM_SHA512_T 0x00000050UL +#define CKM_SHA512_T_HMAC 0x00000051UL +#define CKM_SHA512_T_HMAC_GENERAL 0x00000052UL +#define CKM_SHA512_T_KEY_DERIVATION 0x00000053UL + +#define CKM_SHA3_256_RSA_PKCS 0x00000060UL +#define CKM_SHA3_384_RSA_PKCS 0x00000061UL +#define CKM_SHA3_512_RSA_PKCS 0x00000062UL +#define CKM_SHA3_256_RSA_PKCS_PSS 0x00000063UL +#define CKM_SHA3_384_RSA_PKCS_PSS 0x00000064UL +#define CKM_SHA3_512_RSA_PKCS_PSS 0x00000065UL +#define CKM_SHA3_224_RSA_PKCS 0x00000066UL +#define CKM_SHA3_224_RSA_PKCS_PSS 0x00000067UL + +#define CKM_RC2_KEY_GEN 0x00000100UL +#define CKM_RC2_ECB 0x00000101UL +#define CKM_RC2_CBC 0x00000102UL +#define CKM_RC2_MAC 0x00000103UL + +#define CKM_RC2_MAC_GENERAL 0x00000104UL +#define CKM_RC2_CBC_PAD 0x00000105UL + +#define CKM_RC4_KEY_GEN 0x00000110UL +#define CKM_RC4 0x00000111UL +#define CKM_DES_KEY_GEN 0x00000120UL +#define CKM_DES_ECB 0x00000121UL +#define CKM_DES_CBC 0x00000122UL +#define CKM_DES_MAC 0x00000123UL + +#define CKM_DES_MAC_GENERAL 0x00000124UL +#define CKM_DES_CBC_PAD 0x00000125UL + +#define CKM_DES2_KEY_GEN 0x00000130UL +#define CKM_DES3_KEY_GEN 0x00000131UL +#define CKM_DES3_ECB 0x00000132UL +#define CKM_DES3_CBC 0x00000133UL +#define CKM_DES3_MAC 0x00000134UL + +#define CKM_DES3_MAC_GENERAL 0x00000135UL +#define CKM_DES3_CBC_PAD 0x00000136UL +#define CKM_DES3_CMAC_GENERAL 0x00000137UL +#define CKM_DES3_CMAC 0x00000138UL +#define CKM_CDMF_KEY_GEN 0x00000140UL +#define CKM_CDMF_ECB 0x00000141UL +#define CKM_CDMF_CBC 0x00000142UL +#define CKM_CDMF_MAC 0x00000143UL +#define CKM_CDMF_MAC_GENERAL 0x00000144UL +#define CKM_CDMF_CBC_PAD 0x00000145UL + +#define CKM_DES_OFB64 0x00000150UL +#define CKM_DES_OFB8 0x00000151UL +#define CKM_DES_CFB64 0x00000152UL +#define CKM_DES_CFB8 0x00000153UL + +#define CKM_MD2 0x00000200UL + +#define CKM_MD2_HMAC 0x00000201UL +#define CKM_MD2_HMAC_GENERAL 0x00000202UL + +#define CKM_MD5 0x00000210UL + +#define CKM_MD5_HMAC 0x00000211UL +#define CKM_MD5_HMAC_GENERAL 0x00000212UL + +#define CKM_SHA_1 0x00000220UL + +#define CKM_SHA_1_HMAC 0x00000221UL +#define CKM_SHA_1_HMAC_GENERAL 0x00000222UL + +#define CKM_RIPEMD128 0x00000230UL +#define CKM_RIPEMD128_HMAC 0x00000231UL +#define CKM_RIPEMD128_HMAC_GENERAL 0x00000232UL +#define CKM_RIPEMD160 0x00000240UL +#define CKM_RIPEMD160_HMAC 0x00000241UL +#define CKM_RIPEMD160_HMAC_GENERAL 0x00000242UL + +#define CKM_SHA256 0x00000250UL +#define CKM_SHA256_HMAC 0x00000251UL +#define CKM_SHA256_HMAC_GENERAL 0x00000252UL +#define CKM_SHA224 0x00000255UL +#define CKM_SHA224_HMAC 0x00000256UL +#define CKM_SHA224_HMAC_GENERAL 0x00000257UL +#define CKM_SHA384 0x00000260UL +#define CKM_SHA384_HMAC 0x00000261UL +#define CKM_SHA384_HMAC_GENERAL 0x00000262UL +#define CKM_SHA512 0x00000270UL +#define CKM_SHA512_HMAC 0x00000271UL +#define CKM_SHA512_HMAC_GENERAL 0x00000272UL +#define CKM_SECURID_KEY_GEN 0x00000280UL +#define CKM_SECURID 0x00000282UL +#define CKM_HOTP_KEY_GEN 0x00000290UL +#define CKM_HOTP 0x00000291UL +#define CKM_ACTI 
0x000002A0UL +#define CKM_ACTI_KEY_GEN 0x000002A1UL + +#define CKM_SHA3_256 0x000002B0UL +#define CKM_SHA3_256_HMAC 0x000002B1UL +#define CKM_SHA3_256_HMAC_GENERAL 0x000002B2UL +#define CKM_SHA3_256_KEY_GEN 0x000002B3UL +#define CKM_SHA3_224 0x000002B5UL +#define CKM_SHA3_224_HMAC 0x000002B6UL +#define CKM_SHA3_224_HMAC_GENERAL 0x000002B7UL +#define CKM_SHA3_224_KEY_GEN 0x000002B8UL +#define CKM_SHA3_384 0x000002C0UL +#define CKM_SHA3_384_HMAC 0x000002C1UL +#define CKM_SHA3_384_HMAC_GENERAL 0x000002C2UL +#define CKM_SHA3_384_KEY_GEN 0x000002C3UL +#define CKM_SHA3_512 0x000002D0UL +#define CKM_SHA3_512_HMAC 0x000002D1UL +#define CKM_SHA3_512_HMAC_GENERAL 0x000002D2UL +#define CKM_SHA3_512_KEY_GEN 0x000002D3UL + +#define CKM_CAST_KEY_GEN 0x00000300UL +#define CKM_CAST_ECB 0x00000301UL +#define CKM_CAST_CBC 0x00000302UL +#define CKM_CAST_MAC 0x00000303UL +#define CKM_CAST_MAC_GENERAL 0x00000304UL +#define CKM_CAST_CBC_PAD 0x00000305UL +#define CKM_CAST3_KEY_GEN 0x00000310UL +#define CKM_CAST3_ECB 0x00000311UL +#define CKM_CAST3_CBC 0x00000312UL +#define CKM_CAST3_MAC 0x00000313UL +#define CKM_CAST3_MAC_GENERAL 0x00000314UL +#define CKM_CAST3_CBC_PAD 0x00000315UL +/* Note that CAST128 and CAST5 are the same algorithm */ +#define CKM_CAST5_KEY_GEN 0x00000320UL +#define CKM_CAST128_KEY_GEN 0x00000320UL +#define CKM_CAST5_ECB 0x00000321UL +#define CKM_CAST128_ECB 0x00000321UL +#define CKM_CAST5_CBC 0x00000322UL /* Deprecated */ +#define CKM_CAST128_CBC 0x00000322UL +#define CKM_CAST5_MAC 0x00000323UL /* Deprecated */ +#define CKM_CAST128_MAC 0x00000323UL +#define CKM_CAST5_MAC_GENERAL 0x00000324UL /* Deprecated */ +#define CKM_CAST128_MAC_GENERAL 0x00000324UL +#define CKM_CAST5_CBC_PAD 0x00000325UL /* Deprecated */ +#define CKM_CAST128_CBC_PAD 0x00000325UL +#define CKM_RC5_KEY_GEN 0x00000330UL +#define CKM_RC5_ECB 0x00000331UL +#define CKM_RC5_CBC 0x00000332UL +#define CKM_RC5_MAC 0x00000333UL +#define CKM_RC5_MAC_GENERAL 0x00000334UL +#define CKM_RC5_CBC_PAD 0x00000335UL +#define CKM_IDEA_KEY_GEN 0x00000340UL +#define CKM_IDEA_ECB 0x00000341UL +#define CKM_IDEA_CBC 0x00000342UL +#define CKM_IDEA_MAC 0x00000343UL +#define CKM_IDEA_MAC_GENERAL 0x00000344UL +#define CKM_IDEA_CBC_PAD 0x00000345UL +#define CKM_GENERIC_SECRET_KEY_GEN 0x00000350UL +#define CKM_CONCATENATE_BASE_AND_KEY 0x00000360UL +#define CKM_CONCATENATE_BASE_AND_DATA 0x00000362UL +#define CKM_CONCATENATE_DATA_AND_BASE 0x00000363UL +#define CKM_XOR_BASE_AND_DATA 0x00000364UL +#define CKM_EXTRACT_KEY_FROM_KEY 0x00000365UL +#define CKM_SSL3_PRE_MASTER_KEY_GEN 0x00000370UL +#define CKM_SSL3_MASTER_KEY_DERIVE 0x00000371UL +#define CKM_SSL3_KEY_AND_MAC_DERIVE 0x00000372UL + +#define CKM_SSL3_MASTER_KEY_DERIVE_DH 0x00000373UL +#define CKM_TLS_PRE_MASTER_KEY_GEN 0x00000374UL +#define CKM_TLS_MASTER_KEY_DERIVE 0x00000375UL +#define CKM_TLS_KEY_AND_MAC_DERIVE 0x00000376UL +#define CKM_TLS_MASTER_KEY_DERIVE_DH 0x00000377UL + +#define CKM_TLS_PRF 0x00000378UL + +#define CKM_SSL3_MD5_MAC 0x00000380UL +#define CKM_SSL3_SHA1_MAC 0x00000381UL +#define CKM_MD5_KEY_DERIVATION 0x00000390UL +#define CKM_MD2_KEY_DERIVATION 0x00000391UL +#define CKM_SHA1_KEY_DERIVATION 0x00000392UL + +#define CKM_SHA256_KEY_DERIVATION 0x00000393UL +#define CKM_SHA384_KEY_DERIVATION 0x00000394UL +#define CKM_SHA512_KEY_DERIVATION 0x00000395UL +#define CKM_SHA224_KEY_DERIVATION 0x00000396UL +#define CKM_SHA3_256_KEY_DERIVE 0x00000397UL +#define CKM_SHA3_224_KEY_DERIVE 0x00000398UL +#define CKM_SHA3_384_KEY_DERIVE 0x00000399UL +#define CKM_SHA3_512_KEY_DERIVE 0x0000039AUL 
+#define CKM_SHAKE_128_KEY_DERIVE 0x0000039BUL +#define CKM_SHAKE_256_KEY_DERIVE 0x0000039CUL + +#define CKM_PBE_MD2_DES_CBC 0x000003A0UL +#define CKM_PBE_MD5_DES_CBC 0x000003A1UL +#define CKM_PBE_MD5_CAST_CBC 0x000003A2UL +#define CKM_PBE_MD5_CAST3_CBC 0x000003A3UL +#define CKM_PBE_MD5_CAST5_CBC 0x000003A4UL /* Deprecated */ +#define CKM_PBE_MD5_CAST128_CBC 0x000003A4UL +#define CKM_PBE_SHA1_CAST5_CBC 0x000003A5UL /* Deprecated */ +#define CKM_PBE_SHA1_CAST128_CBC 0x000003A5UL +#define CKM_PBE_SHA1_RC4_128 0x000003A6UL +#define CKM_PBE_SHA1_RC4_40 0x000003A7UL +#define CKM_PBE_SHA1_DES3_EDE_CBC 0x000003A8UL +#define CKM_PBE_SHA1_DES2_EDE_CBC 0x000003A9UL +#define CKM_PBE_SHA1_RC2_128_CBC 0x000003AAUL +#define CKM_PBE_SHA1_RC2_40_CBC 0x000003ABUL + +#define CKM_PKCS5_PBKD2 0x000003B0UL + +#define CKM_PBA_SHA1_WITH_SHA1_HMAC 0x000003C0UL + +#define CKM_WTLS_PRE_MASTER_KEY_GEN 0x000003D0UL +#define CKM_WTLS_MASTER_KEY_DERIVE 0x000003D1UL +#define CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC 0x000003D2UL +#define CKM_WTLS_PRF 0x000003D3UL +#define CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE 0x000003D4UL +#define CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE 0x000003D5UL + +#define CKM_TLS10_MAC_SERVER 0x000003D6UL +#define CKM_TLS10_MAC_CLIENT 0x000003D7UL +#define CKM_TLS12_MAC 0x000003D8UL +#define CKM_TLS12_KDF 0x000003D9UL +#define CKM_TLS12_MASTER_KEY_DERIVE 0x000003E0UL +#define CKM_TLS12_KEY_AND_MAC_DERIVE 0x000003E1UL +#define CKM_TLS12_MASTER_KEY_DERIVE_DH 0x000003E2UL +#define CKM_TLS12_KEY_SAFE_DERIVE 0x000003E3UL +#define CKM_TLS_MAC 0x000003E4UL +#define CKM_TLS_KDF 0x000003E5UL + +#define CKM_KEY_WRAP_LYNKS 0x00000400UL +#define CKM_KEY_WRAP_SET_OAEP 0x00000401UL + +#define CKM_CMS_SIG 0x00000500UL +#define CKM_KIP_DERIVE 0x00000510UL +#define CKM_KIP_WRAP 0x00000511UL +#define CKM_KIP_MAC 0x00000512UL + +#define CKM_CAMELLIA_KEY_GEN 0x00000550UL +#define CKM_CAMELLIA_ECB 0x00000551UL +#define CKM_CAMELLIA_CBC 0x00000552UL +#define CKM_CAMELLIA_MAC 0x00000553UL +#define CKM_CAMELLIA_MAC_GENERAL 0x00000554UL +#define CKM_CAMELLIA_CBC_PAD 0x00000555UL +#define CKM_CAMELLIA_ECB_ENCRYPT_DATA 0x00000556UL +#define CKM_CAMELLIA_CBC_ENCRYPT_DATA 0x00000557UL +#define CKM_CAMELLIA_CTR 0x00000558UL + +#define CKM_ARIA_KEY_GEN 0x00000560UL +#define CKM_ARIA_ECB 0x00000561UL +#define CKM_ARIA_CBC 0x00000562UL +#define CKM_ARIA_MAC 0x00000563UL +#define CKM_ARIA_MAC_GENERAL 0x00000564UL +#define CKM_ARIA_CBC_PAD 0x00000565UL +#define CKM_ARIA_ECB_ENCRYPT_DATA 0x00000566UL +#define CKM_ARIA_CBC_ENCRYPT_DATA 0x00000567UL + +#define CKM_SEED_KEY_GEN 0x00000650UL +#define CKM_SEED_ECB 0x00000651UL +#define CKM_SEED_CBC 0x00000652UL +#define CKM_SEED_MAC 0x00000653UL +#define CKM_SEED_MAC_GENERAL 0x00000654UL +#define CKM_SEED_CBC_PAD 0x00000655UL +#define CKM_SEED_ECB_ENCRYPT_DATA 0x00000656UL +#define CKM_SEED_CBC_ENCRYPT_DATA 0x00000657UL + +#define CKM_SKIPJACK_KEY_GEN 0x00001000UL +#define CKM_SKIPJACK_ECB64 0x00001001UL +#define CKM_SKIPJACK_CBC64 0x00001002UL +#define CKM_SKIPJACK_OFB64 0x00001003UL +#define CKM_SKIPJACK_CFB64 0x00001004UL +#define CKM_SKIPJACK_CFB32 0x00001005UL +#define CKM_SKIPJACK_CFB16 0x00001006UL +#define CKM_SKIPJACK_CFB8 0x00001007UL +#define CKM_SKIPJACK_WRAP 0x00001008UL +#define CKM_SKIPJACK_PRIVATE_WRAP 0x00001009UL +#define CKM_SKIPJACK_RELAYX 0x0000100aUL +#define CKM_KEA_KEY_PAIR_GEN 0x00001010UL +#define CKM_KEA_KEY_DERIVE 0x00001011UL +#define CKM_KEA_DERIVE 0x00001012UL +#define CKM_FORTEZZA_TIMESTAMP 0x00001020UL +#define CKM_BATON_KEY_GEN 0x00001030UL +#define CKM_BATON_ECB128 
0x00001031UL +#define CKM_BATON_ECB96 0x00001032UL +#define CKM_BATON_CBC128 0x00001033UL +#define CKM_BATON_COUNTER 0x00001034UL +#define CKM_BATON_SHUFFLE 0x00001035UL +#define CKM_BATON_WRAP 0x00001036UL + +#define CKM_ECDSA_KEY_PAIR_GEN 0x00001040UL /* Deprecated */ +#define CKM_EC_KEY_PAIR_GEN 0x00001040UL + +#define CKM_ECDSA 0x00001041UL +#define CKM_ECDSA_SHA1 0x00001042UL +#define CKM_ECDSA_SHA224 0x00001043UL +#define CKM_ECDSA_SHA256 0x00001044UL +#define CKM_ECDSA_SHA384 0x00001045UL +#define CKM_ECDSA_SHA512 0x00001046UL + +#define CKM_ECDH1_DERIVE 0x00001050UL +#define CKM_ECDH1_COFACTOR_DERIVE 0x00001051UL +#define CKM_ECMQV_DERIVE 0x00001052UL + +#define CKM_ECDH_AES_KEY_WRAP 0x00001053UL +#define CKM_RSA_AES_KEY_WRAP 0x00001054UL + +#define CKM_JUNIPER_KEY_GEN 0x00001060UL +#define CKM_JUNIPER_ECB128 0x00001061UL +#define CKM_JUNIPER_CBC128 0x00001062UL +#define CKM_JUNIPER_COUNTER 0x00001063UL +#define CKM_JUNIPER_SHUFFLE 0x00001064UL +#define CKM_JUNIPER_WRAP 0x00001065UL +#define CKM_FASTHASH 0x00001070UL + +#define CKM_AES_KEY_GEN 0x00001080UL +#define CKM_AES_ECB 0x00001081UL +#define CKM_AES_CBC 0x00001082UL +#define CKM_AES_MAC 0x00001083UL +#define CKM_AES_MAC_GENERAL 0x00001084UL +#define CKM_AES_CBC_PAD 0x00001085UL +#define CKM_AES_CTR 0x00001086UL +#define CKM_AES_GCM 0x00001087UL +#define CKM_AES_CCM 0x00001088UL +#define CKM_AES_CTS 0x00001089UL +#define CKM_AES_CMAC 0x0000108AUL +#define CKM_AES_CMAC_GENERAL 0x0000108BUL + +#define CKM_AES_XCBC_MAC 0x0000108CUL +#define CKM_AES_XCBC_MAC_96 0x0000108DUL +#define CKM_AES_GMAC 0x0000108EUL + +#define CKM_BLOWFISH_KEY_GEN 0x00001090UL +#define CKM_BLOWFISH_CBC 0x00001091UL +#define CKM_TWOFISH_KEY_GEN 0x00001092UL +#define CKM_TWOFISH_CBC 0x00001093UL +#define CKM_BLOWFISH_CBC_PAD 0x00001094UL +#define CKM_TWOFISH_CBC_PAD 0x00001095UL + +#define CKM_DES_ECB_ENCRYPT_DATA 0x00001100UL +#define CKM_DES_CBC_ENCRYPT_DATA 0x00001101UL +#define CKM_DES3_ECB_ENCRYPT_DATA 0x00001102UL +#define CKM_DES3_CBC_ENCRYPT_DATA 0x00001103UL +#define CKM_AES_ECB_ENCRYPT_DATA 0x00001104UL +#define CKM_AES_CBC_ENCRYPT_DATA 0x00001105UL + +#define CKM_GOSTR3410_KEY_PAIR_GEN 0x00001200UL +#define CKM_GOSTR3410 0x00001201UL +#define CKM_GOSTR3410_WITH_GOSTR3411 0x00001202UL +#define CKM_GOSTR3410_KEY_WRAP 0x00001203UL +#define CKM_GOSTR3410_DERIVE 0x00001204UL +#define CKM_GOSTR3411 0x00001210UL +#define CKM_GOSTR3411_HMAC 0x00001211UL +#define CKM_GOST28147_KEY_GEN 0x00001220UL +#define CKM_GOST28147_ECB 0x00001221UL +#define CKM_GOST28147 0x00001222UL +#define CKM_GOST28147_MAC 0x00001223UL +#define CKM_GOST28147_KEY_WRAP 0x00001224UL + +#define CKM_DSA_PARAMETER_GEN 0x00002000UL +#define CKM_DH_PKCS_PARAMETER_GEN 0x00002001UL +#define CKM_X9_42_DH_PARAMETER_GEN 0x00002002UL +#define CKM_DSA_PROBABLISTIC_PARAMETER_GEN 0x00002003UL +#define CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN 0x00002004UL + +#define CKM_AES_OFB 0x00002104UL +#define CKM_AES_CFB64 0x00002105UL +#define CKM_AES_CFB8 0x00002106UL +#define CKM_AES_CFB128 0x00002107UL + +#define CKM_AES_CFB1 0x00002108UL +#define CKM_AES_KEY_WRAP 0x00002109UL /* WAS: 0x00001090 */ +#define CKM_AES_KEY_WRAP_PAD 0x0000210AUL /* WAS: 0x00001091 */ + +#define CKM_RSA_PKCS_TPM_1_1 0x00004001UL +#define CKM_RSA_PKCS_OAEP_TPM_1_1 0x00004002UL + +#define CKM_VENDOR_DEFINED 0x80000000UL + +typedef CK_MECHANISM_TYPE CK_PTR CK_MECHANISM_TYPE_PTR; + + +/* CK_MECHANISM is a structure that specifies a particular + * mechanism + */ +typedef struct CK_MECHANISM { + CK_MECHANISM_TYPE mechanism; + 
CK_VOID_PTR pParameter; + CK_ULONG ulParameterLen; /* in bytes */ +} CK_MECHANISM; + +typedef CK_MECHANISM CK_PTR CK_MECHANISM_PTR; + + +/* CK_MECHANISM_INFO provides information about a particular + * mechanism + */ +typedef struct CK_MECHANISM_INFO { + CK_ULONG ulMinKeySize; + CK_ULONG ulMaxKeySize; + CK_FLAGS flags; +} CK_MECHANISM_INFO; + +/* The flags are defined as follows: + * Bit Flag Mask Meaning */ +#define CKF_HW 0x00000001UL /* performed by HW */ + +/* Specify whether or not a mechanism can be used for a particular task */ +#define CKF_ENCRYPT 0x00000100UL +#define CKF_DECRYPT 0x00000200UL +#define CKF_DIGEST 0x00000400UL +#define CKF_SIGN 0x00000800UL +#define CKF_SIGN_RECOVER 0x00001000UL +#define CKF_VERIFY 0x00002000UL +#define CKF_VERIFY_RECOVER 0x00004000UL +#define CKF_GENERATE 0x00008000UL +#define CKF_GENERATE_KEY_PAIR 0x00010000UL +#define CKF_WRAP 0x00020000UL +#define CKF_UNWRAP 0x00040000UL +#define CKF_DERIVE 0x00080000UL + +/* Describe a token's EC capabilities not available in mechanism + * information. + */ +#define CKF_EC_F_P 0x00100000UL +#define CKF_EC_F_2M 0x00200000UL +#define CKF_EC_ECPARAMETERS 0x00400000UL +#define CKF_EC_NAMEDCURVE 0x00800000UL +#define CKF_EC_UNCOMPRESS 0x01000000UL +#define CKF_EC_COMPRESS 0x02000000UL + +#define CKF_EXTENSION 0x80000000UL + +typedef CK_MECHANISM_INFO CK_PTR CK_MECHANISM_INFO_PTR; + +/* CK_RV is a value that identifies the return value of a + * Cryptoki function + */ +typedef CK_ULONG CK_RV; + +#define CKR_OK 0x00000000UL +#define CKR_CANCEL 0x00000001UL +#define CKR_HOST_MEMORY 0x00000002UL +#define CKR_SLOT_ID_INVALID 0x00000003UL + +#define CKR_GENERAL_ERROR 0x00000005UL +#define CKR_FUNCTION_FAILED 0x00000006UL + +#define CKR_ARGUMENTS_BAD 0x00000007UL +#define CKR_NO_EVENT 0x00000008UL +#define CKR_NEED_TO_CREATE_THREADS 0x00000009UL +#define CKR_CANT_LOCK 0x0000000AUL + +#define CKR_ATTRIBUTE_READ_ONLY 0x00000010UL +#define CKR_ATTRIBUTE_SENSITIVE 0x00000011UL +#define CKR_ATTRIBUTE_TYPE_INVALID 0x00000012UL +#define CKR_ATTRIBUTE_VALUE_INVALID 0x00000013UL + +#define CKR_ACTION_PROHIBITED 0x0000001BUL + +#define CKR_DATA_INVALID 0x00000020UL +#define CKR_DATA_LEN_RANGE 0x00000021UL +#define CKR_DEVICE_ERROR 0x00000030UL +#define CKR_DEVICE_MEMORY 0x00000031UL +#define CKR_DEVICE_REMOVED 0x00000032UL +#define CKR_ENCRYPTED_DATA_INVALID 0x00000040UL +#define CKR_ENCRYPTED_DATA_LEN_RANGE 0x00000041UL +#define CKR_FUNCTION_CANCELED 0x00000050UL +#define CKR_FUNCTION_NOT_PARALLEL 0x00000051UL + +#define CKR_FUNCTION_NOT_SUPPORTED 0x00000054UL + +#define CKR_KEY_HANDLE_INVALID 0x00000060UL + +#define CKR_KEY_SIZE_RANGE 0x00000062UL +#define CKR_KEY_TYPE_INCONSISTENT 0x00000063UL + +#define CKR_KEY_NOT_NEEDED 0x00000064UL +#define CKR_KEY_CHANGED 0x00000065UL +#define CKR_KEY_NEEDED 0x00000066UL +#define CKR_KEY_INDIGESTIBLE 0x00000067UL +#define CKR_KEY_FUNCTION_NOT_PERMITTED 0x00000068UL +#define CKR_KEY_NOT_WRAPPABLE 0x00000069UL +#define CKR_KEY_UNEXTRACTABLE 0x0000006AUL + +#define CKR_MECHANISM_INVALID 0x00000070UL +#define CKR_MECHANISM_PARAM_INVALID 0x00000071UL + +#define CKR_OBJECT_HANDLE_INVALID 0x00000082UL +#define CKR_OPERATION_ACTIVE 0x00000090UL +#define CKR_OPERATION_NOT_INITIALIZED 0x00000091UL +#define CKR_PIN_INCORRECT 0x000000A0UL +#define CKR_PIN_INVALID 0x000000A1UL +#define CKR_PIN_LEN_RANGE 0x000000A2UL + +#define CKR_PIN_EXPIRED 0x000000A3UL +#define CKR_PIN_LOCKED 0x000000A4UL + +#define CKR_SESSION_CLOSED 0x000000B0UL +#define CKR_SESSION_COUNT 0x000000B1UL +#define 
CKR_SESSION_HANDLE_INVALID 0x000000B3UL +#define CKR_SESSION_PARALLEL_NOT_SUPPORTED 0x000000B4UL +#define CKR_SESSION_READ_ONLY 0x000000B5UL +#define CKR_SESSION_EXISTS 0x000000B6UL + +#define CKR_SESSION_READ_ONLY_EXISTS 0x000000B7UL +#define CKR_SESSION_READ_WRITE_SO_EXISTS 0x000000B8UL + +#define CKR_SIGNATURE_INVALID 0x000000C0UL +#define CKR_SIGNATURE_LEN_RANGE 0x000000C1UL +#define CKR_TEMPLATE_INCOMPLETE 0x000000D0UL +#define CKR_TEMPLATE_INCONSISTENT 0x000000D1UL +#define CKR_TOKEN_NOT_PRESENT 0x000000E0UL +#define CKR_TOKEN_NOT_RECOGNIZED 0x000000E1UL +#define CKR_TOKEN_WRITE_PROTECTED 0x000000E2UL +#define CKR_UNWRAPPING_KEY_HANDLE_INVALID 0x000000F0UL +#define CKR_UNWRAPPING_KEY_SIZE_RANGE 0x000000F1UL +#define CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x000000F2UL +#define CKR_USER_ALREADY_LOGGED_IN 0x00000100UL +#define CKR_USER_NOT_LOGGED_IN 0x00000101UL +#define CKR_USER_PIN_NOT_INITIALIZED 0x00000102UL +#define CKR_USER_TYPE_INVALID 0x00000103UL + +#define CKR_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000104UL +#define CKR_USER_TOO_MANY_TYPES 0x00000105UL + +#define CKR_WRAPPED_KEY_INVALID 0x00000110UL +#define CKR_WRAPPED_KEY_LEN_RANGE 0x00000112UL +#define CKR_WRAPPING_KEY_HANDLE_INVALID 0x00000113UL +#define CKR_WRAPPING_KEY_SIZE_RANGE 0x00000114UL +#define CKR_WRAPPING_KEY_TYPE_INCONSISTENT 0x00000115UL +#define CKR_RANDOM_SEED_NOT_SUPPORTED 0x00000120UL + +#define CKR_RANDOM_NO_RNG 0x00000121UL + +#define CKR_DOMAIN_PARAMS_INVALID 0x00000130UL + +#define CKR_CURVE_NOT_SUPPORTED 0x00000140UL + +#define CKR_BUFFER_TOO_SMALL 0x00000150UL +#define CKR_SAVED_STATE_INVALID 0x00000160UL +#define CKR_INFORMATION_SENSITIVE 0x00000170UL +#define CKR_STATE_UNSAVEABLE 0x00000180UL + +#define CKR_CRYPTOKI_NOT_INITIALIZED 0x00000190UL +#define CKR_CRYPTOKI_ALREADY_INITIALIZED 0x00000191UL +#define CKR_MUTEX_BAD 0x000001A0UL +#define CKR_MUTEX_NOT_LOCKED 0x000001A1UL + +#define CKR_NEW_PIN_MODE 0x000001B0UL +#define CKR_NEXT_OTP 0x000001B1UL + +#define CKR_EXCEEDED_MAX_ITERATIONS 0x000001B5UL +#define CKR_FIPS_SELF_TEST_FAILED 0x000001B6UL +#define CKR_LIBRARY_LOAD_FAILED 0x000001B7UL +#define CKR_PIN_TOO_WEAK 0x000001B8UL +#define CKR_PUBLIC_KEY_INVALID 0x000001B9UL + +#define CKR_FUNCTION_REJECTED 0x00000200UL + +#define CKR_VENDOR_DEFINED 0x80000000UL + + +/* CK_NOTIFY is an application callback that processes events */ +typedef CK_CALLBACK_FUNCTION(CK_RV, CK_NOTIFY)( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_NOTIFICATION event, + CK_VOID_PTR pApplication /* passed to C_OpenSession */ +); + + +/* CK_FUNCTION_LIST is a structure holding a Cryptoki spec + * version and pointers of appropriate types to all the + * Cryptoki functions + */ +typedef struct CK_FUNCTION_LIST CK_FUNCTION_LIST; + +typedef CK_FUNCTION_LIST CK_PTR CK_FUNCTION_LIST_PTR; + +typedef CK_FUNCTION_LIST_PTR CK_PTR CK_FUNCTION_LIST_PTR_PTR; + + +/* CK_CREATEMUTEX is an application callback for creating a + * mutex object + */ +typedef CK_CALLBACK_FUNCTION(CK_RV, CK_CREATEMUTEX)( + CK_VOID_PTR_PTR ppMutex /* location to receive ptr to mutex */ +); + + +/* CK_DESTROYMUTEX is an application callback for destroying a + * mutex object + */ +typedef CK_CALLBACK_FUNCTION(CK_RV, CK_DESTROYMUTEX)( + CK_VOID_PTR pMutex /* pointer to mutex */ +); + + +/* CK_LOCKMUTEX is an application callback for locking a mutex */ +typedef CK_CALLBACK_FUNCTION(CK_RV, CK_LOCKMUTEX)( + CK_VOID_PTR pMutex /* pointer to mutex */ +); + + +/* CK_UNLOCKMUTEX is an application callback for unlocking a + * mutex + */ +typedef 
CK_CALLBACK_FUNCTION(CK_RV, CK_UNLOCKMUTEX)( + CK_VOID_PTR pMutex /* pointer to mutex */ +); + + +/* CK_C_INITIALIZE_ARGS provides the optional arguments to + * C_Initialize + */ +typedef struct CK_C_INITIALIZE_ARGS { + CK_CREATEMUTEX CreateMutex; + CK_DESTROYMUTEX DestroyMutex; + CK_LOCKMUTEX LockMutex; + CK_UNLOCKMUTEX UnlockMutex; + CK_FLAGS flags; + CK_VOID_PTR pReserved; +} CK_C_INITIALIZE_ARGS; + +/* flags: bit flags that provide capabilities of the slot + * Bit Flag Mask Meaning + */ +#define CKF_LIBRARY_CANT_CREATE_OS_THREADS 0x00000001UL +#define CKF_OS_LOCKING_OK 0x00000002UL + +typedef CK_C_INITIALIZE_ARGS CK_PTR CK_C_INITIALIZE_ARGS_PTR; + + +/* additional flags for parameters to functions */ + +/* CKF_DONT_BLOCK is for the function C_WaitForSlotEvent */ +#define CKF_DONT_BLOCK 1 + +/* CK_RSA_PKCS_MGF_TYPE is used to indicate the Message + * Generation Function (MGF) applied to a message block when + * formatting a message block for the PKCS #1 OAEP encryption + * scheme. + */ +typedef CK_ULONG CK_RSA_PKCS_MGF_TYPE; + +typedef CK_RSA_PKCS_MGF_TYPE CK_PTR CK_RSA_PKCS_MGF_TYPE_PTR; + +/* The following MGFs are defined */ +#define CKG_MGF1_SHA1 0x00000001UL +#define CKG_MGF1_SHA256 0x00000002UL +#define CKG_MGF1_SHA384 0x00000003UL +#define CKG_MGF1_SHA512 0x00000004UL +#define CKG_MGF1_SHA224 0x00000005UL + +/* CK_RSA_PKCS_OAEP_SOURCE_TYPE is used to indicate the source + * of the encoding parameter when formatting a message block + * for the PKCS #1 OAEP encryption scheme. + */ +typedef CK_ULONG CK_RSA_PKCS_OAEP_SOURCE_TYPE; + +typedef CK_RSA_PKCS_OAEP_SOURCE_TYPE CK_PTR CK_RSA_PKCS_OAEP_SOURCE_TYPE_PTR; + +/* The following encoding parameter sources are defined */ +#define CKZ_DATA_SPECIFIED 0x00000001UL + +/* CK_RSA_PKCS_OAEP_PARAMS provides the parameters to the + * CKM_RSA_PKCS_OAEP mechanism. + */ +typedef struct CK_RSA_PKCS_OAEP_PARAMS { + CK_MECHANISM_TYPE hashAlg; + CK_RSA_PKCS_MGF_TYPE mgf; + CK_RSA_PKCS_OAEP_SOURCE_TYPE source; + CK_VOID_PTR pSourceData; + CK_ULONG ulSourceDataLen; +} CK_RSA_PKCS_OAEP_PARAMS; + +typedef CK_RSA_PKCS_OAEP_PARAMS CK_PTR CK_RSA_PKCS_OAEP_PARAMS_PTR; + +/* CK_RSA_PKCS_PSS_PARAMS provides the parameters to the + * CKM_RSA_PKCS_PSS mechanism(s). + */ +typedef struct CK_RSA_PKCS_PSS_PARAMS { + CK_MECHANISM_TYPE hashAlg; + CK_RSA_PKCS_MGF_TYPE mgf; + CK_ULONG sLen; +} CK_RSA_PKCS_PSS_PARAMS; + +typedef CK_RSA_PKCS_PSS_PARAMS CK_PTR CK_RSA_PKCS_PSS_PARAMS_PTR; + +typedef CK_ULONG CK_EC_KDF_TYPE; + +/* The following EC Key Derivation Functions are defined */ +#define CKD_NULL 0x00000001UL +#define CKD_SHA1_KDF 0x00000002UL + +/* The following X9.42 DH key derivation functions are defined */ +#define CKD_SHA1_KDF_ASN1 0x00000003UL +#define CKD_SHA1_KDF_CONCATENATE 0x00000004UL +#define CKD_SHA224_KDF 0x00000005UL +#define CKD_SHA256_KDF 0x00000006UL +#define CKD_SHA384_KDF 0x00000007UL +#define CKD_SHA512_KDF 0x00000008UL +#define CKD_CPDIVERSIFY_KDF 0x00000009UL +#define CKD_SHA3_224_KDF 0x0000000AUL +#define CKD_SHA3_256_KDF 0x0000000BUL +#define CKD_SHA3_384_KDF 0x0000000CUL +#define CKD_SHA3_512_KDF 0x0000000DUL + +/* CK_ECDH1_DERIVE_PARAMS provides the parameters to the + * CKM_ECDH1_DERIVE and CKM_ECDH1_COFACTOR_DERIVE mechanisms, + * where each party contributes one key pair. 
+ */ +typedef struct CK_ECDH1_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE_PTR pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; +} CK_ECDH1_DERIVE_PARAMS; + +typedef CK_ECDH1_DERIVE_PARAMS CK_PTR CK_ECDH1_DERIVE_PARAMS_PTR; + +/* + * CK_ECDH2_DERIVE_PARAMS provides the parameters to the + * CKM_ECMQV_DERIVE mechanism, where each party contributes two key pairs. + */ +typedef struct CK_ECDH2_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE_PTR pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE_PTR pPublicData2; +} CK_ECDH2_DERIVE_PARAMS; + +typedef CK_ECDH2_DERIVE_PARAMS CK_PTR CK_ECDH2_DERIVE_PARAMS_PTR; + +typedef struct CK_ECMQV_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE_PTR pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE_PTR pPublicData2; + CK_OBJECT_HANDLE publicKey; +} CK_ECMQV_DERIVE_PARAMS; + +typedef CK_ECMQV_DERIVE_PARAMS CK_PTR CK_ECMQV_DERIVE_PARAMS_PTR; + +/* Typedefs and defines for the CKM_X9_42_DH_KEY_PAIR_GEN and the + * CKM_X9_42_DH_PARAMETER_GEN mechanisms + */ +typedef CK_ULONG CK_X9_42_DH_KDF_TYPE; +typedef CK_X9_42_DH_KDF_TYPE CK_PTR CK_X9_42_DH_KDF_TYPE_PTR; + +/* CK_X9_42_DH1_DERIVE_PARAMS provides the parameters to the + * CKM_X9_42_DH_DERIVE key derivation mechanism, where each party + * contributes one key pair + */ +typedef struct CK_X9_42_DH1_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE_PTR pOtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; +} CK_X9_42_DH1_DERIVE_PARAMS; + +typedef struct CK_X9_42_DH1_DERIVE_PARAMS CK_PTR CK_X9_42_DH1_DERIVE_PARAMS_PTR; + +/* CK_X9_42_DH2_DERIVE_PARAMS provides the parameters to the + * CKM_X9_42_DH_HYBRID_DERIVE and CKM_X9_42_MQV_DERIVE key derivation + * mechanisms, where each party contributes two key pairs + */ +typedef struct CK_X9_42_DH2_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE_PTR pOtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE_PTR pPublicData2; +} CK_X9_42_DH2_DERIVE_PARAMS; + +typedef CK_X9_42_DH2_DERIVE_PARAMS CK_PTR CK_X9_42_DH2_DERIVE_PARAMS_PTR; + +typedef struct CK_X9_42_MQV_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE_PTR pOtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE_PTR pPublicData2; + CK_OBJECT_HANDLE publicKey; +} CK_X9_42_MQV_DERIVE_PARAMS; + +typedef CK_X9_42_MQV_DERIVE_PARAMS CK_PTR CK_X9_42_MQV_DERIVE_PARAMS_PTR; + +/* CK_KEA_DERIVE_PARAMS provides the parameters to the + * CKM_KEA_DERIVE mechanism + */ +typedef struct CK_KEA_DERIVE_PARAMS { + CK_BBOOL isSender; + CK_ULONG ulRandomLen; + CK_BYTE_PTR pRandomA; + CK_BYTE_PTR pRandomB; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; +} CK_KEA_DERIVE_PARAMS; + +typedef CK_KEA_DERIVE_PARAMS CK_PTR CK_KEA_DERIVE_PARAMS_PTR; + + +/* CK_RC2_PARAMS provides the parameters to the CKM_RC2_ECB and + * CKM_RC2_MAC mechanisms. 
An instance of CK_RC2_PARAMS just + * holds the effective keysize + */ +typedef CK_ULONG CK_RC2_PARAMS; + +typedef CK_RC2_PARAMS CK_PTR CK_RC2_PARAMS_PTR; + + +/* CK_RC2_CBC_PARAMS provides the parameters to the CKM_RC2_CBC + * mechanism + */ +typedef struct CK_RC2_CBC_PARAMS { + CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ + CK_BYTE iv[8]; /* IV for CBC mode */ +} CK_RC2_CBC_PARAMS; + +typedef CK_RC2_CBC_PARAMS CK_PTR CK_RC2_CBC_PARAMS_PTR; + + +/* CK_RC2_MAC_GENERAL_PARAMS provides the parameters for the + * CKM_RC2_MAC_GENERAL mechanism + */ +typedef struct CK_RC2_MAC_GENERAL_PARAMS { + CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ + CK_ULONG ulMacLength; /* Length of MAC in bytes */ +} CK_RC2_MAC_GENERAL_PARAMS; + +typedef CK_RC2_MAC_GENERAL_PARAMS CK_PTR \ + CK_RC2_MAC_GENERAL_PARAMS_PTR; + + +/* CK_RC5_PARAMS provides the parameters to the CKM_RC5_ECB and + * CKM_RC5_MAC mechanisms + */ +typedef struct CK_RC5_PARAMS { + CK_ULONG ulWordsize; /* wordsize in bits */ + CK_ULONG ulRounds; /* number of rounds */ +} CK_RC5_PARAMS; + +typedef CK_RC5_PARAMS CK_PTR CK_RC5_PARAMS_PTR; + + +/* CK_RC5_CBC_PARAMS provides the parameters to the CKM_RC5_CBC + * mechanism + */ +typedef struct CK_RC5_CBC_PARAMS { + CK_ULONG ulWordsize; /* wordsize in bits */ + CK_ULONG ulRounds; /* number of rounds */ + CK_BYTE_PTR pIv; /* pointer to IV */ + CK_ULONG ulIvLen; /* length of IV in bytes */ +} CK_RC5_CBC_PARAMS; + +typedef CK_RC5_CBC_PARAMS CK_PTR CK_RC5_CBC_PARAMS_PTR; + + +/* CK_RC5_MAC_GENERAL_PARAMS provides the parameters for the + * CKM_RC5_MAC_GENERAL mechanism + */ +typedef struct CK_RC5_MAC_GENERAL_PARAMS { + CK_ULONG ulWordsize; /* wordsize in bits */ + CK_ULONG ulRounds; /* number of rounds */ + CK_ULONG ulMacLength; /* Length of MAC in bytes */ +} CK_RC5_MAC_GENERAL_PARAMS; + +typedef CK_RC5_MAC_GENERAL_PARAMS CK_PTR \ + CK_RC5_MAC_GENERAL_PARAMS_PTR; + +/* CK_MAC_GENERAL_PARAMS provides the parameters to most block + * ciphers' MAC_GENERAL mechanisms. 
Its value is the length of + * the MAC + */ +typedef CK_ULONG CK_MAC_GENERAL_PARAMS; + +typedef CK_MAC_GENERAL_PARAMS CK_PTR CK_MAC_GENERAL_PARAMS_PTR; + +typedef struct CK_DES_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[8]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_DES_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_DES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR; + +typedef struct CK_AES_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_AES_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_AES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR; + +/* CK_SKIPJACK_PRIVATE_WRAP_PARAMS provides the parameters to the + * CKM_SKIPJACK_PRIVATE_WRAP mechanism + */ +typedef struct CK_SKIPJACK_PRIVATE_WRAP_PARAMS { + CK_ULONG ulPasswordLen; + CK_BYTE_PTR pPassword; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPAndGLen; + CK_ULONG ulQLen; + CK_ULONG ulRandomLen; + CK_BYTE_PTR pRandomA; + CK_BYTE_PTR pPrimeP; + CK_BYTE_PTR pBaseG; + CK_BYTE_PTR pSubprimeQ; +} CK_SKIPJACK_PRIVATE_WRAP_PARAMS; + +typedef CK_SKIPJACK_PRIVATE_WRAP_PARAMS CK_PTR \ + CK_SKIPJACK_PRIVATE_WRAP_PARAMS_PTR; + + +/* CK_SKIPJACK_RELAYX_PARAMS provides the parameters to the + * CKM_SKIPJACK_RELAYX mechanism + */ +typedef struct CK_SKIPJACK_RELAYX_PARAMS { + CK_ULONG ulOldWrappedXLen; + CK_BYTE_PTR pOldWrappedX; + CK_ULONG ulOldPasswordLen; + CK_BYTE_PTR pOldPassword; + CK_ULONG ulOldPublicDataLen; + CK_BYTE_PTR pOldPublicData; + CK_ULONG ulOldRandomLen; + CK_BYTE_PTR pOldRandomA; + CK_ULONG ulNewPasswordLen; + CK_BYTE_PTR pNewPassword; + CK_ULONG ulNewPublicDataLen; + CK_BYTE_PTR pNewPublicData; + CK_ULONG ulNewRandomLen; + CK_BYTE_PTR pNewRandomA; +} CK_SKIPJACK_RELAYX_PARAMS; + +typedef CK_SKIPJACK_RELAYX_PARAMS CK_PTR \ + CK_SKIPJACK_RELAYX_PARAMS_PTR; + + +typedef struct CK_PBE_PARAMS { + CK_BYTE_PTR pInitVector; + CK_UTF8CHAR_PTR pPassword; + CK_ULONG ulPasswordLen; + CK_BYTE_PTR pSalt; + CK_ULONG ulSaltLen; + CK_ULONG ulIteration; +} CK_PBE_PARAMS; + +typedef CK_PBE_PARAMS CK_PTR CK_PBE_PARAMS_PTR; + + +/* CK_KEY_WRAP_SET_OAEP_PARAMS provides the parameters to the + * CKM_KEY_WRAP_SET_OAEP mechanism + */ +typedef struct CK_KEY_WRAP_SET_OAEP_PARAMS { + CK_BYTE bBC; /* block contents byte */ + CK_BYTE_PTR pX; /* extra data */ + CK_ULONG ulXLen; /* length of extra data in bytes */ +} CK_KEY_WRAP_SET_OAEP_PARAMS; + +typedef CK_KEY_WRAP_SET_OAEP_PARAMS CK_PTR CK_KEY_WRAP_SET_OAEP_PARAMS_PTR; + +typedef struct CK_SSL3_RANDOM_DATA { + CK_BYTE_PTR pClientRandom; + CK_ULONG ulClientRandomLen; + CK_BYTE_PTR pServerRandom; + CK_ULONG ulServerRandomLen; +} CK_SSL3_RANDOM_DATA; + + +typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS { + CK_SSL3_RANDOM_DATA RandomInfo; + CK_VERSION_PTR pVersion; +} CK_SSL3_MASTER_KEY_DERIVE_PARAMS; + +typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS CK_PTR \ + CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR; + +typedef struct CK_SSL3_KEY_MAT_OUT { + CK_OBJECT_HANDLE hClientMacSecret; + CK_OBJECT_HANDLE hServerMacSecret; + CK_OBJECT_HANDLE hClientKey; + CK_OBJECT_HANDLE hServerKey; + CK_BYTE_PTR pIVClient; + CK_BYTE_PTR pIVServer; +} CK_SSL3_KEY_MAT_OUT; + +typedef CK_SSL3_KEY_MAT_OUT CK_PTR CK_SSL3_KEY_MAT_OUT_PTR; + + +typedef struct CK_SSL3_KEY_MAT_PARAMS { + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_BBOOL bIsExport; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; +} CK_SSL3_KEY_MAT_PARAMS; + +typedef CK_SSL3_KEY_MAT_PARAMS CK_PTR CK_SSL3_KEY_MAT_PARAMS_PTR; + 
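A quick capability check belongs next to these derivation parameter blocks: structures such as CK_SSL3_KEY_MAT_PARAMS are only usable when the token actually implements the mechanism, which is what C_GetMechanismInfo and the CKF_* bits defined with CK_MECHANISM_INFO report. A minimal Go sketch using the wrapper added later in this patch (assumes an initialized *pkcs11.Ctx named p and a slot ID slot; illustration only, not part of the vendored files):

	// Ask the token for CK_MECHANISM_INFO on AES-GCM and test a capability bit.
	info, err := p.GetMechanismInfo(slot, []*pkcs11.Mechanism{
		pkcs11.NewMechanism(pkcs11.CKM_AES_GCM, nil),
	})
	if err == nil && info.Flags&pkcs11.CKF_ENCRYPT != 0 {
		// The token can encrypt with AES-GCM for key sizes in
		// [info.MinKeySize, info.MaxKeySize].
	}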
+typedef struct CK_TLS_PRF_PARAMS { + CK_BYTE_PTR pSeed; + CK_ULONG ulSeedLen; + CK_BYTE_PTR pLabel; + CK_ULONG ulLabelLen; + CK_BYTE_PTR pOutput; + CK_ULONG_PTR pulOutputLen; +} CK_TLS_PRF_PARAMS; + +typedef CK_TLS_PRF_PARAMS CK_PTR CK_TLS_PRF_PARAMS_PTR; + +typedef struct CK_WTLS_RANDOM_DATA { + CK_BYTE_PTR pClientRandom; + CK_ULONG ulClientRandomLen; + CK_BYTE_PTR pServerRandom; + CK_ULONG ulServerRandomLen; +} CK_WTLS_RANDOM_DATA; + +typedef CK_WTLS_RANDOM_DATA CK_PTR CK_WTLS_RANDOM_DATA_PTR; + +typedef struct CK_WTLS_MASTER_KEY_DERIVE_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_WTLS_RANDOM_DATA RandomInfo; + CK_BYTE_PTR pVersion; +} CK_WTLS_MASTER_KEY_DERIVE_PARAMS; + +typedef CK_WTLS_MASTER_KEY_DERIVE_PARAMS CK_PTR \ + CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR; + +typedef struct CK_WTLS_PRF_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_BYTE_PTR pSeed; + CK_ULONG ulSeedLen; + CK_BYTE_PTR pLabel; + CK_ULONG ulLabelLen; + CK_BYTE_PTR pOutput; + CK_ULONG_PTR pulOutputLen; +} CK_WTLS_PRF_PARAMS; + +typedef CK_WTLS_PRF_PARAMS CK_PTR CK_WTLS_PRF_PARAMS_PTR; + +typedef struct CK_WTLS_KEY_MAT_OUT { + CK_OBJECT_HANDLE hMacSecret; + CK_OBJECT_HANDLE hKey; + CK_BYTE_PTR pIV; +} CK_WTLS_KEY_MAT_OUT; + +typedef CK_WTLS_KEY_MAT_OUT CK_PTR CK_WTLS_KEY_MAT_OUT_PTR; + +typedef struct CK_WTLS_KEY_MAT_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_ULONG ulSequenceNumber; + CK_BBOOL bIsExport; + CK_WTLS_RANDOM_DATA RandomInfo; + CK_WTLS_KEY_MAT_OUT_PTR pReturnedKeyMaterial; +} CK_WTLS_KEY_MAT_PARAMS; + +typedef CK_WTLS_KEY_MAT_PARAMS CK_PTR CK_WTLS_KEY_MAT_PARAMS_PTR; + +typedef struct CK_CMS_SIG_PARAMS { + CK_OBJECT_HANDLE certificateHandle; + CK_MECHANISM_PTR pSigningMechanism; + CK_MECHANISM_PTR pDigestMechanism; + CK_UTF8CHAR_PTR pContentType; + CK_BYTE_PTR pRequestedAttributes; + CK_ULONG ulRequestedAttributesLen; + CK_BYTE_PTR pRequiredAttributes; + CK_ULONG ulRequiredAttributesLen; +} CK_CMS_SIG_PARAMS; + +typedef CK_CMS_SIG_PARAMS CK_PTR CK_CMS_SIG_PARAMS_PTR; + +typedef struct CK_KEY_DERIVATION_STRING_DATA { + CK_BYTE_PTR pData; + CK_ULONG ulLen; +} CK_KEY_DERIVATION_STRING_DATA; + +typedef CK_KEY_DERIVATION_STRING_DATA CK_PTR \ + CK_KEY_DERIVATION_STRING_DATA_PTR; + + +/* The CK_EXTRACT_PARAMS is used for the + * CKM_EXTRACT_KEY_FROM_KEY mechanism. It specifies which bit + * of the base key should be used as the first bit of the + * derived key + */ +typedef CK_ULONG CK_EXTRACT_PARAMS; + +typedef CK_EXTRACT_PARAMS CK_PTR CK_EXTRACT_PARAMS_PTR; + +/* CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE is used to + * indicate the Pseudo-Random Function (PRF) used to generate + * key bits using PKCS #5 PBKDF2. + */ +typedef CK_ULONG CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE; + +typedef CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE CK_PTR \ + CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR; + +#define CKP_PKCS5_PBKD2_HMAC_SHA1 0x00000001UL +#define CKP_PKCS5_PBKD2_HMAC_GOSTR3411 0x00000002UL +#define CKP_PKCS5_PBKD2_HMAC_SHA224 0x00000003UL +#define CKP_PKCS5_PBKD2_HMAC_SHA256 0x00000004UL +#define CKP_PKCS5_PBKD2_HMAC_SHA384 0x00000005UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512 0x00000006UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512_224 0x00000007UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512_256 0x00000008UL + +/* CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE is used to indicate the + * source of the salt value when deriving a key using PKCS #5 + * PBKDF2. 
+ */ +typedef CK_ULONG CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE; + +typedef CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE CK_PTR \ + CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR; + +/* The following salt value sources are defined in PKCS #5 v2.0. */ +#define CKZ_SALT_SPECIFIED 0x00000001UL + +/* CK_PKCS5_PBKD2_PARAMS is a structure that provides the + * parameters to the CKM_PKCS5_PBKD2 mechanism. + */ +typedef struct CK_PKCS5_PBKD2_PARAMS { + CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; + CK_VOID_PTR pSaltSourceData; + CK_ULONG ulSaltSourceDataLen; + CK_ULONG iterations; + CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; + CK_VOID_PTR pPrfData; + CK_ULONG ulPrfDataLen; + CK_UTF8CHAR_PTR pPassword; + CK_ULONG_PTR ulPasswordLen; +} CK_PKCS5_PBKD2_PARAMS; + +typedef CK_PKCS5_PBKD2_PARAMS CK_PTR CK_PKCS5_PBKD2_PARAMS_PTR; + +/* CK_PKCS5_PBKD2_PARAMS2 is a corrected version of the CK_PKCS5_PBKD2_PARAMS + * structure that provides the parameters to the CKM_PKCS5_PBKD2 mechanism + * noting that the ulPasswordLen field is a CK_ULONG and not a CK_ULONG_PTR. + */ +typedef struct CK_PKCS5_PBKD2_PARAMS2 { + CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; + CK_VOID_PTR pSaltSourceData; + CK_ULONG ulSaltSourceDataLen; + CK_ULONG iterations; + CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; + CK_VOID_PTR pPrfData; + CK_ULONG ulPrfDataLen; + CK_UTF8CHAR_PTR pPassword; + CK_ULONG ulPasswordLen; +} CK_PKCS5_PBKD2_PARAMS2; + +typedef CK_PKCS5_PBKD2_PARAMS2 CK_PTR CK_PKCS5_PBKD2_PARAMS2_PTR; + +typedef CK_ULONG CK_OTP_PARAM_TYPE; +typedef CK_OTP_PARAM_TYPE CK_PARAM_TYPE; /* backward compatibility */ + +typedef struct CK_OTP_PARAM { + CK_OTP_PARAM_TYPE type; + CK_VOID_PTR pValue; + CK_ULONG ulValueLen; +} CK_OTP_PARAM; + +typedef CK_OTP_PARAM CK_PTR CK_OTP_PARAM_PTR; + +typedef struct CK_OTP_PARAMS { + CK_OTP_PARAM_PTR pParams; + CK_ULONG ulCount; +} CK_OTP_PARAMS; + +typedef CK_OTP_PARAMS CK_PTR CK_OTP_PARAMS_PTR; + +typedef struct CK_OTP_SIGNATURE_INFO { + CK_OTP_PARAM_PTR pParams; + CK_ULONG ulCount; +} CK_OTP_SIGNATURE_INFO; + +typedef CK_OTP_SIGNATURE_INFO CK_PTR CK_OTP_SIGNATURE_INFO_PTR; + +#define CK_OTP_VALUE 0UL +#define CK_OTP_PIN 1UL +#define CK_OTP_CHALLENGE 2UL +#define CK_OTP_TIME 3UL +#define CK_OTP_COUNTER 4UL +#define CK_OTP_FLAGS 5UL +#define CK_OTP_OUTPUT_LENGTH 6UL +#define CK_OTP_OUTPUT_FORMAT 7UL + +#define CKF_NEXT_OTP 0x00000001UL +#define CKF_EXCLUDE_TIME 0x00000002UL +#define CKF_EXCLUDE_COUNTER 0x00000004UL +#define CKF_EXCLUDE_CHALLENGE 0x00000008UL +#define CKF_EXCLUDE_PIN 0x00000010UL +#define CKF_USER_FRIENDLY_OTP 0x00000020UL + +typedef struct CK_KIP_PARAMS { + CK_MECHANISM_PTR pMechanism; + CK_OBJECT_HANDLE hKey; + CK_BYTE_PTR pSeed; + CK_ULONG ulSeedLen; +} CK_KIP_PARAMS; + +typedef CK_KIP_PARAMS CK_PTR CK_KIP_PARAMS_PTR; + +typedef struct CK_AES_CTR_PARAMS { + CK_ULONG ulCounterBits; + CK_BYTE cb[16]; +} CK_AES_CTR_PARAMS; + +typedef CK_AES_CTR_PARAMS CK_PTR CK_AES_CTR_PARAMS_PTR; + +typedef struct CK_GCM_PARAMS { + CK_BYTE_PTR pIv; + CK_ULONG ulIvLen; + CK_ULONG ulIvBits; + CK_BYTE_PTR pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulTagBits; +} CK_GCM_PARAMS; + +typedef CK_GCM_PARAMS CK_PTR CK_GCM_PARAMS_PTR; + +typedef struct CK_CCM_PARAMS { + CK_ULONG ulDataLen; + CK_BYTE_PTR pNonce; + CK_ULONG ulNonceLen; + CK_BYTE_PTR pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulMACLen; +} CK_CCM_PARAMS; + +typedef CK_CCM_PARAMS CK_PTR CK_CCM_PARAMS_PTR; + +/* Deprecated. 
Use CK_GCM_PARAMS */ +typedef struct CK_AES_GCM_PARAMS { + CK_BYTE_PTR pIv; + CK_ULONG ulIvLen; + CK_ULONG ulIvBits; + CK_BYTE_PTR pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulTagBits; +} CK_AES_GCM_PARAMS; + +typedef CK_AES_GCM_PARAMS CK_PTR CK_AES_GCM_PARAMS_PTR; + +/* Deprecated. Use CK_CCM_PARAMS */ +typedef struct CK_AES_CCM_PARAMS { + CK_ULONG ulDataLen; + CK_BYTE_PTR pNonce; + CK_ULONG ulNonceLen; + CK_BYTE_PTR pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulMACLen; +} CK_AES_CCM_PARAMS; + +typedef CK_AES_CCM_PARAMS CK_PTR CK_AES_CCM_PARAMS_PTR; + +typedef struct CK_CAMELLIA_CTR_PARAMS { + CK_ULONG ulCounterBits; + CK_BYTE cb[16]; +} CK_CAMELLIA_CTR_PARAMS; + +typedef CK_CAMELLIA_CTR_PARAMS CK_PTR CK_CAMELLIA_CTR_PARAMS_PTR; + +typedef struct CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ + CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS_PTR; + +typedef struct CK_ARIA_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_ARIA_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_ARIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ + CK_ARIA_CBC_ENCRYPT_DATA_PARAMS_PTR; + +typedef struct CK_DSA_PARAMETER_GEN_PARAM { + CK_MECHANISM_TYPE hash; + CK_BYTE_PTR pSeed; + CK_ULONG ulSeedLen; + CK_ULONG ulIndex; +} CK_DSA_PARAMETER_GEN_PARAM; + +typedef CK_DSA_PARAMETER_GEN_PARAM CK_PTR CK_DSA_PARAMETER_GEN_PARAM_PTR; + +typedef struct CK_ECDH_AES_KEY_WRAP_PARAMS { + CK_ULONG ulAESKeyBits; + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE_PTR pSharedData; +} CK_ECDH_AES_KEY_WRAP_PARAMS; + +typedef CK_ECDH_AES_KEY_WRAP_PARAMS CK_PTR CK_ECDH_AES_KEY_WRAP_PARAMS_PTR; + +typedef CK_ULONG CK_JAVA_MIDP_SECURITY_DOMAIN; + +typedef CK_ULONG CK_CERTIFICATE_CATEGORY; + +typedef struct CK_RSA_AES_KEY_WRAP_PARAMS { + CK_ULONG ulAESKeyBits; + CK_RSA_PKCS_OAEP_PARAMS_PTR pOAEPParams; +} CK_RSA_AES_KEY_WRAP_PARAMS; + +typedef CK_RSA_AES_KEY_WRAP_PARAMS CK_PTR CK_RSA_AES_KEY_WRAP_PARAMS_PTR; + +typedef struct CK_TLS12_MASTER_KEY_DERIVE_PARAMS { + CK_SSL3_RANDOM_DATA RandomInfo; + CK_VERSION_PTR pVersion; + CK_MECHANISM_TYPE prfHashMechanism; +} CK_TLS12_MASTER_KEY_DERIVE_PARAMS; + +typedef CK_TLS12_MASTER_KEY_DERIVE_PARAMS CK_PTR \ + CK_TLS12_MASTER_KEY_DERIVE_PARAMS_PTR; + +typedef struct CK_TLS12_KEY_MAT_PARAMS { + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_BBOOL bIsExport; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; + CK_MECHANISM_TYPE prfHashMechanism; +} CK_TLS12_KEY_MAT_PARAMS; + +typedef CK_TLS12_KEY_MAT_PARAMS CK_PTR CK_TLS12_KEY_MAT_PARAMS_PTR; + +typedef struct CK_TLS_KDF_PARAMS { + CK_MECHANISM_TYPE prfMechanism; + CK_BYTE_PTR pLabel; + CK_ULONG ulLabelLength; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_BYTE_PTR pContextData; + CK_ULONG ulContextDataLength; +} CK_TLS_KDF_PARAMS; + +typedef CK_TLS_KDF_PARAMS CK_PTR CK_TLS_KDF_PARAMS_PTR; + +typedef struct CK_TLS_MAC_PARAMS { + CK_MECHANISM_TYPE prfHashMechanism; + CK_ULONG ulMacLength; + CK_ULONG ulServerOrClient; +} CK_TLS_MAC_PARAMS; + +typedef CK_TLS_MAC_PARAMS CK_PTR CK_TLS_MAC_PARAMS_PTR; + +typedef struct CK_GOSTR3410_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pUKM; + CK_ULONG ulUKMLen; +} CK_GOSTR3410_DERIVE_PARAMS; + +typedef CK_GOSTR3410_DERIVE_PARAMS CK_PTR CK_GOSTR3410_DERIVE_PARAMS_PTR; + +typedef struct CK_GOSTR3410_KEY_WRAP_PARAMS { + CK_BYTE_PTR pWrapOID; + 
CK_ULONG ulWrapOIDLen;
+ CK_BYTE_PTR pUKM;
+ CK_ULONG ulUKMLen;
+ CK_OBJECT_HANDLE hKey;
+} CK_GOSTR3410_KEY_WRAP_PARAMS;
+
+typedef CK_GOSTR3410_KEY_WRAP_PARAMS CK_PTR CK_GOSTR3410_KEY_WRAP_PARAMS_PTR;
+
+typedef struct CK_SEED_CBC_ENCRYPT_DATA_PARAMS {
+ CK_BYTE iv[16];
+ CK_BYTE_PTR pData;
+ CK_ULONG length;
+} CK_SEED_CBC_ENCRYPT_DATA_PARAMS;
+
+typedef CK_SEED_CBC_ENCRYPT_DATA_PARAMS CK_PTR \
+ CK_SEED_CBC_ENCRYPT_DATA_PARAMS_PTR;
+
+#endif /* _PKCS11T_H_ */
+
diff --git a/tools/vendor/github.com/miekg/pkcs11/release.go b/tools/vendor/github.com/miekg/pkcs11/release.go
new file mode 100644
index 000000000..d8b99f147
--- /dev/null
+++ b/tools/vendor/github.com/miekg/pkcs11/release.go
@@ -0,0 +1,18 @@
+//go:build release
+// +build release
+
+package pkcs11
+
+import "fmt"
+
+// Release is the current version of the pkcs11 library.
+var Release = R{1, 1, 1}
+
+// R holds the version of this library.
+type R struct {
+ Major, Minor, Patch int
+}
+
+func (r R) String() string {
+ return fmt.Sprintf("%d.%d.%d", r.Major, r.Minor, r.Patch)
+}
diff --git a/tools/vendor/github.com/miekg/pkcs11/softhsm.conf b/tools/vendor/github.com/miekg/pkcs11/softhsm.conf
new file mode 100644
index 000000000..f95862b10
--- /dev/null
+++ b/tools/vendor/github.com/miekg/pkcs11/softhsm.conf
@@ -0,0 +1 @@
+0:hsm.db
diff --git a/tools/vendor/github.com/miekg/pkcs11/softhsm2.conf b/tools/vendor/github.com/miekg/pkcs11/softhsm2.conf
new file mode 100644
index 000000000..876990cdd
--- /dev/null
+++ b/tools/vendor/github.com/miekg/pkcs11/softhsm2.conf
@@ -0,0 +1,4 @@
+log.level = INFO
+objectstore.backend = file
+directories.tokendir = test_data
+slots.removable = false
diff --git a/tools/vendor/github.com/miekg/pkcs11/types.go b/tools/vendor/github.com/miekg/pkcs11/types.go
new file mode 100644
index 000000000..60eadcb71
--- /dev/null
+++ b/tools/vendor/github.com/miekg/pkcs11/types.go
@@ -0,0 +1,315 @@
+// Copyright 2013 Miek Gieben. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs11
+
+/*
+#include <stdlib.h>
+#include <string.h>
+#include "pkcs11go.h"
+
+CK_ULONG Index(CK_ULONG_PTR array, CK_ULONG i)
+{
+ return array[i];
+}
+
+static inline void putAttributePval(CK_ATTRIBUTE_PTR a, CK_VOID_PTR pValue)
+{
+ a->pValue = pValue;
+}
+
+static inline void putMechanismParam(CK_MECHANISM_PTR m, CK_VOID_PTR pParameter)
+{
+ m->pParameter = pParameter;
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "time"
+ "unsafe"
+)
+
+type arena []unsafe.Pointer
+
+func (a *arena) Allocate(obj []byte) (C.CK_VOID_PTR, C.CK_ULONG) {
+ cobj := C.calloc(C.size_t(len(obj)), 1)
+ *a = append(*a, cobj)
+ C.memmove(cobj, unsafe.Pointer(&obj[0]), C.size_t(len(obj)))
+ return C.CK_VOID_PTR(cobj), C.CK_ULONG(len(obj))
+}
+
+func (a arena) Free() {
+ for _, p := range a {
+  C.free(p)
+ }
+}
+
+// toList converts from a C style array to a []uint.
+func toList(clist C.CK_ULONG_PTR, size C.CK_ULONG) []uint {
+ l := make([]uint, int(size))
+ for i := 0; i < len(l); i++ {
+  l[i] = uint(C.Index(clist, C.CK_ULONG(i)))
+ }
+ defer C.free(unsafe.Pointer(clist))
+ return l
+}
+
+// cBBool converts a bool to a CK_BBOOL.
+func cBBool(x bool) C.CK_BBOOL {
+ if x {
+  return C.CK_BBOOL(C.CK_TRUE)
+ }
+ return C.CK_BBOOL(C.CK_FALSE)
+}
+
+func uintToBytes(x uint64) []byte {
+ ul := C.CK_ULONG(x)
+ return C.GoBytes(unsafe.Pointer(&ul), C.int(unsafe.Sizeof(ul)))
+}
+
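A minimal sketch of how the arena helper above is used (in-package Go, since arena is unexported; the byte values are placeholders, not from the vendored file):

	// Copy Go bytes into C memory for the duration of one Cryptoki call,
	// then release every allocation with a single deferred Free.
	var a arena
	defer a.Free() // frees every pointer Allocate recorded
	ptr, n := a.Allocate([]byte{0x01, 0x02, 0x03})
	_ = ptr // would be stored into a CK_* struct's pValue/pParameter field
	_ = n   // the matching CK_ULONG byte length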
+// Error represents a PKCS#11 error.
+type Error uint
+
+func (e Error) Error() string {
+ return fmt.Sprintf("pkcs11: 0x%X: %s", uint(e), strerror[uint(e)])
+}
+
+func toError(e C.CK_RV) error {
+ if e == C.CKR_OK {
+  return nil
+ }
+ return Error(e)
+}
+
+// SessionHandle is a Cryptoki-assigned value that identifies a session.
+type SessionHandle uint
+
+// ObjectHandle is a token-specific identifier for an object.
+type ObjectHandle uint
+
+// Version represents any version information from the library.
+type Version struct {
+ Major byte
+ Minor byte
+}
+
+func toVersion(version C.CK_VERSION) Version {
+ return Version{byte(version.major), byte(version.minor)}
+}
+
+// SlotEvent holds the SlotID for which a slot event (token insertion,
+// removal, etc.) occurred.
+type SlotEvent struct {
+ SlotID uint
+}
+
+// Info provides information about the library and hardware used.
+type Info struct {
+ CryptokiVersion Version
+ ManufacturerID string
+ Flags uint
+ LibraryDescription string
+ LibraryVersion Version
+}
+
+// SlotInfo provides information about a slot.
+type SlotInfo struct {
+ SlotDescription string // 64 bytes.
+ ManufacturerID string // 32 bytes.
+ Flags uint
+ HardwareVersion Version
+ FirmwareVersion Version
+}
+
+// TokenInfo provides information about a token.
+type TokenInfo struct {
+ Label string
+ ManufacturerID string
+ Model string
+ SerialNumber string
+ Flags uint
+ MaxSessionCount uint
+ SessionCount uint
+ MaxRwSessionCount uint
+ RwSessionCount uint
+ MaxPinLen uint
+ MinPinLen uint
+ TotalPublicMemory uint
+ FreePublicMemory uint
+ TotalPrivateMemory uint
+ FreePrivateMemory uint
+ HardwareVersion Version
+ FirmwareVersion Version
+ UTCTime string
+}
+
+// SessionInfo provides information about a session.
+type SessionInfo struct {
+ SlotID uint
+ State uint
+ Flags uint
+ DeviceError uint
+}
+
+// Attribute holds an attribute type/value combination.
+type Attribute struct {
+ Type uint
+ Value []byte
+}
+
+// NewAttribute allocates an Attribute and returns a pointer to it.
+// Note that this is merely a convenience function, as values returned
+// from the HSM are not converted back to Go values; those are just raw
+// byte slices.
+func NewAttribute(typ uint, x interface{}) *Attribute {
+ // This function nicely transforms *to* an attribute, but there is
+ // no corresponding function that transforms back *from* an attribute,
+ // which in PKCS#11 is just a byte array.
+ a := new(Attribute)
+ a.Type = typ
+ if x == nil {
+  return a
+ }
+ switch v := x.(type) {
+ case bool:
+  if v {
+   a.Value = []byte{1}
+  } else {
+   a.Value = []byte{0}
+  }
+ case int:
+  a.Value = uintToBytes(uint64(v))
+ case int16:
+  a.Value = uintToBytes(uint64(v))
+ case int32:
+  a.Value = uintToBytes(uint64(v))
+ case int64:
+  a.Value = uintToBytes(uint64(v))
+ case uint:
+  a.Value = uintToBytes(uint64(v))
+ case uint16:
+  a.Value = uintToBytes(uint64(v))
+ case uint32:
+  a.Value = uintToBytes(uint64(v))
+ case uint64:
+  a.Value = uintToBytes(uint64(v))
+ case string:
+  a.Value = []byte(v)
+ case []byte:
+  a.Value = v
+ case time.Time: // for CKA_DATE
+  a.Value = cDate(v)
+ default:
+  panic("pkcs11: unhandled attribute type")
+ }
+ return a
+}
+
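NewAttribute's type switch above is the entire marshalling path for templates, so a self-contained usage sketch may help (constant names come from zconst.go later in this patch; GenerateKey is named only as an example consumer):

	package main

	import "github.com/miekg/pkcs11"

	func main() {
		// A template for a session-local AES-256 secret key; NewAttribute
		// serializes each Go value into the raw byte form PKCS#11 expects.
		template := []*pkcs11.Attribute{
			pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY),
			pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_AES),
			pkcs11.NewAttribute(pkcs11.CKA_VALUE_LEN, 32),
			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, false), // session object, not stored on the token
		}
		_ = template // would be passed to e.g. (*pkcs11.Ctx).GenerateKey
	}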
+// cAttributeList returns the start address and the length of an attribute list.
+func cAttributeList(a []*Attribute) (arena, C.CK_ATTRIBUTE_PTR, C.CK_ULONG) {
+ var arena arena
+ if len(a) == 0 {
+  return nil, nil, 0
+ }
+ pa := make([]C.CK_ATTRIBUTE, len(a))
+ for i, attr := range a {
+  pa[i]._type = C.CK_ATTRIBUTE_TYPE(attr.Type)
+  if len(attr.Value) != 0 {
+   buf, len := arena.Allocate(attr.Value)
+   // field is unaligned on windows so this has to call into C
+   C.putAttributePval(&pa[i], buf)
+   pa[i].ulValueLen = len
+  }
+ }
+ return arena, &pa[0], C.CK_ULONG(len(a))
+}
+
+func cDate(t time.Time) []byte {
+ b := make([]byte, 8)
+ year, month, day := t.Date()
+ y := fmt.Sprintf("%4d", year)
+ m := fmt.Sprintf("%02d", month)
+ d1 := fmt.Sprintf("%02d", day)
+ b[0], b[1], b[2], b[3] = y[0], y[1], y[2], y[3]
+ b[4], b[5] = m[0], m[1]
+ b[6], b[7] = d1[0], d1[1]
+ return b
+}
+
+// Mechanism holds a mechanism type/value combination.
+type Mechanism struct {
+ Mechanism uint
+ Parameter []byte
+ generator interface{}
+}
+
+// NewMechanism returns a pointer to an initialized Mechanism.
+func NewMechanism(mech uint, x interface{}) *Mechanism {
+ m := new(Mechanism)
+ m.Mechanism = mech
+ if x == nil {
+  return m
+ }
+
+ switch p := x.(type) {
+ case *GCMParams, *OAEPParams, *ECDH1DeriveParams:
+  // contains pointers; defer serialization until cMechanism
+  m.generator = p
+ case []byte:
+  m.Parameter = p
+ default:
+  panic("parameter must be one of type: []byte, *GCMParams, *OAEPParams, *ECDH1DeriveParams")
+ }
+
+ return m
+}
+
+func cMechanism(mechList []*Mechanism) (arena, *C.CK_MECHANISM) {
+ if len(mechList) != 1 {
+  panic("expected exactly one mechanism")
+ }
+ mech := mechList[0]
+ cmech := &C.CK_MECHANISM{mechanism: C.CK_MECHANISM_TYPE(mech.Mechanism)}
+ // params that contain pointers are allocated here
+ param := mech.Parameter
+ var arena arena
+ switch p := mech.generator.(type) {
+ case *GCMParams:
+  // uses its own arena because it has to outlive this function call (yuck)
+  param = cGCMParams(p)
+ case *OAEPParams:
+  param, arena = cOAEPParams(p, arena)
+ case *ECDH1DeriveParams:
+  param, arena = cECDH1DeriveParams(p, arena)
+ }
+ if len(param) != 0 {
+  buf, len := arena.Allocate(param)
+  // field is unaligned on windows so this has to call into C
+  C.putMechanismParam(cmech, buf)
+  cmech.ulParameterLen = len
+ }
+ return arena, cmech
+}
+
+// MechanismInfo provides information about a particular mechanism.
+type MechanismInfo struct {
+ MinKeySize uint
+ MaxKeySize uint
+ Flags uint
+}
+
+// stubData is a persistent nonempty byte array used by cMessage.
+var stubData = []byte{0}
+
+// cMessage returns the pointer corresponding to data.
+func cMessage(data []byte) (dataPtr C.CK_BYTE_PTR) {
+ l := len(data)
+ if l == 0 {
+  // &data[0] is forbidden in this case, so use a nontrivial array instead.
+  data = stubData
+ }
+ return C.CK_BYTE_PTR(unsafe.Pointer(&data[0]))
+}
diff --git a/tools/vendor/github.com/miekg/pkcs11/vendor.go b/tools/vendor/github.com/miekg/pkcs11/vendor.go
new file mode 100644
index 000000000..83188e500
--- /dev/null
+++ b/tools/vendor/github.com/miekg/pkcs11/vendor.go
@@ -0,0 +1,127 @@
+package pkcs11
+
+// Vendor specific range for Ncipher network HSM.
+const (
+ NFCK_VENDOR_NCIPHER = 0xde436972
+ CKA_NCIPHER = NFCK_VENDOR_NCIPHER
+ CKM_NCIPHER = NFCK_VENDOR_NCIPHER
+ CKK_NCIPHER = NFCK_VENDOR_NCIPHER
+)
+
+// Vendor specific mechanisms for HMAC on Ncipher HSMs where Ncipher does not allow use of generic_secret keys.
+const ( + CKM_NC_SHA_1_HMAC_KEY_GEN = CKM_NCIPHER + 0x3 /* no params */ + CKM_NC_MD5_HMAC_KEY_GEN = CKM_NCIPHER + 0x6 /* no params */ + CKM_NC_SHA224_HMAC_KEY_GEN = CKM_NCIPHER + 0x24 /* no params */ + CKM_NC_SHA256_HMAC_KEY_GEN = CKM_NCIPHER + 0x25 /* no params */ + CKM_NC_SHA384_HMAC_KEY_GEN = CKM_NCIPHER + 0x26 /* no params */ + CKM_NC_SHA512_HMAC_KEY_GEN = CKM_NCIPHER + 0x27 /* no params */ +) + +// Vendor specific range for Mozilla NSS. +const ( + NSSCK_VENDOR_NSS = 0x4E534350 + CKO_NSS = CKO_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKK_NSS = CKK_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKC_NSS = CKC_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKA_NSS = CKA_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKA_TRUST = CKA_NSS + 0x2000 + CKM_NSS = CKM_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKR_NSS = CKM_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKT_VENDOR_DEFINED = 0x80000000 + CKT_NSS = CKT_VENDOR_DEFINED | NSSCK_VENDOR_NSS +) + +// Vendor specific values for Mozilla NSS. +const ( + CKO_NSS_CRL = CKO_NSS + 1 + CKO_NSS_SMIME = CKO_NSS + 2 + CKO_NSS_TRUST = CKO_NSS + 3 + CKO_NSS_BUILTIN_ROOT_LIST = CKO_NSS + 4 + CKO_NSS_NEWSLOT = CKO_NSS + 5 + CKO_NSS_DELSLOT = CKO_NSS + 6 + CKK_NSS_PKCS8 = CKK_NSS + 1 + CKK_NSS_JPAKE_ROUND1 = CKK_NSS + 2 + CKK_NSS_JPAKE_ROUND2 = CKK_NSS + 3 + CKK_NSS_CHACHA20 = CKK_NSS + 4 + CKA_NSS_URL = CKA_NSS + 1 + CKA_NSS_EMAIL = CKA_NSS + 2 + CKA_NSS_SMIME_INFO = CKA_NSS + 3 + CKA_NSS_SMIME_TIMESTAMP = CKA_NSS + 4 + CKA_NSS_PKCS8_SALT = CKA_NSS + 5 + CKA_NSS_PASSWORD_CHECK = CKA_NSS + 6 + CKA_NSS_EXPIRES = CKA_NSS + 7 + CKA_NSS_KRL = CKA_NSS + 8 + CKA_NSS_PQG_COUNTER = CKA_NSS + 20 + CKA_NSS_PQG_SEED = CKA_NSS + 21 + CKA_NSS_PQG_H = CKA_NSS + 22 + CKA_NSS_PQG_SEED_BITS = CKA_NSS + 23 + CKA_NSS_MODULE_SPEC = CKA_NSS + 24 + CKA_NSS_OVERRIDE_EXTENSIONS = CKA_NSS + 25 + CKA_NSS_JPAKE_SIGNERID = CKA_NSS + 26 + CKA_NSS_JPAKE_PEERID = CKA_NSS + 27 + CKA_NSS_JPAKE_GX1 = CKA_NSS + 28 + CKA_NSS_JPAKE_GX2 = CKA_NSS + 29 + CKA_NSS_JPAKE_GX3 = CKA_NSS + 30 + CKA_NSS_JPAKE_GX4 = CKA_NSS + 31 + CKA_NSS_JPAKE_X2 = CKA_NSS + 32 + CKA_NSS_JPAKE_X2S = CKA_NSS + 33 + CKA_NSS_MOZILLA_CA_POLICY = CKA_NSS + 34 + CKA_TRUST_DIGITAL_SIGNATURE = CKA_TRUST + 1 + CKA_TRUST_NON_REPUDIATION = CKA_TRUST + 2 + CKA_TRUST_KEY_ENCIPHERMENT = CKA_TRUST + 3 + CKA_TRUST_DATA_ENCIPHERMENT = CKA_TRUST + 4 + CKA_TRUST_KEY_AGREEMENT = CKA_TRUST + 5 + CKA_TRUST_KEY_CERT_SIGN = CKA_TRUST + 6 + CKA_TRUST_CRL_SIGN = CKA_TRUST + 7 + CKA_TRUST_SERVER_AUTH = CKA_TRUST + 8 + CKA_TRUST_CLIENT_AUTH = CKA_TRUST + 9 + CKA_TRUST_CODE_SIGNING = CKA_TRUST + 10 + CKA_TRUST_EMAIL_PROTECTION = CKA_TRUST + 11 + CKA_TRUST_IPSEC_END_SYSTEM = CKA_TRUST + 12 + CKA_TRUST_IPSEC_TUNNEL = CKA_TRUST + 13 + CKA_TRUST_IPSEC_USER = CKA_TRUST + 14 + CKA_TRUST_TIME_STAMPING = CKA_TRUST + 15 + CKA_TRUST_STEP_UP_APPROVED = CKA_TRUST + 16 + CKA_CERT_SHA1_HASH = CKA_TRUST + 100 + CKA_CERT_MD5_HASH = CKA_TRUST + 101 + CKM_NSS_AES_KEY_WRAP = CKM_NSS + 1 + CKM_NSS_AES_KEY_WRAP_PAD = CKM_NSS + 2 + CKM_NSS_HKDF_SHA1 = CKM_NSS + 3 + CKM_NSS_HKDF_SHA256 = CKM_NSS + 4 + CKM_NSS_HKDF_SHA384 = CKM_NSS + 5 + CKM_NSS_HKDF_SHA512 = CKM_NSS + 6 + CKM_NSS_JPAKE_ROUND1_SHA1 = CKM_NSS + 7 + CKM_NSS_JPAKE_ROUND1_SHA256 = CKM_NSS + 8 + CKM_NSS_JPAKE_ROUND1_SHA384 = CKM_NSS + 9 + CKM_NSS_JPAKE_ROUND1_SHA512 = CKM_NSS + 10 + CKM_NSS_JPAKE_ROUND2_SHA1 = CKM_NSS + 11 + CKM_NSS_JPAKE_ROUND2_SHA256 = CKM_NSS + 12 + CKM_NSS_JPAKE_ROUND2_SHA384 = CKM_NSS + 13 + CKM_NSS_JPAKE_ROUND2_SHA512 = CKM_NSS + 14 + CKM_NSS_JPAKE_FINAL_SHA1 = CKM_NSS + 15 + CKM_NSS_JPAKE_FINAL_SHA256 = CKM_NSS + 16 + 
CKM_NSS_JPAKE_FINAL_SHA384 = CKM_NSS + 17 + CKM_NSS_JPAKE_FINAL_SHA512 = CKM_NSS + 18 + CKM_NSS_HMAC_CONSTANT_TIME = CKM_NSS + 19 + CKM_NSS_SSL3_MAC_CONSTANT_TIME = CKM_NSS + 20 + CKM_NSS_TLS_PRF_GENERAL_SHA256 = CKM_NSS + 21 + CKM_NSS_TLS_MASTER_KEY_DERIVE_SHA256 = CKM_NSS + 22 + CKM_NSS_TLS_KEY_AND_MAC_DERIVE_SHA256 = CKM_NSS + 23 + CKM_NSS_TLS_MASTER_KEY_DERIVE_DH_SHA256 = CKM_NSS + 24 + CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE = CKM_NSS + 25 + CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE_DH = CKM_NSS + 26 + CKM_NSS_CHACHA20_KEY_GEN = CKM_NSS + 27 + CKM_NSS_CHACHA20_POLY1305 = CKM_NSS + 28 + CKM_NSS_PKCS12_PBE_SHA224_HMAC_KEY_GEN = CKM_NSS + 29 + CKM_NSS_PKCS12_PBE_SHA256_HMAC_KEY_GEN = CKM_NSS + 30 + CKM_NSS_PKCS12_PBE_SHA384_HMAC_KEY_GEN = CKM_NSS + 31 + CKM_NSS_PKCS12_PBE_SHA512_HMAC_KEY_GEN = CKM_NSS + 32 + CKR_NSS_CERTDB_FAILED = CKR_NSS + 1 + CKR_NSS_KEYDB_FAILED = CKR_NSS + 2 + CKT_NSS_TRUSTED = CKT_NSS + 1 + CKT_NSS_TRUSTED_DELEGATOR = CKT_NSS + 2 + CKT_NSS_MUST_VERIFY_TRUST = CKT_NSS + 3 + CKT_NSS_NOT_TRUSTED = CKT_NSS + 10 + CKT_NSS_TRUST_UNKNOWN = CKT_NSS + 5 +) diff --git a/tools/vendor/github.com/miekg/pkcs11/zconst.go b/tools/vendor/github.com/miekg/pkcs11/zconst.go new file mode 100644 index 000000000..41df5cfcf --- /dev/null +++ b/tools/vendor/github.com/miekg/pkcs11/zconst.go @@ -0,0 +1,766 @@ +// Copyright 2013 Miek Gieben. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "go run const_generate.go"; DO NOT EDIT. + +package pkcs11 + +const ( + CK_TRUE = 1 + CK_FALSE = 0 + CK_UNAVAILABLE_INFORMATION = ^uint(0) + CK_EFFECTIVELY_INFINITE = 0 + CK_INVALID_HANDLE = 0 + CKN_SURRENDER = 0 + CKN_OTP_CHANGED = 1 + CKF_TOKEN_PRESENT = 0x00000001 + CKF_REMOVABLE_DEVICE = 0x00000002 + CKF_HW_SLOT = 0x00000004 + CKF_RNG = 0x00000001 + CKF_WRITE_PROTECTED = 0x00000002 + CKF_LOGIN_REQUIRED = 0x00000004 + CKF_USER_PIN_INITIALIZED = 0x00000008 + CKF_RESTORE_KEY_NOT_NEEDED = 0x00000020 + CKF_CLOCK_ON_TOKEN = 0x00000040 + CKF_PROTECTED_AUTHENTICATION_PATH = 0x00000100 + CKF_DUAL_CRYPTO_OPERATIONS = 0x00000200 + CKF_TOKEN_INITIALIZED = 0x00000400 + CKF_SECONDARY_AUTHENTICATION = 0x00000800 + CKF_USER_PIN_COUNT_LOW = 0x00010000 + CKF_USER_PIN_FINAL_TRY = 0x00020000 + CKF_USER_PIN_LOCKED = 0x00040000 + CKF_USER_PIN_TO_BE_CHANGED = 0x00080000 + CKF_SO_PIN_COUNT_LOW = 0x00100000 + CKF_SO_PIN_FINAL_TRY = 0x00200000 + CKF_SO_PIN_LOCKED = 0x00400000 + CKF_SO_PIN_TO_BE_CHANGED = 0x00800000 + CKF_ERROR_STATE = 0x01000000 + CKU_SO = 0 + CKU_USER = 1 + CKU_CONTEXT_SPECIFIC = 2 + CKS_RO_PUBLIC_SESSION = 0 + CKS_RO_USER_FUNCTIONS = 1 + CKS_RW_PUBLIC_SESSION = 2 + CKS_RW_USER_FUNCTIONS = 3 + CKS_RW_SO_FUNCTIONS = 4 + CKF_RW_SESSION = 0x00000002 + CKF_SERIAL_SESSION = 0x00000004 + CKO_DATA = 0x00000000 + CKO_CERTIFICATE = 0x00000001 + CKO_PUBLIC_KEY = 0x00000002 + CKO_PRIVATE_KEY = 0x00000003 + CKO_SECRET_KEY = 0x00000004 + CKO_HW_FEATURE = 0x00000005 + CKO_DOMAIN_PARAMETERS = 0x00000006 + CKO_MECHANISM = 0x00000007 + CKO_OTP_KEY = 0x00000008 + CKO_VENDOR_DEFINED = 0x80000000 + CKH_MONOTONIC_COUNTER = 0x00000001 + CKH_CLOCK = 0x00000002 + CKH_USER_INTERFACE = 0x00000003 + CKH_VENDOR_DEFINED = 0x80000000 + CKK_RSA = 0x00000000 + CKK_DSA = 0x00000001 + CKK_DH = 0x00000002 + CKK_ECDSA = 0x00000003 // Deprecated + CKK_EC = 0x00000003 + CKK_X9_42_DH = 0x00000004 + CKK_KEA = 0x00000005 + CKK_GENERIC_SECRET = 0x00000010 + CKK_RC2 = 0x00000011 + CKK_RC4 = 0x00000012 + CKK_DES = 0x00000013 + CKK_DES2 = 0x00000014 + 
CKK_DES3 = 0x00000015 + CKK_CAST = 0x00000016 + CKK_CAST3 = 0x00000017 + CKK_CAST5 = 0x00000018 // Deprecated + CKK_CAST128 = 0x00000018 + CKK_RC5 = 0x00000019 + CKK_IDEA = 0x0000001A + CKK_SKIPJACK = 0x0000001B + CKK_BATON = 0x0000001C + CKK_JUNIPER = 0x0000001D + CKK_CDMF = 0x0000001E + CKK_AES = 0x0000001F + CKK_BLOWFISH = 0x00000020 + CKK_TWOFISH = 0x00000021 + CKK_SECURID = 0x00000022 + CKK_HOTP = 0x00000023 + CKK_ACTI = 0x00000024 + CKK_CAMELLIA = 0x00000025 + CKK_ARIA = 0x00000026 + CKK_MD5_HMAC = 0x00000027 + CKK_SHA_1_HMAC = 0x00000028 + CKK_RIPEMD128_HMAC = 0x00000029 + CKK_RIPEMD160_HMAC = 0x0000002A + CKK_SHA256_HMAC = 0x0000002B + CKK_SHA384_HMAC = 0x0000002C + CKK_SHA512_HMAC = 0x0000002D + CKK_SHA224_HMAC = 0x0000002E + CKK_SEED = 0x0000002F + CKK_GOSTR3410 = 0x00000030 + CKK_GOSTR3411 = 0x00000031 + CKK_GOST28147 = 0x00000032 + CKK_SHA3_224_HMAC = 0x00000033 + CKK_SHA3_256_HMAC = 0x00000034 + CKK_SHA3_384_HMAC = 0x00000035 + CKK_SHA3_512_HMAC = 0x00000036 + CKK_VENDOR_DEFINED = 0x80000000 + CK_CERTIFICATE_CATEGORY_UNSPECIFIED = 0 + CK_CERTIFICATE_CATEGORY_TOKEN_USER = 1 + CK_CERTIFICATE_CATEGORY_AUTHORITY = 2 + CK_CERTIFICATE_CATEGORY_OTHER_ENTITY = 3 + CK_SECURITY_DOMAIN_UNSPECIFIED = 0 + CK_SECURITY_DOMAIN_MANUFACTURER = 1 + CK_SECURITY_DOMAIN_OPERATOR = 2 + CK_SECURITY_DOMAIN_THIRD_PARTY = 3 + CKC_X_509 = 0x00000000 + CKC_X_509_ATTR_CERT = 0x00000001 + CKC_WTLS = 0x00000002 + CKC_VENDOR_DEFINED = 0x80000000 + CKF_ARRAY_ATTRIBUTE = 0x40000000 + CK_OTP_FORMAT_DECIMAL = 0 + CK_OTP_FORMAT_HEXADECIMAL = 1 + CK_OTP_FORMAT_ALPHANUMERIC = 2 + CK_OTP_FORMAT_BINARY = 3 + CK_OTP_PARAM_IGNORED = 0 + CK_OTP_PARAM_OPTIONAL = 1 + CK_OTP_PARAM_MANDATORY = 2 + CKA_CLASS = 0x00000000 + CKA_TOKEN = 0x00000001 + CKA_PRIVATE = 0x00000002 + CKA_LABEL = 0x00000003 + CKA_APPLICATION = 0x00000010 + CKA_VALUE = 0x00000011 + CKA_OBJECT_ID = 0x00000012 + CKA_CERTIFICATE_TYPE = 0x00000080 + CKA_ISSUER = 0x00000081 + CKA_SERIAL_NUMBER = 0x00000082 + CKA_AC_ISSUER = 0x00000083 + CKA_OWNER = 0x00000084 + CKA_ATTR_TYPES = 0x00000085 + CKA_TRUSTED = 0x00000086 + CKA_CERTIFICATE_CATEGORY = 0x00000087 + CKA_JAVA_MIDP_SECURITY_DOMAIN = 0x00000088 + CKA_URL = 0x00000089 + CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x0000008A + CKA_HASH_OF_ISSUER_PUBLIC_KEY = 0x0000008B + CKA_NAME_HASH_ALGORITHM = 0x0000008C + CKA_CHECK_VALUE = 0x00000090 + CKA_KEY_TYPE = 0x00000100 + CKA_SUBJECT = 0x00000101 + CKA_ID = 0x00000102 + CKA_SENSITIVE = 0x00000103 + CKA_ENCRYPT = 0x00000104 + CKA_DECRYPT = 0x00000105 + CKA_WRAP = 0x00000106 + CKA_UNWRAP = 0x00000107 + CKA_SIGN = 0x00000108 + CKA_SIGN_RECOVER = 0x00000109 + CKA_VERIFY = 0x0000010A + CKA_VERIFY_RECOVER = 0x0000010B + CKA_DERIVE = 0x0000010C + CKA_START_DATE = 0x00000110 + CKA_END_DATE = 0x00000111 + CKA_MODULUS = 0x00000120 + CKA_MODULUS_BITS = 0x00000121 + CKA_PUBLIC_EXPONENT = 0x00000122 + CKA_PRIVATE_EXPONENT = 0x00000123 + CKA_PRIME_1 = 0x00000124 + CKA_PRIME_2 = 0x00000125 + CKA_EXPONENT_1 = 0x00000126 + CKA_EXPONENT_2 = 0x00000127 + CKA_COEFFICIENT = 0x00000128 + CKA_PUBLIC_KEY_INFO = 0x00000129 + CKA_PRIME = 0x00000130 + CKA_SUBPRIME = 0x00000131 + CKA_BASE = 0x00000132 + CKA_PRIME_BITS = 0x00000133 + CKA_SUBPRIME_BITS = 0x00000134 + CKA_SUB_PRIME_BITS = CKA_SUBPRIME_BITS + CKA_VALUE_BITS = 0x00000160 + CKA_VALUE_LEN = 0x00000161 + CKA_EXTRACTABLE = 0x00000162 + CKA_LOCAL = 0x00000163 + CKA_NEVER_EXTRACTABLE = 0x00000164 + CKA_ALWAYS_SENSITIVE = 0x00000165 + CKA_KEY_GEN_MECHANISM = 0x00000166 + CKA_MODIFIABLE = 0x00000170 + CKA_COPYABLE = 0x00000171 + CKA_DESTROYABLE = 
0x00000172 + CKA_ECDSA_PARAMS = 0x00000180 // Deprecated + CKA_EC_PARAMS = 0x00000180 + CKA_EC_POINT = 0x00000181 + CKA_SECONDARY_AUTH = 0x00000200 // Deprecated + CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated + CKA_ALWAYS_AUTHENTICATE = 0x00000202 + CKA_WRAP_WITH_TRUSTED = 0x00000210 + CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211) + CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212) + CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213) + CKA_OTP_FORMAT = 0x00000220 + CKA_OTP_LENGTH = 0x00000221 + CKA_OTP_TIME_INTERVAL = 0x00000222 + CKA_OTP_USER_FRIENDLY_MODE = 0x00000223 + CKA_OTP_CHALLENGE_REQUIREMENT = 0x00000224 + CKA_OTP_TIME_REQUIREMENT = 0x00000225 + CKA_OTP_COUNTER_REQUIREMENT = 0x00000226 + CKA_OTP_PIN_REQUIREMENT = 0x00000227 + CKA_OTP_COUNTER = 0x0000022E + CKA_OTP_TIME = 0x0000022F + CKA_OTP_USER_IDENTIFIER = 0x0000022A + CKA_OTP_SERVICE_IDENTIFIER = 0x0000022B + CKA_OTP_SERVICE_LOGO = 0x0000022C + CKA_OTP_SERVICE_LOGO_TYPE = 0x0000022D + CKA_GOSTR3410_PARAMS = 0x00000250 + CKA_GOSTR3411_PARAMS = 0x00000251 + CKA_GOST28147_PARAMS = 0x00000252 + CKA_HW_FEATURE_TYPE = 0x00000300 + CKA_RESET_ON_INIT = 0x00000301 + CKA_HAS_RESET = 0x00000302 + CKA_PIXEL_X = 0x00000400 + CKA_PIXEL_Y = 0x00000401 + CKA_RESOLUTION = 0x00000402 + CKA_CHAR_ROWS = 0x00000403 + CKA_CHAR_COLUMNS = 0x00000404 + CKA_COLOR = 0x00000405 + CKA_BITS_PER_PIXEL = 0x00000406 + CKA_CHAR_SETS = 0x00000480 + CKA_ENCODING_METHODS = 0x00000481 + CKA_MIME_TYPES = 0x00000482 + CKA_MECHANISM_TYPE = 0x00000500 + CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501 + CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502 + CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503 + CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600) + CKA_VENDOR_DEFINED = 0x80000000 + CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000 + CKM_RSA_PKCS = 0x00000001 + CKM_RSA_9796 = 0x00000002 + CKM_RSA_X_509 = 0x00000003 + CKM_MD2_RSA_PKCS = 0x00000004 + CKM_MD5_RSA_PKCS = 0x00000005 + CKM_SHA1_RSA_PKCS = 0x00000006 + CKM_RIPEMD128_RSA_PKCS = 0x00000007 + CKM_RIPEMD160_RSA_PKCS = 0x00000008 + CKM_RSA_PKCS_OAEP = 0x00000009 + CKM_RSA_X9_31_KEY_PAIR_GEN = 0x0000000A + CKM_RSA_X9_31 = 0x0000000B + CKM_SHA1_RSA_X9_31 = 0x0000000C + CKM_RSA_PKCS_PSS = 0x0000000D + CKM_SHA1_RSA_PKCS_PSS = 0x0000000E + CKM_DSA_KEY_PAIR_GEN = 0x00000010 + CKM_DSA = 0x00000011 + CKM_DSA_SHA1 = 0x00000012 + CKM_DSA_SHA224 = 0x00000013 + CKM_DSA_SHA256 = 0x00000014 + CKM_DSA_SHA384 = 0x00000015 + CKM_DSA_SHA512 = 0x00000016 + CKM_DSA_SHA3_224 = 0x00000018 + CKM_DSA_SHA3_256 = 0x00000019 + CKM_DSA_SHA3_384 = 0x0000001A + CKM_DSA_SHA3_512 = 0x0000001B + CKM_DH_PKCS_KEY_PAIR_GEN = 0x00000020 + CKM_DH_PKCS_DERIVE = 0x00000021 + CKM_X9_42_DH_KEY_PAIR_GEN = 0x00000030 + CKM_X9_42_DH_DERIVE = 0x00000031 + CKM_X9_42_DH_HYBRID_DERIVE = 0x00000032 + CKM_X9_42_MQV_DERIVE = 0x00000033 + CKM_SHA256_RSA_PKCS = 0x00000040 + CKM_SHA384_RSA_PKCS = 0x00000041 + CKM_SHA512_RSA_PKCS = 0x00000042 + CKM_SHA256_RSA_PKCS_PSS = 0x00000043 + CKM_SHA384_RSA_PKCS_PSS = 0x00000044 + CKM_SHA512_RSA_PKCS_PSS = 0x00000045 + CKM_SHA224_RSA_PKCS = 0x00000046 + CKM_SHA224_RSA_PKCS_PSS = 0x00000047 + CKM_SHA512_224 = 0x00000048 + CKM_SHA512_224_HMAC = 0x00000049 + CKM_SHA512_224_HMAC_GENERAL = 0x0000004A + CKM_SHA512_224_KEY_DERIVATION = 0x0000004B + CKM_SHA512_256 = 0x0000004C + CKM_SHA512_256_HMAC = 0x0000004D + CKM_SHA512_256_HMAC_GENERAL = 0x0000004E + CKM_SHA512_256_KEY_DERIVATION = 0x0000004F + CKM_SHA512_T = 0x00000050 + CKM_SHA512_T_HMAC = 0x00000051 + CKM_SHA512_T_HMAC_GENERAL = 0x00000052 + CKM_SHA512_T_KEY_DERIVATION = 
0x00000053 + CKM_SHA3_256_RSA_PKCS = 0x00000060 + CKM_SHA3_384_RSA_PKCS = 0x00000061 + CKM_SHA3_512_RSA_PKCS = 0x00000062 + CKM_SHA3_256_RSA_PKCS_PSS = 0x00000063 + CKM_SHA3_384_RSA_PKCS_PSS = 0x00000064 + CKM_SHA3_512_RSA_PKCS_PSS = 0x00000065 + CKM_SHA3_224_RSA_PKCS = 0x00000066 + CKM_SHA3_224_RSA_PKCS_PSS = 0x00000067 + CKM_RC2_KEY_GEN = 0x00000100 + CKM_RC2_ECB = 0x00000101 + CKM_RC2_CBC = 0x00000102 + CKM_RC2_MAC = 0x00000103 + CKM_RC2_MAC_GENERAL = 0x00000104 + CKM_RC2_CBC_PAD = 0x00000105 + CKM_RC4_KEY_GEN = 0x00000110 + CKM_RC4 = 0x00000111 + CKM_DES_KEY_GEN = 0x00000120 + CKM_DES_ECB = 0x00000121 + CKM_DES_CBC = 0x00000122 + CKM_DES_MAC = 0x00000123 + CKM_DES_MAC_GENERAL = 0x00000124 + CKM_DES_CBC_PAD = 0x00000125 + CKM_DES2_KEY_GEN = 0x00000130 + CKM_DES3_KEY_GEN = 0x00000131 + CKM_DES3_ECB = 0x00000132 + CKM_DES3_CBC = 0x00000133 + CKM_DES3_MAC = 0x00000134 + CKM_DES3_MAC_GENERAL = 0x00000135 + CKM_DES3_CBC_PAD = 0x00000136 + CKM_DES3_CMAC_GENERAL = 0x00000137 + CKM_DES3_CMAC = 0x00000138 + CKM_CDMF_KEY_GEN = 0x00000140 + CKM_CDMF_ECB = 0x00000141 + CKM_CDMF_CBC = 0x00000142 + CKM_CDMF_MAC = 0x00000143 + CKM_CDMF_MAC_GENERAL = 0x00000144 + CKM_CDMF_CBC_PAD = 0x00000145 + CKM_DES_OFB64 = 0x00000150 + CKM_DES_OFB8 = 0x00000151 + CKM_DES_CFB64 = 0x00000152 + CKM_DES_CFB8 = 0x00000153 + CKM_MD2 = 0x00000200 + CKM_MD2_HMAC = 0x00000201 + CKM_MD2_HMAC_GENERAL = 0x00000202 + CKM_MD5 = 0x00000210 + CKM_MD5_HMAC = 0x00000211 + CKM_MD5_HMAC_GENERAL = 0x00000212 + CKM_SHA_1 = 0x00000220 + CKM_SHA_1_HMAC = 0x00000221 + CKM_SHA_1_HMAC_GENERAL = 0x00000222 + CKM_RIPEMD128 = 0x00000230 + CKM_RIPEMD128_HMAC = 0x00000231 + CKM_RIPEMD128_HMAC_GENERAL = 0x00000232 + CKM_RIPEMD160 = 0x00000240 + CKM_RIPEMD160_HMAC = 0x00000241 + CKM_RIPEMD160_HMAC_GENERAL = 0x00000242 + CKM_SHA256 = 0x00000250 + CKM_SHA256_HMAC = 0x00000251 + CKM_SHA256_HMAC_GENERAL = 0x00000252 + CKM_SHA224 = 0x00000255 + CKM_SHA224_HMAC = 0x00000256 + CKM_SHA224_HMAC_GENERAL = 0x00000257 + CKM_SHA384 = 0x00000260 + CKM_SHA384_HMAC = 0x00000261 + CKM_SHA384_HMAC_GENERAL = 0x00000262 + CKM_SHA512 = 0x00000270 + CKM_SHA512_HMAC = 0x00000271 + CKM_SHA512_HMAC_GENERAL = 0x00000272 + CKM_SECURID_KEY_GEN = 0x00000280 + CKM_SECURID = 0x00000282 + CKM_HOTP_KEY_GEN = 0x00000290 + CKM_HOTP = 0x00000291 + CKM_ACTI = 0x000002A0 + CKM_ACTI_KEY_GEN = 0x000002A1 + CKM_SHA3_256 = 0x000002B0 + CKM_SHA3_256_HMAC = 0x000002B1 + CKM_SHA3_256_HMAC_GENERAL = 0x000002B2 + CKM_SHA3_256_KEY_GEN = 0x000002B3 + CKM_SHA3_224 = 0x000002B5 + CKM_SHA3_224_HMAC = 0x000002B6 + CKM_SHA3_224_HMAC_GENERAL = 0x000002B7 + CKM_SHA3_224_KEY_GEN = 0x000002B8 + CKM_SHA3_384 = 0x000002C0 + CKM_SHA3_384_HMAC = 0x000002C1 + CKM_SHA3_384_HMAC_GENERAL = 0x000002C2 + CKM_SHA3_384_KEY_GEN = 0x000002C3 + CKM_SHA3_512 = 0x000002D0 + CKM_SHA3_512_HMAC = 0x000002D1 + CKM_SHA3_512_HMAC_GENERAL = 0x000002D2 + CKM_SHA3_512_KEY_GEN = 0x000002D3 + CKM_CAST_KEY_GEN = 0x00000300 + CKM_CAST_ECB = 0x00000301 + CKM_CAST_CBC = 0x00000302 + CKM_CAST_MAC = 0x00000303 + CKM_CAST_MAC_GENERAL = 0x00000304 + CKM_CAST_CBC_PAD = 0x00000305 + CKM_CAST3_KEY_GEN = 0x00000310 + CKM_CAST3_ECB = 0x00000311 + CKM_CAST3_CBC = 0x00000312 + CKM_CAST3_MAC = 0x00000313 + CKM_CAST3_MAC_GENERAL = 0x00000314 + CKM_CAST3_CBC_PAD = 0x00000315 + CKM_CAST5_KEY_GEN = 0x00000320 + CKM_CAST128_KEY_GEN = 0x00000320 + CKM_CAST5_ECB = 0x00000321 + CKM_CAST128_ECB = 0x00000321 + CKM_CAST5_CBC = 0x00000322 // Deprecated + CKM_CAST128_CBC = 0x00000322 + CKM_CAST5_MAC = 0x00000323 // Deprecated + CKM_CAST128_MAC = 0x00000323 + 
CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated + CKM_CAST128_MAC_GENERAL = 0x00000324 + CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated + CKM_CAST128_CBC_PAD = 0x00000325 + CKM_RC5_KEY_GEN = 0x00000330 + CKM_RC5_ECB = 0x00000331 + CKM_RC5_CBC = 0x00000332 + CKM_RC5_MAC = 0x00000333 + CKM_RC5_MAC_GENERAL = 0x00000334 + CKM_RC5_CBC_PAD = 0x00000335 + CKM_IDEA_KEY_GEN = 0x00000340 + CKM_IDEA_ECB = 0x00000341 + CKM_IDEA_CBC = 0x00000342 + CKM_IDEA_MAC = 0x00000343 + CKM_IDEA_MAC_GENERAL = 0x00000344 + CKM_IDEA_CBC_PAD = 0x00000345 + CKM_GENERIC_SECRET_KEY_GEN = 0x00000350 + CKM_CONCATENATE_BASE_AND_KEY = 0x00000360 + CKM_CONCATENATE_BASE_AND_DATA = 0x00000362 + CKM_CONCATENATE_DATA_AND_BASE = 0x00000363 + CKM_XOR_BASE_AND_DATA = 0x00000364 + CKM_EXTRACT_KEY_FROM_KEY = 0x00000365 + CKM_SSL3_PRE_MASTER_KEY_GEN = 0x00000370 + CKM_SSL3_MASTER_KEY_DERIVE = 0x00000371 + CKM_SSL3_KEY_AND_MAC_DERIVE = 0x00000372 + CKM_SSL3_MASTER_KEY_DERIVE_DH = 0x00000373 + CKM_TLS_PRE_MASTER_KEY_GEN = 0x00000374 + CKM_TLS_MASTER_KEY_DERIVE = 0x00000375 + CKM_TLS_KEY_AND_MAC_DERIVE = 0x00000376 + CKM_TLS_MASTER_KEY_DERIVE_DH = 0x00000377 + CKM_TLS_PRF = 0x00000378 + CKM_SSL3_MD5_MAC = 0x00000380 + CKM_SSL3_SHA1_MAC = 0x00000381 + CKM_MD5_KEY_DERIVATION = 0x00000390 + CKM_MD2_KEY_DERIVATION = 0x00000391 + CKM_SHA1_KEY_DERIVATION = 0x00000392 + CKM_SHA256_KEY_DERIVATION = 0x00000393 + CKM_SHA384_KEY_DERIVATION = 0x00000394 + CKM_SHA512_KEY_DERIVATION = 0x00000395 + CKM_SHA224_KEY_DERIVATION = 0x00000396 + CKM_SHA3_256_KEY_DERIVE = 0x00000397 + CKM_SHA3_224_KEY_DERIVE = 0x00000398 + CKM_SHA3_384_KEY_DERIVE = 0x00000399 + CKM_SHA3_512_KEY_DERIVE = 0x0000039A + CKM_SHAKE_128_KEY_DERIVE = 0x0000039B + CKM_SHAKE_256_KEY_DERIVE = 0x0000039C + CKM_PBE_MD2_DES_CBC = 0x000003A0 + CKM_PBE_MD5_DES_CBC = 0x000003A1 + CKM_PBE_MD5_CAST_CBC = 0x000003A2 + CKM_PBE_MD5_CAST3_CBC = 0x000003A3 + CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated + CKM_PBE_MD5_CAST128_CBC = 0x000003A4 + CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated + CKM_PBE_SHA1_CAST128_CBC = 0x000003A5 + CKM_PBE_SHA1_RC4_128 = 0x000003A6 + CKM_PBE_SHA1_RC4_40 = 0x000003A7 + CKM_PBE_SHA1_DES3_EDE_CBC = 0x000003A8 + CKM_PBE_SHA1_DES2_EDE_CBC = 0x000003A9 + CKM_PBE_SHA1_RC2_128_CBC = 0x000003AA + CKM_PBE_SHA1_RC2_40_CBC = 0x000003AB + CKM_PKCS5_PBKD2 = 0x000003B0 + CKM_PBA_SHA1_WITH_SHA1_HMAC = 0x000003C0 + CKM_WTLS_PRE_MASTER_KEY_GEN = 0x000003D0 + CKM_WTLS_MASTER_KEY_DERIVE = 0x000003D1 + CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC = 0x000003D2 + CKM_WTLS_PRF = 0x000003D3 + CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE = 0x000003D4 + CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE = 0x000003D5 + CKM_TLS10_MAC_SERVER = 0x000003D6 + CKM_TLS10_MAC_CLIENT = 0x000003D7 + CKM_TLS12_MAC = 0x000003D8 + CKM_TLS12_KDF = 0x000003D9 + CKM_TLS12_MASTER_KEY_DERIVE = 0x000003E0 + CKM_TLS12_KEY_AND_MAC_DERIVE = 0x000003E1 + CKM_TLS12_MASTER_KEY_DERIVE_DH = 0x000003E2 + CKM_TLS12_KEY_SAFE_DERIVE = 0x000003E3 + CKM_TLS_MAC = 0x000003E4 + CKM_TLS_KDF = 0x000003E5 + CKM_KEY_WRAP_LYNKS = 0x00000400 + CKM_KEY_WRAP_SET_OAEP = 0x00000401 + CKM_CMS_SIG = 0x00000500 + CKM_KIP_DERIVE = 0x00000510 + CKM_KIP_WRAP = 0x00000511 + CKM_KIP_MAC = 0x00000512 + CKM_CAMELLIA_KEY_GEN = 0x00000550 + CKM_CAMELLIA_ECB = 0x00000551 + CKM_CAMELLIA_CBC = 0x00000552 + CKM_CAMELLIA_MAC = 0x00000553 + CKM_CAMELLIA_MAC_GENERAL = 0x00000554 + CKM_CAMELLIA_CBC_PAD = 0x00000555 + CKM_CAMELLIA_ECB_ENCRYPT_DATA = 0x00000556 + CKM_CAMELLIA_CBC_ENCRYPT_DATA = 0x00000557 + CKM_CAMELLIA_CTR = 0x00000558 + CKM_ARIA_KEY_GEN = 0x00000560 + CKM_ARIA_ECB = 
0x00000561 + CKM_ARIA_CBC = 0x00000562 + CKM_ARIA_MAC = 0x00000563 + CKM_ARIA_MAC_GENERAL = 0x00000564 + CKM_ARIA_CBC_PAD = 0x00000565 + CKM_ARIA_ECB_ENCRYPT_DATA = 0x00000566 + CKM_ARIA_CBC_ENCRYPT_DATA = 0x00000567 + CKM_SEED_KEY_GEN = 0x00000650 + CKM_SEED_ECB = 0x00000651 + CKM_SEED_CBC = 0x00000652 + CKM_SEED_MAC = 0x00000653 + CKM_SEED_MAC_GENERAL = 0x00000654 + CKM_SEED_CBC_PAD = 0x00000655 + CKM_SEED_ECB_ENCRYPT_DATA = 0x00000656 + CKM_SEED_CBC_ENCRYPT_DATA = 0x00000657 + CKM_SKIPJACK_KEY_GEN = 0x00001000 + CKM_SKIPJACK_ECB64 = 0x00001001 + CKM_SKIPJACK_CBC64 = 0x00001002 + CKM_SKIPJACK_OFB64 = 0x00001003 + CKM_SKIPJACK_CFB64 = 0x00001004 + CKM_SKIPJACK_CFB32 = 0x00001005 + CKM_SKIPJACK_CFB16 = 0x00001006 + CKM_SKIPJACK_CFB8 = 0x00001007 + CKM_SKIPJACK_WRAP = 0x00001008 + CKM_SKIPJACK_PRIVATE_WRAP = 0x00001009 + CKM_SKIPJACK_RELAYX = 0x0000100a + CKM_KEA_KEY_PAIR_GEN = 0x00001010 + CKM_KEA_KEY_DERIVE = 0x00001011 + CKM_KEA_DERIVE = 0x00001012 + CKM_FORTEZZA_TIMESTAMP = 0x00001020 + CKM_BATON_KEY_GEN = 0x00001030 + CKM_BATON_ECB128 = 0x00001031 + CKM_BATON_ECB96 = 0x00001032 + CKM_BATON_CBC128 = 0x00001033 + CKM_BATON_COUNTER = 0x00001034 + CKM_BATON_SHUFFLE = 0x00001035 + CKM_BATON_WRAP = 0x00001036 + CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated + CKM_EC_KEY_PAIR_GEN = 0x00001040 + CKM_ECDSA = 0x00001041 + CKM_ECDSA_SHA1 = 0x00001042 + CKM_ECDSA_SHA224 = 0x00001043 + CKM_ECDSA_SHA256 = 0x00001044 + CKM_ECDSA_SHA384 = 0x00001045 + CKM_ECDSA_SHA512 = 0x00001046 + CKM_ECDH1_DERIVE = 0x00001050 + CKM_ECDH1_COFACTOR_DERIVE = 0x00001051 + CKM_ECMQV_DERIVE = 0x00001052 + CKM_ECDH_AES_KEY_WRAP = 0x00001053 + CKM_RSA_AES_KEY_WRAP = 0x00001054 + CKM_JUNIPER_KEY_GEN = 0x00001060 + CKM_JUNIPER_ECB128 = 0x00001061 + CKM_JUNIPER_CBC128 = 0x00001062 + CKM_JUNIPER_COUNTER = 0x00001063 + CKM_JUNIPER_SHUFFLE = 0x00001064 + CKM_JUNIPER_WRAP = 0x00001065 + CKM_FASTHASH = 0x00001070 + CKM_AES_KEY_GEN = 0x00001080 + CKM_AES_ECB = 0x00001081 + CKM_AES_CBC = 0x00001082 + CKM_AES_MAC = 0x00001083 + CKM_AES_MAC_GENERAL = 0x00001084 + CKM_AES_CBC_PAD = 0x00001085 + CKM_AES_CTR = 0x00001086 + CKM_AES_GCM = 0x00001087 + CKM_AES_CCM = 0x00001088 + CKM_AES_CTS = 0x00001089 + CKM_AES_CMAC = 0x0000108A + CKM_AES_CMAC_GENERAL = 0x0000108B + CKM_AES_XCBC_MAC = 0x0000108C + CKM_AES_XCBC_MAC_96 = 0x0000108D + CKM_AES_GMAC = 0x0000108E + CKM_BLOWFISH_KEY_GEN = 0x00001090 + CKM_BLOWFISH_CBC = 0x00001091 + CKM_TWOFISH_KEY_GEN = 0x00001092 + CKM_TWOFISH_CBC = 0x00001093 + CKM_BLOWFISH_CBC_PAD = 0x00001094 + CKM_TWOFISH_CBC_PAD = 0x00001095 + CKM_DES_ECB_ENCRYPT_DATA = 0x00001100 + CKM_DES_CBC_ENCRYPT_DATA = 0x00001101 + CKM_DES3_ECB_ENCRYPT_DATA = 0x00001102 + CKM_DES3_CBC_ENCRYPT_DATA = 0x00001103 + CKM_AES_ECB_ENCRYPT_DATA = 0x00001104 + CKM_AES_CBC_ENCRYPT_DATA = 0x00001105 + CKM_GOSTR3410_KEY_PAIR_GEN = 0x00001200 + CKM_GOSTR3410 = 0x00001201 + CKM_GOSTR3410_WITH_GOSTR3411 = 0x00001202 + CKM_GOSTR3410_KEY_WRAP = 0x00001203 + CKM_GOSTR3410_DERIVE = 0x00001204 + CKM_GOSTR3411 = 0x00001210 + CKM_GOSTR3411_HMAC = 0x00001211 + CKM_GOST28147_KEY_GEN = 0x00001220 + CKM_GOST28147_ECB = 0x00001221 + CKM_GOST28147 = 0x00001222 + CKM_GOST28147_MAC = 0x00001223 + CKM_GOST28147_KEY_WRAP = 0x00001224 + CKM_DSA_PARAMETER_GEN = 0x00002000 + CKM_DH_PKCS_PARAMETER_GEN = 0x00002001 + CKM_X9_42_DH_PARAMETER_GEN = 0x00002002 + CKM_DSA_PROBABLISTIC_PARAMETER_GEN = 0x00002003 + CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN = 0x00002004 + CKM_AES_OFB = 0x00002104 + CKM_AES_CFB64 = 0x00002105 + CKM_AES_CFB8 = 0x00002106 + CKM_AES_CFB128 = 
0x00002107 + CKM_AES_CFB1 = 0x00002108 + CKM_AES_KEY_WRAP = 0x00002109 + CKM_AES_KEY_WRAP_PAD = 0x0000210A + CKM_RSA_PKCS_TPM_1_1 = 0x00004001 + CKM_RSA_PKCS_OAEP_TPM_1_1 = 0x00004002 + CKM_VENDOR_DEFINED = 0x80000000 + CKF_HW = 0x00000001 + CKF_ENCRYPT = 0x00000100 + CKF_DECRYPT = 0x00000200 + CKF_DIGEST = 0x00000400 + CKF_SIGN = 0x00000800 + CKF_SIGN_RECOVER = 0x00001000 + CKF_VERIFY = 0x00002000 + CKF_VERIFY_RECOVER = 0x00004000 + CKF_GENERATE = 0x00008000 + CKF_GENERATE_KEY_PAIR = 0x00010000 + CKF_WRAP = 0x00020000 + CKF_UNWRAP = 0x00040000 + CKF_DERIVE = 0x00080000 + CKF_EC_F_P = 0x00100000 + CKF_EC_F_2M = 0x00200000 + CKF_EC_ECPARAMETERS = 0x00400000 + CKF_EC_NAMEDCURVE = 0x00800000 + CKF_EC_UNCOMPRESS = 0x01000000 + CKF_EC_COMPRESS = 0x02000000 + CKF_EXTENSION = 0x80000000 + CKR_OK = 0x00000000 + CKR_CANCEL = 0x00000001 + CKR_HOST_MEMORY = 0x00000002 + CKR_SLOT_ID_INVALID = 0x00000003 + CKR_GENERAL_ERROR = 0x00000005 + CKR_FUNCTION_FAILED = 0x00000006 + CKR_ARGUMENTS_BAD = 0x00000007 + CKR_NO_EVENT = 0x00000008 + CKR_NEED_TO_CREATE_THREADS = 0x00000009 + CKR_CANT_LOCK = 0x0000000A + CKR_ATTRIBUTE_READ_ONLY = 0x00000010 + CKR_ATTRIBUTE_SENSITIVE = 0x00000011 + CKR_ATTRIBUTE_TYPE_INVALID = 0x00000012 + CKR_ATTRIBUTE_VALUE_INVALID = 0x00000013 + CKR_ACTION_PROHIBITED = 0x0000001B + CKR_DATA_INVALID = 0x00000020 + CKR_DATA_LEN_RANGE = 0x00000021 + CKR_DEVICE_ERROR = 0x00000030 + CKR_DEVICE_MEMORY = 0x00000031 + CKR_DEVICE_REMOVED = 0x00000032 + CKR_ENCRYPTED_DATA_INVALID = 0x00000040 + CKR_ENCRYPTED_DATA_LEN_RANGE = 0x00000041 + CKR_FUNCTION_CANCELED = 0x00000050 + CKR_FUNCTION_NOT_PARALLEL = 0x00000051 + CKR_FUNCTION_NOT_SUPPORTED = 0x00000054 + CKR_KEY_HANDLE_INVALID = 0x00000060 + CKR_KEY_SIZE_RANGE = 0x00000062 + CKR_KEY_TYPE_INCONSISTENT = 0x00000063 + CKR_KEY_NOT_NEEDED = 0x00000064 + CKR_KEY_CHANGED = 0x00000065 + CKR_KEY_NEEDED = 0x00000066 + CKR_KEY_INDIGESTIBLE = 0x00000067 + CKR_KEY_FUNCTION_NOT_PERMITTED = 0x00000068 + CKR_KEY_NOT_WRAPPABLE = 0x00000069 + CKR_KEY_UNEXTRACTABLE = 0x0000006A + CKR_MECHANISM_INVALID = 0x00000070 + CKR_MECHANISM_PARAM_INVALID = 0x00000071 + CKR_OBJECT_HANDLE_INVALID = 0x00000082 + CKR_OPERATION_ACTIVE = 0x00000090 + CKR_OPERATION_NOT_INITIALIZED = 0x00000091 + CKR_PIN_INCORRECT = 0x000000A0 + CKR_PIN_INVALID = 0x000000A1 + CKR_PIN_LEN_RANGE = 0x000000A2 + CKR_PIN_EXPIRED = 0x000000A3 + CKR_PIN_LOCKED = 0x000000A4 + CKR_SESSION_CLOSED = 0x000000B0 + CKR_SESSION_COUNT = 0x000000B1 + CKR_SESSION_HANDLE_INVALID = 0x000000B3 + CKR_SESSION_PARALLEL_NOT_SUPPORTED = 0x000000B4 + CKR_SESSION_READ_ONLY = 0x000000B5 + CKR_SESSION_EXISTS = 0x000000B6 + CKR_SESSION_READ_ONLY_EXISTS = 0x000000B7 + CKR_SESSION_READ_WRITE_SO_EXISTS = 0x000000B8 + CKR_SIGNATURE_INVALID = 0x000000C0 + CKR_SIGNATURE_LEN_RANGE = 0x000000C1 + CKR_TEMPLATE_INCOMPLETE = 0x000000D0 + CKR_TEMPLATE_INCONSISTENT = 0x000000D1 + CKR_TOKEN_NOT_PRESENT = 0x000000E0 + CKR_TOKEN_NOT_RECOGNIZED = 0x000000E1 + CKR_TOKEN_WRITE_PROTECTED = 0x000000E2 + CKR_UNWRAPPING_KEY_HANDLE_INVALID = 0x000000F0 + CKR_UNWRAPPING_KEY_SIZE_RANGE = 0x000000F1 + CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT = 0x000000F2 + CKR_USER_ALREADY_LOGGED_IN = 0x00000100 + CKR_USER_NOT_LOGGED_IN = 0x00000101 + CKR_USER_PIN_NOT_INITIALIZED = 0x00000102 + CKR_USER_TYPE_INVALID = 0x00000103 + CKR_USER_ANOTHER_ALREADY_LOGGED_IN = 0x00000104 + CKR_USER_TOO_MANY_TYPES = 0x00000105 + CKR_WRAPPED_KEY_INVALID = 0x00000110 + CKR_WRAPPED_KEY_LEN_RANGE = 0x00000112 + CKR_WRAPPING_KEY_HANDLE_INVALID = 0x00000113 + CKR_WRAPPING_KEY_SIZE_RANGE = 
0x00000114 + CKR_WRAPPING_KEY_TYPE_INCONSISTENT = 0x00000115 + CKR_RANDOM_SEED_NOT_SUPPORTED = 0x00000120 + CKR_RANDOM_NO_RNG = 0x00000121 + CKR_DOMAIN_PARAMS_INVALID = 0x00000130 + CKR_CURVE_NOT_SUPPORTED = 0x00000140 + CKR_BUFFER_TOO_SMALL = 0x00000150 + CKR_SAVED_STATE_INVALID = 0x00000160 + CKR_INFORMATION_SENSITIVE = 0x00000170 + CKR_STATE_UNSAVEABLE = 0x00000180 + CKR_CRYPTOKI_NOT_INITIALIZED = 0x00000190 + CKR_CRYPTOKI_ALREADY_INITIALIZED = 0x00000191 + CKR_MUTEX_BAD = 0x000001A0 + CKR_MUTEX_NOT_LOCKED = 0x000001A1 + CKR_NEW_PIN_MODE = 0x000001B0 + CKR_NEXT_OTP = 0x000001B1 + CKR_EXCEEDED_MAX_ITERATIONS = 0x000001B5 + CKR_FIPS_SELF_TEST_FAILED = 0x000001B6 + CKR_LIBRARY_LOAD_FAILED = 0x000001B7 + CKR_PIN_TOO_WEAK = 0x000001B8 + CKR_PUBLIC_KEY_INVALID = 0x000001B9 + CKR_FUNCTION_REJECTED = 0x00000200 + CKR_VENDOR_DEFINED = 0x80000000 + CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001 + CKF_OS_LOCKING_OK = 0x00000002 + CKF_DONT_BLOCK = 1 + CKG_MGF1_SHA1 = 0x00000001 + CKG_MGF1_SHA256 = 0x00000002 + CKG_MGF1_SHA384 = 0x00000003 + CKG_MGF1_SHA512 = 0x00000004 + CKG_MGF1_SHA224 = 0x00000005 + CKZ_DATA_SPECIFIED = 0x00000001 + CKD_NULL = 0x00000001 + CKD_SHA1_KDF = 0x00000002 + CKD_SHA1_KDF_ASN1 = 0x00000003 + CKD_SHA1_KDF_CONCATENATE = 0x00000004 + CKD_SHA224_KDF = 0x00000005 + CKD_SHA256_KDF = 0x00000006 + CKD_SHA384_KDF = 0x00000007 + CKD_SHA512_KDF = 0x00000008 + CKD_CPDIVERSIFY_KDF = 0x00000009 + CKD_SHA3_224_KDF = 0x0000000A + CKD_SHA3_256_KDF = 0x0000000B + CKD_SHA3_384_KDF = 0x0000000C + CKD_SHA3_512_KDF = 0x0000000D + CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001 + CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002 + CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003 + CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004 + CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005 + CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006 + CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007 + CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008 + CKZ_SALT_SPECIFIED = 0x00000001 + CK_OTP_VALUE = 0 + CK_OTP_PIN = 1 + CK_OTP_CHALLENGE = 2 + CK_OTP_TIME = 3 + CK_OTP_COUNTER = 4 + CK_OTP_FLAGS = 5 + CK_OTP_OUTPUT_LENGTH = 6 + CK_OTP_OUTPUT_FORMAT = 7 + CKF_NEXT_OTP = 0x00000001 + CKF_EXCLUDE_TIME = 0x00000002 + CKF_EXCLUDE_COUNTER = 0x00000004 + CKF_EXCLUDE_CHALLENGE = 0x00000008 + CKF_EXCLUDE_PIN = 0x00000010 + CKF_USER_FRIENDLY_OTP = 0x00000020 +) diff --git a/tools/vendor/github.com/moby/sys/sequential/sequential_unix.go b/tools/vendor/github.com/moby/sys/sequential/sequential_unix.go index a3c7340e3..278cdfb07 100644 --- a/tools/vendor/github.com/moby/sys/sequential/sequential_unix.go +++ b/tools/vendor/github.com/moby/sys/sequential/sequential_unix.go @@ -5,41 +5,22 @@ package sequential import "os" -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. +// Create is an alias for [os.Create] on non-Windows platforms. func Create(name string) (*os.File, error) { return os.Create(name) } -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. +// Open is an alias for [os.Open] on non-Windows platforms. func Open(name string) (*os.File, error) { return os.Open(name) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. 
It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. +// OpenFile is an alias for [os.OpenFile] on non-Windows platforms. func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(name, flag, perm) } -// CreateTemp creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. +// CreateTemp is an alias for [os.CreateTemp] on non-Windows platforms. func CreateTemp(dir, prefix string) (f *os.File, err error) { return os.CreateTemp(dir, prefix) } diff --git a/tools/vendor/github.com/moby/sys/sequential/sequential_windows.go b/tools/vendor/github.com/moby/sys/sequential/sequential_windows.go index 3f7f0d83e..3500ecc68 100644 --- a/tools/vendor/github.com/moby/sys/sequential/sequential_windows.go +++ b/tools/vendor/github.com/moby/sys/sequential/sequential_windows.go @@ -5,48 +5,52 @@ import ( "path/filepath" "strconv" "sync" - "syscall" "time" "unsafe" "golang.org/x/sys/windows" ) -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. +// Create is a copy of [os.Create], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func Create(name string) (*os.File, error) { - return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) + return openFileSequential(name, windows.O_RDWR|windows.O_CREAT|windows.O_TRUNC) } -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. +// Open is a copy of [os.Open], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func Open(name string) (*os.File, error) { - return OpenFile(name, os.O_RDONLY, 0) + return openFileSequential(name, windows.O_RDONLY) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. +// OpenFile is a copy of [os.OpenFile], modified to use sequential file access. 
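Reviewer context for the moby/sys/sequential rewrite above: after this change the Unix build is a set of plain os aliases, while the Windows build opens files with FILE_FLAG_SEQUENTIAL_SCAN. A minimal sketch of how a consumer uses the package (the file name is hypothetical):

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/moby/sys/sequential"
)

func main() {
	// On Windows this opens the file with FILE_FLAG_SEQUENTIAL_SCAN so the
	// cache manager favors one front-to-back pass; everywhere else it is
	// exactly os.Open.
	f, err := sequential.Open("layer.tar") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	n, err := io.Copy(io.Discard, f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes sequentially\n", n)
}
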
+// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, err := openFileSequential(name, flag, 0) - if err == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: err} + return openFileSequential(name, flag) } -func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) +func openFileSequential(name string, flag int) (file *os.File, err error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: windows.ERROR_FILE_NOT_FOUND} + } + r, e := openSequential(name, flag|windows.O_CLOEXEC) if e != nil { - return nil, e + return nil, &os.PathError{Op: "open", Path: name, Err: e} } return os.NewFile(uintptr(r), name), nil } @@ -58,7 +62,7 @@ func makeInheritSa() *windows.SecurityAttributes { return &sa } -func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { +func openSequential(path string, mode int) (fd windows.Handle, err error) { if len(path) == 0 { return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND } @@ -101,15 +105,16 @@ func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err err createmode = windows.OPEN_EXISTING } // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, windows.FILE_FLAG_SEQUENTIAL_SCAN, 0) return h, e } // Helpers for CreateTemp -var rand uint32 -var randmu sync.Mutex +var ( + rand uint32 + randmu sync.Mutex +) func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) @@ -127,17 +132,13 @@ func nextSuffix() string { return strconv.Itoa(int(1e9 + r%1e9))[1:] } -// CreateTemp is a copy of os.CreateTemp, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. +// CreateTemp is a copy of [os.CreateTemp], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. 
+// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func CreateTemp(dir, prefix string) (f *os.File, err error) { if dir == "" { dir = os.TempDir() @@ -146,7 +147,7 @@ func CreateTemp(dir, prefix string) (f *os.File, err error) { nconflict := 0 for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + f, err = openFileSequential(name, windows.O_RDWR|windows.O_CREAT|windows.O_EXCL) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() diff --git a/tools/vendor/github.com/moby/sys/user/idtools.go b/tools/vendor/github.com/moby/sys/user/idtools.go new file mode 100644 index 000000000..595b7a927 --- /dev/null +++ b/tools/vendor/github.com/moby/sys/user/idtools.go @@ -0,0 +1,141 @@ +package user + +import ( + "fmt" + "os" +) + +// MkdirOpt is a type for options to pass to Mkdir calls +type MkdirOpt func(*mkdirOptions) + +type mkdirOptions struct { + onlyNew bool +} + +// WithOnlyNew is an option for MkdirAllAndChown that will only change ownership and permissions +// on newly created directories. If the directory already exists, it will not be modified +func WithOnlyNew(o *mkdirOptions) { + o.onlyNew = true +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. By default, if the directory already exists, this +// function will still change ownership and permissions. If WithOnlyNew is passed as an +// option, then only the newly created directories will have ownership and permissions changed. +func MkdirAllAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { + var options mkdirOptions + for _, opt := range opts { + opt(&options) + } + + return mkdirAs(path, mode, uid, gid, true, options.onlyNew) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// By default, if the directory already exists, this function still changes ownership and permissions. +// If WithOnlyNew is passed as an option, then only the newly created directory will have ownership +// and permissions changed. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. +func MkdirAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { + var options mkdirOptions + for _, opt := range opts { + opt(&options) + } + return mkdirAs(path, mode, uid, gid, false, options.onlyNew) +} + +// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. 
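A quick sketch of the new idtools API scaffolded above: the functional option WithOnlyNew for MkdirAllAndChown, plus the exported IdentityMapping.ToHost/ToContainer wrappers defined just below over toHost/toContainer. The uid/gid values and path are hypothetical, and the IDMap field layout (ID/ParentID/Count) is taken from the arithmetic in the vendored code:

package main

import (
	"fmt"
	"log"

	"github.com/moby/sys/user"
)

func main() {
	// Create /var/lib/demo/data plus any missing parents, owned by a
	// remapped root (hypothetical uid/gid 100000). WithOnlyNew limits the
	// chown/chmod to directories this call actually creates.
	if err := user.MkdirAllAndChown("/var/lib/demo/data", 0o755, 100000, 100000, user.WithOnlyNew); err != nil {
		log.Fatal(err)
	}

	// One remapped range: container IDs 0-65535 onto host IDs 100000-165535.
	m := user.IdentityMapping{
		UIDMaps: []user.IDMap{{ID: 0, ParentID: 100000, Count: 65536}},
		GIDMaps: []user.IDMap{{ID: 0, ParentID: 100000, Count: 65536}},
	}

	uid, gid, err := m.ToHost(1000, 1000)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(uid, gid) // 101000 101000

	cuid, cgid, err := m.ToContainer(101000, 101000)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cuid, cgid) // 1000 1000
}
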
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (int64(hostID) >= m.ParentID) && (int64(hostID) <= (m.ParentID + m.Count - 1)) { + contID := int(m.ID + (int64(hostID) - m.ParentID)) + return contID, nil + } + } + return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (int64(contID) >= m.ID) && (int64(contID) <= (m.ID + m.Count - 1)) { + hostID := int(m.ParentID + (int64(contID) - m.ID)) + return hostID, nil + } + } + return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID) +} + +// IdentityMapping contains a mapping of UIDs and GIDs. +// The zero value represents an empty mapping. +type IdentityMapping struct { + UIDMaps []IDMap `json:"UIDMaps"` + GIDMaps []IDMap `json:"GIDMaps"` +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i IdentityMapping) RootPair() (int, int) { + uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps) + return uid, gid +} + +// ToHost returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids +func (i IdentityMapping) ToHost(uid, gid int) (int, int, error) { + var err error + ruid, rgid := i.RootPair() + + if uid != ruid { + ruid, err = toHost(uid, i.UIDMaps) + if err != nil { + return ruid, rgid, err + } + } + + if gid != rgid { + rgid, err = toHost(gid, i.GIDMaps) + } + return ruid, rgid, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i IdentityMapping) ToContainer(uid, gid int) (int, int, error) { + ruid, err := toContainer(uid, i.UIDMaps) + if err != nil { + return -1, -1, err + } + rgid, err := toContainer(gid, i.GIDMaps) + return ruid, rgid, err +} + +// Empty returns true if there are no id mappings +func (i IdentityMapping) Empty() bool { + return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 +} diff --git a/tools/vendor/github.com/moby/sys/user/idtools_unix.go b/tools/vendor/github.com/moby/sys/user/idtools_unix.go new file mode 100644 index 000000000..4e39d2446 --- /dev/null +++ b/tools/vendor/github.com/moby/sys/user/idtools_unix.go @@ -0,0 +1,143 @@ +//go:build !windows + +package user + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "syscall" +) + +func mkdirAs(path string, mode os.FileMode, uid, gid int, mkAll, onlyNew bool) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if onlyNew { + return nil + } + + // short-circuit -- we were called with an existing directory and chown was requested + return setPermissions(path, mode, uid, gid, stat) + } + + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can
chown all of them properly at the end. If onlyNew is true, we won't + // chown the full directory path if it exists + var paths []string + if os.IsNotExist(err) { + paths = append(paths, path) + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err = os.Stat(dirPath); os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err = os.MkdirAll(path, mode); err != nil { + return err + } + } else if err = os.Mkdir(path, mode); err != nil { + return err + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err = setPermissions(pathComponent, mode, uid, gid, nil); err != nil { + return err + } + } + return nil +} + +// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +// Likewise for setting permissions. +func setPermissions(p string, mode os.FileMode, uid, gid int, stat os.FileInfo) error { + if stat == nil { + var err error + stat, err = os.Stat(p) + if err != nil { + return err + } + } + if stat.Mode().Perm() != mode.Perm() { + if err := os.Chmod(p, mode.Perm()); err != nil { + return err + } + } + ssi := stat.Sys().(*syscall.Stat_t) + if ssi.Uid == uint32(uid) && ssi.Gid == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} + +// LoadIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func LoadIdentityMapping(name string) (IdentityMapping, error) { + // TODO: Consider adding support for calling out to "getent" + usr, err := LookupUser(name) + if err != nil { + return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %w", name, err) + } + + subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr) + if err != nil { + return IdentityMapping{}, err + } + subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr) + if err != nil { + return IdentityMapping{}, err + } + + return IdentityMapping{ + UIDMaps: subuidRanges, + GIDMaps: subgidRanges, + }, nil +} + +func lookupSubRangesFile(path string, usr User) ([]IDMap, error) { + uidstr := strconv.Itoa(usr.Uid) + rangeList, err := ParseSubIDFileFilter(path, func(sid SubID) bool { + return sid.Name == usr.Name || sid.Name == uidstr + }) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name) + } + + idMap := []IDMap{} + + var containerID int64 + for _, idrange := range rangeList { + idMap = append(idMap, IDMap{ + ID: containerID, + ParentID: idrange.SubID, + Count: idrange.Count, + }) + containerID = containerID + idrange.Count + } + return idMap, nil +} diff --git a/tools/vendor/github.com/moby/sys/user/idtools_windows.go b/tools/vendor/github.com/moby/sys/user/idtools_windows.go new file mode 100644 index 000000000..9de730caf --- /dev/null +++ b/tools/vendor/github.com/moby/sys/user/idtools_windows.go @@ -0,0 +1,13 @@ +package user + +import ( + "os" +) + +// This is currently a wrapper around [os.MkdirAll] since currently +// permissions aren't set through this path, the 
identity isn't utilized. +// Ownership is handled elsewhere, but in the future could be supported here +// too. +func mkdirAs(path string, _ os.FileMode, _, _ int, _, _ bool) error { + return os.MkdirAll(path, 0) +} diff --git a/tools/vendor/github.com/moby/term/term_unix.go b/tools/vendor/github.com/moby/term/term_unix.go index 2ec7706a1..579ce5530 100644 --- a/tools/vendor/github.com/moby/term/term_unix.go +++ b/tools/vendor/github.com/moby/term/term_unix.go @@ -81,7 +81,7 @@ func setRawTerminal(fd uintptr) (*State, error) { return makeRaw(fd) } -func setRawTerminalOutput(fd uintptr) (*State, error) { +func setRawTerminalOutput(uintptr) (*State, error) { return nil, nil } diff --git a/tools/vendor/github.com/onsi/gomega/types/types.go b/tools/vendor/github.com/onsi/gomega/types/types.go index 30f2beed3..685a46f37 100644 --- a/tools/vendor/github.com/onsi/gomega/types/types.go +++ b/tools/vendor/github.com/onsi/gomega/types/types.go @@ -10,20 +10,20 @@ type GomegaFailHandler func(message string, callerSkip ...int) // A simple *testing.T interface wrapper type GomegaTestingT interface { Helper() - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) } -// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers +// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers type Gomega interface { - Ω(actual interface{}, extra ...interface{}) Assertion - Expect(actual interface{}, extra ...interface{}) Assertion - ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion + Ω(actual any, extra ...any) Assertion + Expect(actual any, extra ...any) Assertion + ExpectWithOffset(offset int, actual any, extra ...any) Assertion - Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion - EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion + Eventually(actualOrCtx any, args ...any) AsyncAssertion + EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion - Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion - ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion + Consistently(actualOrCtx any, args ...any) AsyncAssertion + ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion SetDefaultEventuallyTimeout(time.Duration) SetDefaultEventuallyPollingInterval(time.Duration) @@ -37,9 +37,9 @@ type Gomega interface { // // For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers type GomegaMatcher interface { - Match(actual interface{}) (success bool, err error) - FailureMessage(actual interface{}) (message string) - NegatedFailureMessage(actual interface{}) (message string) + Match(actual any) (success bool, err error) + FailureMessage(actual any) (message string) + NegatedFailureMessage(actual any) (message string) } /* @@ -52,10 +52,10 @@ For example, a process' exit code can never change. So, gexec's Exit matcher re for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
*/ type OracleMatcher interface { - MatchMayChangeInTheFuture(actual interface{}) bool + MatchMayChangeInTheFuture(actual any) bool } -func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { +func MatchMayChangeInTheFuture(matcher GomegaMatcher, value any) bool { oracleMatcher, ok := matcher.(OracleMatcher) if !ok { return true @@ -67,8 +67,13 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { // AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure // they are eventually satisfied type AsyncAssertion interface { - Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + Should(matcher GomegaMatcher, optionalDescription ...any) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool + + // equivalent to above + To(matcher GomegaMatcher, optionalDescription ...any) bool + ToNot(matcher GomegaMatcher, optionalDescription ...any) bool + NotTo(matcher GomegaMatcher, optionalDescription ...any) bool WithOffset(offset int) AsyncAssertion WithTimeout(interval time.Duration) AsyncAssertion @@ -76,18 +81,18 @@ type AsyncAssertion interface { Within(timeout time.Duration) AsyncAssertion ProbeEvery(interval time.Duration) AsyncAssertion WithContext(ctx context.Context) AsyncAssertion - WithArguments(argsToForward ...interface{}) AsyncAssertion + WithArguments(argsToForward ...any) AsyncAssertion MustPassRepeatedly(count int) AsyncAssertion } // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers type Assertion interface { - Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + Should(matcher GomegaMatcher, optionalDescription ...any) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool - To(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool - NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool + To(matcher GomegaMatcher, optionalDescription ...any) bool + ToNot(matcher GomegaMatcher, optionalDescription ...any) bool + NotTo(matcher GomegaMatcher, optionalDescription ...any) bool WithOffset(offset int) Assertion diff --git a/tools/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/tools/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 7069ae44d..c3897c7ca 100644 --- a/tools/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/tools/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -22,7 +22,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/tools/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/tools/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index d1236ba72..1aa0693b5 100644 --- a/tools/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/tools/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -83,7 +83,7 @@ type Process struct { // Rlimits specifies rlimit options to apply to the process. 
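Two things change in gomega's public types above: interface{} becomes any throughout, and AsyncAssertion gains To/ToNot/NotTo as synonyms for Should/ShouldNot, mirroring the synchronous Assertion interface. A sketch of how the new aliases read in a test; the package name and timings are made up:

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestEventuallyTo(t *testing.T) {
	g := NewWithT(t)

	var ready atomic.Bool
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for real async work
		ready.Store(true)
	}()

	// AsyncAssertion now exposes To/ToNot/NotTo as equivalents of
	// Should/ShouldNot, so async and sync assertions can read alike.
	g.Eventually(func() bool { return ready.Load() }).
		WithTimeout(time.Second).
		ProbeEvery(5 * time.Millisecond).
		To(BeTrue())
}
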
Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. - NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` + NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux,zos"` // ApparmorProfile specifies the apparmor profile for the container. ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` // Specify an oom_score_adj for the container. @@ -94,10 +94,12 @@ type Process struct { SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` // IOPriority contains the I/O priority settings for the cgroup. IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"` + // ExecCPUAffinity specifies CPU affinity for exec processes. + ExecCPUAffinity *CPUAffinity `json:"execCPUAffinity,omitempty" platform:"linux"` } // LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. -// http://man7.org/linux/man-pages/man7/capabilities.7.html +// https://man7.org/linux/man-pages/man7/capabilities.7.html type LinuxCapabilities struct { // Bounding is the set of capabilities checked by the kernel. Bounding []string `json:"bounding,omitempty" platform:"linux"` @@ -127,6 +129,12 @@ const ( IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE" ) +// CPUAffinity specifies process' CPU affinity. +type CPUAffinity struct { + Initial string `json:"initial,omitempty"` + Final string `json:"final,omitempty"` +} + // Box specifies dimensions of a rectangle. Used for specifying the size of a console. type Box struct { // Height is the vertical dimension of a box. @@ -627,6 +635,17 @@ type WindowsCPUResources struct { // cycles per 10,000 cycles. Set processor `maximum` to a percentage times // 100. Maximum *uint16 `json:"maximum,omitempty"` + // Set of CPUs to affinitize for this container. + Affinity []WindowsCPUGroupAffinity `json:"affinity,omitempty"` +} + +// Similar to _GROUP_AFFINITY struct defined in +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/miniport/ns-miniport-_group_affinity +type WindowsCPUGroupAffinity struct { + // CPU mask relative to this CPU group. + Mask uint64 `json:"mask,omitempty"` + // Processor group the mask refers to, as returned by GetLogicalProcessorInformationEx. + Group uint32 `json:"group,omitempty"` } // WindowsStorageResources contains storage resource management settings. @@ -751,6 +770,10 @@ const ( ArchPARISC Arch = "SCMP_ARCH_PARISC" ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" ArchRISCV64 Arch = "SCMP_ARCH_RISCV64" + ArchLOONGARCH64 Arch = "SCMP_ARCH_LOONGARCH64" + ArchM68K Arch = "SCMP_ARCH_M68K" + ArchSH Arch = "SCMP_ARCH_SH" + ArchSHEB Arch = "SCMP_ARCH_SHEB" ) // LinuxSeccompAction taken upon Seccomp rule match @@ -826,28 +849,33 @@ type LinuxIntelRdt struct { // ZOS contains platform-specific configuration for z/OS based containers. type ZOS struct { - // Devices are a list of device nodes that are created for the container - Devices []ZOSDevice `json:"devices,omitempty"` + // Namespaces contains the namespaces that are created and/or joined by the container + Namespaces []ZOSNamespace `json:"namespaces,omitempty"` } -// ZOSDevice represents the mknod information for a z/OS special device file -type ZOSDevice struct { - // Path to the device. - Path string `json:"path"` - // Device type, block, char, etc. - Type string `json:"type"` - // Major is the device's major number. 
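The runtime-spec config.go hunk above adds Process.ExecCPUAffinity (Linux) and a Windows CPU-group affinity list. A sketch of populating the new fields; the CPU-list strings and mask are illustrative values, and their accepted format is defined by the runtime spec rather than enforced by these structs:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	p := specs.Process{
		Cwd:  "/",
		Args: []string{"/bin/true"},
		// CPU affinity applied to exec'd processes, e.g. start on CPUs 0-3
		// and settle on 0-2 once the process is running.
		ExecCPUAffinity: &specs.CPUAffinity{Initial: "0-3", Final: "0-2"},
	}

	w := specs.WindowsCPUResources{
		// Mask is relative to the processor group, mirroring _GROUP_AFFINITY.
		Affinity: []specs.WindowsCPUGroupAffinity{{Mask: 0xF, Group: 0}},
	}

	for _, v := range []any{p, w} {
		b, err := json.Marshal(v)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(b))
	}
}
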
- Major int64 `json:"major"` - // Minor is the device's minor number. - Minor int64 `json:"minor"` - // FileMode permission bits for the device. - FileMode *os.FileMode `json:"fileMode,omitempty"` - // UID of the device. - UID *uint32 `json:"uid,omitempty"` - // Gid of the device. - GID *uint32 `json:"gid,omitempty"` +// ZOSNamespace is the configuration for a z/OS namespace +type ZOSNamespace struct { + // Type is the type of namespace + Type ZOSNamespaceType `json:"type"` + // Path is a path to an existing namespace persisted on disk that can be joined + // and is of the same type + Path string `json:"path,omitempty"` } +// ZOSNamespaceType is one of the z/OS namespaces +type ZOSNamespaceType string + +const ( + // PIDNamespace for isolating process IDs + ZOSPIDNamespace ZOSNamespaceType = "pid" + // MountNamespace for isolating mount points + ZOSMountNamespace ZOSNamespaceType = "mount" + // IPCNamespace for isolating System V IPC, POSIX message queues + ZOSIPCNamespace ZOSNamespaceType = "ipc" + // UTSNamespace for isolating hostname and NIS domain name + ZOSUTSNamespace ZOSNamespaceType = "uts" +) + // LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler type LinuxSchedulerPolicy string diff --git a/tools/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/tools/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 503971e05..23234a9c5 100644 --- a/tools/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/tools/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -8,7 +8,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 2 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/tools/vendor/github.com/operator-framework/ansible-operator-plugins/internal/version/version.go b/tools/vendor/github.com/operator-framework/ansible-operator-plugins/internal/version/version.go index 31b70bf77..3b223c969 100644 --- a/tools/vendor/github.com/operator-framework/ansible-operator-plugins/internal/version/version.go +++ b/tools/vendor/github.com/operator-framework/ansible-operator-plugins/internal/version/version.go @@ -28,5 +28,5 @@ var ( // and release process, this variable will be removed. // TODO: find a way to make this automated. For now manually update this before releases. - ImageVersion = "v1.37.2" + ImageVersion = "v1.42.0" ) diff --git a/tools/vendor/github.com/operator-framework/ansible-operator-plugins/pkg/plugins/ansible/v1/scaffolds/internal/templates/makefile.go b/tools/vendor/github.com/operator-framework/ansible-operator-plugins/pkg/plugins/ansible/v1/scaffolds/internal/templates/makefile.go index 08b20e511..279ccb14a 100644 --- a/tools/vendor/github.com/operator-framework/ansible-operator-plugins/pkg/plugins/ansible/v1/scaffolds/internal/templates/makefile.go +++ b/tools/vendor/github.com/operator-framework/ansible-operator-plugins/pkg/plugins/ansible/v1/scaffolds/internal/templates/makefile.go @@ -119,13 +119,17 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform ##@ Deployment +ifndef ignore-not-found + ignore-not-found = false +endif + .PHONY: install install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 
$(KUSTOMIZE) build config/crd | kubectl apply -f - .PHONY: uninstall uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | kubectl delete -f - + $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f - .PHONY: deploy deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. @@ -134,7 +138,7 @@ deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/c .PHONY: undeploy undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/default | kubectl delete -f - + $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') diff --git a/tools/vendor/github.com/operator-framework/ansible-operator-plugins/pkg/plugins/util/cleanup.go b/tools/vendor/github.com/operator-framework/ansible-operator-plugins/pkg/plugins/util/cleanup.go index 65f890dd2..9d316f639 100644 --- a/tools/vendor/github.com/operator-framework/ansible-operator-plugins/pkg/plugins/util/cleanup.go +++ b/tools/vendor/github.com/operator-framework/ansible-operator-plugins/pkg/plugins/util/cleanup.go @@ -69,116 +69,227 @@ func UpdateKustomizationsInit() error { return fmt.Errorf("remove %s resources: %v", defaultKFile, err) } + if err := kbutil.ReplaceInFile(defaultKFile, + `# Uncomment the patches line if you enable Metrics and CertManager +# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. +# This patch will protect the metrics with certManager self-signed certs. +#- path: cert_metrics_manager_patch.yaml +# target: +# kind: Deployment`, ""); err != nil { + return fmt.Errorf("remove %s resources: %v", defaultKFile, err) + } + + // Remove the file not used for Helm projects since we do not scaffold the cert-manager + certPatchPath := filepath.Join("config", "default", "cert_metrics_manager_patch.yaml") + if err := os.Remove(certPatchPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove unused file %s: %v", certPatchPath, err) + } + + // Remove the file not used for Helm projects since we do not scaffold the cert-manager + monitorTLSPatchPath := filepath.Join("config", "prometheus", "monitor_tls_patch.yaml") + if err := os.Remove(monitorTLSPatchPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove unused file %s: %v", monitorTLSPatchPath, err) + } + + prometheusKustomize := filepath.Join("config", "prometheus", "kustomization.yaml") + if err := kbutil.ReplaceInFile(prometheusKustomize, + `# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus +# to securely reference certificates created and managed by cert-manager. +# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml +# to mount the "metrics-server-cert" secret in the Manager Deployment. 
+#patches: +# - path: monitor_tls_patch.yaml +# target: +# kind: ServiceMonitor`, ""); err != nil { + return fmt.Errorf("remove %s resources: %v", defaultKFile, err) + } + if err := kbutil.ReplaceInFile(defaultKFile, ` # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml #- path: manager_webhook_patch.yaml - -# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. -# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. -# 'CERTMANAGER' needs to be enabled to use ca injection -#- path: webhookcainjection_patch.yaml +# target: +# kind: Deployment # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. # Uncomment the following replacements to add the cert-manager CA injection annotations #replacements: -# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldPath: .metadata.namespace # namespace of the certificate CR -# targets: -# - select: -# kind: ValidatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - select: -# kind: MutatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - select: -# kind: CustomResourceDefinition -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - source: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldPath: .metadata.name -# targets: -# - select: -# kind: ValidatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - select: -# kind: MutatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - select: -# kind: CustomResourceDefinition -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - source: # Add cert-manager annotation to the webhook Service -# kind: Service -# version: v1 -# name: webhook-service -# fieldPath: .metadata.name # namespace of the service -# targets: -# - select: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# fieldPaths: -# - .spec.dnsNames.0 -# - .spec.dnsNames.1 -# options: -# delimiter: '.' -# index: 0 -# create: true -# - source: -# kind: Service -# version: v1 -# name: webhook-service -# fieldPath: .metadata.namespace # namespace of the service -# targets: -# - select: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# fieldPaths: -# - .spec.dnsNames.0 -# - .spec.dnsNames.1 -# options: -# delimiter: '.' 
-# index: 1 -# create: true +# - source: # Uncomment the following block to enable certificates for metrics +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.name +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 0 +# create: true +# +# - source: +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.namespace +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true +# - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have any webhook +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # Name of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # Namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. +# +kubebuilder:scaffold:crdkustomizecainjectionns +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 
+# +kubebuilder:scaffold:crdkustomizecainjectionname `, ""); err != nil { return fmt.Errorf("remove %s patch and vars blocks: %v", defaultKFile, err) } diff --git a/tools/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go b/tools/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go index 94f14a22a..397190a6a 100644 --- a/tools/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go +++ b/tools/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go @@ -209,7 +209,7 @@ func loadBundle(csvName string, dir string) (*Bundle, error) { } defer fileReader.Close() - decoder := yaml.NewYAMLOrJSONDecoder(fileReader, 30) + decoder := yaml.NewYAMLToJSONDecoder(fileReader) obj := &unstructured.Unstructured{} if err = decoder.Decode(obj); err != nil { errs = append(errs, fmt.Errorf("unable to decode object: %s", err)) diff --git a/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go b/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go index b5f5e3b7e..8386b2032 100644 --- a/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go +++ b/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go @@ -134,7 +134,7 @@ type GrpcPodConfig struct { // specified. Specifying a value other than `legacy` or `restricted` results in a validation error. When using older // catalog images, which can not run in `restricted` mode, the SecurityContextConfig should be set to `legacy`. // - // More information about PSA can be found here: https://kubernetes.io/docs/concepts/security/pod-security-admission/' + // More information about PSA can be found here: https://kubernetes.io/docs/concepts/security/pod-security-admission/ // +optional // +kubebuilder:validation:Enum=legacy;restricted SecurityContextConfig SecurityConfig `json:"securityContextConfig,omitempty"` @@ -163,8 +163,8 @@ type GrpcPodConfig struct { // ExtractContentConfig configures content extraction from a file-based catalog index image. type ExtractContentConfig struct { - // CacheDir is the directory storing the pre-calculated API cache. - CacheDir string `json:"cacheDir"` + // CacheDir is the (optional) directory storing the pre-calculated API cache. + CacheDir string `json:"cacheDir,omitempty"` // CatalogDir is the directory storing the file-based catalog contents. CatalogDir string `json:"catalogDir"` } diff --git a/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go b/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go index 09deba525..3b1b0feed 100644 --- a/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go +++ b/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go @@ -277,6 +277,8 @@ type BundleLookup struct { CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"` // Conditions represents the overall state of a BundleLookup. // +optional + // +patchMergeKey=type + // +patchStrategy=merge Conditions []BundleLookupCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` // The effective properties of the unpacked bundle. 
// +optional diff --git a/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go b/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go index 292fedf9b..7aa854f59 100644 --- a/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go +++ b/tools/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go @@ -232,6 +232,8 @@ type SubscriptionStatus struct { // CatalogHealth contains the Subscription's view of its relevant CatalogSources' status. // It is used to determine SubscriptionStatusConditions related to CatalogSources. // +optional + // +patchMergeKey= + // +patchStrategy=merge CatalogHealth []SubscriptionCatalogHealth `json:"catalogHealth,omitempty"` // Conditions is a list of the latest available observations about a Subscription's current state. diff --git a/tools/vendor/github.com/operator-framework/api/pkg/validation/internal/multiarch.go b/tools/vendor/github.com/operator-framework/api/pkg/validation/internal/multiarch.go index a533ffe98..7207400e6 100644 --- a/tools/vendor/github.com/operator-framework/api/pkg/validation/internal/multiarch.go +++ b/tools/vendor/github.com/operator-framework/api/pkg/validation/internal/multiarch.go @@ -513,7 +513,7 @@ func (data *multiArchValidator) checkNodeAffinity(images map[string][]platform) if !imagePlatformDataValid { // Node affinity info is missing from CSV (or invalid) data.warns = append(data.warns, - fmt.Errorf("check if the CSV is missing a node affinity configuration for the image: %q. "+ + fmt.Errorf("check if the CSV is missing a node affinity configuration for the image: %q. ", image, )) } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/action/render.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/action/render.go index 07631b7c4..a124c0f8a 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/alpha/action/render.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/action/render.go @@ -22,9 +22,8 @@ import ( "github.com/operator-framework/operator-registry/alpha/property" "github.com/operator-framework/operator-registry/pkg/containertools" "github.com/operator-framework/operator-registry/pkg/image" - "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" "github.com/operator-framework/operator-registry/pkg/lib/bundle" - "github.com/operator-framework/operator-registry/pkg/lib/log" "github.com/operator-framework/operator-registry/pkg/registry" "github.com/operator-framework/operator-registry/pkg/sqlite" ) @@ -66,14 +65,17 @@ func (r Render) Run(ctx context.Context) (*declcfg.DeclarativeConfig, error) { logDeprecationMessage.Do(func() {}) } if r.Registry == nil { - reg, err := r.createRegistry() + reg, err := containersimageregistry.NewDefault() if err != nil { return nil, fmt.Errorf("create registry: %v", err) } - defer reg.Destroy() + defer func() { + _ = reg.Destroy() + }() r.Registry = reg } + // nolint:prealloc var cfgs []declcfg.DeclarativeConfig for _, ref := range r.Refs { cfg, err := r.renderReference(ctx, ref) @@ -98,31 +100,12 @@ func (r Render) Run(ctx context.Context) (*declcfg.DeclarativeConfig, error) { return combineConfigs(cfgs), nil } -func (r Render) createRegistry() (*containerdregistry.Registry, error) { - cacheDir, err := os.MkdirTemp("", "render-registry-") - if err 
!= nil { - return nil, fmt.Errorf("create tempdir: %v", err) - } - - reg, err := containerdregistry.NewRegistry( - containerdregistry.WithCacheDir(cacheDir), - - // The containerd registry impl is somewhat verbose, even on the happy path, - // so discard all logger logs. Any important failures will be returned from - // registry methods and eventually logged as fatal errors. - containerdregistry.WithLog(log.Null()), - ) - if err != nil { - return nil, err - } - return reg, nil -} - func (r Render) renderReference(ctx context.Context, ref string) (*declcfg.DeclarativeConfig, error) { stat, err := os.Stat(ref) if err != nil { return r.imageToDeclcfg(ctx, ref) } + // nolint:nestif if stat.IsDir() { dirEntries, err := os.ReadDir(ref) if err != nil { @@ -178,6 +161,7 @@ func (r Render) imageToDeclcfg(ctx context.Context, imageRef string) (*declcfg.D } var cfg *declcfg.DeclarativeConfig + // nolint:nestif if dbFile, ok := labels[containertools.DbLocationLabel]; ok { if !r.AllowedRefMask.Allowed(RefSqliteImage) { return nil, fmt.Errorf("cannot render sqlite image: %w", ErrNotAllowed) @@ -279,6 +263,7 @@ func populateDBRelatedImages(ctx context.Context, cfg *declcfg.DeclarativeConfig } defer rows.Close() + // nolint:staticcheck images := map[string]sets.String{} for rows.Next() { var ( @@ -326,10 +311,10 @@ func bundleToDeclcfg(bundle *registry.Bundle) (*declcfg.Bundle, error) { return nil, fmt.Errorf("get related images for bundle %q: %v", bundle.Name, err) } - var csvJson []byte + var csvJSON []byte for _, obj := range bundle.Objects { if obj.GetKind() == "ClusterServiceVersion" { - csvJson, err = json.Marshal(obj) + csvJSON, err = json.Marshal(obj) if err != nil { return nil, fmt.Errorf("marshal CSV JSON for bundle %q: %v", bundle.Name, err) } @@ -344,7 +329,7 @@ func bundleToDeclcfg(bundle *registry.Bundle) (*declcfg.Bundle, error) { Properties: props, RelatedImages: relatedImages, Objects: objs, - CsvJSON: string(csvJson), + CsvJSON: string(csvJSON), }, nil } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/declcfg.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/declcfg.go index 7797baa49..9e4f752ee 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/declcfg.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/declcfg.go @@ -6,13 +6,12 @@ import ( "errors" "fmt" - prettyunmarshaler "github.com/operator-framework/operator-registry/pkg/prettyunmarshaler" - "golang.org/x/text/cases" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "github.com/operator-framework/operator-registry/alpha/property" + prettyunmarshaler "github.com/operator-framework/operator-registry/pkg/prettyunmarshaler" ) const ( diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/declcfg_to_model.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/declcfg_to_model.go index 2657efb16..342cab403 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/declcfg_to_model.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/declcfg_to_model.go @@ -178,7 +178,6 @@ func ConvertToModel(cfg DeclarativeConfig) (model.Model, error) { deprecationsByPackage := sets.New[string]() for i, deprecation := range cfg.Deprecations { - // no need to validate schema, since it could not be unmarshaled if missing/invalid if deprecation.Package == "" { @@ -246,6 
+245,7 @@ func ConvertToModel(cfg DeclarativeConfig) (model.Model, error) { } func relatedImagesToModelRelatedImages(in []RelatedImage) []model.RelatedImage { + // nolint:prealloc var out []model.RelatedImage for _, p := range in { out = append(out, model.RelatedImage{ diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/load.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/load.go index f811b3145..5db111b87 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/load.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/load.go @@ -174,7 +174,7 @@ func sendPaths(ctx context.Context, root fs.FS, pathChan chan<- string) error { }) } -func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, walkFn WalkMetasFSFunc, options LoadOptions) error { +func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, walkFn WalkMetasFSFunc, _ LoadOptions) error { for { select { case <-ctx.Done(): // don't block on receiving from pathChan @@ -183,15 +183,20 @@ func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, wal if !ok { return nil } - file, err := root.Open(path) + err := func() error { // using closure to ensure file is closed immediately after use + file, err := root.Open(path) + if err != nil { + return err + } + defer file.Close() + + return WalkMetasReader(file, func(meta *Meta, err error) error { + return walkFn(path, meta, err) + }) + }() if err != nil { return err } - if err := WalkMetasReader(file, func(meta *Meta, err error) error { - return walkFn(path, meta, err) - }); err != nil { - return err - } } } } @@ -205,11 +210,11 @@ func readBundleObjects(b *Bundle) error { if err := json.Unmarshal(props.Value, &obj); err != nil { return fmt.Errorf("package %q, bundle %q: parse property at index %d as bundle object: %v", b.Package, b.Name, i, err) } - objJson, err := yaml.ToJSON(obj.Data) + objJSON, err := yaml.ToJSON(obj.Data) if err != nil { return fmt.Errorf("package %q, bundle %q: convert bundle object property at index %d to JSON: %v", b.Package, b.Name, i, err) } - b.Objects = append(b.Objects, string(objJson)) + b.Objects = append(b.Objects, string(objJSON)) } b.CsvJSON = extractCSV(b.Objects) return nil diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/model_to_declcfg.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/model_to_declcfg.go index 14424d9f0..fabb0d0d2 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/model_to_declcfg.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/model_to_declcfg.go @@ -103,6 +103,7 @@ func traverseModelChannels(mpkg model.Package) ([]Channel, []Bundle) { channels = append(channels, c) } + // nolint:prealloc var bundles []Bundle for _, b := range bundleMap { b.Properties = property.Deduplicate(b.Properties) @@ -120,6 +121,7 @@ func traverseModelChannels(mpkg model.Package) ([]Channel, []Bundle) { } func ModelRelatedImagesToRelatedImages(relatedImages []model.RelatedImage) []RelatedImage { + // nolint:prealloc var out []RelatedImage for _, ri := range relatedImages { out = append(out, RelatedImage{ diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go index 9856c2e1e..6a0451a26 100644 --- 
a/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go @@ -5,8 +5,10 @@ import ( "encoding/json" "fmt" "io" + "maps" "os" "path/filepath" + "slices" "sort" "strings" @@ -20,6 +22,7 @@ import ( type MermaidWriter struct { MinEdgeName string SpecifiedPackageName string + DrawV0Semantics bool } type MermaidOption func(*MermaidWriter) @@ -32,6 +35,7 @@ func NewMermaidWriter(opts ...MermaidOption) *MermaidWriter { m := &MermaidWriter{ MinEdgeName: minEdgeName, SpecifiedPackageName: specifiedPackageName, + DrawV0Semantics: true, } for _, opt := range opts { @@ -52,6 +56,12 @@ func WithSpecifiedPackageName(specifiedPackageName string) MermaidOption { } } +func WithV0Semantics(drawV0Semantics bool) MermaidOption { + return func(o *MermaidWriter) { + o.DrawV0Semantics = drawV0Semantics + } +} + // writes out the channel edges of the declarative config graph in a mermaid format capable of being pasted into // mermaid renderers like github, mermaid.live, etc. // output is sorted lexicographically by package name, and then by channel name @@ -124,10 +134,14 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) } var deprecatedPackage string - deprecatedChannels := []string{} + deprecatedChannelIDs := []string{} + decoratedBundleIDs := map[string][]string{"deprecated": {}, "skipped": {}, "deprecatedskipped": {}} + linkID := 0 + skippedLinkIDs := []string{} for _, c := range cfg.Channels { filteredChannel := writer.filterChannel(&c, versionMap, minVersion, minEdgePackage) + // nolint:nestif if filteredChannel != nil { pkgBuilder, ok := pkgs[c.Package] if !ok { @@ -136,58 +150,102 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) } channelID := fmt.Sprintf("%s-%s", filteredChannel.Package, filteredChannel.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %%%% channel %q\n", filteredChannel.Name)) - pkgBuilder.WriteString(fmt.Sprintf(" subgraph %s[%q]\n", channelID, filteredChannel.Name)) + fmt.Fprintf(pkgBuilder, " %%%% channel %q\n", filteredChannel.Name) + fmt.Fprintf(pkgBuilder, " subgraph %s[%q]\n", channelID, filteredChannel.Name) if depByPackage.Has(filteredChannel.Package) { deprecatedPackage = filteredChannel.Package } if depByChannel.Has(filteredChannel.Name) { - deprecatedChannels = append(deprecatedChannels, channelID) + deprecatedChannelIDs = append(deprecatedChannelIDs, channelID) } - for _, ce := range filteredChannel.Entries { - if versionMap[ce.Name].GE(minVersion) { - bundleDeprecation := "" - if depByBundle.Has(ce.Name) { - bundleDeprecation = ":::deprecated" + // sort edges by decreasing version + sortedEntries := make([]*ChannelEntry, 0, len(filteredChannel.Entries)) + for i := range filteredChannel.Entries { + sortedEntries = append(sortedEntries, &filteredChannel.Entries[i]) + } + sort.Slice(sortedEntries, func(i, j int) bool { + // Sort by decreasing version: greater version comes first + return versionMap[sortedEntries[i].Name].GT(versionMap[sortedEntries[j].Name]) + }) + + skippedEntities := sets.Set[string]{} + + const ( + captureNewEntry = true + processExisting = false + ) + handleSemantics := func(edge string, linkID int, captureNew bool) { + if writer.DrawV0Semantics { + if captureNew { + if skippedEntities.Has(edge) { + skippedLinkIDs = append(skippedLinkIDs, fmt.Sprintf("%d", linkID)) + } else { + skippedEntities.Insert(edge) + } + } else { + if skippedEntities.Has(edge) { + skippedLinkIDs = 
append(skippedLinkIDs, fmt.Sprintf("%d", linkID)) + } } + } + } - entryId := fmt.Sprintf("%s-%s", channelID, ce.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]%s\n", entryId, ce.Name, bundleDeprecation)) + for _, ce := range sortedEntries { + entryID := fmt.Sprintf("%s-%s", channelID, ce.Name) + fmt.Fprintf(pkgBuilder, " %s[%q]\n", entryID, ce.Name) + + // mermaid allows specification of only a single decoration class, so any combinations must be independently represented + switch { + case depByBundle.Has(ce.Name) && skippedEntities.Has(ce.Name): + decoratedBundleIDs["deprecatedskipped"] = append(decoratedBundleIDs["deprecatedskipped"], entryID) + case depByBundle.Has(ce.Name): + decoratedBundleIDs["deprecated"] = append(decoratedBundleIDs["deprecated"], entryID) + case skippedEntities.Has(ce.Name): + decoratedBundleIDs["skipped"] = append(decoratedBundleIDs["skipped"], entryID) + } - if len(ce.Replaces) > 0 { - replacesId := fmt.Sprintf("%s-%s", channelID, ce.Replaces) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", replacesId, ce.Replaces, "replace", entryId, ce.Name)) - } - if len(ce.Skips) > 0 { - for _, s := range ce.Skips { - skipsId := fmt.Sprintf("%s-%s", channelID, s) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", skipsId, s, "skip", entryId, ce.Name)) - } + if len(ce.Skips) > 0 { + for _, s := range ce.Skips { + skipsID := fmt.Sprintf("%s-%s", channelID, s) + fmt.Fprintf(pkgBuilder, " %s[%q]-- %s --> %s[%q]\n", skipsID, s, "skip", entryID, ce.Name) + handleSemantics(s, linkID, captureNewEntry) + linkID++ } - if len(ce.SkipRange) > 0 { - skipRange, err := semver.ParseRange(ce.SkipRange) - if err == nil { - for _, edgeName := range filteredChannel.Entries { - if skipRange(versionMap[edgeName.Name]) { - skipRangeId := fmt.Sprintf("%s-%s", channelID, edgeName.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeId, edgeName.Name, "skipRange", ce.SkipRange, entryId, ce.Name)) - } + } + if len(ce.SkipRange) > 0 { + skipRange, err := semver.ParseRange(ce.SkipRange) + if err == nil { + for _, edgeName := range filteredChannel.Entries { + if skipRange(versionMap[edgeName.Name]) { + skipRangeID := fmt.Sprintf("%s-%s", channelID, edgeName.Name) + fmt.Fprintf(pkgBuilder, " %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeID, edgeName.Name, "skipRange", ce.SkipRange, entryID, ce.Name) + handleSemantics(ce.Name, linkID, processExisting) + linkID++ } - } else { - fmt.Fprintf(os.Stderr, "warning: ignoring invalid SkipRange for package/edge %q/%q: %v\n", c.Package, ce.Name, err) } + } else { + fmt.Fprintf(os.Stderr, "warning: ignoring invalid SkipRange for package/edge %q/%q: %v\n", c.Package, ce.Name, err) } } + // have to process replaces last, because applicability can be impacted by skips + if len(ce.Replaces) > 0 { + replacesID := fmt.Sprintf("%s-%s", channelID, ce.Replaces) + fmt.Fprintf(pkgBuilder, " %s[%q]-- %s --> %s[%q]\n", replacesID, ce.Replaces, "replace", entryID, ce.Name) + handleSemantics(ce.Name, linkID, processExisting) + linkID++ + } } - pkgBuilder.WriteString(" end\n") + fmt.Fprintf(pkgBuilder, " end\n") } } - out.Write([]byte("graph LR\n")) - out.Write([]byte(fmt.Sprintf(" classDef deprecated fill:#E8960F\n"))) + _, _ = out.Write([]byte("graph LR\n")) + _, _ = out.Write([]byte(" classDef deprecated fill:#E8960F\n")) + _, _ = out.Write([]byte(" classDef skipped stroke:#FF0000,stroke-width:4px\n")) + _, _ = out.Write([]byte(" classDef deprecatedskipped fill:#E8960F,stroke:#FF0000,stroke-width:4px\n")) 
pkgNames := []string{} for pname := range pkgs { pkgNames = append(pkgNames, pname) @@ -196,22 +254,36 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) return pkgNames[i] < pkgNames[j] }) for _, pkgName := range pkgNames { - out.Write([]byte(fmt.Sprintf(" %%%% package %q\n", pkgName))) - out.Write([]byte(fmt.Sprintf(" subgraph %q\n", pkgName))) - out.Write([]byte(pkgs[pkgName].String())) - out.Write([]byte(" end\n")) + _, _ = fmt.Fprintf(out, " %%%% package %q\n", pkgName) + _, _ = fmt.Fprintf(out, " subgraph %q\n", pkgName) + _, _ = out.Write([]byte(pkgs[pkgName].String())) + _, _ = out.Write([]byte(" end\n")) } if deprecatedPackage != "" { - out.Write([]byte(fmt.Sprintf("style %s fill:#989695\n", deprecatedPackage))) + _, _ = fmt.Fprintf(out, "style %s fill:#989695\n", deprecatedPackage) + } + + if len(deprecatedChannelIDs) > 0 { + for _, deprecatedChannel := range deprecatedChannelIDs { + _, _ = fmt.Fprintf(out, "style %s fill:#DCD0FF\n", deprecatedChannel) + } } - if len(deprecatedChannels) > 0 { - for _, deprecatedChannel := range deprecatedChannels { - out.Write([]byte(fmt.Sprintf("style %s fill:#DCD0FF\n", deprecatedChannel))) + // express the decoration classes + sortedKeys := slices.Sorted(maps.Keys(decoratedBundleIDs)) + for _, key := range sortedKeys { + if len(decoratedBundleIDs[key]) > 0 { + b := slices.Clone(decoratedBundleIDs[key]) + slices.Sort(b) + _, _ = fmt.Fprintf(out, "class %s %s\n", strings.Join(b, ","), key) } } + if len(skippedLinkIDs) > 0 { + _, _ = fmt.Fprintf(out, "linkStyle %s %s\n", strings.Join(skippedLinkIDs, ","), "stroke:#FF0000,stroke-width:3px,stroke-dasharray:5;") + } + return nil } @@ -236,6 +308,7 @@ func (writer *MermaidWriter) filterChannel(c *Channel, versionMap map[string]sem out := &Channel{Name: c.Name, Package: c.Package, Properties: c.Properties, Entries: []ChannelEntry{}} for _, ce := range c.Entries { filteredCe := ChannelEntry{Name: ce.Name} + // nolint:nestif if writer.MinEdgeName == "" { // no minimum-edge specified filteredCe.SkipRange = ce.SkipRange @@ -535,6 +608,8 @@ func writeFile(cfg DeclarativeConfig, filename string, writeFunc WriteFunc) erro if err := writeFunc(cfg, buf); err != nil { return fmt.Errorf("write to buffer for %q: %v", filename, err) } + // we explicitly want to generate content from this function which is limited only by the user's umask (G306) + // nolint:gosec if err := os.WriteFile(filename, buf.Bytes(), 0666); err != nil { return fmt.Errorf("write file %q: %v", filename, err) } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/model/error.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/model/error.go index 0ad0f7adb..e99cb2ca8 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/alpha/model/error.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/model/error.go @@ -2,6 +2,7 @@ package model import ( "bytes" + "errors" "fmt" "strings" ) @@ -31,7 +32,7 @@ func (v *validationError) Error() string { func (v *validationError) errorPrefix(prefix []rune, last bool, seen []error) string { for _, s := range seen { - if v == s { + if errors.Is(v, s) { return "" } } @@ -56,7 +57,9 @@ func (v *validationError) errorPrefix(prefix []rune, last bool, seen []error) st } else { subPrefix = append(subPrefix, []rune("├── ")...) 
} - if verr, ok := serr.(*validationError); ok { + + var verr *validationError + if errors.As(serr, &verr) { errMsg.WriteString(verr.errorPrefix(subPrefix, subLast, seen)) } else { errMsg.WriteString(fmt.Sprintf("%s%s\n", string(subPrefix), serr)) diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go index d570f93c3..af6c391e6 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go @@ -161,6 +161,7 @@ func (i *Icon) Validate() error { return result.orNil() } +// nolint:unused func (i *Icon) validateData() error { if !filetype.IsImage(i.Data) { return errors.New("icon data is not an image") @@ -287,6 +288,10 @@ func (c *Channel) validateReplacesChain() error { if _, ok := chainFrom[cur.Name]; !ok { chainFrom[cur.Name] = []string{cur.Name} } + // if the replaces edge is known to be skipped, disregard it + if skippedBundles.Has(cur.Replaces) { + break + } for k := range chainFrom { chainFrom[k] = append(chainFrom[k], cur.Replaces) } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/alpha/property/property.go b/tools/vendor/github.com/operator-framework/operator-registry/alpha/property/property.go index 6869b2e67..6fb792dda 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/alpha/property/property.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/alpha/property/property.go @@ -7,8 +7,9 @@ import ( "fmt" "reflect" - "github.com/operator-framework/api/pkg/operators/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/operator-framework/api/pkg/operators/v1alpha1" ) type Property struct { @@ -177,6 +178,7 @@ func Deduplicate(in []Property) []Property { } props := map[key]Property{} + // nolint:prealloc var out []Property for _, p := range in { k := key{p.Type, string(p.Value)} diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/api/api_to_model.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/api/api_to_model.go index 5c0cb603a..50088ab4f 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/api/api_to_model.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/api/api_to_model.go @@ -42,6 +42,7 @@ func ConvertAPIBundleToModelBundle(b *Bundle) (*model.Bundle, error) { } func convertAPIBundleToModelProperties(b *Bundle) ([]property.Property, error) { + // nolint:prealloc var out []property.Property providedGVKs := map[property.GVK]struct{}{} diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/api/model_to_api.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/api/model_to_api.go index e7714713d..b3368383f 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/api/model_to_api.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/api/model_to_api.go @@ -5,10 +5,11 @@ import ( "encoding/json" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/operator-framework/api/pkg/lib/version" "github.com/operator-framework/api/pkg/operators" "github.com/operator-framework/api/pkg/operators/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/operator-framework/operator-registry/alpha/model" "github.com/operator-framework/operator-registry/alpha/property" @@ -20,8 +21,8 
@@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { return nil, fmt.Errorf("parse properties: %v", err) } - csvJson := b.CsvJSON - if csvJson == "" && len(props.CSVMetadatas) == 1 { + csvJSON := b.CsvJSON + if csvJSON == "" && len(props.CSVMetadatas) == 1 { var icons []v1alpha1.Icon if b.Package.Icon != nil { icons = []v1alpha1.Icon{{ @@ -37,7 +38,7 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { // attempting to write to a nil map. StrategyName: "deployment", } - csv.Spec.Version = version.OperatorVersion{b.Version} + csv.Spec.Version = version.OperatorVersion{Version: b.Version} csv.Spec.RelatedImages = convertModelRelatedImagesToCSVRelatedImages(b.RelatedImages) if csv.Spec.Description == "" { csv.Spec.Description = b.Package.Description @@ -46,9 +47,9 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { if err != nil { return nil, err } - csvJson = string(csvData) + csvJSON = string(csvData) if len(b.Objects) == 0 { - b.Objects = []string{csvJson} + b.Objects = []string{csvJSON} } } @@ -76,7 +77,7 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { Properties: convertModelPropertiesToAPIProperties(b.Properties), Replaces: b.Replaces, Skips: b.Skips, - CsvJson: csvJson, + CsvJson: csvJSON, Object: b.Objects, Deprecation: deprecation, }, nil @@ -127,6 +128,7 @@ func csvMetadataToCsv(m property.CSVMetadata) v1alpha1.ClusterServiceVersion { } func gvksProvidedtoAPIGVKs(in []property.GVK) []*GroupVersionKind { + // nolint:prealloc var out []*GroupVersionKind for _, gvk := range in { out = append(out, &GroupVersionKind{ @@ -138,6 +140,7 @@ func gvksProvidedtoAPIGVKs(in []property.GVK) []*GroupVersionKind { return out } func gvksRequirestoAPIGVKs(in []property.GVKRequired) []*GroupVersionKind { + // nolint:prealloc var out []*GroupVersionKind for _, gvk := range in { out = append(out, &GroupVersionKind{ @@ -150,9 +153,9 @@ func gvksRequirestoAPIGVKs(in []property.GVKRequired) []*GroupVersionKind { } func convertModelPropertiesToAPIProperties(props []property.Property) []*Property { + // nolint:prealloc var out []*Property for _, prop := range props { - // NOTE: This is a special case filter to prevent problems with existing client implementations that // project bundle properties into CSV annotations and store those CSVs in a size-constrained // storage backend (e.g. etcd via kube-apiserver). 
If the bundle object property has data inlined @@ -172,6 +175,7 @@ func convertModelPropertiesToAPIProperties(props []property.Property) []*Propert } func convertModelPropertiesToAPIDependencies(props []property.Property) ([]*Dependency, error) { + // nolint:prealloc var out []*Dependency for _, prop := range props { switch prop.Type { @@ -196,6 +200,7 @@ func convertModelPropertiesToAPIDependencies(props []property.Property) ([]*Depe } func convertModelRelatedImagesToCSVRelatedImages(in []model.RelatedImage) []v1alpha1.RelatedImage { + // nolint:prealloc var out []v1alpha1.RelatedImage for _, ri := range in { out = append(out, v1alpha1.RelatedImage{ diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/containertool.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/containertool.go index ea38c21d1..b2e51977a 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/containertool.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/containertool.go @@ -8,7 +8,8 @@ const ( DockerTool ) -func (t ContainerTool) String() (s string) { +func (t ContainerTool) String() string { + var s string switch t { case NoneTool: s = "none" @@ -17,7 +18,7 @@ func (t ContainerTool) String() (s string) { case DockerTool: s = "docker" } - return + return s } func (t ContainerTool) CommandFactory() CommandFactory { @@ -30,7 +31,8 @@ func (t ContainerTool) CommandFactory() CommandFactory { return &StubCommandFactory{} } -func NewContainerTool(s string, defaultTool ContainerTool) (t ContainerTool) { +func NewContainerTool(s string, defaultTool ContainerTool) ContainerTool { + var t ContainerTool switch s { case "podman": t = PodmanTool @@ -41,16 +43,17 @@ func NewContainerTool(s string, defaultTool ContainerTool) (t ContainerTool) { default: t = defaultTool } - return + return t } // NewCommandContainerTool returns a tool that can be used in `exec` statements. 
-func NewCommandContainerTool(s string) (t ContainerTool) { +func NewCommandContainerTool(s string) ContainerTool { + var t ContainerTool switch s { case "docker": t = DockerTool default: t = PodmanTool } - return + return t } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/dockerfilegenerator.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/dockerfilegenerator.go index 79059b9ee..dd46ce22f 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/dockerfilegenerator.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/dockerfilegenerator.go @@ -9,9 +9,11 @@ import ( const ( DefaultBinarySourceImage = "quay.io/operator-framework/opm:latest" - DefaultDbLocation = "/database/index.db" - DbLocationLabel = "operators.operatorframework.io.index.database.v1" - ConfigsLocationLabel = "operators.operatorframework.io.index.configs.v1" + // nolint:stylecheck + DefaultDbLocation = "/database/index.db" + // nolint:stylecheck + DbLocationLabel = "operators.operatorframework.io.index.database.v1" + ConfigsLocationLabel = "operators.operatorframework.io.index.configs.v1" ) // DockerfileGenerator defines functions to generate index dockerfiles diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/labelreader.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/labelreader.go index 57de73829..18ad46d98 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/labelreader.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/labelreader.go @@ -71,5 +71,6 @@ func (r ImageLabelReader) GetLabelsFromImage(image string) (map[string]string, e return data[0].Labels, nil } + // nolint:stylecheck return nil, fmt.Errorf("Unable to parse label data from container") } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/runner.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/runner.go index 660c92c6a..b5995b40f 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/runner.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/containertools/runner.go @@ -2,6 +2,7 @@ package containertools import ( + "errors" "fmt" "os/exec" "strings" @@ -83,13 +84,14 @@ func (r *ContainerCommandRunner) GetToolName() string { func (r *ContainerCommandRunner) Pull(image string) error { args := r.argsForCmd("pull", image) + // nolint:gosec command := exec.Command(r.containerTool.String(), args...) r.logger.Infof("running %s", command.String()) out, err := command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error pulling image: %s. %v", string(out), err) } @@ -114,7 +116,7 @@ func (r *ContainerCommandRunner) Build(dockerfile, tag string) error { out, err := command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error building image: %s. %v", string(out), err) } @@ -125,6 +127,7 @@ func (r *ContainerCommandRunner) Build(dockerfile, tag string) error { func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { args := r.argsForCmd("create", image, "") + // nolint:gosec command := exec.Command(r.containerTool.String(), args...) 
r.logger.Infof("running %s create", r.containerTool) @@ -133,7 +136,8 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err := command.Output() if err != nil { msg := err.Error() - if exitErr, ok := err.(*exec.ExitError); ok { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { msg = fmt.Sprintf("%s: %s", err, exitErr.Stderr) } return fmt.Errorf("error creating container %s: %s", string(out), msg) @@ -141,6 +145,7 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { id := strings.TrimSuffix(string(out), "\n") args = r.argsForCmd("cp", id+":"+src, dst) + // nolint:gosec command = exec.Command(r.containerTool.String(), args...) r.logger.Infof("running %s cp", r.containerTool) @@ -148,11 +153,12 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err = command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error copying container directory %s: %v", string(out), err) } args = r.argsForCmd("rm", id) + // nolint:gosec command = exec.Command(r.containerTool.String(), args...) r.logger.Infof("running %s rm", r.containerTool) @@ -160,7 +166,7 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err = command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error removing container %s: %v", string(out), err) } @@ -172,6 +178,7 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { func (r *ContainerCommandRunner) Inspect(image string) ([]byte, error) { args := r.argsForCmd("inspect", image) + // nolint:gosec command := exec.Command(r.containerTool.String(), args...) r.logger.Infof("running %s inspect", r.containerTool) @@ -179,7 +186,7 @@ func (r *ContainerCommandRunner) Inspect(image string) ([]byte, error) { out, err := command.Output() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return nil, err } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/options.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/options.go index d447dc155..c045750e2 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/options.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/options.go @@ -60,28 +60,31 @@ func defaultConfig() *RegistryConfig { // NewRegistry returns a new containerd Registry and a function to destroy it after use. // The destroy function is safe to call more than once, but is a no-op after the first call. 
-func NewRegistry(options ...RegistryOption) (registry *Registry, err error) { +func NewRegistry(options ...RegistryOption) (*Registry, error) { + var registry *Registry + config := defaultConfig() config.apply(options) - if err = config.complete(); err != nil { - return + if err := config.complete(); err != nil { + return nil, err } cs, err := contentlocal.NewStore(config.CacheDir) if err != nil { - return + return nil, err } var bdb *bolt.DB bdb, err = bolt.Open(config.DBPath, 0644, nil) if err != nil { - return + return nil, err } var once sync.Once + // nolint:nonamedreturns destroy := func() (destroyErr error) { once.Do(func() { - if destroyErr = bdb.Close(); destroyErr != nil { + if err := bdb.Close(); err != nil { return } if config.PreserveCache { @@ -102,12 +105,13 @@ func NewRegistry(options ...RegistryOption) (registry *Registry, err error) { resolverFunc: func(repo string) (remotes.Resolver, error) { return NewResolver(httpClient, config.ResolverConfigDir, config.PlainHTTP, repo) }, + // nolint: staticcheck platform: platforms.Ordered(platforms.DefaultSpec(), specs.Platform{ OS: "linux", Architecture: "amd64", }), } - return + return registry, nil } type RegistryOption func(config *RegistryConfig) @@ -168,12 +172,15 @@ func newClient(skipTlSVerify bool, roots *x509.CertPool) *http.Client { TLSClientConfig: &tls.Config{ InsecureSkipVerify: false, RootCAs: roots, + MinVersion: tls.VersionTLS12, }, } if skipTlSVerify { transport.TLSClientConfig = &tls.Config{ + // nolint:gosec InsecureSkipVerify: true, + MinVersion: tls.VersionTLS12, } } headers := http.Header{} diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go index 61fb5c73d..9d9575942 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go @@ -14,14 +14,14 @@ import ( "github.com/containerd/containerd/archive" "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" - "github.com/containers/image/v5/docker/reference" + "github.com/containerd/errdefs" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" @@ -34,7 +34,8 @@ type Registry struct { destroy func() error log *logrus.Entry resolverFunc func(repo string) (remotes.Resolver, error) - platform platforms.MatchComparer + // nolint:staticcheck + platform platforms.MatchComparer } var _ image.Registry = &Registry{} @@ -56,9 +57,26 @@ func (r *Registry) Pull(ctx context.Context, ref image.Reference) error { return err } - name, root, err := resolver.Resolve(ctx, ref.String()) - if err != nil { - return fmt.Errorf("error resolving name for image ref %s: %v", ref.String(), err) + retryBackoff := wait.Backoff{ + Duration: 1 * time.Second, + Factor: 1.0, + Jitter: 0.1, + Steps: 5, + } + + var name string + var root ocispec.Descriptor + if err := retry.OnError(retryBackoff, + func(pullErr error) bool { + r.log.Warnf("Error resolving registry %q: %v. 
Retrying", ref.String(), pullErr) + return true + }, + func() error { + name, root, err = resolver.Resolve(ctx, ref.String()) + return err + }, + ); err != nil { + return fmt.Errorf("error resolving remote name %s: %v", ref.String(), err) } r.log.Debugf("resolved name: %s", name) @@ -67,13 +85,6 @@ func (r *Registry) Pull(ctx context.Context, ref image.Reference) error { return err } - retryBackoff := wait.Backoff{ - Duration: 1 * time.Second, - Factor: 1.0, - Jitter: 0.1, - Steps: 5, - } - if err := retry.OnError(retryBackoff, func(pullErr error) bool { if nonRetriablePullError.MatchString(pullErr.Error()) { @@ -143,7 +154,7 @@ func (r *Registry) Labels(ctx context.Context, ref image.Reference) (map[string] } // Destroy cleans up the on-disk boltdb file and other cache files, unless preserve cache is true -func (r *Registry) Destroy() (err error) { +func (r *Registry) Destroy() error { return r.destroy() } @@ -263,6 +274,7 @@ const paxSchilyXattr = "SCHILY.xattr." // dropXattrs removes all xattrs from a Header. // This is useful for unpacking on systems where writing certain xattrs is a restricted operation; e.g. "security.capability" on SELinux. func dropXattrs(h *tar.Header) (bool, error) { + // nolint:staticcheck h.Xattrs = nil // Deprecated, but still in use, clear anyway. for key := range h.PAXRecords { if strings.HasPrefix(key, paxSchilyXattr) { // Xattrs are stored under keys with the "Schilly.xattr." prefix. diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/resolver.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/resolver.go index 95e343f4c..5d0366f81 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/resolver.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/resolver.go @@ -7,10 +7,10 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/containers/common/pkg/auth" - "github.com/containers/image/v5/pkg/docker/config" - "github.com/containers/image/v5/types" dockerconfig "github.com/docker/cli/cli/config" + "go.podman.io/common/pkg/auth" + "go.podman.io/image/v5/pkg/docker/config" + "go.podman.io/image/v5/types" ) func NewResolver(client *http.Client, configDir string, plainHTTP bool, repo string) (remotes.Resolver, error) { diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go new file mode 100644 index 000000000..ec6e920d3 --- /dev/null +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go @@ -0,0 +1,294 @@ +package containersimageregistry + +import ( + "archive/tar" + "context" + "fmt" + "os" + "path/filepath" + + "github.com/containerd/containerd/archive" + dockerconfig "github.com/docker/cli/cli/config" + "go.podman.io/common/pkg/auth" + "go.podman.io/image/v5/copy" + "go.podman.io/image/v5/docker" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/image" + "go.podman.io/image/v5/oci/layout" + "go.podman.io/image/v5/pkg/compression" + "go.podman.io/image/v5/pkg/docker/config" + "go.podman.io/image/v5/signature" + "go.podman.io/image/v5/types" + "oras.land/oras-go/v2/content/oci" + + orimage "github.com/operator-framework/operator-registry/pkg/image" +) + +var _ 
orimage.Registry = (*Registry)(nil) + +type Registry struct { + sourceCtx *types.SystemContext + cache *cacheConfig +} + +var DefaultSystemContext = &types.SystemContext{OSChoice: "linux"} + +func New(sourceCtx *types.SystemContext, opts ...Option) (orimage.Registry, error) { + if sourceCtx == nil { + sourceCtx = &types.SystemContext{} + } + reg := &Registry{ + sourceCtx: sourceCtx, + } + + for _, opt := range opts { + if err := opt(reg); err != nil { + return nil, err + } + } + + if reg.cache == nil { + var err error + reg.cache, err = getDefaultImageCache() + if err != nil { + return nil, err + } + } + + return reg, nil +} + +func NewDefault() (orimage.Registry, error) { + return New(DefaultSystemContext) +} + +type cacheConfig struct { + baseDir string + preserve bool +} + +func (c *cacheConfig) ociLayoutDir() string { + return filepath.Join(c.baseDir, "oci-layout") +} +func (c *cacheConfig) blobInfoCacheDir() string { + return filepath.Join(c.baseDir, "blob-info-cache") +} + +func (c *cacheConfig) getSystemContext() *types.SystemContext { + return &types.SystemContext{ + BlobInfoCacheDir: c.blobInfoCacheDir(), + } +} + +type Option func(*Registry) error + +func getDefaultImageCache() (*cacheConfig, error) { + if dir := os.Getenv("OLM_CACHE_DIR"); dir != "" { + return newCacheConfig(filepath.Join(dir, "images"), true), nil + } + return getTemporaryImageCache() +} + +func getTemporaryImageCache() (*cacheConfig, error) { + tmpDir, err := os.MkdirTemp("", "opm-containers-image-cache-") + if err != nil { + return nil, err + } + return newCacheConfig(tmpDir, false), nil +} + +func newCacheConfig(dir string, preserve bool) *cacheConfig { + return &cacheConfig{ + baseDir: dir, + preserve: preserve, + } +} + +func WithTemporaryImageCache() Option { + return func(r *Registry) error { + var err error + r.cache, err = getTemporaryImageCache() + if err != nil { + return err + } + return nil + } +} + +func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) Option { + return func(r *Registry) error { + r.sourceCtx.DockerDaemonInsecureSkipTLSVerify = insecureSkipTLSVerify + r.sourceCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(insecureSkipTLSVerify) + r.sourceCtx.OCIInsecureSkipTLSVerify = insecureSkipTLSVerify + return nil + } +} + +func (r *Registry) Pull(ctx context.Context, ref orimage.Reference) error { + namedRef, err := reference.ParseNamed(ref.String()) + if err != nil { + return err + } + dockerRef, err := docker.NewReference(namedRef) + if err != nil { + return err + } + + if err := os.MkdirAll(r.cache.ociLayoutDir(), 0700); err != nil { + return err + } + ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String()) + if err != nil { + return err + } + + policy, err := signature.DefaultPolicy(r.sourceCtx) + if err != nil { + return err + } + policyContext, err := signature.NewPolicyContext(policy) + if err != nil { + return err + } + + sourceCtx := r.sourceCtx + authFile := getAuthFile(r.sourceCtx, namedRef.String()) + if authFile != "" { + sourceCtx.AuthFilePath = authFile + } + + if _, err := copy.Image(ctx, policyContext, ociLayoutRef, dockerRef, &copy.Options{ + SourceCtx: sourceCtx, + DestinationCtx: r.cache.getSystemContext(), + OptimizeDestinationImageAlreadyExists: true, + + // We use the OCI layout as temporary storage, and + // since pushing signatures for OCI images is not supported, + // we remove the source signatures when copying. + // Signature validation will still be performed + // according to the provided policy context. 
+ RemoveSignatures: true, + }); err != nil { + return err + } + return nil +} + +func (r *Registry) Unpack(ctx context.Context, ref orimage.Reference, unpackDir string) error { + ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String()) + if err != nil { + return fmt.Errorf("could not create oci layout reference: %w", err) + } + + ociLayoutCtx := r.cache.getSystemContext() + imageSource, err := ociLayoutRef.NewImageSource(ctx, ociLayoutCtx) + if err != nil { + return fmt.Errorf("failed to create oci image source: %v", err) + } + defer imageSource.Close() + + img, err := image.FromSource(ctx, ociLayoutCtx, imageSource) + if err != nil { + return fmt.Errorf("could not get image from oci image source: %v", err) + } + + if err := os.MkdirAll(unpackDir, 0700); err != nil { + return err + } + + for _, info := range img.LayerInfos() { + if err := func() error { + layer, _, err := imageSource.GetBlob(ctx, info, nil) + if err != nil { + return fmt.Errorf("failed to get blob: %v", err) + } + defer layer.Close() + + decompressed, _, err := compression.AutoDecompress(layer) + if err != nil { + return fmt.Errorf("failed to decompress layer: %v", err) + } + + if _, err := archive.Apply(ctx, unpackDir, decompressed, archive.WithFilter(func(th *tar.Header) (bool, error) { + th.PAXRecords = nil + th.Xattrs = nil //nolint:staticcheck + th.Uid = os.Getuid() + th.Gid = os.Getgid() + th.Mode = 0600 + if th.FileInfo().IsDir() { + th.Mode = 0700 + } + return true, nil + })); err != nil { + return fmt.Errorf("failed to apply layer: %v", err) + } + return nil + }(); err != nil { + return err + } + } + return nil +} + +func (r *Registry) Labels(ctx context.Context, ref orimage.Reference) (map[string]string, error) { + ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String()) + if err != nil { + return nil, fmt.Errorf("could not create oci layout reference: %w", err) + } + + ociLayoutCtx := r.cache.getSystemContext() + img, err := ociLayoutRef.NewImage(ctx, ociLayoutCtx) + if err != nil { + return nil, fmt.Errorf("could not load image from oci image reference: %v", err) + } + imgConfig, err := img.OCIConfig(ctx) + if err != nil { + return nil, fmt.Errorf("could not get oci config from image: %v", err) + } + return imgConfig.Config.Labels, nil +} + +func (r *Registry) Destroy() error { + if !r.cache.preserve { + return os.RemoveAll(r.cache.baseDir) + } + + store, err := oci.NewWithContext(context.TODO(), r.cache.ociLayoutDir()) + if err != nil { + return fmt.Errorf("open cache for garbage collection: %v", err) + } + if err := store.GC(context.TODO()); err != nil { + return fmt.Errorf("garbage collection failed: %v", err) + } + return nil +} + +// This is a slight variation on the auth.GetDefaultAuthFile function provided by containers/image. +// The reason for this variation is so that this image registry implementation can be used as a drop-in +// replacement for our existing containerd-based image registry client, and remain compatible with current +// behavior. +func getAuthFile(sourceCtx *types.SystemContext, ref string) string { + // By default, we will use the docker config file in the standard docker config directory. + // However, if REGISTRY_AUTH_FILE or DOCKER_CONFIG environment variables are set, we will + // use those (in that order) instead to derive the auth config file. 
+ authFile := filepath.Join(dockerconfig.Dir(), dockerconfig.ConfigFileName) + if defaultAuthFile := auth.GetDefaultAuthFile(); defaultAuthFile != "" { + authFile = defaultAuthFile + } + + // In order to maintain backward-compatibility with the original credential getter from + // the containerd registry implementation, we will first try to get the credentials from + // the auth config file we derived above, if it exists. If we find a matching credential + // in this file, we'll use this file. + if stat, statErr := os.Stat(authFile); statErr == nil && stat.Mode().IsRegular() { + if _, err := config.GetCredentials(&types.SystemContext{AuthFilePath: authFile}, ref); err == nil { + return authFile + } + } + // If the auth file was unset, doesn't exist, or if we couldn't find credentials in it, + // we'll use system defaults from containers/image (podman/skopeo) to lookup the credentials. + if sourceCtx != nil && sourceCtx.AuthFilePath != "" { + return sourceCtx.AuthFilePath + } + return "" +} diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/execregistry/registry.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/execregistry/registry.go index 40769d23e..0d299b66d 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/execregistry/registry.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/execregistry/registry.go @@ -26,7 +26,7 @@ type Registry struct { var _ image.Registry = &Registry{} // NewRegistry instantiates and returns a new registry which manipulates images via exec podman/docker commands. -func NewRegistry(tool containertools.ContainerTool, logger *logrus.Entry, opts ...containertools.RunnerOption) (registry *Registry, err error) { +func NewRegistry(tool containertools.ContainerTool, logger *logrus.Entry, opts ...containertools.RunnerOption) (*Registry, error) { return &Registry{ log: logger, cmd: containertools.NewCommandRunner(tool, logger, opts...), diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/mock.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/mock.go index f46d58516..1709a4a5d 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/mock.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/image/mock.go @@ -39,7 +39,7 @@ func (i *MockImage) unpack(dir string) error { if err := os.MkdirAll(pathDir, 0777); err != nil { return err } - return os.WriteFile(path, data, 0666) + return os.WriteFile(path, data, 0600) }) } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/log/null.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/log/null.go deleted file mode 100644 index 9b7886c0c..000000000 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/log/null.go +++ /dev/null @@ -1,13 +0,0 @@ -package log - -import ( - "io" - - "github.com/sirupsen/logrus" -) - -func Null() *logrus.Entry { - l := logrus.New() - l.SetOutput(io.Discard) - return logrus.NewEntry(l) -} diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/log/writerhook.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/log/writerhook.go deleted file mode 100644 index a67dc675f..000000000 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/log/writerhook.go +++ /dev/null @@ -1,76 +0,0 @@ -package log - -import ( - "io" - "os" - - 
"github.com/sirupsen/logrus" -) - -// Taken from https://github.com/sirupsen/logrus/issues/678 -// Used to split log level output until this is implemented internally by logrus - -// WriterHook is a hook that writes logs of specified LogLevels to specified Writer -type WriterHook struct { - Writer io.Writer - LogLevels []logrus.Level -} - -// Fire will be called when some logging function is called with current hook -// It will format log entry to string and write it to appropriate writer -func (hook *WriterHook) Fire(entry *logrus.Entry) error { - line, err := entry.String() - if err != nil { - return err - } - _, err = hook.Writer.Write([]byte(line)) - return err -} - -// Levels define on which log levels this hook would trigger -func (hook *WriterHook) Levels() []logrus.Level { - return hook.LogLevels -} - -// AddHooks adds hooks to send logs to different destinations depending on level -func AddHooks(hooks ...*WriterHook) { - // Send all logs to nowhere by default - logrus.SetOutput(io.Discard) - - for _, hook := range hooks { - logrus.AddHook(hook) - } -} - -func AddDefaultWriterHooks(terminationLogPath string) error { - terminationLogFile, err := os.OpenFile(terminationLogPath, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return err - } - AddHooks( - &WriterHook{ - Writer: terminationLogFile, - LogLevels: []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - }, - }, - &WriterHook{ - Writer: os.Stderr, - LogLevels: []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - }, - }, - &WriterHook{ - Writer: os.Stdout, - LogLevels: []logrus.Level{ - logrus.InfoLevel, - logrus.DebugLevel, - }, - }) - - return nil -} diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/semver/semver.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/semver/semver.go index 6875566d0..60721cdaf 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/semver/semver.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/semver/semver.go @@ -8,6 +8,7 @@ import ( // BuildIdCompare compares two versions and returns negative one if the first arg is less than the second arg, positive one if it is larger, and zero if they are equal. // This comparison follows typical semver precedence rules, with one addition: whenever two versions are equal with the exception of their build-ids, the build-ids are compared using prerelease precedence rules. Further, versions with no build-id are always less than versions with build-ids; e.g. 1.0.0 < 1.0.0+1. 
+// nolint:stylecheck func BuildIdCompare(b semver.Version, v semver.Version) (int, error) { if c := b.Compare(v); c != 0 { return c, nil @@ -27,6 +28,7 @@ func BuildIdCompare(b semver.Version, v semver.Version) (int, error) { } func buildAsPrerelease(v semver.Version) (*semver.Version, error) { + // nolint:prealloc var pre []semver.PRVersion for _, b := range v.Build { p, err := semver.NewPRVersion(b) diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/validation/bundle.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/validation/bundle.go index d8f6d5b8e..a88b7b630 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/validation/bundle.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/lib/validation/bundle.go @@ -8,12 +8,14 @@ import ( "github.com/operator-framework/api/pkg/validation/errors" interfaces "github.com/operator-framework/api/pkg/validation/interfaces" + "github.com/operator-framework/operator-registry/pkg/registry" ) var RegistryBundleValidator interfaces.Validator = interfaces.ValidatorFunc(validateBundles) -func validateBundles(objs ...interface{}) (results []errors.ManifestResult) { +func validateBundles(objs ...interface{}) []errors.ManifestResult { + var results []errors.ManifestResult for _, obj := range objs { switch v := obj.(type) { case *registry.Bundle: @@ -23,7 +25,8 @@ func validateBundles(objs ...interface{}) (results []errors.ManifestResult) { return results } -func validateBundle(bundle *registry.Bundle) (result errors.ManifestResult) { +func validateBundle(bundle *registry.Bundle) errors.ManifestResult { + var result errors.ManifestResult csv, err := bundle.ClusterServiceVersion() if err != nil { result.Add(errors.ErrInvalidParse("error getting bundle CSV", err)) @@ -39,7 +42,8 @@ func validateBundle(bundle *registry.Bundle) (result errors.ManifestResult) { return result } -func validateOwnedCRDs(bundle *registry.Bundle, csv *registry.ClusterServiceVersion) (result errors.ManifestResult) { +func validateOwnedCRDs(bundle *registry.Bundle, csv *registry.ClusterServiceVersion) errors.ManifestResult { + var result errors.ManifestResult ownedKeys, _, err := csv.GetCustomResourceDefintions() if err != nil { result.Add(errors.ErrInvalidParse("error getting CSV CRDs", err)) diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/prettyunmarshaler/prettyunmarshaler.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/prettyunmarshaler/prettyunmarshaler.go index 2f740151a..788428440 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/prettyunmarshaler/prettyunmarshaler.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/prettyunmarshaler/prettyunmarshaler.go @@ -8,29 +8,29 @@ import ( "strings" ) -type JsonUnmarshalError struct { +type JSONUnmarshalError struct { data []byte offset int64 err error } -func NewJSONUnmarshalError(data []byte, err error) *JsonUnmarshalError { +func NewJSONUnmarshalError(data []byte, err error) *JSONUnmarshalError { var te *json.UnmarshalTypeError if errors.As(err, &te) { - return &JsonUnmarshalError{data: data, offset: te.Offset, err: te} + return &JSONUnmarshalError{data: data, offset: te.Offset, err: te} } var se *json.SyntaxError if errors.As(err, &se) { - return &JsonUnmarshalError{data: data, offset: se.Offset, err: se} + return &JSONUnmarshalError{data: data, offset: se.Offset, err: se} } - return &JsonUnmarshalError{data: data, offset: -1, 
err: err} + return &JSONUnmarshalError{data: data, offset: -1, err: err} } -func (e *JsonUnmarshalError) Error() string { +func (e *JSONUnmarshalError) Error() string { return e.err.Error() } -func (e *JsonUnmarshalError) Pretty() string { +func (e *JSONUnmarshalError) Pretty() string { if len(e.data) == 0 || e.offset < 0 || e.offset > int64(len(e.data)) { return e.err.Error() } @@ -82,7 +82,6 @@ func (e *JsonUnmarshalError) Pretty() string { // We found the byte in the pretty data that matches the byte in the original data, // so increment the pretty index. pIndex++ - } _, _ = sb.Write(pretty[:pOffset]) diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundle.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundle.go index b5fb28b94..8b3be74b0 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundle.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundle.go @@ -7,7 +7,6 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -54,7 +53,8 @@ type Bundle struct { func NewBundle(name string, annotations *Annotations, objs ...*unstructured.Unstructured) *Bundle { bundle := &Bundle{ - Name: name, + Name: name, + // nolint:staticcheck Package: annotations.PackageName, Annotations: annotations, } @@ -62,6 +62,7 @@ func NewBundle(name string, annotations *Annotations, objs ...*unstructured.Unst bundle.Add(o) } + // nolint:staticcheck if annotations == nil { return bundle } @@ -168,6 +169,7 @@ func (b *Bundle) CustomResourceDefinitions() ([]runtime.Object, error) { if err := b.cache(); err != nil { return nil, err } + // nolint:prealloc var crds []runtime.Object for _, crd := range b.v1crds { crds = append(crds, crd) @@ -235,7 +237,6 @@ func (b *Bundle) RequiredAPIs() (map[APIKey]struct{}, error) { return nil, fmt.Errorf("couldn't parse plural.group from crd name: %s", api.Name) } required[APIKey{parts[1], api.Version, api.Kind, parts[0]}] = struct{}{} - } _, requiredAPIs, err := csv.GetApiServiceDefinitions() if err != nil { @@ -278,10 +279,18 @@ func (b *Bundle) AllProvidedAPIsInBundle() error { return nil } -func (b *Bundle) Serialize() (csvName, bundleImage string, csvBytes []byte, bundleBytes []byte, annotationBytes []byte, err error) { +// (csvName, bundleImage string, csvBytes []byte, bundleBytes []byte, annotationBytes []byte, err error) { +func (b *Bundle) Serialize() (string, string, []byte, []byte, []byte, error) { + var bundleBytes []byte + var csvName string + var csvBytes []byte + var annotationBytes []byte + var err error + csvCount := 0 for _, obj := range b.Objects { - objBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + var objBytes []byte + objBytes, err = runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return "", "", nil, nil, nil, err } @@ -301,7 +310,7 @@ func (b *Bundle) Serialize() (csvName, bundleImage string, csvBytes []byte, bund } if b.Annotations != nil { - annotationBytes, err = json.Marshal(b.Annotations) + annotationBytes, _ = json.Marshal(b.Annotations) } return csvName, b.BundleImage, csvBytes, bundleBytes, annotationBytes, nil diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundlegraphloader.go 
b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundlegraphloader.go index e8664c4e8..2854003a2 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundlegraphloader.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundlegraphloader.go @@ -16,6 +16,7 @@ type BundleGraphLoader struct { func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, annotations *AnnotationsFile, skippatch bool) (*Package, error) { bundleVersion, err := bundle.Version() if err != nil { + // nolint:stylecheck return nil, fmt.Errorf("Unable to extract bundle version from bundle %s, can't insert in semver mode", bundle.BundleImage) } @@ -43,6 +44,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann if graph.DefaultChannel == "" { // Infer default channel from channel list if annotations.SelectDefaultChannel() == "" { + // nolint:stylecheck return nil, fmt.Errorf("Default channel is missing and can't be inferred") } graph.DefaultChannel = annotations.SelectDefaultChannel() @@ -83,6 +85,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann for node := range channelGraph.Nodes { nodeVersion, err := semver.Make(node.Version) if err != nil { + // nolint:stylecheck return nil, fmt.Errorf("Unable to parse existing bundle version stored in index %s %s %s", node.CsvName, node.Version, node.BundlePath) } @@ -131,7 +134,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann // the new channel head if !lowestAhead.IsEmpty() { channelGraph.Nodes[lowestAhead] = map[BundleKey]struct{}{ - newBundleKey: struct{}{}, + newBundleKey: {}, } } else { channelGraph.Head = newBundleKey diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/channelupdateoptions.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/channelupdateoptions.go index 85f5acb40..d45bd414e 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/channelupdateoptions.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/channelupdateoptions.go @@ -22,6 +22,7 @@ func GetModeFromString(mode string) (Mode, error) { case "semver-skippatch": return SkipPatchMode, nil default: + // nolint:stylecheck return -1, fmt.Errorf("Invalid channel update mode %s specified", mode) } } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/csv.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/csv.go index 8dcdf65ad..4a3d8ceaf 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/csv.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/csv.go @@ -7,19 +7,20 @@ import ( "os" "path" - prettyunmarshaler "github.com/operator-framework/operator-registry/pkg/prettyunmarshaler" - - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/yaml" "github.com/operator-framework/api/pkg/operators" + + prettyunmarshaler "github.com/operator-framework/operator-registry/pkg/prettyunmarshaler" ) const ( // Name of the CSV's kind + // nolint:unused clusterServiceVersionKind = "ClusterServiceVersion" // Name of the section under which the list of owned and required list of @@ -44,9 +45,11 @@ const ( icon = 
"icon" // The yaml attribute that points to the icon.base64data for the ClusterServiceVersion + // nolint:unused base64data = "base64data" // The yaml attribute that points to the icon.mediatype for the ClusterServiceVersion + // nolint:unused mediatype = "mediatype" // The yaml attribute that points to the description for the ClusterServiceVersion description = "description" @@ -131,7 +134,6 @@ func ReadCSVFromBundleDirectory(bundleDir string) (*ClusterServiceVersion, error return &csv, nil } return nil, fmt.Errorf("no ClusterServiceVersion object found in %s", bundleDir) - } // GetReplaces returns the name of the older ClusterServiceVersion object that @@ -224,16 +226,16 @@ func (csv *ClusterServiceVersion) GetSkips() ([]string, error) { // // If owned or required is not defined in the spec then an empty list is // returned respectively. -func (csv *ClusterServiceVersion) GetCustomResourceDefintions() (owned []*DefinitionKey, required []*DefinitionKey, err error) { +func (csv *ClusterServiceVersion) GetCustomResourceDefintions() ([]*DefinitionKey, []*DefinitionKey, error) { var objmap map[string]*json.RawMessage - if err = json.Unmarshal(csv.Spec, &objmap); err != nil { - return + if err := json.Unmarshal(csv.Spec, &objmap); err != nil { + return nil, nil, err } rawValue, ok := objmap[customResourceDefinitions] if !ok || rawValue == nil { - return + return nil, nil, nil } var definitions struct { @@ -241,13 +243,11 @@ func (csv *ClusterServiceVersion) GetCustomResourceDefintions() (owned []*Defini Required []*DefinitionKey `json:"required"` } - if err = json.Unmarshal(*rawValue, &definitions); err != nil { - return + if err := json.Unmarshal(*rawValue, &definitions); err != nil { + return nil, nil, err } - owned = definitions.Owned - required = definitions.Required - return + return definitions.Owned, definitions.Required, nil } // GetApiServiceDefinitions returns a list of owned and required @@ -261,16 +261,17 @@ func (csv *ClusterServiceVersion) GetCustomResourceDefintions() (owned []*Defini // // If owned or required is not defined in the spec then an empty list is // returned respectively. 
-func (csv *ClusterServiceVersion) GetApiServiceDefinitions() (owned []*DefinitionKey, required []*DefinitionKey, err error) { +// nolint:stylecheck +func (csv *ClusterServiceVersion) GetApiServiceDefinitions() ([]*DefinitionKey, []*DefinitionKey, error) { var objmap map[string]*json.RawMessage - if err = json.Unmarshal(csv.Spec, &objmap); err != nil { + if err := json.Unmarshal(csv.Spec, &objmap); err != nil { return nil, nil, fmt.Errorf("error unmarshaling into object map: %s", err) } rawValue, ok := objmap[apiServiceDefinitions] if !ok || rawValue == nil { - return + return nil, nil, nil } var definitions struct { @@ -278,27 +279,25 @@ func (csv *ClusterServiceVersion) GetApiServiceDefinitions() (owned []*Definitio Required []*DefinitionKey `json:"required"` } - if err = json.Unmarshal(*rawValue, &definitions); err != nil { - return + if err := json.Unmarshal(*rawValue, &definitions); err != nil { + return nil, nil, err } - owned = definitions.Owned - required = definitions.Required - return + return definitions.Owned, definitions.Required, nil } // GetRelatedImage returns the list of associated images for the operator -func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct{}, err error) { +func (csv *ClusterServiceVersion) GetRelatedImages() (map[string]struct{}, error) { var objmap map[string]*json.RawMessage - imageSet = make(map[string]struct{}) + imageSet := make(map[string]struct{}) - if err = json.Unmarshal(csv.Spec, &objmap); err != nil { - return + if err := json.Unmarshal(csv.Spec, &objmap); err != nil { + return nil, err } rawValue, ok := objmap[relatedImages] if !ok || rawValue == nil { - return + return imageSet, nil } type relatedImage struct { @@ -306,15 +305,15 @@ func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct Ref string `json:"image"` } var relatedImages []relatedImage - if err = json.Unmarshal(*rawValue, &relatedImages); err != nil { - return + if err := json.Unmarshal(*rawValue, &relatedImages); err != nil { + return nil, err } for _, img := range relatedImages { imageSet[img.Ref] = struct{}{} } - return + return imageSet, nil } // GetOperatorImages returns a list of any images used to run the operator. @@ -322,7 +321,7 @@ func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct func (csv *ClusterServiceVersion) GetOperatorImages() (map[string]struct{}, error) { type dep struct { Name string - Spec v1.DeploymentSpec + Spec appsv1.DeploymentSpec } type strategySpec struct { Deployments []dep @@ -416,7 +415,6 @@ func (csv *ClusterServiceVersion) GetSubstitutesFor() string { } func (csv *ClusterServiceVersion) UnmarshalJSON(data []byte) error { - if err := csv.UnmarshalSpec(data); err != nil { return err } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/decode.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/decode.go index 0a9587d09..1818cc305 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/decode.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/decode.go @@ -13,36 +13,34 @@ import ( // DecodeUnstructured decodes a raw stream into a an // unstructured.Unstructured instance. 
-func DecodeUnstructured(reader io.Reader) (obj *unstructured.Unstructured, err error) { +func DecodeUnstructured(reader io.Reader) (*unstructured.Unstructured, error) { decoder := yaml.NewYAMLOrJSONDecoder(reader, 30) t := &unstructured.Unstructured{} - if err = decoder.Decode(t); err != nil { - return + if err := decoder.Decode(t); err != nil { + return nil, err } - obj = t - return + return t, nil } // DecodePackageManifest decodes a raw stream into a a PackageManifest instance. // If a package name is empty we consider the object invalid! -func DecodePackageManifest(reader io.Reader) (manifest *PackageManifest, err error) { +func DecodePackageManifest(reader io.Reader) (*PackageManifest, error) { decoder := yaml.NewYAMLOrJSONDecoder(reader, 30) obj := &PackageManifest{} if decodeErr := decoder.Decode(obj); decodeErr != nil { - err = fmt.Errorf("could not decode contents into package manifest - %v", decodeErr) - return + err := fmt.Errorf("could not decode contents into package manifest - %v", decodeErr) + return nil, err } if obj.PackageName == "" { - err = errors.New("name of package (packageName) is missing") - return + err := errors.New("name of package (packageName) is missing") + return nil, err } - manifest = obj - return + return obj, nil } func decodeFileFS(root fs.FS, path string, into interface{}, log *logrus.Entry) error { diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/directoryGraphLoader.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/directoryGraphLoader.go index a899f01e0..4b7209188 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/directoryGraphLoader.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/directoryGraphLoader.go @@ -51,7 +51,7 @@ func NewPackageGraphLoaderFromDir(packageDir string) (*DirGraphLoader, error) { func (g *DirGraphLoader) Generate() (*Package, error) { err := g.loadBundleCsvPathMap() if err != nil { - return nil, fmt.Errorf("error geting CSVs from bundles in the package directory, %v", err) + return nil, fmt.Errorf("error getting CSVs from bundles in the package directory, %v", err) } pkg, err := g.parsePackageYAMLFile() @@ -76,6 +76,7 @@ func (g *DirGraphLoader) loadBundleCsvPathMap() error { } CsvNameAndReplaceMap := make(map[string]csvReplaces) for _, bundlePath := range bundleDirs { + //nolint:nestif if bundlePath.IsDir() { csvStruct, err := ReadCSVFromBundleDirectory(filepath.Join(g.PackageDir, bundlePath.Name())) if err != nil { @@ -131,7 +132,7 @@ func (g *DirGraphLoader) getChannelNodes(channelHeadCsv string) *map[BundleKey]m // Iterate through remainingCSVsInChannel and add replaces of each encountered CSVs if not already in nodes. // Loop only exit after all remaining csvs are visited/deleted. 
for len(remainingCSVsInChannel) > 0 { - for bk, _ := range remainingCSVsInChannel { + for bk := range remainingCSVsInChannel { if _, ok := nodes[BundleKey{CsvName: bk.CsvName}]; !ok { nodes[BundleKey{CsvName: bk.CsvName}] = func() map[BundleKey]struct{} { subNode := make(map[BundleKey]struct{}) @@ -203,5 +204,4 @@ func convertFromPackageManifest(pkgManifest PackageManifest) *Package { DefaultChannel: pkgManifest.GetDefaultChannel(), Channels: pkgChannels, } - } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/empty.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/empty.go index 936f39cca..dc34f06dc 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/empty.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/empty.go @@ -40,7 +40,7 @@ func (EmptyQuery) GetBundleForChannel(ctx context.Context, pkgName string, chann return nil, errors.New("empty querier: cannot get bundle for channel") } -func (EmptyQuery) GetChannelEntriesThatReplace(ctx context.Context, name string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetChannelEntriesThatReplace(ctx context.Context, name string) ([]*ChannelEntry, error) { return nil, errors.New("empty querier: cannot get channel entries that replace") } @@ -48,11 +48,11 @@ func (EmptyQuery) GetBundleThatReplaces(ctx context.Context, name, pkgName, chan return nil, errors.New("empty querier: cannot get bundle that replaces") } -func (EmptyQuery) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*ChannelEntry, error) { return nil, errors.New("empty querier: cannot get channel entries that provide") } -func (EmptyQuery) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*ChannelEntry, error) { return nil, errors.New("empty querier: cannot get latest channel entries that provide") } @@ -68,7 +68,8 @@ func (EmptyQuery) GetImagesForBundle(ctx context.Context, bundleName string) ([] return nil, errors.New("empty querier: cannot get image list") } -func (EmptyQuery) GetApisForEntry(ctx context.Context, entryId int64) (provided []*api.GroupVersionKind, required []*api.GroupVersionKind, err error) { +// nolint:stylecheck +func (EmptyQuery) GetApisForEntry(ctx context.Context, entryId int64) ([]*api.GroupVersionKind, []*api.GroupVersionKind, error) { return nil, nil, errors.New("empty querier: cannot apis") } @@ -104,11 +105,11 @@ func (EmptyQuery) SendBundles(ctx context.Context, stream BundleSender) error { return errors.New("empty querier: cannot stream bundles") } -func (EmptyQuery) GetDependenciesForBundle(ctx context.Context, name, version, path string) (dependencies []*api.Dependency, err error) { +func (EmptyQuery) GetDependenciesForBundle(ctx context.Context, name, version, path string) ([]*api.Dependency, error) { return nil, errors.New("empty querier: cannot get dependencies for bundle") } -func (EmptyQuery) GetBundlePathIfExists(ctx context.Context, csvName string) (bundlePath string, err error) { +func (EmptyQuery) GetBundlePathIfExists(ctx context.Context, csvName string) (string, error) { return "", errors.New("empty querier: cannot get bundle path for bundle") } diff --git 
a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/graph.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/graph.go index 32185f189..d2623f2a6 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/graph.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/graph.go @@ -35,7 +35,7 @@ type Channel struct { func (c *Channel) String() string { var b strings.Builder - for node, _ := range c.Nodes { + for node := range c.Nodes { b.WriteString(node.String()) b.WriteString("\n") } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/imageinput.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/imageinput.go index 69fe210ef..ed287e687 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/imageinput.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/imageinput.go @@ -4,8 +4,9 @@ import ( "os" "path/filepath" - "github.com/operator-framework/operator-registry/pkg/image" "github.com/sirupsen/logrus" + + "github.com/operator-framework/operator-registry/pkg/image" ) type ImageInput struct { diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/parse.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/parse.go index 4b13ef767..24445ffe0 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/parse.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/parse.go @@ -6,9 +6,10 @@ import ( "io/fs" "strings" - operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" ) type bundleParser struct { @@ -156,6 +157,7 @@ func (b *bundleParser) addMetadata(metadata fs.FS, bundle *Bundle) error { bundle.Package = af.Annotations.PackageName bundle.Channels = af.GetChannels() } else { + // nolint:stylecheck return fmt.Errorf("Could not find annotations file") } @@ -184,6 +186,7 @@ func (b *bundleParser) derivedProperties(bundle *Bundle) ([]Property, error) { return nil, fmt.Errorf("bundle missing csv") } + // nolint:prealloc var derived []Property if len(csv.GetAnnotations()) > 0 { properties, ok := csv.GetAnnotations()[PropertyKey] @@ -235,6 +238,7 @@ func (b *bundleParser) derivedProperties(bundle *Bundle) ([]Property, error) { // propertySet returns the deduplicated set of a property list. 
func propertySet(properties []Property) []Property { + // nolint:prealloc var ( set []Property visited = map[string]struct{}{} diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/populator.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/populator.go index 730d27fb9..ea86a163e 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/populator.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/populator.go @@ -151,6 +151,7 @@ func (i *DirectoryPopulator) loadManifests(imagesToAdd []*ImageInput, mode Mode) // globalSanityCheck should have verified this to be a head without anything replacing it // and that we have a single overwrite per package + // nolint:nestif if len(i.overwrittenImages) > 0 { if overwriter, ok := i.loader.(HeadOverwriter); ok { // Assume loader has some way to handle overwritten heads if HeadOverwriter isn't implemented explicitly @@ -180,6 +181,7 @@ func (i *DirectoryPopulator) loadManifests(imagesToAdd []*ImageInput, mode Mode) } } default: + // nolint:stylecheck return fmt.Errorf("Unsupported update mode") } @@ -195,6 +197,7 @@ var packageContextKey = "package" // ContextWithPackage adds a package value to a context. func ContextWithPackage(ctx context.Context, pkg string) context.Context { + // nolint:staticcheck return context.WithValue(ctx, packageContextKey, pkg) } @@ -262,6 +265,7 @@ func (i *DirectoryPopulator) loadManifestsSemver(bundle *Bundle, skippatch bool) } // loadOperatorBundle adds the package information to the loader's store +// nolint:unused func (i *DirectoryPopulator) loadOperatorBundle(manifest PackageManifest, bundle *Bundle) error { if manifest.PackageName == "" { return nil diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/registry_to_model.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/registry_to_model.go index 0ba64c72d..947814751 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/registry_to_model.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/registry_to_model.go @@ -47,7 +47,7 @@ func ObjectsAndPropertiesFromBundle(b *Bundle) ([]string, []property.Property, e if err := json.Unmarshal(p.Value, &v); err != nil { return nil, nil, property.ParseError{Idx: i, Typ: p.Type, Err: err} } - k := property.GVKRequired{Group: v.Group, Kind: v.Kind, Version: v.Version} + k := property.GVKRequired(v) requiredGVKs[k] = struct{}{} case property.TypePackage: var v property.Package @@ -90,6 +90,7 @@ func ObjectsAndPropertiesFromBundle(b *Bundle) ([]string, []property.Property, e } } + // nolint:prealloc var ( props []property.Property objects []string diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/types.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/types.go index 3a5ab6293..4105aaa3d 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/types.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/registry/types.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/blang/semver/v4" + "github.com/operator-framework/api/pkg/constraints" ) @@ -285,6 +286,7 @@ func (gd *GVKDependency) Validate() []error { func (ld *LabelDependency) Validate() []error { errs := []error{} if *ld == (LabelDependency{}) { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Label information is 
missing")) } return errs @@ -294,13 +296,16 @@ func (ld *LabelDependency) Validate() []error { func (pd *PackageDependency) Validate() []error { errs := []error{} if pd.PackageName == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Package name is empty")) } if pd.Version == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Package version is empty")) } else { _, err := semver.ParseRange(pd.Version) if err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid semver format version")) } } @@ -311,15 +316,18 @@ func (pd *PackageDependency) Validate() []error { func (cc *CelConstraint) Validate() []error { errs := []error{} if cc.Cel == nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("The CEL field is missing")) } else { if cc.Cel.Rule == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("The CEL expression is missing")) return errs } validator := constraints.NewCelEnvironment() _, err := validator.Validate(cc.Cel.Rule) if err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid CEL expression: %s", err.Error())) } } @@ -328,6 +336,7 @@ func (cc *CelConstraint) Validate() []error { // GetDependencies returns the list of dependency func (d *DependenciesFile) GetDependencies() []*Dependency { + // nolint:prealloc var dependencies []*Dependency for _, item := range d.Dependencies { dep := item diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/configmap.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/configmap.go index 44e2302cc..a1ce927f8 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/configmap.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/configmap.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -47,7 +47,7 @@ func NewSQLLoaderForConfigMapData(logger *logrus.Entry, store registry.Load, con } } -func NewSQLLoaderForConfigMap(store registry.Load, configMap v1.ConfigMap) *ConfigMapLoader { +func NewSQLLoaderForConfigMap(store registry.Load, configMap corev1.ConfigMap) *ConfigMapLoader { logger := logrus.WithFields(logrus.Fields{"configmap": configMap.GetName(), "ns": configMap.GetNamespace()}) return &ConfigMapLoader{ log: logger, @@ -66,14 +66,14 @@ func (c *ConfigMapLoader) Populate() error { return fmt.Errorf("couldn't find expected key %s in configmap", ConfigMapCRDName) } - crdListJson, err := yaml.YAMLToJSON([]byte(crdListYaml)) + crdListJSON, err := yaml.YAMLToJSON([]byte(crdListYaml)) if err != nil { c.log.WithError(err).Debug("error loading CRD list") return err } var parsedCRDList []v1beta1.CustomResourceDefinition - if err := json.Unmarshal(crdListJson, &parsedCRDList); err != nil { + if err := json.Unmarshal(crdListJSON, &parsedCRDList); err != nil { c.log.WithError(err).Debug("error parsing CRD list") return err } @@ -106,14 +106,14 @@ func (c *ConfigMapLoader) Populate() error { errs = append(errs, fmt.Errorf("couldn't find expected key %s in configmap", ConfigMapCSVName)) return utilerrors.NewAggregate(errs) } - csvListJson, err := yaml.YAMLToJSON([]byte(csvListYaml)) + csvListJSON, err := yaml.YAMLToJSON([]byte(csvListYaml)) if err != nil { errs = append(errs, fmt.Errorf("error loading CSV list: %s", err)) return utilerrors.NewAggregate(errs) } 
var parsedCSVList []registry.ClusterServiceVersion - err = json.Unmarshal(csvListJson, &parsedCSVList) + err = json.Unmarshal(csvListJSON, &parsedCSVList) if err != nil { errs = append(errs, fmt.Errorf("error parsing CSV list: %s", err)) return utilerrors.NewAggregate(errs) @@ -164,14 +164,14 @@ func (c *ConfigMapLoader) Populate() error { return utilerrors.NewAggregate(errs) } - packageListJson, err := yaml.YAMLToJSON([]byte(packageListYaml)) + packageListJSON, err := yaml.YAMLToJSON([]byte(packageListYaml)) if err != nil { errs = append(errs, fmt.Errorf("error loading package list: %s", err)) return utilerrors.NewAggregate(errs) } var parsedPackageManifests []registry.PackageManifest - err = json.Unmarshal(packageListJson, &parsedPackageManifests) + err = json.Unmarshal(packageListJSON, &parsedPackageManifests) if err != nil { errs = append(errs, fmt.Errorf("error parsing package list: %s", err)) return utilerrors.NewAggregate(errs) diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/conversion.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/conversion.go index ff1da4c48..47d2257f7 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/conversion.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/conversion.go @@ -7,9 +7,10 @@ import ( "fmt" "strings" - "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/sirupsen/logrus" + "github.com/operator-framework/api/pkg/operators/v1alpha1" + "github.com/operator-framework/operator-registry/alpha/model" "github.com/operator-framework/operator-registry/pkg/api" "github.com/operator-framework/operator-registry/pkg/registry" @@ -39,6 +40,7 @@ func initializeModelPackages(ctx context.Context, q *SQLQuerier) (model.Model, e return nil, err } + // nolint:prealloc var rPkgs []registry.PackageManifest for _, pkgName := range pkgNames { rPkg, err := q.GetPackage(ctx, pkgName) diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/db_options.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/db_options.go index e09bfbc03..5d43615f1 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/db_options.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/db_options.go @@ -4,12 +4,14 @@ import ( "database/sql" ) +// nolint:stylecheck type DbOptions struct { // MigratorBuilder is a function that returns a migrator instance MigratorBuilder func(*sql.DB) (Migrator, error) EnableAlpha bool } +// nolint:stylecheck type DbOption func(*DbOptions) func defaultDBOptions() *DbOptions { diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecate.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecate.go index 4ac3d61eb..80e11fc91 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecate.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecate.go @@ -72,6 +72,7 @@ func (d *PackageDeprecator) MaybeRemovePackages() error { var errs []error var removedBundlePaths []string + // nolint:prealloc var remainingBundlePaths []string // Iterate over bundles list - see if any bundle is the head of a default channel in a package diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecationmessage.go 
b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecationmessage.go index 20a1389b7..a0b4bc75f 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecationmessage.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecationmessage.go @@ -10,7 +10,7 @@ const noticeColor = "\033[1;33m%s\033[0m" func LogSqliteDeprecation() { log := logrus.New() - log.Warnf(DeprecationMessage) + log.Warn(DeprecationMessage) } var DeprecationMessage = fmt.Sprintf(noticeColor, `DEPRECATION NOTICE: diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/directory.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/directory.go index 2ed0c595e..a334ff693 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/directory.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/directory.go @@ -54,7 +54,9 @@ func (d *DirectoryLoader) Populate() error { // collectWalkErrs calls the given walk func and appends any non-nil, non skip dir error returned to the given errors slice. func collectWalkErrs(walk filepath.WalkFunc, errs *[]error) filepath.WalkFunc { - return func(path string, f os.FileInfo, err error) (walkErr error) { + return func(path string, f os.FileInfo, err error) error { + var walkErr error + // nolint: errorlint if walkErr = walk(path, f, err); walkErr != nil && walkErr != filepath.SkipDir { *errs = append(*errs, walkErr) return nil @@ -67,7 +69,7 @@ func collectWalkErrs(walk filepath.WalkFunc, errs *[]error) filepath.WalkFunc { // LoadBundleWalkFunc walks the directory. When it sees a `.clusterserviceversion.yaml` file, it // attempts to load the surrounding files in the same directory as a bundle, and stores them in the // db for querying -func (d *DirectoryLoader) LoadBundleWalkFunc(path string, f os.FileInfo, err error) error { +func (d *DirectoryLoader) LoadBundleWalkFunc(path string, f os.FileInfo, _ error) error { if f == nil { return fmt.Errorf("invalid file: %v", f) } @@ -131,7 +133,7 @@ func (d *DirectoryLoader) LoadBundleWalkFunc(path string, f os.FileInfo, err err // LoadPackagesWalkFunc attempts to unmarshal the file at the given path into a PackageManifest resource. // If unmarshaling is successful, the PackageManifest is added to the loader's store. 
-func (d *DirectoryLoader) LoadPackagesWalkFunc(path string, f os.FileInfo, err error) error { +func (d *DirectoryLoader) LoadPackagesWalkFunc(path string, f os.FileInfo, _ error) error { if f == nil { return fmt.Errorf("invalid file: %v", f) } @@ -163,7 +165,6 @@ func (d *DirectoryLoader) LoadPackagesWalkFunc(path string, f os.FileInfo, err e if err != nil { return fmt.Errorf("could not decode contents of file %s into package: %s", path, err) } - } if manifest.PackageName == "" { return nil diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/load.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/load.go index f8a5a1350..9592b5f54 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/load.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/load.go @@ -69,7 +69,7 @@ func (s *sqlLoader) AddOperatorBundle(bundle *registry.Bundle) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.addOperatorBundle(tx, bundle); err != nil { @@ -123,6 +123,7 @@ func (s *sqlLoader) addOperatorBundle(tx *sql.Tx, bundle *registry.Bundle) error } if substitutesFor != "" && !s.enableAlpha { + // nolint:stylecheck return fmt.Errorf("SubstitutesFor is an alpha-only feature. You must enable alpha features with the flag --enable-alpha in order to use this feature.") } @@ -162,7 +163,6 @@ func (s *sqlLoader) addOperatorBundle(tx *sql.Tx, bundle *registry.Bundle) error } func (s *sqlLoader) addSubstitutesFor(tx *sql.Tx, bundle *registry.Bundle) error { - updateBundleReplaces, err := tx.Prepare("update operatorbundle set replaces = ? where replaces = ?") if err != nil { return err @@ -205,6 +205,7 @@ func (s *sqlLoader) addSubstitutesFor(tx *sql.Tx, bundle *registry.Bundle) error if err != nil { return fmt.Errorf("failed to obtain substitutes : %s", err) } + // nolint:nestif if substitutesFor != "" { // Update any replaces that reference the substituted-for bundle _, err = updateBundleReplaces.Exec(csvName, substitutesFor) @@ -407,7 +408,7 @@ func (s *sqlLoader) AddPackageChannelsFromGraph(graph *registry.Package) error { return fmt.Errorf("unable to start a transaction: %s", err) } defer func() { - tx.Rollback() + _ = tx.Rollback() }() var errs []error @@ -507,6 +508,7 @@ func (s *sqlLoader) AddPackageChannelsFromGraph(graph *registry.Package) error { // If the number of nodes is 5 and the startDepth is 3, the expected depth is 7 (3, 4, 5, 6, 7) expectedDepth := len(channel.Nodes) + startDepth - 1 if expectedDepth != depth { + // nolint:stylecheck err := fmt.Errorf("Invalid graph: some (non-bottom) nodes defined in the graph were not mentioned as replacements of any node (%d != %d)", expectedDepth, depth) errs = append(errs, err) } @@ -533,7 +535,7 @@ func (s *sqlLoader) AddPackageChannels(manifest registry.PackageManifest) error return fmt.Errorf("unable to start a transaction: %s", err) } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.rmPackage(tx, manifest.PackageName); err != nil { @@ -591,6 +593,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani return fmt.Errorf("failed to add package %q: %s", manifest.PackageName, err.Error()) } + // nolint:prealloc var ( errs []error channels []registry.PackageChannel @@ -717,6 +720,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani // If we find 'replaces' in the circuit list then we've seen it already, break out if _, ok := 
replaceCycle[replaces]; ok { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Cycle detected, %s replaces %s", channelEntryCSVName, replaces)) break } @@ -732,6 +736,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani break } if _, _, _, err := s.getBundleSkipsReplacesVersion(tx, replaces); err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid bundle %s, replaces nonexistent bundle %s", c.CurrentCSVName, replaces)) break } @@ -750,7 +755,7 @@ func (s *sqlLoader) ClearNonHeadBundles() error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() removeNonHeadBundles, err := tx.Prepare(` @@ -773,34 +778,37 @@ func (s *sqlLoader) ClearNonHeadBundles() error { return tx.Commit() } -func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) (replaces string, skips []string, version string, err error) { +func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) (string, []string, string, error) { getReplacesSkipsAndVersions, err := tx.Prepare(` SELECT replaces, skips, version FROM operatorbundle WHERE operatorbundle.name=? LIMIT 1`) if err != nil { - return + return "", nil, "", err } defer getReplacesSkipsAndVersions.Close() rows, rerr := getReplacesSkipsAndVersions.Query(bundleName) if err != nil { err = rerr - return + return "", nil, "", err } defer rows.Close() if !rows.Next() { err = fmt.Errorf("no bundle found for bundlename %s", bundleName) - return + return "", nil, "", err } var replacesStringSQL sql.NullString var skipsStringSQL sql.NullString var versionStringSQL sql.NullString if err = rows.Scan(&replacesStringSQL, &skipsStringSQL, &versionStringSQL); err != nil { - return + return "", nil, "", err } + var replaces string + var skips []string + var version string if replacesStringSQL.Valid { replaces = replacesStringSQL.String } @@ -811,40 +819,41 @@ func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) version = versionStringSQL.String } - return + return replaces, skips, version, nil } -func (s *sqlLoader) getBundlePathIfExists(tx *sql.Tx, bundleName string) (bundlePath string, err error) { +func (s *sqlLoader) getBundlePathIfExists(tx *sql.Tx, bundleName string) (string, error) { getBundlePath, err := tx.Prepare(` SELECT bundlepath FROM operatorbundle WHERE operatorbundle.name=? 
LIMIT 1`) if err != nil { - return + return "", err } defer getBundlePath.Close() rows, rerr := getBundlePath.Query(bundleName) if err != nil { err = rerr - return + return "", err } defer rows.Close() if !rows.Next() { // no bundlepath set - return + return "", nil } var bundlePathSQL sql.NullString if err = rows.Scan(&bundlePathSQL); err != nil { - return + return "", err } + var bundlePath string if bundlePathSQL.Valid { bundlePath = bundlePathSQL.String } - return + return bundlePath, nil } func (s *sqlLoader) addAPIs(tx *sql.Tx, bundle *registry.Bundle) error { @@ -950,7 +959,7 @@ func (s *sqlLoader) RemovePackage(packageName string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() csvNames, err := s.getCSVNames(tx, packageName) @@ -1059,7 +1068,7 @@ func (s *sqlLoader) AddBundlePackageChannels(manifest registry.PackageManifest, return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.addOperatorBundle(tx, bundle); err != nil { @@ -1343,7 +1352,7 @@ type tailBundle struct { replacedBy []string // to handle any chain where a skipped entry may be a part of another channel that should not be truncated } -func getTailFromBundle(tx *sql.Tx, head string) (bundles map[string]tailBundle, err error) { +func getTailFromBundle(tx *sql.Tx, head string) (map[string]tailBundle, error) { // traverse replaces chain and collect channel list for each bundle. // This assumes that replaces chain for a bundle is the same across channels. // only real bundles with entries in the operator_bundle table are returned. @@ -1392,7 +1401,7 @@ func getTailFromBundle(tx *sql.Tx, head string) (bundles map[string]tailBundle, return nil, fmt.Errorf("could not find default channel head for %s", head) } var defaultChannelHead sql.NullString - err = row.Scan(&defaultChannelHead) + err := row.Scan(&defaultChannelHead) if err != nil { return nil, fmt.Errorf("error getting default channel head for %s: %v", head, err) } @@ -1481,7 +1490,7 @@ func (s *sqlLoader) DeprecateBundle(path string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() name, version, err := getBundleNameAndVersionForImage(tx, path) @@ -1550,7 +1559,6 @@ deprecate: if err := s.rmBundle(tx, bundle); err != nil { return err } - } // remove links to deprecated/truncated bundles to avoid regenerating these on add/overwrite _, err = tx.Exec(`UPDATE channel_entry SET replaces=NULL WHERE operatorbundle_name=?`, name) @@ -1592,7 +1600,7 @@ func (s *sqlLoader) RemoveStrandedBundles() error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.rmStrandedBundles(tx); err != nil { @@ -1742,7 +1750,7 @@ func (d *DeprecationAwareLoader) clearLastDeprecatedInPackage(pkg string) error return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() // The last deprecated bundles for a package will still have "tombstone" records in channel_entry (among other tables). @@ -1770,7 +1778,7 @@ func (s sqlLoader) RemoveOverwrittenChannelHead(pkg, bundle string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() // check if bundle has anything that replaces it getBundlesThatReplaceHeadQuery := `SELECT DISTINCT operatorbundle.name AS replaces, channel_entry.channel_name @@ -1795,6 +1803,7 @@ func (s sqlLoader) RemoveOverwrittenChannelHead(pkg, bundle string) error { return err } // This is not a head bundle for all channels it is a member of. 
Cannot remove + // nolint: staticcheck return fmt.Errorf("cannot overwrite bundle %s from package %s: replaced by %s on channel %s", bundle, pkg, replaces.String, channel.String) } } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/loadprocs.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/loadprocs.go index 0196064d6..218f2cda1 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/loadprocs.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/loadprocs.go @@ -41,6 +41,7 @@ func addReplaces(tx *sql.Tx, replacesID, entryID int64) error { return nil } +// nolint:unused func addPackage(tx *sql.Tx, packageName string) error { addPackage, err := tx.Prepare("insert into package(name) values(?)") if err != nil { @@ -71,6 +72,7 @@ func addPackageIfNotExists(tx *sql.Tx, packageName string) error { return nil } +// nolint:unused func addChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error { addChannel, err := tx.Prepare("insert into channel(name, package_name, head_operatorbundle_name) values(?, ?, ?)") if err != nil { @@ -86,6 +88,7 @@ func addChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error return nil } +// nolint:unused func updateChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error { updateChannel, err := tx.Prepare("update channel set head_operatorbundle_name = ? where name = ? and package_name = ?") if err != nil { @@ -96,7 +99,6 @@ func updateChannel(tx *sql.Tx, channelName, packageName, headCsvName string) err _, err = updateChannel.Exec(channelName, packageName, headCsvName) if err != nil { return fmt.Errorf("failed to update channel (%s) for package (%s) with head (%s) : %s", channelName, packageName, headCsvName, err) - } return nil diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/001_related_images.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/001_related_images.go index 3b3c8c36b..e4511bfb2 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/001_related_images.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/001_related_images.go @@ -45,25 +45,25 @@ func getCSV(ctx context.Context, tx *sql.Tx, name string) (*registry.ClusterServ return nil, err } - var csvJson sql.NullString + var csvJSON sql.NullString if !rows.Next() { return nil, fmt.Errorf("bundle %s not found", name) } - if err := rows.Scan(&csvJson); err != nil { + if err := rows.Scan(&csvJSON); err != nil { return nil, err } - if !csvJson.Valid { + if !csvJSON.Valid { return nil, fmt.Errorf("bad value for csv") } csv := ®istry.ClusterServiceVersion{} - if err := json.Unmarshal([]byte(csvJson.String), csv); err != nil { + if err := json.Unmarshal([]byte(csvJSON.String), csv); err != nil { return nil, err } return csv, nil } func extractRelatedImages(ctx context.Context, tx *sql.Tx, name string) error { - addSql := `insert into related_image(image, operatorbundle_name) values(?,?)` + addSQL := `insert into related_image(image, operatorbundle_name) values(?,?)` csv, err := getCSV(ctx, tx, name) if err != nil { logrus.Warnf("error backfilling related images: %v", err) @@ -83,7 +83,7 @@ func extractRelatedImages(ctx context.Context, tx *sql.Tx, name string) error { images[k] = struct{}{} } for img := range images { - if _, err := tx.ExecContext(ctx, addSql, img, name); err != nil { 
+ if _, err := tx.ExecContext(ctx, addSQL, img, name); err != nil { logrus.Warnf("error backfilling related images: %v", err) continue } @@ -101,7 +101,7 @@ var relatedImagesMigration = &Migration{ FOREIGN KEY(operatorbundle_name) REFERENCES operatorbundle(name) ); ` - _, err := tx.ExecContext(ctx, sql) + _, _ = tx.ExecContext(ctx, sql) bundles, err := listBundles(ctx, tx) if err != nil { diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/003_required_apis.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/003_required_apis.go index 0253c5119..f25d285ab 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/003_required_apis.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/003_required_apis.go @@ -9,14 +9,15 @@ import ( "github.com/sirupsen/logrus" ) +// nolint:stylecheck const RequiredApiMigrationKey = 3 // Register this migration func init() { - registerMigration(RequiredApiMigrationKey, requiredApiMigration) + registerMigration(RequiredApiMigrationKey, requiredAPIMigration) } -var requiredApiMigration = &Migration{ +var requiredAPIMigration = &Migration{ Id: RequiredApiMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { sql := ` @@ -37,8 +38,8 @@ var requiredApiMigration = &Migration{ if err != nil { return err } - for entryId, bundle := range bundles { - if err := extractRequiredApis(ctx, tx, entryId, bundle); err != nil { + for entryID, bundle := range bundles { + if err := extractRequiredApis(ctx, tx, entryID, bundle); err != nil { logrus.Warnf("error backfilling required apis: %v", err) continue } @@ -67,20 +68,20 @@ func getChannelEntryBundles(ctx context.Context, tx *sql.Tx) (map[int64]string, entries := map[int64]string{} for rows.Next() { - var entryId sql.NullInt64 + var entryID sql.NullInt64 var name sql.NullString - if err = rows.Scan(&entryId, &name); err != nil { + if err = rows.Scan(&entryID, &name); err != nil { return nil, err } - if !entryId.Valid || !name.Valid { + if !entryID.Valid || !name.Valid { continue } - entries[entryId.Int64] = name.String + entries[entryID.Int64] = name.String } return entries, nil } -func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name string) error { +func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryID int64, name string) error { addAPI, err := tx.Prepare("insert or replace into api(group_name, version, kind, plural) values(?, ?, ?, ?)") if err != nil { return err @@ -91,12 +92,12 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st } }() - addApiRequirer, err := tx.Prepare("insert into api_requirer(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)") + addAPIRequirer, err := tx.Prepare("insert into api_requirer(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)") if err != nil { return err } defer func() { - if err := addApiRequirer.Close(); err != nil { + if err := addAPIRequirer.Close(); err != nil { logrus.WithError(err).Warningf("error closing prepared statement") } }() @@ -107,7 +108,7 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st return err } - _, requiredCRDs, err := csv.GetCustomResourceDefintions() + _, requiredCRDs, _ := csv.GetCustomResourceDefintions() for _, crd := range requiredCRDs { plural, group, err := SplitCRDName(crd.Name) if err != nil { @@ -116,17 +117,17 @@ func extractRequiredApis(ctx context.Context, tx 
*sql.Tx, entryId int64, name st if _, err := addAPI.Exec(group, crd.Version, crd.Kind, plural); err != nil { return err } - if _, err := addApiRequirer.Exec(group, crd.Version, crd.Kind, entryId); err != nil { + if _, err := addAPIRequirer.Exec(group, crd.Version, crd.Kind, entryID); err != nil { return err } } - _, requiredAPIs, err := csv.GetApiServiceDefinitions() + _, requiredAPIs, _ := csv.GetApiServiceDefinitions() for _, api := range requiredAPIs { if _, err := addAPI.Exec(api.Group, api.Version, api.Kind, api.Name); err != nil { return err } - if _, err := addApiRequirer.Exec(api.Group, api.Version, api.Kind, entryId); err != nil { + if _, err := addAPIRequirer.Exec(api.Group, api.Version, api.Kind, entryID); err != nil { return err } } @@ -134,14 +135,13 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st return nil } -func SplitCRDName(crdName string) (plural, group string, err error) { +func SplitCRDName(crdName string) (string, string, error) { + var err error pluralGroup := strings.SplitN(crdName, ".", 2) if len(pluralGroup) != 2 { err = fmt.Errorf("can't split bad CRD name %s", crdName) - return + return "", "", err } - plural = pluralGroup[0] - group = pluralGroup[1] - return + return pluralGroup[0], pluralGroup[1], nil } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/005_version_skiprange.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/005_version_skiprange.go index 60b3c87ad..6a825debc 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/005_version_skiprange.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/005_version_skiprange.go @@ -75,7 +75,7 @@ var versionSkipRangeMigration = &Migration{ } func extractVersioning(ctx context.Context, tx *sql.Tx, name string) error { - addSql := `insert into operatorbundle(version, skiprange) values(?,?)` + addSQL := `insert into operatorbundle(version, skiprange) values(?,?)` csv, err := getCSV(ctx, tx, name) if err != nil { logrus.Warnf("error backfilling versioning: %v", err) @@ -89,6 +89,6 @@ func extractVersioning(ctx context.Context, tx *sql.Tx, name string) error { if err != nil { version = "" } - _, err = tx.ExecContext(ctx, addSql, version, skiprange) + _, err = tx.ExecContext(ctx, addSQL, version, skiprange) return err } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/006_associate_apis_with_bundle.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/006_associate_apis_with_bundle.go index f70436f1d..0e57e67fc 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/006_associate_apis_with_bundle.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/006_associate_apis_with_bundle.go @@ -11,7 +11,7 @@ const AssociateApisWithBundleMigrationKey = 6 // Register this migration func init() { - registerMigration(AssociateApisWithBundleMigrationKey, bundleApiMigration) + registerMigration(AssociateApisWithBundleMigrationKey, bundleAPIMigration) } // This migration moves the link between the provided and required apis table from the channel_entry to the @@ -24,7 +24,7 @@ func init() { // api_provider: FOREIGN KEY(operatorbundle_name, operatorbundle_version, operatorbundle_path) REFERENCES operatorbundle(name, version, bundlepath), // api_requirer: FOREIGN 
KEY(operatorbundle_name, operatorbundle_version, operatorbundle_path) REFERENCES operatorbundle(name, version, bundlepath), -var bundleApiMigration = &Migration{ +var bundleAPIMigration = &Migration{ Id: AssociateApisWithBundleMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { createNew := ` diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/007_replaces_skips.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/007_replaces_skips.go index 7825e89fe..2340634be 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/007_replaces_skips.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/007_replaces_skips.go @@ -97,12 +97,12 @@ func extractReplaces(ctx context.Context, tx *sql.Tx, name string) error { if err != nil { return err } - updateSql := `update operatorbundle SET replaces = ?, skips = ? WHERE name = ?;` - _, err = tx.ExecContext(ctx, updateSql, replaces, strings.Join(skips, ","), name) + updateSQL := `update operatorbundle SET replaces = ?, skips = ? WHERE name = ?;` + _, err = tx.ExecContext(ctx, updateSQL, replaces, strings.Join(skips, ","), name) return err } -func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (replaces string, skips []string, err error) { +func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (string, []string, error) { getReplacees := ` SELECT DISTINCT replaces.operatorbundle_name FROM channel_entry @@ -117,26 +117,28 @@ func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (replaces } defer rows.Close() + var replaces string if rows.Next() { var replaceeName sql.NullString if err = rows.Scan(&replaceeName); err != nil { - return + return "", nil, err } if replaceeName.Valid { replaces = replaceeName.String } } + var skips []string skips = []string{} for rows.Next() { var skipName sql.NullString if err = rows.Scan(&skipName); err != nil { - return + return "", nil, err } if !skipName.Valid { continue } skips = append(skips, skipName.String) } - return + return replaces, skips, nil } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/009_properties.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/009_properties.go index 046675611..252ad99ec 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/009_properties.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/009_properties.go @@ -75,12 +75,12 @@ var propertiesMigration = &Migration{ } // update the serialized value to omit the dependency type - updateDependencySql := ` + updateDependencySQL := ` UPDATE dependencies SET value = (SELECT json_remove(value, "$.type") FROM dependencies WHERE operatorbundle_name=dependencies.operatorbundle_name)` - _, err = tx.ExecContext(ctx, updateDependencySql) + _, err = tx.ExecContext(ctx, updateDependencySQL) if err != nil { return err } @@ -111,6 +111,7 @@ func getPackageForBundle(ctx context.Context, name string, tx *sql.Tx) (string, if !pkg.Valid { return "", err } + // nolint: staticcheck return pkg.String, nil } return "", err diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go index 
bee961621..d488775b0 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go @@ -15,12 +15,12 @@ func init() { var bundlePathPkgPropertyMigration = &Migration{ Id: BundlePathPkgMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { - updatePropertiesSql := ` + updatePropertiesSQL := ` UPDATE properties SET operatorbundle_path = (SELECT bundlepath FROM operatorbundle WHERE operatorbundle_name = operatorbundle.name AND operatorbundle_version = operatorbundle.version)` - _, err := tx.ExecContext(ctx, updatePropertiesSql) + _, err := tx.ExecContext(ctx, updatePropertiesSQL) if err != nil { return err } @@ -28,11 +28,11 @@ var bundlePathPkgPropertyMigration = &Migration{ return nil }, Down: func(ctx context.Context, tx *sql.Tx) error { - updatePropertiesSql := ` + updatePropertiesSQL := ` UPDATE properties SET operatorbundle_path = null WHERE type = "olm.package"` - _, err := tx.ExecContext(ctx, updatePropertiesSql) + _, err := tx.ExecContext(ctx, updatePropertiesSQL) if err != nil { return err } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/012_deprecated.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/012_deprecated.go index 760b381ff..e99480d58 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/012_deprecated.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/012_deprecated.go @@ -29,6 +29,7 @@ var deprecatedMigration = &Migration{ return err } + // nolint: gosec initDeprecated := fmt.Sprintf(`INSERT OR REPLACE INTO deprecated(operatorbundle_name) SELECT operatorbundle_name FROM properties WHERE properties.type='%s'`, registry.DeprecatedType) _, err := tx.ExecContext(ctx, initDeprecated) diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/migrations.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/migrations.go index b9bb60fba..475bb7cd6 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/migrations.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/migrations.go @@ -8,6 +8,7 @@ import ( ) type Migration struct { + // nolint:stylecheck Id int Up func(context.Context, *sql.Tx) error Down func(context.Context, *sql.Tx) error diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrator.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrator.go index 82bacc834..9f1438ab5 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrator.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrator.go @@ -3,6 +3,7 @@ package sqlite import ( "context" "database/sql" + "errors" "fmt" "strings" @@ -86,12 +87,12 @@ func (m *SQLLiteMigrator) Up(ctx context.Context, migrations migrations.Migratio } for _, migration := range migrations { - current_version, err := m.version(ctx, tx) + currentVersion, err := m.version(ctx, tx) if err != nil { return err } - if migration.Id != current_version+1 { + if migration.Id != currentVersion+1 { return fmt.Errorf("migration applied out of order") } @@ -127,12 +128,12 @@ func (m *SQLLiteMigrator) Down(ctx 
context.Context, migrations migrations.Migrat } for _, migration := range migrations { - current_version, err := m.version(ctx, tx) + currentVersion, err := m.version(ctx, tx) if err != nil { return err } - if migration.Id != current_version { + if migration.Id != currentVersion { return fmt.Errorf("migration applied out of order") } @@ -175,7 +176,7 @@ func (m *SQLLiteMigrator) tableExists(tx *sql.Tx, table string) (bool, error) { return exists, nil } -func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (version int, err error) { +func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (int, error) { tableExists, err := m.tableExists(tx, m.migrationsTable) if err != nil { return NilVersion, err @@ -185,9 +186,10 @@ func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (version int, } query := `SELECT version FROM ` + m.migrationsTable + ` LIMIT 1` + var version int err = tx.QueryRowContext(ctx, query).Scan(&version) switch { - case err == sql.ErrNoRows: + case errors.Is(err, sql.ErrNoRows): return NilVersion, nil case err != nil: return NilVersion, err @@ -200,10 +202,12 @@ func (m *SQLLiteMigrator) setVersion(ctx context.Context, tx *sql.Tx, version in if err := m.ensureMigrationTable(ctx, tx); err != nil { return err } + // nolint: gosec _, err := tx.ExecContext(ctx, "DELETE FROM "+m.migrationsTable) if err != nil { return err } + // nolint: gosec _, err = tx.ExecContext(ctx, "INSERT INTO "+m.migrationsTable+"(version) values(?)", version) return err } diff --git a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/query.go b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/query.go index 24880f1fc..7a42981f4 100644 --- a/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/query.go +++ b/tools/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/query.go @@ -63,6 +63,7 @@ func NewSQLLiteQuerier(dbFilename string, opts ...SQLiteQuerierOption) (*SQLQuer return NewSQLLiteQuerierFromDb(db, opts...), nil } +// nolint:stylecheck func NewSQLLiteQuerierFromDb(db *sql.DB, opts ...SQLiteQuerierOption) *SQLQuerier { return NewSQLLiteQuerierFromDBQuerier(dbQuerierAdapter{db}, opts...) 
} @@ -241,13 +242,13 @@ func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvNam if !rows.Next() { return nil, fmt.Errorf("no entry found for %s %s %s", pkgName, channelName, csvName) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var name sql.NullString var bundle sql.NullString var bundlePath sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &name, &bundle, &bundlePath, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &name, &bundle, &bundlePath, &version, &skipRange); err != nil { return nil, err } @@ -265,7 +266,7 @@ func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvNam out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -315,18 +316,18 @@ WHERE channel.name = :channel AND channel.package_name = :package` }, nil } -func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name string) (entries []*registry.ChannelEntry, err error) { +func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name string) ([]*registry.ChannelEntry, error) { query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name FROM channel_entry LEFT OUTER JOIN channel_entry replaces ON channel_entry.replaces = replaces.entry_id WHERE replaces.operatorbundle_name = ?` rows, err := s.db.QueryContext(ctx, query, name) if err != nil { - return + return nil, err } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries []*registry.ChannelEntry for rows.Next() { var pkgNameSQL sql.NullString @@ -334,7 +335,7 @@ func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name stri var bundleNameSQL sql.NullString if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL); err != nil { - return + return nil, err } entries = append(entries, &registry.ChannelEntry{ PackageName: pkgNameSQL.String, @@ -345,9 +346,9 @@ func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name stri } if len(entries) == 0 { err = fmt.Errorf("no channel entries found that replace %s", name) - return + return nil, err } - return + return entries, nil } func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, channelName string) (*api.Bundle, error) { @@ -365,13 +366,13 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c if !rows.Next() { return nil, fmt.Errorf("no entry found for %s %s", pkgName, channelName) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var outName sql.NullString var bundle sql.NullString var bundlePath sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil { return nil, err } @@ -389,7 +390,7 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -411,7 +412,7 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c return out, nil }
-func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*registry.ChannelEntry, err error) { +func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*registry.ChannelEntry, error) { // TODO: join on full fk, not just operatorbundlename query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, replaces.operatorbundle_name FROM channel_entry @@ -433,7 +434,7 @@ func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, ve } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries []*registry.ChannelEntry for rows.Next() { var pkgNameSQL sql.NullString @@ -441,7 +442,7 @@ func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, ve var bundleNameSQL sql.NullString var replacesSQL sql.NullString if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL); err != nil { - return + return nil, err } entries = append(entries, &registry.ChannelEntry{ @@ -453,13 +454,13 @@ func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, ve } if len(entries) == 0 { err = fmt.Errorf("no channel entries found that provide %s %s %s", group, version, kind) - return + return nil, err } - return + return entries, nil } // Get latest channel entries that provide an api -func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*registry.ChannelEntry, err error) { +func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*registry.ChannelEntry, error) { query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, replaces.operatorbundle_name, MIN(channel_entry.depth) FROM channel_entry INNER JOIN properties ON channel_entry.operatorbundle_name = properties.operatorbundle_name @@ -482,15 +483,15 @@ func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, gro } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries []*registry.ChannelEntry for rows.Next() { var pkgNameSQL sql.NullString var channelNameSQL sql.NullString var bundleNameSQL sql.NullString var replacesSQL sql.NullString - var min_depth sql.NullInt64 - if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &min_depth); err != nil { + var minDepth sql.NullInt64 + if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &minDepth); err != nil { return nil, err } @@ -518,7 +519,7 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio WHERE properties.type = ? AND properties.value = ?
AND package.default_channel = channel_entry.channel_name GROUP BY channel_entry.package_name, channel_entry.channel_name` - value, err := json.Marshal(map[string]string{ + value, _ := json.Marshal(map[string]string{ "group": group, "version": apiVersion, "kind": kind, @@ -532,17 +533,17 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio if !rows.Next() { return nil, fmt.Errorf("no entry found that provides %s %s %s", group, apiVersion, kind) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var bundle sql.NullString var bundlePath sql.NullString - var min_depth sql.NullInt64 + var minDepth sql.NullInt64 var bundleName sql.NullString var pkgName sql.NullString var channelName sql.NullString var replaces sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &bundle, &bundlePath, &min_depth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &bundle, &bundlePath, &minDepth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil { return nil, err } @@ -564,7 +565,7 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -627,7 +628,7 @@ func (s *SQLQuerier) GetImagesForBundle(ctx context.Context, csvName string) ([] return images, nil } -func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provided []*api.GroupVersionKind, required []*api.GroupVersionKind, err error) { +func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) ([]*api.GroupVersionKind, []*api.GroupVersionKind, error) { groups := map[string]struct{}{} kinds := map[string]struct{}{} versions := map[string]struct{}{} @@ -642,7 +643,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } defer providedRows.Close() - provided = []*api.GroupVersionKind{} + var provided []*api.GroupVersionKind for providedRows.Next() { var value sql.NullString @@ -678,7 +679,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } defer requiredRows.Close() - required = []*api.GroupVersionKind{} + var required []*api.GroupVersionKind for requiredRows.Next() { var value sql.NullString @@ -770,7 +771,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } required[i].Plural = plural } - return + return provided, required, nil } func (s *SQLQuerier) GetBundleVersion(ctx context.Context, image string) (string, error) { @@ -809,6 +810,7 @@ func (s *SQLQuerier) GetBundlePathsForPackage(ctx context.Context, pkgName strin return nil, err } if imgName.Valid && imgName.String == "" { + // nolint: stylecheck return nil, fmt.Errorf("Index malformed: cannot find paths to bundle images") } images = append(images, imgName.String) @@ -844,6 +846,7 @@ func (s *SQLQuerier) GetBundlesForPackage(ctx context.Context, pkgName string) ( key.Version = version.String } if key.IsEmpty() { + // nolint: stylecheck return nil, fmt.Errorf("Index malformed: cannot find identifier for bundle in package %s", pkgName) } bundles[key] = struct{}{} @@ -1047,7 +1050,7 @@ func (s *SQLQuerier) SendBundles(ctx context.Context, stream registry.BundleSend return err } } - buildLegacyRequiredAPIs(out.Dependencies, 
&out.RequiredApis) + _ = buildLegacyRequiredAPIs(out.Dependencies, &out.RequiredApis) out.Dependencies = uniqueDeps(out.Dependencies) if props.Valid { @@ -1055,7 +1058,7 @@ func (s *SQLQuerier) SendBundles(ctx context.Context, stream registry.BundleSend return err } } - buildLegacyProvidedAPIs(out.Properties, &out.ProvidedApis) + _ = buildLegacyProvidedAPIs(out.Properties, &out.ProvidedApis) out.Properties = uniqueProps(out.Properties) if err := stream.Send(out); err != nil { return err @@ -1079,7 +1082,6 @@ func (s *SQLQuerier) ListBundles(ctx context.Context) ([]*api.Bundle, error) { return nil, err } return bundleSender, nil - } func buildLegacyRequiredAPIs(src []*api.Dependency, dst *[]*api.GroupVersionKind) error { @@ -1150,7 +1152,7 @@ func uniqueProps(props []*api.Property) []*api.Property { return list } -func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version, path string) (dependencies []*api.Dependency, err error) { +func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version, path string) ([]*api.Dependency, error) { depQuery := `SELECT DISTINCT type, value FROM dependencies WHERE operatorbundle_name=? AND (operatorbundle_version=? OR operatorbundle_version is NULL) @@ -1162,7 +1164,7 @@ func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version } defer rows.Close() - dependencies = []*api.Dependency{} + var dependencies []*api.Dependency for rows.Next() { var typeName sql.NullString var value sql.NullString @@ -1179,10 +1181,10 @@ func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version }) } - return + return dependencies, nil } -func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, path string) (properties []*api.Property, err error) { +func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, path string) ([]*api.Property, error) { propQuery := `SELECT DISTINCT type, value FROM properties WHERE operatorbundle_name=? AND (operatorbundle_version=? OR operatorbundle_version is NULL) @@ -1194,7 +1196,7 @@ func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, } defer rows.Close() - properties = []*api.Property{} + var properties []*api.Property for rows.Next() { var typeName sql.NullString var value sql.NullString @@ -1211,10 +1213,10 @@ func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, }) } - return + return properties, nil } -func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName string) (bundlePath string, err error) { +func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName string) (string, error) { getBundlePathQuery := ` SELECT bundlepath FROM operatorbundle @@ -1222,26 +1224,27 @@ func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName strin rows, err := s.db.QueryContext(ctx, getBundlePathQuery, bundleName) if err != nil { - return + return "", err } defer rows.Close() if !rows.Next() { // no bundlepath set err = registry.ErrBundleImageNotInDatabase - return + return "", err } var bundlePathSQL sql.NullString if err = rows.Scan(&bundlePathSQL); err != nil { - return + return "", err } + var bundlePath string if bundlePathSQL.Valid { bundlePath = bundlePathSQL.String } - return + return bundlePath, nil } // ListRegistryBundles returns a set of registry bundles. 
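Note on the sqlite hunks above: the migrator and querier now match the sql.ErrNoRows sentinel with errors.Is instead of ==, and functions with named results now return explicit values. A minimal standalone sketch (not part of the patch; the error message text is illustrative) of why errors.Is is the safer comparison once errors are wrapped:

```go
// Illustration only: direct comparison misses a wrapped sentinel, while
// errors.Is walks the %w chain. This is the motivation for rewriting
// `case err == sql.ErrNoRows` as `case errors.Is(err, sql.ErrNoRows)`.
package main

import (
	"database/sql"
	"errors"
	"fmt"
)

func main() {
	wrapped := fmt.Errorf("query version table: %w", sql.ErrNoRows)

	fmt.Println(wrapped == sql.ErrNoRows)          // false: the sentinel is buried inside the wrapper
	fmt.Println(errors.Is(wrapped, sql.ErrNoRows)) // true: errors.Is unwraps the chain and matches
}
```

The explicit `return nil, err` rewrites serve the same readability goal: each exit path states what it returns instead of relying on a bare `return` with named results.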
diff --git a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/api.go b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/api.go index 696d7bcca..d38f795ec 100644 --- a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/api.go +++ b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/api.go @@ -108,6 +108,9 @@ func (p *createAPISubcommand) UpdateMetadata(cliMeta plugin.CLIMetadata, subcmdM $ %[1]s create api \ --helm-chart=/path/to/local/chart-archives/app-1.2.3.tgz + + $ %[1]s create api \ + --helm-chart=oci://charts.mycompany.com/example-namespace/app:1.2.3 `, cliMeta.CommandName) } diff --git a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/chartutil/chart.go b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/chartutil/chart.go index 5587c2175..59a3b8baf 100644 --- a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/chartutil/chart.go +++ b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/chartutil/chart.go @@ -27,6 +27,7 @@ import ( "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/downloader" "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/registry" "helm.sh/helm/v3/pkg/repo" ) @@ -126,11 +127,19 @@ func LoadChart(opts Options) (*chart.Chart, error) { func downloadChart(destDir string, opts Options) (string, error) { settings := cli.New() getters := getter.All(settings) + + // Create registry client for OCI registry support + registryClient, err := registry.NewClient() + if err != nil { + return "", fmt.Errorf("failed to create registry client: %w", err) + } + c := downloader.ChartDownloader{ Out: os.Stderr, Getters: getters, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + RegistryClient: registryClient, } if opts.Repo != "" { @@ -182,6 +191,12 @@ func fetchChartDependencies(chartPath string) error { settings := cli.New() getters := getter.All(settings) + // Create registry client for OCI registry support + registryClient, err := registry.NewClient() + if err != nil { + return fmt.Errorf("failed to create registry client: %w", err) + } + out := &bytes.Buffer{} man := &downloader.Manager{ Out: out, @@ -189,6 +204,7 @@ func fetchChartDependencies(chartPath string) error { Getters: getters, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + RegistryClient: registryClient, } if err := man.Build(); err != nil { fmt.Println(out.String()) diff --git a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/scaffolds/internal/templates/makefile.go b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/scaffolds/internal/templates/makefile.go index 342e9b941..065625038 100644 --- a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/scaffolds/internal/templates/makefile.go +++ b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/helm/v1/scaffolds/internal/templates/makefile.go @@ -65,6 +65,9 @@ func (f *Makefile) SetTemplateDefaults() error { } const makefileTemplate = ` +# Container tool to use for building and pushing images +CONTAINER_TOOL ?= docker + # Image URL to use all building/pushing image targets IMG ?= {{ .Image }} @@ -96,11 +99,11 @@ run: helm-operator ## Run against the configured Kubernetes cluster in ~/.kube/c .PHONY: docker-build docker-build: ## 
Build docker image with the manager. - docker build -t ${IMG} . + $(CONTAINER_TOOL) build -t ${IMG} . .PHONY: docker-push docker-push: ## Push docker image with the manager. - docker push ${IMG} + $(CONTAINER_TOOL) push ${IMG} # PLATFORMS defines the target platforms for the manager image be build to provide support to multiple # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: @@ -111,10 +114,10 @@ docker-push: ## Push docker image with the manager. PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le .PHONY: docker-buildx docker-buildx: ## Build and push docker image for the manager for cross-platform support - - docker buildx create --name project-v3-builder - docker buildx use project-v3-builder - - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile . - - docker buildx rm project-v3-builder + - $(CONTAINER_TOOL) buildx create --name project-v3-builder + $(CONTAINER_TOOL) buildx use project-v3-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile . + - $(CONTAINER_TOOL) buildx rm project-v3-builder ##@ Deployment diff --git a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/manifests/v2/init.go b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/manifests/v2/init.go index 9d1708cf5..81cec65e1 100644 --- a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/manifests/v2/init.go +++ b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/manifests/v2/init.go @@ -21,6 +21,8 @@ import ( "path/filepath" "strings" + "sigs.k8s.io/kubebuilder/v4/pkg/plugin/util" + "github.com/spf13/afero" "sigs.k8s.io/kubebuilder/v4/pkg/config" "sigs.k8s.io/kubebuilder/v4/pkg/machinery" @@ -33,7 +35,7 @@ import ( // Version of `opm` to download and use for building index images. // This version's release artifacts *must* contain a binary for multiple arches; certain releases do not. -const opmVersion = "v1.23.0" +const opmVersion = "v1.55.0" const filePath = "Makefile" @@ -125,6 +127,15 @@ func (s *initSubcommand) Scaffold(fs machinery.Filesystem) error { return err } + // TODO: remove this when we bump kubebuilder to v5.x + // Not adopt changes introduced by mistake in the default Makefile of kubebuilder v4.x. + if operatorType == projutil.OperatorTypeGo { + err = util.ReplaceInFile("Makefile", "$(KIND) create cluster --name $(KIND_CLUSTER)", makefileTestFix) + if err != nil { + return fmt.Errorf("error replacing Makefile: %w", err) + } + } + return nil } @@ -230,6 +241,7 @@ bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metada ` makefileBundleFragmentNonGo = ` + .PHONY: bundle bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. $(OPERATOR_SDK) generate kustomize manifests -q @@ -241,7 +253,7 @@ bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then v makefileBundleBuildPushFragment = ` .PHONY: bundle-build bundle-build: ## Build the bundle image. - docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + $(CONTAINER_TOOL) build -f bundle.Dockerfile -t $(BUNDLE_IMG) . .PHONY: bundle-push bundle-push: ## Push the bundle image. @@ -303,11 +315,22 @@ endif # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator .PHONY: catalog-build catalog-build: opm ## Build a catalog image. 
- $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + $(OPM) index add --container-tool $(CONTAINER_TOOL) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) # Push the catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. $(MAKE) docker-push IMG=$(CATALOG_IMG) ` + + // TODO: remove it when we bump kubebuilder to v5.x + // We will not adopt this change since it did not work and was a bug introduced in the + // default Makefile of kubebuilder v4.x. + makefileTestFix = `@case "$$($(KIND) get clusters)" in \ + *"$(KIND_CLUSTER)"*) \ + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ + *) \ + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ + esac` ) diff --git a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/util/cleanup.go b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/util/cleanup.go index 536a2804d..87a95f2c8 100644 --- a/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/util/cleanup.go +++ b/tools/vendor/github.com/operator-framework/operator-sdk/internal/plugins/util/cleanup.go @@ -72,116 +72,227 @@ func UpdateKustomizationsInit() error { return fmt.Errorf("remove %s resources: %v", defaultKFile, err) } + if err := kbutil.ReplaceInFile(defaultKFile, + `# Uncomment the patches line if you enable Metrics and CertManager +# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. +# This patch will protect the metrics with certManager self-signed certs. +#- path: cert_metrics_manager_patch.yaml +# target: +# kind: Deployment`, ""); err != nil { + return fmt.Errorf("remove %s resources: %v", defaultKFile, err) + } + + // Remove the file not used for Helm projects since we do not scaffold the cert-manager + certPatchPath := filepath.Join("config", "default", "cert_metrics_manager_patch.yaml") + if err := os.Remove(certPatchPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove unused file %s: %v", certPatchPath, err) + } + + // Remove the file not used for Helm projects since we do not scaffold the cert-manager + monitorTLSPatchPath := filepath.Join("config", "prometheus", "monitor_tls_patch.yaml") + if err := os.Remove(monitorTLSPatchPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove unused file %s: %v", monitorTLSPatchPath, err) + } + + prometheusKustomize := filepath.Join("config", "prometheus", "kustomization.yaml") + if err := kbutil.ReplaceInFile(prometheusKustomize, + `# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus +# to securely reference certificates created and managed by cert-manager. +# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml +# to mount the "metrics-server-cert" secret in the Manager Deployment. +#patches: +# - path: monitor_tls_patch.yaml +# target: +# kind: ServiceMonitor`, ""); err != nil { + return fmt.Errorf("remove %s resources: %v", defaultKFile, err) + } + if err := kbutil.ReplaceInFile(defaultKFile, ` # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml #- path: manager_webhook_patch.yaml - -# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
-# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. -# 'CERTMANAGER' needs to be enabled to use ca injection -#- path: webhookcainjection_patch.yaml +# target: +# kind: Deployment # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. # Uncomment the following replacements to add the cert-manager CA injection annotations #replacements: -# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldPath: .metadata.namespace # namespace of the certificate CR -# targets: -# - select: -# kind: ValidatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - select: -# kind: MutatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - select: -# kind: CustomResourceDefinition -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 0 -# create: true -# - source: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldPath: .metadata.name -# targets: -# - select: -# kind: ValidatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - select: -# kind: MutatingWebhookConfiguration -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - select: -# kind: CustomResourceDefinition -# fieldPaths: -# - .metadata.annotations.[cert-manager.io/inject-ca-from] -# options: -# delimiter: '/' -# index: 1 -# create: true -# - source: # Add cert-manager annotation to the webhook Service -# kind: Service -# version: v1 -# name: webhook-service -# fieldPath: .metadata.name # namespace of the service -# targets: -# - select: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# fieldPaths: -# - .spec.dnsNames.0 -# - .spec.dnsNames.1 -# options: -# delimiter: '.' -# index: 0 -# create: true -# - source: -# kind: Service -# version: v1 -# name: webhook-service -# fieldPath: .metadata.namespace # namespace of the service -# targets: -# - select: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# fieldPaths: -# - .spec.dnsNames.0 -# - .spec.dnsNames.1 -# options: -# delimiter: '.' -# index: 1 -# create: true +# - source: # Uncomment the following block to enable certificates for metrics +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.name +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' 
+# index: 0 +# create: true +# +# - source: +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.namespace +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true +# - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have any webhook +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # Name of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # Namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# +# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 
+# +kubebuilder:scaffold:crdkustomizecainjectionns +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. +# +kubebuilder:scaffold:crdkustomizecainjectionname `, ""); err != nil { return fmt.Errorf("remove %s patch and vars blocks: %v", defaultKFile, err) } diff --git a/tools/vendor/github.com/otiai10/copy/README.md b/tools/vendor/github.com/otiai10/copy/README.md index 7c8b4e05a..9f3dce74d 100644 --- a/tools/vendor/github.com/otiai10/copy/README.md +++ b/tools/vendor/github.com/otiai10/copy/README.md @@ -10,6 +10,8 @@ [![GitHub tag (latest SemVer)](https://img.shields.io/github/v/tag/otiai10/copy?sort=semver)](https://pkg.go.dev/github.com/otiai10/copy) [![Docker Test](https://github.com/otiai10/copy/actions/workflows/docker-test.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/docker-test.yml) [![Vagrant Test](https://github.com/otiai10/copy/actions/workflows/vagrant-test.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/vagrant-test.yml) +[![GopherJS](https://github.com/otiai10/copy/actions/workflows/gopherjs.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/gopherjs.yml) +[![Go WASM](https://github.com/otiai10/copy/actions/workflows/wasm.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/wasm.yml) `copy` copies directories recursively. @@ -47,6 +49,10 @@ type Options struct { // Skip can specify which files should be skipped Skip func(srcinfo os.FileInfo, src, dest string) (bool, error) + // RenameDestination can rename destination. + // If not set, nil, it does nothing. + RenameDestination func(src, dest string) (string, error) + // PermissionControl can control permission of // every entry. 
// When you want to add permission 0222, do like diff --git a/tools/vendor/github.com/otiai10/copy/copy.go b/tools/vendor/github.com/otiai10/copy/copy.go index 2979af937..f9787cd9c 100644 --- a/tools/vendor/github.com/otiai10/copy/copy.go +++ b/tools/vendor/github.com/otiai10/copy/copy.go @@ -4,7 +4,6 @@ import ( "context" "io" "io/fs" - "io/ioutil" "os" "path/filepath" "time" @@ -47,6 +46,12 @@ func switchboard(src, dest string, info os.FileInfo, opt Options) (err error) { return onError(src, dest, err, opt) } + if opt.RenameDestination != nil { + if dest, err = opt.RenameDestination(src, dest); err != nil { + return onError(src, dest, err, opt) + } + } + switch { case info.Mode()&os.ModeSymlink != 0: err = onsymlink(src, dest, opt) @@ -167,28 +172,29 @@ func dcopy(srcdir, destdir string, info os.FileInfo, opt Options) (err error) { } defer chmodfunc(&err) - var contents []os.FileInfo + var entries []fs.DirEntry if opt.FS != nil { - entries, err := fs.ReadDir(opt.FS, srcdir) + entries, err = fs.ReadDir(opt.FS, srcdir) if err != nil { return err } - for _, e := range entries { - info, err := e.Info() - if err != nil { - return err + } else { + entries, err = os.ReadDir(srcdir) + if err != nil { + if os.IsNotExist(err) { + return nil } - contents = append(contents, info) + return err } - } else { - contents, err = ioutil.ReadDir(srcdir) } - if err != nil { - if os.IsNotExist(err) { - return nil + contents := make([]fs.FileInfo, 0, len(entries)) + for _, e := range entries { + info, err := e.Info() + if err != nil { + return err } - return + contents = append(contents, info) } if yes, err := shouldCopyDirectoryConcurrent(opt, srcdir, destdir); err != nil { @@ -286,6 +292,10 @@ func onsymlink(src, dest string, opt Options) error { if err != nil { return err } + if !filepath.IsAbs(orig) { + // orig is a relative link: need to add src dir to orig + orig = filepath.Join(filepath.Dir(src), orig) + } info, err := os.Lstat(orig) if err != nil { return err @@ -301,18 +311,29 @@ func onsymlink(src, dest string, opt Options) error { // lcopy is for a symlink, // with just creating a new symlink by replicating src symlink. func lcopy(src, dest string) error { - src, err := os.Readlink(src) + orig, err := os.Readlink(src) + // @See https://github.com/otiai10/copy/issues/111 + // TODO: This might be controlled by Options in the future. if err != nil { - if os.IsNotExist(err) { - return nil + if os.IsNotExist(err) { // Copy symlink even if not existing + return os.Symlink(src, dest) } return err } - return os.Symlink(src, dest) + + // @See https://github.com/otiai10/copy/issues/132 + // TODO: Control by SymlinkExistsAction + if _, err := os.Lstat(dest); err == nil { + if err := os.Remove(dest); err != nil { + return err + } + } + + return os.Symlink(orig, dest) } // fclose ANYHOW closes file, -// with asiging error raised during Close, +// with assigning error raised during Close, // BUT respecting the error already reported. func fclose(f io.Closer, reported *error) { if err := f.Close(); *reported == nil { @@ -321,7 +342,7 @@ func fclose(f io.Closer, reported *error) { } // onError lets caller to handle errors -// occured when copying a file. +// occurred when copying a file. 
func onError(src, dest string, err error, opt Options) error { if opt.OnError == nil { return err diff --git a/tools/vendor/github.com/otiai10/copy/copy_namedpipes.go b/tools/vendor/github.com/otiai10/copy/copy_namedpipes.go index 615ddcd55..657fb3812 100644 --- a/tools/vendor/github.com/otiai10/copy/copy_namedpipes.go +++ b/tools/vendor/github.com/otiai10/copy/copy_namedpipes.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 && !netbsd && !aix && !illumos && !solaris && !js -// +build !windows,!plan9,!netbsd,!aix,!illumos,!solaris,!js package copy diff --git a/tools/vendor/github.com/otiai10/copy/copy_namedpipes_x.go b/tools/vendor/github.com/otiai10/copy/copy_namedpipes_x.go index 38dd9dc72..da3d6f796 100644 --- a/tools/vendor/github.com/otiai10/copy/copy_namedpipes_x.go +++ b/tools/vendor/github.com/otiai10/copy/copy_namedpipes_x.go @@ -1,5 +1,4 @@ //go:build windows || plan9 || netbsd || aix || illumos || solaris || js -// +build windows plan9 netbsd aix illumos solaris js package copy diff --git a/tools/vendor/github.com/otiai10/copy/fileinfo_go1.15.go b/tools/vendor/github.com/otiai10/copy/fileinfo_go1.15.go deleted file mode 100644 index c0708eaf1..000000000 --- a/tools/vendor/github.com/otiai10/copy/fileinfo_go1.15.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !go1.16 -// +build !go1.16 - -package copy - -import "os" - -// This is a cloned definition of os.FileInfo (go1.15) or fs.FileInfo (go1.16~) -// A FileInfo describes a file and is returned by Stat. -type fileInfo interface { - // Name() string // base name of the file - // Size() int64 // length in bytes for regular files; system-dependent for others - Mode() os.FileMode // file mode bits - // ModTime() time.Time // modification time - IsDir() bool // abbreviation for Mode().IsDir() - Sys() interface{} // underlying data source (can return nil) -} diff --git a/tools/vendor/github.com/otiai10/copy/fileinfo_go1.16.go b/tools/vendor/github.com/otiai10/copy/fileinfo_go1.16.go deleted file mode 100644 index 01b3fd249..000000000 --- a/tools/vendor/github.com/otiai10/copy/fileinfo_go1.16.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package copy - -import "io/fs" - -// This is a cloned definition of os.FileInfo (go1.15) or fs.FileInfo (go1.16~) -// A FileInfo describes a file and is returned by Stat. -type fileInfo interface { - // Name() string // base name of the file - // Size() int64 // length in bytes for regular files; system-dependent for others - Mode() fs.FileMode // file mode bits - // ModTime() time.Time // modification time - IsDir() bool // abbreviation for Mode().IsDir() - Sys() interface{} // underlying data source (can return nil) -} diff --git a/tools/vendor/github.com/otiai10/copy/options.go b/tools/vendor/github.com/otiai10/copy/options.go index 1fbfcb14a..c1db48c8c 100644 --- a/tools/vendor/github.com/otiai10/copy/options.go +++ b/tools/vendor/github.com/otiai10/copy/options.go @@ -24,6 +24,9 @@ type Options struct { // Skip can specify which files should be skipped Skip func(srcinfo os.FileInfo, src, dest string) (bool, error) + // RenameDestination can specify the destination file or dir name if needed to rename. + RenameDestination func(src, dest string) (string, error) + // Specials includes special files to be copied. default false. 
Specials bool diff --git a/tools/vendor/github.com/otiai10/copy/permission_control.go b/tools/vendor/github.com/otiai10/copy/permission_control.go index 97ae12d8e..901a84514 100644 --- a/tools/vendor/github.com/otiai10/copy/permission_control.go +++ b/tools/vendor/github.com/otiai10/copy/permission_control.go @@ -1,6 +1,7 @@ package copy import ( + "io/fs" "os" ) @@ -11,11 +12,11 @@ const ( tmpPermissionForDirectory = os.FileMode(0755) ) -type PermissionControlFunc func(srcinfo fileInfo, dest string) (chmodfunc func(*error), err error) +type PermissionControlFunc func(srcinfo fs.FileInfo, dest string) (chmodfunc func(*error), err error) var ( AddPermission = func(perm os.FileMode) PermissionControlFunc { - return func(srcinfo fileInfo, dest string) (func(*error), error) { + return func(srcinfo fs.FileInfo, dest string) (func(*error), error) { orig := srcinfo.Mode() if srcinfo.IsDir() { if err := os.MkdirAll(dest, tmpPermissionForDirectory); err != nil { @@ -28,7 +29,7 @@ var ( } } PerservePermission PermissionControlFunc = AddPermission(0) - DoNothing PermissionControlFunc = func(srcinfo fileInfo, dest string) (func(*error), error) { + DoNothing PermissionControlFunc = func(srcinfo fs.FileInfo, dest string) (func(*error), error) { if srcinfo.IsDir() { if err := os.MkdirAll(dest, srcinfo.Mode()); err != nil { return func(*error) {}, err @@ -39,7 +40,7 @@ var ( ) // chmod ANYHOW changes file mode, -// with asiging error raised during Chmod, +// with assigning error raised during Chmod, // BUT respecting the error already reported. func chmod(dir string, mode os.FileMode, reported *error) { if err := os.Chmod(dir, mode); *reported == nil { diff --git a/tools/vendor/github.com/otiai10/copy/preserve_ltimes.go b/tools/vendor/github.com/otiai10/copy/preserve_ltimes.go index cc006d375..6b6787b2a 100644 --- a/tools/vendor/github.com/otiai10/copy/preserve_ltimes.go +++ b/tools/vendor/github.com/otiai10/copy/preserve_ltimes.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 && !js -// +build !windows,!plan9,!js package copy diff --git a/tools/vendor/github.com/otiai10/copy/preserve_ltimes_x.go b/tools/vendor/github.com/otiai10/copy/preserve_ltimes_x.go index 02aec40be..5ef234d56 100644 --- a/tools/vendor/github.com/otiai10/copy/preserve_ltimes_x.go +++ b/tools/vendor/github.com/otiai10/copy/preserve_ltimes_x.go @@ -1,5 +1,4 @@ //go:build windows || js || plan9 -// +build windows js plan9 package copy diff --git a/tools/vendor/github.com/otiai10/copy/preserve_owner.go b/tools/vendor/github.com/otiai10/copy/preserve_owner.go index 13ec4f579..bd129644f 100644 --- a/tools/vendor/github.com/otiai10/copy/preserve_owner.go +++ b/tools/vendor/github.com/otiai10/copy/preserve_owner.go @@ -1,14 +1,14 @@ //go:build !windows && !plan9 -// +build !windows,!plan9 package copy import ( + "io/fs" "os" "syscall" ) -func preserveOwner(src, dest string, info fileInfo) (err error) { +func preserveOwner(src, dest string, info fs.FileInfo) (err error) { if info == nil { if info, err = os.Stat(src); err != nil { return err diff --git a/tools/vendor/github.com/otiai10/copy/preserve_owner_x.go b/tools/vendor/github.com/otiai10/copy/preserve_owner_x.go index 9d8257400..1e8f1251d 100644 --- a/tools/vendor/github.com/otiai10/copy/preserve_owner_x.go +++ b/tools/vendor/github.com/otiai10/copy/preserve_owner_x.go @@ -1,8 +1,9 @@ //go:build windows || plan9 -// +build windows plan9 package copy -func preserveOwner(src, dest string, info fileInfo) (err error) { +import "io/fs" + +func preserveOwner(src, dest string, info fs.FileInfo) 
(err error) { return nil } diff --git a/tools/vendor/github.com/otiai10/copy/stat_times.go b/tools/vendor/github.com/otiai10/copy/stat_times.go index 75f45f6e2..49ea67c27 100644 --- a/tools/vendor/github.com/otiai10/copy/stat_times.go +++ b/tools/vendor/github.com/otiai10/copy/stat_times.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin && !freebsd && !plan9 && !netbsd && !js -// +build !windows,!darwin,!freebsd,!plan9,!netbsd,!js // TODO: add more runtimes diff --git a/tools/vendor/github.com/otiai10/copy/stat_times_darwin.go b/tools/vendor/github.com/otiai10/copy/stat_times_darwin.go index d4c23d8ef..935ce1d79 100644 --- a/tools/vendor/github.com/otiai10/copy/stat_times_darwin.go +++ b/tools/vendor/github.com/otiai10/copy/stat_times_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package copy diff --git a/tools/vendor/github.com/otiai10/copy/stat_times_freebsd.go b/tools/vendor/github.com/otiai10/copy/stat_times_freebsd.go index 5309334ef..1deb1cc4e 100644 --- a/tools/vendor/github.com/otiai10/copy/stat_times_freebsd.go +++ b/tools/vendor/github.com/otiai10/copy/stat_times_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package copy diff --git a/tools/vendor/github.com/otiai10/copy/stat_times_js.go b/tools/vendor/github.com/otiai10/copy/stat_times_js.go index c645771ca..a4b1e288f 100644 --- a/tools/vendor/github.com/otiai10/copy/stat_times_js.go +++ b/tools/vendor/github.com/otiai10/copy/stat_times_js.go @@ -1,5 +1,4 @@ //go:build js -// +build js package copy diff --git a/tools/vendor/github.com/otiai10/copy/stat_times_windows.go b/tools/vendor/github.com/otiai10/copy/stat_times_windows.go index d6a84a769..babfe7d9d 100644 --- a/tools/vendor/github.com/otiai10/copy/stat_times_windows.go +++ b/tools/vendor/github.com/otiai10/copy/stat_times_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package copy diff --git a/tools/vendor/github.com/otiai10/copy/stat_times_x.go b/tools/vendor/github.com/otiai10/copy/stat_times_x.go index 886ddd3fd..53da32e2c 100644 --- a/tools/vendor/github.com/otiai10/copy/stat_times_x.go +++ b/tools/vendor/github.com/otiai10/copy/stat_times_x.go @@ -1,5 +1,4 @@ //go:build plan9 || netbsd -// +build plan9 netbsd package copy diff --git a/tools/vendor/github.com/otiai10/copy/symlink_test_x.go b/tools/vendor/github.com/otiai10/copy/symlink_test_x.go new file mode 100644 index 000000000..1f6bb1f46 --- /dev/null +++ b/tools/vendor/github.com/otiai10/copy/symlink_test_x.go @@ -0,0 +1,45 @@ +//go:build windows || plan9 || netbsd || aix || illumos || solaris || js + +package copy + +import ( + "os" + "testing" + + . 
"github.com/otiai10/mint" +) + +func TestOptions_OnSymlink(t *testing.T) { + opt := Options{OnSymlink: func(string) SymlinkAction { return Deep }} + err := Copy("test/data/case03", "test/data.copy/case03.deep", opt) + Expect(t, err).ToBe(nil) + info, err := os.Lstat("test/data.copy/case03.deep/case01") + Expect(t, err).ToBe(nil) + Expect(t, info.Mode()&os.ModeSymlink).ToBe(os.FileMode(0)) + + opt = Options{OnSymlink: func(string) SymlinkAction { return Shallow }} + err = Copy("test/data/case03", "test/data.copy/case03.shallow", opt) + Expect(t, err).ToBe(nil) + info, err = os.Lstat("test/data.copy/case03.shallow/case01") + Expect(t, err).ToBe(nil) + Expect(t, info.Mode()&os.ModeSymlink).Not().ToBe(os.FileMode(0)) + + opt = Options{OnSymlink: func(string) SymlinkAction { return Skip }} + err = Copy("test/data/case03", "test/data.copy/case03.skip", opt) + Expect(t, err).ToBe(nil) + _, err = os.Stat("test/data.copy/case03.skip/case01") + Expect(t, os.IsNotExist(err)).ToBe(true) + + err = Copy("test/data/case03", "test/data.copy/case03.default") + Expect(t, err).ToBe(nil) + info, err = os.Lstat("test/data.copy/case03.default/case01") + Expect(t, err).ToBe(nil) + Expect(t, info.Mode()&os.ModeSymlink).Not().ToBe(os.FileMode(0)) + + opt = Options{OnSymlink: nil} + err = Copy("test/data/case03", "test/data.copy/case03.not-specified", opt) + Expect(t, err).ToBe(nil) + info, err = os.Lstat("test/data.copy/case03.not-specified/case01") + Expect(t, err).ToBe(nil) + Expect(t, info.Mode()&os.ModeSymlink).Not().ToBe(os.FileMode(0)) +} diff --git a/tools/vendor/github.com/otiai10/copy/test_setup.go b/tools/vendor/github.com/otiai10/copy/test_setup.go deleted file mode 100644 index 64a529278..000000000 --- a/tools/vendor/github.com/otiai10/copy/test_setup.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !windows && !plan9 && !netbsd && !aix && !illumos && !solaris && !js -// +build !windows,!plan9,!netbsd,!aix,!illumos,!solaris,!js - -package copy - -import ( - "os" - "syscall" - "testing" -) - -func setup(m *testing.M) { - os.RemoveAll("test/data.copy") - os.MkdirAll("test/data.copy", os.ModePerm) - os.Symlink("test/data/case01", "test/data/case03/case01") - os.Chmod("test/data/case07/dir_0555", 0o555) - os.Chmod("test/data/case07/file_0444", 0o444) - syscall.Mkfifo("test/data/case11/foo/bar", 0o555) - Copy("test/data/case18/assets", "test/data/case18/assets.backup") -} diff --git a/tools/vendor/github.com/otiai10/copy/test_setup_x.go b/tools/vendor/github.com/otiai10/copy/test_setup_x.go deleted file mode 100644 index 4c35b144b..000000000 --- a/tools/vendor/github.com/otiai10/copy/test_setup_x.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build windows || plan9 || netbsd || aix || illumos || solaris || js -// +build windows plan9 netbsd aix illumos solaris js - -package copy - -import ( - "os" - "testing" -) - -func setup(m *testing.M) { - os.RemoveAll("test/data.copy") - os.MkdirAll("test/data.copy", os.ModePerm) - os.Symlink("test/data/case01", "test/data/case03/case01") - os.Chmod("test/data/case07/dir_0555", 0555) - os.Chmod("test/data/case07/file_0444", 0444) -} diff --git a/tools/vendor/github.com/otiai10/mint/.gitignore b/tools/vendor/github.com/otiai10/mint/.gitignore new file mode 100644 index 000000000..6ae51791e --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/.gitignore @@ -0,0 +1,2 @@ +coverage.txt +vendor diff --git a/tools/vendor/github.com/otiai10/mint/LICENSE b/tools/vendor/github.com/otiai10/mint/LICENSE new file mode 100644 index 000000000..a5bad7fc4 --- /dev/null +++ 
b/tools/vendor/github.com/otiai10/mint/LICENSE @@ -0,0 +1,7 @@ +Copyright 2017 otiai10 (Hiromu OCHIAI) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/tools/vendor/github.com/otiai10/mint/README.md b/tools/vendor/github.com/otiai10/mint/README.md new file mode 100644 index 000000000..06caae039 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/README.md @@ -0,0 +1,62 @@ +# mint + +[![Go](https://github.com/otiai10/mint/actions/workflows/go.yml/badge.svg)](https://github.com/otiai10/mint/actions/workflows/go.yml) +[![codecov](https://codecov.io/gh/otiai10/mint/branch/master/graph/badge.svg)](https://codecov.io/gh/otiai10/mint) +[![Go Report Card](https://goreportcard.com/badge/github.com/otiai10/mint)](https://goreportcard.com/report/github.com/otiai10/mint) +[![GoDoc](https://godoc.org/github.com/otiai10/mint?status.png)](https://godoc.org/github.com/otiai10/mint) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/v/tag/otiai10/mint?sort=semver)](https://pkg.go.dev/github.com/otiai10/mint) +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fotiai10%2Fmint.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fotiai10%2Fmint?ref=badge_shield) + +The very minimum assertion for Go. + +```go +package your_test + +import ( + "testing" + "pkg/your" + . "github.com/otiai10/mint" +) + +func TestFoo(t *testing.T) { + + foo := your.Foo() + Expect(t, foo).ToBe(1234) + Expect(t, foo).TypeOf("int") + Expect(t, foo).Not().ToBe(nil) + Expect(t, func() { yourFunc() }).Exit(1) + + // If assertion failed, exit 1 with message. + Expect(t, foo).ToBe("foobarbuz") + + // You can run assertions without os.Exit + res := Expect(t, foo).Dry().ToBe("bar") + // res.OK() == false + + // You can omit repeated `t`. + m := mint.Blend(t) + m.Expect(foo).ToBe(1234) +} +``` + +# features + +- Simple syntax +- Loosely coupled +- Plain implementation + +# tests +``` +go test ./... 
+``` + +# use cases + +Projects below use `mint` + +- [github.com/otiai10/gosseract](https://github.com/otiai10/gosseract/blob/master/all_test.go) +- [github.com/otiai10/marmoset](https://github.com/otiai10/marmoset/blob/master/all_test.go#L168-L190) + + +## License +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fotiai10%2Fmint.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fotiai10%2Fmint?ref=badge_large) \ No newline at end of file diff --git a/tools/vendor/github.com/otiai10/mint/because.go b/tools/vendor/github.com/otiai10/mint/because.go new file mode 100644 index 000000000..6d496cee7 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/because.go @@ -0,0 +1,15 @@ +package mint + +import "testing" + +// Because is a context printer. +func Because(t *testing.T, context string, wrapper func(*testing.T)) { + Log(" Because ", context, "\n") + wrapper(t) +} + +// When is an alternative to `Because` +func When(t *testing.T, context string, wrapper func(*testing.T)) { + Log(" When ", context, "\n") + wrapper(t) +} diff --git a/tools/vendor/github.com/otiai10/mint/comparer.go b/tools/vendor/github.com/otiai10/mint/comparer.go new file mode 100644 index 000000000..d543eb6d5 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/comparer.go @@ -0,0 +1,53 @@ +package mint + +import ( + "fmt" + "reflect" +) + +func getComparer(a, b interface{}, deeply bool) Comparer { + if deeply { + return deepComparer{} + } + switch reflect.ValueOf(a).Kind() { + case reflect.Slice: + return sliceComparer{} + case reflect.Map: + return mapComparer{} + } + if b == nil { + return nilComparer{} + } + return defaultComparer{} +} + +type Comparer interface { + Compare(a, b interface{}) bool +} + +type defaultComparer struct{} + +func (c defaultComparer) Compare(a, b interface{}) bool { + return a == b +} + +type deepComparer struct{} + +func (c deepComparer) Compare(a, b interface{}) bool { + return reflect.DeepEqual(a, b) +} + +type mapComparer struct { + deepComparer +} + +type sliceComparer struct { + deepComparer +} + +type nilComparer struct { +} + +func (c nilComparer) Compare(a, _ interface{}) bool { + return fmt.Sprintf("%v", a) == fmt.Sprintf("%v", nil) +} diff --git a/tools/vendor/github.com/otiai10/mint/exit.go b/tools/vendor/github.com/otiai10/mint/exit.go new file mode 100644 index 000000000..fc64ac963 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/exit.go @@ -0,0 +1,41 @@ +//go:build !freebsd +// +build !freebsd + +package mint + +// On the "freebsd/FreeBSD-10.4-STABLE" OS image, +// Go installed by `pkg install` might NOT have `syscall.Mprotect`, +// causing an error such as: "bou.ke/monkey/replace_unix.go:13:10: undefined: syscall.Mprotect". +// See https://www.freebsd.org/cgi/man.cgi?sektion=2&query=mprotect +// TODO: Fix the image for https://github.com/otiai10/gosseract/blob/master/test/runtimes/freebsd.Vagrantfile#L4 +/* + * "bou.ke/monkey" + */ // FIXME: This library was removed because of a LICENSE problem +// See https://github.com/otiai10/copy/issues/12 as well + +// Exit ... 
+func (testee *Testee) Exit(expectedCode int) MintResult { + + panic("`mint.Testee.Exit` method is temporarily deprecated.") + + /* + fun, ok := testee.actual.(func()) + if !ok { + panic("mint error: Exit can only be called for a func type value") + } + + var actualCode int + patch := monkey.Patch(os.Exit, func(code int) { + actualCode = code + }) + fun() + patch.Unpatch() + + testee.actual = actualCode + if judge(actualCode, expectedCode, testee.not, testee.deeply) { + return testee.result + } + testee.expected = expectedCode + return testee.failed(failExitCode) + */ +} diff --git a/tools/vendor/github.com/otiai10/mint/exit_freebsd.go b/tools/vendor/github.com/otiai10/mint/exit_freebsd.go new file mode 100644 index 000000000..d5eed6cf0 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/exit_freebsd.go @@ -0,0 +1,10 @@ +//go:build freebsd +// +build freebsd + +package mint + +// Exit ... +func (testee *Testee) Exit(expectedCode int) MintResult { + panic("Exit method can NOT be used on FreeBSD, for now.") + return MintResult{ok: false} +} diff --git a/tools/vendor/github.com/otiai10/mint/log.go b/tools/vendor/github.com/otiai10/mint/log.go new file mode 100644 index 000000000..6aa8f8dcf --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/log.go @@ -0,0 +1,15 @@ +package mint + +import ( + "fmt" + "os" +) + +// Log only outputs if the -v flag is given. +// This is because the standard "testing.T.Log" method decorates +// its caller: runtime.Caller(3) automatically. +func Log(args ...interface{}) { + if isVerbose(os.Args) { + fmt.Print(args...) + } +} diff --git a/tools/vendor/github.com/otiai10/mint/mint.go b/tools/vendor/github.com/otiai10/mint/mint.go new file mode 100644 index 000000000..a37e3c1d7 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/mint.go @@ -0,0 +1,86 @@ +package mint + +import ( + "os" + "testing" +) + +// Mint (mint.Mint) is a wrapper for *testing.T, +// blending the testing type to omit the repeated `t`. +type Mint struct { + t *testing.T +} + +var ( + failToBe = 0 + failType = 1 + failIn = 2 + failToMatch = 3 + failExitCode = 4 + scolds = map[int]string{ + failToBe: "%s:%d\n\tExpected %sto be\t`%+v`\n\tBut actual\t`%+v`", + failType: "%s:%d\n\tExpected %stype\t`%+v`\n\tBut actual\t`%T`", + failIn: "%s:%d\n\tExpected %sis in\t`%v`\n\tbut it's not", + failToMatch: "%s:%d\n\tExpected %v to match\t`%s`\n\tBut actual\t`%+v`", + failExitCode: "%s:%d\n\tExpected %sto exit with code `%d`\n\tBut actual\t`%d`", + } +) +var ( + redB = "\033[1;31m" + reset = "\033[0m" + colorize = map[string]func(string) string{ + "red": func(v string) string { + return redB + v + reset + }, + } +) + +// Blend provides (blended) *mint.Mint. +// You can save writing "t" repeatedly. +func Blend(t *testing.T) *Mint { + return &Mint{ + t, + } +} + +// Expect provides "*Testee". +// The blended mint is merely a proxy to instantiate testee. +func (m *Mint) Expect(actual interface{}) *Testee { + return expect(m.t, actual) +} + +// Expect provides "*mint.Testee". +// It has assertion methods such as "ToBe". +func Expect(t *testing.T, actual interface{}) *Testee { + return expect(t, actual) +} + +func expect(t *testing.T, actual interface{}) *Testee { + return &Testee{t: t, actual: actual, verbose: isVerbose(os.Args), result: MintResult{ok: true}} +} + +// Require provides "*mint.Testee", +// which stops execution of the goroutine when the assertion fails. 
+func Require(t *testing.T, actual interface{}) *Testee { + return require(t, actual) +} + +func require(t *testing.T, actual interface{}) *Testee { + return &Testee{t: t, actual: actual, verbose: isVerbose(os.Args), required: true, result: MintResult{ok: true}} +} + +func isVerbose(flags []string) bool { + for _, f := range flags { + if f == "-test.v=true" { + return true + } + } + return false +} +func judge(a, b interface{}, not, deeply bool) bool { + comparer := getComparer(a, b, deeply) + if not { + return !comparer.Compare(a, b) + } + return comparer.Compare(a, b) +} diff --git a/tools/vendor/github.com/otiai10/mint/mocks.go b/tools/vendor/github.com/otiai10/mint/mocks.go new file mode 100644 index 000000000..87feab494 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/mocks.go @@ -0,0 +1,30 @@ +package mint + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +type HTTPClientMock struct { + HTTPError error + ResponseStatusCode int + ResponseBody string +} + +func (hcm *HTTPClientMock) Handle() (res *http.Response, err error, ok bool) { + if hcm.HTTPError != nil { + err = hcm.HTTPError + ok = true + } + res = new(http.Response) + if hcm.ResponseBody != "" { + res.Body = ioutil.NopCloser(bytes.NewBufferString(hcm.ResponseBody)) + ok = true + } + if hcm.ResponseStatusCode != 0 { + res.StatusCode = hcm.ResponseStatusCode + ok = true + } + return res, err, ok +} diff --git a/tools/vendor/github.com/otiai10/mint/mquery/README.md b/tools/vendor/github.com/otiai10/mint/mquery/README.md new file mode 100644 index 000000000..4992930de --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/mquery/README.md @@ -0,0 +1,31 @@ +mquery +=== + +```go +package main + +import ( + "fmt" + + . "github.com/otiai10/mint/mquery" +) + +var m = map[string]interface{}{ + "foo": "bar", + "hoge": map[string]interface{}{ + "name": "otiai10", + }, + "fuga": map[int]map[string]interface{}{ + 0: {"greet": "Hello"}, + 1: {"greet": "こんにちは"}, + }, + "langs": []string{"Go", "JavaScript", "English"}, + "baz": nil, + "required": false, +} + +func main() { + fmt.Println( + Query(m, "foo"), // "bar" + Query(m, "hoge.name"), // "otiai10" + Query(m, "fuga.0.greet"), // "Hello" + Query(m, "langs.2"), // "English" + Query(m, "required"), // false + Query(m, "baz.biz"), // nil + ) +} +``` \ No newline at end of file diff --git a/tools/vendor/github.com/otiai10/mint/mquery/mquery.go b/tools/vendor/github.com/otiai10/mint/mquery/mquery.go new file mode 100644 index 000000000..2a7ddbac3 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/mquery/mquery.go @@ -0,0 +1,72 @@ +package mquery + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +func Query(m interface{}, q string) interface{} { + return query(m, strings.Split(q, ".")) +} + +func query(m interface{}, qs []string) interface{} { + t := reflect.TypeOf(m) + switch t.Kind() { + case reflect.Map: + return queryMap(m, t, qs) + case reflect.Slice: + return querySlice(m, t, qs) + default: + return m + } +} + +func queryMap(m interface{}, t reflect.Type, qs []string) interface{} { + if len(qs) == 0 { + return m + } + val := reflect.ValueOf(m) + if val.IsZero() { + return nil + } + switch t.Key().Kind() { + case reflect.String: + val := reflect.ValueOf(m).MapIndex(reflect.ValueOf(qs[0])) + if !val.IsValid() { + return nil + } + return query(val.Interface(), qs[1:]) + case reflect.Int: + i, err := strconv.Atoi(qs[0]) + if err != nil { + return fmt.Errorf("cannot access map with keyword: %s: %v", qs[0], err) + } + val := reflect.ValueOf(m).MapIndex(reflect.ValueOf(i)) + if !val.IsValid() { + return nil + } + return 
query(val.Interface(), qs[1:]) + } + return nil +} + +func querySlice(m interface{}, t reflect.Type, qs []string) interface{} { + if len(qs) == 0 { + return m + } + v := reflect.ValueOf(m) + if v.Len() == 0 { + return nil + } + i, err := strconv.Atoi(qs[0]) + if err != nil { + return fmt.Errorf("cannot access slice with keyword: %s: %v", qs[0], err) + } + if v.Len() <= i { + return nil + } + next := v.Index(i).Interface() + return query(next, qs[1:]) +} diff --git a/tools/vendor/github.com/otiai10/mint/result.go b/tools/vendor/github.com/otiai10/mint/result.go new file mode 100644 index 000000000..2ce38c056 --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/result.go @@ -0,0 +1,23 @@ +package mint + +// MintResult provides the result of an assertion, +// for the `Dry` option. +type MintResult struct { + ok bool + message string +} + +// OK returns whether the result is ok or not. +func (r MintResult) OK() bool { + return r.ok +} + +// NG is the opposite alias for OK(). +func (r MintResult) NG() bool { + return !r.ok +} + +// Message returns the failure message. +func (r MintResult) Message() string { + return r.message +} diff --git a/tools/vendor/github.com/otiai10/mint/testee.go b/tools/vendor/github.com/otiai10/mint/testee.go new file mode 100644 index 000000000..90537fc3b --- /dev/null +++ b/tools/vendor/github.com/otiai10/mint/testee.go @@ -0,0 +1,145 @@ +package mint + +import ( + "fmt" + "path/filepath" + "reflect" + "regexp" + "runtime" + "testing" + + "github.com/otiai10/mint/mquery" +) + +// Testee is a holder of the interfaces which the user wants to assert, +// and it also has its result. +type Testee struct { + t *testing.T + actual interface{} + expected interface{} + dry bool + not bool + deeply bool + result MintResult + required bool + verbose bool + + // origin string // Only used when querying +} + +// Query queries the actual value with the given query string. +func (testee *Testee) Query(query string) *Testee { + // testee.origin = fmt.Sprintf("%T", testee.actual) + testee.actual = mquery.Query(testee.actual, query) + return testee +} + +// ToBe can assert the testee to equal the parameter of this func. +// OS will exit with code 1, when the assertion fails. +// If you don't want to exit, see "Dry()". +func (testee *Testee) ToBe(expected interface{}) MintResult { + if judge(testee.actual, expected, testee.not, testee.deeply) { + return testee.result + } + testee.expected = expected + return testee.failed(failToBe) +} + +// Match can assert the testee to match with the specified regular expression. +// It uses `regexp.MustCompile`; it is up to the caller to make sure the expression is a valid regexp. +// OS will exit with code 1, when the assertion fails. +// If you don't want to exit, see "Dry()". +func (testee *Testee) Match(expression string) MintResult { + exp := regexp.MustCompile(expression) + matched := exp.MatchString(fmt.Sprintf("%v", testee.actual)) + if judge(matched, true, testee.not, testee.deeply) { + return testee.result + } + testee.expected = expression + return testee.failed(failToMatch) +} + +// In can assert that the testee is in the given array. +func (testee *Testee) In(expecteds ...interface{}) MintResult { + for _, expected := range expecteds { + if judge(testee.actual, expected, testee.not, testee.deeply) { + return testee.result + } + } + testee.expected = expecteds + return testee.failed(failIn) +} + +// TypeOf can assert the type of testee to equal the parameter of this func. +// OS will exit with code 1, when the assertion fails. +// If you don't want to exit, see "Dry()". 
+func (testee *Testee) TypeOf(typeName string) MintResult { + if judge(reflect.TypeOf(testee.actual).String(), typeName, testee.not, testee.deeply) { + return testee.result + } + testee.expected = typeName + return testee.failed(failType) +} + +// Not negates the following assertion. +func (testee *Testee) Not() *Testee { + testee.not = true + return testee +} + +// Dry makes the testee NOT call "Fail()". +// Use this if you want to fail a test on purpose. +func (testee *Testee) Dry() *Testee { + testee.dry = true + return testee +} + +// Deeply makes the following assertions use `reflect.DeepEqual`. +// You should use this to compare reference-type objects. +func (testee *Testee) Deeply() *Testee { + testee.deeply = true + return testee +} + +func (testee *Testee) failed(failure int) MintResult { + message := testee.toText(failure) + testee.result.ok = false + testee.result.message = message + if !testee.dry { + fmt.Println(colorize["red"](message)) + if testee.required { + testee.t.FailNow() + } else { + testee.t.Fail() + } + } + return testee.result +} + +func (testee *Testee) toText(fail int) string { + not := "" + if testee.not { + not = "NOT " + } + _, file, line, _ := runtime.Caller(3) + // if testee.origin != "" { + // testee.origin = fmt.Sprintf("(queried from %s)", testee.origin) + // } + return fmt.Sprintf( + scolds[fail], + filepath.Base(file), line, + not, + testee.expected, + testee.actual, + ) +} + +// Log only outputs if the -v flag is given. +// This is because the standard "testing.T.Log" method decorates +// its caller: runtime.Caller(3) automatically. +func (testee *Testee) Log(args ...interface{}) { + if !testee.verbose { + return + } + fmt.Print(args...) +} diff --git a/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index ec52857a3..47f0f5914 100644 --- a/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/tools/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -113,7 +113,7 @@ dockers: checksum: name_template: 'sha256sums.txt' snapshot: - name_template: "{{ incpatch .Version }}-next" + version_template: "{{ incpatch .Version }}-next" release: github: owner: pelletier diff --git a/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index c3df8bee1..189be525e 100644 --- a/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/tools/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -59,7 +59,7 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { // // With this feature enabled, types implementing the unstable/Unmarshaler // interface can be decoded from any structure of the document. It allows types -// that don't have a straightfoward TOML representation to provide their own +// that don't have a straightforward TOML representation to provide their own // decoding logic. // // Currently, types can only decode from a single value. 
Tables and array tables diff --git a/tools/vendor/github.com/proglottis/gpgme/.gitignore b/tools/vendor/github.com/proglottis/gpgme/.gitignore new file mode 100644 index 000000000..bc485bfc5 --- /dev/null +++ b/tools/vendor/github.com/proglottis/gpgme/.gitignore @@ -0,0 +1,3 @@ +testdata/gpghome/random_seed +testdata/gpghome/.gpg-v21-migrated +testdata/gpghome/private-keys-v1.d/ diff --git a/tools/vendor/github.com/proglottis/gpgme/LICENSE b/tools/vendor/github.com/proglottis/gpgme/LICENSE new file mode 100644 index 000000000..06d4ab773 --- /dev/null +++ b/tools/vendor/github.com/proglottis/gpgme/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2015, James Fargher +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tools/vendor/github.com/proglottis/gpgme/README.md b/tools/vendor/github.com/proglottis/gpgme/README.md new file mode 100644 index 000000000..4770b82a8 --- /dev/null +++ b/tools/vendor/github.com/proglottis/gpgme/README.md @@ -0,0 +1,13 @@ +# GPGME (golang) + +Go wrapper for the GPGME library. + +This library is intended for use with desktop applications. If you are looking to add OpenPGP support to a server application I suggest you first look at [golang.org/x/crypto/openpgp](https://godoc.org/golang.org/x/crypto/openpgp). 
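+
+## Example
+
+A minimal decryption sketch (illustrative, not from upstream; it assumes a GnuPG home directory that already holds the matching secret key, and a hypothetical input file `message.gpg`):
+
+```go
+package main
+
+import (
+	"io"
+	"os"
+
+	"github.com/proglottis/gpgme"
+)
+
+func main() {
+	f, err := os.Open("message.gpg") // hypothetical ciphertext file
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+
+	// Decrypt consumes the ciphertext reader and returns a plaintext
+	// Data buffer, already rewound to the beginning.
+	plain, err := gpgme.Decrypt(f)
+	if err != nil {
+		panic(err)
+	}
+	defer plain.Close()
+
+	if _, err := io.Copy(os.Stdout, plain); err != nil {
+		panic(err)
+	}
+}
+```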
+ +## Installation + + go get -u github.com/proglottis/gpgme + +## Documentation + +* [godoc](https://godoc.org/github.com/proglottis/gpgme) diff --git a/tools/vendor/github.com/proglottis/gpgme/data.go b/tools/vendor/github.com/proglottis/gpgme/data.go new file mode 100644 index 000000000..0e81c36d6 --- /dev/null +++ b/tools/vendor/github.com/proglottis/gpgme/data.go @@ -0,0 +1,226 @@ +package gpgme + +// #include <string.h> +// #include <gpgme.h> +// #include <errno.h> +// #include "go_gpgme.h" +import "C" + +import ( + "io" + "os" + "runtime" + "runtime/cgo" + "unsafe" +) + +const ( + SeekSet = C.SEEK_SET + SeekCur = C.SEEK_CUR + SeekEnd = C.SEEK_END +) + +var dataCallbacks = C.struct_gpgme_data_cbs{ + read: C.gpgme_data_read_cb_t(C.gogpgme_readfunc), + write: C.gpgme_data_write_cb_t(C.gogpgme_writefunc), + seek: C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc), +} + +//export gogpgme_readfunc +func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.r.Read(unsafe.Slice((*byte)(buffer), size)) + if err != nil && err != io.EOF { + d.err = err + C.gpgme_err_set_errno(C.EIO) + return -1 + } + return C.ssize_t(n) +} + +//export gogpgme_writefunc +func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.w.Write(unsafe.Slice((*byte)(buffer), size)) + if err != nil && err != io.EOF { + d.err = err + C.gpgme_err_set_errno(C.EIO) + return -1 + } + return C.ssize_t(n) +} + +//export gogpgme_seekfunc +func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t { + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.s.Seek(int64(offset), int(whence)) + if err != nil { + d.err = err + C.gpgme_err_set_errno(C.EIO) + return -1 + } + return C.gpgme_off_t(n) +} + +// The Data buffer used to communicate with GPGME +type Data struct { + dh C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C + r io.Reader + w io.Writer + s io.Seeker + cbc cgo.Handle // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh) + err error +} + +func newData() *Data { + d := &Data{} + runtime.SetFinalizer(d, (*Data).Close) + return d +} + +// NewData returns a new memory based data buffer +func NewData() (*Data, error) { + d := newData() + return d, handleError(C.gpgme_data_new(&d.dh)) +} + +// NewDataFile returns a new file based data buffer +func NewDataFile(f *os.File) (*Data, error) { + d := newData() + d.r = f + return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd()))) +} + +// NewDataBytes returns a new memory based data buffer that contains `b` bytes +func NewDataBytes(b []byte) (*Data, error) { + d := newData() + var cb *C.char + if len(b) != 0 { + cb = (*C.char)(unsafe.Pointer(&b[0])) + } + return d, handleError(C.gpgme_data_new_from_mem(&d.dh, cb, C.size_t(len(b)), 1)) +} + +// NewDataReader returns a new callback based data buffer +func NewDataReader(r io.Reader) (*Data, error) { + d := newData() + d.r = r + if s, ok := r.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) +} + +// NewDataWriter returns a new callback based data buffer +func NewDataWriter(w io.Writer) (*Data, error) { + d := newData() + d.w = w + if s, ok := w.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, 
&dataCallbacks, unsafe.Pointer(&d.cbc))) +} + +// NewDataReadWriter returns a new callback based data buffer +func NewDataReadWriter(rw io.ReadWriter) (*Data, error) { + d := newData() + d.r = rw + d.w = rw + if s, ok := rw.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) +} + +// NewDataReadWriteSeeker returns a new callback based data buffer +func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) { + d := newData() + d.r = rw + d.w = rw + d.s = rw + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) +} + +// Close releases any resources associated with the data buffer +func (d *Data) Close() error { + if d.dh == nil { + return nil + } + if d.cbc > 0 { + d.cbc.Delete() + } + _, err := C.gpgme_data_release(d.dh) + runtime.KeepAlive(d) + d.dh = nil + return err +} + +func (d *Data) Write(p []byte) (int, error) { + var buffer *byte + if len(p) > 0 { + buffer = &p[0] + } + + n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(buffer), C.size_t(len(p))) + runtime.KeepAlive(d) + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: + return 0, err + case len(p) > 0 && n == 0: + return 0, io.EOF + } + return int(n), nil +} + +func (d *Data) Read(p []byte) (int, error) { + var buffer *byte + if len(p) > 0 { + buffer = &p[0] + } + + n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(buffer), C.size_t(len(p))) + runtime.KeepAlive(d) + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: + return 0, err + case len(p) > 0 && n == 0: + return 0, io.EOF + } + return int(n), nil +} + +func (d *Data) Seek(offset int64, whence int) (int64, error) { + n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence)) + runtime.KeepAlive(d) + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: + return 0, err + } + return int64(n), nil +} + +// Name returns the associated filename if any +func (d *Data) Name() string { + res := C.GoString(C.gpgme_data_get_file_name(d.dh)) + runtime.KeepAlive(d) + return res +} diff --git a/tools/vendor/github.com/proglottis/gpgme/go_gpgme.c b/tools/vendor/github.com/proglottis/gpgme/go_gpgme.c new file mode 100644 index 000000000..f5b38ce7e --- /dev/null +++ b/tools/vendor/github.com/proglottis/gpgme/go_gpgme.c @@ -0,0 +1,103 @@ +#include "go_gpgme.h" + +gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) { + return gpgme_data_seek(dh, offset, whence); +} + +gpgme_error_t gogpgme_op_assuan_transact_ext( + gpgme_ctx_t ctx, + char* cmd, + void* data_h, + void* inquiry_h, + void* status_h, + gpgme_error_t *operr + ){ + return gpgme_op_assuan_transact_ext( + ctx, + cmd, + (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, data_h, + (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, inquiry_h, + (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, status_h, + operr + ); +} + +unsigned int key_revoked(gpgme_key_t k) { + return k->revoked; +} + +unsigned int key_expired(gpgme_key_t k) { + return k->expired; +} + +unsigned int key_disabled(gpgme_key_t k) { + return k->disabled; +} + +unsigned int key_invalid(gpgme_key_t k) { + return k->invalid; +} + +unsigned int key_can_encrypt(gpgme_key_t k) { + return k->can_encrypt; +} + +unsigned int key_can_sign(gpgme_key_t k) { + return k->can_sign; +} + 
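+/*
+ * The one-line accessors in this file exist because flags such as
+ * revoked, expired and can_sign are C bit-fields inside the gpgme
+ * structs, and cgo cannot address bit-fields from Go directly; each
+ * wrapper widens a single flag to an unsigned int so the Go side can
+ * test it with, e.g., C.key_can_certify(k) != 0.
+ */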
+unsigned int key_can_certify(gpgme_key_t k) { + return k->can_certify; +} + +unsigned int key_secret(gpgme_key_t k) { + return k->secret; +} + +unsigned int key_can_authenticate(gpgme_key_t k) { + return k->can_authenticate; +} + +unsigned int key_is_qualified(gpgme_key_t k) { + return k->is_qualified; +} + +unsigned int signature_wrong_key_usage(gpgme_signature_t s) { + return s->wrong_key_usage; +} + +unsigned int signature_pka_trust(gpgme_signature_t s) { + return s->pka_trust; +} + +unsigned int signature_chain_model(gpgme_signature_t s) { + return s->chain_model; +} + +unsigned int subkey_revoked(gpgme_subkey_t k) { + return k->revoked; +} + +unsigned int subkey_expired(gpgme_subkey_t k) { + return k->expired; +} + +unsigned int subkey_disabled(gpgme_subkey_t k) { + return k->disabled; +} + +unsigned int subkey_invalid(gpgme_subkey_t k) { + return k->invalid; +} + +unsigned int subkey_secret(gpgme_subkey_t k) { + return k->secret; +} + +unsigned int uid_revoked(gpgme_user_id_t u) { + return u->revoked; +} + +unsigned int uid_invalid(gpgme_user_id_t u) { + return u->invalid; +} diff --git a/tools/vendor/github.com/proglottis/gpgme/go_gpgme.h b/tools/vendor/github.com/proglottis/gpgme/go_gpgme.h new file mode 100644 index 000000000..8eb5fddd9 --- /dev/null +++ b/tools/vendor/github.com/proglottis/gpgme/go_gpgme.h @@ -0,0 +1,42 @@ +#ifndef GO_GPGME_H +#define GO_GPGME_H + +#define _FILE_OFFSET_BITS 64 +#include <gpgme.h> + +#include <stdlib.h> + +extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); +extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); +extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); +extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd); +extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence); + +extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, void *data_h, void *inquiry_h , void *status_h, gpgme_error_t *operr); + +extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen ); +extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args); +extern gpgme_error_t gogpgme_assuan_status_callback(void *opaque, char* status, char* args); + +extern unsigned int key_revoked(gpgme_key_t k); +extern unsigned int key_expired(gpgme_key_t k); +extern unsigned int key_disabled(gpgme_key_t k); +extern unsigned int key_invalid(gpgme_key_t k); +extern unsigned int key_can_encrypt(gpgme_key_t k); +extern unsigned int key_can_sign(gpgme_key_t k); +extern unsigned int key_can_certify(gpgme_key_t k); +extern unsigned int key_secret(gpgme_key_t k); +extern unsigned int key_can_authenticate(gpgme_key_t k); +extern unsigned int key_is_qualified(gpgme_key_t k); +extern unsigned int signature_wrong_key_usage(gpgme_signature_t s); +extern unsigned int signature_pka_trust(gpgme_signature_t s); +extern unsigned int signature_chain_model(gpgme_signature_t s); +extern unsigned int subkey_revoked(gpgme_subkey_t k); +extern unsigned int subkey_expired(gpgme_subkey_t k); +extern unsigned int subkey_disabled(gpgme_subkey_t k); +extern unsigned int subkey_invalid(gpgme_subkey_t k); +extern unsigned int subkey_secret(gpgme_subkey_t k); +extern unsigned int uid_revoked(gpgme_user_id_t u); +extern unsigned int uid_invalid(gpgme_user_id_t u); + +#endif diff --git a/tools/vendor/github.com/proglottis/gpgme/gpgme.go b/tools/vendor/github.com/proglottis/gpgme/gpgme.go new file mode 
100644 index 000000000..62a095c12 --- /dev/null +++ b/tools/vendor/github.com/proglottis/gpgme/gpgme.go @@ -0,0 +1,982 @@ +// Package gpgme provides a Go wrapper for the GPGME library +package gpgme + +// #cgo pkg-config: gpgme +// #cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64 +// #include <stdlib.h> +// #include <string.h> +// #include "go_gpgme.h" +import "C" + +import ( + "fmt" + "io" + "os" + "runtime" + "runtime/cgo" + "time" + "unsafe" +) + +var Version string + +func init() { + Version = C.GoString(C.gpgme_check_version(nil)) +} + +// Callback is the function that is called when a passphrase is required +type Callback func(uidHint string, prevWasBad bool, f *os.File) error + +//export gogpgme_passfunc +func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t { + h := *(*cgo.Handle)(hook) + c := h.Value().(*Context) + go_uid_hint := C.GoString(uid_hint) + f := os.NewFile(uintptr(fd), go_uid_hint) + defer f.Close() + err := c.callback(go_uid_hint, prev_was_bad != 0, f) + if err != nil { + return C.GPG_ERR_CANCELED + } + return 0 +} + +type Protocol int + +const ( + ProtocolOpenPGP Protocol = C.GPGME_PROTOCOL_OpenPGP + ProtocolCMS Protocol = C.GPGME_PROTOCOL_CMS + ProtocolGPGConf Protocol = C.GPGME_PROTOCOL_GPGCONF + ProtocolAssuan Protocol = C.GPGME_PROTOCOL_ASSUAN + ProtocolG13 Protocol = C.GPGME_PROTOCOL_G13 + ProtocolUIServer Protocol = C.GPGME_PROTOCOL_UISERVER + ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT + ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN +) + +type PinEntryMode int + +const ( + PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT + PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK + PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL + PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR + PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK +) + +type EncryptFlag uint + +const ( + EncryptAlwaysTrust EncryptFlag = C.GPGME_ENCRYPT_ALWAYS_TRUST + EncryptNoEncryptTo EncryptFlag = C.GPGME_ENCRYPT_NO_ENCRYPT_TO + EncryptPrepare EncryptFlag = C.GPGME_ENCRYPT_PREPARE + EncryptExceptSign EncryptFlag = C.GPGME_ENCRYPT_EXPECT_SIGN +) + +type HashAlgo int + +// const values for HashAlgo values should be added when necessary. + +type KeyListMode uint + +const ( + KeyListModeLocal KeyListMode = C.GPGME_KEYLIST_MODE_LOCAL + KeyListModeExtern KeyListMode = C.GPGME_KEYLIST_MODE_EXTERN + KeyListModeSigs KeyListMode = C.GPGME_KEYLIST_MODE_SIGS + KeyListModeSigNotations KeyListMode = C.GPGME_KEYLIST_MODE_SIG_NOTATIONS + KeyListModeEphemeral KeyListMode = C.GPGME_KEYLIST_MODE_EPHEMERAL + KeyListModeModeValidate KeyListMode = C.GPGME_KEYLIST_MODE_VALIDATE +) + +type PubkeyAlgo int + +// const values for PubkeyAlgo values should be added when necessary. 
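+
+// Illustrative sketch (not part of upstream): configuring a loopback
+// passphrase callback with the Callback type defined above, so no
+// pinentry dialog is shown. It assumes a GnuPG engine that allows
+// loopback pinentry; the passphrase literal is a placeholder.
+//
+//	ctx, _ := gpgme.New()
+//	_ = ctx.SetPinEntryMode(gpgme.PinEntryLoopback)
+//	_ = ctx.SetCallback(func(uidHint string, prevWasBad bool, f *os.File) error {
+//		// gpgme reads the passphrase, terminated by a newline, from f.
+//		_, err := f.WriteString("placeholder-passphrase\n")
+//		return err
+//	})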
+ +type SigMode int + +const ( + SigModeNormal SigMode = C.GPGME_SIG_MODE_NORMAL + SigModeDetach SigMode = C.GPGME_SIG_MODE_DETACH + SigModeClear SigMode = C.GPGME_SIG_MODE_CLEAR +) + +type SigSum int + +const ( + SigSumValid SigSum = C.GPGME_SIGSUM_VALID + SigSumGreen SigSum = C.GPGME_SIGSUM_GREEN + SigSumRed SigSum = C.GPGME_SIGSUM_RED + SigSumKeyRevoked SigSum = C.GPGME_SIGSUM_KEY_REVOKED + SigSumKeyExpired SigSum = C.GPGME_SIGSUM_KEY_EXPIRED + SigSumSigExpired SigSum = C.GPGME_SIGSUM_SIG_EXPIRED + SigSumKeyMissing SigSum = C.GPGME_SIGSUM_KEY_MISSING + SigSumCRLMissing SigSum = C.GPGME_SIGSUM_CRL_MISSING + SigSumCRLTooOld SigSum = C.GPGME_SIGSUM_CRL_TOO_OLD + SigSumBadPolicy SigSum = C.GPGME_SIGSUM_BAD_POLICY + SigSumSysError SigSum = C.GPGME_SIGSUM_SYS_ERROR +) + +type Validity int + +const ( + ValidityUnknown Validity = C.GPGME_VALIDITY_UNKNOWN + ValidityUndefined Validity = C.GPGME_VALIDITY_UNDEFINED + ValidityNever Validity = C.GPGME_VALIDITY_NEVER + ValidityMarginal Validity = C.GPGME_VALIDITY_MARGINAL + ValidityFull Validity = C.GPGME_VALIDITY_FULL + ValidityUltimate Validity = C.GPGME_VALIDITY_ULTIMATE +) + +type ErrorCode int + +const ( + ErrorNoError ErrorCode = C.GPG_ERR_NO_ERROR + ErrorEOF ErrorCode = C.GPG_ERR_EOF +) + +// Error is a wrapper for GPGME errors +type Error struct { + err C.gpgme_error_t +} + +func (e Error) Code() ErrorCode { + return ErrorCode(C.gpgme_err_code(e.err)) +} + +func (e Error) Error() string { + return C.GoString(C.gpgme_strerror(e.err)) +} + +func handleError(err C.gpgme_error_t) error { + e := Error{err: err} + if e.Code() == ErrorNoError { + return nil + } + return e +} + +func cbool(b bool) C.int { + if b { + return 1 + } + return 0 +} + +func EngineCheckVersion(p Protocol) error { + return handleError(C.gpgme_engine_check_version(C.gpgme_protocol_t(p))) +} + +type EngineInfo struct { + next *EngineInfo + protocol Protocol + fileName string + homeDir string + version string + requiredVersion string +} + +func copyEngineInfo(info C.gpgme_engine_info_t) *EngineInfo { + res := &EngineInfo{ + next: nil, + protocol: Protocol(info.protocol), + fileName: C.GoString(info.file_name), + homeDir: C.GoString(info.home_dir), + version: C.GoString(info.version), + requiredVersion: C.GoString(info.req_version), + } + if info.next != nil { + res.next = copyEngineInfo(info.next) + } + return res +} + +func (e *EngineInfo) Next() *EngineInfo { + return e.next +} + +func (e *EngineInfo) Protocol() Protocol { + return e.protocol +} + +func (e *EngineInfo) FileName() string { + return e.fileName +} + +func (e *EngineInfo) Version() string { + return e.version +} + +func (e *EngineInfo) RequiredVersion() string { + return e.requiredVersion +} + +func (e *EngineInfo) HomeDir() string { + return e.homeDir +} + +func GetEngineInfo() (*EngineInfo, error) { + var cInfo C.gpgme_engine_info_t + err := handleError(C.gpgme_get_engine_info(&cInfo)) + if err != nil { + return nil, err + } + return copyEngineInfo(cInfo), nil // It is up to the caller not to invalidate cInfo concurrently until this is done. 
+} + +func SetEngineInfo(proto Protocol, fileName, homeDir string) error { + var cfn, chome *C.char + if fileName != "" { + cfn = C.CString(fileName) + defer C.free(unsafe.Pointer(cfn)) + } + if homeDir != "" { + chome = C.CString(homeDir) + defer C.free(unsafe.Pointer(chome)) + } + return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome)) +} + +func GetDirInfo(what string) string { + cwhat := C.CString(what) + defer C.free(unsafe.Pointer(cwhat)) + + cdir := C.gpgme_get_dirinfo(cwhat) + if cdir == nil { + return "" + } + return C.GoString(cdir) +} + +func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { + var keys []*Key + ctx, err := New() + if err != nil { + return keys, err + } + defer ctx.Release() + if err := ctx.KeyListStart(pattern, secretOnly); err != nil { + return keys, err + } + defer func() { _ = ctx.KeyListEnd() }() + for ctx.KeyListNext() { + keys = append(keys, ctx.Key) + } + if ctx.KeyError != nil { + return keys, ctx.KeyError + } + return keys, nil +} + +func Decrypt(r io.Reader) (*Data, error) { + ctx, err := New() + if err != nil { + return nil, err + } + defer ctx.Release() + cipher, err := NewDataReader(r) + if err != nil { + return nil, err + } + defer cipher.Close() + plain, err := NewData() + if err != nil { + return nil, err + } + if err := ctx.Decrypt(cipher, plain); err != nil { + return nil, err + } + _, err = plain.Seek(0, SeekSet) + return plain, err +} + +type Context struct { + Key *Key + KeyError error + + callback Callback + cbc cgo.Handle // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx) + + ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C +} + +func New() (*Context, error) { + c := &Context{} + err := C.gpgme_new(&c.ctx) + runtime.SetFinalizer(c, (*Context).Release) + return c, handleError(err) +} + +func (c *Context) Release() { + if c.ctx == nil { + return + } + if c.cbc > 0 { + c.cbc.Delete() + } + C.gpgme_release(c.ctx) + runtime.KeepAlive(c) + c.ctx = nil +} + +func (c *Context) SetArmor(yes bool) { + C.gpgme_set_armor(c.ctx, cbool(yes)) + runtime.KeepAlive(c) +} + +func (c *Context) Armor() bool { + res := C.gpgme_get_armor(c.ctx) != 0 + runtime.KeepAlive(c) + return res +} + +func (c *Context) SetTextMode(yes bool) { + C.gpgme_set_textmode(c.ctx, cbool(yes)) + runtime.KeepAlive(c) +} + +func (c *Context) TextMode() bool { + res := C.gpgme_get_textmode(c.ctx) != 0 + runtime.KeepAlive(c) + return res +} + +func (c *Context) SetProtocol(p Protocol) error { + err := handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p))) + runtime.KeepAlive(c) + return err +} + +func (c *Context) Protocol() Protocol { + res := Protocol(C.gpgme_get_protocol(c.ctx)) + runtime.KeepAlive(c) + return res +} + +func (c *Context) SetKeyListMode(m KeyListMode) error { + err := handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m))) + runtime.KeepAlive(c) + return err +} + +func (c *Context) KeyListMode() KeyListMode { + res := KeyListMode(C.gpgme_get_keylist_mode(c.ctx)) + runtime.KeepAlive(c) + return res +} + +func (c *Context) SetPinEntryMode(m PinEntryMode) error { + err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) + runtime.KeepAlive(c) + return err +} + +func (c *Context) PinEntryMode() PinEntryMode { + res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) + runtime.KeepAlive(c) + return res +} + +func (c *Context) SetCallback(callback Callback) error { + var err error + c.callback = callback + if c.cbc 
> 0 { + c.cbc.Delete() + } + if callback != nil { + c.cbc = cgo.NewHandle(c) + _, err = C.gpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), unsafe.Pointer(&c.cbc)) + } else { + c.cbc = 0 + _, err = C.gpgme_set_passphrase_cb(c.ctx, nil, nil) + } + runtime.KeepAlive(c) + return err +} + +func (c *Context) EngineInfo() *EngineInfo { + cInfo := C.gpgme_ctx_get_engine_info(c.ctx) + runtime.KeepAlive(c) + // NOTE: c must be live as long as we are accessing cInfo. + res := copyEngineInfo(cInfo) + runtime.KeepAlive(c) // for accesses to cInfo + return res +} + +func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error { + var cfn, chome *C.char + if fileName != "" { + cfn = C.CString(fileName) + defer C.free(unsafe.Pointer(cfn)) + } + if homeDir != "" { + chome = C.CString(homeDir) + defer C.free(unsafe.Pointer(chome)) + } + err := handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome)) + runtime.KeepAlive(c) + return err +} + +func (c *Context) KeyListStart(pattern string, secretOnly bool) error { + cpattern := C.CString(pattern) + defer C.free(unsafe.Pointer(cpattern)) + err := handleError(C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly))) + runtime.KeepAlive(c) + return err +} + +func (c *Context) KeyListNext() bool { + c.Key = newKey() + err := handleError(C.gpgme_op_keylist_next(c.ctx, &c.Key.k)) + runtime.KeepAlive(c) // implies runtime.KeepAlive(c.Key) + if err != nil { + if e, ok := err.(Error); ok && e.Code() == ErrorEOF { + c.KeyError = nil + } else { + c.KeyError = err + } + return false + } + c.KeyError = nil + return true +} + +func (c *Context) KeyListEnd() error { + err := handleError(C.gpgme_op_keylist_end(c.ctx)) + runtime.KeepAlive(c) + return err +} + +func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) { + key := newKey() + cfpr := C.CString(fingerprint) + defer C.free(unsafe.Pointer(cfpr)) + err := handleError(C.gpgme_get_key(c.ctx, cfpr, &key.k, cbool(secret))) + runtime.KeepAlive(c) + runtime.KeepAlive(key) + keyKIsNil := key.k == nil + runtime.KeepAlive(key) + if e, ok := err.(Error); keyKIsNil && ok && e.Code() == ErrorEOF { + return nil, fmt.Errorf("key %q not found", fingerprint) + } + if err != nil { + return nil, err + } + return key, nil +} + +func (c *Context) Decrypt(ciphertext, plaintext *Data) error { + err := handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh)) + runtime.KeepAlive(c) + runtime.KeepAlive(ciphertext) + runtime.KeepAlive(plaintext) + return err +} + +func (c *Context) DecryptVerify(ciphertext, plaintext *Data) error { + err := handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh)) + runtime.KeepAlive(c) + runtime.KeepAlive(ciphertext) + runtime.KeepAlive(plaintext) + return err +} + +type Signature struct { + Summary SigSum + Fingerprint string + Status error + Timestamp time.Time + ExpTimestamp time.Time + WrongKeyUsage bool + PKATrust uint + ChainModel bool + Validity Validity + ValidityReason error + PubkeyAlgo PubkeyAlgo + HashAlgo HashAlgo +} + +func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, error) { + var signedTextPtr, plainPtr C.gpgme_data_t = nil, nil + if signedText != nil { + signedTextPtr = signedText.dh + } + if plain != nil { + plainPtr = plain.dh + } + err := handleError(C.gpgme_op_verify(c.ctx, sig.dh, signedTextPtr, plainPtr)) + runtime.KeepAlive(c) + runtime.KeepAlive(sig) + if signedText != nil { + runtime.KeepAlive(signedText) + } + if plain != nil { + 
runtime.KeepAlive(plain) + } + if err != nil { + return "", nil, err + } + res := C.gpgme_op_verify_result(c.ctx) + runtime.KeepAlive(c) + // NOTE: c must be live as long as we are accessing res. + sigs := []Signature{} + for s := res.signatures; s != nil; s = s.next { + sig := Signature{ + Summary: SigSum(s.summary), + Fingerprint: C.GoString(s.fpr), + Status: handleError(s.status), + // s.notations not implemented + Timestamp: time.Unix(int64(s.timestamp), 0), + ExpTimestamp: time.Unix(int64(s.exp_timestamp), 0), + WrongKeyUsage: C.signature_wrong_key_usage(s) != 0, + PKATrust: uint(C.signature_pka_trust(s)), + ChainModel: C.signature_chain_model(s) != 0, + Validity: Validity(s.validity), + ValidityReason: handleError(s.validity_reason), + PubkeyAlgo: PubkeyAlgo(s.pubkey_algo), + HashAlgo: HashAlgo(s.hash_algo), + } + sigs = append(sigs, sig) + } + fileName := C.GoString(res.file_name) + runtime.KeepAlive(c) // for all accesses to res above + return fileName, sigs, nil +} + +func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphertext *Data) error { + size := unsafe.Sizeof(new(C.gpgme_key_t)) + recp := C.calloc(C.size_t(len(recipients)+1), C.size_t(size)) + defer C.free(recp) + for i := range recipients { + ptr := (*C.gpgme_key_t)(unsafe.Pointer(uintptr(recp) + size*uintptr(i))) + *ptr = recipients[i].k + } + err := C.gpgme_op_encrypt(c.ctx, (*C.gpgme_key_t)(recp), C.gpgme_encrypt_flags_t(flags), plaintext.dh, ciphertext.dh) + runtime.KeepAlive(c) + runtime.KeepAlive(recipients) + runtime.KeepAlive(plaintext) + runtime.KeepAlive(ciphertext) + return handleError(err) +} + +func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error { + C.gpgme_signers_clear(c.ctx) + runtime.KeepAlive(c) + for _, k := range signers { + err := handleError(C.gpgme_signers_add(c.ctx, k.k)) + runtime.KeepAlive(c) + runtime.KeepAlive(k) + if err != nil { + C.gpgme_signers_clear(c.ctx) + runtime.KeepAlive(c) + return err + } + } + err := handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode))) + runtime.KeepAlive(c) + runtime.KeepAlive(plain) + runtime.KeepAlive(sig) + return err +} + +type ( + AssuanDataCallback func(data []byte) error + AssuanInquireCallback func(name, args string) error + AssuanStatusCallback func(status, args string) error +) + +// AssuanSend sends a raw Assuan command to gpg-agent +func (c *Context) AssuanSend( + cmd string, + data AssuanDataCallback, + inquiry AssuanInquireCallback, + status AssuanStatusCallback, +) error { + var operr C.gpgme_error_t + + dataPtr := cgo.NewHandle(&data) + inquiryPtr := cgo.NewHandle(&inquiry) + statusPtr := cgo.NewHandle(&status) + cmdCStr := C.CString(cmd) + defer C.free(unsafe.Pointer(cmdCStr)) + err := C.gogpgme_op_assuan_transact_ext( + c.ctx, + cmdCStr, + unsafe.Pointer(&dataPtr), + unsafe.Pointer(&inquiryPtr), + unsafe.Pointer(&statusPtr), + &operr, + ) + runtime.KeepAlive(c) + + if handleError(operr) != nil { + return handleError(operr) + } + return handleError(err) +} + +//export gogpgme_assuan_data_callback +func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t { + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanDataCallback) + if *c == nil { + return 0 + } + if err := (*c)(C.GoBytes(data, C.int(datalen))); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } + return 0 +} + +//export gogpgme_assuan_inquiry_callback +func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) 
C.gpgme_error_t { + name := C.GoString(cName) + args := C.GoString(cArgs) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanInquireCallback) + if *c == nil { + return 0 + } + if err := (*c)(name, args); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } + return 0 +} + +//export gogpgme_assuan_status_callback +func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t { + status := C.GoString(cStatus) + args := C.GoString(cArgs) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanStatusCallback) + if *c == nil { + return 0 + } + if err := (*c)(status, args); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } + return 0 +} + +// ExportModeFlags defines how keys are exported from Export +type ExportModeFlags uint + +const ( + ExportModeExtern ExportModeFlags = C.GPGME_EXPORT_MODE_EXTERN + ExportModeMinimal ExportModeFlags = C.GPGME_EXPORT_MODE_MINIMAL +) + +func (c *Context) Export(pattern string, mode ExportModeFlags, data *Data) error { + pat := C.CString(pattern) + defer C.free(unsafe.Pointer(pat)) + err := handleError(C.gpgme_op_export(c.ctx, pat, C.gpgme_export_mode_t(mode), data.dh)) + runtime.KeepAlive(c) + runtime.KeepAlive(data) + return err +} + +// ImportStatusFlags describes the type of ImportStatus.Status. The C API in gpgme.h simply uses "unsigned". +type ImportStatusFlags uint + +const ( + ImportNew ImportStatusFlags = C.GPGME_IMPORT_NEW + ImportUID ImportStatusFlags = C.GPGME_IMPORT_UID + ImportSIG ImportStatusFlags = C.GPGME_IMPORT_SIG + ImportSubKey ImportStatusFlags = C.GPGME_IMPORT_SUBKEY + ImportSecret ImportStatusFlags = C.GPGME_IMPORT_SECRET +) + +type ImportStatus struct { + Fingerprint string + Result error + Status ImportStatusFlags +} + +type ImportResult struct { + Considered int + NoUserID int + Imported int + ImportedRSA int + Unchanged int + NewUserIDs int + NewSubKeys int + NewSignatures int + NewRevocations int + SecretRead int + SecretImported int + SecretUnchanged int + NotImported int + Imports []ImportStatus +} + +func (c *Context) Import(keyData *Data) (*ImportResult, error) { + err := handleError(C.gpgme_op_import(c.ctx, keyData.dh)) + runtime.KeepAlive(c) + runtime.KeepAlive(keyData) + if err != nil { + return nil, err + } + res := C.gpgme_op_import_result(c.ctx) + runtime.KeepAlive(c) + // NOTE: c must be live as long as we are accessing res. 
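+	// res.imports is a C singly-linked list; the loop below copies each
+	// entry into Go-owned memory while c (and therefore res) is still live.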
+ imports := []ImportStatus{} + for s := res.imports; s != nil; s = s.next { + imports = append(imports, ImportStatus{ + Fingerprint: C.GoString(s.fpr), + Result: handleError(s.result), + Status: ImportStatusFlags(s.status), + }) + } + importResult := &ImportResult{ + Considered: int(res.considered), + NoUserID: int(res.no_user_id), + Imported: int(res.imported), + ImportedRSA: int(res.imported_rsa), + Unchanged: int(res.unchanged), + NewUserIDs: int(res.new_user_ids), + NewSubKeys: int(res.new_sub_keys), + NewSignatures: int(res.new_signatures), + NewRevocations: int(res.new_revocations), + SecretRead: int(res.secret_read), + SecretImported: int(res.secret_imported), + SecretUnchanged: int(res.secret_unchanged), + NotImported: int(res.not_imported), + Imports: imports, + } + runtime.KeepAlive(c) // for all accesses to res above + return importResult, nil +} + +type Key struct { + k C.gpgme_key_t // WARNING: Call runtime.KeepAlive(k) after ANY passing of k.k to C +} + +func newKey() *Key { + k := &Key{} + runtime.SetFinalizer(k, (*Key).Release) + return k +} + +func (k *Key) Release() { + C.gpgme_key_release(k.k) + runtime.KeepAlive(k) + k.k = nil +} + +func (k *Key) Revoked() bool { + res := C.key_revoked(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) Expired() bool { + res := C.key_expired(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) Disabled() bool { + res := C.key_disabled(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) Invalid() bool { + res := C.key_invalid(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) CanEncrypt() bool { + res := C.key_can_encrypt(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) CanSign() bool { + res := C.key_can_sign(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) CanCertify() bool { + res := C.key_can_certify(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) Secret() bool { + res := C.key_secret(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) CanAuthenticate() bool { + res := C.key_can_authenticate(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) IsQualified() bool { + res := C.key_is_qualified(k.k) != 0 + runtime.KeepAlive(k) + return res +} + +func (k *Key) Protocol() Protocol { + res := Protocol(k.k.protocol) + runtime.KeepAlive(k) + return res +} + +func (k *Key) IssuerSerial() string { + res := C.GoString(k.k.issuer_serial) + runtime.KeepAlive(k) + return res +} + +func (k *Key) IssuerName() string { + res := C.GoString(k.k.issuer_name) + runtime.KeepAlive(k) + return res +} + +func (k *Key) ChainID() string { + res := C.GoString(k.k.chain_id) + runtime.KeepAlive(k) + return res +} + +func (k *Key) OwnerTrust() Validity { + res := Validity(k.k.owner_trust) + runtime.KeepAlive(k) + return res +} + +func (k *Key) SubKeys() *SubKey { + subKeys := k.k.subkeys + runtime.KeepAlive(k) + if subKeys == nil { + return nil + } + return &SubKey{k: subKeys, parent: k} // The parent: k reference ensures subKeys remains valid +} + +func (k *Key) UserIDs() *UserID { + uids := k.k.uids + runtime.KeepAlive(k) + if uids == nil { + return nil + } + return &UserID{u: uids, parent: k} // The parent: k reference ensures uids remains valid +} + +func (k *Key) KeyListMode() KeyListMode { + res := KeyListMode(k.k.keylist_mode) + runtime.KeepAlive(k) + return res +} + +func (k *Key) Fingerprint() string { + res := C.GoString(k.k.fpr) + runtime.KeepAlive(k) + return res +} + +type SubKey struct { + k C.gpgme_subkey_t + 
parent *Key // make sure the key is not released when we have a reference to a subkey +} + +func (k *SubKey) Next() *SubKey { + if k.k.next == nil { + return nil + } + return &SubKey{k: k.k.next, parent: k.parent} +} + +func (k *SubKey) Revoked() bool { + return C.subkey_revoked(k.k) != 0 +} + +func (k *SubKey) Expired() bool { + return C.subkey_expired(k.k) != 0 +} + +func (k *SubKey) Disabled() bool { + return C.subkey_disabled(k.k) != 0 +} + +func (k *SubKey) Invalid() bool { + return C.subkey_invalid(k.k) != 0 +} + +func (k *SubKey) Secret() bool { + return C.subkey_secret(k.k) != 0 +} + +func (k *SubKey) KeyID() string { + return C.GoString(k.k.keyid) +} + +func (k *SubKey) Fingerprint() string { + return C.GoString(k.k.fpr) +} + +func (k *SubKey) Created() time.Time { + if k.k.timestamp <= 0 { + return time.Time{} + } + return time.Unix(int64(k.k.timestamp), 0) +} + +func (k *SubKey) Expires() time.Time { + if k.k.expires <= 0 { + return time.Time{} + } + return time.Unix(int64(k.k.expires), 0) +} + +func (k *SubKey) CardNumber() string { + return C.GoString(k.k.card_number) +} + +type UserID struct { + u C.gpgme_user_id_t + parent *Key // make sure the key is not released when we have a reference to a user ID +} + +func (u *UserID) Next() *UserID { + if u.u.next == nil { + return nil + } + return &UserID{u: u.u.next, parent: u.parent} +} + +func (u *UserID) Revoked() bool { + return C.uid_revoked(u.u) != 0 +} + +func (u *UserID) Invalid() bool { + return C.uid_invalid(u.u) != 0 +} + +func (u *UserID) Validity() Validity { + return Validity(u.u.validity) +} + +func (u *UserID) UID() string { + return C.GoString(u.u.uid) +} + +func (u *UserID) Name() string { + return C.GoString(u.u.name) +} + +func (u *UserID) Comment() string { + return C.GoString(u.u.comment) +} + +func (u *UserID) Email() string { + return C.GoString(u.u.email) +} diff --git a/tools/vendor/github.com/proglottis/gpgme/unset_agent_info.go b/tools/vendor/github.com/proglottis/gpgme/unset_agent_info.go new file mode 100644 index 000000000..8add8ec87 --- /dev/null +++ b/tools/vendor/github.com/proglottis/gpgme/unset_agent_info.go @@ -0,0 +1,19 @@ +//go:build !windows +// +build !windows + +package gpgme + +// #include +import "C" +import ( + "unsafe" +) + +// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG. +// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved +// - and cgo can't be used in tests. So, provide this helper for test initialization. 
+func unsetenvGPGAgentInfo() {
+	v := C.CString("GPG_AGENT_INFO")
+	defer C.free(unsafe.Pointer(v))
+	C.unsetenv(v)
+}
diff --git a/tools/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go b/tools/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go
new file mode 100644
index 000000000..431ec86d3
--- /dev/null
+++ b/tools/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go
@@ -0,0 +1,14 @@
+package gpgme
+
+// #include <stdlib.h>
+import "C"
+import (
+	"unsafe"
+)
+
+// unsetenv is not available in mingw
+func unsetenvGPGAgentInfo() {
+	v := C.CString("GPG_AGENT_INFO=")
+	defer C.free(unsafe.Pointer(v))
+	C.putenv(v)
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
new file mode 100644
index 000000000..9a71a15db
--- /dev/null
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// CollectorFunc is a convenient way to implement a Prometheus Collector
+// without interface boilerplate.
+// This implementation is based on the DescribeByCollect method;
+// familiarize yourself with it before using.
+type CollectorFunc func(chan<- Metric)
+
+// Collect calls the defined CollectorFunc function with the provided Metrics channel
+func (f CollectorFunc) Collect(ch chan<- Metric) {
+	f(ch)
+}
+
+// Describe sends the descriptor information using DescribeByCollect
+func (f CollectorFunc) Describe(ch chan<- *Desc) {
+	DescribeByCollect(f, ch)
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index ad347113c..2331b8b4f 100644
--- a/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
 		help:           help,
 		variableLabels: variableLabels.compile(),
 	}
-	if !model.IsValidMetricName(model.LabelValue(fqName)) {
+	//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+ if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index 8b016355a..7bac0da33 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') { groups = append(groups, group) } return groups @@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) + _, err := fmt.Fprintf(buf, format, args...) return err } ws := func(s string) error { diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index f7f97ef92..d273b6640 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. - valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c21911f29..5fe8d3b4d 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 592eec3e2..76e59f128 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { case pb.Counter != nil: pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] case pb.Histogram != nil: + h := pb.Histogram for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. 
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || + len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) && + e.GetTimestamp() != nil { + h.Exemplars = append(h.Exemplars, e) + if len(h.Bucket) == 0 { + // Don't proceed to classic buckets if there are none. + continue + } + } + // h.Bucket are sorted by UpperBound. + i := sort.Search(len(h.Bucket), func(i int) bool { + return h.Bucket[i].GetUpperBound() >= e.GetValue() }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e + if i < len(h.Bucket) { + h.Bucket[i].Exemplar = e } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + CumulativeCount: proto.Uint64(h.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + h.Bucket = append(h.Bucket, b) } } default: @@ -227,6 +237,7 @@ type Exemplar struct { // Only last applicable exemplar is injected from the list. // For example for Counter it means last exemplar is injected. // For Histogram, it means last applicable exemplar for each bucket is injected. +// For a Native Histogram, all valid exemplars are injected. // // NewMetricWithExemplars works best with MustNewConstMetric and // MustNewConstHistogram, see example. diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go index 0a61b9846..b32c95fa3 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -25,9 +25,9 @@ import ( "golang.org/x/sys/unix" ) -// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// errNotImplemented is returned by stub functions that replace cgo functions, when cgo // isn't available. -var notImplementedErr = errors.New("not implemented") +var errNotImplemented = errors.New("not implemented") type memoryInfo struct { vsize uint64 // Virtual memory size in bytes @@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if memInfo, err := getMemory(); err == nil { ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, notImplementedErr) { + } else if !errors.Is(err, errNotImplemented) { // Don't report an error when support is not compiled in. 
c.reportError(ch, c.rss, err) c.reportError(ch, c.vsize, err) diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go index 8ddb0995d..378865129 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -16,7 +16,7 @@ package prometheus func getMemory() (*memoryInfo, error) { - return nil, notImplementedErr + return nil, errNotImplemented } // describe returns all descriptions of the collector for Darwin. diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 9f4b130be..8074f70f5 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if netstat, err := p.Netstat(); err == nil { var inOctets, outOctets float64 - if netstat.IpExt.InOctets != nil { - inOctets = *netstat.IpExt.InOctets + if netstat.InOctets != nil { + inOctets = *netstat.InOctets } - if netstat.IpExt.OutOctets != nil { - outOctets = *netstat.IpExt.OutOctets + if netstat.OutOctets != nil { + outOctets = *netstat.OutOctets } ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 28eed2672..763d99e36 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -41,11 +41,11 @@ import ( "sync" "time" - "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp/internal" ) const ( @@ -65,7 +65,13 @@ const ( Zstd Compression = "zstd" ) -var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} +func defaultCompressionFormats() []Compression { + if internal.NewZstdWriter != nil { + return []Compression{Identity, Gzip, Zstd} + } else { + return []Compression{Identity, Gzip} + } +} var gzipPool = sync.Pool{ New: func() interface{} { @@ -138,7 +144,7 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO // Select compression formats to offer based on default or user choice. var compressions []string if !opts.DisableCompression { - offers := defaultCompressionFormats + offers := defaultCompressionFormats() if len(opts.OfferedCompressions) > 0 { offers = opts.OfferedCompressions } @@ -466,14 +472,12 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin switch selected { case "zstd": - // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. 
- z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) - if err != nil { - return nil, "", func() {}, err + if internal.NewZstdWriter == nil { + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) } - - z.Reset(rw) - return z, selected, func() { _ = z.Close() }, nil + writer, closeWriter, err := internal.NewZstdWriter(rw) + return writer, selected, closeWriter, err case "gzip": gz := gzipPool.Get().(*gzip.Writer) gz.Reset(rw) @@ -483,6 +487,6 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin return rw, selected, func() {}, nil default: // The content encoding was not implemented yet. - return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) } } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 356edb786..9332b0249 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { labels := prometheus.Labels{} - if !(code || method) { + if !code && !method { return labels } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go new file mode 100644 index 000000000..c5039590f --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go @@ -0,0 +1,21 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "io" +) + +// NewZstdWriter enables zstd write support if non-nil. 
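+// When it is nil, promhttp offers only identity and gzip during content
+// negotiation (see defaultCompressionFormats in promhttp/http.go).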
+var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error) diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 2c808eece..487b46656 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { return false } - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) + return m.deleteByHashWithLabelValues(h, lvs, m.curry) } // Delete deletes the metric where the variable labels are the same as those @@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool { return false } - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) + return m.deleteByHashWithLabels(h, labels, m.curry) } // DeletePartialMatch deletes all metrics where the variable labels contain all of those @@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels, closer := constrainLabels(m.desc, labels) defer closer() - return m.metricMap.deleteByLabels(labels, m.curry) + return m.deleteByLabels(labels, m.curry) } // Without explicit forwarding of Describe, Collect, Reset, those methods won't @@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil + return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } // GetMetricWith returns the Metric for the given Labels map (the label names @@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil + return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 25da157f1..2ed128506 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // metric names that are standardized across applications, as that would break // horizontal monitoring, for example the metrics provided by the Go collector // (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, +// fact, those metrics are already prefixed with "go_" or "process_", // respectively.) // // Conflicts between Collectors registered through the original Registerer with @@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { } } +// WrapCollectorWith returns a Collector wrapping the provided Collector. The +// wrapped Collector will add the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapCollectorWith can be useful to work with multiple instances of a third +// party library that does not expose enough flexibility on the lifecycle of its +// registered metrics. 
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can un-register the wrapped collector, effectively un-registering
+// the metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		labels:           labels,
+	}
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		prefix:           prefix,
+	}
+}
+
 type wrappingRegisterer struct {
 	wrappedRegisterer Registerer
 	prefix            string
diff --git a/tools/vendor/github.com/prometheus/common/expfmt/decode.go b/tools/vendor/github.com/prometheus/common/expfmt/decode.go
index 1448439b7..7b762370e 100644
--- a/tools/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/tools/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format {
 	return FmtUnknown
 }
 
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
 func NewDecoder(r io.Reader, format Format) Decoder {
+	scheme := model.LegacyValidation
+	if format.ToEscapingScheme() == model.NoEscaping {
+		scheme = model.UTF8Validation
+	}
 	switch format.FormatType() {
 	case TypeProtoDelim:
-		return &protoDecoder{r: bufio.NewReader(r)}
+		return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+	case TypeProtoText, TypeProtoCompact:
+		return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
 	}
-	return &textDecoder{r: r}
+	return &textDecoder{r: r, s: scheme}
 }
 
 // protoDecoder implements the Decoder interface for protocol buffers.
 type protoDecoder struct {
 	r protodelim.Reader
+	s model.ValidationScheme
 }
 
 // Decode implements the Decoder interface.
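For orientation, a minimal sketch of driving this decoder from client code; the sample metric text and the plain-text format choice are illustrative assumptions, not taken from this diff:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// A tiny, made-up exposition payload in the classic text format.
	in := strings.NewReader("# TYPE demo_requests_total counter\ndemo_requests_total 42\n")

	// NewDecoder derives the name-validation scheme from the format's
	// escaping settings, as implemented in the hunk above.
	dec := expfmt.NewDecoder(in, expfmt.NewFormat(expfmt.TypeTextPlain))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Println(mf.GetName(), mf.GetType())
	}
}
```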
@@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.s.IsValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.s.IsValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } @@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { return nil } +// errDecoder is an error-state decoder that always returns the same error. +type errDecoder struct { + err error +} + +func (d *errDecoder) Decode(*dto.MetricFamily) error { + return d.err +} + // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader fams map[string]*dto.MetricFamily + s model.ValidationScheme err error } @@ -126,7 +151,7 @@ type textDecoder struct { func (d *textDecoder) Decode(v *dto.MetricFamily) error { if d.err == nil { // Read all metrics in one shot. - var p TextParser + p := NewTextParser(d.s) d.fams, d.err = p.TextToMetricFamilies(d.r) // If we don't get an error, store io.EOF for the end. if d.err == nil { diff --git a/tools/vendor/github.com/prometheus/common/expfmt/encode.go b/tools/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f5..73c24dfbc 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,14 +18,12 @@ import ( "io" "net/http" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" ) // Encoder types encode metric families into an underlying wire protocol. @@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error { // appropriate accepted type is found, FmtText is returned (which is the // Prometheus text format). This function will never negotiate FmtOpenMetrics, // as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. +// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. 
func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go b/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560..c34c7de43 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go b/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a4..0290f6abc 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. 
func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1..8dbf6d04e 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/tools/vendor/github.com/prometheus/common/expfmt/text_create.go b/tools/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b3..c4e9c1bbc 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
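// For example, requests_total is written verbatim, while a UTF-8 name such as
// my.metric is emitted as "my.metric".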
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go b/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d..8f2edde32 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,25 +363,30 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. 
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/tools/vendor/github.com/prometheus/common/model/alert.go b/tools/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e..460f554f2 100644 --- a/tools/vendor/github.com/prometheus/common/model/alert.go +++ b/tools/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/tools/vendor/github.com/prometheus/common/model/labels.go b/tools/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e6..dfeb34be5 100644 --- a/tools/vendor/github.com/prometheus/common/model/labels.go +++ b/tools/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. 
+ // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -32,6 +32,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,33 +106,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/tools/vendor/github.com/prometheus/common/model/labelset.go b/tools/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da3..9de47b256 100644 --- a/tools/vendor/github.com/prometheus/common/model/labelset.go +++ b/tools/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. 
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
+func (ls *LabelSet) UnmarshalJSON(b []byte) error {
 	var m map[LabelName]LabelValue
 	if err := json.Unmarshal(b, &m); err != nil {
 		return err
@@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
 			return fmt.Errorf("%q is not a valid label name", ln)
 		}
 	}
-	*l = LabelSet(m)
+	*ls = LabelSet(m)
 	return nil
 }
diff --git a/tools/vendor/github.com/prometheus/common/model/metric.go b/tools/vendor/github.com/prometheus/common/model/metric.go
index 5766107cf..3feebf328 100644
--- a/tools/vendor/github.com/prometheus/common/model/metric.go
+++ b/tools/vendor/github.com/prometheus/common/model/metric.go
@@ -14,6 +14,7 @@ package model
 
 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"regexp"
@@ -23,17 +24,30 @@ import (
 	"unicode/utf8"
 
 	dto "github.com/prometheus/client_model/go"
+	"go.yaml.in/yaml/v2"
 	"google.golang.org/protobuf/proto"
 )
 
 var (
-	// NameValidationScheme determines the method of name validation to be used by
-	// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
-	// mode in isolation from other components that don't support UTF-8 may result
-	// in bugs or other undefined behavior. This value can be set to
-	// LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
-	// avoid need for locking, this value should be set once, ideally in an
-	// init(), before multiple goroutines are started.
+	// NameValidationScheme determines the global default method of the name
+	// validation to be used by all calls to IsValidMetricName() and LabelName
+	// IsValid().
+	//
+	// Deprecated: This variable should not be used and might be removed in the
+	// far future. If you wish to stick to the legacy name validation use
+	// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+	// instead. This variable is here as an escape hatch for emergency cases,
+	// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+	// to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+	// the change. In such a case, a temporary assignment to the `LegacyValidation`
+	// value in the `init()` function of your main.go (or similar) could be
+	// considered.
+	//
+	// Historically we opted for a global variable for feature gating different
+	// validation schemes in operations that were not otherwise easily adjustable
+	// (e.g. Labels yaml unmarshaling). That may have been a mistake; a separate
+	// Labels structure or package might have been a better choice. Given that the
+	// change was made and many users have already upgraded common, we leave this
+	// as-is, with this warning, as a lesson for the future.
 	NameValidationScheme = UTF8Validation
 
 	// NameEscapingScheme defines the default way that names will be escaped when
@@ -50,16 +64,151 @@
 type ValidationScheme int
 
 const (
-	// LegacyValidation is a setting that requirets that metric and label names
+	// UnsetValidation represents an undefined ValidationScheme.
+	// Should not be used in practice.
+	UnsetValidation ValidationScheme = iota
+
+	// LegacyValidation is a setting that requires that all metric and label names
 	// conform to the original Prometheus character requirements described by
 	// MetricNameRE and LabelNameRE.
-	LegacyValidation ValidationScheme = iota
+	LegacyValidation
 
 	// UTF8Validation only requires that metric and label names be valid UTF-8
 	// strings.
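+	// (The empty string is rejected by both schemes.)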
UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. +func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -89,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. 
AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -163,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -298,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -315,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -333,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. 
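+// ('x' - 'X' is 0x20, the ASCII case bit, so 'A'..'F' become 'a'..'f' while
+// the digits, which already have that bit set, pass through unchanged.)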
func lower(c byte) byte { return c | ('x' - 'X') } @@ -397,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/tools/vendor/github.com/prometheus/common/model/time.go b/tools/vendor/github.com/prometheus/common/model/time.go index 5727452c1..1730b0fdc 100644 --- a/tools/vendor/github.com/prometheus/common/model/time.go +++ b/tools/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } @@ -201,6 +201,7 @@ var unitMap = map[string]struct { // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. +// Negative durations are not supported. func ParseDuration(s string) (Duration, error) { switch s { case "0": @@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) { return 0, errors.New("duration out of range") } } + return Duration(dur), nil } +// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. +func ParseDurationAllowNegative(s string) (Duration, error) { + if s == "" || s[0] != '-' { + return ParseDuration(s) + } + + d, err := ParseDuration(s[1:]) + + return -d, err +} + func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + sign = "" ) + if ms == 0 { return "0s" } + if ms < 0 { + sign, ms = "-", -ms + } + f := func(unit string, mult int64, exact bool) { if exact && ms%mult != 0 { return @@ -286,7 +305,7 @@ func (d Duration) String() string { f("s", 1000, false) f("ms", 1, false) - return r + return sign + r } // MarshalJSON implements the json.Marshaler interface. 
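The sign handling above round-trips through the model API; a small sketch (assuming this version of prometheus/common is the one resolved on the module path):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration still rejects a leading minus sign...
	if _, err := model.ParseDuration("-1h30m"); err != nil {
		fmt.Println("ParseDuration:", err)
	}

	// ...while ParseDurationAllowNegative strips the sign, parses the rest,
	// and negates the result; the sign-aware String method re-attaches it.
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // -1h30m
}
```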
diff --git a/tools/vendor/github.com/prometheus/common/model/value.go b/tools/vendor/github.com/prometheus/common/model/value.go index 8050637d8..a9995a37e 100644 --- a/tools/vendor/github.com/prometheus/common/model/value.go +++ b/tools/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. @@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/tools/vendor/github.com/prometheus/common/model/value_histogram.go b/tools/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e8..91ce5b7a4 100644 --- a/tools/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/tools/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/tools/vendor/github.com/prometheus/common/model/value_type.go b/tools/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee6..078910f46 100644 --- a/tools/vendor/github.com/prometheus/common/model/value_type.go +++ b/tools/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error { return nil } -func (e ValueType) String() string { - switch e { 
+func (et ValueType) String() string { + switch et { case ValNone: return "" case ValScalar: diff --git a/tools/vendor/github.com/prometheus/procfs/.golangci.yml b/tools/vendor/github.com/prometheus/procfs/.golangci.yml index 126df9e67..3c3bf910f 100644 --- a/tools/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/tools/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,22 +1,45 @@ ---- +version: "2" linters: enable: - - errcheck - - godot - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - testifylint - - unused - -linter-settings: - godot: - capital: true - exclude: - # Ignore "See: URL" - - 'See:' - misspell: - locale: US + - forbidigo + - godot + - misspell + - revive + - testifylint + settings: + forbidigo: + forbid: + - pattern: ^fmt\.Print.*$ + msg: Do not commit print statements. + godot: + exclude: + # Ignore "See: URL". + - 'See:' + capital: true + misspell: + locale: US + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/prometheus/procfs + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/tools/vendor/github.com/prometheus/procfs/Makefile.common b/tools/vendor/github.com/prometheus/procfs/Makefile.common index 161729235..0ed55c2ba 100644 --- a/tools/vendor/github.com/prometheus/procfs/Makefile.common +++ b/tools/vendor/github.com/prometheus/procfs/Makefile.common @@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.0 +GOLANGCI_LINT_VERSION ?= v2.0.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -275,3 +275,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/tools/vendor/github.com/prometheus/procfs/README.md b/tools/vendor/github.com/prometheus/procfs/README.md index 1224816c2..0718239cf 100644 --- a/tools/vendor/github.com/prometheus/procfs/README.md +++ b/tools/vendor/github.com/prometheus/procfs/README.md @@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`. The procfs library includes a set of test fixtures which include many example files from the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. ```bash rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using `git diff testdata/fixtures.ttar`. diff --git a/tools/vendor/github.com/prometheus/procfs/arp.go b/tools/vendor/github.com/prometheus/procfs/arp.go index cdcc8a7cc..2e5334415 100644 --- a/tools/vendor/github.com/prometheus/procfs/arp.go +++ b/tools/vendor/github.com/prometheus/procfs/arp.go @@ -23,9 +23,9 @@ import ( // Learned from include/uapi/linux/if_arp.h. const ( - // completed entry (ha valid). + // Completed entry (ha valid). ATFComplete = 0x02 - // permanent entry. + // Permanent entry. ATFPermanent = 0x04 // Publish entry. ATFPublish = 0x08 diff --git a/tools/vendor/github.com/prometheus/procfs/fs.go b/tools/vendor/github.com/prometheus/procfs/fs.go index 4980c875b..9bdaccc7c 100644 --- a/tools/vendor/github.com/prometheus/procfs/fs.go +++ b/tools/vendor/github.com/prometheus/procfs/fs.go @@ -24,8 +24,14 @@ type FS struct { isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. diff --git a/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 134767d69..1b5bdbdf8 100644 --- a/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -17,7 +17,7 @@ package procfs // isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct -func isRealProc(mountPoint string) (bool, error) { +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { return true, nil } diff --git a/tools/vendor/github.com/prometheus/procfs/fscache.go b/tools/vendor/github.com/prometheus/procfs/fscache.go index cf2e3eaa0..7db863307 100644 --- a/tools/vendor/github.com/prometheus/procfs/fscache.go +++ b/tools/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 diff --git a/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3c18c7610..3a43e8391 100644 --- a/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -28,6 +28,9 @@ const ( // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go b/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go index 14272dc78..5a7d2df06 100644 --- a/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,6 +14,7 @@ package util import ( + "errors" "os" "strconv" "strings" @@ -110,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 1ab875cee..d5404a6d7 100644 --- a/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -20,6 +20,8 @@ package util import ( "bytes" "os" + "strconv" + "strings" "syscall" ) @@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) { return string(bytes.TrimSpace(b[:n])), nil } + +// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. 
+func SysReadUintFromFile(path string) (uint64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// SysReadIntFromFile reads a file using SysReadFile and attempts to parse an int64 from it. +func SysReadIntFromFile(path string) (int64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/tools/vendor/github.com/prometheus/procfs/mountstats.go b/tools/vendor/github.com/prometheus/procfs/mountstats.go index 75a3b6c81..50caa7327 100644 --- a/tools/vendor/github.com/prometheus/procfs/mountstats.go +++ b/tools/vendor/github.com/prometheus/procfs/mountstats.go @@ -45,11 +45,11 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 - // kernel version >= 4.14 MaxLen + // Kernel version >= 4.14 MaxLen // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 fieldTransport11RDMAMaxLen = 28 - // kernel version <= 4.2 MinLen + // Kernel version <= 4.2 MinLen // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 fieldTransport11RDMAMinLen = 20 ) @@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats switch statVersion { case statVersion10: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport10UDPLen - } else { + default: return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { @@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } case statVersion11: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport11UDPLen - } else if protocol == "rdma" { + case "rdma": expectedLength = fieldTransport11RDMAMinLen - } else { + default: return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || @@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. - if protocol == "udp" { + switch protocol { + case "udp": ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } else if protocol == "tcp" { + case "tcp": ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - } else if protocol == "rdma" { + case "rdma": ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
} diff --git a/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 000000000..f50b38e35 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<pid>/net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. +func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) +} + +// Returns kernel/system statistics read from interface files within the /proc/<pid>/net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) +} + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go b/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go index b70f1fc7a..19e3378f7 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files.
The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -50,12 +50,12 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 - // Drops shows the total number of dropped packets of all UPD sockets. + // Drops shows the total number of dropped packets of all UDP sockets. Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. netIPSocketLine struct { diff --git a/tools/vendor/github.com/prometheus/procfs/net_protocols.go b/tools/vendor/github.com/prometheus/procfs/net_protocols.go index b6c77b709..8d4b1ac05 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/tools/vendor/github.com/prometheus/procfs/net_protocols.go @@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro if err != nil { return nil, err } - if fields[4] == enabled { + switch fields[4] { + case enabled: line.Pressure = 1 - } else if fields[4] == disabled { + case disabled: line.Pressure = 0 - } else { + default: line.Pressure = -1 } line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } - if fields[6] == enabled { + switch fields[6] { + case enabled: line.Slab = true - } else if fields[6] == disabled { + case disabled: line.Slab = false - } else { + default: return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { + switch capabilities[i] { + case "y": *capabilityFields[i] = true - } else if capabilities[i] == "n" { + case "n": *capabilityFields[i] = false - } else { + default: return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } diff --git a/tools/vendor/github.com/prometheus/procfs/net_tcp.go b/tools/vendor/github.com/prometheus/procfs/net_tcp.go index 527762955..0396d7201 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/tools/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. 
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/tools/vendor/github.com/prometheus/procfs/net_unix.go b/tools/vendor/github.com/prometheus/procfs/net_unix.go index d868cebda..d7e0cacb4 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_unix.go +++ b/tools/vendor/github.com/prometheus/procfs/net_unix.go @@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/tools/vendor/github.com/prometheus/procfs/proc.go b/tools/vendor/github.com/prometheus/procfs/proc.go index 142796368..368187fa8 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc.go +++ b/tools/vendor/github.com/prometheus/procfs/proc.go @@ -37,9 +37,9 @@ type Proc struct { type Procs []Proc var ( - ErrFileParse = errors.New("Error Parsing File") - ErrFileRead = errors.New("Error Reading File") - ErrMountPoint = errors.New("Error Accessing Mount point") + ErrFileParse = errors.New("error parsing file") + ErrFileRead = errors.New("error reading file") + ErrMountPoint = errors.New("error accessing mount point") ) func (p Procs) Len() int { return len(p) } @@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) { if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) if err != nil { return Proc{}, err } diff --git a/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go b/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go index daeed7f57..4a64347c0 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -24,7 +24,7 @@ import ( ) // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // this hierarchy' (where==what path on the specific cgroupfs). 
By prefixing this path with the mount point of diff --git a/tools/vendor/github.com/prometheus/procfs/proc_io.go b/tools/vendor/github.com/prometheus/procfs/proc_io.go index 776f34971..d15b66ddb 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_io.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/tools/vendor/github.com/prometheus/procfs/proc_netstat.go b/tools/vendor/github.com/prometheus/procfs/proc_netstat.go index 8e3ff4d79..4248c1716 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value + procNetstat.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value + procNetstat.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value + procNetstat.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value + procNetstat.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value + procNetstat.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value + procNetstat.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value + procNetstat.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value + procNetstat.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value + procNetstat.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value + procNetstat.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = &value + procNetstat.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value + procNetstat.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = &value + procNetstat.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value + procNetstat.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value + procNetstat.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value + procNetstat.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value + procNetstat.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value + procNetstat.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value + procNetstat.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value + procNetstat.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value + procNetstat.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value + procNetstat.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value + procNetstat.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value + procNetstat.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery 
= &value + procNetstat.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value + procNetstat.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value + procNetstat.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value + procNetstat.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value + procNetstat.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value + procNetstat.TCPFullUndo = &value case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value + procNetstat.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value + procNetstat.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value + procNetstat.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value + procNetstat.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value + procNetstat.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value + procNetstat.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value + procNetstat.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value + procNetstat.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value + procNetstat.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value + procNetstat.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value + procNetstat.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value + procNetstat.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value + procNetstat.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value + procNetstat.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value + procNetstat.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value + procNetstat.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value + procNetstat.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value + procNetstat.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value + procNetstat.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value + procNetstat.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value + procNetstat.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value + procNetstat.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value + procNetstat.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value + procNetstat.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value + procNetstat.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value + procNetstat.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = &value + procNetstat.TCPRetransFail = &value 
case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = &value + procNetstat.TCPRcvCoalesce = &value case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value + procNetstat.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value + procNetstat.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value + procNetstat.TCPOFODrop = &value case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value + procNetstat.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value + procNetstat.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value + procNetstat.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value + procNetstat.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value + procNetstat.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value + procNetstat.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + procNetstat.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + procNetstat.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + procNetstat.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value + procNetstat.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + procNetstat.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value + procNetstat.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value + procNetstat.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + procNetstat.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value + procNetstat.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + procNetstat.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value + procNetstat.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value + procNetstat.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value + procNetstat.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value + procNetstat.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value + procNetstat.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value + procNetstat.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + procNetstat.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value + procNetstat.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value + procNetstat.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + procNetstat.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - 
procNetstat.TcpExt.TCPACKSkippedTimeWait = &value + procNetstat.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value + procNetstat.TCPACKSkippedChallenge = &value case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value + procNetstat.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value + procNetstat.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value + procNetstat.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value + procNetstat.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value + procNetstat.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value + procNetstat.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value + procNetstat.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value + procNetstat.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value + procNetstat.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value + procNetstat.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value + procNetstat.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = &value + procNetstat.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = &value + procNetstat.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value + procNetstat.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value + procNetstat.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value + procNetstat.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value + procNetstat.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value + procNetstat.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value + procNetstat.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value + procNetstat.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value + procNetstat.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value + procNetstat.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value + procNetstat.ReasmOverlaps = &value } } } diff --git a/tools/vendor/github.com/prometheus/procfs/proc_smaps.go b/tools/vendor/github.com/prometheus/procfs/proc_smaps.go index 09060e820..9a297afcf 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -19,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -29,7 +28,7 @@ import ( ) var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. + // Match the header line before each mapped zone in `/proc/pid/smaps`. 
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) @@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } diff --git a/tools/vendor/github.com/prometheus/procfs/proc_snmp.go b/tools/vendor/github.com/prometheus/procfs/proc_snmp.go index b9d2cf642..4bdc90b07 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = &value + procSnmp.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value + procSnmp.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = &value + procSnmp.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value + procSnmp.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value + procSnmp.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value + procSnmp.ForwDatagrams = &value case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value + procSnmp.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = &value + procSnmp.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = &value + procSnmp.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = &value + procSnmp.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = &value + procSnmp.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value + procSnmp.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value + procSnmp.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value + procSnmp.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value + procSnmp.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = &value + procSnmp.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = &value + procSnmp.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = &value + procSnmp.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = &value + procSnmp.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = &value + procSnmp.InMsgs = &value case "InErrors": procSnmp.Icmp.InErrors = &value case "InCsumErrors": procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value + procSnmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value + procSnmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value + procSnmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value + procSnmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = &value + procSnmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = &value + procSnmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value + procSnmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value + procSnmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value + procSnmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value + procSnmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value + procSnmp.InAddrMaskReps 
= &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = &value + procSnmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = &value + procSnmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value + procSnmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value + procSnmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value + procSnmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value + procSnmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value + procSnmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = &value + procSnmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value + procSnmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value + procSnmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value + procSnmp.OutTimestampReps = &value case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value + procSnmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value + procSnmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = &value + procSnmp.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value + procSnmp.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value + procSnmp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = &value + procSnmp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = &value + procSnmp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = &value + procSnmp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = &value + procSnmp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value + procSnmp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value + procSnmp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = &value + procSnmp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value + procSnmp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = &value + procSnmp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = &value + procSnmp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value + procSnmp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = &value + procSnmp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = &value + procSnmp.OutRsts = &value case "InCsumErrors": procSnmp.Tcp.InCsumErrors = &value } diff --git a/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go b/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go index 3059cc6a1..fb7fd3995 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = &value + procSnmp6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value + procSnmp6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value + procSnmp6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value + procSnmp6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value + procSnmp6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = &value + 
procSnmp6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = &value + procSnmp6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = &value + procSnmp6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = &value + procSnmp6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value + procSnmp6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = &value + procSnmp6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value + procSnmp6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value + procSnmp6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value + procSnmp6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value + procSnmp6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value + procSnmp6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value + procSnmp6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = &value + procSnmp6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = &value + procSnmp6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = &value + procSnmp6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value + procSnmp6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value + procSnmp6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = &value + procSnmp6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = &value + procSnmp6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value + procSnmp6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value + procSnmp6.OutMcastOctets = &value case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value + procSnmp6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value + procSnmp6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value + procSnmp6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value + procSnmp6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value + procSnmp6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value + procSnmp6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value + procSnmp6.InMsgs = &value case "InErrors": procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value + procSnmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value + procSnmp6.OutErrors = &value case "InCsumErrors": procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value + procSnmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value + procSnmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value + procSnmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value + procSnmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = &value + procSnmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value + procSnmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value + procSnmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value + procSnmp6.InGroupMembResponses = &value case 
"InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = &value + procSnmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value + procSnmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value + procSnmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value + procSnmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value + procSnmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value + procSnmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value + procSnmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value + procSnmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value + procSnmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value + procSnmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value + procSnmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value + procSnmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value + procSnmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value + procSnmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value + procSnmp6.OutGroupMembResponses = &value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value + procSnmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value + procSnmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value + procSnmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value + procSnmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value + procSnmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value + procSnmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value + procSnmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = &value + procSnmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = &value + procSnmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = &value + procSnmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = &value + procSnmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = &value + procSnmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = &value + procSnmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = &value + procSnmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = &value + procSnmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = &value + procSnmp6.OutType143 = &value } case "Udp6": switch key { @@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "InCsumErrors": procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value + procSnmp6.IgnoredMulti = &value } case "UdpLite6": switch key { diff --git 
a/tools/vendor/github.com/prometheus/procfs/proc_status.go b/tools/vendor/github.com/prometheus/procfs/proc_status.go index a055197c6..dd8aa5688 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_status.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_status.go @@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt } } case "NSpid": - s.NSpids = calcNSPidsList(vString) + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 { return g } -func calcNSPidsList(nspidsString string) []uint64 { - s := strings.Split(nspidsString, " ") +func calcNSPidsList(nspidsString string) ([]uint64, error) { + s := strings.Split(nspidsString, "\t") var nspids []uint64 for _, nspid := range s { - nspid, _ := strconv.ParseUint(nspid, 10, 64) - if nspid == 0 { - continue + nspid, err := strconv.ParseUint(nspid, 10, 64) + if err != nil { + return nil, err } nspids = append(nspids, nspid) } - return nspids + return nspids, nil } diff --git a/tools/vendor/github.com/prometheus/procfs/proc_sys.go b/tools/vendor/github.com/prometheus/procfs/proc_sys.go index 5eefbe2ef..3810d1ac9 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_sys.go @@ -21,7 +21,7 @@ import ( ) func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) + return strings.ReplaceAll(sysctl, ".", "/") } func (fs FS) SysctlStrings(sysctl string) ([]string, error) { diff --git a/tools/vendor/github.com/prometheus/procfs/softirqs.go b/tools/vendor/github.com/prometheus/procfs/softirqs.go index 28708e074..403e6ae70 100644 --- a/tools/vendor/github.com/prometheus/procfs/softirqs.go +++ b/tools/vendor/github.com/prometheus/procfs/softirqs.go @@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { if len(parts) < 2 { continue } - switch { - case parts[0] == "HI:": + switch parts[0] { + case "HI:": perCPU := parts[1:] softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TIMER:": + case "TIMER:": perCPU := parts[1:] softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_TX:": + case "NET_TX:": perCPU := parts[1:] softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_RX:": + case "NET_RX:": perCPU := parts[1:] softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "BLOCK:": + case "BLOCK:": perCPU := parts[1:] softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, 
fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "IRQ_POLL:": + case "IRQ_POLL:": perCPU := parts[1:] softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TASKLET:": + case "TASKLET:": perCPU := parts[1:] softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "SCHED:": + case "SCHED:": perCPU := parts[1:] softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "HRTIMER:": + case "HRTIMER:": perCPU := parts[1:] softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "RCU:": + case "RCU:": perCPU := parts[1:] softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { diff --git a/tools/vendor/github.com/rubenv/sql-migrate/.golangci.yaml b/tools/vendor/github.com/rubenv/sql-migrate/.golangci.yaml index f0a970753..f58184165 100644 --- a/tools/vendor/github.com/rubenv/sql-migrate/.golangci.yaml +++ b/tools/vendor/github.com/rubenv/sql-migrate/.golangci.yaml @@ -1,107 +1,133 @@ -linters-settings: - gocritic: - disabled-checks: - - ifElseChain - goimports: - local-prefixes: github.com/rubenv/sql-migrate - govet: - enable-all: true - disable: - - fieldalignment - depguard: - rules: - main: - allow: - - $gostd - - github.com/denisenkom/go-mssqldb - - github.com/go-sql-driver/mysql - - github.com/go-gorp/gorp/v3 - - github.com/lib/pq - - github.com/mattn/go-sqlite3 - - github.com/mitchellh/cli - - github.com/olekukonko/tablewriter - - github.com/rubenv/sql-migrate - exhaustive: - default-signifies-exhaustive: true - nolintlint: - allow-unused: false - allow-leading-space: false - allow-no-explanation: - - depguard - require-explanation: true - require-specific: true - revive: - enable-all-rules: false - rules: - - name: atomic - - name: blank-imports - - name: bool-literal-in-expr - - name: call-to-gc - - name: constant-logical-expr - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: duplicated-imports - - name: empty-block - - name: empty-lines - - name: error-naming - - name: error-return - - name: error-strings - - name: errorf - - name: exported - - name: identical-branches - - name: imports-blacklist - - name: increment-decrement - - name: indent-error-flow - - name: modifies-parameter - - name: modifies-value-receiver - - name: package-comments - - name: range - - name: range-val-address - - name: range-val-in-closure - - name: receiver-naming - - name: string-format - - name: string-of-int - - name: struct-tag - - name: time-naming - - name: unconditional-recursion - - name: unexported-naming - - name: unexported-return - - name: superfluous-else - - name: unreachable-code - - name: var-declaration - - name: waitgroup-by-value - - 
name: unused-receiver - - name: unnecessary-stmt - - name: unused-parameter +version: "2" run: tests: true - timeout: 1m linters: - disable-all: true + default: none enable: - asciicheck - depguard - errcheck + - errorlint - exhaustive - gocritic - - gofmt - - gofumpt - - goimports - govet - ineffassign - nolintlint - revive - staticcheck - - typecheck + - unparam - unused - whitespace - - errorlint - - gosimple - - unparam + settings: + depguard: + rules: + main: + allow: + - $gostd + - github.com/denisenkom/go-mssqldb + - github.com/go-sql-driver/mysql + - github.com/go-gorp/gorp/v3 + - github.com/lib/pq + - github.com/mattn/go-sqlite3 + - github.com/mitchellh/cli + - github.com/olekukonko/tablewriter + - github.com/rubenv/sql-migrate + - gopkg.in/check.v1 + - gopkg.in/yaml.v2 + exhaustive: + default-signifies-exhaustive: true + gocritic: + disabled-checks: + - ifElseChain + govet: + disable: + - fieldalignment + enable-all: true + nolintlint: + require-explanation: true + require-specific: true + allow-no-explanation: + - depguard + allow-unused: false + revive: + enable-all-rules: false + rules: + - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: duplicated-imports + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: identical-branches + - name: imports-blocklist + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: string-format + - name: string-of-int + - name: struct-tag + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unexported-return + - name: superfluous-else + - name: unreachable-code + - name: var-declaration + - name: waitgroup-by-value + - name: unused-receiver + - name: unnecessary-stmt + - name: unused-parameter + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: (.+)\.go$ + text: declaration of "err" shadows declaration at + - path: (.+)\.go$ + text: 'error-strings: error strings should not be capitalized or end with punctuation or a newline' + - path: (.+)\.go$ + text: 'ST1005: error strings should not end with punctuation or newline' + - path: (.+)\.go$ + text: 'ST1005: error strings should not be capitalized' + paths: + - third_party$ + - builtin$ + - examples$ issues: - exclude: - - 'declaration of "err" shadows declaration at' # Allow shadowing of `err` because it's so common - - 'error-strings: error strings should not be capitalized or end with punctuation or a newline' - max-same-issues: 10000 max-issues-per-linter: 10000 + max-same-issues: 10000 +formatters: + enable: + - gofmt + - gofumpt + - goimports + settings: + goimports: + local-prefixes: + - github.com/rubenv/sql-migrate + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/tools/vendor/github.com/rubenv/sql-migrate/migrate.go b/tools/vendor/github.com/rubenv/sql-migrate/migrate.go index 7fb56f1a9..c9cb4a48b 100644 --- a/tools/vendor/github.com/rubenv/sql-migrate/migrate.go +++ b/tools/vendor/github.com/rubenv/sql-migrate/migrate.go @@ -700,13 +700,14 @@ func (ms 
MigrationSet) planMigrationCommon(db *sql.DB, dialect string, m Migrati toApplyCount = max } for _, v := range toApply[0:toApplyCount] { - if dir == Up { + switch dir { + case Up: result = append(result, &PlannedMigration{ Migration: v, Queries: v.Up, DisableTransaction: v.DisableTransactionUp, }) - } else if dir == Down { + case Down: result = append(result, &PlannedMigration{ Migration: v, Queries: v.Down, @@ -779,14 +780,13 @@ func ToApply(migrations []*Migration, current string, direction MigrationDirecti } } - if direction == Up { + switch direction { + case Up: return migrations[index+1:] - } else if direction == Down { + case Down: if index == -1 { return []*Migration{} } - - // Add in reverse order toApply := make([]*Migration, index+1) for i := 0; i < index+1; i++ { toApply[index-i] = migrations[i] diff --git a/tools/vendor/github.com/sagikazarmark/locafero/.envrc b/tools/vendor/github.com/sagikazarmark/locafero/.envrc index 2e0f9f5f7..5c95dc798 100644 --- a/tools/vendor/github.com/sagikazarmark/locafero/.envrc +++ b/tools/vendor/github.com/sagikazarmark/locafero/.envrc @@ -1,4 +1,4 @@ -if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +if ! has nix_direnv_version || ! nix_direnv_version 3.1.0; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.1.0/direnvrc" "sha256-yMJ2OVMzrFaDPn7q8nCBZFRYpL/f0RcHzhmw/i6btJM=" fi use flake . --impure diff --git a/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml index 829de2a4a..a27a42959 100644 --- a/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml +++ b/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml @@ -1,27 +1,37 @@ +version: "2" + run: timeout: 10m -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/sagikazarmark/locafero) - goimports: - local-prefixes: github.com/sagikazarmark/locafero - misspell: - locale: US - nolintlint: - allow-leading-space: false # require machine-readable nolint directives (with no leading space) - allow-unused: false # report any unused nolint directives - require-specific: false # don't require nolint directives to be specific about which linter is being skipped - revive: - confidence: 0 - linters: enable: - - gci - - goimports + - errcheck + - govet + - ineffassign - misspell - nolintlint - revive + - staticcheck + - unused + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + - golines + + settings: + gci: + sections: + - standard + - default + - localmodule diff --git a/tools/vendor/github.com/sagikazarmark/locafero/README.md b/tools/vendor/github.com/sagikazarmark/locafero/README.md index a48e8e978..d25fe80f3 100644 --- a/tools/vendor/github.com/sagikazarmark/locafero/README.md +++ b/tools/vendor/github.com/sagikazarmark/locafero/README.md @@ -2,8 +2,8 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) [![go.dev 
reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) -[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/sagikazarmark/locafero?style=flat-square&color=61CFDD) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/sagikazarmark/locafero/badge?style=flat-square)](https://deps.dev/go/github.com%252Fsagikazarmark%252Flocafero) **Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).** diff --git a/tools/vendor/github.com/sagikazarmark/locafero/file_type.go b/tools/vendor/github.com/sagikazarmark/locafero/file_type.go index 9a9b14023..5ea57c93e 100644 --- a/tools/vendor/github.com/sagikazarmark/locafero/file_type.go +++ b/tools/vendor/github.com/sagikazarmark/locafero/file_type.go @@ -5,19 +5,23 @@ import "io/fs" // FileType represents the kind of entries [Finder] can return. type FileType int +// FileType represents the kind of entries [Finder] can return. const ( - FileTypeAll FileType = iota + FileTypeAny FileType = iota FileTypeFile FileTypeDir + + // Deprecated: Use [FileTypeAny] instead. + FileTypeAll = FileTypeAny ) -func (ft FileType) matchFileInfo(info fs.FileInfo) bool { +func (ft FileType) match(info fs.FileInfo) bool { switch ft { - case FileTypeAll: + case FileTypeAny: return true case FileTypeFile: - return !info.IsDir() + return info.Mode().IsRegular() case FileTypeDir: return info.IsDir() diff --git a/tools/vendor/github.com/sagikazarmark/locafero/finder.go b/tools/vendor/github.com/sagikazarmark/locafero/finder.go index ef8d54712..ce43c7826 100644 --- a/tools/vendor/github.com/sagikazarmark/locafero/finder.go +++ b/tools/vendor/github.com/sagikazarmark/locafero/finder.go @@ -1,4 +1,4 @@ -// Package finder looks for files and directories in an {fs.Fs} filesystem. +// Package locafero looks for files and directories in an {fs.Fs} filesystem. package locafero import ( @@ -7,7 +7,7 @@ import ( "path/filepath" "strings" - "github.com/sourcegraph/conc/iter" + "github.com/sourcegraph/conc/pool" "github.com/spf13/afero" ) @@ -44,65 +44,66 @@ type Finder struct { // Find looks for files and directories in an [afero.Fs] filesystem. 
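The file_type.go hunk above also tightens FileTypeFile: the old predicate !info.IsDir() matched symlinks, sockets, and device nodes, whereas info.Mode().IsRegular() matches regular files only. A small sketch of the behavioral difference (assumes a Unix-like system where /dev/null exists):

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
)

// oldMatch is the pre-patch predicate; newMatch is the patched one.
func oldMatch(info fs.FileInfo) bool { return !info.IsDir() }
func newMatch(info fs.FileInfo) bool { return info.Mode().IsRegular() }

func main() {
	info, err := os.Lstat("/dev/null") // a character device, not a regular file
	if err != nil {
		panic(err)
	}
	fmt.Println(oldMatch(info), newMatch(info)) // true false
}
```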
func (f Finder) Find(fsys afero.Fs) ([]string, error) { // Arbitrary go routine limit (TODO: make this a parameter) - // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError() + p := pool.NewWithResults[[]searchResult]().WithMaxGoroutines(5).WithErrors().WithFirstError() - type searchItem struct { - path string - name string + for _, searchPath := range f.Paths { + for _, searchName := range f.Names { + p.Go(func() ([]searchResult, error) { + // If the name contains any glob character, perform a glob match + if strings.ContainsAny(searchName, globMatch) { + return globWalkSearch(fsys, searchPath, searchName, f.Type) + } + + return statSearch(fsys, searchPath, searchName, f.Type) + }) + } } - var searchItems []searchItem + searchResults, err := flatten(p.Wait()) + if err != nil { + return nil, err + } - for _, searchPath := range f.Paths { - searchPath := searchPath + // Return early if no results were found + if len(searchResults) == 0 { + return nil, nil + } - for _, searchName := range f.Names { - searchName := searchName - - searchItems = append(searchItems, searchItem{searchPath, searchName}) - - // pool.Go(func() ([]string, error) { - // // If the name contains any glob character, perform a glob match - // if strings.ContainsAny(searchName, globMatch) { - // return globWalkSearch(fsys, searchPath, searchName, f.Type) - // } - // - // return statSearch(fsys, searchPath, searchName, f.Type) - // }) - } + results := make([]string, 0, len(searchResults)) + + for _, searchResult := range searchResults { + results = append(results, searchResult.path) } - // allResults, err := pool.Wait() - // if err != nil { - // return nil, err - // } + return results, nil +} - allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { - // If the name contains any glob character, perform a glob match - if strings.ContainsAny(item.name, globMatch) { - return globWalkSearch(fsys, item.path, item.name, f.Type) - } +type searchResult struct { + path string + info fs.FileInfo +} - return statSearch(fsys, item.path, item.name, f.Type) - }) +func flatten[T any](results [][]T, err error) ([]T, error) { if err != nil { return nil, err } - var results []string + var flattened []T - for _, r := range allResults { - results = append(results, r...) + for _, r := range results { + flattened = append(flattened, r...) 
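The Find rewrite above swaps the commented-out pool code and iter.MapErr for sourcegraph/conc's result pool, and drops the searchPath := searchPath copies, which presumes Go 1.22 per-iteration loop variables. A standalone sketch of the pool pattern as the new Find uses it, with placeholder task bodies:

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	// Same construction as the patch: capped at 5 goroutines, collect
	// results, stop at the first error.
	p := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError()
	for _, n := range []int{1, 2, 3} { // n is per-iteration as of Go 1.22
		p.Go(func() ([]string, error) {
			return []string{fmt.Sprintf("result-%d", n)}, nil
		})
	}
	results, err := p.Wait() // [][]string; the patch flattens this with its flatten helper
	fmt.Println(results, err)
}
```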
} - // Sort results in alphabetical order for now - // sort.Strings(results) - - return results, nil + return flattened, nil } -func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { - var results []string +func globWalkSearch( + fsys afero.Fs, + searchPath string, + searchName string, + searchType FileType, +) ([]searchResult, error) { + var results []searchResult err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error { if err != nil { @@ -123,7 +124,7 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT } // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { + if !searchType.match(fileInfo) { return result } @@ -133,7 +134,7 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT } if match { - results = append(results, p) + results = append(results, searchResult{p, fileInfo}) } return result @@ -145,7 +146,12 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT return results, nil } -func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { +func statSearch( + fsys afero.Fs, + searchPath string, + searchName string, + searchType FileType, +) ([]searchResult, error) { filePath := filepath.Join(searchPath, searchName) fileInfo, err := fsys.Stat(filePath) @@ -157,9 +163,9 @@ func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType } // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { + if !searchType.match(fileInfo) { return nil, nil } - return []string{filePath}, nil + return []searchResult{{filePath, fileInfo}}, nil } diff --git a/tools/vendor/github.com/sagikazarmark/locafero/flake.lock b/tools/vendor/github.com/sagikazarmark/locafero/flake.lock index df2a8ccec..b14a842c2 100644 --- a/tools/vendor/github.com/sagikazarmark/locafero/flake.lock +++ b/tools/vendor/github.com/sagikazarmark/locafero/flake.lock @@ -2,30 +2,32 @@ "nodes": { "cachix": { "inputs": { - "devenv": "devenv_2", + "devenv": [ + "devenv" + ], "flake-compat": [ + "devenv" + ], + "git-hooks": [ "devenv", - "flake-compat" + "git-hooks" ], "nixpkgs": [ "devenv", "nixpkgs" - ], - "pre-commit-hooks": [ - "devenv", - "pre-commit-hooks" ] }, "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "lastModified": 1748883665, + "narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=", "owner": "cachix", "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "rev": "f707778d902af4d62d8dd92c269f8e70de09acbe", "type": "github" }, "original": { "owner": "cachix", + "ref": "latest", "repo": "cachix", "type": "github" } @@ -33,52 +35,21 @@ "devenv": { "inputs": { "cachix": "cachix", - "flake-compat": "flake-compat_2", - "nix": "nix_2", - "nixpkgs": "nixpkgs_2", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1725907707, - "narHash": "sha256-s3pbtzZmVPHzc86WQjK7MGZMNvvw6hWnFMljEkllAfM=", - "owner": "cachix", - "repo": "devenv", - "rev": "2bbbbc468fc02257265a79652a8350651cca495a", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "flake-compat": [ - "devenv", - "cachix", - "flake-compat" - ], + "flake-compat": "flake-compat", + "git-hooks": "git-hooks", "nix": "nix", - "nixpkgs": "nixpkgs", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ - "devenv", - 
"cachix", - "pre-commit-hooks" - ] + "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "lastModified": 1753981111, + "narHash": "sha256-uBJOyMxOkGRmxhD2M5rbN2aV6oP1T2AKq5oBaHHC4mw=", "owner": "cachix", "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "rev": "d4d70df706b153b601a87ab8e81c88a0b1a373b6", "type": "github" }, "original": { "owner": "cachix", - "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -86,27 +57,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { @@ -117,14 +72,18 @@ }, "flake-parts": { "inputs": { - "nixpkgs-lib": "nixpkgs-lib" + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] }, "locked": { - "lastModified": 1725234343, - "narHash": "sha256-+ebgonl3NbiKD2UD0x4BszCZQ6sTfL4xioaM49o5B3Y=", + "lastModified": 1733312601, + "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "567b938d64d4b4112ee253b9274472dc3a346eb6", + "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", "type": "github" }, "original": { @@ -133,39 +92,47 @@ "type": "github" } }, - "flake-utils": { + "flake-parts_2": { "inputs": { - "systems": "systems" + "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "lastModified": 1753121425, + "narHash": "sha256-TVcTNvOeWWk1DXljFxVRp+E0tzG1LhrVjOGGoMHuXio=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "644e0fc48951a860279da645ba77fe4a6e814c5e", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "hercules-ci", + "repo": "flake-parts", "type": "github" } }, - "flake-utils_2": { + "git-hooks": { "inputs": { - "systems": "systems_2" + "flake-compat": [ + "devenv", + "flake-compat" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] }, "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "lastModified": 1750779888, + "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "cachix", + "repo": "git-hooks.nix", "type": "github" } }, @@ -173,7 +140,7 @@ "inputs": { "nixpkgs": [ "devenv", - "pre-commit-hooks", + "git-hooks", "nixpkgs" ] }, @@ -192,165 +159,49 @@ } }, "nix": { - "inputs": { - "flake-compat": 
"flake-compat", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], + "flake-parts": "flake-parts", + "git-hooks-nix": [ + "devenv", + "git-hooks" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression_2" + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" + ] }, "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", + "lastModified": 1752773918, + "narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=", + "owner": "cachix", "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "rev": "031c3cf42d2e9391eee373507d8c12e0f9606779", "type": "github" }, "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", + "owner": "cachix", + "ref": "devenv-2.30", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1725233747, - "narHash": "sha256-Ss8QWLXdr2JCBPcYChJhz4xJm+h/xjl4G0c0XlP6a74=", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" - }, - "original": { - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1710695816, - "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": 
"614b4613980a522ba49f0d194531beddbb7220d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1713361204, - "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "lastModified": 1750441195, + "narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=", "owner": "cachix", "repo": "devenv-nixpkgs", - "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "rev": "0ceffe312871b443929ff3006960d29b120dc627", "type": "github" }, "original": { @@ -360,110 +211,42 @@ "type": "github" } }, - "nixpkgs_3": { - "locked": { - "lastModified": 1725910328, - "narHash": "sha256-n9pCtzGZ0httmTwMuEbi5E78UQ4ZbQMr1pzi5N0LAG8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "5775c2583f1801df7b790bf7f7d710a19bac66f4", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, + "nixpkgs-lib": { "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "lastModified": 1751159883, + "narHash": "sha256-urW/Ylk9FIfvXfliA1ywh75yszAbiTEVgpPeinFyVZo=", "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "repo": "nixpkgs.lib", + "rev": "14a40a1d7fb9afa4739275ac642ed7301a9ba1ab", "type": "github" }, "original": { "owner": "nix-community", - "repo": "poetry2nix", + "repo": "nixpkgs.lib", "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils_2", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, + "nixpkgs_2": { "locked": { - "lastModified": 1713775815, - "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "lastModified": 1753939845, + "narHash": "sha256-K2ViRJfdVGE8tpJejs8Qpvvejks1+A4GQej/lBk5y7I=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "94def634a20494ee057c76998843c015909d6311", "type": "github" }, "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_3" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" + "flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_2" } } }, diff --git a/tools/vendor/github.com/sagikazarmark/locafero/flake.nix b/tools/vendor/github.com/sagikazarmark/locafero/flake.nix index 
312f1ec8c..bdb10dbe4 100644 --- a/tools/vendor/github.com/sagikazarmark/locafero/flake.nix +++ b/tools/vendor/github.com/sagikazarmark/locafero/flake.nix @@ -1,64 +1,42 @@ { - description = "Finder library for Afero"; - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; flake-parts.url = "github:hercules-ci/flake-parts"; devenv.url = "github:cachix/devenv"; }; - outputs = inputs@{ flake-parts, ... }: + outputs = + inputs@{ flake-parts, ... }: flake-parts.lib.mkFlake { inherit inputs; } { imports = [ inputs.devenv.flakeModule ]; - systems = [ "x86_64-linux" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.lib.mkDefault pkgs.go_1_23; - }; - - packages = with pkgs; [ - just - - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - - ci_1_21 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_21; - }; - }; + systems = [ + "x86_64-linux" + "aarch64-darwin" + ]; - ci_1_22 = { - imports = [ devenv.shells.ci ]; + perSystem = + { pkgs, ... }: + { + devenv.shells = { + default = { + languages = { + go.enable = true; + go.package = pkgs.lib.mkDefault pkgs.go_1_24; + }; - languages = { - go.package = pkgs.go_1_22; - }; - }; + packages = with pkgs; [ + just - ci_1_23 = { - imports = [ devenv.shells.ci ]; + golangci-lint + ]; - languages = { - go.package = pkgs.go_1_23; + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; }; }; }; - }; }; } diff --git a/tools/vendor/github.com/sagikazarmark/locafero/justfile b/tools/vendor/github.com/sagikazarmark/locafero/justfile index 00a88850c..bac5e75db 100644 --- a/tools/vendor/github.com/sagikazarmark/locafero/justfile +++ b/tools/vendor/github.com/sagikazarmark/locafero/justfile @@ -2,10 +2,13 @@ default: just --list test: - go test -race -v ./... + go test -count 10 -shuffle on -race -v ./... + +fuzz: + go test -race -v -fuzz=Fuzz -fuzztime=60s ./... 
lint: golangci-lint run fmt: - golangci-lint run --fix + golangci-lint fmt diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules new file mode 100644 index 000000000..d14f5ea70 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules @@ -0,0 +1,4 @@ +[submodule "testdata/JSON-Schema-Test-Suite"] + path = testdata/JSON-Schema-Test-Suite + url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git + branch = main diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml new file mode 100644 index 000000000..6534d5316 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml @@ -0,0 +1,7 @@ +version: "2" +linters: + enable: + - nakedret + - errname + - godot + - misspell diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml new file mode 100644 index 000000000..695b502ed --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml @@ -0,0 +1,7 @@ +- id: jsonschema-validate + name: Validate JSON against JSON Schema + description: ensure json files follow specified JSON Schema + entry: jv + language: golang + additional_dependencies: + - ./cmd/jv diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE new file mode 100644 index 000000000..19dc35b24 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. \ No newline at end of file diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md new file mode 100644 index 000000000..1243b66c5 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md @@ -0,0 +1,88 @@ +# jsonschema v6.0.2 + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v6)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v6) +[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=boon)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) +[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/boon/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema/tree/boon) + +see [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) for examples + +## Library Features + +- [x] pass [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite) excluding optional(compare with other impls at [bowtie](https://bowtie-json-schema.github.io/bowtie/#)) + - [x] [![draft-04](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft4.json)](https://bowtie.report/#/dialects/draft4) + - [x] [![draft-06](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft6.json)](https://bowtie.report/#/dialects/draft6) + - [x] [![draft-07](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft7.json)](https://bowtie.report/#/dialects/draft7) + - [x] [![draft/2019-09](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2019-09.json)](https://bowtie.report/#/dialects/draft2019-09) + - [x] 
[![draft/2020-12](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2020-12.json)](https://bowtie.report/#/dialects/draft2020-12) +- [x] detect infinite loop traps + - [x] `$schema` cycle + - [x] validation cycle +- [x] custom `$schema` url +- [x] vocabulary based validation +- [x] custom regex engine +- [x] format assertions + - [x] flag to enable in draft >= 2019-09 + - [x] custom format registration + - [x] built-in formats + - [x] regex, uuid + - [x] ipv4, ipv6 + - [x] hostname, email + - [x] date, time, date-time, duration + - [x] json-pointer, relative-json-pointer + - [x] uri, uri-reference, uri-template + - [x] iri, iri-reference + - [x] period, semver +- [x] content assertions + - [x] flag to enable in draft >= 7 + - [x] contentEncoding + - [x] base64 + - [x] custom + - [x] contentMediaType + - [x] application/json + - [x] custom + - [x] contentSchema +- [x] errors + - [x] introspectable + - [x] hierarchy + - [x] alternative display with `#` + - [x] output + - [x] flag + - [x] basic + - [x] detailed +- [x] custom vocabulary + - enable via `$vocabulary` for draft >= 2019-09 + - enable via flag for draft <= 7 +- [x] mixed dialect support + +## CLI v0.7.0 + +to install: `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` + +Note that the CLI is versioned independently; you can see it in git tags `cmd/jv/v0.7.0` + +``` +Usage: jv [OPTIONS] SCHEMA [INSTANCE...] + +Options: + -c, --assert-content Enable content assertions with draft >= 7 + -f, --assert-format Enable format assertions with draft >= 2019 + --cacert pem-file Use the specified pem-file to verify the peer. The file may contain multiple CA certificates + -d, --draft version Draft version used when '$schema' is missing. Valid values 4, 6, 7, 2019, 2020 (default 2020) + -h, --help Print help information + -k, --insecure Use insecure TLS connection + -o, --output format Output format. Valid values simple, alt, flag, basic, detailed (default "simple") + -q, --quiet Do not print errors + -v, --version Print build information +``` + +- [x] exit code `1` for validation errors, `2` for usage errors +- [x] validate both schema and multiple instances +- [x] support both json and yaml files +- [x] support standard input, use `-` +- [x] quiet mode with parsable output +- [x] http(s) url support + - [x] custom certs for validation, use `--cacert` + - [x] flag to skip certificate verification, use `--insecure` + diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go new file mode 100644 index 000000000..4da736103 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go @@ -0,0 +1,332 @@ +package jsonschema + +import ( + "fmt" + "regexp" + "slices" +) + +// Compiler compiles json schema into *Schema. +type Compiler struct { + schemas map[urlPtr]*Schema + roots *roots + formats map[string]*Format + decoders map[string]*Decoder + mediaTypes map[string]*MediaType + assertFormat bool + assertContent bool +} + +// NewCompiler creates a Compiler object. +func NewCompiler() *Compiler { + return &Compiler{ + schemas: map[urlPtr]*Schema{}, + roots: newRoots(), + formats: map[string]*Format{}, + decoders: map[string]*Decoder{}, + mediaTypes: map[string]*MediaType{}, + assertFormat: false, + assertContent: false, + } +} + +// DefaultDraft overrides the draft used to +// compile schemas without `$schema` field. +// +// By default, this library uses the latest +// draft supported.
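As a usage note for the compiler API being vendored above, here is a hedged sketch of compiling and validating against an in-memory schema. NewCompiler, AddResource, and Compile appear in this hunk; Schema.Validate is the v6 method, not shown here:

```go
package main

import (
	"fmt"

	"github.com/santhosh-tekuri/jsonschema/v6"
)

func main() {
	c := jsonschema.NewCompiler()
	// Register an in-memory schema document under a chosen location.
	err := c.AddResource("schema.json", map[string]any{
		"type": "object",
		"properties": map[string]any{
			"name": map[string]any{"type": "string"},
		},
	})
	if err != nil {
		panic(err)
	}
	sch, err := c.Compile("schema.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(sch.Validate(map[string]any{"name": "x"})) // <nil>
}
```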
+// +// The use of this option is HIGHLY encouraged +// to ensure continued correct operation of your +// schema. The current default value will not stay +// the same over time. +func (c *Compiler) DefaultDraft(d *Draft) { + c.roots.defaultDraft = d +} + +// AssertFormat always enables format assertions. +// +// Default Behavior: +// for draft-07: enabled. +// for draft/2019-09: disabled unless metaschema says `format` vocabulary is required. +// for draft/2020-12: disabled unless metaschema says `format-assertion` vocabulary is required. +func (c *Compiler) AssertFormat() { + c.assertFormat = true +} + +// AssertContent enables content assertions. +// +// Content assertions include keywords: +// - contentEncoding +// - contentMediaType +// - contentSchema +// +// Default behavior is always disabled. +func (c *Compiler) AssertContent() { + c.assertContent = true +} + +// RegisterFormat registers a custom format. +// +// NOTE: +// - "regex" format cannot be overridden +// - format assertions are disabled for draft >= 2019-09 +// see [Compiler.AssertFormat] +func (c *Compiler) RegisterFormat(f *Format) { + if f.Name != "regex" { + c.formats[f.Name] = f + } +} + +// RegisterContentEncoding registers a custom contentEncoding. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentEncoding(d *Decoder) { + c.decoders[d.Name] = d +} + +// RegisterContentMediaType registers a custom contentMediaType. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentMediaType(mt *MediaType) { + c.mediaTypes[mt.Name] = mt +} + +// RegisterVocabulary registers a custom vocabulary. +// +// NOTE: +// - vocabularies are disabled for draft >= 2019-09 +// see [Compiler.AssertVocabs] +func (c *Compiler) RegisterVocabulary(vocab *Vocabulary) { + c.roots.vocabularies[vocab.URL] = vocab +} + +// AssertVocabs always enables user-defined vocabulary assertions. +// +// Default Behavior: +// for draft-07: enabled. +// for draft/2019-09: disabled unless metaschema enables a vocabulary. +// for draft/2020-12: disabled unless metaschema enables a vocabulary. +func (c *Compiler) AssertVocabs() { + c.roots.assertVocabs = true +} + +// AddResource adds a schema resource which is used later in reference +// resolution. +// +// The argument url can be a file path or URL. Any fragment in url is ignored. +// The argument doc must be a valid json value. +func (c *Compiler) AddResource(url string, doc any) error { + uf, err := absolute(url) + if err != nil { + return err + } + if isMeta(string(uf.url)) { + return &ResourceExistsError{string(uf.url)} + } + if !c.roots.loader.add(uf.url, doc) { + return &ResourceExistsError{string(uf.url)} + } + return nil +} + +// UseLoader overrides the default [URLLoader] used +// to load schema resources. +func (c *Compiler) UseLoader(loader URLLoader) { + c.roots.loader.loader = loader +} + +// UseRegexpEngine changes the regexp-engine used. +// By default, it uses the regexp package from the +// Go standard library. +// +// NOTE: must be called before compiling any schemas.
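A hedged sketch of the registration hooks above, wiring a custom format. The Format fields (Name, Validate) follow the v6 definition, which is not part of this hunk, and the format name is purely illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v6"
)

func main() {
	c := jsonschema.NewCompiler()
	c.AssertFormat() // per the comments above, assertions are otherwise off for draft >= 2019-09
	c.RegisterFormat(&jsonschema.Format{
		Name: "x-prefixed", // hypothetical format name
		Validate: func(v any) error {
			s, ok := v.(string)
			if !ok {
				return nil // like the built-ins, ignore non-string values
			}
			if !strings.HasPrefix(s, "x-") {
				return fmt.Errorf("%q does not start with x-", s)
			}
			return nil
		},
	})
}
```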
+func (c *Compiler) UseRegexpEngine(engine RegexpEngine) { + if engine == nil { + engine = goRegexpCompile + } + c.roots.regexpEngine = engine +} + +func (c *Compiler) enqueue(q *queue, up urlPtr) *Schema { + if sch, ok := c.schemas[up]; ok { + // already got compiled + return sch + } + if sch := q.get(up); sch != nil { + return sch + } + sch := newSchema(up) + q.append(sch) + return sch +} + +// MustCompile is like [Compile] but panics if compilation fails. +// It simplifies safe initialization of global variables holding +// compiled schema. +func (c *Compiler) MustCompile(loc string) *Schema { + sch, err := c.Compile(loc) + if err != nil { + panic(fmt.Sprintf("jsonschema: Compile(%q): %v", loc, err)) + } + return sch +} + +// Compile compiles json-schema at given loc. +func (c *Compiler) Compile(loc string) (*Schema, error) { + uf, err := absolute(loc) + if err != nil { + return nil, err + } + up, err := c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.doCompile(up) +} + +func (c *Compiler) doCompile(up urlPtr) (*Schema, error) { + q := &queue{} + compiled := 0 + + c.enqueue(q, up) + for q.len() > compiled { + sch := q.at(compiled) + if err := c.roots.ensureSubschema(sch.up); err != nil { + return nil, err + } + r := c.roots.roots[sch.up.url] + v, err := sch.up.lookup(r.doc) + if err != nil { + return nil, err + } + if err := c.compileValue(v, sch, r, q); err != nil { + return nil, err + } + compiled++ + } + for _, sch := range *q { + c.schemas[sch.up] = sch + } + return c.schemas[up], nil +} + +func (c *Compiler) compileValue(v any, sch *Schema, r *root, q *queue) error { + res := r.resource(sch.up.ptr) + sch.DraftVersion = res.dialect.draft.version + + base := urlPtr{sch.up.url, res.ptr} + sch.resource = c.enqueue(q, base) + + // if resource, enqueue dynamic anchors for compilation + if sch.DraftVersion >= 2020 && sch.up == sch.resource.up { + res := r.resource(sch.up.ptr) + for anchor, anchorPtr := range res.anchors { + if slices.Contains(res.dynamicAnchors, anchor) { + up := urlPtr{sch.up.url, anchorPtr} + danchorSch := c.enqueue(q, up) + if sch.dynamicAnchors == nil { + sch.dynamicAnchors = map[string]*Schema{} + } + sch.dynamicAnchors[string(anchor)] = danchorSch + } + } + } + + switch v := v.(type) { + case bool: + sch.Bool = &v + case map[string]any: + if err := c.compileObject(v, sch, r, q); err != nil { + return err + } + } + + sch.allPropsEvaluated = sch.AdditionalProperties != nil + if sch.DraftVersion < 2020 { + sch.allItemsEvaluated = sch.AdditionalItems != nil + switch items := sch.Items.(type) { + case *Schema: + sch.allItemsEvaluated = true + case []*Schema: + sch.numItemsEvaluated = len(items) + } + } else { + sch.allItemsEvaluated = sch.Items2020 != nil + sch.numItemsEvaluated = len(sch.PrefixItems) + } + + return nil +} + +func (c *Compiler) compileObject(obj map[string]any, sch *Schema, r *root, q *queue) error { + if len(obj) == 0 { + b := true + sch.Bool = &b + return nil + } + oc := objCompiler{ + c: c, + obj: obj, + up: sch.up, + r: r, + res: r.resource(sch.up.ptr), + q: q, + } + return oc.compile(sch) +} + +// queue -- + +type queue []*Schema + +func (q *queue) append(sch *Schema) { + *q = append(*q, sch) +} + +func (q *queue) at(i int) *Schema { + return (*q)[i] +} + +func (q *queue) len() int { + return len(*q) +} + +func (q *queue) get(up urlPtr) *Schema { + i := slices.IndexFunc(*q, func(sch *Schema) bool { return sch.up == up }) + if i != -1 { + return (*q)[i] + } + return nil +} + +// regexp -- + +// Regexp is the 
representation of a compiled regular expression. +type Regexp interface { + fmt.Stringer + + // MatchString reports whether the string s contains + // any match of the regular expression. + MatchString(string) bool +} + +// RegexpEngine parses a regular expression and returns, +// if successful, a Regexp object that can be used to +// match against text. +type RegexpEngine func(string) (Regexp, error) + +func (re RegexpEngine) validate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + _, err := re(s) + return err +} + +func goRegexpCompile(s string) (Regexp, error) { + return regexp.Compile(s) +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go new file mode 100644 index 000000000..8d62e58b0 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go @@ -0,0 +1,51 @@ +package jsonschema + +import ( + "bytes" + "encoding/base64" + "encoding/json" +) + +// Decoder specifies how to decode a specific contentEncoding. +type Decoder struct { + // Name of contentEncoding. + Name string + // Decode decodes the given string to a byte array. + Decode func(string) ([]byte, error) +} + +var decoders = map[string]*Decoder{ + "base64": { + Name: "base64", + Decode: func(s string) ([]byte, error) { + return base64.StdEncoding.DecodeString(s) + }, + }, +} + +// MediaType specifies how to validate bytes against a specific contentMediaType. +type MediaType struct { + // Name of contentMediaType. + Name string + + // Validate checks whether bytes conform to this mediatype. + Validate func([]byte) error + + // UnmarshalJSON unmarshals bytes into a json value. + // This must be nil if this mediatype is not compatible + // with json. + UnmarshalJSON func([]byte) (any, error) +} + +var mediaTypes = map[string]*MediaType{ + "application/json": { + Name: "application/json", + Validate: func(b []byte) error { + var v any + return json.Unmarshal(b, &v) + }, + UnmarshalJSON: func(b []byte) (any, error) { + return UnmarshalJSON(bytes.NewReader(b)) + }, + }, +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go new file mode 100644 index 000000000..fd09bae8d --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go @@ -0,0 +1,360 @@ +package jsonschema + +import ( + "fmt" + "slices" + "strings" +) + +// A Draft represents a json-schema specification. +type Draft struct { + version int + url string + sch *Schema + id string // property name used to represent id + subschemas []SchemaPath // locations of subschemas + vocabPrefix string // prefix used for vocabulary + allVocabs map[string]*Schema // names of supported vocabs with their schemas + defaultVocabs []string // names of default vocabs +} + +// String returns the specification url.
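content.go above ships only a base64 decoder, so here is a hedged sketch of adding another contentEncoding through the RegisterContentEncoding hook from compiler.go; the "hex" encoding name is illustrative:

```go
package main

import (
	"encoding/hex"

	"github.com/santhosh-tekuri/jsonschema/v6"
)

func main() {
	c := jsonschema.NewCompiler()
	c.AssertContent() // content assertions are disabled by default
	c.RegisterContentEncoding(&jsonschema.Decoder{
		Name:   "hex",            // hypothetical encoding name
		Decode: hex.DecodeString, // func(string) ([]byte, error), matching the Decode field
	})
}
```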
+func (d *Draft) String() string { + return d.url +} + +var ( + Draft4 = &Draft{ + version: 4, + url: "http://json-schema.org/draft-04/schema", + id: "id", + subschemas: []SchemaPath{ + // type agnostic + schemaPath("definitions/*"), + schemaPath("not"), + schemaPath("allOf/[]"), + schemaPath("anyOf/[]"), + schemaPath("oneOf/[]"), + // object + schemaPath("properties/*"), + schemaPath("additionalProperties"), + schemaPath("patternProperties/*"), + // array + schemaPath("items"), + schemaPath("items/[]"), + schemaPath("additionalItems"), + schemaPath("dependencies/*"), + }, + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft6 = &Draft{ + version: 6, + url: "http://json-schema.org/draft-06/schema", + id: "$id", + subschemas: joinSubschemas(Draft4.subschemas, + schemaPath("propertyNames"), + schemaPath("contains"), + ), + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft7 = &Draft{ + version: 7, + url: "http://json-schema.org/draft-07/schema", + id: "$id", + subschemas: joinSubschemas(Draft6.subschemas, + schemaPath("if"), + schemaPath("then"), + schemaPath("else"), + ), + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft2019 = &Draft{ + version: 2019, + url: "https://json-schema.org/draft/2019-09/schema", + id: "$id", + subschemas: joinSubschemas(Draft7.subschemas, + schemaPath("$defs/*"), + schemaPath("dependentSchemas/*"), + schemaPath("unevaluatedProperties"), + schemaPath("unevaluatedItems"), + schemaPath("contentSchema"), + ), + vocabPrefix: "https://json-schema.org/draft/2019-09/vocab/", + allVocabs: map[string]*Schema{ + "core": nil, + "applicator": nil, + "validation": nil, + "meta-data": nil, + "format": nil, + "content": nil, + }, + defaultVocabs: []string{"core", "applicator", "validation"}, + } + + Draft2020 = &Draft{ + version: 2020, + url: "https://json-schema.org/draft/2020-12/schema", + id: "$id", + subschemas: joinSubschemas(Draft2019.subschemas, + schemaPath("prefixItems/[]"), + ), + vocabPrefix: "https://json-schema.org/draft/2020-12/vocab/", + allVocabs: map[string]*Schema{ + "core": nil, + "applicator": nil, + "unevaluated": nil, + "validation": nil, + "meta-data": nil, + "format-annotation": nil, + "format-assertion": nil, + "content": nil, + }, + defaultVocabs: []string{"core", "applicator", "unevaluated", "validation"}, + } + + draftLatest = Draft2020 +) + +func init() { + c := NewCompiler() + c.AssertFormat() + for _, d := range []*Draft{Draft4, Draft6, Draft7, Draft2019, Draft2020} { + d.sch = c.MustCompile(d.url) + for name := range d.allVocabs { + d.allVocabs[name] = c.MustCompile(strings.TrimSuffix(d.url, "schema") + "meta/" + name) + } + } +} + +func draftFromURL(url string) *Draft { + u, frag := split(url) + if frag != "" { + return nil + } + u, ok := strings.CutPrefix(u, "http://") + if !ok { + u, _ = strings.CutPrefix(u, "https://") + } + switch u { + case "json-schema.org/schema": + return draftLatest + case "json-schema.org/draft/2020-12/schema": + return Draft2020 + case "json-schema.org/draft/2019-09/schema": + return Draft2019 + case "json-schema.org/draft-07/schema": + return Draft7 + case "json-schema.org/draft-06/schema": + return Draft6 + case "json-schema.org/draft-04/schema": + return Draft4 + default: + return nil + } +} + +func (d *Draft) getID(obj map[string]any) string { + if d.version < 2019 { + if _, ok := obj["$ref"]; ok { + // All other properties in a "$ref" object MUST be ignored + return "" + } + } + +
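Given the warning in compiler.go that the fallback draft tracks draftLatest and may change between releases, a short sketch of pinning it with DefaultDraft and the exported Draft7 value above:

```go
package main

import "github.com/santhosh-tekuri/jsonschema/v6"

func main() {
	c := jsonschema.NewCompiler()
	// Schemas without a `$schema` field now compile as draft-07 instead
	// of whatever draftLatest happens to be in this version.
	c.DefaultDraft(jsonschema.Draft7)
}
```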
id, ok := strVal(obj, d.id) + if !ok { + return "" + } + id, _ = split(id) // ignore fragment + return id +} + +func (d *Draft) getVocabs(url url, doc any, vocabularies map[string]*Vocabulary) ([]string, error) { + if d.version < 2019 { + return nil, nil + } + obj, ok := doc.(map[string]any) + if !ok { + return nil, nil + } + v, ok := obj["$vocabulary"] + if !ok { + return nil, nil + } + obj, ok = v.(map[string]any) + if !ok { + return nil, nil + } + + var vocabs []string + for vocab, reqd := range obj { + if reqd, ok := reqd.(bool); !ok || !reqd { + continue + } + name, ok := strings.CutPrefix(vocab, d.vocabPrefix) + if ok { + if _, ok := d.allVocabs[name]; ok { + if !slices.Contains(vocabs, name) { + vocabs = append(vocabs, name) + continue + } + } + } + if _, ok := vocabularies[vocab]; !ok { + return nil, &UnsupportedVocabularyError{url.String(), vocab} + } + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + if !slices.Contains(vocabs, "core") { + vocabs = append(vocabs, "core") + } + return vocabs, nil +} + +// -- + +type dialect struct { + draft *Draft + vocabs []string // nil means use draft.defaultVocabs +} + +func (d *dialect) hasVocab(name string) bool { + if name == "core" || d.draft.version < 2019 { + return true + } + if d.vocabs != nil { + return slices.Contains(d.vocabs, name) + } + return slices.Contains(d.draft.defaultVocabs, name) +} + +func (d *dialect) activeVocabs(assertVocabs bool, vocabularies map[string]*Vocabulary) []string { + if len(vocabularies) == 0 { + return d.vocabs + } + if d.draft.version < 2019 { + assertVocabs = true + } + if !assertVocabs { + return d.vocabs + } + var vocabs []string + if d.vocabs == nil { + vocabs = slices.Clone(d.draft.defaultVocabs) + } else { + vocabs = slices.Clone(d.vocabs) + } + for vocab := range vocabularies { + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + return vocabs +} + +func (d *dialect) getSchema(assertVocabs bool, vocabularies map[string]*Vocabulary) *Schema { + vocabs := d.activeVocabs(assertVocabs, vocabularies) + if vocabs == nil { + return d.draft.sch + } + + var allOf []*Schema + for _, vocab := range vocabs { + sch := d.draft.allVocabs[vocab] + if sch == nil { + if v, ok := vocabularies[vocab]; ok { + sch = v.Schema + } + } + if sch != nil { + allOf = append(allOf, sch) + } + } + if !slices.Contains(vocabs, "core") { + sch := d.draft.allVocabs["core"] + if sch == nil { + sch = d.draft.sch + } + allOf = append(allOf, sch) + } + sch := &Schema{ + Location: "urn:mem:metaschema", + up: urlPtr{url("urn:mem:metaschema"), ""}, + DraftVersion: d.draft.version, + AllOf: allOf, + } + sch.resource = sch + if sch.DraftVersion >= 2020 { + sch.DynamicAnchor = "meta" + sch.dynamicAnchors = map[string]*Schema{ + "meta": sch, + } + } + return sch +} + +// -- + +type ParseIDError struct { + URL string +} + +func (e *ParseIDError) Error() string { + return fmt.Sprintf("error in parsing id at %q", e.URL) +} + +// -- + +type ParseAnchorError struct { + URL string +} + +func (e *ParseAnchorError) Error() string { + return fmt.Sprintf("error in parsing anchor at %q", e.URL) +} + +// -- + +type DuplicateIDError struct { + ID string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateIDError) Error() string { + return fmt.Sprintf("duplicate id %q in %q at %q and %q", e.ID, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +type DuplicateAnchorError struct { + Anchor string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateAnchorError) Error() string { + return 
fmt.Sprintf("duplicate anchor %q in %q at %q and %q", e.Anchor, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +func joinSubschemas(a1 []SchemaPath, a2 ...SchemaPath) []SchemaPath { + var a []SchemaPath + a = append(a, a1...) + a = append(a, a2...) + return a +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go new file mode 100644 index 000000000..b78b22e2a --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go @@ -0,0 +1,708 @@ +package jsonschema + +import ( + "net/netip" + gourl "net/url" + "strconv" + "strings" + "time" +) + +// Format defined specific format. +type Format struct { + // Name of format. + Name string + + // Validate checks if given value is of this format. + Validate func(v any) error +} + +var formats = map[string]*Format{ + "json-pointer": {"json-pointer", validateJSONPointer}, + "relative-json-pointer": {"relative-json-pointer", validateRelativeJSONPointer}, + "uuid": {"uuid", validateUUID}, + "duration": {"duration", validateDuration}, + "period": {"period", validatePeriod}, + "ipv4": {"ipv4", validateIPV4}, + "ipv6": {"ipv6", validateIPV6}, + "hostname": {"hostname", validateHostname}, + "email": {"email", validateEmail}, + "date": {"date", validateDate}, + "time": {"time", validateTime}, + "date-time": {"date-time", validateDateTime}, + "uri": {"uri", validateURI}, + "iri": {"iri", validateURI}, + "uri-reference": {"uri-reference", validateURIReference}, + "iri-reference": {"iri-reference", validateURIReference}, + "uri-template": {"uri-template", validateURITemplate}, + "semver": {"semver", validateSemver}, +} + +// see https://www.rfc-editor.org/rfc/rfc6901#section-3 +func validateJSONPointer(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if s == "" { + return nil + } + if !strings.HasPrefix(s, "/") { + return LocalizableError("not starting with /") + } + for _, tok := range strings.Split(s, "/")[1:] { + escape := false + for _, ch := range tok { + if escape { + escape = false + if ch != '0' && ch != '1' { + return LocalizableError("~ must be followed by 0 or 1") + } + continue + } + if ch == '~' { + escape = true + continue + } + switch { + case ch >= '\x00' && ch <= '\x2E': + case ch >= '\x30' && ch <= '\x7D': + case ch >= '\x7F' && ch <= '\U0010FFFF': + default: + return LocalizableError("invalid character %q", ch) + } + } + if escape { + return LocalizableError("~ must be followed by 0 or 1") + } + } + return nil +} + +// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 +func validateRelativeJSONPointer(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // start with non-negative-integer + numDigits := 0 + for _, ch := range s { + if ch >= '0' && ch <= '9' { + numDigits++ + } else { + break + } + } + if numDigits == 0 { + return LocalizableError("must start with non-negative integer") + } + if numDigits > 1 && strings.HasPrefix(s, "0") { + return LocalizableError("starts with zero") + } + s = s[numDigits:] + + // followed by either json-pointer or '#' + if s == "#" { + return nil + } + return validateJSONPointer(s) +} + +// see https://datatracker.ietf.org/doc/html/rfc4122#page-4 +func validateUUID(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + hexGroups := []int{8, 4, 4, 4, 12} + groups := strings.Split(s, "-") + if len(groups) != len(hexGroups) { + return LocalizableError("must have %d elements", len(hexGroups)) + } + for i, group := range groups { + if 
len(group) != hexGroups[i] { + return LocalizableError("element %d must be %d characters long", i+1, hexGroups[i]) + } + for _, ch := range group { + switch { + case ch >= '0' && ch <= '9': + case ch >= 'a' && ch <= 'f': + case ch >= 'A' && ch <= 'F': + default: + return LocalizableError("non-hex character %q", ch) + } + } + } + return nil +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A +func validateDuration(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // must start with 'P' + s, ok = strings.CutPrefix(s, "P") + if !ok { + return LocalizableError("must start with P") + } + if s == "" { + return LocalizableError("nothing after P") + } + + // dur-week + if s, ok := strings.CutSuffix(s, "W"); ok { + if s == "" { + return LocalizableError("no number in week") + } + for _, ch := range s { + if ch < '0' || ch > '9' { + return LocalizableError("invalid week") + } + } + return nil + } + + allUnits := []string{"YMD", "HMS"} + for i, s := range strings.Split(s, "T") { + if i != 0 && s == "" { + return LocalizableError("no time elements") + } + if i >= len(allUnits) { + return LocalizableError("more than one T") + } + units := allUnits[i] + for s != "" { + digitCount := 0 + for _, ch := range s { + if ch >= '0' && ch <= '9' { + digitCount++ + } else { + break + } + } + if digitCount == 0 { + return LocalizableError("missing number") + } + s = s[digitCount:] + if s == "" { + return LocalizableError("missing unit") + } + unit := s[0] + j := strings.IndexByte(units, unit) + if j == -1 { + if strings.IndexByte(allUnits[i], unit) != -1 { + return LocalizableError("unit %q out of order", unit) + } + return LocalizableError("invalid unit %q", unit) + } + units = units[j+1:] + s = s[1:] + } + } + + return nil +} + +func validateIPV4(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + groups := strings.Split(s, ".") + if len(groups) != 4 { + return LocalizableError("expected four decimals") + } + for _, group := range groups { + if len(group) > 1 && group[0] == '0' { + return LocalizableError("leading zeros") + } + n, err := strconv.Atoi(group) + if err != nil { + return err + } + if n < 0 || n > 255 { + return LocalizableError("decimal must be between 0 and 255") + } + } + return nil +} + +func validateIPV6(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if !strings.Contains(s, ":") { + return LocalizableError("missing colon") + } + addr, err := netip.ParseAddr(s) + if err != nil { + return err + } + if addr.Zone() != "" { + return LocalizableError("zone id is not a part of ipv6 address") + } + return nil +} + +// see https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names +func validateHostname(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters + s = strings.TrimSuffix(s, ".") + if len(s) > 253 { + return LocalizableError("more than 253 characters long") + } + + // Hostnames are composed of series of labels concatenated with dots, as are all domain names + for _, label := range strings.Split(s, ".") { + // Each label must be from 1 to 63 characters long + if len(label) < 1 || len(label) > 63 { + return LocalizableError("label must be 1 to 63 characters long") + } + + // labels must not start or end with a hyphen + if strings.HasPrefix(label, "-") { + return LocalizableError("label starts with hyphen") + } + if strings.HasSuffix(label, "-") { + return LocalizableError("label ends with 
hyphen") + } + + // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), + // the digits '0' through '9', and the hyphen ('-') + for _, ch := range label { + switch { + case ch >= 'a' && ch <= 'z': + case ch >= 'A' && ch <= 'Z': + case ch >= '0' && ch <= '9': + case ch == '-': + default: + return LocalizableError("invalid character %q", ch) + } + } + } + return nil +} + +// see https://en.wikipedia.org/wiki/Email_address +func validateEmail(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + // entire email address to be no more than 254 characters long + if len(s) > 254 { + return LocalizableError("more than 255 characters long") + } + + // email address is generally recognized as having two parts joined with an at-sign + at := strings.LastIndexByte(s, '@') + if at == -1 { + return LocalizableError("missing @") + } + local, domain := s[:at], s[at+1:] + + // local part may be up to 64 characters long + if len(local) > 64 { + return LocalizableError("local part more than 64 characters long") + } + + if len(local) > 1 && strings.HasPrefix(local, `"`) && strings.HasPrefix(local, `"`) { + // quoted + local := local[1 : len(local)-1] + if strings.IndexByte(local, '\\') != -1 || strings.IndexByte(local, '"') != -1 { + return LocalizableError("backslash and quote are not allowed within quoted local part") + } + } else { + // unquoted + if strings.HasPrefix(local, ".") { + return LocalizableError("starts with dot") + } + if strings.HasSuffix(local, ".") { + return LocalizableError("ends with dot") + } + + // consecutive dots not allowed + if strings.Contains(local, "..") { + return LocalizableError("consecutive dots") + } + + // check allowed chars + for _, ch := range local { + switch { + case ch >= 'a' && ch <= 'z': + case ch >= 'A' && ch <= 'Z': + case ch >= '0' && ch <= '9': + case strings.ContainsRune(".!#$%&'*+-/=?^_`{|}~", ch): + default: + return LocalizableError("invalid character %q", ch) + } + } + } + + // domain if enclosed in brackets, must match an IP address + if strings.HasPrefix(domain, "[") && strings.HasSuffix(domain, "]") { + domain = domain[1 : len(domain)-1] + if rem, ok := strings.CutPrefix(domain, "IPv6:"); ok { + if err := validateIPV6(rem); err != nil { + return LocalizableError("invalid ipv6 address: %v", err) + } + return nil + } + if err := validateIPV4(domain); err != nil { + return LocalizableError("invalid ipv4 address: %v", err) + } + return nil + } + + // domain must match the requirements for a hostname + if err := validateHostname(domain); err != nil { + return LocalizableError("invalid domain: %v", err) + } + + return nil +} + +// see see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +func validateDate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + _, err := time.Parse("2006-01-02", s) + return err +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +// NOTE: golang time package does not support leap seconds. 
+func validateTime(v any) error {
+	str, ok := v.(string)
+	if !ok {
+		return nil
+	}
+
+	// min: hh:mm:ssZ
+	if len(str) < 9 {
+		return LocalizableError("less than 9 characters long")
+	}
+	if str[2] != ':' || str[5] != ':' {
+		return LocalizableError("missing colon in correct place")
+	}
+
+	// parse hh:mm:ss
+	var hms []int
+	for _, tok := range strings.SplitN(str[:8], ":", 3) {
+		i, err := strconv.Atoi(tok)
+		if err != nil {
+			return LocalizableError("invalid hour/min/sec")
+		}
+		if i < 0 {
+			return LocalizableError("negative hour/min/sec")
+		}
+		hms = append(hms, i)
+	}
+	if len(hms) != 3 {
+		return LocalizableError("missing hour/min/sec")
+	}
+	h, m, s := hms[0], hms[1], hms[2]
+	if h > 23 || m > 59 || s > 60 {
+		return LocalizableError("hour/min/sec out of range")
+	}
+	str = str[8:]
+
+	// parse sec-frac if present
+	if rem, ok := strings.CutPrefix(str, "."); ok {
+		numDigits := 0
+		for _, ch := range rem {
+			if ch >= '0' && ch <= '9' {
+				numDigits++
+			} else {
+				break
+			}
+		}
+		if numDigits == 0 {
+			return LocalizableError("no digits in second fraction")
+		}
+		str = rem[numDigits:]
+	}
+
+	if str != "z" && str != "Z" {
+		// parse time-numoffset
+		if len(str) != 6 {
+			return LocalizableError("offset must be 6 characters long")
+		}
+		var sign int
+		switch str[0] {
+		case '+':
+			sign = -1
+		case '-':
+			sign = +1
+		default:
+			return LocalizableError("offset must begin with plus/minus")
+		}
+		str = str[1:]
+		if str[2] != ':' {
+			return LocalizableError("missing colon in offset in correct place")
+		}
+
+		var zhm []int
+		for _, tok := range strings.SplitN(str, ":", 2) {
+			i, err := strconv.Atoi(tok)
+			if err != nil {
+				return LocalizableError("invalid hour/min in offset")
+			}
+			if i < 0 {
+				return LocalizableError("negative hour/min in offset")
+			}
+			zhm = append(zhm, i)
+		}
+		zh, zm := zhm[0], zhm[1]
+		if zh > 23 || zm > 59 {
+			return LocalizableError("hour/min in offset out of range")
+		}
+
+		// apply timezone
+		hm := (h*60 + m) + sign*(zh*60+zm)
+		if hm < 0 {
+			hm += 24 * 60
+		}
+		h, m = hm/60, hm%60
+	}
+
+	// check leap second
+	if s >= 60 && (h != 23 || m != 59) {
+		return LocalizableError("invalid leap second")
+	}
+
+	return nil
+}
+
+// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
+func validateDateTime(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+
+	// min: yyyy-mm-ddThh:mm:ssZ
+	if len(s) < 20 {
+		return LocalizableError("less than 20 characters long")
+	}
+
+	if s[10] != 't' && s[10] != 'T' {
+		return LocalizableError("11th character must be t or T")
+	}
+	if err := validateDate(s[:10]); err != nil {
+		return LocalizableError("invalid date element: %v", err)
+	}
+	if err := validateTime(s[11:]); err != nil {
+		return LocalizableError("invalid time element: %v", err)
+	}
+	return nil
+}
+
+func parseURL(s string) (*gourl.URL, error) {
+	u, err := gourl.Parse(s)
+	if err != nil {
+		return nil, err
+	}
+
+	// gourl does not validate ipv6 host address
+	hostName := u.Hostname()
+	if strings.Contains(hostName, ":") {
+		if !strings.Contains(u.Host, "[") || !strings.Contains(u.Host, "]") {
+			return nil, LocalizableError("ipv6 address not enclosed in brackets")
+		}
+		if err := validateIPV6(hostName); err != nil {
+			return nil, LocalizableError("invalid ipv6 address: %v", err)
+		}
+	}
+
+	return u, nil
+}
+
+func validateURI(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+	u, err := parseURL(s)
+	if err != nil {
+		return err
+	}
+	if !u.IsAbs() {
+		return LocalizableError("relative url")
+	}
+	return nil
+}
+
+func
validateURIReference(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if strings.Contains(s, `\`) { + return LocalizableError(`contains \`) + } + _, err := parseURL(s) + return err +} + +func validateURITemplate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + u, err := parseURL(s) + if err != nil { + return err + } + for _, tok := range strings.Split(u.RawPath, "/") { + tok, err = decode(tok) + if err != nil { + return LocalizableError("percent decode failed: %v", err) + } + want := true + for _, ch := range tok { + var got bool + switch ch { + case '{': + got = true + case '}': + got = false + default: + continue + } + if got != want { + return LocalizableError("nested curly braces") + } + want = !want + } + if !want { + return LocalizableError("no matching closing brace") + } + } + return nil +} + +func validatePeriod(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + slash := strings.IndexByte(s, '/') + if slash == -1 { + return LocalizableError("missing slash") + } + + start, end := s[:slash], s[slash+1:] + if strings.HasPrefix(start, "P") { + if err := validateDuration(start); err != nil { + return LocalizableError("invalid start duration: %v", err) + } + if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } else { + if err := validateDateTime(start); err != nil { + return LocalizableError("invalid start date-time: %v", err) + } + if strings.HasPrefix(end, "P") { + if err := validateDuration(end); err != nil { + return LocalizableError("invalid end duration: %v", err) + } + } else if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } + + return nil +} + +// see https://semver.org/#backusnaur-form-grammar-for-valid-semver-versions +func validateSemver(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // build -- + if i := strings.IndexByte(s, '+'); i != -1 { + build := s[i+1:] + if build == "" { + return LocalizableError("build is empty") + } + for _, buildID := range strings.Split(build, ".") { + if buildID == "" { + return LocalizableError("build identifier is empty") + } + for _, ch := range buildID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + default: + return LocalizableError("invalid character %q in build identifier", ch) + } + } + } + s = s[:i] + } + + // pre-release -- + if i := strings.IndexByte(s, '-'); i != -1 { + preRelease := s[i+1:] + for _, preReleaseID := range strings.Split(preRelease, ".") { + if preReleaseID == "" { + return LocalizableError("pre-release identifier is empty") + } + allDigits := true + for _, ch := range preReleaseID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + allDigits = false + default: + return LocalizableError("invalid character %q in pre-release identifier", ch) + } + } + if allDigits && len(preReleaseID) > 1 && preReleaseID[0] == '0' { + return LocalizableError("pre-release numeric identifier starts with zero") + } + } + s = s[:i] + } + + // versionCore -- + versions := strings.Split(s, ".") + if len(versions) != 3 { + return LocalizableError("versionCore must have 3 numbers separated by dot") + } + names := []string{"major", "minor", "patch"} + for i, version := range versions { + if version == "" { + return LocalizableError("%s is empty", names[i]) + } + if len(version) > 1 && version[0] == '0' { + return 
LocalizableError("%s starts with zero", names[i]) + } + for _, ch := range version { + if ch < '0' || ch > '9' { + return LocalizableError("%s contains non-digit", names[i]) + } + } + } + + return nil +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work new file mode 100644 index 000000000..e7f4d93de --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work @@ -0,0 +1,8 @@ +go 1.21.1 + +use ( + . + ./cmd/jv +) + +// replace github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 => ./ diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work.sum b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work.sum new file mode 100644 index 000000000..2b5b811d9 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work.sum @@ -0,0 +1,4 @@ +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go new file mode 100644 index 000000000..a37fb0b97 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go @@ -0,0 +1,651 @@ +package kind + +import ( + "fmt" + "math/big" + "strings" + + "golang.org/x/text/message" +) + +// -- + +type InvalidJsonValue struct { + Value any +} + +func (*InvalidJsonValue) KeywordPath() []string { + return nil +} + +func (k *InvalidJsonValue) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid jsonType %T", k.Value) +} + +// -- + +type Schema struct { + Location string +} + +func (*Schema) KeywordPath() []string { + return nil +} + +func (k *Schema) LocalizedString(p *message.Printer) string { + return p.Sprintf("jsonschema validation failed with %s", quote(k.Location)) +} + +// -- + +type Group struct{} + +func (*Group) KeywordPath() []string { + return nil +} + +func (*Group) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type Not struct{} + +func (*Not) KeywordPath() []string { + return nil +} + +func (*Not) LocalizedString(p *message.Printer) string { + return p.Sprintf("'not' failed") +} + +// -- + +type AllOf struct{} + +func (*AllOf) KeywordPath() []string { + return []string{"allOf"} +} + +func (*AllOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("'allOf' failed") +} + +// -- + +type AnyOf struct{} + +func (*AnyOf) KeywordPath() []string { + return []string{"anyOf"} +} + +func (*AnyOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("'anyOf' failed") +} + +// -- + +type OneOf struct { + // Subschemas gives indexes of Subschemas that have matched. + // Value nil, means none of the subschemas matched. 
+ Subschemas []int +} + +func (*OneOf) KeywordPath() []string { + return []string{"oneOf"} +} + +func (k *OneOf) LocalizedString(p *message.Printer) string { + if len(k.Subschemas) == 0 { + return p.Sprintf("'oneOf' failed, none matched") + } + return p.Sprintf("'oneOf' failed, subschemas %d, %d matched", k.Subschemas[0], k.Subschemas[1]) +} + +//-- + +type FalseSchema struct{} + +func (*FalseSchema) KeywordPath() []string { + return nil +} + +func (*FalseSchema) LocalizedString(p *message.Printer) string { + return p.Sprintf("false schema") +} + +// -- + +type RefCycle struct { + URL string + KeywordLocation1 string + KeywordLocation2 string +} + +func (*RefCycle) KeywordPath() []string { + return nil +} + +func (k *RefCycle) LocalizedString(p *message.Printer) string { + return p.Sprintf("both %s and %s resolve to %q causing reference cycle", k.KeywordLocation1, k.KeywordLocation2, k.URL) +} + +// -- + +type Type struct { + Got string + Want []string +} + +func (*Type) KeywordPath() []string { + return []string{"type"} +} + +func (k *Type) LocalizedString(p *message.Printer) string { + want := strings.Join(k.Want, " or ") + return p.Sprintf("got %s, want %s", k.Got, want) +} + +// -- + +type Enum struct { + Got any + Want []any +} + +// KeywordPath implements jsonschema.ErrorKind. +func (*Enum) KeywordPath() []string { + return []string{"enum"} +} + +func (k *Enum) LocalizedString(p *message.Printer) string { + allPrimitive := true +loop: + for _, item := range k.Want { + switch item.(type) { + case []any, map[string]any: + allPrimitive = false + break loop + } + } + if allPrimitive { + if len(k.Want) == 1 { + return p.Sprintf("value must be %s", display(k.Want[0])) + } + var want []string + for _, v := range k.Want { + want = append(want, display(v)) + } + return p.Sprintf("value must be one of %s", strings.Join(want, ", ")) + } + return p.Sprintf("'enum' failed") +} + +// -- + +type Const struct { + Got any + Want any +} + +func (*Const) KeywordPath() []string { + return []string{"const"} +} + +func (k *Const) LocalizedString(p *message.Printer) string { + switch want := k.Want.(type) { + case []any, map[string]any: + return p.Sprintf("'const' failed") + default: + return p.Sprintf("value must be %s", display(want)) + } +} + +// -- + +type Format struct { + Got any + Want string + Err error +} + +func (*Format) KeywordPath() []string { + return []string{"format"} +} + +func (k *Format) LocalizedString(p *message.Printer) string { + return p.Sprintf("%s is not valid %s: %v", display(k.Got), k.Want, localizedError(k.Err, p)) +} + +// -- + +type Reference struct { + Keyword string + URL string +} + +func (k *Reference) KeywordPath() []string { + return []string{k.Keyword} +} + +func (*Reference) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type MinProperties struct { + Got, Want int +} + +func (*MinProperties) KeywordPath() []string { + return []string{"minProperties"} +} + +func (k *MinProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("minProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxProperties struct { + Got, Want int +} + +func (*MaxProperties) KeywordPath() []string { + return []string{"maxProperties"} +} + +func (k *MaxProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MinItems struct { + Got, Want int +} + +func (*MinItems) KeywordPath() []string { + return []string{"minItems"} +} + +func 
(k *MinItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("minItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxItems struct { + Got, Want int +} + +func (*MaxItems) KeywordPath() []string { + return []string{"maxItems"} +} + +func (k *MaxItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type AdditionalItems struct { + Count int +} + +func (*AdditionalItems) KeywordPath() []string { + return []string{"additionalItems"} +} + +func (k *AdditionalItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("last %d additionalItem(s) not allowed", k.Count) +} + +// -- + +type Required struct { + Missing []string +} + +func (*Required) KeywordPath() []string { + return []string{"required"} +} + +func (k *Required) LocalizedString(p *message.Printer) string { + if len(k.Missing) == 1 { + return p.Sprintf("missing property %s", quote(k.Missing[0])) + } + return p.Sprintf("missing properties %s", joinQuoted(k.Missing, ", ")) +} + +// -- + +type Dependency struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *Dependency) KeywordPath() []string { + return []string{"dependency", k.Prop} +} + +func (k *Dependency) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type DependentRequired struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *DependentRequired) KeywordPath() []string { + return []string{"dependentRequired", k.Prop} +} + +func (k *DependentRequired) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type AdditionalProperties struct { + Properties []string +} + +func (*AdditionalProperties) KeywordPath() []string { + return []string{"additionalProperties"} +} + +func (k *AdditionalProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("additional properties %s not allowed", joinQuoted(k.Properties, ", ")) +} + +// -- + +type PropertyNames struct { + Property string +} + +func (*PropertyNames) KeywordPath() []string { + return []string{"propertyNames"} +} + +func (k *PropertyNames) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid propertyName %s", quote(k.Property)) +} + +// -- + +type UniqueItems struct { + Duplicates [2]int +} + +func (*UniqueItems) KeywordPath() []string { + return []string{"uniqueItems"} +} + +func (k *UniqueItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("items at %d and %d are equal", k.Duplicates[0], k.Duplicates[1]) +} + +// -- + +type Contains struct{} + +func (*Contains) KeywordPath() []string { + return []string{"contains"} +} + +func (*Contains) LocalizedString(p *message.Printer) string { + return p.Sprintf("no items match contains schema") +} + +// -- + +type MinContains struct { + Got []int + Want int +} + +func (*MinContains) KeywordPath() []string { + return []string{"minContains"} +} + +func (k *MinContains) LocalizedString(p *message.Printer) string { + if len(k.Got) == 0 { + return p.Sprintf("min %d items required to match contains schema, but none matched", k.Want) + } else { + got := fmt.Sprintf("%v", k.Got) + return p.Sprintf("min %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), 
got[1:len(got)-1])
+	}
+}
+
+// --
+
+type MaxContains struct {
+	Got  []int
+	Want int
+}
+
+func (*MaxContains) KeywordPath() []string {
+	return []string{"maxContains"}
+}
+
+func (k *MaxContains) LocalizedString(p *message.Printer) string {
+	got := fmt.Sprintf("%v", k.Got)
+	return p.Sprintf("max %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1])
+}
+
+// --
+
+type MinLength struct {
+	Got, Want int
+}
+
+func (*MinLength) KeywordPath() []string {
+	return []string{"minLength"}
+}
+
+func (k *MinLength) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("minLength: got %d, want %d", k.Got, k.Want)
+}
+
+// --
+
+type MaxLength struct {
+	Got, Want int
+}
+
+func (*MaxLength) KeywordPath() []string {
+	return []string{"maxLength"}
+}
+
+func (k *MaxLength) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("maxLength: got %d, want %d", k.Got, k.Want)
+}
+
+// --
+
+type Pattern struct {
+	Got  string
+	Want string
+}
+
+func (*Pattern) KeywordPath() []string {
+	return []string{"pattern"}
+}
+
+func (k *Pattern) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("%s does not match pattern %s", quote(k.Got), quote(k.Want))
+}
+
+// --
+
+type ContentEncoding struct {
+	Want string
+	Err  error
+}
+
+func (*ContentEncoding) KeywordPath() []string {
+	return []string{"contentEncoding"}
+}
+
+func (k *ContentEncoding) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("value is not %s encoded: %v", quote(k.Want), localizedError(k.Err, p))
+}
+
+// --
+
+type ContentMediaType struct {
+	Got  []byte
+	Want string
+	Err  error
+}
+
+func (*ContentMediaType) KeywordPath() []string {
+	return []string{"contentMediaType"}
+}
+
+func (k *ContentMediaType) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("value is not of mediatype %s: %v", quote(k.Want), k.Err)
+}
+
+// --
+
+type ContentSchema struct{}
+
+func (*ContentSchema) KeywordPath() []string {
+	return []string{"contentSchema"}
+}
+
+func (*ContentSchema) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("'contentSchema' failed")
+}
+
+// --
+
+type Minimum struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*Minimum) KeywordPath() []string {
+	return []string{"minimum"}
+}
+
+func (k *Minimum) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("minimum: got %v, want %v", got, want)
+}
+
+// --
+
+type Maximum struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*Maximum) KeywordPath() []string {
+	return []string{"maximum"}
+}
+
+func (k *Maximum) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("maximum: got %v, want %v", got, want)
+}
+
+// --
+
+type ExclusiveMinimum struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*ExclusiveMinimum) KeywordPath() []string {
+	return []string{"exclusiveMinimum"}
+}
+
+func (k *ExclusiveMinimum) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("exclusiveMinimum: got %v, want %v", got, want)
+}
+
+// --
+
+type ExclusiveMaximum struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*ExclusiveMaximum) KeywordPath() []string {
+	return []string{"exclusiveMaximum"}
+}
+
+func (k *ExclusiveMaximum) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("exclusiveMaximum: got %v, want %v", got,
want)
+}
+
+// --
+
+type MultipleOf struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*MultipleOf) KeywordPath() []string {
+	return []string{"multipleOf"}
+}
+
+func (k *MultipleOf) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("multipleOf: got %v, want %v", got, want)
+}
+
+// --
+
+func quote(s string) string {
+	s = fmt.Sprintf("%q", s)
+	s = strings.ReplaceAll(s, `\"`, `"`)
+	s = strings.ReplaceAll(s, `'`, `\'`)
+	return "'" + s[1:len(s)-1] + "'"
+}
+
+func joinQuoted(arr []string, sep string) string {
+	var sb strings.Builder
+	for _, s := range arr {
+		if sb.Len() > 0 {
+			sb.WriteString(sep)
+		}
+		sb.WriteString(quote(s))
+	}
+	return sb.String()
+}
+
+// to be used only for primitives.
+func display(v any) string {
+	switch v := v.(type) {
+	case string:
+		return quote(v)
+	case []any, map[string]any:
+		return "value"
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+func localizedError(err error, p *message.Printer) string {
+	if err, ok := err.(interface{ LocalizedError(*message.Printer) string }); ok {
+		return err.LocalizedError(p)
+	}
+	return err.Error()
+}
diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go
new file mode 100644
index 000000000..ce0170e20
--- /dev/null
+++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go
@@ -0,0 +1,266 @@
+package jsonschema
+
+import (
+	"embed"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	gourl "net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+// URLLoader knows how to load json from given url.
+type URLLoader interface {
+	// Load loads json from given absolute url.
+	Load(url string) (any, error)
+}
+
+// --
+
+// FileLoader loads json from a file url.
+type FileLoader struct{}
+
+func (l FileLoader) Load(url string) (any, error) {
+	path, err := l.ToFile(url)
+	if err != nil {
+		return nil, err
+	}
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return UnmarshalJSON(f)
+}
+
+// ToFile is a helper method to convert a file url to a file path.
+func (l FileLoader) ToFile(url string) (string, error) {
+	u, err := gourl.Parse(url)
+	if err != nil {
+		return "", err
+	}
+	if u.Scheme != "file" {
+		return "", fmt.Errorf("invalid file url: %s", u)
+	}
+	path := u.Path
+	if runtime.GOOS == "windows" {
+		path = strings.TrimPrefix(path, "/")
+		path = filepath.FromSlash(path)
+	}
+	return path, nil
+}
+
+// --
+
+// SchemeURLLoader delegates to other [URLLoader]s
+// based on url scheme.
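+//
+// A minimal sketch (FileLoader is defined above; loaders for other
+// schemes, such as http, would have to be supplied by the caller):
+//
+//	loader := SchemeURLLoader{
+//		"file": FileLoader{},
+//	}
+//	doc, err := loader.Load("file:///path/to/schema.json") // hypothetical url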
+type SchemeURLLoader map[string]URLLoader
+
+func (l SchemeURLLoader) Load(url string) (any, error) {
+	u, err := gourl.Parse(url)
+	if err != nil {
+		return nil, err
+	}
+	ll, ok := l[u.Scheme]
+	if !ok {
+		return nil, &UnsupportedURLSchemeError{u.String()}
+	}
+	return ll.Load(url)
+}
+
+// --
+
+//go:embed metaschemas
+var metaFS embed.FS
+
+func openMeta(url string) (fs.File, error) {
+	u, meta := strings.CutPrefix(url, "http://json-schema.org/")
+	if !meta {
+		u, meta = strings.CutPrefix(url, "https://json-schema.org/")
+	}
+	if meta {
+		if u == "schema" {
+			return openMeta(draftLatest.url)
+		}
+		f, err := metaFS.Open("metaschemas/" + u)
+		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				return nil, nil
+			}
+			return nil, err
+		}
+		return f, err
+	}
+	return nil, nil
+}
+
+func isMeta(url string) bool {
+	f, err := openMeta(url)
+	if err != nil {
+		return true
+	}
+	if f != nil {
+		f.Close()
+		return true
+	}
+	return false
+}
+
+func loadMeta(url string) (any, error) {
+	f, err := openMeta(url)
+	if err != nil {
+		return nil, err
+	}
+	if f == nil {
+		return nil, nil
+	}
+	defer f.Close()
+	return UnmarshalJSON(f)
+}
+
+// --
+
+type defaultLoader struct {
+	docs   map[url]any // docs loaded so far
+	loader URLLoader
+}
+
+func (l *defaultLoader) add(url url, doc any) bool {
+	if _, ok := l.docs[url]; ok {
+		return false
+	}
+	l.docs[url] = doc
+	return true
+}
+
+func (l *defaultLoader) load(url url) (any, error) {
+	if doc, ok := l.docs[url]; ok {
+		return doc, nil
+	}
+	doc, err := loadMeta(url.String())
+	if err != nil {
+		return nil, err
+	}
+	if doc != nil {
+		l.add(url, doc)
+		return doc, nil
+	}
+	if l.loader == nil {
+		return nil, &LoadURLError{url.String(), errors.New("no URLLoader set")}
+	}
+	doc, err = l.loader.Load(url.String())
+	if err != nil {
+		return nil, &LoadURLError{URL: url.String(), Err: err}
+	}
+	l.add(url, doc)
+	return doc, nil
+}
+
+func (l *defaultLoader) getDraft(up urlPtr, doc any, defaultDraft *Draft, cycle map[url]struct{}) (*Draft, error) {
+	obj, ok := doc.(map[string]any)
+	if !ok {
+		return defaultDraft, nil
+	}
+	sch, ok := strVal(obj, "$schema")
+	if !ok {
+		return defaultDraft, nil
+	}
+	if draft := draftFromURL(sch); draft != nil {
+		return draft, nil
+	}
+	sch, _ = split(sch)
+	if _, err := gourl.Parse(sch); err != nil {
+		return nil, &InvalidMetaSchemaURLError{up.String(), err}
+	}
+	schUrl := url(sch)
+	if up.ptr.isEmpty() && schUrl == up.url {
+		return nil, &UnsupportedDraftError{schUrl.String()}
+	}
+	if _, ok := cycle[schUrl]; ok {
+		return nil, &MetaSchemaCycleError{schUrl.String()}
+	}
+	cycle[schUrl] = struct{}{}
+	doc, err := l.load(schUrl)
+	if err != nil {
+		return nil, err
+	}
+	return l.getDraft(urlPtr{schUrl, ""}, doc, defaultDraft, cycle)
+}
+
+func (l *defaultLoader) getMetaVocabs(doc any, draft *Draft, vocabularies map[string]*Vocabulary) ([]string, error) {
+	obj, ok := doc.(map[string]any)
+	if !ok {
+		return nil, nil
+	}
+	sch, ok := strVal(obj, "$schema")
+	if !ok {
+		return nil, nil
+	}
+	if draft := draftFromURL(sch); draft != nil {
+		return nil, nil
+	}
+	sch, _ = split(sch)
+	if _, err := gourl.Parse(sch); err != nil {
+		return nil, &ParseURLError{sch, err}
+	}
+	schUrl := url(sch)
+	doc, err := l.load(schUrl)
+	if err != nil {
+		return nil, err
+	}
+	return draft.getVocabs(schUrl, doc, vocabularies)
+}
+
+// --
+
+type LoadURLError struct {
+	URL string
+	Err error
+}
+
+func (e *LoadURLError) Error() string {
+	return fmt.Sprintf("failed loading %q: %v", e.URL, e.Err)
+}
+
+// --
+
+type
UnsupportedURLSchemeError struct { + url string +} + +func (e *UnsupportedURLSchemeError) Error() string { + return fmt.Sprintf("no URLLoader registered for %q", e.url) +} + +// -- + +type ResourceExistsError struct { + url string +} + +func (e *ResourceExistsError) Error() string { + return fmt.Sprintf("resource for %q already exists", e.url) +} + +// -- + +// UnmarshalJSON unmarshals into [any] without losing +// number precision using [json.Number]. +func UnmarshalJSON(r io.Reader) (any, error) { + decoder := json.NewDecoder(r) + decoder.UseNumber() + var doc any + if err := decoder.Decode(&doc); err != nil { + return nil, err + } + if _, err := decoder.Token(); err == nil || err != io.EOF { + return nil, fmt.Errorf("invalid character after top-level value") + } + return doc, nil +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema new file mode 100644 index 000000000..b2a7ff0f5 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema @@ -0,0 +1,151 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": 
"object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema new file mode 100644 index 000000000..fa22ad1b0 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema @@ -0,0 +1,150 @@ +{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + 
"additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema new file mode 100644 index 000000000..326759a62 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema @@ -0,0 +1,172 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + 
"type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": { "$ref": "#" }, + "then": { "$ref": "#" }, + "else": { "$ref": "#" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator new file mode 100644 index 000000000..857d2d495 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator @@ -0,0 +1,55 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + "propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content new file mode 100644 index 000000000..fa5d20b8d --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": 
"Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core new file mode 100644 index 000000000..bf5731985 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core @@ -0,0 +1,56 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format new file mode 100644 index 000000000..fe553c239 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data new file mode 100644 index 000000000..5c95715c4 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff 
--git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation new file mode 100644 index 000000000..f3525e076 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema new file mode 100644 index 000000000..f433389be --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema @@ -0,0 +1,41 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + 
{"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator new file mode 100644 index 000000000..0ef24edc8 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator @@ -0,0 +1,47 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content new file mode 100644 index 000000000..0330ff0a8 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core 
b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core new file mode 100644 index 000000000..c4de7005a --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core @@ -0,0 +1,50 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation new file mode 100644 index 000000000..0aa07d1c1 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion new file mode 100644 index 000000000..38613bff6 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data new file mode 100644 index 000000000..30e283714 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data @@ -0,0 +1,35 @@ +{ 
+ "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + "$dynamicAnchor": "meta", + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated new file mode 100644 index 000000000..e9e093d12 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation new file mode 100644 index 000000000..4e016ed2b --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": 
"#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema new file mode 100644 index 000000000..364f8ada6 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema @@ -0,0 +1,57 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go new file mode 100644 index 000000000..f1494b13a --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go @@ -0,0 +1,549 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "strconv" +) + +type objCompiler struct { + c *Compiler + obj map[string]any + up urlPtr + r *root + res *resource + q *queue +} + +func (c *objCompiler) compile(s *Schema) error { + // id -- + if id := c.res.dialect.draft.getID(c.obj); id != "" { + s.ID = id + } + + // anchor -- + if 
s.DraftVersion < 2019 { + // anchor is specified in id + id := c.string(c.res.dialect.draft.id) + if id != "" { + _, f := split(id) + if f != "" { + var err error + s.Anchor, err = decode(f) + if err != nil { + return &ParseAnchorError{URL: s.Location} + } + } + } + } else { + s.Anchor = c.string("$anchor") + } + + if err := c.compileDraft4(s); err != nil { + return err + } + if s.DraftVersion >= 6 { + if err := c.compileDraft6(s); err != nil { + return err + } + } + if s.DraftVersion >= 7 { + if err := c.compileDraft7(s); err != nil { + return err + } + } + if s.DraftVersion >= 2019 { + if err := c.compileDraft2019(s); err != nil { + return err + } + } + if s.DraftVersion >= 2020 { + if err := c.compileDraft2020(s); err != nil { + return err + } + } + + // vocabularies + vocabs := c.res.dialect.activeVocabs(c.c.roots.assertVocabs, c.c.roots.vocabularies) + for _, vocab := range vocabs { + v := c.c.roots.vocabularies[vocab] + if v == nil { + continue + } + ext, err := v.Compile(&CompilerContext{c}, c.obj) + if err != nil { + return err + } + if ext != nil { + s.Extensions = append(s.Extensions, ext) + } + } + + return nil +} + +func (c *objCompiler) compileDraft4(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.Ref, err = c.enqueueRef("$ref"); err != nil { + return err + } + if s.DraftVersion < 2019 && s.Ref != nil { + // All other properties in a "$ref" object MUST be ignored + return nil + } + } + + if c.hasVocab("applicator") { + s.AllOf = c.enqueueArr("allOf") + s.AnyOf = c.enqueueArr("anyOf") + s.OneOf = c.enqueueArr("oneOf") + s.Not = c.enqueueProp("not") + + if s.DraftVersion < 2020 { + if items, ok := c.obj["items"]; ok { + if _, ok := items.([]any); ok { + s.Items = c.enqueueArr("items") + s.AdditionalItems = c.enqueueAdditional("additionalItems") + } else { + s.Items = c.enqueueProp("items") + } + } + } + + s.Properties = c.enqueueMap("properties") + if m := c.enqueueMap("patternProperties"); m != nil { + s.PatternProperties = map[Regexp]*Schema{} + for pname, sch := range m { + re, err := c.c.roots.regexpEngine(pname) + if err != nil { + return &InvalidRegexError{c.up.format("patternProperties"), pname, err} + } + s.PatternProperties[re] = sch + } + } + s.AdditionalProperties = c.enqueueAdditional("additionalProperties") + + if m := c.objVal("dependencies"); m != nil { + s.Dependencies = map[string]any{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.Dependencies[pname] = toStrings(arr) + } else { + ptr := c.up.ptr.append2("dependencies", pname) + s.Dependencies[pname] = c.enqueuePtr(ptr) + } + } + } + } + + if c.hasVocab("validation") { + if t, ok := c.obj["type"]; ok { + s.Types = newTypes(t) + } + if arr := c.arrVal("enum"); arr != nil { + s.Enum = newEnum(arr) + } + s.MultipleOf = c.numVal("multipleOf") + s.Maximum = c.numVal("maximum") + if c.boolean("exclusiveMaximum") { + s.ExclusiveMaximum = s.Maximum + s.Maximum = nil + } else { + s.ExclusiveMaximum = c.numVal("exclusiveMaximum") + } + s.Minimum = c.numVal("minimum") + if c.boolean("exclusiveMinimum") { + s.ExclusiveMinimum = s.Minimum + s.Minimum = nil + } else { + s.ExclusiveMinimum = c.numVal("exclusiveMinimum") + } + + s.MinLength = c.intVal("minLength") + s.MaxLength = c.intVal("maxLength") + if pat := c.strVal("pattern"); pat != nil { + s.Pattern, err = c.c.roots.regexpEngine(*pat) + if err != nil { + return &InvalidRegexError{c.up.format("pattern"), *pat, err} + } + } + + s.MinItems = c.intVal("minItems") + s.MaxItems = c.intVal("maxItems") + s.UniqueItems = 
c.boolean("uniqueItems") + + s.MaxProperties = c.intVal("maxProperties") + s.MinProperties = c.intVal("minProperties") + if arr := c.arrVal("required"); arr != nil { + s.Required = toStrings(arr) + } + } + + // format -- + if c.assertFormat(s.DraftVersion) { + if f := c.strVal("format"); f != nil { + if *f == "regex" { + s.Format = &Format{ + Name: "regex", + Validate: c.c.roots.regexpEngine.validate, + } + } else { + s.Format = c.c.formats[*f] + if s.Format == nil { + s.Format = formats[*f] + } + } + } + } + + // annotations -- + s.Title = c.string("title") + s.Description = c.string("description") + if v, ok := c.obj["default"]; ok { + s.Default = &v + } + + return nil +} + +func (c *objCompiler) compileDraft6(s *Schema) error { + if c.hasVocab("applicator") { + s.Contains = c.enqueueProp("contains") + s.PropertyNames = c.enqueueProp("propertyNames") + } + if c.hasVocab("validation") { + if v, ok := c.obj["const"]; ok { + s.Const = &v + } + } + return nil +} + +func (c *objCompiler) compileDraft7(s *Schema) error { + if c.hasVocab("applicator") { + s.If = c.enqueueProp("if") + if s.If != nil { + b := c.boolVal("if") + if b == nil || *b { + s.Then = c.enqueueProp("then") + } + if b == nil || !*b { + s.Else = c.enqueueProp("else") + } + } + } + + if c.c.assertContent { + if ce := c.strVal("contentEncoding"); ce != nil { + s.ContentEncoding = c.c.decoders[*ce] + if s.ContentEncoding == nil { + s.ContentEncoding = decoders[*ce] + } + } + if cm := c.strVal("contentMediaType"); cm != nil { + s.ContentMediaType = c.c.mediaTypes[*cm] + if s.ContentMediaType == nil { + s.ContentMediaType = mediaTypes[*cm] + } + } + } + + // annotations -- + s.Comment = c.string("$comment") + s.ReadOnly = c.boolean("readOnly") + s.WriteOnly = c.boolean("writeOnly") + if arr, ok := c.obj["examples"].([]any); ok { + s.Examples = arr + } + + return nil +} + +func (c *objCompiler) compileDraft2019(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.RecursiveRef, err = c.enqueueRef("$recursiveRef"); err != nil { + return err + } + s.RecursiveAnchor = c.boolean("$recursiveAnchor") + } + + if c.hasVocab("validation") { + if s.Contains != nil { + s.MinContains = c.intVal("minContains") + s.MaxContains = c.intVal("maxContains") + } + if m := c.objVal("dependentRequired"); m != nil { + s.DependentRequired = map[string][]string{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.DependentRequired[pname] = toStrings(arr) + } + } + } + } + + if c.hasVocab("applicator") { + s.DependentSchemas = c.enqueueMap("dependentSchemas") + } + + var unevaluated bool + if s.DraftVersion == 2019 { + unevaluated = c.hasVocab("applicator") + } else { + unevaluated = c.hasVocab("unevaluated") + } + if unevaluated { + s.UnevaluatedItems = c.enqueueProp("unevaluatedItems") + s.UnevaluatedProperties = c.enqueueProp("unevaluatedProperties") + } + + if c.c.assertContent { + if s.ContentMediaType != nil && s.ContentMediaType.UnmarshalJSON != nil { + s.ContentSchema = c.enqueueProp("contentSchema") + } + } + + // annotations -- + s.Deprecated = c.boolean("deprecated") + + return nil +} + +func (c *objCompiler) compileDraft2020(s *Schema) error { + if c.hasVocab("core") { + sch, err := c.enqueueRef("$dynamicRef") + if err != nil { + return err + } + if sch != nil { + dref := c.strVal("$dynamicRef") + _, frag, err := splitFragment(*dref) + if err != nil { + return err + } + var anch string + if anchor, ok := frag.convert().(anchor); ok { + anch = string(anchor) + } + s.DynamicRef = &DynamicRef{sch, anch} + } + 
s.DynamicAnchor = c.string("$dynamicAnchor") + } + + if c.hasVocab("applicator") { + s.PrefixItems = c.enqueueArr("prefixItems") + s.Items2020 = c.enqueueProp("items") + } + + return nil +} + +// enqueue helpers -- + +func (c *objCompiler) enqueuePtr(ptr jsonPointer) *Schema { + up := urlPtr{c.up.url, ptr} + return c.c.enqueue(c.q, up) +} + +func (c *objCompiler) enqueueRef(pname string) (*Schema, error) { + ref := c.strVal(pname) + if ref == nil { + return nil, nil + } + baseURL := c.res.id + // baseURL := c.r.baseURL(c.up.ptr) + uf, err := baseURL.join(*ref) + if err != nil { + return nil, err + } + + up, err := c.r.resolve(*uf) + if err != nil { + return nil, err + } + if up != nil { + // local ref + return c.enqueuePtr(up.ptr), nil + } + + // remote ref + up_, err := c.c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.c.enqueue(c.q, up_), nil +} + +func (c *objCompiler) enqueueProp(pname string) *Schema { + if _, ok := c.obj[pname]; !ok { + return nil + } + ptr := c.up.ptr.append(pname) + return c.enqueuePtr(ptr) +} + +func (c *objCompiler) enqueueArr(pname string) []*Schema { + arr := c.arrVal(pname) + if arr == nil { + return nil + } + sch := make([]*Schema, len(arr)) + for i := range arr { + ptr := c.up.ptr.append2(pname, strconv.Itoa(i)) + sch[i] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueMap(pname string) map[string]*Schema { + obj := c.objVal(pname) + if obj == nil { + return nil + } + sch := make(map[string]*Schema) + for k := range obj { + ptr := c.up.ptr.append2(pname, k) + sch[k] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueAdditional(pname string) any { + if b := c.boolVal(pname); b != nil { + return *b + } + if sch := c.enqueueProp(pname); sch != nil { + return sch + } + return nil +} + +// -- + +func (c *objCompiler) hasVocab(name string) bool { + return c.res.dialect.hasVocab(name) +} + +func (c *objCompiler) assertFormat(draftVersion int) bool { + if c.c.assertFormat || draftVersion < 2019 { + return true + } + if draftVersion == 2019 { + return c.hasVocab("format") + } else { + return c.hasVocab("format-assertion") + } +} + +// value helpers -- + +func (c *objCompiler) boolVal(pname string) *bool { + v, ok := c.obj[pname] + if !ok { + return nil + } + b, ok := v.(bool) + if !ok { + return nil + } + return &b +} + +func (c *objCompiler) boolean(pname string) bool { + b := c.boolVal(pname) + return b != nil && *b +} + +func (c *objCompiler) strVal(pname string) *string { + v, ok := c.obj[pname] + if !ok { + return nil + } + s, ok := v.(string) + if !ok { + return nil + } + return &s +} + +func (c *objCompiler) string(pname string) string { + if s := c.strVal(pname); s != nil { + return *s + } + return "" +} + +func (c *objCompiler) numVal(pname string) *big.Rat { + v, ok := c.obj[pname] + if !ok { + return nil + } + switch v.(type) { + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + if n, ok := new(big.Rat).SetString(fmt.Sprint(v)); ok { + return n + } + } + return nil +} + +func (c *objCompiler) intVal(pname string) *int { + if n := c.numVal(pname); n != nil && n.IsInt() { + n := int(n.Num().Int64()) + return &n + } + return nil +} + +func (c *objCompiler) objVal(pname string) map[string]any { + v, ok := c.obj[pname] + if !ok { + return nil + } + obj, ok := v.(map[string]any) + if !ok { + return nil + } + return obj +} + +func (c *objCompiler) arrVal(pname string) []any { + v, ok := c.obj[pname] + if !ok { + return nil + } + 
arr, ok := v.([]any) + if !ok { + return nil + } + return arr +} + +// -- + +type InvalidRegexError struct { + URL string + Regex string + Err error +} + +func (e *InvalidRegexError) Error() string { + return fmt.Sprintf("invalid regex %q at %q: %v", e.Regex, e.URL, e.Err) +} + +// -- + +func toStrings(arr []any) []string { + var strings []string + for _, item := range arr { + if s, ok := item.(string); ok { + strings = append(strings, s) + } + } + return strings +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go new file mode 100644 index 000000000..69d3f26de --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go @@ -0,0 +1,216 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/language" + "golang.org/x/text/message" +) + +var defaultPrinter = message.NewPrinter(language.English) + +// format --- + +func (e *ValidationError) schemaURL() string { + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + return ref.URL + } else { + return e.SchemaURL + } +} + +func (e *ValidationError) absoluteKeywordLocation() string { + var schemaURL string + var keywordPath []string + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + schemaURL = ref.URL + keywordPath = nil + } else { + schemaURL = e.SchemaURL + keywordPath = e.ErrorKind.KeywordPath() + } + return fmt.Sprintf("%s%s", schemaURL, encode(jsonPtr(keywordPath))) +} + +func (e *ValidationError) skip() bool { + if len(e.Causes) == 1 { + _, ok := e.ErrorKind.(*kind.Reference) + return ok + } + return false +} + +func (e *ValidationError) display(sb *strings.Builder, verbose bool, indent int, absKwLoc string, p *message.Printer) { + if !e.skip() { + if indent > 0 { + sb.WriteByte('\n') + for i := 0; i < indent-1; i++ { + sb.WriteString(" ") + } + sb.WriteString("- ") + } + indent = indent + 1 + + prevAbsKwLoc := absKwLoc + absKwLoc = e.absoluteKeywordLocation() + + if _, ok := e.ErrorKind.(*kind.Schema); ok { + sb.WriteString(e.ErrorKind.LocalizedString(p)) + } else { + sb.WriteString(p.Sprintf("at %s", quote(jsonPtr(e.InstanceLocation)))) + if verbose { + schLoc := absKwLoc + if prevAbsKwLoc != "" { + pu, _ := split(prevAbsKwLoc) + u, f := split(absKwLoc) + if u == pu { + schLoc = fmt.Sprintf("S#%s", f) + } + } + fmt.Fprintf(sb, " [%s]", schLoc) + } + fmt.Fprintf(sb, ": %s", e.ErrorKind.LocalizedString(p)) + } + } + for _, cause := range e.Causes { + cause.display(sb, verbose, indent, absKwLoc, p) + } +} + +func (e *ValidationError) Error() string { + return e.LocalizedError(defaultPrinter) +} + +func (e *ValidationError) LocalizedError(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, false, 0, "", p) + return sb.String() +} + +func (e *ValidationError) GoString() string { + return e.LocalizedGoString(defaultPrinter) +} + +func (e *ValidationError) LocalizedGoString(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, true, 0, "", p) + return sb.String() +} + +func jsonPtr(tokens []string) string { + var sb strings.Builder + for _, tok := range tokens { + sb.WriteByte('/') + sb.WriteString(escape(tok)) + } + return sb.String() +} + +// -- + +// Flag is output format with simple boolean property valid. +type FlagOutput struct { + Valid bool `json:"valid"` +} + +// The `Flag` output format, merely the boolean result. 
+func (e *ValidationError) FlagOutput() *FlagOutput { + return &FlagOutput{Valid: false} +} + +// -- + +type OutputUnit struct { + Valid bool `json:"valid"` + KeywordLocation string `json:"keywordLocation"` + AbsoluteKeywordLocation string `json:"absoluteKeywordLocation,omitempty"` + InstanceLocation string `json:"instanceLocation"` + Error *OutputError `json:"error,omitempty"` + Errors []OutputUnit `json:"errors,omitempty"` +} + +type OutputError struct { + Kind ErrorKind + p *message.Printer +} + +func (k OutputError) String() string { + return k.Kind.LocalizedString(k.p) +} + +func (k OutputError) MarshalJSON() ([]byte, error) { + return json.Marshal(k.Kind.LocalizedString(k.p)) +} + +// The `Basic` structure, a flat list of output units. +func (e *ValidationError) BasicOutput() *OutputUnit { + return e.LocalizedBasicOutput(defaultPrinter) +} + +func (e *ValidationError) LocalizedBasicOutput(p *message.Printer) *OutputUnit { + out := e.output(true, false, "", "", p) + return &out +} + +// The `Detailed` structure, based on the schema. +func (e *ValidationError) DetailedOutput() *OutputUnit { + return e.LocalizedDetailedOutput(defaultPrinter) +} + +func (e *ValidationError) LocalizedDetailedOutput(p *message.Printer) *OutputUnit { + out := e.output(false, false, "", "", p) + return &out +} + +func (e *ValidationError) output(flatten, inRef bool, schemaURL, kwLoc string, p *message.Printer) OutputUnit { + if !inRef { + if _, ok := e.ErrorKind.(*kind.Reference); ok { + inRef = true + } + } + if schemaURL != "" { + kwLoc += e.SchemaURL[len(schemaURL):] + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + kwLoc += jsonPtr(ref.KeywordPath()) + } + } + schemaURL = e.schemaURL() + + keywordLocation := kwLoc + if _, ok := e.ErrorKind.(*kind.Reference); !ok { + keywordLocation += jsonPtr(e.ErrorKind.KeywordPath()) + } + + out := OutputUnit{ + Valid: false, + InstanceLocation: jsonPtr(e.InstanceLocation), + KeywordLocation: keywordLocation, + } + if inRef { + out.AbsoluteKeywordLocation = e.absoluteKeywordLocation() + } + for _, cause := range e.Causes { + causeOut := cause.output(flatten, inRef, schemaURL, kwLoc, p) + if cause.skip() { + causeOut = causeOut.Errors[0] + } + if flatten { + errors := causeOut.Errors + causeOut.Errors = nil + causeOut.Error = &OutputError{cause.ErrorKind, p} + out.Errors = append(out.Errors, causeOut) + if len(errors) > 0 { + out.Errors = append(out.Errors, errors...) + } + } else { + out.Errors = append(out.Errors, causeOut) + } + } + if len(out.Errors) == 0 { + out.Error = &OutputError{e.ErrorKind, p} + } + return out +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go new file mode 100644 index 000000000..576a2a47f --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go @@ -0,0 +1,142 @@ +package jsonschema + +import ( + "strconv" + "strings" +) + +// Position tells possible tokens in json. 
+type Position interface { + collect(v any, ptr jsonPointer) map[jsonPointer]any +} + +// -- + +type AllProp struct{} + +func (AllProp) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for pname, pvalue := range obj { + m[ptr.append(pname)] = pvalue + } + return m +} + +// -- + +type AllItem struct{} + +func (AllItem) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for i, item := range arr { + m[ptr.append(strconv.Itoa(i))] = item + } + return m +} + +// -- + +type Prop string + +func (p Prop) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + pvalue, ok := obj[string(p)] + if !ok { + return nil + } + return map[jsonPointer]any{ + ptr.append(string(p)): pvalue, + } +} + +// -- + +type Item int + +func (i Item) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + if i < 0 || int(i) >= len(arr) { + return nil + } + return map[jsonPointer]any{ + ptr.append(strconv.Itoa(int(i))): arr[int(i)], + } +} + +// -- + +// SchemaPath tells where to look for subschema inside keyword. +type SchemaPath []Position + +func schemaPath(path string) SchemaPath { + var sp SchemaPath + for _, tok := range strings.Split(path, "/") { + var pos Position + switch tok { + case "*": + pos = AllProp{} + case "[]": + pos = AllItem{} + default: + if i, err := strconv.Atoi(tok); err == nil { + pos = Item(i) + } else { + pos = Prop(tok) + } + } + sp = append(sp, pos) + } + return sp +} + +func (sp SchemaPath) collect(v any, ptr jsonPointer) map[jsonPointer]any { + if len(sp) == 0 { + return map[jsonPointer]any{ + ptr: v, + } + } + p, sp := sp[0], sp[1:] + m := p.collect(v, ptr) + mm := map[jsonPointer]any{} + for ptr, v := range m { + m = sp.collect(v, ptr) + for k, v := range m { + mm[k] = v + } + } + return mm +} + +func (sp SchemaPath) String() string { + var sb strings.Builder + for _, pos := range sp { + if sb.Len() != 0 { + sb.WriteByte('/') + } + switch pos := pos.(type) { + case AllProp: + sb.WriteString("*") + case AllItem: + sb.WriteString("[]") + case Prop: + sb.WriteString(string(pos)) + case Item: + sb.WriteString(strconv.Itoa(int(pos))) + } + } + return sb.String() +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go new file mode 100644 index 000000000..a8b819bab --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go @@ -0,0 +1,202 @@ +package jsonschema + +import ( + "fmt" + "slices" + "strings" +) + +type root struct { + url url + doc any + resources map[jsonPointer]*resource + subschemasProcessed map[jsonPointer]struct{} +} + +func (r *root) rootResource() *resource { + return r.resources[""] +} + +func (r *root) resource(ptr jsonPointer) *resource { + for { + if res, ok := r.resources[ptr]; ok { + return res + } + slash := strings.LastIndexByte(string(ptr), '/') + if slash == -1 { + break + } + ptr = ptr[:slash] + } + return r.rootResource() +} + +func (r *root) resolveFragmentIn(frag fragment, res *resource) (urlPtr, error) { + var ptr jsonPointer + switch f := frag.convert().(type) { + case jsonPointer: + ptr = res.ptr.concat(f) + case anchor: + aptr, ok := res.anchors[f] + if !ok { + return urlPtr{}, &AnchorNotFoundError{ + URL: r.url.String(), + Reference: (&urlFrag{res.id, 
frag}).String(), + } + } + ptr = aptr + } + return urlPtr{r.url, ptr}, nil +} + +func (r *root) resolveFragment(frag fragment) (urlPtr, error) { + return r.resolveFragmentIn(frag, r.rootResource()) +} + +// resolves urlFrag to urlPtr from root. +// returns nil if it is external. +func (r *root) resolve(uf urlFrag) (*urlPtr, error) { + var res *resource + if uf.url == r.url { + res = r.rootResource() + } else { + // look for resource with id==uf.url + for _, v := range r.resources { + if v.id == uf.url { + res = v + break + } + } + if res == nil { + return nil, nil // external url + } + } + up, err := r.resolveFragmentIn(uf.frag, res) + return &up, err +} + +func (r *root) collectAnchors(sch any, schPtr jsonPointer, res *resource) error { + obj, ok := sch.(map[string]any) + if !ok { + return nil + } + + addAnchor := func(anchor anchor) error { + ptr1, ok := res.anchors[anchor] + if ok { + if ptr1 == schPtr { + // anchor with same root_ptr already exists + return nil + } + return &DuplicateAnchorError{ + string(anchor), r.url.String(), string(ptr1), string(schPtr), + } + } + res.anchors[anchor] = schPtr + return nil + } + + if res.dialect.draft.version < 2019 { + if _, ok := obj["$ref"]; ok { + // All other properties in a "$ref" object MUST be ignored + return nil + } + // anchor is specified in id + if id, ok := strVal(obj, res.dialect.draft.id); ok { + _, frag, err := splitFragment(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseAnchorError{loc.String()} + } + if anchor, ok := frag.convert().(anchor); ok { + if err := addAnchor(anchor); err != nil { + return err + } + } + } + } + if res.dialect.draft.version >= 2019 { + if s, ok := strVal(obj, "$anchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + } + } + if res.dialect.draft.version >= 2020 { + if s, ok := strVal(obj, "$dynamicAnchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + res.dynamicAnchors = append(res.dynamicAnchors, anchor(s)) + } + } + + return nil +} + +func (r *root) clone() *root { + processed := map[jsonPointer]struct{}{} + for k := range r.subschemasProcessed { + processed[k] = struct{}{} + } + resources := map[jsonPointer]*resource{} + for k, v := range r.resources { + resources[k] = v.clone() + } + return &root{ + url: r.url, + doc: r.doc, + resources: resources, + subschemasProcessed: processed, + } +} + +// -- + +type resource struct { + ptr jsonPointer + id url + dialect dialect + anchors map[anchor]jsonPointer + dynamicAnchors []anchor +} + +func newResource(ptr jsonPointer, id url) *resource { + return &resource{ptr: ptr, id: id, anchors: make(map[anchor]jsonPointer)} +} + +func (res *resource) clone() *resource { + anchors := map[anchor]jsonPointer{} + for k, v := range res.anchors { + anchors[k] = v + } + return &resource{ + ptr: res.ptr, + id: res.id, + dialect: res.dialect, + anchors: anchors, + dynamicAnchors: slices.Clone(res.dynamicAnchors), + } +} + +//-- + +type UnsupportedVocabularyError struct { + URL string + Vocabulary string +} + +func (e *UnsupportedVocabularyError) Error() string { + return fmt.Sprintf("unsupported vocabulary %q in %q", e.Vocabulary, e.URL) +} + +// -- + +type AnchorNotFoundError struct { + URL string + Reference string +} + +func (e *AnchorNotFoundError) Error() string { + return fmt.Sprintf("anchor in %q not found in schema %q", e.Reference, e.URL) +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go new file mode 
100644 index 000000000..a8d0ef0ce --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go @@ -0,0 +1,286 @@ +package jsonschema + +import ( + "fmt" + "strings" +) + +type roots struct { + defaultDraft *Draft + roots map[url]*root + loader defaultLoader + regexpEngine RegexpEngine + vocabularies map[string]*Vocabulary + assertVocabs bool +} + +func newRoots() *roots { + return &roots{ + defaultDraft: draftLatest, + roots: map[url]*root{}, + loader: defaultLoader{ + docs: map[url]any{}, + loader: FileLoader{}, + }, + regexpEngine: goRegexpCompile, + vocabularies: map[string]*Vocabulary{}, + } +} + +func (rr *roots) orLoad(u url) (*root, error) { + if r, ok := rr.roots[u]; ok { + return r, nil + } + doc, err := rr.loader.load(u) + if err != nil { + return nil, err + } + return rr.addRoot(u, doc) +} + +func (rr *roots) addRoot(u url, doc any) (*root, error) { + r := &root{ + url: u, + doc: doc, + resources: map[jsonPointer]*resource{}, + subschemasProcessed: map[jsonPointer]struct{}{}, + } + if err := rr.collectResources(r, doc, u, "", dialect{rr.defaultDraft, nil}); err != nil { + return nil, err + } + if !strings.HasPrefix(u.String(), "http://json-schema.org/") && + !strings.HasPrefix(u.String(), "https://json-schema.org/") { + if err := rr.validate(r, doc, ""); err != nil { + return nil, err + } + } + + rr.roots[u] = r + return r, nil +} + +func (rr *roots) resolveFragment(uf urlFrag) (urlPtr, error) { + r, err := rr.orLoad(uf.url) + if err != nil { + return urlPtr{}, err + } + return r.resolveFragment(uf.frag) +} + +func (rr *roots) collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + if _, ok := r.subschemasProcessed[schPtr]; ok { + return nil + } + if err := rr._collectResources(r, sch, base, schPtr, fallback); err != nil { + return err + } + r.subschemasProcessed[schPtr] = struct{}{} + return nil +} + +func (rr *roots) _collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + obj, ok := sch.(map[string]any) + if !ok { + if schPtr.isEmpty() { + // root resource + res := newResource(schPtr, base) + res.dialect = fallback + r.resources[schPtr] = res + } + return nil + } + + hasSchema := false + if sch, ok := obj["$schema"]; ok { + if _, ok := sch.(string); ok { + hasSchema = true + } + } + + draft, err := rr.loader.getDraft(urlPtr{r.url, schPtr}, sch, fallback.draft, map[url]struct{}{}) + if err != nil { + return err + } + id := draft.getID(obj) + if id == "" && !schPtr.isEmpty() { + // ignore $schema + draft = fallback.draft + hasSchema = false + id = draft.getID(obj) + } + + var res *resource + if id != "" { + uf, err := base.join(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseIDError{loc.String()} + } + base = uf.url + res = newResource(schPtr, base) + } else if schPtr.isEmpty() { + // root resource + res = newResource(schPtr, base) + } + + if res != nil { + found := false + for _, res := range r.resources { + if res.id == base { + found = true + if res.ptr != schPtr { + return &DuplicateIDError{base.String(), r.url.String(), string(schPtr), string(res.ptr)} + } + } + } + if !found { + if hasSchema { + vocabs, err := rr.loader.getMetaVocabs(sch, draft, rr.vocabularies) + if err != nil { + return err + } + res.dialect = dialect{draft, vocabs} + } else { + res.dialect = fallback + } + r.resources[schPtr] = res + } + } + + var baseRes *resource + for _, res := range r.resources { + if res.id == base { + baseRes = res + break + } + } + if baseRes == nil { + 
panic("baseres is nil") + } + + // found base resource + if err := r.collectAnchors(sch, schPtr, baseRes); err != nil { + return err + } + + // process subschemas + subschemas := map[jsonPointer]any{} + for _, sp := range draft.subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + for _, vocab := range baseRes.dialect.activeVocabs(true, rr.vocabularies) { + if v := rr.vocabularies[vocab]; v != nil { + for _, sp := range v.Subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + } + } + for ptr, v := range subschemas { + if err := rr.collectResources(r, v, base, ptr, baseRes.dialect); err != nil { + return err + } + } + + return nil +} + +func (rr *roots) ensureSubschema(up urlPtr) error { + r, err := rr.orLoad(up.url) + if err != nil { + return err + } + if _, ok := r.subschemasProcessed[up.ptr]; ok { + return nil + } + v, err := up.lookup(r.doc) + if err != nil { + return err + } + rClone := r.clone() + if err := rr.addSubschema(rClone, up.ptr); err != nil { + return err + } + if err := rr.validate(rClone, v, up.ptr); err != nil { + return err + } + rr.roots[r.url] = rClone + return nil +} + +func (rr *roots) addSubschema(r *root, ptr jsonPointer) error { + v, err := (&urlPtr{r.url, ptr}).lookup(r.doc) + if err != nil { + return err + } + base := r.resource(ptr) + baseURL := base.id + if err := rr.collectResources(r, v, baseURL, ptr, base.dialect); err != nil { + return err + } + + // collect anchors + if _, ok := r.resources[ptr]; !ok { + res := r.resource(ptr) + if err := r.collectAnchors(v, ptr, res); err != nil { + return err + } + } + return nil +} + +func (rr *roots) validate(r *root, v any, ptr jsonPointer) error { + dialect := r.resource(ptr).dialect + meta := dialect.getSchema(rr.assertVocabs, rr.vocabularies) + if err := meta.validate(v, rr.regexpEngine, meta, r.resources, rr.assertVocabs, rr.vocabularies); err != nil { + up := urlPtr{r.url, ptr} + return &SchemaValidationError{URL: up.String(), Err: err} + } + return nil +} + +// -- + +type InvalidMetaSchemaURLError struct { + URL string + Err error +} + +func (e *InvalidMetaSchemaURLError) Error() string { + return fmt.Sprintf("invalid $schema in %q: %v", e.URL, e.Err) +} + +// -- + +type UnsupportedDraftError struct { + URL string +} + +func (e *UnsupportedDraftError) Error() string { + return fmt.Sprintf("draft %q is not supported", e.URL) +} + +// -- + +type MetaSchemaCycleError struct { + URL string +} + +func (e *MetaSchemaCycleError) Error() string { + return fmt.Sprintf("cycle in resolving $schema in %q", e.URL) +} + +// -- + +type MetaSchemaMismatchError struct { + URL string +} + +func (e *MetaSchemaMismatchError) Error() string { + return fmt.Sprintf("$schema in %q does not match with $schema in root", e.URL) +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go new file mode 100644 index 000000000..b4c1f37af --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go @@ -0,0 +1,254 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Schema is the representation of a compiled +// jsonschema. 
+type Schema struct { + up urlPtr + resource *Schema + dynamicAnchors map[string]*Schema + allPropsEvaluated bool + allItemsEvaluated bool + numItemsEvaluated int + + DraftVersion int + Location string + + // type agnostic -- + Bool *bool // boolean schema + ID string + Ref *Schema + Anchor string + RecursiveRef *Schema + RecursiveAnchor bool + DynamicRef *DynamicRef + DynamicAnchor string // "" if not specified + Types *Types + Enum *Enum + Const *any + Not *Schema + AllOf []*Schema + AnyOf []*Schema + OneOf []*Schema + If *Schema + Then *Schema + Else *Schema + Format *Format + + // object -- + MaxProperties *int + MinProperties *int + Required []string + PropertyNames *Schema + Properties map[string]*Schema + PatternProperties map[Regexp]*Schema + AdditionalProperties any // nil or bool or *Schema + Dependencies map[string]any // value is []string or *Schema + DependentRequired map[string][]string + DependentSchemas map[string]*Schema + UnevaluatedProperties *Schema + + // array -- + MinItems *int + MaxItems *int + UniqueItems bool + Contains *Schema + MinContains *int + MaxContains *int + Items any // nil or []*Schema or *Schema + AdditionalItems any // nil or bool or *Schema + PrefixItems []*Schema + Items2020 *Schema + UnevaluatedItems *Schema + + // string -- + MinLength *int + MaxLength *int + Pattern Regexp + ContentEncoding *Decoder + ContentMediaType *MediaType + ContentSchema *Schema + + // number -- + Maximum *big.Rat + Minimum *big.Rat + ExclusiveMaximum *big.Rat + ExclusiveMinimum *big.Rat + MultipleOf *big.Rat + + Extensions []SchemaExt + + // annotations -- + Title string + Description string + Default *any + Comment string + ReadOnly bool + WriteOnly bool + Examples []any + Deprecated bool +} + +// -- + +type jsonType int + +const ( + invalidType jsonType = 0 + nullType jsonType = 1 << iota + booleanType + numberType + integerType + stringType + arrayType + objectType +) + +func typeOf(v any) jsonType { + switch v.(type) { + case nil: + return nullType + case bool: + return booleanType + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return numberType + case string: + return stringType + case []any: + return arrayType + case map[string]any: + return objectType + default: + return invalidType + } +} + +func typeFromString(s string) jsonType { + switch s { + case "null": + return nullType + case "boolean": + return booleanType + case "number": + return numberType + case "integer": + return integerType + case "string": + return stringType + case "array": + return arrayType + case "object": + return objectType + } + return invalidType +} + +func (jt jsonType) String() string { + switch jt { + case nullType: + return "null" + case booleanType: + return "boolean" + case numberType: + return "number" + case integerType: + return "integer" + case stringType: + return "string" + case arrayType: + return "array" + case objectType: + return "object" + } + return "" +} + +// -- + +// Types encapsulates list of json value types. +type Types int + +func newTypes(v any) *Types { + var types Types + switch v := v.(type) { + case string: + types.Add(v) + case []any: + for _, item := range v { + if s, ok := item.(string); ok { + types.Add(s) + } + } + } + if types.IsEmpty() { + return nil + } + return &types +} + +func (tt Types) IsEmpty() bool { + return tt == 0 +} + +// Add specified json type. If typ is +// not valid json type it is ignored. 
+func (tt *Types) Add(typ string) { + tt.add(typeFromString(typ)) +} + +func (tt *Types) add(t jsonType) { + *tt = Types(int(*tt) | int(t)) +} + +func (tt Types) contains(t jsonType) bool { + return int(tt)&int(t) != 0 +} + +func (tt Types) ToStrings() []string { + types := []jsonType{ + nullType, booleanType, numberType, integerType, + stringType, arrayType, objectType, + } + var arr []string + for _, t := range types { + if tt.contains(t) { + arr = append(arr, t.String()) + } + } + return arr +} + +func (tt Types) String() string { + return fmt.Sprintf("%v", tt.ToStrings()) +} + +// -- + +type Enum struct { + Values []any + types Types +} + +func newEnum(arr []any) *Enum { + var types Types + for _, item := range arr { + types.add(typeOf(item)) + } + return &Enum{arr, types} +} + +// -- + +type DynamicRef struct { + Ref *Schema + Anchor string // "" if not specified +} + +func newSchema(up urlPtr) *Schema { + return &Schema{up: up, Location: up.String()} +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go new file mode 100644 index 000000000..c6f8e7752 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go @@ -0,0 +1,464 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "hash/maphash" + "math/big" + gourl "net/url" + "path/filepath" + "runtime" + "slices" + "strconv" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/message" +) + +// -- + +type url (string) + +func (u url) String() string { + return string(u) +} + +func (u url) join(ref string) (*urlFrag, error) { + base, err := gourl.Parse(string(u)) + if err != nil { + return nil, &ParseURLError{URL: u.String(), Err: err} + } + + ref, frag, err := splitFragment(ref) + if err != nil { + return nil, err + } + refURL, err := gourl.Parse(ref) + if err != nil { + return nil, &ParseURLError{URL: ref, Err: err} + } + resolved := base.ResolveReference(refURL) + + // see https://github.com/golang/go/issues/66084 (net/url: ResolveReference ignores Opaque value) + if !refURL.IsAbs() && base.Opaque != "" { + resolved.Opaque = base.Opaque + } + + return &urlFrag{url: url(resolved.String()), frag: frag}, nil +} + +// -- + +type jsonPointer string + +func escape(tok string) string { + tok = strings.ReplaceAll(tok, "~", "~0") + tok = strings.ReplaceAll(tok, "/", "~1") + return tok +} + +func unescape(tok string) (string, bool) { + tilde := strings.IndexByte(tok, '~') + if tilde == -1 { + return tok, true + } + sb := new(strings.Builder) + for { + sb.WriteString(tok[:tilde]) + tok = tok[tilde+1:] + if tok == "" { + return "", false + } + switch tok[0] { + case '0': + sb.WriteByte('~') + case '1': + sb.WriteByte('/') + default: + return "", false + } + tok = tok[1:] + tilde = strings.IndexByte(tok, '~') + if tilde == -1 { + sb.WriteString(tok) + break + } + } + return sb.String(), true +} + +func (ptr jsonPointer) isEmpty() bool { + return string(ptr) == "" +} + +func (ptr jsonPointer) concat(next jsonPointer) jsonPointer { + return jsonPointer(fmt.Sprintf("%s%s", ptr, next)) +} + +func (ptr jsonPointer) append(tok string) jsonPointer { + return jsonPointer(fmt.Sprintf("%s/%s", ptr, escape(tok))) +} + +func (ptr jsonPointer) append2(tok1, tok2 string) jsonPointer { + return jsonPointer(fmt.Sprintf("%s/%s/%s", ptr, escape(tok1), escape(tok2))) +} + +// -- + +type anchor string + +// -- + +type fragment string + +func decode(frag string) (string, error) { + return gourl.PathUnescape(frag) +} + 
+// avoids escaping /. +func encode(frag string) string { + var sb strings.Builder + for i, tok := range strings.Split(frag, "/") { + if i > 0 { + sb.WriteByte('/') + } + sb.WriteString(gourl.PathEscape(tok)) + } + return sb.String() +} + +func splitFragment(str string) (string, fragment, error) { + u, f := split(str) + f, err := decode(f) + if err != nil { + return "", fragment(""), &ParseURLError{URL: str, Err: err} + } + return u, fragment(f), nil +} + +func split(str string) (string, string) { + hash := strings.IndexByte(str, '#') + if hash == -1 { + return str, "" + } + return str[:hash], str[hash+1:] +} + +func (frag fragment) convert() any { + str := string(frag) + if str == "" || strings.HasPrefix(str, "/") { + return jsonPointer(str) + } + return anchor(str) +} + +// -- + +type urlFrag struct { + url url + frag fragment +} + +func startsWithWindowsDrive(s string) bool { + if s != "" && strings.HasPrefix(s[1:], `:\`) { + return (s[0] >= 'a' && s[0] <= 'z') || (s[0] >= 'A' && s[0] <= 'Z') + } + return false +} + +func absolute(input string) (*urlFrag, error) { + u, frag, err := splitFragment(input) + if err != nil { + return nil, err + } + + // if windows absolute file path, convert to file url + // because: net/url parses driver name as scheme + if runtime.GOOS == "windows" && startsWithWindowsDrive(u) { + u = "file:///" + filepath.ToSlash(u) + } + + gourl, err := gourl.Parse(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + if gourl.IsAbs() { + return &urlFrag{url(u), frag}, nil + } + + // avoid filesystem api in wasm + if runtime.GOOS != "js" { + abs, err := filepath.Abs(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + u = abs + } + if !strings.HasPrefix(u, "/") { + u = "/" + u + } + u = "file://" + filepath.ToSlash(u) + + _, err = gourl.Parse(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + return &urlFrag{url: url(u), frag: frag}, nil +} + +func (uf *urlFrag) String() string { + return fmt.Sprintf("%s#%s", uf.url, encode(string(uf.frag))) +} + +// -- + +type urlPtr struct { + url url + ptr jsonPointer +} + +func (up *urlPtr) lookup(v any) (any, error) { + for _, tok := range strings.Split(string(up.ptr), "/")[1:] { + tok, ok := unescape(tok) + if !ok { + return nil, &InvalidJsonPointerError{up.String()} + } + switch val := v.(type) { + case map[string]any: + if pvalue, ok := val[tok]; ok { + v = pvalue + continue + } + case []any: + if index, err := strconv.Atoi(tok); err == nil { + if index >= 0 && index < len(val) { + v = val[index] + continue + } + } + } + return nil, &JSONPointerNotFoundError{up.String()} + } + return v, nil +} + +func (up *urlPtr) format(tok string) string { + return fmt.Sprintf("%s#%s/%s", up.url, encode(string(up.ptr)), encode(escape(tok))) +} + +func (up *urlPtr) String() string { + return fmt.Sprintf("%s#%s", up.url, encode(string(up.ptr))) +} + +// -- + +func minInt(i, j int) int { + if i < j { + return i + } + return j +} + +func strVal(obj map[string]any, prop string) (string, bool) { + v, ok := obj[prop] + if !ok { + return "", false + } + s, ok := v.(string) + return s, ok +} + +func isInteger(num any) bool { + rat, ok := new(big.Rat).SetString(fmt.Sprint(num)) + return ok && rat.IsInt() +} + +// quote returns single-quoted string. +// used for embedding quoted strings in json. 
+func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func equals(v1, v2 any) (bool, ErrorKind) { + switch v1 := v1.(type) { + case map[string]any: + v2, ok := v2.(map[string]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for k, val1 := range v1 { + val2, ok := v2[k] + if !ok { + return false, nil + } + if ok, k := equals(val1, val2); !ok || k != nil { + return ok, k + } + } + return true, nil + case []any: + v2, ok := v2.([]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for i := range v1 { + if ok, k := equals(v1[i], v2[i]); !ok || k != nil { + return ok, k + } + } + return true, nil + case nil: + return v2 == nil, nil + case bool: + v2, ok := v2.(bool) + return ok && v1 == v2, nil + case string: + v2, ok := v2.(string) + return ok && v1 == v2, nil + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + num1, ok1 := new(big.Rat).SetString(fmt.Sprint(v1)) + num2, ok2 := new(big.Rat).SetString(fmt.Sprint(v2)) + return ok1 && ok2 && num1.Cmp(num2) == 0, nil + default: + return false, &kind.InvalidJsonValue{Value: v1} + } +} + +func duplicates(arr []any) (int, int, ErrorKind) { + if len(arr) <= 20 { + for i := 1; i < len(arr); i++ { + for j := 0; j < i; j++ { + if ok, k := equals(arr[i], arr[j]); ok || k != nil { + return j, i, k + } + } + } + return -1, -1, nil + } + + m := make(map[uint64][]int) + h := new(maphash.Hash) + for i, item := range arr { + h.Reset() + writeHash(item, h) + hash := h.Sum64() + indexes, ok := m[hash] + if ok { + for _, j := range indexes { + if ok, k := equals(item, arr[j]); ok || k != nil { + return j, i, k + } + } + } + indexes = append(indexes, i) + m[hash] = indexes + } + return -1, -1, nil +} + +func writeHash(v any, h *maphash.Hash) ErrorKind { + switch v := v.(type) { + case map[string]any: + _ = h.WriteByte(0) + props := make([]string, 0, len(v)) + for prop := range v { + props = append(props, prop) + } + slices.Sort(props) + for _, prop := range props { + writeHash(prop, h) + writeHash(v[prop], h) + } + case []any: + _ = h.WriteByte(1) + for _, item := range v { + writeHash(item, h) + } + case nil: + _ = h.WriteByte(2) + case bool: + _ = h.WriteByte(3) + if v { + _ = h.WriteByte(1) + } else { + _ = h.WriteByte(0) + } + case string: + _ = h.WriteByte(4) + _, _ = h.WriteString(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + _ = h.WriteByte(5) + num, _ := new(big.Rat).SetString(fmt.Sprint(v)) + _, _ = h.Write(num.Num().Bytes()) + _, _ = h.Write(num.Denom().Bytes()) + default: + return &kind.InvalidJsonValue{Value: v} + } + return nil +} + +// -- + +type ParseURLError struct { + URL string + Err error +} + +func (e *ParseURLError) Error() string { + return fmt.Sprintf("error in parsing %q: %v", e.URL, e.Err) +} + +// -- + +type InvalidJsonPointerError struct { + URL string +} + +func (e *InvalidJsonPointerError) Error() string { + return fmt.Sprintf("invalid json-pointer %q", e.URL) +} + +// -- + +type JSONPointerNotFoundError struct { + URL string +} + +func (e *JSONPointerNotFoundError) Error() string { + return fmt.Sprintf("json-pointer in %q not found", e.URL) +} + +// -- + +type SchemaValidationError struct { + URL string + Err error +} + +func (e *SchemaValidationError) Error() string { + return fmt.Sprintf("%q is not valid against metaschema: %v", e.URL, e.Err) +} + +// -- + +// 
LocalizableError is an error whose message is localizable. +func LocalizableError(format string, args ...any) error { + return &localizableError{format, args} +} + +type localizableError struct { + msg string + args []any +} + +func (e *localizableError) Error() string { + return fmt.Sprintf(e.msg, e.args...) +} + +func (e *localizableError) LocalizedError(p *message.Printer) string { + return p.Sprintf(e.msg, e.args...) +} diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go new file mode 100644 index 000000000..e2ace37a9 --- /dev/null +++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go @@ -0,0 +1,975 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "slices" + "strconv" + "unicode/utf8" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/message" +) + +func (sch *Schema) Validate(v any) error { + return sch.validate(v, nil, nil, nil, false, nil) +} + +func (sch *Schema) validate(v any, regexpEngine RegexpEngine, meta *Schema, resources map[jsonPointer]*resource, assertVocabs bool, vocabularies map[string]*Vocabulary) error { + vd := validator{ + v: v, + vloc: make([]string, 0, 8), + sch: sch, + scp: &scope{sch, "", 0, nil}, + uneval: unevalFrom(v, sch, false), + errors: nil, + boolResult: false, + regexpEngine: regexpEngine, + meta: meta, + resources: resources, + assertVocabs: assertVocabs, + vocabularies: vocabularies, + } + if _, err := vd.validate(); err != nil { + verr := err.(*ValidationError) + var causes []*ValidationError + if _, ok := verr.ErrorKind.(*kind.Group); ok { + causes = verr.Causes + } else { + causes = []*ValidationError{verr} + } + return &ValidationError{ + SchemaURL: sch.Location, + InstanceLocation: nil, + ErrorKind: &kind.Schema{Location: sch.Location}, + Causes: causes, + } + } + + return nil +} + +type validator struct { + v any + vloc []string + sch *Schema + scp *scope + uneval *uneval + errors []*ValidationError + boolResult bool // is interested to know valid or not (but not actuall error) + regexpEngine RegexpEngine + + // meta validation + meta *Schema // set only when validating with metaschema + resources map[jsonPointer]*resource // resources which should be validated with their dialect + assertVocabs bool + vocabularies map[string]*Vocabulary +} + +func (vd *validator) validate() (*uneval, error) { + s := vd.sch + v := vd.v + + // boolean -- + if s.Bool != nil { + if *s.Bool { + return vd.uneval, nil + } else { + return nil, vd.error(&kind.FalseSchema{}) + } + } + + // check cycle -- + if scp := vd.scp.checkCycle(); scp != nil { + return nil, vd.error(&kind.RefCycle{ + URL: s.Location, + KeywordLocation1: vd.scp.kwLoc(), + KeywordLocation2: scp.kwLoc(), + }) + } + + t := typeOf(v) + if t == invalidType { + return nil, vd.error(&kind.InvalidJsonValue{Value: v}) + } + + // type -- + if s.Types != nil && !s.Types.IsEmpty() { + matched := s.Types.contains(t) || (s.Types.contains(integerType) && t == numberType && isInteger(v)) + if !matched { + return nil, vd.error(&kind.Type{Got: t.String(), Want: s.Types.ToStrings()}) + } + } + + // const -- + if s.Const != nil { + ok, k := equals(v, *s.Const) + if k != nil { + return nil, vd.error(k) + } else if !ok { + return nil, vd.error(&kind.Const{Got: v, Want: *s.Const}) + } + } + + // enum -- + if s.Enum != nil { + matched := s.Enum.types.contains(typeOf(v)) + if matched { + matched = false + for _, item := range s.Enum.Values { + ok, k := equals(v, 
item) + if k != nil { + return nil, vd.error(k) + } else if ok { + matched = true + break + } + } + } + if !matched { + return nil, vd.error(&kind.Enum{Got: v, Want: s.Enum.Values}) + } + } + + // format -- + if s.Format != nil { + var err error + if s.Format.Name == "regex" && vd.regexpEngine != nil { + err = vd.regexpEngine.validate(v) + } else { + err = s.Format.Validate(v) + } + if err != nil { + return nil, vd.error(&kind.Format{Got: v, Want: s.Format.Name, Err: err}) + } + } + + // $ref -- + if s.Ref != nil { + err := vd.validateRef(s.Ref, "$ref") + if s.DraftVersion < 2019 { + return vd.uneval, err + } + if err != nil { + vd.addErr(err) + } + } + + // type specific validations -- + switch v := v.(type) { + case map[string]any: + vd.objValidate(v) + case []any: + vd.arrValidate(v) + case string: + vd.strValidate(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + vd.numValidate(v) + } + + if len(vd.errors) == 0 || !vd.boolResult { + if s.DraftVersion >= 2019 { + vd.validateRefs() + } + vd.condValidate() + + for _, ext := range s.Extensions { + ext.Validate(&ValidatorContext{vd}, v) + } + + if s.DraftVersion >= 2019 { + vd.unevalValidate() + } + } + + switch len(vd.errors) { + case 0: + return vd.uneval, nil + case 1: + return nil, vd.errors[0] + default: + verr := vd.error(&kind.Group{}) + verr.Causes = vd.errors + return nil, verr + } +} + +func (vd *validator) objValidate(obj map[string]any) { + s := vd.sch + + // minProperties -- + if s.MinProperties != nil { + if len(obj) < *s.MinProperties { + vd.addError(&kind.MinProperties{Got: len(obj), Want: *s.MinProperties}) + } + } + + // maxProperties -- + if s.MaxProperties != nil { + if len(obj) > *s.MaxProperties { + vd.addError(&kind.MaxProperties{Got: len(obj), Want: *s.MaxProperties}) + } + } + + // required -- + if len(s.Required) > 0 { + if missing := vd.findMissing(obj, s.Required); missing != nil { + vd.addError(&kind.Required{Missing: missing}) + } + } + + if vd.boolResult && len(vd.errors) > 0 { + return + } + + // dependencies -- + for pname, dep := range s.Dependencies { + if _, ok := obj[pname]; ok { + switch dep := dep.(type) { + case []string: + if missing := vd.findMissing(obj, dep); missing != nil { + vd.addError(&kind.Dependency{Prop: pname, Missing: missing}) + } + case *Schema: + vd.addErr(vd.validateSelf(dep, "", false)) + } + } + } + + var additionalPros []string + for pname, pvalue := range obj { + if vd.boolResult && len(vd.errors) > 0 { + return + } + evaluated := false + + // properties -- + if sch, ok := s.Properties[pname]; ok { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + + // patternProperties -- + for regex, sch := range s.PatternProperties { + if regex.MatchString(pname) { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + } + + if !evaluated && s.AdditionalProperties != nil { + evaluated = true + switch additional := s.AdditionalProperties.(type) { + case bool: + if !additional { + additionalPros = append(additionalPros, pname) + } + case *Schema: + vd.addErr(vd.validateVal(additional, pvalue, pname)) + } + } + + if evaluated { + delete(vd.uneval.props, pname) + } + } + if len(additionalPros) > 0 { + vd.addError(&kind.AdditionalProperties{Properties: additionalPros}) + } + + if s.DraftVersion == 4 { + return + } + + // propertyNames -- + if s.PropertyNames != nil { + for pname := range obj { + sch, meta, resources := s.PropertyNames, vd.meta, vd.resources + res := vd.metaResource(sch) + if res 
!= nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err := sch.validate(pname, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.PropertyNames.Location + verr.ErrorKind = &kind.PropertyNames{Property: pname} + vd.addErr(verr) + } + } + } + + if s.DraftVersion == 6 { + return + } + + // dependentSchemas -- + for pname, sch := range s.DependentSchemas { + if _, ok := obj[pname]; ok { + vd.addErr(vd.validateSelf(sch, "", false)) + } + } + + // dependentRequired -- + for pname, reqd := range s.DependentRequired { + if _, ok := obj[pname]; ok { + if missing := vd.findMissing(obj, reqd); missing != nil { + vd.addError(&kind.DependentRequired{Prop: pname, Missing: missing}) + } + } + } +} + +func (vd *validator) arrValidate(arr []any) { + s := vd.sch + + // minItems -- + if s.MinItems != nil { + if len(arr) < *s.MinItems { + vd.addError(&kind.MinItems{Got: len(arr), Want: *s.MinItems}) + } + } + + // maxItems -- + if s.MaxItems != nil { + if len(arr) > *s.MaxItems { + vd.addError(&kind.MaxItems{Got: len(arr), Want: *s.MaxItems}) + } + } + + // uniqueItems -- + if s.UniqueItems && len(arr) > 1 { + i, j, k := duplicates(arr) + if k != nil { + vd.addError(k) + } else if i != -1 { + vd.addError(&kind.UniqueItems{Duplicates: [2]int{i, j}}) + } + } + + if s.DraftVersion < 2020 { + evaluated := 0 + + // items -- + switch items := s.Items.(type) { + case *Schema: + for i, item := range arr { + vd.addErr(vd.validateVal(items, item, strconv.Itoa(i))) + } + evaluated = len(arr) + case []*Schema: + min := minInt(len(arr), len(items)) + for i, item := range arr[:min] { + vd.addErr(vd.validateVal(items[i], item, strconv.Itoa(i))) + } + evaluated = min + } + + // additionalItems -- + if s.AdditionalItems != nil { + switch additional := s.AdditionalItems.(type) { + case bool: + if !additional && evaluated != len(arr) { + vd.addError(&kind.AdditionalItems{Count: len(arr) - evaluated}) + } + case *Schema: + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(additional, item, strconv.Itoa(i))) + } + } + } + } else { + evaluated := minInt(len(s.PrefixItems), len(arr)) + + // prefixItems -- + for i, item := range arr[:evaluated] { + vd.addErr(vd.validateVal(s.PrefixItems[i], item, strconv.Itoa(i))) + } + + // items2020 -- + if s.Items2020 != nil { + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(s.Items2020, item, strconv.Itoa(i))) + } + } + } + + // contains -- + if s.Contains != nil { + var errors []*ValidationError + var matched []int + + for i, item := range arr { + if err := vd.validateVal(s.Contains, item, strconv.Itoa(i)); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = append(matched, i) + if s.DraftVersion >= 2020 { + delete(vd.uneval.items, i) + } + } + } + + // minContains -- + if s.MinContains != nil { + if len(matched) < *s.MinContains { + vd.addErrors(errors, &kind.MinContains{Got: matched, Want: *s.MinContains}) + } + } else if len(matched) == 0 { + vd.addErrors(errors, &kind.Contains{}) + } + + // maxContains -- + if s.MaxContains != nil { + if len(matched) > *s.MaxContains { + vd.addError(&kind.MaxContains{Got: matched, Want: *s.MaxContains}) + } + } + } +} + +func (vd *validator) strValidate(str string) { + s := vd.sch + + strLen := -1 + if s.MinLength != nil || s.MaxLength != nil { + strLen = utf8.RuneCount([]byte(str)) + } + + // minLength -- + if s.MinLength != nil { + if strLen < *s.MinLength { + 
vd.addError(&kind.MinLength{Got: strLen, Want: *s.MinLength}) + } + } + + // maxLength -- + if s.MaxLength != nil { + if strLen > *s.MaxLength { + vd.addError(&kind.MaxLength{Got: strLen, Want: *s.MaxLength}) + } + } + + // pattern -- + if s.Pattern != nil { + if !s.Pattern.MatchString(str) { + vd.addError(&kind.Pattern{Got: str, Want: s.Pattern.String()}) + } + } + + if s.DraftVersion == 6 { + return + } + + var err error + + // contentEncoding -- + decoded := []byte(str) + if s.ContentEncoding != nil { + decoded, err = s.ContentEncoding.Decode(str) + if err != nil { + decoded = nil + vd.addError(&kind.ContentEncoding{Want: s.ContentEncoding.Name, Err: err}) + } + } + + var deserialized *any + if decoded != nil && s.ContentMediaType != nil { + if s.ContentSchema == nil { + err = s.ContentMediaType.Validate(decoded) + } else { + var value any + value, err = s.ContentMediaType.UnmarshalJSON(decoded) + if err == nil { + deserialized = &value + } + } + if err != nil { + vd.addError(&kind.ContentMediaType{ + Got: decoded, + Want: s.ContentMediaType.Name, + Err: err, + }) + } + } + + if deserialized != nil && s.ContentSchema != nil { + sch, meta, resources := s.ContentSchema, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err = sch.validate(*deserialized, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.Location + verr.ErrorKind = &kind.ContentSchema{} + vd.addErr(verr) + } + } +} + +func (vd *validator) numValidate(v any) { + s := vd.sch + + var numVal *big.Rat + num := func() *big.Rat { + if numVal == nil { + numVal, _ = new(big.Rat).SetString(fmt.Sprintf("%v", v)) + } + return numVal + } + + // minimum -- + if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { + vd.addError(&kind.Minimum{Got: num(), Want: s.Minimum}) + } + + // maximum -- + if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { + vd.addError(&kind.Maximum{Got: num(), Want: s.Maximum}) + } + + // exclusiveMinimum + if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { + vd.addError(&kind.ExclusiveMinimum{Got: num(), Want: s.ExclusiveMinimum}) + } + + // exclusiveMaximum + if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { + vd.addError(&kind.ExclusiveMaximum{Got: num(), Want: s.ExclusiveMaximum}) + } + + // multipleOf + if s.MultipleOf != nil { + if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { + vd.addError(&kind.MultipleOf{Got: num(), Want: s.MultipleOf}) + } + } +} + +func (vd *validator) condValidate() { + s := vd.sch + + // not -- + if s.Not != nil { + if vd.validateSelf(s.Not, "", true) == nil { + vd.addError(&kind.Not{}) + } + } + + // allOf -- + if len(s.AllOf) > 0 { + var errors []*ValidationError + for _, sch := range s.AllOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + if vd.boolResult { + break + } + } + } + if len(errors) != 0 { + vd.addErrors(errors, &kind.AllOf{}) + } + } + + // anyOf + if len(s.AnyOf) > 0 { + var matched bool + var errors []*ValidationError + for _, sch := range s.AnyOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = true + // for uneval, all schemas must be evaluated + if vd.uneval.isEmpty() { + break + } + } + } + if !matched { + vd.addErrors(errors, &kind.AnyOf{}) + } + } + + // oneOf + if len(s.OneOf) > 0 { + var 
matched = -1 + var errors []*ValidationError + for i, sch := range s.OneOf { + if err := vd.validateSelf(sch, "", matched != -1); err != nil { + if matched == -1 { + errors = append(errors, err.(*ValidationError)) + } + } else { + if matched == -1 { + matched = i + } else { + vd.addError(&kind.OneOf{Subschemas: []int{matched, i}}) + break + } + } + } + if matched == -1 { + vd.addErrors(errors, &kind.OneOf{Subschemas: nil}) + } + } + + // if, then, else -- + if s.If != nil { + if vd.validateSelf(s.If, "", true) == nil { + if s.Then != nil { + vd.addErr(vd.validateSelf(s.Then, "", false)) + } + } else if s.Else != nil { + vd.addErr(vd.validateSelf(s.Else, "", false)) + } + } +} + +func (vd *validator) unevalValidate() { + s := vd.sch + + // unevaluatedProperties + if obj, ok := vd.v.(map[string]any); ok && s.UnevaluatedProperties != nil { + for pname := range vd.uneval.props { + if pvalue, ok := obj[pname]; ok { + vd.addErr(vd.validateVal(s.UnevaluatedProperties, pvalue, pname)) + } + } + vd.uneval.props = nil + } + + // unevaluatedItems + if arr, ok := vd.v.([]any); ok && s.UnevaluatedItems != nil { + for i := range vd.uneval.items { + vd.addErr(vd.validateVal(s.UnevaluatedItems, arr[i], strconv.Itoa(i))) + } + vd.uneval.items = nil + } +} + +// validation helpers -- + +func (vd *validator) validateSelf(sch *Schema, refKw string, boolResult bool) error { + scp := vd.scp.child(sch, refKw, vd.scp.vid) + uneval := unevalFrom(vd.v, sch, !vd.uneval.isEmpty()) + subvd := validator{ + v: vd.v, + vloc: vd.vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult || boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + uneval, err := subvd.validate() + if err == nil { + vd.uneval.merge(uneval) + } + return err +} + +func (vd *validator) validateVal(sch *Schema, v any, vtok string) error { + vloc := append(vd.vloc, vtok) + scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) validateValue(sch *Schema, v any, vpath []string) error { + vloc := append(vd.vloc, vpath...) 
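+	// editorial note: vloc extends the instance location with vpath so that
+	// errors reported for the subschema point at the nested value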
+ scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) metaResource(sch *Schema) *resource { + if sch != vd.meta { + return nil + } + ptr := "" + for _, tok := range vd.instanceLocation() { + ptr += "/" + ptr += escape(tok) + } + return vd.resources[jsonPointer(ptr)] +} + +func (vd *validator) handleMeta() { + res := vd.metaResource(vd.sch) + if res == nil { + return + } + sch := res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + vd.meta = sch + vd.sch = sch +} + +// reference validation -- + +func (vd *validator) validateRef(sch *Schema, kw string) error { + err := vd.validateSelf(sch, kw, false) + if err != nil { + refErr := vd.error(&kind.Reference{Keyword: kw, URL: sch.Location}) + verr := err.(*ValidationError) + if _, ok := verr.ErrorKind.(*kind.Group); ok { + refErr.Causes = verr.Causes + } else { + refErr.Causes = append(refErr.Causes, verr) + } + return refErr + } + return nil +} + +func (vd *validator) resolveRecursiveAnchor(fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if scp.sch.resource.RecursiveAnchor { + sch = scp.sch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) resolveDynamicAnchor(name string, fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if dsch, ok := scp.sch.resource.dynamicAnchors[name]; ok { + sch = dsch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) validateRefs() { + // $recursiveRef -- + if sch := vd.sch.RecursiveRef; sch != nil { + if sch.RecursiveAnchor { + sch = vd.resolveRecursiveAnchor(sch) + } + vd.addErr(vd.validateRef(sch, "$recursiveRef")) + } + + // $dynamicRef -- + if dref := vd.sch.DynamicRef; dref != nil { + sch := dref.Ref // initial target + if dref.Anchor != "" { + // $dynamicRef includes anchor + if sch.DynamicAnchor == dref.Anchor { + // initial target has matching $dynamicAnchor + sch = vd.resolveDynamicAnchor(dref.Anchor, sch) + } + } + vd.addErr(vd.validateRef(sch, "$dynamicRef")) + } +} + +// error helpers -- + +func (vd *validator) instanceLocation() []string { + return slices.Clone(vd.vloc) +} + +func (vd *validator) error(kind ErrorKind) *ValidationError { + if vd.boolResult { + return &ValidationError{} + } + return &ValidationError{ + SchemaURL: vd.sch.Location, + InstanceLocation: vd.instanceLocation(), + ErrorKind: kind, + Causes: nil, + } +} + +func (vd *validator) addErr(err error) { + if err != nil { + vd.errors = append(vd.errors, err.(*ValidationError)) + } +} + +func (vd *validator) addError(kind ErrorKind) { + vd.errors = append(vd.errors, vd.error(kind)) +} + +func (vd *validator) addErrors(errors []*ValidationError, kind ErrorKind) { + err := vd.error(kind) + err.Causes = errors + vd.errors = append(vd.errors, err) +} + +func (vd *validator) findMissing(obj map[string]any, reqd []string) []string { + var missing []string + for _, pname := range reqd { + if _, ok := obj[pname]; !ok { + if vd.boolResult { + return []string{} // non-nil + } + missing = append(missing, pname) + } + } + return missing +} + +// -- + +type scope struct { + sch *Schema + + // if empty, compute from self.sch and self.parent.sch. 
+	// not empty, only when there is a jump i.e., $ref, $XXXRef
+	refKeyword string
+
+	// unique id of the value being validated
+	// if two scopes validate the same value, they will have
+	// the same vid
+	vid int
+
+	parent *scope
+}
+
+func (sc *scope) child(sch *Schema, refKeyword string, vid int) *scope {
+	return &scope{sch, refKeyword, vid, sc}
+}
+
+func (sc *scope) checkCycle() *scope {
+	scp := sc.parent
+	for scp != nil {
+		if scp.vid != sc.vid {
+			break
+		}
+		if scp.sch == sc.sch {
+			return scp
+		}
+		scp = scp.parent
+	}
+	return nil
+}
+
+func (sc *scope) kwLoc() string {
+	var loc string
+	for sc.parent != nil {
+		if sc.refKeyword != "" {
+			loc = fmt.Sprintf("/%s%s", escape(sc.refKeyword), loc)
+		} else {
+			cur := sc.sch.Location
+			parent := sc.parent.sch.Location
+			loc = fmt.Sprintf("%s%s", cur[len(parent):], loc)
+		}
+		sc = sc.parent
+	}
+	return loc
+}
+
+// --
+
+type uneval struct {
+	props map[string]struct{}
+	items map[int]struct{}
+}
+
+func unevalFrom(v any, sch *Schema, callerNeeds bool) *uneval {
+	uneval := &uneval{}
+	switch v := v.(type) {
+	case map[string]any:
+		if !sch.allPropsEvaluated && (callerNeeds || sch.UnevaluatedProperties != nil) {
+			uneval.props = map[string]struct{}{}
+			for k := range v {
+				uneval.props[k] = struct{}{}
+			}
+		}
+	case []any:
+		if !sch.allItemsEvaluated && (callerNeeds || sch.UnevaluatedItems != nil) && sch.numItemsEvaluated < len(v) {
+			uneval.items = map[int]struct{}{}
+			for i := sch.numItemsEvaluated; i < len(v); i++ {
+				uneval.items[i] = struct{}{}
+			}
+		}
+	}
+	return uneval
+}
+
+func (ue *uneval) merge(other *uneval) {
+	for k := range ue.props {
+		if _, ok := other.props[k]; !ok {
+			delete(ue.props, k)
+		}
+	}
+	for i := range ue.items {
+		if _, ok := other.items[i]; !ok {
+			delete(ue.items, i)
+		}
+	}
+}
+
+func (ue *uneval) isEmpty() bool {
+	return len(ue.props) == 0 && len(ue.items) == 0
+}
+
+// --
+
+type ValidationError struct {
+	// absolute, dereferenced schema location.
+	SchemaURL string
+
+	// location of the JSON value within the instance being validated.
+	InstanceLocation []string
+
+	// kind of error
+	ErrorKind ErrorKind
+
+	// holds nested errors
+	Causes []*ValidationError
+}
+
+type ErrorKind interface {
+	KeywordPath() []string
+	LocalizedString(*message.Printer) string
+}
diff --git a/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go
new file mode 100644
index 000000000..c81cb700f
--- /dev/null
+++ b/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go
@@ -0,0 +1,111 @@
+package jsonschema
+
+// CompilerContext provides helpers for
+// compiling a [Vocabulary].
+type CompilerContext struct {
+	c *objCompiler
+}
+
+func (ctx *CompilerContext) Enqueue(schPath []string) *Schema {
+	ptr := ctx.c.up.ptr
+	for _, tok := range schPath {
+		ptr = ptr.append(tok)
+	}
+	return ctx.c.enqueuePtr(ptr)
+}
+
+// Vocabulary defines a set of keywords, their syntax and
+// their semantics.
+type Vocabulary struct {
+	// URL identifier for this Vocabulary.
+	URL string
+
+	// Schema that is used to validate the keywords that are introduced by this
+	// vocabulary.
+	Schema *Schema
+
+	// Subschemas lists the possible locations of subschemas introduced by
+	// this vocabulary.
+	Subschemas []SchemaPath
+
+	// Compile compiles the keywords (introduced by this vocabulary) in obj into [SchemaExt].
+	// If obj does not contain any keywords introduced by this vocabulary, a nil SchemaExt must
+	// be returned.
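+	//
+	// A minimal registration sketch (editorial; assumes the package's
+	// Compiler exposes RegisterVocabulary and that compileCustom is a
+	// user-written compile function):
+	//
+	//	c := jsonschema.NewCompiler()
+	//	c.RegisterVocabulary(&jsonschema.Vocabulary{
+	//		URL:     "https://example.com/vocab/custom",
+	//		Compile: compileCustom,
+	//	})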
+	Compile func(ctx *CompilerContext, obj map[string]any) (SchemaExt, error)
+}
+
+// --
+
+// SchemaExt is the compiled form of a vocabulary.
+type SchemaExt interface {
+	// Validate validates v against this extension; any errors
+	// found are reported to ctx.
+	Validate(ctx *ValidatorContext, v any)
+}
+
+// ValidatorContext provides helpers for
+// validating with [SchemaExt].
+type ValidatorContext struct {
+	vd *validator
+}
+
+// ValueLocation returns the location of the value as a jsonpath token array.
+func (ctx *ValidatorContext) ValueLocation() []string {
+	return ctx.vd.vloc
+}
+
+// Validate validates v with sch. vpath gives the path of v from the current context value.
+func (ctx *ValidatorContext) Validate(sch *Schema, v any, vpath []string) error {
+	switch len(vpath) {
+	case 0:
+		return ctx.vd.validateSelf(sch, "", false)
+	case 1:
+		return ctx.vd.validateVal(sch, v, vpath[0])
+	default:
+		return ctx.vd.validateValue(sch, v, vpath)
+	}
+}
+
+// EvaluatedProp marks the given property of the current object as evaluated.
+func (ctx *ValidatorContext) EvaluatedProp(pname string) {
+	delete(ctx.vd.uneval.props, pname)
+}
+
+// EvaluatedItem marks the item at the given index of the current array as evaluated.
+func (ctx *ValidatorContext) EvaluatedItem(index int) {
+	delete(ctx.vd.uneval.items, index)
+}
+
+// AddError reports a validation error of the given kind.
+func (ctx *ValidatorContext) AddError(k ErrorKind) {
+	ctx.vd.addError(k)
+}
+
+// AddErrors reports validation errors of the given kind.
+func (ctx *ValidatorContext) AddErrors(errors []*ValidationError, k ErrorKind) {
+	ctx.vd.addErrors(errors, k)
+}
+
+// AddErr reports the given err. This is typically used to report
+// the error created by subschema validation.
+//
+// NOTE that err must be of type *ValidationError.
+func (ctx *ValidatorContext) AddErr(err error) {
+	ctx.vd.addErr(err)
+}
+
+func (ctx *ValidatorContext) Equals(v1, v2 any) (bool, error) {
+	b, k := equals(v1, v2)
+	if k != nil {
+		return false, ctx.vd.error(k)
+	}
+	return b, nil
+}
+
+func (ctx *ValidatorContext) Duplicates(arr []any) (int, int, error) {
+	i, j, k := duplicates(arr)
+	if k != nil {
+		return -1, -1, ctx.vd.error(k)
+	}
+	return i, j, nil
+}
diff --git a/tools/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE b/tools/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE
new file mode 100644
index 000000000..e51324f9b
--- /dev/null
+++ b/tools/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2021 NYU Secure Systems Lab
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/tools/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go b/tools/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go
new file mode 100644
index 000000000..037a718ab
--- /dev/null
+++ b/tools/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go
@@ -0,0 +1,290 @@
+// Package encrypted provides a simple, secure system for encrypting data
+// symmetrically with a passphrase.
+//
+// It uses scrypt to derive a key from the passphrase and the NaCl secret box
+// cipher for authenticated encryption.
+package encrypted
+
+import (
+	"crypto/rand"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/nacl/secretbox"
+	"golang.org/x/crypto/scrypt"
+)
+
+const saltSize = 32
+
+const (
+	boxKeySize   = 32
+	boxNonceSize = 24
+)
+
+// KDFParameterStrength defines the KDF parameter strength level to be used for
+// encryption key derivation.
+type KDFParameterStrength uint8
+
+const (
+	// Legacy defines legacy scrypt parameters (N:2^15, r:8, p:1)
+	Legacy KDFParameterStrength = iota + 1
+	// Standard defines standard scrypt parameters targeting roughly 100ms of
+	// computation (N:2^16, r:8, p:1)
+	Standard
+	// OWASP defines OWASP recommended scrypt parameters (N:2^17, r:8, p:1)
+	OWASP
+)
+
+var (
+	// legacyParams represents old scrypt derivation parameters for backward
+	// compatibility.
+	legacyParams = scryptParams{
+		N: 32768, // 2^15
+		R: 8,
+		P: 1,
+	}
+
+	// standardParams defines scrypt parameters based on the scrypt creator's
+	// recommendation to time-box key derivation to about 100ms.
+	standardParams = scryptParams{
+		N: 65536, // 2^16
+		R: 8,
+		P: 1,
+	}
+
+	// owaspParams defines scrypt parameters recommended by OWASP
+	owaspParams = scryptParams{
+		N: 131072, // 2^17
+		R: 8,
+		P: 1,
+	}
+
+	// defaultParams defines scrypt parameters which will be used to generate a
+	// new key.
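+	// (Editorial note: scrypt memory use is roughly 128*N*r bytes, so the
+	// Legacy, Standard and OWASP levels cost about 32, 64 and 128 MiB per
+	// derivation respectively.)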
+ defaultParams = standardParams +) + +const ( + nameScrypt = "scrypt" + nameSecretBox = "nacl/secretbox" +) + +type data struct { + KDF scryptKDF `json:"kdf"` + Cipher secretBoxCipher `json:"cipher"` + Ciphertext []byte `json:"ciphertext"` +} + +type scryptParams struct { + N int `json:"N"` + R int `json:"r"` + P int `json:"p"` +} + +func (sp *scryptParams) Equal(in *scryptParams) bool { + return in != nil && sp.N == in.N && sp.P == in.P && sp.R == in.R +} + +func newScryptKDF(level KDFParameterStrength) (scryptKDF, error) { + salt := make([]byte, saltSize) + if err := fillRandom(salt); err != nil { + return scryptKDF{}, fmt.Errorf("unable to generate a random salt: %w", err) + } + + var params scryptParams + switch level { + case Legacy: + params = legacyParams + case Standard: + params = standardParams + case OWASP: + params = owaspParams + default: + // Fallback to default parameters + params = defaultParams + } + + return scryptKDF{ + Name: nameScrypt, + Params: params, + Salt: salt, + }, nil +} + +type scryptKDF struct { + Name string `json:"name"` + Params scryptParams `json:"params"` + Salt []byte `json:"salt"` +} + +func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) { + return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize) +} + +// CheckParams checks that the encoded KDF parameters are what we expect them to +// be. If we do not do this, an attacker could cause a DoS by tampering with +// them. +func (s *scryptKDF) CheckParams() error { + switch { + case legacyParams.Equal(&s.Params): + case standardParams.Equal(&s.Params): + case owaspParams.Equal(&s.Params): + default: + return errors.New("unsupported scrypt parameters") + } + + return nil +} + +func newSecretBoxCipher() (secretBoxCipher, error) { + nonce := make([]byte, boxNonceSize) + if err := fillRandom(nonce); err != nil { + return secretBoxCipher{}, err + } + return secretBoxCipher{ + Name: nameSecretBox, + Nonce: nonce, + }, nil +} + +type secretBoxCipher struct { + Name string `json:"name"` + Nonce []byte `json:"nonce"` + + encrypted bool +} + +func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte { + var keyBytes [boxKeySize]byte + var nonceBytes [boxNonceSize]byte + + if len(key) != len(keyBytes) { + panic("incorrect key size") + } + if len(s.Nonce) != len(nonceBytes) { + panic("incorrect nonce size") + } + + copy(keyBytes[:], key) + copy(nonceBytes[:], s.Nonce) + + // ensure that we don't re-use nonces + if s.encrypted { + panic("Encrypt must only be called once for each cipher instance") + } + s.encrypted = true + + return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes) +} + +func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) { + var keyBytes [boxKeySize]byte + var nonceBytes [boxNonceSize]byte + + if len(key) != len(keyBytes) { + panic("incorrect key size") + } + if len(s.Nonce) != len(nonceBytes) { + // return an error instead of panicking since the nonce is user input + return nil, errors.New("encrypted: incorrect nonce size") + } + + copy(keyBytes[:], key) + copy(nonceBytes[:], s.Nonce) + + res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes) + if !ok { + return nil, errors.New("encrypted: decryption failed") + } + return res, nil +} + +// Encrypt takes a passphrase and plaintext, and returns a JSON object +// containing ciphertext and the details necessary to decrypt it. 
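+//
+// A typical round trip (editorial sketch):
+//
+//	blob, err := encrypted.Encrypt([]byte("hello world"), []byte("passphrase"))
+//	// ... persist blob ...
+//	plain, err := encrypted.Decrypt(blob, []byte("passphrase"))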
+func Encrypt(plaintext, passphrase []byte) ([]byte, error) { + return EncryptWithCustomKDFParameters(plaintext, passphrase, Standard) +} + +// EncryptWithCustomKDFParameters takes a passphrase, the plaintext and a KDF +// parameter level (Legacy, Standard, or OWASP), and returns a JSON object +// containing ciphertext and the details necessary to decrypt it. +func EncryptWithCustomKDFParameters(plaintext, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) { + k, err := newScryptKDF(kdfLevel) + if err != nil { + return nil, err + } + key, err := k.Key(passphrase) + if err != nil { + return nil, err + } + + c, err := newSecretBoxCipher() + if err != nil { + return nil, err + } + + data := &data{ + KDF: k, + Cipher: c, + } + data.Ciphertext = c.Encrypt(plaintext, key) + + return json.Marshal(data) +} + +// Marshal encrypts the JSON encoding of v using passphrase. +func Marshal(v interface{}, passphrase []byte) ([]byte, error) { + return MarshalWithCustomKDFParameters(v, passphrase, Standard) +} + +// MarshalWithCustomKDFParameters encrypts the JSON encoding of v using passphrase. +func MarshalWithCustomKDFParameters(v interface{}, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) { + data, err := json.MarshalIndent(v, "", "\t") + if err != nil { + return nil, err + } + return EncryptWithCustomKDFParameters(data, passphrase, kdfLevel) +} + +// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and +// tries to decrypt it using passphrase. If successful, it returns the +// plaintext. +func Decrypt(ciphertext, passphrase []byte) ([]byte, error) { + data := &data{} + if err := json.Unmarshal(ciphertext, data); err != nil { + return nil, err + } + + if data.KDF.Name != nameScrypt { + return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name) + } + if data.Cipher.Name != nameSecretBox { + return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name) + } + if err := data.KDF.CheckParams(); err != nil { + return nil, err + } + + key, err := data.KDF.Key(passphrase) + if err != nil { + return nil, err + } + + return data.Cipher.Decrypt(data.Ciphertext, key) +} + +// Unmarshal decrypts the data using passphrase and unmarshals the resulting +// plaintext into the value pointed to by v. 
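+//
+// Editorial sketch pairing it with Marshal (cfg and Config are hypothetical):
+//
+//	blob, _ := encrypted.Marshal(cfg, passphrase) // encrypts JSON encoding of cfg
+//	var out Config
+//	_ = encrypted.Unmarshal(blob, &out, passphrase)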
+func Unmarshal(data []byte, v interface{}, passphrase []byte) error { + decrypted, err := Decrypt(data, passphrase) + if err != nil { + return err + } + return json.Unmarshal(decrypted, v) +} + +func fillRandom(b []byte) error { + _, err := io.ReadFull(rand.Reader, b) + return err +} diff --git a/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go index 915d5090d..08c36e74f 100644 --- a/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ b/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -1146,13 +1146,28 @@ func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { switch diff.Type { case DiffInsert: - _, _ = buff.WriteString("\x1b[32m") - _, _ = buff.WriteString(text) - _, _ = buff.WriteString("\x1b[0m") + lines := strings.Split(text, "\n") + for i, line := range lines { + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(line) + if i < len(lines)-1 { + _, _ = buff.WriteString("\x1b[0m\n") + } else { + _, _ = buff.WriteString("\x1b[0m") + } + } + case DiffDelete: - _, _ = buff.WriteString("\x1b[31m") - _, _ = buff.WriteString(text) - _, _ = buff.WriteString("\x1b[0m") + lines := strings.Split(text, "\n") + for i, line := range lines { + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(line) + if i < len(lines)-1 { + _, _ = buff.WriteString("\x1b[0m\n") + } else { + _, _ = buff.WriteString("\x1b[0m") + } + } case DiffEqual: _, _ = buff.WriteString(text) } @@ -1305,7 +1320,6 @@ func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Di // diffLinesToStrings splits two texts into a list of strings. Each string represents one line. func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, string, []string) { - // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' lineHash := make(map[string]int) @@ -1316,12 +1330,11 @@ func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, stri return intArrayToString(strIndexArray1), intArrayToString(strIndexArray2), lineArray } -// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []string. -func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string, lineHash map[string]int) []uint32 { - // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. +// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []index. 
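+// (Editorial note: index is the uint32-based line identifier introduced in
+// index.go; stringToIndex/indexesToString map identifiers to runes while
+// skipping the surrogate range 0xD800-0xDFFF, which is invalid in Go strings.)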
+func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string, lineHash map[string]int) []index { lineStart := 0 lineEnd := -1 - strs := []uint32{} + strs := []index{} for lineEnd < len(text)-1 { lineEnd = indexOf(text, "\n", lineStart) @@ -1335,11 +1348,11 @@ func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]str lineValue, ok := lineHash[line] if ok { - strs = append(strs, uint32(lineValue)) + strs = append(strs, index(lineValue)) } else { *lineArray = append(*lineArray, line) lineHash[line] = len(*lineArray) - 1 - strs = append(strs, uint32(len(*lineArray)-1)) + strs = append(strs, index(len(*lineArray)-1)) } } diff --git a/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/index.go b/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/index.go new file mode 100644 index 000000000..965a1c64b --- /dev/null +++ b/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/index.go @@ -0,0 +1,32 @@ +package diffmatchpatch + +type index uint32 + +const runeSkipStart = 0xd800 +const runeSkipEnd = 0xdfff + 1 +const runeMax = 0x110000 // next invalid code point + +func stringToIndex(text string) []index { + runes := []rune(text) + indexes := make([]index, len(runes)) + for i, r := range runes { + if r < runeSkipEnd { + indexes[i] = index(r) + } else { + indexes[i] = index(r) - (runeSkipEnd - runeSkipStart) + } + } + return indexes +} + +func indexesToString(indexes []index) string { + runes := make([]rune, len(indexes)) + for i, index := range indexes { + if index < runeSkipStart { + runes[i] = rune(index) + } else { + runes[i] = rune(index + (runeSkipEnd - runeSkipStart)) + } + } + return string(runes) +} diff --git a/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go index eb727bb59..573b6bf75 100644 --- a/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ b/tools/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -93,14 +93,14 @@ func runesIndex(r1, r2 []rune) int { return -1 } -func intArrayToString(ns []uint32) string { +func intArrayToString(ns []index) string { if len(ns) == 0 { return "" } b := []rune{} for _, n := range ns { - b = append(b, intToRune(n)) + b = append(b, intToRune(uint32(n))) } return string(b) } diff --git a/tools/vendor/github.com/google/gofuzz/doc.go b/tools/vendor/github.com/sigstore/fulcio/COPYRIGHT.txt similarity index 78% rename from tools/vendor/github.com/google/gofuzz/doc.go rename to tools/vendor/github.com/sigstore/fulcio/COPYRIGHT.txt index 9f9956d4a..7a01c8498 100644 --- a/tools/vendor/github.com/google/gofuzz/doc.go +++ b/tools/vendor/github.com/sigstore/fulcio/COPYRIGHT.txt @@ -1,5 +1,5 @@ -/* -Copyright 2014 Google Inc. All rights reserved. + +Copyright 2021 The Sigstore Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,7 +12,3 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/ - -// Package fuzz is a library for populating go objects with random values. 
-package fuzz diff --git a/tools/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/tools/vendor/github.com/sigstore/fulcio/LICENSE similarity index 99% rename from tools/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt rename to tools/vendor/github.com/sigstore/fulcio/LICENSE index 55ede8a42..261eeb9e9 100644 --- a/tools/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt +++ b/tools/vendor/github.com/sigstore/fulcio/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -187,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2015 xeipuuv + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/tools/vendor/github.com/sigstore/fulcio/pkg/certificate/doc.go b/tools/vendor/github.com/sigstore/fulcio/pkg/certificate/doc.go new file mode 100644 index 000000000..a61decc20 --- /dev/null +++ b/tools/vendor/github.com/sigstore/fulcio/pkg/certificate/doc.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package certificate contains helpers for getting data from Fulcio issued +// x509 certificates. +package certificate diff --git a/tools/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/tools/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go new file mode 100644 index 000000000..584aac971 --- /dev/null +++ b/tools/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go @@ -0,0 +1,439 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package certificate
+
+import (
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+)
+
+var (
+	// Deprecated: Use OIDIssuerV2
+	OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}
+	// Deprecated: Use OIDBuildTrigger
+	OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2}
+	// Deprecated: Use OIDSourceRepositoryDigest
+	OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3}
+	// Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest
+	OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4}
+	// Deprecated: Use SourceRepositoryURI
+	OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5}
+	// Deprecated: Use OIDSourceRepositoryRef
+	OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6}
+
+	OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
+	OIDIssuerV2  = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8}
+
+	// CI extensions
+	OIDBuildSignerURI                      = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
+	OIDBuildSignerDigest                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
+	OIDRunnerEnvironment                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
+	OIDSourceRepositoryURI                 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
+	OIDSourceRepositoryDigest              = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
+	OIDSourceRepositoryRef                 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
+	OIDSourceRepositoryIdentifier          = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
+	OIDSourceRepositoryOwnerURI            = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
+	OIDSourceRepositoryOwnerIdentifier     = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
+	OIDBuildConfigURI                      = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
+	OIDBuildConfigDigest                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
+	OIDBuildTrigger                        = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
+	OIDRunInvocationURI                    = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
+	OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22}
+)
+
+// Extensions contains all custom x509 extensions defined by Fulcio
+type Extensions struct {
+	// NB: New extensions must be added here and documented
+	// at docs/oidc-info.md
+
+	// The OIDC issuer. Should match `iss` claim of ID token or, in the case of
+	// a federated login like Dex, it should match the issuer URL of the
+	// upstream issuer. If the issuer is not set, the extensions are invalid and
+	// will fail to render.
+	Issuer string // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated)
+
+	// Deprecated
+	// Triggering event of the Github Workflow. Matches the `event_name` claim of ID
+	// tokens from Github Actions
+	GithubWorkflowTrigger string `json:"GithubWorkflowTrigger,omitempty" yaml:"github-workflow-trigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2
+
+	// Deprecated
+	// SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
+	// tokens from Github Actions
+	GithubWorkflowSHA string `json:"GithubWorkflowSHA,omitempty" yaml:"github-workflow-sha,omitempty"` // OID 1.3.6.1.4.1.57264.1.3
+
+	// Deprecated
+	// Name of Github Actions Workflow. Matches the `workflow` claim of the ID
+	// tokens from Github Actions
+	GithubWorkflowName string `json:"GithubWorkflowName,omitempty" yaml:"github-workflow-name,omitempty"` // OID 1.3.6.1.4.1.57264.1.4
+
+	// Deprecated
+	// Repository of the Github Actions Workflow.
Matches the `repository` claim of the ID + // tokens from Github Actions + GithubWorkflowRepository string `json:"GithubWorkflowRepository,omitempty" yaml:"github-workflow-repository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5 + + // Deprecated + // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens + // from Github Actions + GithubWorkflowRef string `json:"GithubWorkflowRef,omitempty" yaml:"github-workflow-ref,omitempty"` // 1.3.6.1.4.1.57264.1.6 + + // Reference to specific build instructions that are responsible for signing. + BuildSignerURI string `json:"BuildSignerURI,omitempty" yaml:"build-signer-uri,omitempty"` // 1.3.6.1.4.1.57264.1.9 + + // Immutable reference to the specific version of the build instructions that is responsible for signing. + BuildSignerDigest string `json:"BuildSignerDigest,omitempty" yaml:"build-signer-digest,omitempty"` // 1.3.6.1.4.1.57264.1.10 + + // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. + RunnerEnvironment string `json:"RunnerEnvironment,omitempty" yaml:"runner-environment,omitempty"` // 1.3.6.1.4.1.57264.1.11 + + // Source repository URL that the build was based on. + SourceRepositoryURI string `json:"SourceRepositoryURI,omitempty" yaml:"source-repository-uri,omitempty"` // 1.3.6.1.4.1.57264.1.12 + + // Immutable reference to a specific version of the source code that the build was based upon. + SourceRepositoryDigest string `json:"SourceRepositoryDigest,omitempty" yaml:"source-repository-digest,omitempty"` // 1.3.6.1.4.1.57264.1.13 + + // Source Repository Ref that the build run was based upon. + SourceRepositoryRef string `json:"SourceRepositoryRef,omitempty" yaml:"source-repository-ref,omitempty"` // 1.3.6.1.4.1.57264.1.14 + + // Immutable identifier for the source repository the workflow was based upon. + SourceRepositoryIdentifier string `json:"SourceRepositoryIdentifier,omitempty" yaml:"source-repository-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.15 + + // Source repository owner URL of the owner of the source repository that the build was based on. + SourceRepositoryOwnerURI string `json:"SourceRepositoryOwnerURI,omitempty" yaml:"source-repository-owner-uri,omitempty"` // 1.3.6.1.4.1.57264.1.16 + + // Immutable identifier for the owner of the source repository that the workflow was based upon. + SourceRepositoryOwnerIdentifier string `json:"SourceRepositoryOwnerIdentifier,omitempty" yaml:"source-repository-owner-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.17 + + // Build Config URL to the top-level/initiating build instructions. + BuildConfigURI string `json:"BuildConfigURI,omitempty" yaml:"build-config-uri,omitempty"` // 1.3.6.1.4.1.57264.1.18 + + // Immutable reference to the specific version of the top-level/initiating build instructions. + BuildConfigDigest string `json:"BuildConfigDigest,omitempty" yaml:"build-config-digest,omitempty"` // 1.3.6.1.4.1.57264.1.19 + + // Event or action that initiated the build. + BuildTrigger string `json:"BuildTrigger,omitempty" yaml:"build-trigger,omitempty"` // 1.3.6.1.4.1.57264.1.20 + + // Run Invocation URL to uniquely identify the build execution. + RunInvocationURI string `json:"RunInvocationURI,omitempty" yaml:"run-invocation-uri,omitempty"` // 1.3.6.1.4.1.57264.1.21 + + // Source repository visibility at the time of signing the certificate. 
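+	// (Editorial note: typically values such as "public" or "private".)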
+ SourceRepositoryVisibilityAtSigning string `json:"SourceRepositoryVisibilityAtSigning,omitempty" yaml:"source-repository-visibility-at-signing,omitempty"` // 1.3.6.1.4.1.57264.1.22 +} + +func (e Extensions) Render() ([]pkix.Extension, error) { + var exts []pkix.Extension + + // BEGIN: Deprecated + if e.Issuer != "" { + // deprecated issuer extension due to incorrect encoding + exts = append(exts, pkix.Extension{ + Id: OIDIssuer, + Value: []byte(e.Issuer), + }) + } else { + return nil, errors.New("extensions must have a non-empty issuer url") + } + if e.GithubWorkflowTrigger != "" { + exts = append(exts, pkix.Extension{ + Id: OIDGitHubWorkflowTrigger, + Value: []byte(e.GithubWorkflowTrigger), + }) + } + if e.GithubWorkflowSHA != "" { + exts = append(exts, pkix.Extension{ + Id: OIDGitHubWorkflowSHA, + Value: []byte(e.GithubWorkflowSHA), + }) + } + if e.GithubWorkflowName != "" { + exts = append(exts, pkix.Extension{ + Id: OIDGitHubWorkflowName, + Value: []byte(e.GithubWorkflowName), + }) + } + if e.GithubWorkflowRepository != "" { + exts = append(exts, pkix.Extension{ + Id: OIDGitHubWorkflowRepository, + Value: []byte(e.GithubWorkflowRepository), + }) + } + if e.GithubWorkflowRef != "" { + exts = append(exts, pkix.Extension{ + Id: OIDGitHubWorkflowRef, + Value: []byte(e.GithubWorkflowRef), + }) + } + // END: Deprecated + + // duplicate issuer with correct RFC 5280 encoding + if e.Issuer != "" { + // construct DER encoding of issuer string + val, err := asn1.MarshalWithParams(e.Issuer, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDIssuerV2, + Value: val, + }) + } else { + return nil, errors.New("extensions must have a non-empty issuer url") + } + + if e.BuildSignerURI != "" { + val, err := asn1.MarshalWithParams(e.BuildSignerURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildSignerURI, + Value: val, + }) + } + if e.BuildSignerDigest != "" { + val, err := asn1.MarshalWithParams(e.BuildSignerDigest, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildSignerDigest, + Value: val, + }) + } + if e.RunnerEnvironment != "" { + val, err := asn1.MarshalWithParams(e.RunnerEnvironment, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDRunnerEnvironment, + Value: val, + }) + } + if e.SourceRepositoryURI != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryURI, + Value: val, + }) + } + if e.SourceRepositoryDigest != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryDigest, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryDigest, + Value: val, + }) + } + if e.SourceRepositoryRef != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryRef, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryRef, + Value: val, + }) + } + if e.SourceRepositoryIdentifier != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryIdentifier, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryIdentifier, + Value: val, + }) + } + if e.SourceRepositoryOwnerURI != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryOwnerURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ 
+ Id: OIDSourceRepositoryOwnerURI, + Value: val, + }) + } + if e.SourceRepositoryOwnerIdentifier != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryOwnerIdentifier, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryOwnerIdentifier, + Value: val, + }) + } + if e.BuildConfigURI != "" { + val, err := asn1.MarshalWithParams(e.BuildConfigURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildConfigURI, + Value: val, + }) + } + if e.BuildConfigDigest != "" { + val, err := asn1.MarshalWithParams(e.BuildConfigDigest, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildConfigDigest, + Value: val, + }) + } + if e.BuildTrigger != "" { + val, err := asn1.MarshalWithParams(e.BuildTrigger, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildTrigger, + Value: val, + }) + } + if e.RunInvocationURI != "" { + val, err := asn1.MarshalWithParams(e.RunInvocationURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDRunInvocationURI, + Value: val, + }) + } + if e.SourceRepositoryVisibilityAtSigning != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryVisibilityAtSigning, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryVisibilityAtSigning, + Value: val, + }) + } + + return exts, nil +} + +func ParseExtensions(ext []pkix.Extension) (Extensions, error) { + out := Extensions{} + + for _, e := range ext { + switch { + // BEGIN: Deprecated + case e.Id.Equal(OIDIssuer): + out.Issuer = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowTrigger): + out.GithubWorkflowTrigger = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowSHA): + out.GithubWorkflowSHA = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowName): + out.GithubWorkflowName = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRepository): + out.GithubWorkflowRepository = string(e.Value) + case e.Id.Equal(OIDGitHubWorkflowRef): + out.GithubWorkflowRef = string(e.Value) + // END: Deprecated + case e.Id.Equal(OIDIssuerV2): + if err := ParseDERString(e.Value, &out.Issuer); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerURI): + if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerDigest): + if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunnerEnvironment): + if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryDigest): + if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryRef): + if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerURI): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { + return Extensions{}, err + } + case 
e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigURI): + if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigDigest): + if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildTrigger): + if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunInvocationURI): + if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning): + if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil { + return Extensions{}, err + } + } + } + + // We only ever return nil, but we leave the error in place so that we can add + // more complex parsing of fields in a backwards-compatible way if needed. + return out, nil +} + +// ParseDERString decodes a DER-encoded string and puts the value in parsedVal. +// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding. +func ParseDERString(val []byte, parsedVal *string) error { + rest, err := asn1.Unmarshal(val, parsedVal) + if err != nil { + return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %v", err) + } + if len(rest) != 0 { + return errors.New("unexpected trailing bytes in DER-encoded string") + } + return nil +} diff --git a/tools/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt b/tools/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt new file mode 100644 index 000000000..8b16ae012 --- /dev/null +++ b/tools/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt @@ -0,0 +1,14 @@ + +Copyright 2022 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/tools/vendor/github.com/sigstore/protobuf-specs/LICENSE similarity index 99% rename from tools/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt rename to tools/vendor/github.com/sigstore/protobuf-specs/LICENSE index 55ede8a42..d64569567 100644 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt +++ b/tools/vendor/github.com/sigstore/protobuf-specs/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2015 xeipuuv + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
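Taken together, Render and ParseExtensions above round-trip the Fulcio OID extension values through a []pkix.Extension slice. A minimal sketch of that round trip, assuming the vendored package resolves as github.com/sigstore/fulcio/pkg/certificate (the import path is an assumption; the types and functions are the ones added in this hunk):

    package main

    import (
    	"fmt"

    	"github.com/sigstore/fulcio/pkg/certificate" // assumed path for the package in this hunk
    )

    func main() {
    	in := certificate.Extensions{
    		// Issuer is required; Render emits it twice: once as a raw string
    		// under the deprecated OIDIssuer, once DER-encoded under OIDIssuerV2.
    		Issuer:       "https://token.actions.githubusercontent.com",
    		BuildTrigger: "push", // stored DER-encoded under OIDBuildTrigger
    	}

    	exts, err := in.Render()
    	if err != nil {
    		panic(err)
    	}

    	out, err := certificate.ParseExtensions(exts)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Issuer, out.BuildTrigger) // prints the original values
    }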
diff --git a/tools/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/tools/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go new file mode 100644 index 000000000..5f339b2d7 --- /dev/null +++ b/tools/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -0,0 +1,1299 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v6.30.2 +// source: sigstore_common.proto + +package v1 + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Only a subset of the secure hash standard algorithms are supported. +// UNSPECIFIED SHOULD NOT be used; the primary reason for its inclusion is to +// force any proto JSON serialization to emit the used hash algorithm, as the +// default option is to *omit* the default value of an enum (which is the +// first value, represented by '0'). +type HashAlgorithm int32 + +const ( + HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED HashAlgorithm = 0 + HashAlgorithm_SHA2_256 HashAlgorithm = 1 + HashAlgorithm_SHA2_384 HashAlgorithm = 2 + HashAlgorithm_SHA2_512 HashAlgorithm = 3 + HashAlgorithm_SHA3_256 HashAlgorithm = 4 + HashAlgorithm_SHA3_384 HashAlgorithm = 5 +) + +// Enum value maps for HashAlgorithm. +var ( + HashAlgorithm_name = map[int32]string{ + 0: "HASH_ALGORITHM_UNSPECIFIED", + 1: "SHA2_256", + 2: "SHA2_384", + 3: "SHA2_512", + 4: "SHA3_256", + 5: "SHA3_384", + } + HashAlgorithm_value = map[string]int32{ + "HASH_ALGORITHM_UNSPECIFIED": 0, + "SHA2_256": 1, + "SHA2_384": 2, + "SHA2_512": 3, + "SHA3_256": 4, + "SHA3_384": 5, + } +) + +func (x HashAlgorithm) Enum() *HashAlgorithm { + p := new(HashAlgorithm) + *p = x + return p +} + +func (x HashAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HashAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[0].Descriptor() +} + +func (HashAlgorithm) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[0] +} + +func (x HashAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HashAlgorithm.Descriptor instead.
+func (HashAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +// Details of a specific public key, capturing the key encoding method +// and signature algorithm. +// +// PublicKeyDetails captures the public key/hash algorithm combinations +// recommended in the Sigstore ecosystem. +// +// This is modelled as a linear set as we want to provide a small number of +// opinionated options instead of allowing every possible permutation. +// +// Any changes to this enum MUST be reflected in the algorithm registry. +// +// To avoid the possibility of contradicting formats such as PKCS1 with +// ED25519, the valid permutations are listed as a linear set instead of a +// cartesian set (i.e. one combined variable instead of two, one for encoding +// and one for the signature algorithm). +type PublicKeyDetails int32 + +const ( + PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED PublicKeyDetails = 0 + // RSA + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKCS1_RSA_PKCS1V5 PublicKeyDetails = 1 // See RFC8017 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKCS1_RSA_PSS PublicKeyDetails = 2 // See RFC8017 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_RSA_PKCS1V5 PublicKeyDetails = 3 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_RSA_PSS PublicKeyDetails = 4 + // RSA public key in PKIX format, PKCS#1v1.5 signature + PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 PublicKeyDetails = 9 + PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 PublicKeyDetails = 10 + PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 PublicKeyDetails = 11 + // RSA public key in PKIX format, RSASSA-PSS signature + PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256 PublicKeyDetails = 16 // See RFC4055 + PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256 PublicKeyDetails = 17 + PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256 PublicKeyDetails = 18 + // ECDSA + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P256_HMAC_SHA_256 PublicKeyDetails = 6 // See RFC6979 + PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 PublicKeyDetails = 5 // See NIST FIPS 186-4 + PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 PublicKeyDetails = 12 + PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 PublicKeyDetails = 13 + // Ed25519 + PublicKeyDetails_PKIX_ED25519 PublicKeyDetails = 7 // See RFC8032 + PublicKeyDetails_PKIX_ED25519_PH PublicKeyDetails = 8 + // These algorithms are deprecated and should not be used, but they + // were/are being used by most Sigstore client implementations. + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P384_SHA_256 PublicKeyDetails = 19 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P521_SHA_256 PublicKeyDetails = 20 + // LMS and LM-OTS + // + // These algorithms are deprecated and should not be used. + // Keys and signatures MAY be used by private Sigstore + // deployments, but will not be supported by the public + // good instance. + // + // USER WARNING: LMS and LM-OTS are both stateful signature schemes. + // Using them correctly requires discretion and careful consideration + // to ensure that individual secret keys are not used more than once. + // In addition, LM-OTS is a single-use scheme, meaning that it + // MUST NOT be used for more than one signature per LM-OTS key.
+ // If you cannot maintain these invariants, you MUST NOT use these + // schemes. + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_LMOTS_SHA256 PublicKeyDetails = 15 + // ML-DSA + // + // The ML_DSA_65 and ML_DSA_87 algorithms are the pure variants that + // take data to sign rather than the prehash variants (HashML-DSA), which + // take digests. While considered quantum-resistant, their usage + // involves tradeoffs in that signatures and keys are much larger, and + // this makes deployments more costly. + // + // USER WARNING: ML_DSA_65 and ML_DSA_87 are experimental algorithms. + // In the future they MAY be used by private Sigstore deployments, but + // they are not yet fully functional. This warning will be removed when + // these algorithms are widely supported by Sigstore clients and servers, + // but care should still be taken for production environments. + PublicKeyDetails_ML_DSA_65 PublicKeyDetails = 21 // See NIST FIPS 204 + PublicKeyDetails_ML_DSA_87 PublicKeyDetails = 22 +) + +// Enum value maps for PublicKeyDetails. +var ( + PublicKeyDetails_name = map[int32]string{ + 0: "PUBLIC_KEY_DETAILS_UNSPECIFIED", + 1: "PKCS1_RSA_PKCS1V5", + 2: "PKCS1_RSA_PSS", + 3: "PKIX_RSA_PKCS1V5", + 4: "PKIX_RSA_PSS", + 9: "PKIX_RSA_PKCS1V15_2048_SHA256", + 10: "PKIX_RSA_PKCS1V15_3072_SHA256", + 11: "PKIX_RSA_PKCS1V15_4096_SHA256", + 16: "PKIX_RSA_PSS_2048_SHA256", + 17: "PKIX_RSA_PSS_3072_SHA256", + 18: "PKIX_RSA_PSS_4096_SHA256", + 6: "PKIX_ECDSA_P256_HMAC_SHA_256", + 5: "PKIX_ECDSA_P256_SHA_256", + 12: "PKIX_ECDSA_P384_SHA_384", + 13: "PKIX_ECDSA_P521_SHA_512", + 7: "PKIX_ED25519", + 8: "PKIX_ED25519_PH", + 19: "PKIX_ECDSA_P384_SHA_256", + 20: "PKIX_ECDSA_P521_SHA_256", + 14: "LMS_SHA256", + 15: "LMOTS_SHA256", + 21: "ML_DSA_65", + 22: "ML_DSA_87", + } + PublicKeyDetails_value = map[string]int32{ + "PUBLIC_KEY_DETAILS_UNSPECIFIED": 0, + "PKCS1_RSA_PKCS1V5": 1, + "PKCS1_RSA_PSS": 2, + "PKIX_RSA_PKCS1V5": 3, + "PKIX_RSA_PSS": 4, + "PKIX_RSA_PKCS1V15_2048_SHA256": 9, + "PKIX_RSA_PKCS1V15_3072_SHA256": 10, + "PKIX_RSA_PKCS1V15_4096_SHA256": 11, + "PKIX_RSA_PSS_2048_SHA256": 16, + "PKIX_RSA_PSS_3072_SHA256": 17, + "PKIX_RSA_PSS_4096_SHA256": 18, + "PKIX_ECDSA_P256_HMAC_SHA_256": 6, + "PKIX_ECDSA_P256_SHA_256": 5, + "PKIX_ECDSA_P384_SHA_384": 12, + "PKIX_ECDSA_P521_SHA_512": 13, + "PKIX_ED25519": 7, + "PKIX_ED25519_PH": 8, + "PKIX_ECDSA_P384_SHA_256": 19, + "PKIX_ECDSA_P521_SHA_256": 20, + "LMS_SHA256": 14, + "LMOTS_SHA256": 15, + "ML_DSA_65": 21, + "ML_DSA_87": 22, + } +) + +func (x PublicKeyDetails) Enum() *PublicKeyDetails { + p := new(PublicKeyDetails) + *p = x + return p +} + +func (x PublicKeyDetails) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PublicKeyDetails) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[1].Descriptor() +} + +func (PublicKeyDetails) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[1] +} + +func (x PublicKeyDetails) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PublicKeyDetails.Descriptor instead.
+func (PublicKeyDetails) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +type SubjectAlternativeNameType int32 + +const ( + SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED SubjectAlternativeNameType = 0 + SubjectAlternativeNameType_EMAIL SubjectAlternativeNameType = 1 + SubjectAlternativeNameType_URI SubjectAlternativeNameType = 2 + // OID 1.3.6.1.4.1.57264.1.7 + // See https://github.com/sigstore/fulcio/blob/main/docs/oid-info.md#1361415726417--othername-san + // for more details. + SubjectAlternativeNameType_OTHER_NAME SubjectAlternativeNameType = 3 +) + +// Enum value maps for SubjectAlternativeNameType. +var ( + SubjectAlternativeNameType_name = map[int32]string{ + 0: "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED", + 1: "EMAIL", + 2: "URI", + 3: "OTHER_NAME", + } + SubjectAlternativeNameType_value = map[string]int32{ + "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED": 0, + "EMAIL": 1, + "URI": 2, + "OTHER_NAME": 3, + } +) + +func (x SubjectAlternativeNameType) Enum() *SubjectAlternativeNameType { + p := new(SubjectAlternativeNameType) + *p = x + return p +} + +func (x SubjectAlternativeNameType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubjectAlternativeNameType) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[2].Descriptor() +} + +func (SubjectAlternativeNameType) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[2] +} + +func (x SubjectAlternativeNameType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubjectAlternativeNameType.Descriptor instead. +func (SubjectAlternativeNameType) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +// HashOutput captures a digest of a 'message' (generic octet sequence) +// and the corresponding hash algorithm used. +type HashOutput struct { + state protoimpl.MessageState `protogen:"open.v1"` + Algorithm HashAlgorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"algorithm,omitempty"` + // These are the raw octets of the message digest as computed by + // the hash algorithm. + Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HashOutput) Reset() { + *x = HashOutput{} + mi := &file_sigstore_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HashOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashOutput) ProtoMessage() {} + +func (x *HashOutput) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashOutput.ProtoReflect.Descriptor instead.
+func (*HashOutput) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +func (x *HashOutput) GetAlgorithm() HashAlgorithm { + if x != nil { + return x.Algorithm + } + return HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED +} + +func (x *HashOutput) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +// MessageSignature stores the computed signature over a message. +type MessageSignature struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Message digest can be used to identify the artifact. + // Clients MUST NOT attempt to use this digest to verify the associated + // signature; it is intended solely for identification. + MessageDigest *HashOutput `protobuf:"bytes,1,opt,name=message_digest,json=messageDigest,proto3" json:"message_digest,omitempty"` + // The raw bytes as returned from the signature algorithm. + // The signature algorithm (and so the format of the signature bytes) + // are determined by the contents of the 'verification_material', + // either a key-pair or a certificate. If using a certificate, the + // certificate contains the required information on the signature + // algorithm. + // When using a key pair, the algorithm MUST be part of the public + // key, which MUST be communicated out-of-band. + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageSignature) Reset() { + *x = MessageSignature{} + mi := &file_sigstore_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageSignature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageSignature) ProtoMessage() {} + +func (x *MessageSignature) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageSignature.ProtoReflect.Descriptor instead. +func (*MessageSignature) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +func (x *MessageSignature) GetMessageDigest() *HashOutput { + if x != nil { + return x.MessageDigest + } + return nil +} + +func (x *MessageSignature) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +// LogId captures the identity of a transparency log. +type LogId struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The unique identity of the log, represented by its public key. + KeyId []byte `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LogId) Reset() { + *x = LogId{} + mi := &file_sigstore_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LogId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogId) ProtoMessage() {} + +func (x *LogId) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogId.ProtoReflect.Descriptor instead. 
+func (*LogId) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +func (x *LogId) GetKeyId() []byte { + if x != nil { + return x.KeyId + } + return nil +} + +// This message holds an RFC 3161 timestamp. +type RFC3161SignedTimestamp struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The signed timestamp is the DER-encoded TimeStampResponse. + // See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2 + SignedTimestamp []byte `protobuf:"bytes,1,opt,name=signed_timestamp,json=signedTimestamp,proto3" json:"signed_timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RFC3161SignedTimestamp) Reset() { + *x = RFC3161SignedTimestamp{} + mi := &file_sigstore_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RFC3161SignedTimestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RFC3161SignedTimestamp) ProtoMessage() {} + +func (x *RFC3161SignedTimestamp) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RFC3161SignedTimestamp.ProtoReflect.Descriptor instead. +func (*RFC3161SignedTimestamp) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{3} +} + +func (x *RFC3161SignedTimestamp) GetSignedTimestamp() []byte { + if x != nil { + return x.SignedTimestamp + } + return nil +} + +type PublicKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded public key; the encoding method is specified by the + // key_details attribute. + RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3,oneof" json:"raw_bytes,omitempty"` + // Key encoding and signature algorithm to use for this key. + KeyDetails PublicKeyDetails `protobuf:"varint,2,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"` + // Optional validity period for this key, *inclusive* of the endpoints. + ValidFor *TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3,oneof" json:"valid_for,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + mi := &file_sigstore_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead.
+func (*PublicKey) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{4} +} + +func (x *PublicKey) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +func (x *PublicKey) GetKeyDetails() PublicKeyDetails { + if x != nil { + return x.KeyDetails + } + return PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED +} + +func (x *PublicKey) GetValidFor() *TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +// PublicKeyIdentifier can be used to identify an (out-of-band) delivered +// key, to verify a signature. +type PublicKeyIdentifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Optional unauthenticated hint on which key to use. + // The format of the hint must be agreed upon out of band by the + // signer and the verifiers, and so is not subject to this + // specification. + // An example use case is specifying the public key to use from a + // trusted key-ring. + // Implementors are RECOMMENDED to derive the value from the public + // key as described in RFC 6962. + Hint string `protobuf:"bytes,1,opt,name=hint,proto3" json:"hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKeyIdentifier) Reset() { + *x = PublicKeyIdentifier{} + mi := &file_sigstore_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKeyIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKeyIdentifier) ProtoMessage() {} + +func (x *PublicKeyIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKeyIdentifier.ProtoReflect.Descriptor instead. +func (*PublicKeyIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{5} +} + +func (x *PublicKeyIdentifier) GetHint() string { + if x != nil { + return x.Hint + } + return "" +} + +// An ASN.1 OBJECT IDENTIFIER +type ObjectIdentifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id []int32 `protobuf:"varint,1,rep,packed,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObjectIdentifier) Reset() { + *x = ObjectIdentifier{} + mi := &file_sigstore_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObjectIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifier) ProtoMessage() {} + +func (x *ObjectIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifier.ProtoReflect.Descriptor instead. +func (*ObjectIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{6} +} + +func (x *ObjectIdentifier) GetId() []int32 { + if x != nil { + return x.Id + } + return nil +} + +// An OID and the corresponding (byte) value.
+type ObjectIdentifierValuePair struct { + state protoimpl.MessageState `protogen:"open.v1"` + Oid *ObjectIdentifier `protobuf:"bytes,1,opt,name=oid,proto3" json:"oid,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObjectIdentifierValuePair) Reset() { + *x = ObjectIdentifierValuePair{} + mi := &file_sigstore_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObjectIdentifierValuePair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifierValuePair) ProtoMessage() {} + +func (x *ObjectIdentifierValuePair) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifierValuePair.ProtoReflect.Descriptor instead. +func (*ObjectIdentifierValuePair) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{7} +} + +func (x *ObjectIdentifierValuePair) GetOid() *ObjectIdentifier { + if x != nil { + return x.Oid + } + return nil +} + +func (x *ObjectIdentifierValuePair) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type DistinguishedName struct { + state protoimpl.MessageState `protogen:"open.v1"` + Organization string `protobuf:"bytes,1,opt,name=organization,proto3" json:"organization,omitempty"` + CommonName string `protobuf:"bytes,2,opt,name=common_name,json=commonName,proto3" json:"common_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DistinguishedName) Reset() { + *x = DistinguishedName{} + mi := &file_sigstore_common_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DistinguishedName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistinguishedName) ProtoMessage() {} + +func (x *DistinguishedName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistinguishedName.ProtoReflect.Descriptor instead. +func (*DistinguishedName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{8} +} + +func (x *DistinguishedName) GetOrganization() string { + if x != nil { + return x.Organization + } + return "" +} + +func (x *DistinguishedName) GetCommonName() string { + if x != nil { + return x.CommonName + } + return "" +} + +type X509Certificate struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded X.509 certificate. 
+ RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *X509Certificate) Reset() { + *x = X509Certificate{} + mi := &file_sigstore_common_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *X509Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509Certificate) ProtoMessage() {} + +func (x *X509Certificate) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509Certificate.ProtoReflect.Descriptor instead. +func (*X509Certificate) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{9} +} + +func (x *X509Certificate) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +type SubjectAlternativeName struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type SubjectAlternativeNameType `protobuf:"varint,1,opt,name=type,proto3,enum=dev.sigstore.common.v1.SubjectAlternativeNameType" json:"type,omitempty"` + // Types that are valid to be assigned to Identity: + // + // *SubjectAlternativeName_Regexp + // *SubjectAlternativeName_Value + Identity isSubjectAlternativeName_Identity `protobuf_oneof:"identity"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SubjectAlternativeName) Reset() { + *x = SubjectAlternativeName{} + mi := &file_sigstore_common_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SubjectAlternativeName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAlternativeName) ProtoMessage() {} + +func (x *SubjectAlternativeName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAlternativeName.ProtoReflect.Descriptor instead. +func (*SubjectAlternativeName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{10} +} + +func (x *SubjectAlternativeName) GetType() SubjectAlternativeNameType { + if x != nil { + return x.Type + } + return SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED +} + +func (x *SubjectAlternativeName) GetIdentity() isSubjectAlternativeName_Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (x *SubjectAlternativeName) GetRegexp() string { + if x != nil { + if x, ok := x.Identity.(*SubjectAlternativeName_Regexp); ok { + return x.Regexp + } + } + return "" +} + +func (x *SubjectAlternativeName) GetValue() string { + if x != nil { + if x, ok := x.Identity.(*SubjectAlternativeName_Value); ok { + return x.Value + } + } + return "" +} + +type isSubjectAlternativeName_Identity interface { + isSubjectAlternativeName_Identity() +} + +type SubjectAlternativeName_Regexp struct { + // A regular expression describing the expected value for + // the SAN. 
+ Regexp string `protobuf:"bytes,2,opt,name=regexp,proto3,oneof"` +} + +type SubjectAlternativeName_Value struct { + // The exact value to match against. + Value string `protobuf:"bytes,3,opt,name=value,proto3,oneof"` +} + +func (*SubjectAlternativeName_Regexp) isSubjectAlternativeName_Identity() {} + +func (*SubjectAlternativeName_Value) isSubjectAlternativeName_Identity() {} + +// A collection of X.509 certificates. +// +// This "chain" can be used in multiple contexts, such as providing a root CA +// certificate within a TUF root of trust or multiple untrusted certificates for +// the purpose of chain building. +type X509CertificateChain struct { + state protoimpl.MessageState `protogen:"open.v1"` + // One or more DER-encoded certificates. + // + // In some contexts (such as `VerificationMaterial.x509_certificate_chain`), this sequence + // has an imposed order. Unless explicitly specified, there is otherwise no + // guaranteed order. + Certificates []*X509Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *X509CertificateChain) Reset() { + *x = X509CertificateChain{} + mi := &file_sigstore_common_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *X509CertificateChain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509CertificateChain) ProtoMessage() {} + +func (x *X509CertificateChain) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509CertificateChain.ProtoReflect.Descriptor instead. +func (*X509CertificateChain) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{11} +} + +func (x *X509CertificateChain) GetCertificates() []*X509Certificate { + if x != nil { + return x.Certificates + } + return nil +} + +// The time range is closed and includes both the start and end times +// (i.e., [start, end]). +// End is optional to be able to capture a period that has started but +// has no known end. +type TimeRange struct { + state protoimpl.MessageState `protogen:"open.v1"` + Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3,oneof" json:"end,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimeRange) Reset() { + *x = TimeRange{} + mi := &file_sigstore_common_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimeRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeRange) ProtoMessage() {} + +func (x *TimeRange) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeRange.ProtoReflect.Descriptor instead.
+func (*TimeRange) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{12} +} + +func (x *TimeRange) GetStart() *timestamppb.Timestamp { + if x != nil { + return x.Start + } + return nil +} + +func (x *TimeRange) GetEnd() *timestamppb.Timestamp { + if x != nil { + return x.End + } + return nil +} + +var File_sigstore_common_proto protoreflect.FileDescriptor + +var file_sigstore_common_proto_rawDesc = string([]byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x69, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x43, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x80, 0x01, 0x0a, + 0x10, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0d, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, + 0x23, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6b, + 0x65, 0x79, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x16, 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, + 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xd9, + 0x01, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, + 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 
0x73, 0x88, 0x01, 0x01, 0x12, 0x49, + 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0a, 0x6b, + 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x09, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, + 0x01, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x42, 0x0c, 0x0a, 0x0a, + 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x22, 0x29, 0x0a, 0x13, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x68, 0x69, 0x6e, 0x74, 0x22, 0x27, 0x0a, 0x10, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, + 0x0a, 0x19, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x3a, 0x0a, 0x03, 0x6f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x58, 0x0a, + 0x11, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x33, 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, + 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, + 0x16, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 
0x2e, 0x53, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x18, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x12, 0x16, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x63, 0x0a, + 0x14, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x73, 0x22, 0x78, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x30, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x31, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x65, 0x6e, 0x64, 0x2a, 0x75, 0x0a, 0x0d, + 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1e, 0x0a, + 0x1a, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x53, 0x48, 0x41, 0x32, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 0x48, 0x41, 0x32, 0x5f, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, + 0x32, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, + 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x33, 0x38, + 0x34, 0x10, 0x05, 0x2a, 0x8f, 0x05, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, + 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x11, + 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, + 0x35, 0x10, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x15, 0x0a, 0x0d, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x02, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x18, + 0x0a, 0x10, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x35, 0x10, 0x03, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, + 0x5f, 0x52, 
0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x04, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x21, + 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x31, 0x35, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x09, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0b, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x10, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x11, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, + 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x12, 0x12, 0x24, 0x0a, 0x1c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x32, 0x35, 0x36, 0x5f, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x06, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, + 0x35, 0x36, 0x10, 0x05, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x33, 0x38, 0x34, 0x10, + 0x0c, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x0d, 0x12, 0x10, + 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x07, + 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x5f, 0x50, 0x48, 0x10, 0x08, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, + 0x44, 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, 0x36, + 0x10, 0x13, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, + 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x14, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x12, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0e, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x4c, + 0x4d, 0x4f, 0x54, 0x53, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x1a, 0x02, 0x08, + 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x36, 0x35, 0x10, 0x15, + 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x38, 0x37, 0x10, 0x16, 0x22, + 0x04, 0x08, 0x17, 0x10, 0x32, 0x2a, 0x6f, 0x0a, 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x29, 0x53, 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, + 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 
0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, + 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, + 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, + 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, + 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_common_proto_rawDescOnce sync.Once + file_sigstore_common_proto_rawDescData []byte +) + +func file_sigstore_common_proto_rawDescGZIP() []byte { + file_sigstore_common_proto_rawDescOnce.Do(func() { + file_sigstore_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_common_proto_rawDesc), len(file_sigstore_common_proto_rawDesc))) + }) + return file_sigstore_common_proto_rawDescData +} + +var file_sigstore_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_sigstore_common_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_sigstore_common_proto_goTypes = []any{ + (HashAlgorithm)(0), // 0: dev.sigstore.common.v1.HashAlgorithm + (PublicKeyDetails)(0), // 1: dev.sigstore.common.v1.PublicKeyDetails + (SubjectAlternativeNameType)(0), // 2: dev.sigstore.common.v1.SubjectAlternativeNameType + (*HashOutput)(nil), // 3: dev.sigstore.common.v1.HashOutput + (*MessageSignature)(nil), // 4: dev.sigstore.common.v1.MessageSignature + (*LogId)(nil), // 5: dev.sigstore.common.v1.LogId + (*RFC3161SignedTimestamp)(nil), // 6: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*PublicKey)(nil), // 7: dev.sigstore.common.v1.PublicKey + (*PublicKeyIdentifier)(nil), // 8: dev.sigstore.common.v1.PublicKeyIdentifier + (*ObjectIdentifier)(nil), // 9: dev.sigstore.common.v1.ObjectIdentifier + (*ObjectIdentifierValuePair)(nil), // 10: dev.sigstore.common.v1.ObjectIdentifierValuePair + (*DistinguishedName)(nil), // 11: dev.sigstore.common.v1.DistinguishedName + (*X509Certificate)(nil), // 12: dev.sigstore.common.v1.X509Certificate + (*SubjectAlternativeName)(nil), // 13: dev.sigstore.common.v1.SubjectAlternativeName + (*X509CertificateChain)(nil), // 14: dev.sigstore.common.v1.X509CertificateChain + (*TimeRange)(nil), // 15: dev.sigstore.common.v1.TimeRange + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp +} +var file_sigstore_common_proto_depIdxs = []int32{ + 0, // 0: dev.sigstore.common.v1.HashOutput.algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 3, // 1: dev.sigstore.common.v1.MessageSignature.message_digest:type_name -> dev.sigstore.common.v1.HashOutput + 1, // 2: dev.sigstore.common.v1.PublicKey.key_details:type_name -> dev.sigstore.common.v1.PublicKeyDetails + 15, // 3: dev.sigstore.common.v1.PublicKey.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 9, // 4: 
dev.sigstore.common.v1.ObjectIdentifierValuePair.oid:type_name -> dev.sigstore.common.v1.ObjectIdentifier + 2, // 5: dev.sigstore.common.v1.SubjectAlternativeName.type:type_name -> dev.sigstore.common.v1.SubjectAlternativeNameType + 12, // 6: dev.sigstore.common.v1.X509CertificateChain.certificates:type_name -> dev.sigstore.common.v1.X509Certificate + 16, // 7: dev.sigstore.common.v1.TimeRange.start:type_name -> google.protobuf.Timestamp + 16, // 8: dev.sigstore.common.v1.TimeRange.end:type_name -> google.protobuf.Timestamp + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sigstore_common_proto_init() } +func file_sigstore_common_proto_init() { + if File_sigstore_common_proto != nil { + return + } + file_sigstore_common_proto_msgTypes[4].OneofWrappers = []any{} + file_sigstore_common_proto_msgTypes[10].OneofWrappers = []any{ + (*SubjectAlternativeName_Regexp)(nil), + (*SubjectAlternativeName_Value)(nil), + } + file_sigstore_common_proto_msgTypes[12].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_common_proto_rawDesc), len(file_sigstore_common_proto_rawDesc)), + NumEnums: 3, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_common_proto_goTypes, + DependencyIndexes: file_sigstore_common_proto_depIdxs, + EnumInfos: file_sigstore_common_proto_enumTypes, + MessageInfos: file_sigstore_common_proto_msgTypes, + }.Build() + File_sigstore_common_proto = out.File + file_sigstore_common_proto_goTypes = nil + file_sigstore_common_proto_depIdxs = nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt b/tools/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt new file mode 100644 index 000000000..7a01c8498 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/COPYRIGHT.txt @@ -0,0 +1,14 @@ + +Copyright 2021 The Sigstore Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/tools/vendor/github.com/sigstore/sigstore/LICENSE b/tools/vendor/github.com/sigstore/sigstore/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
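// Illustrative sketch, not part of the vendored patch: with the generated
// dev.sigstore.common.v1 types registered above, callers construct and marshal
// them like any other protobuf-go message ("digest" is a caller-supplied
// SHA-256 value; proto is google.golang.org/protobuf/proto):
//
//	h := &v1.HashOutput{Algorithm: v1.HashAlgorithm_SHA2_256, Digest: digest}
//	raw, err := proto.Marshal(h)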
diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go new file mode 100644 index 000000000..348f47bdc --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go @@ -0,0 +1,173 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cryptoutils implements support for working with encoded certificates, public keys, and private keys +package cryptoutils + +import ( + "bytes" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "time" +) + +const ( + // CertificatePEMType is the string "CERTIFICATE" to be used during PEM encoding and decoding + CertificatePEMType PEMType = "CERTIFICATE" +) + +// MarshalCertificateToPEM converts the provided X509 certificate into PEM format +func MarshalCertificateToPEM(cert *x509.Certificate) ([]byte, error) { + if cert == nil { + return nil, errors.New("nil certificate provided") + } + return PEMEncode(CertificatePEMType, cert.Raw), nil +} + +// MarshalCertificatesToPEM converts the provided X509 certificates into PEM format +func MarshalCertificatesToPEM(certs []*x509.Certificate) ([]byte, error) { + buf := bytes.Buffer{} + for _, cert := range certs { + pemBytes, err := MarshalCertificateToPEM(cert) + if err != nil { + return nil, err + } + _, _ = buf.Write(pemBytes) + } + return buf.Bytes(), nil +} + +// UnmarshalCertificatesFromPEM extracts one or more X509 certificates from the provided +// byte slice, which is assumed to be in PEM-encoded format. +func UnmarshalCertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) { + result := []*x509.Certificate{} + remaining := pemBytes + remaining = bytes.TrimSpace(remaining) + + for len(remaining) > 0 { + var certDer *pem.Block + certDer, remaining = pem.Decode(remaining) + + if certDer == nil { + return nil, errors.New("error during PEM decoding") + } + + cert, err := x509.ParseCertificate(certDer.Bytes) + if err != nil { + return nil, err + } + result = append(result, cert) + } + return result, nil +} + +// UnmarshalCertificatesFromPEMLimited extracts one or more X509 certificates from the provided +// byte slice, which is assumed to be in PEM-encoded format. Fails after a specified +// number of iterations. A reasonable limit is 10 iterations. 
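+//
+// Illustrative use, a sketch rather than upstream code (chainPEM is a
+// caller-supplied byte slice of untrusted PEM data):
+//
+//	certs, err := UnmarshalCertificatesFromPEMLimited(chainPEM, 10)
+//	if err != nil {
+//		return err
+//	}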
+func UnmarshalCertificatesFromPEMLimited(pemBytes []byte, iterations int) ([]*x509.Certificate, error) { + result := []*x509.Certificate{} + remaining := pemBytes + remaining = bytes.TrimSpace(remaining) + + count := 0 + for len(remaining) > 0 { + if count == iterations { + return nil, errors.New("too many certificates specified in PEM block") + } + var certDer *pem.Block + certDer, remaining = pem.Decode(remaining) + + if certDer == nil { + return nil, errors.New("error during PEM decoding") + } + + cert, err := x509.ParseCertificate(certDer.Bytes) + if err != nil { + return nil, err + } + result = append(result, cert) + count++ + } + return result, nil +} + +// LoadCertificatesFromPEM extracts one or more X509 certificates from the provided +// io.Reader. +func LoadCertificatesFromPEM(pem io.Reader) ([]*x509.Certificate, error) { + fileBytes, err := io.ReadAll(pem) + if err != nil { + return nil, err + } + return UnmarshalCertificatesFromPEM(fileBytes) +} + +func formatTime(t time.Time) string { + return t.UTC().Format(time.RFC3339) +} + +// CheckExpiration verifies that epoch is during the validity period of +// the certificate provided. +// +// It returns nil if issueTime < epoch < expirationTime, and an error otherwise. +func CheckExpiration(cert *x509.Certificate, epoch time.Time) error { + if cert == nil { + return errors.New("certificate is nil") + } + if cert.NotAfter.Before(epoch) { + return fmt.Errorf("certificate expiration time %s is before %s", formatTime(cert.NotAfter), formatTime(epoch)) + } + if cert.NotBefore.After(epoch) { + return fmt.Errorf("certificate issued time %s is after %s", formatTime(cert.NotBefore), formatTime(epoch)) + } + return nil +} + +// ParseCSR parses a PKCS#10 PEM-encoded CSR. +func ParseCSR(csr []byte) (*x509.CertificateRequest, error) { + derBlock, _ := pem.Decode(csr) + if derBlock == nil || derBlock.Bytes == nil { + return nil, errors.New("no CSR found while decoding") + } + correctType := false + acceptedHeaders := []string{"CERTIFICATE REQUEST", "NEW CERTIFICATE REQUEST"} + for _, v := range acceptedHeaders { + if derBlock.Type == v { + correctType = true + } + } + if !correctType { + return nil, fmt.Errorf("DER type %v is not of any type %v for CSR", derBlock.Type, acceptedHeaders) + } + + return x509.ParseCertificateRequest(derBlock.Bytes) +} + +// GenerateSerialNumber creates a compliant serial number as per RFC 5280 4.1.2.2. +// Serial numbers must be positive, and can be no longer than 20 bytes. +// The serial number is generated with 159 bits, so that the first bit will always +// be 0, resulting in a positive serial number. +func GenerateSerialNumber() (*big.Int, error) { + // Pick a random number in [0, 2^159). + serial, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) + if err != nil { + return nil, errors.New("error generating serial number") + } + return serial, nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go new file mode 100644 index 000000000..4e7e73d55 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cryptoutils contains utilities related to handling cryptographic materials. +package cryptoutils diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go new file mode 100644 index 000000000..3fa6e7ba5 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go @@ -0,0 +1,31 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutils + +import ( + "encoding/pem" +) + +// PEMType is a specific type for string constants used during PEM encoding and decoding +type PEMType string + +// PEMEncode encodes the specified byte slice in PEM format using the provided type string +func PEMEncode(typeStr PEMType, bytes []byte) []byte { + return pem.EncodeToMemory(&pem.Block{ + Type: string(typeStr), + Bytes: bytes, + }) +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go new file mode 100644 index 000000000..89dd05e01 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go @@ -0,0 +1,94 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutils + +import ( + "errors" + "fmt" + "io" + "os" + + "golang.org/x/term" +) + +// PassFunc is a type of function that takes a boolean (representing whether confirmation is desired) and returns the password as read, along with an error if one occurred +type PassFunc func(bool) ([]byte, error) + +// Read is for fuzzing +var Read = readPasswordFn + +// readPasswordFn reads the password from the following sources, in order of preference: +// +// - COSIGN_PASSWORD environment variable +// +// - user input from the terminal (if present) +// +// - input piped to stdin +func readPasswordFn() func() ([]byte, error) { + if pw, ok := os.LookupEnv("COSIGN_PASSWORD"); ok { + return func() ([]byte, error) { + return []byte(pw), nil + } + } + if term.IsTerminal(0) { + return func() ([]byte, error) { + return term.ReadPassword(0) + } + } + // Handle piped-in passwords. + return func() ([]byte, error) { + return io.ReadAll(os.Stdin) + } +} + +// StaticPasswordFunc returns a PassFunc which returns the provided password. +func StaticPasswordFunc(pw []byte) PassFunc { + return func(bool) ([]byte, error) { + return pw, nil + } +} + +// SkipPassword is a PassFunc that does not interact with a user, but +// simply returns nil for both the password and the error. +func SkipPassword(_ bool) ([]byte, error) { + return nil, nil +} + +// GetPasswordFromStdIn gathers the password from stdin with an +// optional confirmation step. +func GetPasswordFromStdIn(confirm bool) ([]byte, error) { + read := Read() + fmt.Fprint(os.Stderr, "Enter password for private key: ") + pw1, err := read() + fmt.Fprintln(os.Stderr) + if err != nil { + return nil, err + } + if !confirm { + return pw1, nil + } + fmt.Fprint(os.Stderr, "Enter again: ") + pw2, err := read() + fmt.Fprintln(os.Stderr) + if err != nil { + return nil, err + } + + if string(pw1) != string(pw2) { + return nil, errors.New("passwords do not match") + } + return pw1, nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go new file mode 100644 index 000000000..325813d69 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go @@ -0,0 +1,152 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
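+
+// Illustrative sketch, not part of the vendored file: the PassFunc helpers in
+// password.go pair naturally with the key-marshalling helpers below, e.g.
+// (from a caller's perspective; priv is a caller-supplied key):
+//
+//	pf := cryptoutils.StaticPasswordFunc([]byte("example-password")) // example password, assumed
+//	encDER, err := cryptoutils.MarshalPrivateKeyToEncryptedDER(priv, pf)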
+ +package cryptoutils + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + + "github.com/secure-systems-lab/go-securesystemslib/encrypted" +) + +const ( + // PrivateKeyPEMType is the string "PRIVATE KEY" to be used during PEM encoding and decoding + PrivateKeyPEMType PEMType = "PRIVATE KEY" + // ECPrivateKeyPEMType is the string "EC PRIVATE KEY" used to parse SEC 1 EC private keys + ECPrivateKeyPEMType PEMType = "EC PRIVATE KEY" + // PKCS1PrivateKeyPEMType is the string "RSA PRIVATE KEY" used to parse PKCS#1-encoded private keys + PKCS1PrivateKeyPEMType PEMType = "RSA PRIVATE KEY" + encryptedCosignPrivateKeyPEMType PEMType = "ENCRYPTED COSIGN PRIVATE KEY" + // EncryptedSigstorePrivateKeyPEMType is the string "ENCRYPTED SIGSTORE PRIVATE KEY" to be used during PEM encoding and decoding + EncryptedSigstorePrivateKeyPEMType PEMType = "ENCRYPTED SIGSTORE PRIVATE KEY" +) + +func pemEncodeKeyPair(priv crypto.PrivateKey, pub crypto.PublicKey, pf PassFunc) (privPEM, pubPEM []byte, err error) { + pubPEM, err = MarshalPublicKeyToPEM(pub) + if err != nil { + return nil, nil, err + } + derBytes, err := MarshalPrivateKeyToDER(priv) + if err != nil { + return nil, nil, err + } + + if pf == nil { + return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil + } + password, err := pf(true) + if err != nil { + return nil, nil, err + } + if password == nil { + return PEMEncode(PrivateKeyPEMType, derBytes), pubPEM, nil + } + if derBytes, err = encrypted.Encrypt(derBytes, password); err != nil { + return nil, nil, err + } + return PEMEncode(EncryptedSigstorePrivateKeyPEMType, derBytes), pubPEM, nil +} + +// GeneratePEMEncodedECDSAKeyPair generates an ECDSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded. +func GeneratePEMEncodedECDSAKeyPair(curve elliptic.Curve, pf PassFunc) (privPEM, pubPEM []byte, err error) { + priv, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, nil, err + } + return pemEncodeKeyPair(priv, priv.Public(), pf) +} + +// GeneratePEMEncodedRSAKeyPair generates an RSA keypair, optionally password encrypted using a provided PassFunc, and PEM encoded. 
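+//
+// Illustrative use, a sketch rather than upstream code:
+//
+//	privPEM, pubPEM, err := GeneratePEMEncodedRSAKeyPair(4096, StaticPasswordFunc([]byte("example")))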
+func GeneratePEMEncodedRSAKeyPair(keyLengthBits int, pf PassFunc) (privPEM, pubPEM []byte, err error) { + priv, err := rsa.GenerateKey(rand.Reader, keyLengthBits) + if err != nil { + return nil, nil, err + } + return pemEncodeKeyPair(priv, priv.Public(), pf) +} + +// MarshalPrivateKeyToEncryptedDER marshals the private key and encrypts the DER-encoded value using the specified password function +func MarshalPrivateKeyToEncryptedDER(priv crypto.PrivateKey, pf PassFunc) ([]byte, error) { + derKey, err := MarshalPrivateKeyToDER(priv) + if err != nil { + return nil, err + } + password, err := pf(true) + if err != nil { + return nil, err + } + if password == nil { + return nil, errors.New("password was nil") + } + return encrypted.Encrypt(derKey, password) +} + +// UnmarshalPEMToPrivateKey converts a PEM-encoded byte slice into a crypto.PrivateKey +func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey, error) { + derBlock, _ := pem.Decode(pemBytes) + if derBlock == nil { + return nil, errors.New("PEM decoding failed") + } + switch derBlock.Type { + case string(PrivateKeyPEMType): + return x509.ParsePKCS8PrivateKey(derBlock.Bytes) + case string(PKCS1PrivateKeyPEMType): + return x509.ParsePKCS1PrivateKey(derBlock.Bytes) + case string(ECPrivateKeyPEMType): + return x509.ParseECPrivateKey(derBlock.Bytes) + case string(EncryptedSigstorePrivateKeyPEMType), string(encryptedCosignPrivateKeyPEMType): + derBytes := derBlock.Bytes + if pf != nil { + password, err := pf(false) + if err != nil { + return nil, err + } + if password != nil { + derBytes, err = encrypted.Decrypt(derBytes, password) + if err != nil { + return nil, err + } + } + } + + return x509.ParsePKCS8PrivateKey(derBytes) + } + return nil, fmt.Errorf("unknown private key PEM file type: %v", derBlock.Type) +} + +// MarshalPrivateKeyToDER converts a crypto.PrivateKey into a PKCS8 ASN.1 DER byte slice +func MarshalPrivateKeyToDER(priv crypto.PrivateKey) ([]byte, error) { + if priv == nil { + return nil, errors.New("empty key") + } + return x509.MarshalPKCS8PrivateKey(priv) +} + +// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PKCS#8 PEM-encoded byte slice +func MarshalPrivateKeyToPEM(priv crypto.PrivateKey) ([]byte, error) { + derBytes, err := MarshalPrivateKeyToDER(priv) + if err != nil { + return nil, err + } + return PEMEncode(PrivateKeyPEMType, derBytes), nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go new file mode 100644 index 000000000..1e2fa031b --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go @@ -0,0 +1,186 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
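+
+// Illustrative sketch, not part of the vendored file: PEM produced by the
+// privatekey.go helpers above round-trips through UnmarshalPEMToPrivateKey;
+// a nil PassFunc is fine for unencrypted keys:
+//
+//	privPEM, _, err := cryptoutils.GeneratePEMEncodedECDSAKeyPair(elliptic.P256(), nil)
+//	// handle err, then:
+//	priv, err := cryptoutils.UnmarshalPEMToPrivateKey(privPEM, nil)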
+ +package cryptoutils + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha1" // nolint:gosec + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + + "github.com/letsencrypt/boulder/goodkey" +) + +const ( + // PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding + PublicKeyPEMType PEMType = "PUBLIC KEY" + // PKCS1PublicKeyPEMType is the string "RSA PUBLIC KEY" used to parse PKCS#1-encoded public keys + PKCS1PublicKeyPEMType PEMType = "RSA PUBLIC KEY" +) + +// subjectPublicKeyInfo is used to construct a subject key ID. +// https://tools.ietf.org/html/rfc5280#section-4.1.2.7 +type subjectPublicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + SubjectPublicKey asn1.BitString +} + +// UnmarshalPEMToPublicKey converts a PEM-encoded byte slice into a crypto.PublicKey +func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, error) { + derBytes, _ := pem.Decode(pemBytes) + if derBytes == nil { + return nil, errors.New("PEM decoding failed") + } + switch derBytes.Type { + case string(PublicKeyPEMType): + return x509.ParsePKIXPublicKey(derBytes.Bytes) + case string(PKCS1PublicKeyPEMType): + return x509.ParsePKCS1PublicKey(derBytes.Bytes) + default: + return nil, fmt.Errorf("unknown Public key PEM file type: %v. Are you passing the correct public key?", + derBytes.Type) + } +} + +// MarshalPublicKeyToDER converts a crypto.PublicKey into a PKIX, ASN.1 DER byte slice +func MarshalPublicKeyToDER(pub crypto.PublicKey) ([]byte, error) { + if pub == nil { + return nil, errors.New("empty key") + } + return x509.MarshalPKIXPublicKey(pub) +} + +// MarshalPublicKeyToPEM converts a crypto.PublicKey into a PEM-encoded byte slice +func MarshalPublicKeyToPEM(pub crypto.PublicKey) ([]byte, error) { + derBytes, err := MarshalPublicKeyToDER(pub) + if err != nil { + return nil, err + } + return PEMEncode(PublicKeyPEMType, derBytes), nil +} + +// SKID generates a 160-bit SHA-1 hash of the value of the BIT STRING +// subjectPublicKey (excluding the tag, length, and number of unused bits). +// https://tools.ietf.org/html/rfc5280#section-4.2.1.2 +func SKID(pub crypto.PublicKey) ([]byte, error) { + derPubBytes, err := x509.MarshalPKIXPublicKey(pub) + if err != nil { + return nil, err + } + var spki subjectPublicKeyInfo + if _, err := asn1.Unmarshal(derPubBytes, &spki); err != nil { + return nil, err + } + skid := sha1.Sum(spki.SubjectPublicKey.Bytes) // nolint:gosec + return skid[:], nil +} + +// EqualKeys compares two public keys. Supports RSA, ECDSA and ED25519. 
+// If not equal, the error message contains hex-encoded SHA1 hashes of the DER-encoded keys +func EqualKeys(first, second crypto.PublicKey) error { + switch pub := first.(type) { + case *rsa.PublicKey: + if !pub.Equal(second) { + return errors.New(genErrMsg(first, second, "rsa")) + } + case *ecdsa.PublicKey: + if !pub.Equal(second) { + return errors.New(genErrMsg(first, second, "ecdsa")) + } + case ed25519.PublicKey: + if !pub.Equal(second) { + return errors.New(genErrMsg(first, second, "ed25519")) + } + default: + return errors.New("unsupported key type") + } + return nil +} + +// genErrMsg generates an error message for EqualKeys +func genErrMsg(first, second crypto.PublicKey, keyType string) string { + msg := fmt.Sprintf("%s public keys are not equal", keyType) + // Calculate SKID to include in error message + firstSKID, err := SKID(first) + if err != nil { + return msg + } + secondSKID, err := SKID(second) + if err != nil { + return msg + } + return fmt.Sprintf("%s (%s, %s)", msg, hex.EncodeToString(firstSKID), hex.EncodeToString(secondSKID)) +} + +// ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key. +func ValidatePubKey(pub crypto.PublicKey) error { + // goodkey policy enforces: + // * RSA + // * Size of key: 2048 <= size <= 4096, size % 8 = 0 + // * Exponent E = 65537 (Default exponent for OpenSSL and Golang) + // * Small primes check for modulus + // * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17) + // * Key is easily factored with Fermat's factorization method + // * EC + // * Public key Q is not the identity element (Ø) + // * Public key Q's x and y are within [0, p-1] + // * Public key Q is on the curve + // * Public key Q's order matches the subgroups (nQ = Ø) + allowedKeys := &goodkey.AllowedKeys{ + RSA2048: true, + RSA3072: true, + RSA4096: true, + ECDSAP256: true, + ECDSAP384: true, + ECDSAP521: true, + } + cfg := &goodkey.Config{ + FermatRounds: 100, + AllowedKeys: allowedKeys, + } + p, err := goodkey.NewPolicy(cfg, nil) + if err != nil { + // Should not occur, only chances to return errors are if fermat rounds + // are <0 or when loading blocked/weak keys from disk (not used here) + return errors.New("unable to initialize key policy") + } + + switch pk := pub.(type) { + case *rsa.PublicKey: + // ctx is unused + return p.GoodKey(context.Background(), pub) + case *ecdsa.PublicKey: + // ctx is unused + return p.GoodKey(context.Background(), pub) + case ed25519.PublicKey: + return validateEd25519Key(pk) + } + return errors.New("unsupported public key type") +} + +// No validations currently, ED25519 supports only one key size. +func validateEd25519Key(_ ed25519.PublicKey) error { + return nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go new file mode 100644 index 000000000..d237ef58e --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go @@ -0,0 +1,149 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutils + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +var ( + // OIDOtherName is the OID for the OtherName SAN per RFC 5280 + OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7} + // SANOID is the OID for Subject Alternative Name per RFC 5280 + SANOID = asn1.ObjectIdentifier{2, 5, 29, 17} +) + +// OtherName describes a name related to a certificate which is not in one +// of the standard name formats. RFC 5280, 4.2.1.6: +// +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// value [0] EXPLICIT ANY DEFINED BY type-id } +// +// OtherName for Fulcio-issued certificates only supports UTF-8 strings as values. +type OtherName struct { + ID asn1.ObjectIdentifier + Value string `asn1:"utf8,explicit,tag:0"` +} + +// MarshalOtherNameSAN creates a Subject Alternative Name extension +// with an OtherName sequence. RFC 5280, 4.2.1.6: +// +// SubjectAltName ::= GeneralNames +// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName +// GeneralName ::= CHOICE { +// +// otherName [0] OtherName, +// ... } +func MarshalOtherNameSAN(name string, critical bool) (*pkix.Extension, error) { + o := OtherName{ + ID: OIDOtherName, + Value: name, + } + bytes, err := asn1.MarshalWithParams(o, "tag:0") + if err != nil { + return nil, err + } + + sans, err := asn1.Marshal([]asn1.RawValue{{FullBytes: bytes}}) + if err != nil { + return nil, err + } + return &pkix.Extension{ + Id: SANOID, + Critical: critical, + Value: sans, + }, nil +} + +// UnmarshalOtherNameSAN extracts a UTF-8 string from the OtherName +// field in the Subject Alternative Name extension. +func UnmarshalOtherNameSAN(exts []pkix.Extension) (string, error) { + var otherNames []string + + for _, e := range exts { + if !e.Id.Equal(SANOID) { + continue + } + + var seq asn1.RawValue + rest, err := asn1.Unmarshal(e.Value, &seq) + if err != nil { + return "", err + } else if len(rest) != 0 { + return "", fmt.Errorf("trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return "", asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return "", err + } + + // skip all GeneralName fields except OtherName + if v.Tag != 0 { + continue + } + + var other OtherName + if _, err := asn1.UnmarshalWithParams(v.FullBytes, &other, "tag:0"); err != nil { + return "", fmt.Errorf("could not parse requested OtherName SAN: %w", err) + } + if !other.ID.Equal(OIDOtherName) { + return "", fmt.Errorf("unexpected OID for OtherName, expected %v, got %v", OIDOtherName, other.ID) + } + otherNames = append(otherNames, other.Value) + } + } + + if len(otherNames) == 0 { + return "", errors.New("no OtherName found") + } + if len(otherNames) != 1 { + return "", errors.New("expected only one OtherName") + } + + return otherNames[0], nil +} + +// GetSubjectAlternateNames extracts all subject alternative names from +// the certificate, including email addresses, DNS, IP addresses, URIs, +// and OtherName SANs +func GetSubjectAlternateNames(cert *x509.Certificate) []string { + sans := []string{} + sans = append(sans, cert.DNSNames...) + sans = append(sans, cert.EmailAddresses...) 
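+	// IP and URI SANs are stringified below; an OtherName SAN, if present, is
+	// recovered from the raw extensions via UnmarshalOtherNameSAN.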
+ for _, ip := range cert.IPAddresses { + sans = append(sans, ip.String()) + } + for _, uri := range cert.URIs { + sans = append(sans, uri.String()) + } + // ignore error if there's no OtherName SAN + otherName, _ := UnmarshalOtherNameSAN(cert.Extensions) + if len(otherName) > 0 { + sans = append(sans, otherName) + } + return sans +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go new file mode 100644 index 000000000..02c032b02 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go @@ -0,0 +1,314 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "errors" + "fmt" + + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" +) + +// PublicKeyType represents the public key algorithm for a given signature algorithm. +type PublicKeyType uint + +const ( + // RSA public key + RSA PublicKeyType = iota + // ECDSA public key + ECDSA + // ED25519 public key + ED25519 +) + +// RSAKeySize represents the size of an RSA public key in bits. +type RSAKeySize int + +// AlgorithmDetails exposes relevant information for a given signature algorithm. +type AlgorithmDetails struct { + // knownAlgorithm is the signature algorithm that the following details refer to. + knownAlgorithm v1.PublicKeyDetails + + // keyType is the public key algorithm being used. + keyType PublicKeyType + + // hashType is the hash algorithm being used. + hashType crypto.Hash + + // protoHashType is the hash algorithm being used as a proto message; it must be the protobuf-specs + // v1.HashAlgorithm equivalent of the hashType. + protoHashType v1.HashAlgorithm + + // extraKeyParams contains any extra parameters required to check a given public key against this entry. + // + // The underlying type of these parameters is dependent on the keyType. + // For example, ECDSA algorithms will store an elliptic curve here, whereas RSA keys will store the key size. + // Algorithms that don't require any extra parameters leave this set to nil. + extraKeyParams interface{} + + // flagValue is a string representation of the signature algorithm that follows the naming conventions of CLI + // arguments that are used for Sigstore services. + flagValue string +} + +// GetSignatureAlgorithm returns the PublicKeyDetails associated with the algorithm details. +func (a AlgorithmDetails) GetSignatureAlgorithm() v1.PublicKeyDetails { + return a.knownAlgorithm +} + +// GetKeyType returns the PublicKeyType for the algorithm details. +func (a AlgorithmDetails) GetKeyType() PublicKeyType { + return a.keyType +} + +// GetHashType returns the hash algorithm that should be used with this algorithm.
+func (a AlgorithmDetails) GetHashType() crypto.Hash { + return a.hashType +} + +// GetProtoHashType is a convenience method to get the protobuf-specs type of the hash algorithm. +func (a AlgorithmDetails) GetProtoHashType() v1.HashAlgorithm { + return a.protoHashType +} + +// GetRSAKeySize returns the RSA key size for the algorithm details, if the key type is RSA. +func (a AlgorithmDetails) GetRSAKeySize() (RSAKeySize, error) { + if a.keyType != RSA { + return 0, fmt.Errorf("unable to retrieve RSA key size for key type: %T", a.keyType) + } + rsaKeySize, ok := a.extraKeyParams.(RSAKeySize) + if !ok { + // This should be unreachable. + return 0, fmt.Errorf("unable to retrieve key size for RSA, malformed algorithm details?: %T", a.keyType) + } + return rsaKeySize, nil +} + +// GetECDSACurve returns the elliptic curve for the algorithm details, if the key type is ECDSA. +func (a AlgorithmDetails) GetECDSACurve() (*elliptic.Curve, error) { + if a.keyType != ECDSA { + return nil, fmt.Errorf("unable to retrieve ECDSA curve for key type: %T", a.keyType) + } + ecdsaCurve, ok := a.extraKeyParams.(elliptic.Curve) + if !ok { + // This should be unreachable. + return nil, fmt.Errorf("unable to retrieve curve for ECDSA, malformed algorithm details?: %T", a.keyType) + } + return &ecdsaCurve, nil +} + +func (a AlgorithmDetails) checkKey(pubKey crypto.PublicKey) (bool, error) { + switch a.keyType { + case RSA: + rsaKey, ok := pubKey.(*rsa.PublicKey) + if !ok { + return false, nil + } + keySize, err := a.GetRSAKeySize() + if err != nil { + return false, err + } + return rsaKey.Size()*8 == int(keySize), nil + case ECDSA: + ecdsaKey, ok := pubKey.(*ecdsa.PublicKey) + if !ok { + return false, nil + } + curve, err := a.GetECDSACurve() + if err != nil { + return false, err + } + return ecdsaKey.Curve == *curve, nil + case ED25519: + _, ok := pubKey.(ed25519.PublicKey) + return ok, nil + } + return false, fmt.Errorf("unrecognized key type: %T", a.keyType) +} + +func (a AlgorithmDetails) checkHash(hashType crypto.Hash) bool { + return a.hashType == hashType +} + +// Note that deprecated options in PublicKeyDetails are not included in this +// list, including PKCS1v1.5 encoded RSA. Refer to the v1.PublicKeyDetails enum +// for more details. 
+var supportedAlgorithms = []AlgorithmDetails{ + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(2048), "rsa-sign-pkcs1-2048-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(3072), "rsa-sign-pkcs1-3072-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pkcs1-4096-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(2048), "rsa-sign-pss-2048-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(3072), "rsa-sign-pss-3072-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pss-4096-sha256"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P256(), "ecdsa-sha2-256-nistp256"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, ECDSA, crypto.SHA384, v1.HashAlgorithm_SHA2_384, elliptic.P384(), "ecdsa-sha2-384-nistp384"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P384(), "ecdsa-sha2-256-nistp384"}, //nolint:staticcheck + {v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512, ECDSA, crypto.SHA512, v1.HashAlgorithm_SHA2_512, elliptic.P521(), "ecdsa-sha2-512-nistp521"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P521(), "ecdsa-sha2-256-nistp521"}, //nolint:staticcheck + {v1.PublicKeyDetails_PKIX_ED25519, ED25519, crypto.Hash(0), v1.HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED, nil, "ed25519"}, + {v1.PublicKeyDetails_PKIX_ED25519_PH, ED25519, crypto.SHA512, v1.HashAlgorithm_SHA2_512, nil, "ed25519-ph"}, +} + +// AlgorithmRegistryConfig represents a set of permitted algorithms for a given Sigstore service or component. +// +// Individual services may wish to restrict what algorithms are allowed to a subset of what is covered in the algorithm +// registry (represented by v1.PublicKeyDetails). +type AlgorithmRegistryConfig struct { + permittedAlgorithms []AlgorithmDetails +} + +// GetAlgorithmDetails retrieves a set of details for a given v1.PublicKeyDetails flag that allows users to +// introspect the public key algorithm, hash algorithm and more. +func GetAlgorithmDetails(knownSignatureAlgorithm v1.PublicKeyDetails) (AlgorithmDetails, error) { + for _, detail := range supportedAlgorithms { + if detail.knownAlgorithm == knownSignatureAlgorithm { + return detail, nil + } + } + return AlgorithmDetails{}, fmt.Errorf("could not find algorithm details for known signature algorithm: %s", knownSignatureAlgorithm) +} + +// NewAlgorithmRegistryConfig creates a new AlgorithmRegistryConfig for a set of permitted signature algorithms. +func NewAlgorithmRegistryConfig(algorithmConfig []v1.PublicKeyDetails) (*AlgorithmRegistryConfig, error) { + permittedAlgorithms := make([]AlgorithmDetails, 0, len(supportedAlgorithms)) + for _, algorithm := range algorithmConfig { + a, err := GetAlgorithmDetails(algorithm) + if err != nil { + return nil, err + } + permittedAlgorithms = append(permittedAlgorithms, a) + } + return &AlgorithmRegistryConfig{permittedAlgorithms: permittedAlgorithms}, nil +} + +// IsAlgorithmPermitted checks whether a given public key/hash algorithm combination is permitted by a registry config.
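+//
+// Illustrative use, a sketch rather than upstream code (pub is a
+// caller-supplied *ecdsa.PublicKey on P-256):
+//
+//	cfg, _ := NewAlgorithmRegistryConfig([]v1.PublicKeyDetails{v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256})
+//	ok, _ := cfg.IsAlgorithmPermitted(pub, crypto.SHA256) // true for this key/hash pair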
+func (registryConfig AlgorithmRegistryConfig) IsAlgorithmPermitted(key crypto.PublicKey, hash crypto.Hash) (bool, error) { + for _, algorithm := range registryConfig.permittedAlgorithms { + keyMatch, err := algorithm.checkKey(key) + if err != nil { + return false, err + } + if keyMatch && algorithm.checkHash(hash) { + return true, nil + } + } + return false, nil +} + +// FormatSignatureAlgorithmFlag formats a v1.PublicKeyDetails to a string that conforms to the naming conventions +// of CLI arguments that are used for Sigstore services. +func FormatSignatureAlgorithmFlag(algorithm v1.PublicKeyDetails) (string, error) { + for _, a := range supportedAlgorithms { + if a.knownAlgorithm == algorithm { + return a.flagValue, nil + } + } + return "", fmt.Errorf("could not find matching flag for signature algorithm: %s", algorithm) +} + +// ParseSignatureAlgorithmFlag parses a string produced by FormatSignatureAlgorithmFlag and returns the corresponding +// v1.PublicKeyDetails value. +func ParseSignatureAlgorithmFlag(flag string) (v1.PublicKeyDetails, error) { + for _, a := range supportedAlgorithms { + if a.flagValue == flag { + return a.knownAlgorithm, nil + } + } + return v1.PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED, fmt.Errorf("could not find matching signature algorithm for flag: %s", flag) +} + +// GetDefaultPublicKeyDetails returns the default public key details for a given key. +// +// RSA 2048 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 +// RSA 3072 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 +// RSA 4096 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 +// ECDSA P256 => v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 +// ECDSA P384 => v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 +// ECDSA P521 => v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 +// ED25519 => v1.PublicKeyDetails_PKIX_ED25519 (PKIX_ED25519_PH with the Ed25519ph option) +// +// This function accepts LoadOptions, which are used to determine the default +// public key details when there may be ambiguities. For example, RSA keys may +// be PSS or PKCS1v1.5 encoded, and ED25519 keys may be used with PureEd25519 or +// with Ed25519ph. The Hash option is ignored if passed, because each of the +// supported algorithms already has a default hash.
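+//
+// Illustrative use, a sketch rather than upstream code (edPub is a
+// caller-supplied ed25519.PublicKey; WithED25519ph is assumed to come from
+// this module's options package):
+//
+//	details, err := GetDefaultPublicKeyDetails(edPub, options.WithED25519ph()) // PKIX_ED25519_PH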
+func GetDefaultPublicKeyDetails(publicKey crypto.PublicKey, opts ...LoadOption) (v1.PublicKeyDetails, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := publicKey.(type) { + case *rsa.PublicKey: + if rsaPSSOptions != nil { + switch pk.Size() * 8 { + case 2048: + return v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, nil + case 3072: + return v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, nil + case 4096: + return v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, nil + } + } else { + switch pk.Size() * 8 { + case 2048: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, nil + case 3072: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, nil + case 4096: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, nil + } + } + case *ecdsa.PublicKey: + switch pk.Curve { + case elliptic.P256(): + return v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, nil + case elliptic.P384(): + return v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, nil + case elliptic.P521(): + return v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512, nil + } + case ed25519.PublicKey: + if useED25519ph { + return v1.PublicKeyDetails_PKIX_ED25519_PH, nil + } + return v1.PublicKeyDetails_PKIX_ED25519, nil + } + return v1.PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED, errors.New("unsupported public key type") +} + +// GetDefaultAlgorithmDetails returns the default algorithm details for a given +// key, according to GetDefaultPublicKeyDetails. +// +// This function accepts LoadOptions, which are used to determine the default +// algorithm details when there may be ambiguities. For example, RSA keys may be +// PSS or PKCS1v1.5 encoded, and ED25519 keys may be used with PureEd25519 or +// with Ed25519ph. The Hash option is ignored if passed, because each of the +// supported algorithms already has a default hash. +func GetDefaultAlgorithmDetails(publicKey crypto.PublicKey, opts ...LoadOption) (AlgorithmDetails, error) { + knownAlgorithm, err := GetDefaultPublicKeyDetails(publicKey, opts...) + if err != nil { + return AlgorithmDetails{}, err + } + return GetAlgorithmDetails(knownAlgorithm) +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go new file mode 100644 index 000000000..dbd3314f6 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package signature contains types and utilities related to Sigstore signatures. 
+package signature diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go new file mode 100644 index 000000000..d333cb23f --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go @@ -0,0 +1,270 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/asn1" + "errors" + "fmt" + "io" + "math/big" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// checked on LoadSigner, LoadVerifier and SignMessage +var ecdsaSupportedHashFuncs = []crypto.Hash{ + crypto.SHA256, + crypto.SHA512, + crypto.SHA384, + crypto.SHA224, +} + +// checked on VerifySignature. Supports SHA1 verification. +var ecdsaSupportedVerifyHashFuncs = []crypto.Hash{ + crypto.SHA256, + crypto.SHA512, + crypto.SHA384, + crypto.SHA224, + crypto.SHA1, +} + +// ECDSASigner is a signature.Signer that uses an Elliptic Curve DSA algorithm +type ECDSASigner struct { + hashFunc crypto.Hash + priv *ecdsa.PrivateKey +} + +// LoadECDSASigner calculates signatures using the specified private key and hash algorithm. +// +// hf must not be crypto.Hash(0). +func LoadECDSASigner(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASigner, error) { + if priv == nil { + return nil, errors.New("invalid ECDSA private key specified") + } + + if !isSupportedAlg(hf, ecdsaSupportedHashFuncs) { + return nil, errors.New("invalid hash function specified") + } + + return &ECDSASigner{ + priv: priv, + hashFunc: hf, + }, nil +} + +// SignMessage signs the provided message. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the ECDSASigner was created. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithRand() +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (e ECDSASigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) { + digest, _, err := ComputeDigestForSigning(message, e.hashFunc, ecdsaSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + rand := selectRandFromOpts(opts...) + + return ecdsa.SignASN1(rand, e.priv, digest) +} + +// Public returns the public key that can be used to verify signatures created by +// this signer. +func (e ECDSASigner) Public() crypto.PublicKey { + if e.priv == nil { + return nil + } + + return e.priv.Public() +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ECDSASigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.Public(), nil +} + +// Sign computes the signature for the specified digest. 
If a source of entropy is +// given in rand, it will be used instead of the default value (rand.Reader from crypto/rand). +// +// If opts are specified, the hash function in opts.Hash should be the one used to compute +// digest. If opts are not specified, the value provided when the signer was created will be used instead. +func (e ECDSASigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + ecdsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)} + if opts != nil { + ecdsaOpts = append(ecdsaOpts, options.WithCryptoSignerOpts(opts)) + } + + return e.SignMessage(nil, ecdsaOpts...) +} + +// ECDSAVerifier is a signature.Verifier that uses an Elliptic Curve DSA algorithm +type ECDSAVerifier struct { + publicKey *ecdsa.PublicKey + hashFunc crypto.Hash +} + +// LoadECDSAVerifier returns a Verifier that verifies signatures using the specified +// ECDSA public key and hash algorithm. +// +// hf must not be crypto.Hash(0). +func LoadECDSAVerifier(pub *ecdsa.PublicKey, hashFunc crypto.Hash) (*ECDSAVerifier, error) { + if pub == nil { + return nil, errors.New("invalid ECDSA public key specified") + } + + if !isSupportedAlg(hashFunc, ecdsaSupportedHashFuncs) { + return nil, errors.New("invalid hash function specified") + } + + return &ECDSAVerifier{ + publicKey: pub, + hashFunc: hashFunc, + }, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ECDSAVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the ECDSAVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e ECDSAVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + if e.publicKey == nil { + return errors.New("no public key set for ECDSAVerifier") + } + + digest, _, err := ComputeDigestForVerifying(message, e.hashFunc, ecdsaSupportedVerifyHashFuncs, opts...) + if err != nil { + return err + } + + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + // Without this check, VerifyASN1 panics on an invalid key. 
+ if !e.publicKey.IsOnCurve(e.publicKey.X, e.publicKey.Y) { + return fmt.Errorf("invalid ECDSA public key for %s", e.publicKey.Params().Name) + } + + asnParseTest := struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(sigBytes, &asnParseTest); err == nil { + if !ecdsa.VerifyASN1(e.publicKey, digest, sigBytes) { + return errors.New("invalid signature when validating ASN.1 encoded signature") + } + } else { + // deal with IEEE P1363 encoding of signatures + if len(sigBytes) == 0 || len(sigBytes) > 132 || len(sigBytes)%2 != 0 { + return errors.New("ecdsa: Invalid IEEE_P1363 encoded bytes") + } + r := new(big.Int).SetBytes(sigBytes[:len(sigBytes)/2]) + s := new(big.Int).SetBytes(sigBytes[len(sigBytes)/2:]) + if !ecdsa.Verify(e.publicKey, digest, r, s) { + return errors.New("invalid signature when validating IEEE_P1363 encoded signature") + } + } + + return nil +} + +// ECDSASignerVerifier is a signature.SignerVerifier that uses an Elliptic Curve DSA algorithm +type ECDSASignerVerifier struct { + *ECDSASigner + *ECDSAVerifier +} + +// LoadECDSASignerVerifier creates a combined signer and verifier. This is a convenience object +// that simply wraps an instance of ECDSASigner and ECDSAVerifier. +func LoadECDSASignerVerifier(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASignerVerifier, error) { + signer, err := LoadECDSASigner(priv, hf) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + verifier, err := LoadECDSAVerifier(&priv.PublicKey, hf) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ECDSASignerVerifier{ + ECDSASigner: signer, + ECDSAVerifier: verifier, + }, nil +} + +// NewDefaultECDSASignerVerifier creates a combined signer and verifier using ECDSA. +// +// This creates a new ECDSA key using the P-256 curve and uses the SHA256 hashing algorithm. +func NewDefaultECDSASignerVerifier() (*ECDSASignerVerifier, *ecdsa.PrivateKey, error) { + return NewECDSASignerVerifier(elliptic.P256(), rand.Reader, crypto.SHA256) +} + +// NewECDSASignerVerifier creates a combined signer and verifier using ECDSA. +// +// This creates a new ECDSA key using the specified elliptic curve, entropy source, and hashing function. +func NewECDSASignerVerifier(curve elliptic.Curve, rand io.Reader, hashFunc crypto.Hash) (*ECDSASignerVerifier, *ecdsa.PrivateKey, error) { + priv, err := ecdsa.GenerateKey(curve, rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadECDSASignerVerifier(priv, hashFunc) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ECDSASignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go new file mode 100644 index 000000000..23a8638ff --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go @@ -0,0 +1,197 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+)
+
+var ed25519SupportedHashFuncs = []crypto.Hash{
+ crypto.Hash(0),
+}
+
+// ED25519Signer is a signature.Signer that uses the Ed25519 public-key signature system
+type ED25519Signer struct {
+ priv ed25519.PrivateKey
+}
+
+// LoadED25519Signer calculates signatures using the specified private key.
+func LoadED25519Signer(priv ed25519.PrivateKey) (*ED25519Signer, error) {
+ if priv == nil {
+ return nil, errors.New("invalid ED25519 private key specified")
+ }
+
+ // check the key size here so we return an error instead of panicking
+ if len(priv) != ed25519.PrivateKeySize {
+ return nil, errors.New("invalid size for ED25519 key")
+ }
+
+ return &ED25519Signer{
+ priv: priv,
+ }, nil
+}
+
+// SignMessage signs the provided message. Passing the WithDigest option is not
+// supported as ED25519 performs a two-pass hash over the message during the
+// signing process.
+//
+// All options are ignored.
+func (e ED25519Signer) SignMessage(message io.Reader, _ ...SignOption) ([]byte, error) {
+ messageBytes, _, err := ComputeDigestForSigning(message, crypto.Hash(0), ed25519SupportedHashFuncs)
+ if err != nil {
+ return nil, err
+ }
+
+ return ed25519.Sign(e.priv, messageBytes), nil
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (e ED25519Signer) Public() crypto.PublicKey {
+ if e.priv == nil {
+ return nil
+ }
+
+ return e.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ED25519Signer) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.Public(), nil
+}
+
+// Sign computes the signature for the specified message; the first and third arguments to this
+// function are ignored as they are not used by the ED25519 algorithm.
+func (e ED25519Signer) Sign(_ io.Reader, message []byte, _ crypto.SignerOpts) ([]byte, error) {
+ if message == nil {
+ return nil, errors.New("message must not be nil")
+ }
+ return e.SignMessage(bytes.NewReader(message))
+}
+
+// ED25519Verifier is a signature.Verifier that uses the Ed25519 public-key signature system
+type ED25519Verifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// LoadED25519Verifier returns a Verifier that verifies signatures using the specified ED25519 public key.
+func LoadED25519Verifier(pub ed25519.PublicKey) (*ED25519Verifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid ED25519 public key specified")
+ }
+
+ return &ED25519Verifier{
+ publicKey: pub,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e *ED25519Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message.
+// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// All options are ignored if specified. +func (e *ED25519Verifier) VerifySignature(signature, message io.Reader, _ ...VerifyOption) error { + messageBytes, _, err := ComputeDigestForVerifying(message, crypto.Hash(0), ed25519SupportedHashFuncs) + if err != nil { + return err + } + + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + if !ed25519.Verify(e.publicKey, messageBytes, sigBytes) { + return errors.New("failed to verify signature") + } + return nil +} + +// ED25519SignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system +type ED25519SignerVerifier struct { + *ED25519Signer + *ED25519Verifier +} + +// LoadED25519SignerVerifier creates a combined signer and verifier. This is +// a convenience object that simply wraps an instance of ED25519Signer and ED25519Verifier. +func LoadED25519SignerVerifier(priv ed25519.PrivateKey) (*ED25519SignerVerifier, error) { + signer, err := LoadED25519Signer(priv) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + pub, ok := priv.Public().(ed25519.PublicKey) + if !ok { + return nil, fmt.Errorf("given key is not ed25519.PublicKey") + } + verifier, err := LoadED25519Verifier(pub) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ED25519SignerVerifier{ + ED25519Signer: signer, + ED25519Verifier: verifier, + }, nil +} + +// NewDefaultED25519SignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using crypto/rand as an entropy source. +func NewDefaultED25519SignerVerifier() (*ED25519SignerVerifier, ed25519.PrivateKey, error) { + return NewED25519SignerVerifier(rand.Reader) +} + +// NewED25519SignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using the specified entropy source. +func NewED25519SignerVerifier(rand io.Reader) (*ED25519SignerVerifier, ed25519.PrivateKey, error) { + _, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadED25519SignerVerifier(priv) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519SignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go new file mode 100644 index 000000000..d1660796a --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go @@ -0,0 +1,211 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+var ed25519phSupportedHashFuncs = []crypto.Hash{
+ crypto.SHA512,
+}
+
+// ED25519phSigner is a signature.Signer that uses the Ed25519 public-key signature system with pre-hashing
+type ED25519phSigner struct {
+ priv ed25519.PrivateKey
+}
+
+// LoadED25519phSigner calculates signatures using the specified private key.
+func LoadED25519phSigner(priv ed25519.PrivateKey) (*ED25519phSigner, error) {
+ if priv == nil {
+ return nil, errors.New("invalid ED25519 private key specified")
+ }
+
+ return &ED25519phSigner{
+ priv: priv,
+ }, nil
+}
+
+// ToED25519SignerVerifier creates an ED25519SignerVerifier from an ED25519phSignerVerifier
+//
+// Clients that use ED25519phSignerVerifier should use this method to get a
+// SignerVerifier that uses the same ED25519 private key, but with the Pure
+// Ed25519 algorithm. This might be necessary to interact with Fulcio, which
+// only supports the Pure Ed25519 algorithm.
+func (e ED25519phSignerVerifier) ToED25519SignerVerifier() (*ED25519SignerVerifier, error) {
+ return LoadED25519SignerVerifier(e.priv)
+}
+
+// SignMessage signs the provided message. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the ED25519phSigner was created.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// All other options are ignored if specified.
+func (e ED25519phSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, _, err := ComputeDigestForSigning(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return e.priv.Sign(nil, digest, crypto.SHA512)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (e ED25519phSigner) Public() crypto.PublicKey {
+ if e.priv == nil {
+ return nil
+ }
+
+ return e.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (e ED25519phSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.Public(), nil
+}
+
+// Sign computes the signature for the specified digest; the first and third arguments to this
+// function are ignored as they are not used by the ED25519ph algorithm.
+func (e ED25519phSigner) Sign(_ io.Reader, digest []byte, _ crypto.SignerOpts) ([]byte, error) {
+ return e.SignMessage(nil, options.WithDigest(digest))
+}
+
+// ED25519phVerifier is a signature.Verifier that uses the Ed25519 public-key signature system with pre-hashing
+type ED25519phVerifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// LoadED25519phVerifier returns a Verifier that verifies signatures using the
+// specified ED25519 public key.
+func LoadED25519phVerifier(pub ed25519.PublicKey) (*ED25519phVerifier, error) {
+ if pub == nil {
+ return nil, errors.New("invalid ED25519 public key specified")
+ }
+
+ return &ED25519phVerifier{
+ publicKey: pub,
+ }, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. 
As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e *ED25519phVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the ED25519phVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e *ED25519phVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + digest, _, err := ComputeDigestForVerifying(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) + if err != nil { + return err + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + if err := ed25519.VerifyWithOptions(e.publicKey, digest, sigBytes, &ed25519.Options{Hash: crypto.SHA512}); err != nil { + return fmt.Errorf("failed to verify signature: %w", err) + } + return nil +} + +// ED25519phSignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system +type ED25519phSignerVerifier struct { + *ED25519phSigner + *ED25519phVerifier +} + +// LoadED25519phSignerVerifier creates a combined signer and verifier. This is +// a convenience object that simply wraps an instance of ED25519phSigner and ED25519phVerifier. +func LoadED25519phSignerVerifier(priv ed25519.PrivateKey) (*ED25519phSignerVerifier, error) { + signer, err := LoadED25519phSigner(priv) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + pub, ok := priv.Public().(ed25519.PublicKey) + if !ok { + return nil, fmt.Errorf("given key is not ed25519.PublicKey") + } + verifier, err := LoadED25519phVerifier(pub) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ED25519phSignerVerifier{ + ED25519phSigner: signer, + ED25519phVerifier: verifier, + }, nil +} + +// NewDefaultED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using crypto/rand as an entropy source. +func NewDefaultED25519phSignerVerifier() (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + return NewED25519phSignerVerifier(rand.Reader) +} + +// NewED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using the specified entropy source. +func NewED25519phSignerVerifier(rand io.Reader) (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + _, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadED25519phSignerVerifier(priv) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. 
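[editor note] To make the PureEd25519/Ed25519ph split concrete, here is a hedged round-trip sketch using only names defined in this file; the message bytes and import path are illustrative:

```go
package main

import (
	"bytes"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	sv, _, err := signature.NewDefaultED25519phSignerVerifier()
	if err != nil {
		panic(err)
	}
	msg := []byte("artifact to sign")
	// SignMessage pre-hashes msg with SHA-512 and signs the digest (Ed25519ph).
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		panic(err)
	}
	// VerifySignature recomputes the SHA-512 digest and checks it with Ed25519ph.
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		panic(err)
	}
	// Services that only accept PureEd25519 (e.g. Fulcio, per the comment above)
	// can reuse the same private key via ToED25519SignerVerifier.
	if _, err := sv.ToED25519SignerVerifier(); err != nil {
		panic(err)
	}
}
```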
+func (e ED25519phSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return e.publicKey, nil
+}
diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/message.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
new file mode 100644
index 000000000..6f8449eea
--- /dev/null
+++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/message.go
@@ -0,0 +1,111 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ crand "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+)
+
+func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool {
+ if supportedAlgs == nil {
+ return true
+ }
+ for _, supportedAlg := range supportedAlgs {
+ if alg == supportedAlg {
+ return true
+ }
+ }
+ return false
+}
+
+// ComputeDigestForSigning calculates the digest value for the specified message using a hash function selected by the following process:
+//
+// - if a digest value is already specified in a SignOption and the length of the digest matches that of the selected hash function, the
+// digest value will be returned without any further computation
+// - if a hash function is given using WithCryptoSignerOpts(opts) as a SignOption, it will be used (if it is in the supported list)
+// - otherwise defaultHashFunc will be used (if it is in the supported list)
+func ComputeDigestForSigning(rawMessage io.Reader, defaultHashFunc crypto.Hash, supportedHashFuncs []crypto.Hash, opts ...SignOption) (digest []byte, hashedWith crypto.Hash, err error) {
+ var cryptoSignerOpts crypto.SignerOpts = defaultHashFunc
+ for _, opt := range opts {
+ opt.ApplyDigest(&digest)
+ opt.ApplyCryptoSignerOpts(&cryptoSignerOpts)
+ }
+ hashedWith = cryptoSignerOpts.HashFunc()
+ if !isSupportedAlg(hashedWith, supportedHashFuncs) {
+ return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs)
+ }
+ if len(digest) > 0 {
+ if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() {
+ err = errors.New("unexpected length of digest for hash function specified")
+ }
+ return
+ }
+ digest, err = hashMessage(rawMessage, hashedWith)
+ return
+}
+
+// ComputeDigestForVerifying calculates the digest value for the specified message using a hash function selected by the following process:
+//
+// - if a digest value is already specified in a VerifyOption and the length of the digest matches that of the selected hash function, the
+// digest value will be returned without any further computation
+// - if a hash function is given using WithCryptoSignerOpts(opts) as a VerifyOption, it will be used (if it is in the supported list)
+// - otherwise defaultHashFunc will be used (if it is in the supported list)
+func ComputeDigestForVerifying(rawMessage io.Reader, defaultHashFunc crypto.Hash, supportedHashFuncs []crypto.Hash, opts ...VerifyOption) (digest []byte, hashedWith crypto.Hash, err error) {
+ var cryptoSignerOpts 
crypto.SignerOpts = defaultHashFunc + for _, opt := range opts { + opt.ApplyDigest(&digest) + opt.ApplyCryptoSignerOpts(&cryptoSignerOpts) + } + hashedWith = cryptoSignerOpts.HashFunc() + if !isSupportedAlg(hashedWith, supportedHashFuncs) { + return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs) + } + if len(digest) > 0 { + if hashedWith != crypto.Hash(0) && len(digest) != hashedWith.Size() { + err = errors.New("unexpected length of digest for hash function specified") + } + return + } + digest, err = hashMessage(rawMessage, hashedWith) + return +} + +func hashMessage(rawMessage io.Reader, hashFunc crypto.Hash) ([]byte, error) { + if rawMessage == nil { + return nil, errors.New("message cannot be nil") + } + if hashFunc == crypto.Hash(0) { + return io.ReadAll(rawMessage) + } + hasher := hashFunc.New() + // avoids reading entire message into memory + if _, err := io.Copy(hasher, rawMessage); err != nil { + return nil, fmt.Errorf("hashing message: %w", err) + } + return hasher.Sum(nil), nil +} + +func selectRandFromOpts(opts ...SignOption) io.Reader { + rand := crand.Reader + for _, opt := range opts { + opt.ApplyRand(&rand) + } + return rand +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options.go new file mode 100644 index 000000000..e17e768c2 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options.go @@ -0,0 +1,65 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package signature + +import ( + "context" + "crypto" + "crypto/rsa" + "io" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// RPCOption specifies options to be used when performing RPC +type RPCOption interface { + ApplyContext(*context.Context) + ApplyRemoteVerification(*bool) + ApplyRPCAuthOpts(opts *options.RPCAuth) + ApplyKeyVersion(keyVersion *string) +} + +// PublicKeyOption specifies options to be used when obtaining a public key +type PublicKeyOption interface { + RPCOption +} + +// MessageOption specifies options to be used when processing messages during signing or verification +type MessageOption interface { + ApplyDigest(*[]byte) + ApplyCryptoSignerOpts(*crypto.SignerOpts) +} + +// SignOption specifies options to be used when signing a message +type SignOption interface { + RPCOption + MessageOption + ApplyRand(*io.Reader) + ApplyKeyVersionUsed(**string) +} + +// VerifyOption specifies options to be used when verifying a signature +type VerifyOption interface { + RPCOption + MessageOption +} + +// LoadOption specifies options to be used when creating a Signer/Verifier +type LoadOption interface { + ApplyHash(*crypto.Hash) + ApplyED25519ph(*bool) + ApplyRSAPSS(**rsa.PSSOptions) +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go new file mode 100644 index 000000000..2282a863e --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go @@ -0,0 +1,37 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package options defines options for KMS clients +package options + +import ( + "context" +) + +// RequestContext implements the functional option pattern for including a context during RPC +type RequestContext struct { + NoOpOptionImpl + ctx context.Context +} + +// ApplyContext sets the specified context as the functional option +func (r RequestContext) ApplyContext(ctx *context.Context) { + *ctx = r.ctx +} + +// WithContext specifies that the given context should be used in RPC to external services +func WithContext(ctx context.Context) RequestContext { + return RequestContext{ctx: ctx} +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go new file mode 100644 index 000000000..21875dc8c --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go @@ -0,0 +1,35 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestDigest implements the functional option pattern for specifying a digest value
+type RequestDigest struct {
+ NoOpOptionImpl
+ digest []byte
+}
+
+// ApplyDigest sets the specified digest value as the functional option
+func (r RequestDigest) ApplyDigest(digest *[]byte) {
+ *digest = r.digest
+}
+
+// WithDigest specifies that the given digest can be used by underlying signature implementations
+// WARNING: When verifying a digest with ECDSA, it is trivial to craft a valid signature
+// over a random message given a public key. Do not use this unless you understand the
+// implications and do not need to protect against malleability.
+func WithDigest(digest []byte) RequestDigest {
+ return RequestDigest{digest: digest}
+} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go
new file mode 100644
index 000000000..ebdeda22b
--- /dev/null
+++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go
@@ -0,0 +1,17 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package options contains functional options for the various SignerVerifiers
+package options diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go
new file mode 100644
index 000000000..751418f9d
--- /dev/null
+++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go
@@ -0,0 +1,50 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
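[editor note] WithDigest feeds straight into the digest-selection steps of ComputeDigestForSigning earlier in this diff: when a digest is supplied and its length matches the selected hash, hashing is skipped entirely. A hedged sketch using the ECDSA convenience constructor from this package (the constructor defaults to P-256/SHA-256; the message is illustrative):

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	sv, _, err := signature.NewDefaultECDSASignerVerifier() // P-256, SHA-256
	if err != nil {
		panic(err)
	}
	msg := []byte("payload")
	h := sha256.Sum256(msg)
	// The 32-byte digest matches SHA-256, so the nil message is never read.
	sig, err := sv.SignMessage(nil, options.WithDigest(h[:]))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d-byte signature over a precomputed digest\n", len(sig))
}
```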
+ +package options + +// RequestKeyVersion implements the functional option pattern for specifying the KMS key version during signing or verification +type RequestKeyVersion struct { + NoOpOptionImpl + keyVersion string +} + +// ApplyKeyVersion sets the KMS's key version as a functional option +func (r RequestKeyVersion) ApplyKeyVersion(keyVersion *string) { + *keyVersion = r.keyVersion +} + +// WithKeyVersion specifies that a specific KMS key version be used during signing and verification operations; +// a value of 0 will use the latest version of the key (default) +func WithKeyVersion(keyVersion string) RequestKeyVersion { + return RequestKeyVersion{keyVersion: keyVersion} +} + +// RequestKeyVersionUsed implements the functional option pattern for obtaining the KMS key version used during signing +type RequestKeyVersionUsed struct { + NoOpOptionImpl + keyVersionUsed *string +} + +// ApplyKeyVersionUsed requests to store the KMS's key version that was used as a functional option +func (r RequestKeyVersionUsed) ApplyKeyVersionUsed(keyVersionUsed **string) { + *keyVersionUsed = r.keyVersionUsed +} + +// ReturnKeyVersionUsed specifies that the specific KMS key version that was used during signing should be stored +// in the pointer provided +func ReturnKeyVersionUsed(keyVersionUsed *string) RequestKeyVersionUsed { + return RequestKeyVersionUsed{keyVersionUsed: keyVersionUsed} +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go new file mode 100644 index 000000000..e5f3f0116 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go @@ -0,0 +1,76 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
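[editor note] The key-version options above target KMS-backed implementations, which are not part of this diff. The sketch below therefore assumes a hypothetical KMS-backed value of the package's Signer interface (defined elsewhere in this module) to show how version pinning and version reporting compose; the version string "3" is illustrative:

```go
package example

import (
	"context"
	"io"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

// signWithPinnedVersion pins a KMS key version and reports the version the KMS
// actually used. kmsSigner is a hypothetical KMS-backed signature.Signer.
func signWithPinnedVersion(kmsSigner signature.Signer, msg io.Reader) ([]byte, string, error) {
	var usedVersion string
	sig, err := kmsSigner.SignMessage(msg,
		options.WithContext(context.Background()), // carries RPC cancellation/deadlines
		options.WithKeyVersion("3"),               // pin key version "3" (illustrative)
		options.ReturnKeyVersionUsed(&usedVersion),
	)
	return sig, usedVersion, err
}
```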
+
+package options
+
+import (
+ "crypto"
+ "crypto/rsa"
+)
+
+// RequestHash implements the functional option pattern for setting a Hash
+// function when loading a signer or verifier
+type RequestHash struct {
+ NoOpOptionImpl
+ hashFunc crypto.Hash
+}
+
+// ApplyHash sets the hash as requested by the functional option
+func (r RequestHash) ApplyHash(hash *crypto.Hash) {
+ *hash = r.hashFunc
+}
+
+// WithHash specifies that the given hash function should be used when loading a signer or verifier
+func WithHash(hash crypto.Hash) RequestHash {
+ return RequestHash{hashFunc: hash}
+}
+
+// RequestED25519ph implements the functional option pattern for specifying
+// ED25519ph (pre-hashed) should be used when loading a signer or verifier and an
+// ED25519 key is detected
+type RequestED25519ph struct {
+ NoOpOptionImpl
+ useED25519ph bool
+}
+
+// ApplyED25519ph sets the ED25519ph flag as requested by the functional option
+func (r RequestED25519ph) ApplyED25519ph(useED25519ph *bool) {
+ *useED25519ph = r.useED25519ph
+}
+
+// WithED25519ph specifies that the ED25519ph algorithm should be used when an ED25519 key is used
+func WithED25519ph() RequestED25519ph {
+ return RequestED25519ph{useED25519ph: true}
+}
+
+// RequestPSSOptions implements the functional option pattern for specifying RSA
+// PSS should be used when loading a signer or verifier and an RSA key is
+// detected
+type RequestPSSOptions struct {
+ NoOpOptionImpl
+ opts *rsa.PSSOptions
+}
+
+// ApplyRSAPSS sets the RSAPSS options as requested by the functional option
+func (r RequestPSSOptions) ApplyRSAPSS(opts **rsa.PSSOptions) {
+ *opts = r.opts
+}
+
+// WithRSAPSS specifies that the RSAPSS algorithm should be used when an RSA key is used.
+// Note that the RSA PSSOptions contains a hash algorithm, which will override
+// the hash function specified with WithHash.
+func WithRSAPSS(opts *rsa.PSSOptions) RequestPSSOptions {
+ return RequestPSSOptions{opts: opts}
+} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
new file mode 100644
index 000000000..0c0e51856
--- /dev/null
+++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
@@ -0,0 +1,59 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+ "context"
+ "crypto"
+ "crypto/rsa"
+ "io"
+)
+
+// NoOpOptionImpl implements the RPCOption, SignOption, VerifyOption interfaces as no-ops. 
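[editor note] These LoadOptions are consumed by, among others, GetDefaultPublicKeyDetails earlier in this diff. A hedged sketch showing how WithRSAPSS flips the detail resolution for the same RSA key (the 3072-bit size is chosen arbitrarily; import paths are the upstream module paths):

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 3072)
	if err != nil {
		panic(err)
	}
	// No options: the PKCS#1 v1.5 detail variant is selected.
	pkcs, err := signature.GetDefaultPublicKeyDetails(priv.Public())
	if err != nil {
		panic(err)
	}
	// WithRSAPSS: the PSS variant is selected for the same key.
	pss, err := signature.GetDefaultPublicKeyDetails(priv.Public(),
		options.WithRSAPSS(&rsa.PSSOptions{Hash: crypto.SHA256}))
	if err != nil {
		panic(err)
	}
	fmt.Println(pkcs, pss)
}
```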
+type NoOpOptionImpl struct{} + +// ApplyContext is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyContext(_ *context.Context) {} + +// ApplyCryptoSignerOpts is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyCryptoSignerOpts(_ *crypto.SignerOpts) {} + +// ApplyDigest is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyDigest(_ *[]byte) {} + +// ApplyRand is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRand(_ *io.Reader) {} + +// ApplyRemoteVerification is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRemoteVerification(_ *bool) {} + +// ApplyRPCAuthOpts is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRPCAuthOpts(_ *RPCAuth) {} + +// ApplyKeyVersion is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {} + +// ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {} + +// ApplyHash is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyHash(_ *crypto.Hash) {} + +// ApplyED25519ph is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyED25519ph(_ *bool) {} + +// ApplyRSAPSS is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRSAPSS(_ **rsa.PSSOptions) {} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go new file mode 100644 index 000000000..fd3a17f9e --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go @@ -0,0 +1,41 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package options + +import ( + crand "crypto/rand" + "io" +) + +// RequestRand implements the functional option pattern for using a specific source of entropy +type RequestRand struct { + NoOpOptionImpl + rand io.Reader +} + +// ApplyRand sets the specified source of entropy as the functional option +func (r RequestRand) ApplyRand(rand *io.Reader) { + *rand = r.rand +} + +// WithRand specifies that the given source of entropy should be used in signing operations +func WithRand(rand io.Reader) RequestRand { + r := rand + if r == nil { + r = crand.Reader + } + return RequestRand{rand: r} +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go new file mode 100644 index 000000000..26144adbb --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go @@ -0,0 +1,32 @@ +// +// Copyright 2021 The Sigstore Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RequestRemoteVerification implements the functional option pattern for remotely verifying signatures when possible
+type RequestRemoteVerification struct {
+ NoOpOptionImpl
+ remoteVerification bool
+}
+
+// ApplyRemoteVerification sets remote verification as a functional option
+func (r RequestRemoteVerification) ApplyRemoteVerification(remoteVerification *bool) {
+ *remoteVerification = r.remoteVerification
+}
+
+// WithRemoteVerification specifies that the verification operation should be performed remotely (vs in the process of the caller)
+func WithRemoteVerification(remoteVerification bool) RequestRemoteVerification {
+ return RequestRemoteVerification{remoteVerification: remoteVerification}
+} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
new file mode 100644
index 000000000..188de92dc
--- /dev/null
+++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go
@@ -0,0 +1,58 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+// RPCAuthOpts includes authentication settings for RPC calls
+type RPCAuthOpts struct {
+ NoOpOptionImpl
+ opts RPCAuth
+}
+
+// RPCAuth provides credentials for RPC calls, empty fields are ignored
+type RPCAuth struct {
+ Address string // address is the remote server address, e.g. 
https://vault:8200 + Path string // path for the RPC, in vault this is the transit path which default to "transit" + Token string // token used for RPC, in vault this is the VAULT_TOKEN value + OIDC RPCAuthOIDC +} + +// RPCAuthOIDC is used to perform the RPC login using OIDC instead of a fixed token +type RPCAuthOIDC struct { + Path string // path defaults to "jwt" for vault + Role string // role is required for jwt logins + Token string // token is a jwt with vault +} + +// ApplyRPCAuthOpts sets the RPCAuth as a function option +func (r RPCAuthOpts) ApplyRPCAuthOpts(opts *RPCAuth) { + if r.opts.Address != "" { + opts.Address = r.opts.Address + } + if r.opts.Path != "" { + opts.Path = r.opts.Path + } + if r.opts.Token != "" { + opts.Token = r.opts.Token + } + if r.opts.OIDC.Token != "" { + opts.OIDC = r.opts.OIDC + } +} + +// WithRPCAuthOpts specifies RPCAuth settings to be used with RPC logins +func WithRPCAuthOpts(opts RPCAuth) RPCAuthOpts { + return RPCAuthOpts{opts: opts} +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go new file mode 100644 index 000000000..1a3ac7394 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go @@ -0,0 +1,40 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package options + +import ( + "crypto" +) + +// RequestCryptoSignerOpts implements the functional option pattern for supplying crypto.SignerOpts when signing or verifying +type RequestCryptoSignerOpts struct { + NoOpOptionImpl + opts crypto.SignerOpts +} + +// ApplyCryptoSignerOpts sets crypto.SignerOpts as a functional option +func (r RequestCryptoSignerOpts) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) { + *opts = r.opts +} + +// WithCryptoSignerOpts specifies that provided crypto.SignerOpts be used during signing and verification operations +func WithCryptoSignerOpts(opts crypto.SignerOpts) RequestCryptoSignerOpts { + var optsToUse crypto.SignerOpts = crypto.SHA256 + if opts != nil { + optsToUse = opts + } + return RequestCryptoSignerOpts{opts: optsToUse} +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go new file mode 100644 index 000000000..3664185a0 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go @@ -0,0 +1,17 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package payload contains types and utilities related to the Cosign signature format.
+package payload diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
new file mode 100644
index 000000000..cab6f5b98
--- /dev/null
+++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
@@ -0,0 +1,122 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package payload defines a container image signature payload
+package payload
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/go-containerregistry/pkg/name"
+)
+
+// CosignSignatureType is the value of `critical.type` in a SimpleContainerImage payload.
+const CosignSignatureType = "cosign container image signature"
+
+// SimpleContainerImage describes the structure of a basic container image signature payload, as defined at:
+// https://github.com/containers/image/blob/main/docs/containers-signature.5.md#json-data-format
+type SimpleContainerImage struct {
+ Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature
+ Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image
+}
+
+// Critical data critical to correctly evaluating the validity of a signature
+type Critical struct {
+ Identity Identity `json:"identity"` // Identity claimed identity of the image
+ Image Image `json:"image"` // Image identifies the container that the signature applies to
+ Type string `json:"type"` // Type must be CosignSignatureType ('cosign container image signature')
+}
+
+// Identity is the claimed identity of the image
+type Identity struct {
+ DockerReference string `json:"docker-reference"` // DockerReference is a reference used to refer to or download the image
+}
+
+// Image identifies the container image that the signature applies to
+type Image struct {
+ DockerManifestDigest string `json:"docker-manifest-digest"` // DockerManifestDigest the manifest digest of the signed container image
+}
+
+// Cosign describes a container image signed using Cosign
+type Cosign struct {
+ Image name.Digest
+ // ClaimedIdentity is what the signer claims the image to be; usually a registry.com/…/repo:tag, but can also use a digest instead.
+ // ALMOST ALL consumers MUST verify that ClaimedIdentity in the signature is correct given how the user refers to the image;
+ // e.g. if the user asks to access a signed image example.com/repo/mysql:3.14,
+ // it is ALMOST ALWAYS necessary to validate that ClaimedIdentity = example.com/repo/mysql:3.14
+ //
+ // Considerations:
+ // - The user might refer to an image using a digest (example.com/repo/mysql@sha256:…); in that case the registry/…/repo should still match
+ // - If the image is multi-arch, ClaimedIdentity usually refers to the top-level multi-arch image index, and the same identity is used also on the per-arch images
+ // (possibly even if ClaimedIdentity contains a digest!)
+ // - Older versions of cosign generate signatures where ClaimedIdentity only contains a registry/…/repo; signature consumers should allow users
+ // to determine whether such images should be accepted (and, long-term, the default SHOULD be to reject them)
+ ClaimedIdentity string
+ Annotations map[string]interface{}
+}
+
+// SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format
+func (p Cosign) SimpleContainerImage() SimpleContainerImage {
+ dockerReference := p.Image.Repository.Name()
+ if p.ClaimedIdentity != "" {
+ dockerReference = p.ClaimedIdentity
+ }
+ return SimpleContainerImage{
+ Critical: Critical{
+ Identity: Identity{
+ DockerReference: dockerReference,
+ },
+ Image: Image{
+ DockerManifestDigest: p.Image.DigestStr(),
+ },
+ Type: CosignSignatureType,
+ },
+ Optional: p.Annotations,
+ }
+}
+
+// MarshalJSON marshals the container signature into a []byte of JSON data
+func (p Cosign) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.SimpleContainerImage())
+}
+
+var _ json.Marshaler = Cosign{}
+
+// UnmarshalJSON unmarshals []byte of JSON data into a container signature object
+func (p *Cosign) UnmarshalJSON(data []byte) error {
+ if string(data) == "null" {
+ // JSON "null" is a no-op by convention
+ return nil
+ }
+ var simple SimpleContainerImage
+ if err := json.Unmarshal(data, &simple); err != nil {
+ return err
+ }
+ if simple.Critical.Type != CosignSignatureType {
+ return fmt.Errorf("Cosign signature payload was of an unknown type: %q", simple.Critical.Type)
+ }
+ digestStr := simple.Critical.Identity.DockerReference + "@" + simple.Critical.Image.DockerManifestDigest
+ digest, err := name.NewDigest(digestStr)
+ if err != nil {
+ return fmt.Errorf("could not parse image digest string %q: %w", digestStr, err)
+ }
+ p.Image = digest
+ p.ClaimedIdentity = simple.Critical.Identity.DockerReference
+ p.Annotations = simple.Optional
+ return nil
+}
+
+var _ json.Unmarshaler = (*Cosign)(nil) diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go
new file mode 100644
index 000000000..6f6a47a9f
--- /dev/null
+++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go
@@ -0,0 +1,25 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
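[editor note] Putting the payload types together: a hedged sketch that builds a Cosign payload for a hypothetical image reference and marshals it into the simple signing format (the digest value is a placeholder; import paths are the upstream module paths):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/sigstore/sigstore/pkg/signature/payload"
)

func main() {
	// Placeholder digest: 64 hex chars; any valid sha256 digest works here.
	ref := "example.com/repo/app@sha256:" + strings.Repeat("a", 64)
	digest, err := name.NewDigest(ref)
	if err != nil {
		panic(err)
	}
	p := payload.Cosign{
		Image:           digest,
		ClaimedIdentity: "example.com/repo/app:v1", // what the signer claims; consumers must check it
		Annotations:     map[string]interface{}{"env": "prod"},
	}
	b, err := json.Marshal(p) // routes through SimpleContainerImage via MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```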
+
+package signature
+
+import (
+ "crypto"
+)
+
+// PublicKeyProvider returns a PublicKey associated with a digital signature
+type PublicKeyProvider interface {
+ PublicKey(opts ...PublicKeyOption) (crypto.PublicKey, error)
+} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go
new file mode 100644
index 000000000..1cac68a53
--- /dev/null
+++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go
@@ -0,0 +1,225 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package signature
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// RSAPKCS1v15Signer is a signature.Signer that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15Signer struct {
+ hashFunc crypto.Hash
+ priv *rsa.PrivateKey
+}
+
+// LoadRSAPKCS1v15Signer calculates signatures using the specified private key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512.
+func LoadRSAPKCS1v15Signer(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15Signer, error) {
+ if priv == nil {
+ return nil, errors.New("invalid RSA private key specified")
+ }
+
+ if !isSupportedAlg(hf, rsaSupportedHashFuncs) {
+ return nil, errors.New("invalid hash function specified")
+ }
+
+ return &RSAPKCS1v15Signer{
+ priv: priv,
+ hashFunc: hf,
+ }, nil
+}
+
+// SignMessage signs the provided message using PKCS1v15. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the RSAPKCS1v15Signer was created.
+//
+// SignMessage recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPKCS1v15Signer) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+ digest, hf, err := ComputeDigestForSigning(message, r.hashFunc, rsaSupportedHashFuncs, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ rand := selectRandFromOpts(opts...)
+
+ return rsa.SignPKCS1v15(rand, r.priv, hf, digest)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (r RSAPKCS1v15Signer) Public() crypto.PublicKey {
+ if r.priv == nil {
+ return nil
+ }
+
+ return r.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPKCS1v15Signer) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+ return r.Public(), nil
+}
+
+// Sign computes the signature for the specified digest using PKCS1v15.
+//
+// If a source of entropy is given in rand, it will be used instead of the default value (rand.Reader
+// from crypto/rand). 
+//
+// If opts are specified, they should specify the hash function used to compute digest. If opts are
+// not specified, this function assumes the hash function provided when the signer was created was
+// used to create the value specified in digest.
+func (r RSAPKCS1v15Signer) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+	rsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+	if opts != nil {
+		rsaOpts = append(rsaOpts, options.WithCryptoSignerOpts(opts))
+	}
+
+	return r.SignMessage(nil, rsaOpts...)
+}
+
+// RSAPKCS1v15Verifier is a signature.Verifier that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15Verifier struct {
+	publicKey *rsa.PublicKey
+	hashFunc  crypto.Hash
+}
+
+// LoadRSAPKCS1v15Verifier returns a Verifier that verifies signatures using the specified
+// RSA public key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512.
+func LoadRSAPKCS1v15Verifier(pub *rsa.PublicKey, hashFunc crypto.Hash) (*RSAPKCS1v15Verifier, error) {
+	if pub == nil {
+		return nil, errors.New("invalid RSA public key specified")
+	}
+
+	if !isSupportedAlg(hashFunc, rsaSupportedHashFuncs) {
+		return nil, errors.New("invalid hash function specified")
+	}
+
+	return &RSAPKCS1v15Verifier{
+		publicKey: pub,
+		hashFunc:  hashFunc,
+	}, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPKCS1v15Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+	return r.publicKey, nil
+}
+
+// VerifySignature verifies the signature for the given message using PKCS1v15. Unless provided
+// in an option, the digest of the message will be computed using the hash function specified
+// when the RSAPKCS1v15Verifier was created.
+//
+// This function returns nil if the verification succeeded, and an error message otherwise.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
+func (r RSAPKCS1v15Verifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
+	digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...)
+	if err != nil {
+		return err
+	}
+
+	if signature == nil {
+		return errors.New("nil signature passed to VerifySignature")
+	}
+
+	sigBytes, err := io.ReadAll(signature)
+	if err != nil {
+		return fmt.Errorf("reading signature: %w", err)
+	}
+
+	return rsa.VerifyPKCS1v15(r.publicKey, hf, digest, sigBytes)
+}
+
+// RSAPKCS1v15SignerVerifier is a signature.SignerVerifier that uses the RSA PKCS1v15 algorithm
+type RSAPKCS1v15SignerVerifier struct {
+	*RSAPKCS1v15Signer
+	*RSAPKCS1v15Verifier
+}
+
+// LoadRSAPKCS1v15SignerVerifier creates a combined signer and verifier. This is a convenience object
+// that simply wraps an instance of RSAPKCS1v15Signer and RSAPKCS1v15Verifier.
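+//
+// A minimal usage sketch (error handling elided):
+//
+//	priv, _ := rsa.GenerateKey(rand.Reader, 2048)
+//	sv, _ := LoadRSAPKCS1v15SignerVerifier(priv, crypto.SHA256)
+//	msg := []byte("payload")
+//	sig, _ := sv.SignMessage(bytes.NewReader(msg))
+//	err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg))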
+func LoadRSAPKCS1v15SignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15SignerVerifier, error) { + signer, err := LoadRSAPKCS1v15Signer(priv, hf) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + verifier, err := LoadRSAPKCS1v15Verifier(&priv.PublicKey, hf) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &RSAPKCS1v15SignerVerifier{ + RSAPKCS1v15Signer: signer, + RSAPKCS1v15Verifier: verifier, + }, nil +} + +// NewDefaultRSAPKCS1v15SignerVerifier creates a combined signer and verifier using RSA PKCS1v15. +// This creates a new RSA key of 2048 bits and uses the SHA256 hashing algorithm. +func NewDefaultRSAPKCS1v15SignerVerifier() (*RSAPKCS1v15SignerVerifier, *rsa.PrivateKey, error) { + return NewRSAPKCS1v15SignerVerifier(rand.Reader, 2048, crypto.SHA256) +} + +// NewRSAPKCS1v15SignerVerifier creates a combined signer and verifier using RSA PKCS1v15. +// This creates a new RSA key of the specified length of bits, entropy source, and hash function. +func NewRSAPKCS1v15SignerVerifier(rand io.Reader, bits int, hashFunc crypto.Hash) (*RSAPKCS1v15SignerVerifier, *rsa.PrivateKey, error) { + priv, err := rsa.GenerateKey(rand, bits) + if err != nil { + return nil, nil, err + } + + sv, err := LoadRSAPKCS1v15SignerVerifier(priv, hashFunc) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (r RSAPKCS1v15SignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return r.publicKey, nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go new file mode 100644 index 000000000..6e52bed9b --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go @@ -0,0 +1,260 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "errors" + "fmt" + "io" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// checked on LoadSigner, LoadVerifier, and SignMessage +var rsaSupportedHashFuncs = []crypto.Hash{ + crypto.SHA256, + crypto.SHA384, + crypto.SHA512, +} + +// checked on VerifySignature. Supports SHA1 verification. +var rsaSupportedVerifyHashFuncs = []crypto.Hash{ + crypto.SHA1, + crypto.SHA256, + crypto.SHA384, + crypto.SHA512, +} + +// RSAPSSSigner is a signature.Signer that uses the RSA PSS algorithm +type RSAPSSSigner struct { + hashFunc crypto.Hash + priv *rsa.PrivateKey + pssOpts *rsa.PSSOptions +} + +// LoadRSAPSSSigner calculates signatures using the specified private key and hash algorithm. +// +// If opts are specified, then they will be stored and used as a default if not overridden +// by the value passed to Sign(). 
+//
+// hf must be either SHA256, SHA384, or SHA512. opts.Hash is ignored.
+func LoadRSAPSSSigner(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSigner, error) {
+	if priv == nil {
+		return nil, errors.New("invalid RSA private key specified")
+	}
+
+	if !isSupportedAlg(hf, rsaSupportedHashFuncs) {
+		return nil, errors.New("invalid hash function specified")
+	}
+
+	return &RSAPSSSigner{
+		priv:     priv,
+		pssOpts:  opts,
+		hashFunc: hf,
+	}, nil
+}
+
+// SignMessage signs the provided message using PSS. If the message is provided,
+// this method will compute the digest according to the hash function specified
+// when the RSAPSSSigner was created.
+//
+// This function recognizes the following Options listed in order of preference:
+//
+// - WithRand()
+//
+// - WithDigest()
+//
+// - WithCryptoSignerOpts()
+//
+// All other options are ignored if specified.
func (r RSAPSSSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
+	digest, hf, err := ComputeDigestForSigning(message, r.hashFunc, rsaSupportedHashFuncs, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	rand := selectRandFromOpts(opts...)
+	pssOpts := r.pssOpts
+	if pssOpts == nil {
+		pssOpts = &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+		}
+	}
+	pssOpts.Hash = hf
+
+	return rsa.SignPSS(rand, r.priv, hf, digest, pssOpts)
+}
+
+// Public returns the public key that can be used to verify signatures created by
+// this signer.
+func (r RSAPSSSigner) Public() crypto.PublicKey {
+	if r.priv == nil {
+		return nil
+	}
+
+	return r.priv.Public()
+}
+
+// PublicKey returns the public key that can be used to verify signatures created by
+// this signer. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
+	return r.Public(), nil
+}
+
+// Sign computes the signature for the specified digest using PSS.
+//
+// If a source of entropy is given in rand, it will be used instead of the default value (rand.Reader
+// from crypto/rand).
+//
+// If opts are specified, they must be *rsa.PSSOptions. If opts are not specified, the hash function
+// provided when the signer was created will be assumed.
+func (r RSAPSSSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+	rsaOpts := []SignOption{options.WithDigest(digest), options.WithRand(rand)}
+	if opts != nil {
+		rsaOpts = append(rsaOpts, options.WithCryptoSignerOpts(opts))
+	}
+
+	return r.SignMessage(nil, rsaOpts...)
+}
+
+// RSAPSSVerifier is a signature.Verifier that uses the RSA PSS algorithm
+type RSAPSSVerifier struct {
+	publicKey *rsa.PublicKey
+	hashFunc  crypto.Hash
+	pssOpts   *rsa.PSSOptions
+}
+
+// LoadRSAPSSVerifier verifies signatures using the specified public key and hash algorithm.
+//
+// hf must be either SHA256, SHA384, or SHA512. opts.Hash is ignored.
+func LoadRSAPSSVerifier(pub *rsa.PublicKey, hashFunc crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSVerifier, error) {
+	if pub == nil {
+		return nil, errors.New("invalid RSA public key specified")
+	}
+
+	if !isSupportedAlg(hashFunc, rsaSupportedHashFuncs) {
+		return nil, errors.New("invalid hash function specified")
+	}
+
+	return &RSAPSSVerifier{
+		publicKey: pub,
+		hashFunc:  hashFunc,
+		pssOpts:   opts,
+	}, nil
+}
+
+// PublicKey returns the public key that is used to verify signatures by
+// this verifier. As this value is held in memory, all options provided in arguments
+// to this method are ignored.
+func (r RSAPSSVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return r.publicKey, nil +} + +// VerifySignature verifies the signature for the given message using PSS. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the RSAPSSVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// - WithCryptoSignerOpts() +// +// All other options are ignored if specified. +func (r RSAPSSVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...) + if err != nil { + return err + } + + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + // rsa.VerifyPSS ignores pssOpts.Hash, so we don't set it + pssOpts := r.pssOpts + if pssOpts == nil { + pssOpts = &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + } + } + + return rsa.VerifyPSS(r.publicKey, hf, digest, sigBytes, pssOpts) +} + +// RSAPSSSignerVerifier is a signature.SignerVerifier that uses the RSA PSS algorithm +type RSAPSSSignerVerifier struct { + *RSAPSSSigner + *RSAPSSVerifier +} + +// LoadRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS. This is +// a convenience object that simply wraps an instance of RSAPSSSigner and RSAPSSVerifier. +func LoadRSAPSSSignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSignerVerifier, error) { + signer, err := LoadRSAPSSSigner(priv, hf, opts) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + verifier, err := LoadRSAPSSVerifier(&priv.PublicKey, hf, opts) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &RSAPSSSignerVerifier{ + RSAPSSSigner: signer, + RSAPSSVerifier: verifier, + }, nil +} + +// NewDefaultRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS. +// This creates a new RSA key of 2048 bits and uses the SHA256 hashing algorithm. +func NewDefaultRSAPSSSignerVerifier() (*RSAPSSSignerVerifier, *rsa.PrivateKey, error) { + return NewRSAPSSSignerVerifier(rand.Reader, 2048, crypto.SHA256) +} + +// NewRSAPSSSignerVerifier creates a combined signer and verifier using RSA PSS. +// This creates a new RSA key of the specified length of bits, entropy source, and hash function. +func NewRSAPSSSignerVerifier(rand io.Reader, bits int, hashFunc crypto.Hash) (*RSAPSSSignerVerifier, *rsa.PrivateKey, error) { + priv, err := rsa.GenerateKey(rand, bits) + if err != nil { + return nil, nil, err + } + + sv, err := LoadRSAPSSSignerVerifier(priv, hashFunc, &rsa.PSSOptions{Hash: hashFunc}) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. 
+func (r RSAPSSSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return r.publicKey, nil +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go new file mode 100644 index 000000000..1122989ff --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go @@ -0,0 +1,147 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "io" + "os" + "path/filepath" + + // these ensure we have the implementations loaded + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" + + // these ensure we have the implementations loaded + _ "golang.org/x/crypto/sha3" +) + +// Signer creates digital signatures over a message using a specified key pair +type Signer interface { + PublicKeyProvider + SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) +} + +// SignerOpts implements crypto.SignerOpts but also allows callers to specify +// additional options that may be utilized in signing the digest provided. +type SignerOpts struct { + Hash crypto.Hash + Opts []SignOption +} + +// HashFunc returns the hash function for this object +func (s SignerOpts) HashFunc() crypto.Hash { + return s.Hash +} + +// LoadSigner returns a signature.Signer based on the algorithm of the private key +// provided. +// +// If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a +// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly. +func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) { + return LoadSignerWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerWithOpts returns a signature.Signer based on the algorithm of the private key +// provided. +func LoadSignerWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := privateKey.(type) { + case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSigner(pk, hashFunc, rsaPSSOptions) + } + return LoadRSAPKCS1v15Signer(pk, hashFunc) + case *ecdsa.PrivateKey: + return LoadECDSASigner(pk, hashFunc) + case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSigner(pk) + } + return LoadED25519Signer(pk) + } + return nil, errors.New("unsupported public key type") +} + +// LoadSignerFromPEMFile returns a signature.Signer based on the algorithm of the private key +// in the file. The Signer will use the hash function specified when computing digests. +// +// If key is an RSA key, a RSAPKCS1v15Signer will be returned. 
If a +// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() and +// cryptoutils.UnmarshalPEMToPrivateKey() methods directly. +func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (Signer, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSigner(priv, hashFunc) +} + +// LoadSignerFromPEMFileWithOpts returns a signature.Signer based on the algorithm of the private key +// in the file. The Signer will use the hash function specified in the options when computing digests. +func LoadSignerFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (Signer, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerWithOpts(priv, opts...) +} + +// LoadDefaultSigner returns a signature.Signer based on the private key. +// Each private key has a corresponding PublicKeyDetails associated in the +// Sigstore ecosystem, see Algorithm Registry for more details. +func LoadDefaultSigner(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) { + signer, ok := privateKey.(crypto.Signer) + if !ok { + return nil, errors.New("private key does not implement signature.Signer") + } + algorithmDetails, err := GetDefaultAlgorithmDetails(signer.Public(), opts...) + if err != nil { + return nil, err + } + return LoadSignerFromAlgorithmDetails(privateKey, algorithmDetails, opts...) +} + +// LoadSignerFromAlgorithmDetails returns a signature.Signer based on +// the algorithm details and the user's choice of options. +func LoadSignerFromAlgorithmDetails(privateKey crypto.PrivateKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (Signer, error) { + filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...) + return LoadSignerWithOpts(privateKey, filteredOpts...) +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go new file mode 100644 index 000000000..9ff93420e --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go @@ -0,0 +1,127 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "os" + "path/filepath" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// SignerVerifier creates and verifies digital signatures over a message using a specified key pair +type SignerVerifier interface { + Signer + Verifier +} + +// LoadSignerVerifier returns a signature.SignerVerifier based on the algorithm of the private key +// provided. 
+// +// If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a +// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly. +func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) { + return LoadSignerVerifierWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerVerifierWithOpts returns a signature.SignerVerifier based on the +// algorithm of the private key provided and the user's choice. +func LoadSignerVerifierWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := privateKey.(type) { + case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSignerVerifier(pk, hashFunc, rsaPSSOptions) + } + return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc) + case *ecdsa.PrivateKey: + return LoadECDSASignerVerifier(pk, hashFunc) + case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSignerVerifier(pk) + } + return LoadED25519SignerVerifier(pk) + } + return nil, errors.New("unsupported public key type") +} + +// LoadSignerVerifierFromPEMFile returns a signature.SignerVerifier based on the algorithm of the private key +// in the file. The SignerVerifier will use the hash function specified when computing digests. +// +// If publicKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a +// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() and +// cryptoutils.UnmarshalPEMToPrivateKey() methods directly. +func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (SignerVerifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerVerifier(priv, hashFunc) +} + +// LoadSignerVerifierFromPEMFileWithOpts returns a signature.SignerVerifier based on the algorithm of the private key +// in the file. The SignerVerifier will use the hash function specified in the options when computing digests. +func LoadSignerVerifierFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (SignerVerifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerVerifierWithOpts(priv, opts...) +} + +// LoadDefaultSignerVerifier returns a signature.SignerVerifier based on +// the private key. Each private key has a corresponding PublicKeyDetails +// associated in the Sigstore ecosystem, see Algorithm Registry for more details. +func LoadDefaultSignerVerifier(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) { + signer, ok := privateKey.(crypto.Signer) + if !ok { + return nil, errors.New("private key does not implement signature.Signer") + } + algorithmDetails, err := GetDefaultAlgorithmDetails(signer.Public(), opts...) + if err != nil { + return nil, err + } + return LoadSignerVerifierFromAlgorithmDetails(privateKey, algorithmDetails, opts...) 
+} + +// LoadSignerVerifierFromAlgorithmDetails returns a signature.SignerVerifier based on +// the algorithm details and the user's choice of options. +func LoadSignerVerifierFromAlgorithmDetails(privateKey crypto.PrivateKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (SignerVerifier, error) { + filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...) + return LoadSignerVerifierWithOpts(privateKey, filteredOpts...) +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/util.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/util.go new file mode 100644 index 000000000..3f8beff49 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/util.go @@ -0,0 +1,74 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "bytes" + "crypto/rsa" + "encoding/json" + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + + "github.com/sigstore/sigstore/pkg/signature/options" + sigpayload "github.com/sigstore/sigstore/pkg/signature/payload" +) + +// SignImage signs a container manifest using the specified signer object +func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map[string]interface{}) (payload, signature []byte, err error) { + imgPayload := sigpayload.Cosign{ + Image: image, + Annotations: optionalAnnotations, + } + payload, err = json.Marshal(imgPayload) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal payload to JSON: %w", err) + } + signature, err = signer.SignMessage(bytes.NewReader(payload)) + if err != nil { + return nil, nil, fmt.Errorf("failed to sign payload: %w", err) + } + return payload, signature, nil +} + +// VerifyImageSignature verifies a signature over a container manifest +func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (image name.Digest, annotations map[string]interface{}, err error) { + if err := signer.VerifySignature(bytes.NewReader(signature), bytes.NewReader(payload)); err != nil { + return name.Digest{}, nil, fmt.Errorf("signature verification failed: %w", err) + } + var imgPayload sigpayload.Cosign + if err := json.Unmarshal(payload, &imgPayload); err != nil { + return name.Digest{}, nil, fmt.Errorf("could not deserialize image payload: %w", err) + } + return imgPayload.Image, imgPayload.Annotations, nil +} + +// GetOptsFromAlgorithmDetails returns a list of LoadOptions that are +// appropriate for the given algorithm details. It ignores the hash type because +// that can be retrieved from the algorithm details. 
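+//
+// A minimal sketch (details and userOpts supplied by the caller, e.g. details
+// obtained from GetDefaultAlgorithmDetails):
+//
+//	loadOpts := GetOptsFromAlgorithmDetails(details, userOpts...)
+//	sv, _ := LoadSignerVerifierWithOpts(priv, loadOpts...)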
+func GetOptsFromAlgorithmDetails(algorithmDetails AlgorithmDetails, opts ...LoadOption) []LoadOption { + res := []LoadOption{options.WithHash(algorithmDetails.hashType)} + for _, opt := range opts { + var useED25519ph bool + var rsaPSSOptions *rsa.PSSOptions + opt.ApplyED25519ph(&useED25519ph) + opt.ApplyRSAPSS(&rsaPSSOptions) + if useED25519ph || rsaPSSOptions != nil { + res = append(res, opt) + } + } + return res +} diff --git a/tools/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go new file mode 100644 index 000000000..0b5a1bba7 --- /dev/null +++ b/tools/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go @@ -0,0 +1,156 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "io" + "os" + "path/filepath" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" +) + +// Verifier verifies the digital signature using a specified public key +type Verifier interface { + PublicKeyProvider + VerifySignature(signature, message io.Reader, opts ...VerifyOption) error +} + +// LoadVerifier returns a signature.Verifier based on the algorithm of the public key +// provided that will use the hash function specified when computing digests. +// +// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a +// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly. +func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) { + return LoadVerifierWithOpts(publicKey, options.WithHash(hashFunc)) +} + +// LoadVerifierWithOpts returns a signature.Verifier based on the algorithm of the public key +// provided that will use the hash function specified when computing digests. +func LoadVerifierWithOpts(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := publicKey.(type) { + case *rsa.PublicKey: + if rsaPSSOptions != nil { + return LoadRSAPSSVerifier(pk, hashFunc, rsaPSSOptions) + } + return LoadRSAPKCS1v15Verifier(pk, hashFunc) + case *ecdsa.PublicKey: + return LoadECDSAVerifier(pk, hashFunc) + case ed25519.PublicKey: + if useED25519ph { + return LoadED25519phVerifier(pk) + } + return LoadED25519Verifier(pk) + } + return nil, errors.New("unsupported public key type") +} + +// LoadUnsafeVerifier returns a signature.Verifier based on the algorithm of the public key +// provided that will use SHA1 when computing digests for RSA and ECDSA signatures. +// +// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a +// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly. 
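+//
+// A minimal sketch; SHA1 is cryptographically weak, so prefer LoadVerifier
+// unless legacy signatures require it:
+//
+//	v, _ := LoadUnsafeVerifier(pub)
+//	err := v.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg))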
+func LoadUnsafeVerifier(publicKey crypto.PublicKey) (Verifier, error) {
+	switch pk := publicKey.(type) {
+	case *rsa.PublicKey:
+		if pk == nil {
+			return nil, errors.New("invalid RSA public key specified")
+		}
+		return &RSAPKCS1v15Verifier{
+			publicKey: pk,
+			hashFunc:  crypto.SHA1,
+		}, nil
+	case *ecdsa.PublicKey:
+		if pk == nil {
+			return nil, errors.New("invalid ECDSA public key specified")
+		}
+		return &ECDSAVerifier{
+			publicKey: pk,
+			hashFunc:  crypto.SHA1,
+		}, nil
+	case ed25519.PublicKey:
+		return LoadED25519Verifier(pk)
+	}
+	return nil, errors.New("unsupported public key type")
+}
+
+// LoadVerifierFromPEMFile returns a signature.Verifier based on the contents of a
+// file located at path. The Verifier will use the hash function specified when computing digests.
+//
+// If the public key is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
+// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() and cryptoutils.UnmarshalPEMToPublicKey() methods directly.
+func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error) {
+	fileBytes, err := os.ReadFile(filepath.Clean(path))
+	if err != nil {
+		return nil, err
+	}
+
+	pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return LoadVerifier(pubKey, hashFunc)
+}
+
+// LoadVerifierFromPEMFileWithOpts returns a signature.Verifier based on the contents of a
+// file located at path. The Verifier will use the hash function specified in the options when computing digests.
+func LoadVerifierFromPEMFileWithOpts(path string, opts ...LoadOption) (Verifier, error) {
+	fileBytes, err := os.ReadFile(filepath.Clean(path))
+	if err != nil {
+		return nil, err
+	}
+
+	pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return LoadVerifierWithOpts(pubKey, opts...)
+}
+
+// LoadDefaultVerifier returns a signature.Verifier based on the public key.
+// Each public key has a corresponding PublicKeyDetails associated in the
+// Sigstore ecosystem, see Algorithm Registry for more details.
+func LoadDefaultVerifier(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) {
+	algorithmDetails, err := GetDefaultAlgorithmDetails(publicKey, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return LoadVerifierFromAlgorithmDetails(publicKey, algorithmDetails, opts...)
+}
+
+// LoadVerifierFromAlgorithmDetails returns a signature.Verifier based on
+// the algorithm details and the user's choice of options.
+func LoadVerifierFromAlgorithmDetails(publicKey crypto.PublicKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (Verifier, error) {
+	filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...)
+	return LoadVerifierWithOpts(publicKey, filteredOpts...)
+}
diff --git a/tools/vendor/github.com/docker/distribution/.gitignore b/tools/vendor/github.com/smallstep/pkcs7/.gitignore
similarity index 60%
rename from tools/vendor/github.com/docker/distribution/.gitignore
rename to tools/vendor/github.com/smallstep/pkcs7/.gitignore
index 4cf7888e9..948aae2ac 100644
--- a/tools/vendor/github.com/docker/distribution/.gitignore
+++ b/tools/vendor/github.com/smallstep/pkcs7/.gitignore
@@ -23,16 +23,6 @@ _testmain.go
 *.test
 *.prof
 
-# never checkin from the bin file (for now)
-bin/*
-
-# Test key files
-*.pem
-
-# Cover profiles
-*.out
-
-# Editor/IDE specific files.
-*.sublime-project -*.sublime-workspace -.idea/* +# Development +.envrc +coverage.out \ No newline at end of file diff --git a/tools/vendor/github.com/smallstep/pkcs7/LICENSE b/tools/vendor/github.com/smallstep/pkcs7/LICENSE new file mode 100644 index 000000000..75f320908 --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Smith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/tools/vendor/github.com/smallstep/pkcs7/Makefile b/tools/vendor/github.com/smallstep/pkcs7/Makefile new file mode 100644 index 000000000..47c73b868 --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/Makefile @@ -0,0 +1,20 @@ +all: vet staticcheck test + +test: + go test -covermode=count -coverprofile=coverage.out . + +showcoverage: test + go tool cover -html=coverage.out + +vet: + go vet . + +lint: + golint . + +staticcheck: + staticcheck . + +gettools: + go get -u honnef.co/go/tools/... + go get -u golang.org/x/lint/golint diff --git a/tools/vendor/github.com/smallstep/pkcs7/README.md b/tools/vendor/github.com/smallstep/pkcs7/README.md new file mode 100644 index 000000000..9d94e65f2 --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/README.md @@ -0,0 +1,63 @@ +# pkcs7 + +[![Go Reference](https://pkg.go.dev/badge/github.com/smallstep/pkcs7.svg)](https://pkg.go.dev/github.com/smallstep/pkcs7) +[![Build Status](https://github.com/smallstep/pkcs7/workflows/CI/badge.svg?query=branch%3Amain+event%3Apush)](https://github.com/smallstep/pkcs7/actions/workflows/ci.yml?query=branch%3Amain+event%3Apush) + +pkcs7 implements parsing and creating signed and enveloped messages. 
+
+```go
+package main
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"os"
+
+	"github.com/smallstep/pkcs7"
+)
+
+func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) {
+	toBeSigned, err := pkcs7.NewSignedData(content)
+	if err != nil {
+		return nil, fmt.Errorf("Cannot initialize signed data: %w", err)
+	}
+	if err = toBeSigned.AddSigner(cert, privkey, pkcs7.SignerInfoConfig{}); err != nil {
+		return nil, fmt.Errorf("Cannot add signer: %w", err)
+	}
+
+	// Detach signature, omit if you want an embedded signature
+	toBeSigned.Detach()
+
+	signed, err = toBeSigned.Finish()
+	if err != nil {
+		return nil, fmt.Errorf("Cannot finish signing data: %w", err)
+	}
+
+	// Write out the signature, then parse it back and verify it
+	pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed})
+	p7, err := pkcs7.Parse(signed)
+	if err != nil {
+		return nil, fmt.Errorf("Cannot parse our signed data: %w", err)
+	}
+
+	// since the signature was detached, reattach the content here
+	p7.Content = content
+
+	if !bytes.Equal(content, p7.Content) {
+		return nil, fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content)
+	}
+	if err = p7.Verify(); err != nil {
+		return nil, fmt.Errorf("Cannot verify our signed data: %w", err)
+	}
+
+	return signed, nil
+}
+```
+
+
+## Credits
+
+This is a fork of [mozilla-services/pkcs7](https://github.com/mozilla-services/pkcs7) which, itself, was a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7).
diff --git a/tools/vendor/github.com/smallstep/pkcs7/ber.go b/tools/vendor/github.com/smallstep/pkcs7/ber.go
new file mode 100644
index 000000000..52333215d
--- /dev/null
+++ b/tools/vendor/github.com/smallstep/pkcs7/ber.go
@@ -0,0 +1,266 @@
+package pkcs7
+
+import (
+	"bytes"
+	"errors"
+)
+
+type asn1Object interface {
+	EncodeTo(writer *bytes.Buffer) error
+}
+
+type asn1Structured struct {
+	tagBytes []byte
+	content  []asn1Object
+}
+
+func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
+	inner := new(bytes.Buffer)
+	for _, obj := range s.content {
+		err := obj.EncodeTo(inner)
+		if err != nil {
+			return err
+		}
+	}
+	out.Write(s.tagBytes)
+	encodeLength(out, inner.Len())
+	out.Write(inner.Bytes())
+	return nil
+}
+
+type asn1Primitive struct {
+	tagBytes []byte
+	length   int
+	content  []byte
+}
+
+func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error {
+	_, err := out.Write(p.tagBytes)
+	if err != nil {
+		return err
+	}
+	if err = encodeLength(out, p.length); err != nil {
+		return err
+	}
+	// fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length)
+	// fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content))
+	out.Write(p.content)
+
+	return nil
+}
+
+func ber2der(ber []byte) ([]byte, error) {
+	if len(ber) == 0 {
+		return nil, errors.New("ber2der: input ber is empty")
+	}
+	// fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber))
+	out := new(bytes.Buffer)
+
+	obj, _, err := readObject(ber, 0)
+	if err != nil {
+		return nil, err
+	}
+	obj.EncodeTo(out)
+
+	// if offset < len(ber) {
+	//	return nil, fmt.Errorf("ber2der: Content longer than expected. Got %d, expected %d", offset, len(ber))
+	// }
+
+	return out.Bytes(), nil
+}
+
+// encodes lengths that are longer than 127 into string of bytes
+func marshalLongLength(out *bytes.Buffer, i int) (err error) {
+	n := lengthLength(i)
+
+	for ; n > 0; n-- {
+		err = out.WriteByte(byte(i >> uint((n-1)*8)))
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
+
+// computes the byte length of an encoded length value
+func lengthLength(i int) (numBytes int) {
+	numBytes = 1
+	for i > 255 {
+		numBytes++
+		i >>= 8
+	}
+	return
+}
+
+// encodes the length in DER format
+// If the length fits in 7 bits, the value is encoded directly.
+//
+// Otherwise, the number of bytes to encode the length is first determined.
+// This number is likely to be 4 or less for a 32bit length. This number is
+// added to 0x80. The length is then encoded in big-endian byte order,
+// following that initial byte.
+//
+// Examples:
+//	length | byte 1 | bytes n
+//	0      | 0x00   | -
+//	120    | 0x78   | -
+//	200    | 0x81   | 0xC8
+//	500    | 0x82   | 0x01 0xF4
+//
+func encodeLength(out *bytes.Buffer, length int) (err error) {
+	if length >= 128 {
+		l := lengthLength(length)
+		err = out.WriteByte(0x80 | byte(l))
+		if err != nil {
+			return
+		}
+		err = marshalLongLength(out, length)
+		if err != nil {
+			return
+		}
+	} else {
+		err = out.WriteByte(byte(length))
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+func readObject(ber []byte, offset int) (asn1Object, int, error) {
+	berLen := len(ber)
+	if offset >= berLen {
+		return nil, 0, errors.New("ber2der: offset is after end of ber data")
+	}
+	tagStart := offset
+	b := ber[offset]
+	offset++
+	if offset >= berLen {
+		return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+	}
+	tag := b & 0x1F // last 5 bits
+	if tag == 0x1F {
+		tag = 0
+		for ber[offset] >= 0x80 {
+			tag = tag*128 + ber[offset] - 0x80
+			offset++
+			if offset > berLen {
+				return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+			}
+		}
+		// jvehent 20170227: this doesn't appear to be used anywhere...
+ // tag = tag*128 + ber[offset] - 0x80 + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + tagEnd := offset + + kind := b & 0x20 + if kind == 0 { + debugprint("--> Primitive\n") + } else { + debugprint("--> Constructed\n") + } + // read length + var length int + l := ber[offset] + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + indefinite := false + if l > 0x80 { + numberOfBytes := (int)(l & 0x7F) + if numberOfBytes > 4 { // int is only guaranteed to be 32bit + return nil, 0, errors.New("ber2der: BER tag length too long") + } + if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F { + return nil, 0, errors.New("ber2der: BER tag length is negative") + } + if (int)(ber[offset]) == 0x0 { + return nil, 0, errors.New("ber2der: BER tag length has leading zero") + } + debugprint("--> (compute length) indicator byte: %x\n", l) + debugprint("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes]) + for i := 0; i < numberOfBytes; i++ { + length = length*256 + (int)(ber[offset]) + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + } else if l == 0x80 { + indefinite = true + } else { + length = (int)(l) + } + if length < 0 { + return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length") + } + // fmt.Printf("--> length : %d\n", length) + contentEnd := offset + length + if contentEnd > len(ber) { + return nil, 0, errors.New("ber2der: BER tag length is more than available data") + } + debugprint("--> content start : %d\n", offset) + debugprint("--> content end : %d\n", contentEnd) + debugprint("--> content : % X\n", ber[offset:contentEnd]) + var obj asn1Object + if indefinite && kind == 0 { + return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding") + } + if kind == 0 { + obj = asn1Primitive{ + tagBytes: ber[tagStart:tagEnd], + length: length, + content: ber[offset:contentEnd], + } + } else { + var subObjects []asn1Object + for (offset < contentEnd) || indefinite { + var subObj asn1Object + var err error + subObj, offset, err = readObject(ber, offset) + if err != nil { + return nil, 0, err + } + subObjects = append(subObjects, subObj) + + if indefinite { + terminated, err := isIndefiniteTermination(ber, offset) + if err != nil { + return nil, 0, err + } + + if terminated { + break + } + } + } + obj = asn1Structured{ + tagBytes: ber[tagStart:tagEnd], + content: subObjects, + } + } + + // Apply indefinite form length with 0x0000 terminator. 
+ if indefinite { + contentEnd = offset + 2 + } + + return obj, contentEnd, nil +} + +func isIndefiniteTermination(ber []byte, offset int) (bool, error) { + if len(ber)-offset < 2 { + return false, errors.New("ber2der: Invalid BER format") + } + + return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil +} + +func debugprint(format string, a ...interface{}) { + // fmt.Printf(format, a) +} diff --git a/tools/vendor/github.com/smallstep/pkcs7/decrypt.go b/tools/vendor/github.com/smallstep/pkcs7/decrypt.go new file mode 100644 index 000000000..76dc17f74 --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/decrypt.go @@ -0,0 +1,233 @@ +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed +var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported") + +// ErrUnsupportedAsymmetricEncryptionAlgorithm is returned when attempting to use an unknown asymmetric encryption algorithm +var ErrUnsupportedAsymmetricEncryptionAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA PKCS#1 v1.5 and RSA OAEP are supported") + +// ErrUnsupportedKeyType is returned when attempting to encrypting keys using a key that's not an RSA key +var ErrUnsupportedKeyType = errors.New("pkcs7: only RSA keys are supported") + +// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data +var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type") + +// Decrypt decrypts encrypted content info for recipient cert and private key +func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte, error) { + data, ok := p7.raw.(envelopedData) + if !ok { + return nil, ErrNotEncryptedContent + } + recipient := selectRecipientForCertificate(data.RecipientInfos, cert) + if recipient.EncryptedKey == nil { + return nil, errors.New("pkcs7: no enveloped recipient for provided certificate") + } + switch pkey := pkey.(type) { + case crypto.Decrypter: + var opts crypto.DecrypterOpts + switch algorithm := recipient.KeyEncryptionAlgorithm.Algorithm; { + case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP): + hashFunc, err := getHashFuncForKeyEncryptionAlgorithm(recipient.KeyEncryptionAlgorithm) + if err != nil { + return nil, err + } + opts = &rsa.OAEPOptions{Hash: hashFunc} + case algorithm.Equal(OIDEncryptionAlgorithmRSA): + opts = &rsa.PKCS1v15DecryptOptions{} + default: + return nil, ErrUnsupportedAsymmetricEncryptionAlgorithm + } + contentKey, err := pkey.Decrypt(rand.Reader, recipient.EncryptedKey, opts) + if err != nil { + return nil, err + } + return data.EncryptedContentInfo.decrypt(contentKey) + } + return nil, ErrUnsupportedAlgorithm +} + +// RFC 4055, 4.1 +// The current ASN.1 parser does not support non-integer defaults so the 'default:' tags here do nothing. 
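+//
+// The struct mirrors RSAES-OAEP-params from RFC 4055, 4.1:
+//
+//	RSAES-OAEP-params ::= SEQUENCE {
+//	    hashFunc    [0] AlgorithmIdentifier DEFAULT sha1Identifier,
+//	    maskGenFunc [1] AlgorithmIdentifier DEFAULT mgf1SHA1Identifier,
+//	    pSourceFunc [2] AlgorithmIdentifier DEFAULT pSpecifiedEmptyIdentifier }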
+type rsaOAEPAlgorithmParameters struct {
+	HashFunc    pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:0,default:sha1Identifier"`
+	MaskGenFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:1,default:mgf1SHA1Identifier"`
+	PSourceFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:2,default:pSpecifiedEmptyIdentifier"`
+}
+
+func getHashFuncForKeyEncryptionAlgorithm(keyEncryptionAlgorithm pkix.AlgorithmIdentifier) (crypto.Hash, error) {
+	invalidHashFunc := crypto.Hash(0)
+	params := &rsaOAEPAlgorithmParameters{
+		HashFunc: pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA1}, // set default hash algorithm to SHA1
+	}
+	var rest []byte
+	rest, err := asn1.Unmarshal(keyEncryptionAlgorithm.Parameters.FullBytes, params)
+	if err != nil {
+		return invalidHashFunc, fmt.Errorf("pkcs7: failed unmarshaling key encryption algorithm parameters: %v", err)
+	}
+	if len(rest) != 0 {
+		return invalidHashFunc, errors.New("pkcs7: trailing data after RSA OAEP parameters")
+	}
+
+	switch {
+	case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA1):
+		return crypto.SHA1, nil
+	case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA224):
+		return crypto.SHA224, nil
+	case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA256):
+		return crypto.SHA256, nil
+	case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA384):
+		return crypto.SHA384, nil
+	case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA512):
+		return crypto.SHA512, nil
+	default:
+		return invalidHashFunc, errors.New("pkcs7: unsupported hash function for RSA OAEP")
+	}
+}
+
+// DecryptUsingPSK decrypts encrypted data using caller provided
+// pre-shared secret
+func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) {
+	data, ok := p7.raw.(encryptedData)
+	if !ok {
+		return nil, ErrNotEncryptedContent
+	}
+	return data.EncryptedContentInfo.decrypt(key)
+}
+
+func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
+	alg := eci.ContentEncryptionAlgorithm.Algorithm
+	if !alg.Equal(OIDEncryptionAlgorithmDESCBC) &&
+		!alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC) &&
+		!alg.Equal(OIDEncryptionAlgorithmAES256CBC) &&
+		!alg.Equal(OIDEncryptionAlgorithmAES128CBC) &&
+		!alg.Equal(OIDEncryptionAlgorithmAES128GCM) &&
+		!alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
+		return nil, ErrUnsupportedAlgorithm
+	}
+
+	// EncryptedContent can either be constructed of multiple OCTET STRINGs
+	// or _be_ a tagged OCTET STRING
+	var cyphertext []byte
+	if eci.EncryptedContent.IsCompound {
+		// Complex case to concat all of the children OCTET STRINGs
+		var buf bytes.Buffer
+		cypherbytes := eci.EncryptedContent.Bytes
+		for {
+			var part []byte
+			cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
+			buf.Write(part)
+			if cypherbytes == nil {
+				break
+			}
+		}
+		cyphertext = buf.Bytes()
+	} else {
+		// Simple case, the bytes _are_ the cyphertext
+		cyphertext = eci.EncryptedContent.Bytes
+	}
+
+	var block cipher.Block
+	var err error
+
+	switch {
+	case alg.Equal(OIDEncryptionAlgorithmDESCBC):
+		block, err = des.NewCipher(key)
+	case alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC):
+		block, err = des.NewTripleDESCipher(key)
+	case alg.Equal(OIDEncryptionAlgorithmAES256CBC), alg.Equal(OIDEncryptionAlgorithmAES256GCM):
+		fallthrough
+	case alg.Equal(OIDEncryptionAlgorithmAES128GCM), alg.Equal(OIDEncryptionAlgorithmAES128CBC):
+		block, err = aes.NewCipher(key)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if alg.Equal(OIDEncryptionAlgorithmAES128GCM) || alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
+		params := 
aesGCMParameters{} + paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes + + _, err := asn1.Unmarshal(paramBytes, ¶ms) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + if len(params.Nonce) != gcm.NonceSize() { + return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect") + } + if params.ICVLen != gcm.Overhead() { + return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect") + } + + plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil) + if err != nil { + return nil, err + } + + return plaintext, nil + } + + iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes + if len(iv) != block.BlockSize() { + return nil, errors.New("pkcs7: encryption algorithm parameters are malformed") + } + mode := cipher.NewCBCDecrypter(block, iv) + plaintext := make([]byte, len(cyphertext)) + mode.CryptBlocks(plaintext, cyphertext) + if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil { + return nil, err + } + return plaintext, nil +} + +func unpad(data []byte, blocklen int) ([]byte, error) { + if blocklen < 1 { + return nil, fmt.Errorf("pkcs7: invalid blocklen %d", blocklen) + } + if len(data)%blocklen != 0 || len(data) == 0 { + return nil, fmt.Errorf("pkcs7: invalid data len %d", len(data)) + } + + // the last byte is the length of padding + padlen := int(data[len(data)-1]) + + // check padding integrity, all bytes should be the same + pad := data[len(data)-padlen:] + for _, padbyte := range pad { + if padbyte != byte(padlen) { + return nil, errors.New("pkcs7: invalid padding") + } + } + + return data[:len(data)-padlen], nil +} + +func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo { + for _, recp := range recipients { + if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) { + return recp + } + } + return recipientInfo{} +} diff --git a/tools/vendor/github.com/smallstep/pkcs7/encrypt.go b/tools/vendor/github.com/smallstep/pkcs7/encrypt.go new file mode 100644 index 000000000..a5c96e755 --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/encrypt.go @@ -0,0 +1,475 @@ +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +type envelopedData struct { + Version int + RecipientInfos []recipientInfo `asn1:"set"` + EncryptedContentInfo encryptedContentInfo +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type recipientInfo struct { + Version int + IssuerAndSerialNumber issuerAndSerial + KeyEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedKey []byte +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent asn1.RawValue `asn1:"tag:0,optional"` +} + +const ( + // EncryptionAlgorithmDESCBC is the DES CBC encryption algorithm + EncryptionAlgorithmDESCBC = iota + + // EncryptionAlgorithmAES128CBC is the AES 128 bits with CBC encryption algorithm + // Avoid this algorithm unless required for interoperability; use AES GCM instead. + EncryptionAlgorithmAES128CBC + + // EncryptionAlgorithmAES256CBC is the AES 256 bits with CBC encryption algorithm + // Avoid this algorithm unless required for interoperability; use AES GCM instead. 
+ EncryptionAlgorithmAES256CBC + + // EncryptionAlgorithmAES128GCM is the AES 128 bits with GCM encryption algorithm + EncryptionAlgorithmAES128GCM + + // EncryptionAlgorithmAES256GCM is the AES 256 bits with GCM encryption algorithm + EncryptionAlgorithmAES256GCM +) + +// ContentEncryptionAlgorithm determines the algorithm used to encrypt the +// plaintext message. Change the value of this variable to change which +// algorithm is used in the Encrypt() function. +var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC + +// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt +// content with an unsupported algorithm. +var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported") + +// KeyEncryptionAlgorithm determines the algorithm used to encrypt a +// content key. Change the value of this variable to change which +// algorithm is used in the Encrypt() function. +var KeyEncryptionAlgorithm = OIDEncryptionAlgorithmRSA + +// ErrUnsupportedKeyEncryptionAlgorithm is returned when an +// unsupported key encryption algorithm OID is provided. +var ErrUnsupportedKeyEncryptionAlgorithm = errors.New("pkcs7: unsupported key encryption algorithm provided") + +// KeyEncryptionHash determines the crypto.Hash algorithm to use +// when encrypting a content key. Change the value of this variable +// to change which algorithm is used in the Encrypt() function. +var KeyEncryptionHash = crypto.SHA256 + +// ErrUnsupportedKeyEncryptionHash is returned when an +// unsupported key encryption hash is provided. +var ErrUnsupportedKeyEncryptionHash = errors.New("pkcs7: unsupported key encryption hash provided") + +// ErrPSKNotProvided is returned when attempting to encrypt +// using a PSK without actually providing the PSK. 
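+//
+// A minimal sketch of PSK encryption (key sizing must match the chosen
+// algorithm, e.g. 32 bytes for AES-256-GCM):
+//
+//	ContentEncryptionAlgorithm = EncryptionAlgorithmAES256GCM
+//	key := make([]byte, 32)
+//	rand.Read(key)
+//	ciphertext, err := EncryptUsingPSK(plaintext, key)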
+var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided") + +const nonceSize = 12 + +type aesGCMParameters struct { + Nonce []byte `asn1:"tag:4"` + ICVLen int +} + +func encryptAESGCM(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + var keyLen int + var algID asn1.ObjectIdentifier + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmAES128GCM: + keyLen = 16 + algID = OIDEncryptionAlgorithmAES128GCM + case EncryptionAlgorithmAES256GCM: + keyLen = 32 + algID = OIDEncryptionAlgorithmAES256GCM + default: + return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESGCM: %d", ContentEncryptionAlgorithm) + } + if key == nil { + // Create AES key + key = make([]byte, keyLen) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create nonce + nonce := make([]byte, nonceSize) + + _, err := rand.Read(nonce) + if err != nil { + return nil, nil, err + } + + // Encrypt content + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, nil, err + } + + ciphertext := gcm.Seal(nil, nonce, content, nil) + + // Prepare ASN.1 Encrypted Content Info + paramSeq := aesGCMParameters{ + Nonce: nonce, + ICVLen: gcm.Overhead(), + } + + paramBytes, err := asn1.Marshal(paramSeq) + if err != nil { + return nil, nil, err + } + + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: algID, + Parameters: asn1.RawValue{ + Tag: asn1.TagSequence, + Bytes: paramBytes, + }, + }, + EncryptedContent: marshalEncryptedContent(ciphertext), + } + + return key, &eci, nil +} + +func encryptDESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + if key == nil { + // Create DES key + key = make([]byte, 8) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create CBC IV + iv := make([]byte, des.BlockSize) + _, err := rand.Read(iv) + if err != nil { + return nil, nil, err + } + + // Encrypt padded content + block, err := des.NewCipher(key) + if err != nil { + return nil, nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext, err := pad(content, mode.BlockSize()) + if err != nil { + return nil, nil, err + } + cyphertext := make([]byte, len(plaintext)) + mode.CryptBlocks(cyphertext, plaintext) + + // Prepare ASN.1 Encrypted Content Info + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: OIDEncryptionAlgorithmDESCBC, + Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, + }, + EncryptedContent: marshalEncryptedContent(cyphertext), + } + + return key, &eci, nil +} + +func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + var keyLen int + var algID asn1.ObjectIdentifier + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmAES128CBC: + keyLen = 16 + algID = OIDEncryptionAlgorithmAES128CBC + case EncryptionAlgorithmAES256CBC: + keyLen = 32 + algID = OIDEncryptionAlgorithmAES256CBC + default: + return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESCBC: %d", ContentEncryptionAlgorithm) + } + + if key == nil { + // Create AES key + key = make([]byte, keyLen) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create CBC IV + iv := make([]byte, aes.BlockSize) + _, err := rand.Read(iv) + if err != nil { + return nil, nil, err + } + + // Encrypt 
padded content + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext, err := pad(content, mode.BlockSize()) + if err != nil { + return nil, nil, err + } + cyphertext := make([]byte, len(plaintext)) + mode.CryptBlocks(cyphertext, plaintext) + + // Prepare ASN.1 Encrypted Content Info + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: algID, + Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, + }, + EncryptedContent: marshalEncryptedContent(cyphertext), + } + + return key, &eci, nil +} + +// Encrypt creates and returns an envelope data PKCS7 structure with encrypted +// recipient keys for each recipient public key. +// +// The algorithm used to perform encryption is determined by the current value +// of the global ContentEncryptionAlgorithm package variable. By default, the +// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the +// value before calling Encrypt(). For example: +// +// ContentEncryptionAlgorithm = EncryptionAlgorithmAES256GCM +// +// TODO(fullsailor): Add support for encrypting content with other algorithms +func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { + var eci *encryptedContentInfo + var key []byte + var err error + + // Apply chosen symmetric encryption method + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmDESCBC: + key, eci, err = encryptDESCBC(content, nil) + case EncryptionAlgorithmAES128CBC: + fallthrough + case EncryptionAlgorithmAES256CBC: + key, eci, err = encryptAESCBC(content, nil) + case EncryptionAlgorithmAES128GCM: + fallthrough + case EncryptionAlgorithmAES256GCM: + key, eci, err = encryptAESGCM(content, nil) + + default: + return nil, ErrUnsupportedEncryptionAlgorithm + } + + if err != nil { + return nil, err + } + + // Prepare each recipient's encrypted cipher key + recipientInfos := make([]recipientInfo, len(recipients)) + for i, recipient := range recipients { + algorithm := KeyEncryptionAlgorithm + hash := KeyEncryptionHash + var kea pkix.AlgorithmIdentifier + switch { + case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP): + parameters, err := getParametersForKeyEncryptionAlgorithm(algorithm, hash) + if err != nil { + return nil, fmt.Errorf("failed to get parameters for key encryption: %v", err) + } + kea = pkix.AlgorithmIdentifier{ + Algorithm: algorithm, + Parameters: parameters, + } + case algorithm.Equal(OIDEncryptionAlgorithmRSA): + kea = pkix.AlgorithmIdentifier{ + Algorithm: algorithm, + } + default: + return nil, ErrUnsupportedKeyEncryptionAlgorithm + } + encrypted, err := encryptKey(key, recipient, algorithm, hash) + if err != nil { + return nil, err + } + ias, err := cert2issuerAndSerial(recipient) + if err != nil { + return nil, err + } + info := recipientInfo{ + Version: 0, + IssuerAndSerialNumber: ias, + KeyEncryptionAlgorithm: kea, + EncryptedKey: encrypted, + } + recipientInfos[i] = info + } + + // Prepare envelope content + envelope := envelopedData{ + EncryptedContentInfo: *eci, + Version: 0, + RecipientInfos: recipientInfos, + } + innerContent, err := asn1.Marshal(envelope) + if err != nil { + return nil, err + } + + // Prepare outer payload structure + wrapper := contentInfo{ + ContentType: OIDEnvelopedData, + Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, + } + + return asn1.Marshal(wrapper) +} + +func getParametersForKeyEncryptionAlgorithm(algorithm asn1.ObjectIdentifier, 
hash crypto.Hash) (asn1.RawValue, error) { + if !algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP) { + return asn1.RawValue{}, nil // return empty; not used + } + + params := rsaOAEPAlgorithmParameters{} + switch hash { + case crypto.SHA1: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA1} + case crypto.SHA224: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA224} + case crypto.SHA256: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA256} + case crypto.SHA384: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA384} + case crypto.SHA512: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA512} + default: + return asn1.RawValue{}, ErrUnsupportedAlgorithm + } + + b, err := asn1.Marshal(params) + if err != nil { + return asn1.RawValue{}, fmt.Errorf("failed marshaling key encryption parameters: %v", err) + } + + return asn1.RawValue{ + FullBytes: b, + }, nil +} + +// EncryptUsingPSK creates and returns an encrypted data PKCS7 structure, +// encrypted using caller provided pre-shared secret. +func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) { + var eci *encryptedContentInfo + var err error + + if key == nil { + return nil, ErrPSKNotProvided + } + + // Apply chosen symmetric encryption method + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmDESCBC: + _, eci, err = encryptDESCBC(content, key) + + case EncryptionAlgorithmAES128GCM: + fallthrough + case EncryptionAlgorithmAES256GCM: + _, eci, err = encryptAESGCM(content, key) + + default: + return nil, ErrUnsupportedEncryptionAlgorithm + } + + if err != nil { + return nil, err + } + + // Prepare encrypted-data content + ed := encryptedData{ + Version: 0, + EncryptedContentInfo: *eci, + } + innerContent, err := asn1.Marshal(ed) + if err != nil { + return nil, err + } + + // Prepare outer payload structure + wrapper := contentInfo{ + ContentType: OIDEncryptedData, + Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, + } + + return asn1.Marshal(wrapper) +} + +func marshalEncryptedContent(content []byte) asn1.RawValue { + return asn1.RawValue{Bytes: content, Class: 2, IsCompound: false} +} + +func encryptKey(key []byte, recipient *x509.Certificate, algorithm asn1.ObjectIdentifier, hash crypto.Hash) ([]byte, error) { + pub, ok := recipient.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, ErrUnsupportedKeyType + } + + switch { + case algorithm.Equal(OIDEncryptionAlgorithmRSA): + return rsa.EncryptPKCS1v15(rand.Reader, pub, key) + case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP): + return rsa.EncryptOAEP(hash.New(), rand.Reader, pub, key, nil) + default: + return nil, ErrUnsupportedKeyEncryptionAlgorithm + } +} + +func pad(data []byte, blocklen int) ([]byte, error) { + if blocklen < 1 { + return nil, fmt.Errorf("invalid blocklen %d", blocklen) + } + padlen := blocklen - (len(data) % blocklen) + if padlen == 0 { + padlen = blocklen + } + pad := bytes.Repeat([]byte{byte(padlen)}, padlen) + return append(data, pad...), nil +} diff --git a/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go new file mode 100644 index 000000000..378cc265d --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go @@ -0,0 +1,14 @@ +package legacyx509 + +import "fmt" + +// legacyGodebugSetting is a type mimicking Go's internal godebug package +// settings, 
which are used to enable or disable certain functionality at
+// build time.
+type legacyGodebugSetting int
+
+func (s legacyGodebugSetting) Value() string {
+	return fmt.Sprintf("%d", s)
+}
+
+func (s legacyGodebugSetting) IncNonDefault() {}
diff --git a/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go
new file mode 100644
index 000000000..7d1469b6d
--- /dev/null
+++ b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go
@@ -0,0 +1,14 @@
+/*
+Package legacyx509 is a copy of certain parts of Go's crypto/x509 package.
+It is based on Go 1.23 and contains just the parts of that package that
+are required for parsing X.509 certificates.
+
+The primary reason this copy exists is to keep support for parsing PKCS7
+messages containing Simple Certificate Enrolment Protocol (SCEP) requests
+from Windows devices. Go 1.23 started rejecting certificates in which the
+authority key identifier extension is marked critical, as mandated by RFC
+5280, but Windows apparently marks its SCEP request certificates exactly
+that way, so those requests could no longer be parsed correctly.
+*/
+
+package legacyx509
diff --git a/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go
new file mode 100644
index 000000000..8268a07c5
--- /dev/null
+++ b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go
@@ -0,0 +1,377 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package legacyx509
+
+import (
+	"bytes"
+	"encoding/asn1"
+	"errors"
+	"math"
+	"math/big"
+	"math/bits"
+	"strconv"
+	"strings"
+)
+
+var (
+	errInvalidOID = errors.New("invalid oid")
+)
+
+// An OID represents an ASN.1 OBJECT IDENTIFIER.
+type OID struct {
+	der []byte
+}
+
+// ParseOID parses an Object Identifier string, represented by ASCII numbers separated by dots.
+func ParseOID(oid string) (OID, error) {
+	var o OID
+	return o, o.unmarshalOIDText(oid)
+}
+
+func newOIDFromDER(der []byte) (OID, bool) {
+	if len(der) == 0 || der[len(der)-1]&0x80 != 0 {
+		return OID{}, false
+	}
+
+	start := 0
+	for i, v := range der {
+		// ITU-T X.690, section 8.19.2:
+		// The subidentifier shall be encoded in the fewest possible octets,
+		// that is, the leading octet of the subidentifier shall not have the value 0x80.
+		if i == start && v == 0x80 {
+			return OID{}, false
+		}
+		if v&0x80 == 0 {
+			start = i + 1
+		}
+	}
+
+	return OID{der}, true
+}
+
+// OIDFromInts creates a new OID using ints, where each integer is a separate component.
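+// For example, OIDFromInts([]uint64{1, 2, 840, 113549}) should yield the OID
+// 1.2.840.113549 (the rsadsi arc).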
+func OIDFromInts(oid []uint64) (OID, error) { + if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return OID{}, errInvalidOID + } + + length := base128IntLength(oid[0]*40 + oid[1]) + for _, v := range oid[2:] { + length += base128IntLength(v) + } + + der := make([]byte, 0, length) + der = appendBase128Int(der, oid[0]*40+oid[1]) + for _, v := range oid[2:] { + der = appendBase128Int(der, v) + } + return OID{der}, nil +} + +func base128IntLength(n uint64) int { + if n == 0 { + return 1 + } + return (bits.Len64(n) + 6) / 7 +} + +func appendBase128Int(dst []byte, n uint64) []byte { + for i := base128IntLength(n) - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + dst = append(dst, o) + } + return dst +} + +func base128BigIntLength(n *big.Int) int { + if n.Cmp(big.NewInt(0)) == 0 { + return 1 + } + return (n.BitLen() + 6) / 7 +} + +func appendBase128BigInt(dst []byte, n *big.Int) []byte { + if n.Cmp(big.NewInt(0)) == 0 { + return append(dst, 0) + } + + for i := base128BigIntLength(n) - 1; i >= 0; i-- { + o := byte(big.NewInt(0).Rsh(n, uint(i)*7).Bits()[0]) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + dst = append(dst, o) + } + return dst +} + +// AppendText implements [encoding.TextAppender] +func (o OID) AppendText(b []byte) ([]byte, error) { + return append(b, o.String()...), nil +} + +// MarshalText implements [encoding.TextMarshaler] +func (o OID) MarshalText() ([]byte, error) { + return o.AppendText(nil) +} + +// UnmarshalText implements [encoding.TextUnmarshaler] +func (o *OID) UnmarshalText(text []byte) error { + return o.unmarshalOIDText(string(text)) +} + +// cutString slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. +func cutString(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +func (o *OID) unmarshalOIDText(oid string) error { + // (*big.Int).SetString allows +/- signs, but we don't want + // to allow them in the string representation of Object Identifier, so + // reject such encodings. + for _, c := range oid { + isDigit := c >= '0' && c <= '9' + if !isDigit && c != '.' 
{ + return errInvalidOID + } + } + + var ( + firstNum string + secondNum string + ) + + var nextComponentExists bool + firstNum, oid, nextComponentExists = cutString(oid, ".") + if !nextComponentExists { + return errInvalidOID + } + secondNum, oid, nextComponentExists = cutString(oid, ".") + + var ( + first = big.NewInt(0) + second = big.NewInt(0) + ) + + if _, ok := first.SetString(firstNum, 10); !ok { + return errInvalidOID + } + if _, ok := second.SetString(secondNum, 10); !ok { + return errInvalidOID + } + + if first.Cmp(big.NewInt(2)) > 0 || (first.Cmp(big.NewInt(2)) < 0 && second.Cmp(big.NewInt(40)) >= 0) { + return errInvalidOID + } + + firstComponent := first.Mul(first, big.NewInt(40)) + firstComponent.Add(firstComponent, second) + + der := appendBase128BigInt(make([]byte, 0, 32), firstComponent) + + for nextComponentExists { + var strNum string + strNum, oid, nextComponentExists = cutString(oid, ".") + b, ok := big.NewInt(0).SetString(strNum, 10) + if !ok { + return errInvalidOID + } + der = appendBase128BigInt(der, b) + } + + o.der = der + return nil +} + +// AppendBinary implements [encoding.BinaryAppender] +func (o OID) AppendBinary(b []byte) ([]byte, error) { + return append(b, o.der...), nil +} + +// MarshalBinary implements [encoding.BinaryMarshaler] +func (o OID) MarshalBinary() ([]byte, error) { + return o.AppendBinary(nil) +} + +// cloneBytes returns a copy of b[:len(b)]. +// The result may have additional unused capacity. +// Clone(nil) returns nil. +func cloneBytes(b []byte) []byte { + if b == nil { + return nil + } + return append([]byte{}, b...) +} + +// UnmarshalBinary implements [encoding.BinaryUnmarshaler] +func (o *OID) UnmarshalBinary(b []byte) error { + oid, ok := newOIDFromDER(cloneBytes(b)) + if !ok { + return errInvalidOID + } + *o = oid + return nil +} + +// Equal returns true when oid and other represents the same Object Identifier. +func (oid OID) Equal(other OID) bool { + // There is only one possible DER encoding of + // each unique Object Identifier. + return bytes.Equal(oid.der, other.der) +} + +func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, failed bool) { + offset = initOffset + var ret64 int64 + for shifted := 0; offset < len(bytes); shifted++ { + // 5 * 7 bits per byte == 35 bits of data + // Thus the representation is either non-minimal or too large for an int32 + if shifted == 5 { + failed = true + return + } + ret64 <<= 7 + b := bytes[offset] + // integers should be minimally encoded, so the leading octet should + // never be 0x80 + if shifted == 0 && b == 0x80 { + failed = true + return + } + ret64 |= int64(b & 0x7f) + offset++ + if b&0x80 == 0 { + ret = int(ret64) + // Ensure that the returned value fits in an int on all platforms + if ret64 > math.MaxInt32 { + failed = true + } + return + } + } + failed = true + return +} + +// EqualASN1OID returns whether an OID equals an asn1.ObjectIdentifier. If +// asn1.ObjectIdentifier cannot represent the OID specified by oid, because +// a component of OID requires more than 31 bits, it returns false. +func (oid OID) EqualASN1OID(other asn1.ObjectIdentifier) bool { + if len(other) < 2 { + return false + } + v, offset, failed := parseBase128Int(oid.der, 0) + if failed { + // This should never happen, since we've already parsed the OID, + // but just in case. 
+ return false + } + if v < 80 { + a, b := v/40, v%40 + if other[0] != a || other[1] != b { + return false + } + } else { + a, b := 2, v-80 + if other[0] != a || other[1] != b { + return false + } + } + + i := 2 + for ; offset < len(oid.der); i++ { + v, offset, failed = parseBase128Int(oid.der, offset) + if failed { + // Again, shouldn't happen, since we've already parsed + // the OID, but better safe than sorry. + return false + } + if i >= len(other) || v != other[i] { + return false + } + } + + return i == len(other) +} + +// Strings returns the string representation of the Object Identifier. +func (oid OID) String() string { + var b strings.Builder + b.Grow(32) + const ( + valSize = 64 // size in bits of val. + bitsPerByte = 7 + maxValSafeShift = (1 << (valSize - bitsPerByte)) - 1 + ) + var ( + start = 0 + val = uint64(0) + numBuf = make([]byte, 0, 21) + bigVal *big.Int + overflow bool + ) + for i, v := range oid.der { + curVal := v & 0x7F + valEnd := v&0x80 == 0 + if valEnd { + if start != 0 { + b.WriteByte('.') + } + } + if !overflow && val > maxValSafeShift { + if bigVal == nil { + bigVal = new(big.Int) + } + bigVal = bigVal.SetUint64(val) + overflow = true + } + if overflow { + bigVal = bigVal.Lsh(bigVal, bitsPerByte).Or(bigVal, big.NewInt(int64(curVal))) + if valEnd { + if start == 0 { + b.WriteString("2.") + bigVal = bigVal.Sub(bigVal, big.NewInt(80)) + } + numBuf = bigVal.Append(numBuf, 10) + b.Write(numBuf) + numBuf = numBuf[:0] + val = 0 + start = i + 1 + overflow = false + } + continue + } + val <<= bitsPerByte + val |= uint64(curVal) + if valEnd { + if start == 0 { + if val < 80 { + b.Write(strconv.AppendUint(numBuf, val/40, 10)) + b.WriteByte('.') + b.Write(strconv.AppendUint(numBuf, val%40, 10)) + } else { + b.WriteString("2.") + b.Write(strconv.AppendUint(numBuf, val-80, 10)) + } + } else { + b.Write(strconv.AppendUint(numBuf, val, 10)) + } + val = 0 + start = i + 1 + } + } + return b.String() +} diff --git a/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go new file mode 100644 index 000000000..ec57e79f6 --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go @@ -0,0 +1,1027 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package legacyx509 + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + "unicode/utf16" + "unicode/utf8" + + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + + stdx509 "crypto/x509" +) + +// ParseCertificates parses one or more certificates from the given ASN.1 DER +// data. The certificates must be concatenated with no intermediate padding. +func ParseCertificates(der []byte) ([]*stdx509.Certificate, error) { + var certs []*stdx509.Certificate + for len(der) > 0 { + cert, err := parseCertificate(der) + if err != nil { + return nil, err + } + certs = append(certs, cert) + der = der[len(cert.Raw):] + } + return certs, nil +} + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +// This is a simplified version of encoding/asn1.isPrintable. 
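+// The PrintableString set itself is defined in ITU-T X.680; as the comments
+// in the body note, '*' and '&' are additionally accepted here for
+// compatibility with certificates seen in practice.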
+func isPrintable(b byte) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + b == '*' || + // This is not technically allowed either. However, not + // only is it relatively common, but there are also a + // handful of CA certificates that contain it. At least + // one of which will not expire until 2027. + b == '&' +} + +// parseASN1String parses the ASN.1 string types T61String, PrintableString, +// UTF8String, BMPString, IA5String, and NumericString. This is mostly copied +// from the respective encoding/asn1.parse... methods, rather than just +// increasing the API surface of that package. +func parseASN1String(tag cryptobyte_asn1.Tag, value []byte) (string, error) { + switch tag { + case cryptobyte_asn1.T61String: + return string(value), nil + case cryptobyte_asn1.PrintableString: + for _, b := range value { + if !isPrintable(b) { + return "", errors.New("invalid PrintableString") + } + } + return string(value), nil + case cryptobyte_asn1.UTF8String: + if !utf8.Valid(value) { + return "", errors.New("invalid UTF-8 string") + } + return string(value), nil + case cryptobyte_asn1.Tag(asn1.TagBMPString): + if len(value)%2 != 0 { + return "", errors.New("invalid BMPString") + } + + // Strip terminator if present. + if l := len(value); l >= 2 && value[l-1] == 0 && value[l-2] == 0 { + value = value[:l-2] + } + + s := make([]uint16, 0, len(value)/2) + for len(value) > 0 { + s = append(s, uint16(value[0])<<8+uint16(value[1])) + value = value[2:] + } + + return string(utf16.Decode(s)), nil + case cryptobyte_asn1.IA5String: + s := string(value) + if isIA5String(s) != nil { + return "", errors.New("invalid IA5String") + } + return s, nil + case cryptobyte_asn1.Tag(asn1.TagNumericString): + for _, b := range value { + if !('0' <= b && b <= '9' || b == ' ') { + return "", errors.New("invalid NumericString") + } + } + return string(value), nil + } + return "", fmt.Errorf("unsupported string type: %v", tag) +} + +// parseName parses a DER encoded Name as defined in RFC 5280. We may +// want to export this function in the future for use in crypto/tls. 
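+//
+// The structure being parsed is, per RFC 5280, Section 4.1.2.4:
+//
+//	RDNSequence ::= SEQUENCE OF RelativeDistinguishedName
+//
+//	RelativeDistinguishedName ::= SET SIZE (1..MAX) OF AttributeTypeAndValue
+//
+//	AttributeTypeAndValue ::= SEQUENCE {
+//		type  AttributeType,
+//		value AttributeValue }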
+func parseName(raw cryptobyte.String) (*pkix.RDNSequence, error) { + if !raw.ReadASN1(&raw, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RDNSequence") + } + + var rdnSeq pkix.RDNSequence + for !raw.Empty() { + var rdnSet pkix.RelativeDistinguishedNameSET + var set cryptobyte.String + if !raw.ReadASN1(&set, cryptobyte_asn1.SET) { + return nil, errors.New("x509: invalid RDNSequence") + } + for !set.Empty() { + var atav cryptobyte.String + if !set.ReadASN1(&atav, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute") + } + var attr pkix.AttributeTypeAndValue + if !atav.ReadASN1ObjectIdentifier(&attr.Type) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute type") + } + var rawValue cryptobyte.String + var valueTag cryptobyte_asn1.Tag + if !atav.ReadAnyASN1(&rawValue, &valueTag) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute value") + } + var err error + attr.Value, err = parseASN1String(valueTag, rawValue) + if err != nil { + return nil, fmt.Errorf("x509: invalid RDNSequence: invalid attribute value: %s", err) + } + rdnSet = append(rdnSet, attr) + } + + rdnSeq = append(rdnSeq, rdnSet) + } + + return &rdnSeq, nil +} + +func parseAI(der cryptobyte.String) (pkix.AlgorithmIdentifier, error) { + ai := pkix.AlgorithmIdentifier{} + if !der.ReadASN1ObjectIdentifier(&ai.Algorithm) { + return ai, errors.New("x509: malformed OID") + } + if der.Empty() { + return ai, nil + } + var params cryptobyte.String + var tag cryptobyte_asn1.Tag + if !der.ReadAnyASN1Element(¶ms, &tag) { + return ai, errors.New("x509: malformed parameters") + } + ai.Parameters.Tag = int(tag) + ai.Parameters.FullBytes = params + return ai, nil +} + +func parseTime(der *cryptobyte.String) (time.Time, error) { + var t time.Time + switch { + case der.PeekASN1Tag(cryptobyte_asn1.UTCTime): + if !der.ReadASN1UTCTime(&t) { + return t, errors.New("x509: malformed UTCTime") + } + case der.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime): + if !der.ReadASN1GeneralizedTime(&t) { + return t, errors.New("x509: malformed GeneralizedTime") + } + default: + return t, errors.New("x509: unsupported time format") + } + return t, nil +} + +func parseValidity(der cryptobyte.String) (time.Time, time.Time, error) { + notBefore, err := parseTime(&der) + if err != nil { + return time.Time{}, time.Time{}, err + } + notAfter, err := parseTime(&der) + if err != nil { + return time.Time{}, time.Time{}, err + } + + return notBefore, notAfter, nil +} + +func parseExtension(der cryptobyte.String) (pkix.Extension, error) { + var ext pkix.Extension + if !der.ReadASN1ObjectIdentifier(&ext.Id) { + return ext, errors.New("x509: malformed extension OID field") + } + if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) { + if !der.ReadASN1Boolean(&ext.Critical) { + return ext, errors.New("x509: malformed extension critical field") + } + } + var val cryptobyte.String + if !der.ReadASN1(&val, cryptobyte_asn1.OCTET_STRING) { + return ext, errors.New("x509: malformed extension value field") + } + ext.Value = val + return ext, nil +} + +func parsePublicKey(keyData *publicKeyInfo) (interface{}, error) { + oid := keyData.Algorithm.Algorithm + params := keyData.Algorithm.Parameters + der := cryptobyte.String(keyData.PublicKey.RightAlign()) + switch { + case oid.Equal(oidPublicKeyRSA): + // RSA public keys must have a NULL in the parameters. + // See RFC 3279, Section 2.3.1. 
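+		// (asn1.NullBytes is the DER encoding of an ASN.1 NULL value: 0x05, 0x00.)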
+ if !bytes.Equal(params.FullBytes, asn1.NullBytes) { + return nil, errors.New("x509: RSA key missing NULL parameters") + } + + p := &pkcs1PublicKey{N: new(big.Int)} + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RSA public key") + } + if !der.ReadASN1Integer(p.N) { + return nil, errors.New("x509: invalid RSA modulus") + } + if !der.ReadASN1Integer(&p.E) { + return nil, errors.New("x509: invalid RSA public exponent") + } + + if p.N.Sign() <= 0 { + return nil, errors.New("x509: RSA modulus is not a positive number") + } + if p.E <= 0 { + return nil, errors.New("x509: RSA public exponent is not a positive number") + } + + pub := &rsa.PublicKey{ + E: p.E, + N: p.N, + } + return pub, nil + case oid.Equal(oidPublicKeyECDSA): + paramsDer := cryptobyte.String(params.FullBytes) + namedCurveOID := new(asn1.ObjectIdentifier) + if !paramsDer.ReadASN1ObjectIdentifier(namedCurveOID) { + return nil, errors.New("x509: invalid ECDSA parameters") + } + namedCurve := namedCurveFromOID(*namedCurveOID) + if namedCurve == nil { + return nil, errors.New("x509: unsupported elliptic curve") + } + x, y := elliptic.Unmarshal(namedCurve, der) + if x == nil { + return nil, errors.New("x509: failed to unmarshal elliptic curve point") + } + pub := &ecdsa.PublicKey{ + Curve: namedCurve, + X: x, + Y: y, + } + return pub, nil + case oid.Equal(oidPublicKeyEd25519): + // RFC 8410, Section 3 + // > For all of the OIDs, the parameters MUST be absent. + if len(params.FullBytes) != 0 { + return nil, errors.New("x509: Ed25519 key encoded with illegal parameters") + } + if len(der) != ed25519.PublicKeySize { + return nil, errors.New("x509: wrong Ed25519 public key size") + } + return ed25519.PublicKey(der), nil + // case oid.Equal(oidPublicKeyX25519): + // // RFC 8410, Section 3 + // // > For all of the OIDs, the parameters MUST be absent. 
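+	// NOTE: the X25519 branch below is commented out in this legacy copy.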
+ // if len(params.FullBytes) != 0 { + // return nil, errors.New("x509: X25519 key encoded with illegal parameters") + // } + // return ecdh.X25519().NewPublicKey(der) + case oid.Equal(oidPublicKeyDSA): + y := new(big.Int) + if !der.ReadASN1Integer(y) { + return nil, errors.New("x509: invalid DSA public key") + } + pub := &dsa.PublicKey{ + Y: y, + Parameters: dsa.Parameters{ + P: new(big.Int), + Q: new(big.Int), + G: new(big.Int), + }, + } + paramsDer := cryptobyte.String(params.FullBytes) + if !paramsDer.ReadASN1(¶msDer, cryptobyte_asn1.SEQUENCE) || + !paramsDer.ReadASN1Integer(pub.Parameters.P) || + !paramsDer.ReadASN1Integer(pub.Parameters.Q) || + !paramsDer.ReadASN1Integer(pub.Parameters.G) { + return nil, errors.New("x509: invalid DSA parameters") + } + if pub.Y.Sign() <= 0 || pub.Parameters.P.Sign() <= 0 || + pub.Parameters.Q.Sign() <= 0 || pub.Parameters.G.Sign() <= 0 { + return nil, errors.New("x509: zero or negative DSA parameter") + } + return pub, nil + default: + return nil, errors.New("x509: unknown public key algorithm") + } +} + +func parseKeyUsageExtension(der cryptobyte.String) (stdx509.KeyUsage, error) { + var usageBits asn1.BitString + if !der.ReadASN1BitString(&usageBits) { + return 0, errors.New("x509: invalid key usage") + } + + var usage int + for i := 0; i < 9; i++ { + if usageBits.At(i) != 0 { + usage |= 1 << uint(i) + } + } + return stdx509.KeyUsage(usage), nil +} + +func parseBasicConstraintsExtension(der cryptobyte.String) (bool, int, error) { + var isCA bool + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return false, 0, errors.New("x509: invalid basic constraints") + } + if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) { + if !der.ReadASN1Boolean(&isCA) { + return false, 0, errors.New("x509: invalid basic constraints") + } + } + maxPathLen := -1 + if der.PeekASN1Tag(cryptobyte_asn1.INTEGER) { + if !der.ReadASN1Integer(&maxPathLen) { + return false, 0, errors.New("x509: invalid basic constraints") + } + } + + // TODO: map out.MaxPathLen to 0 if it has the -1 default value? 
(Issue 19285) + return isCA, maxPathLen, nil +} + +func forEachSAN(der cryptobyte.String, callback func(tag int, data []byte) error) error { + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid subject alternative names") + } + for !der.Empty() { + var san cryptobyte.String + var tag cryptobyte_asn1.Tag + if !der.ReadAnyASN1(&san, &tag) { + return errors.New("x509: invalid subject alternative name") + } + if err := callback(int(tag^0x80), san); err != nil { + return err + } + } + + return nil +} + +func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) { + err = forEachSAN(der, func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + email := string(data) + if err := isIA5String(email); err != nil { + return errors.New("x509: SAN rfc822Name is malformed") + } + emailAddresses = append(emailAddresses, email) + case nameTypeDNS: + name := string(data) + if err := isIA5String(name); err != nil { + return errors.New("x509: SAN dNSName is malformed") + } + dnsNames = append(dnsNames, string(name)) + case nameTypeURI: + uriStr := string(data) + if err := isIA5String(uriStr); err != nil { + return errors.New("x509: SAN uniformResourceIdentifier is malformed") + } + uri, err := url.Parse(uriStr) + if err != nil { + return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) + } + if len(uri.Host) > 0 { + if _, ok := domainToReverseLabels(uri.Host); !ok { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) + } + } + uris = append(uris, uri) + case nameTypeIP: + switch len(data) { + case net.IPv4len, net.IPv6len: + ipAddresses = append(ipAddresses, data) + default: + return errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data))) + } + } + + return nil + }) + + return +} + +func parseAuthorityKeyIdentifier(e pkix.Extension) ([]byte, error) { + // RFC 5280, Section 4.2.1.1 + // if e.Critical { + // // Conforming CAs MUST mark this extension as non-critical + // return nil, errors.New("x509: authority key identifier incorrectly marked critical") + // } + val := cryptobyte.String(e.Value) + var akid cryptobyte.String + if !val.ReadASN1(&akid, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid authority key identifier") + } + if akid.PeekASN1Tag(cryptobyte_asn1.Tag(0).ContextSpecific()) { + if !akid.ReadASN1(&akid, cryptobyte_asn1.Tag(0).ContextSpecific()) { + return nil, errors.New("x509: invalid authority key identifier") + } + return akid, nil + } + return nil, nil +} + +func parseExtKeyUsageExtension(der cryptobyte.String) ([]stdx509.ExtKeyUsage, []asn1.ObjectIdentifier, error) { + var extKeyUsages []stdx509.ExtKeyUsage + var unknownUsages []asn1.ObjectIdentifier + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return nil, nil, errors.New("x509: invalid extended key usages") + } + for !der.Empty() { + var eku asn1.ObjectIdentifier + if !der.ReadASN1ObjectIdentifier(&eku) { + return nil, nil, errors.New("x509: invalid extended key usages") + } + if extKeyUsage, ok := extKeyUsageFromOID(eku); ok { + extKeyUsages = append(extKeyUsages, stdx509.ExtKeyUsage(extKeyUsage)) + } else { + unknownUsages = append(unknownUsages, eku) + } + } + return extKeyUsages, unknownUsages, nil +} + +// func parseCertificatePoliciesExtension(der cryptobyte.String) ([]OID, error) { +// var oids []OID +// if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { +// return nil, errors.New("x509: invalid certificate policies") +// } +// 
for !der.Empty() { +// var cp cryptobyte.String +// var OIDBytes cryptobyte.String +// if !der.ReadASN1(&cp, cryptobyte_asn1.SEQUENCE) || !cp.ReadASN1(&OIDBytes, cryptobyte_asn1.OBJECT_IDENTIFIER) { +// return nil, errors.New("x509: invalid certificate policies") +// } +// oid, ok := newOIDFromDER(OIDBytes) +// if !ok { +// return nil, errors.New("x509: invalid certificate policies") +// } +// oids = append(oids, oid) +// } +// return oids, nil +// } + +// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits. +func isValidIPMask(mask []byte) bool { + seenZero := false + + for _, b := range mask { + if seenZero { + if b != 0 { + return false + } + + continue + } + + switch b { + case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe: + seenZero = true + case 0xff: + default: + return false + } + } + + return true +} + +func parseNameConstraintsExtension(out *stdx509.Certificate, e pkix.Extension) (unhandled bool, err error) { + // RFC 5280, 4.2.1.10 + + // NameConstraints ::= SEQUENCE { + // permittedSubtrees [0] GeneralSubtrees OPTIONAL, + // excludedSubtrees [1] GeneralSubtrees OPTIONAL } + // + // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree + // + // GeneralSubtree ::= SEQUENCE { + // base GeneralName, + // minimum [0] BaseDistance DEFAULT 0, + // maximum [1] BaseDistance OPTIONAL } + // + // BaseDistance ::= INTEGER (0..MAX) + + outer := cryptobyte.String(e.Value) + var toplevel, permitted, excluded cryptobyte.String + var havePermitted, haveExcluded bool + if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) || + !outer.Empty() || + !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) || + !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) || + !toplevel.Empty() { + return false, errors.New("x509: invalid NameConstraints extension") + } + + if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 { + // From RFC 5280, Section 4.2.1.10: + // “either the permittedSubtrees field + // or the excludedSubtrees MUST be + // present” + return false, errors.New("x509: empty name constraints extension") + } + + getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) { + for !subtrees.Empty() { + var seq, value cryptobyte.String + var tag cryptobyte_asn1.Tag + if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) || + !seq.ReadAnyASN1(&value, &tag) { + return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension") + } + + var ( + dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific() + emailTag = cryptobyte_asn1.Tag(1).ContextSpecific() + ipTag = cryptobyte_asn1.Tag(7).ContextSpecific() + uriTag = cryptobyte_asn1.Tag(6).ContextSpecific() + ) + + switch tag { + case dnsTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain + // itself, but that's not valid in a + // normal domain name. 
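+					// For example, Go treats a constraint of
+					// ".example.com" as matching subdomains such
+					// as foo.example.com, but not example.com
+					// itself.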
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) + } + dnsNames = append(dnsNames, domain) + + case ipTag: + l := len(value) + var ip, mask []byte + + switch l { + case 8: + ip = value[:4] + mask = value[4:] + + case 32: + ip = value[:16] + mask = value[16:] + + default: + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l) + } + + if !isValidIPMask(mask) { + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask) + } + + ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)}) + + case emailTag: + constraint := string(value) + if err := isIA5String(constraint); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + // If the constraint contains an @ then + // it specifies an exact mailbox name. + if strings.Contains(constraint, "@") { + if _, ok := parseRFC2821Mailbox(constraint); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) + } + } else { + // Otherwise it's a domain name. + domain := constraint + if len(domain) > 0 && domain[0] == '.' { + domain = domain[1:] + } + if _, ok := domainToReverseLabels(domain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) + } + } + emails = append(emails, constraint) + + case uriTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + if net.ParseIP(domain) != nil { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain itself, + // but that's not valid in a normal + // domain name. 
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain) + } + uriDomains = append(uriDomains, domain) + + default: + unhandled = true + } + } + + return dnsNames, ips, emails, uriDomains, nil + } + + if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil { + return false, err + } + if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil { + return false, err + } + out.PermittedDNSDomainsCritical = e.Critical + + return unhandled, nil +} + +func processExtensions(out *stdx509.Certificate) error { + var err error + for _, e := range out.Extensions { + unhandled := false + + if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 { + switch e.Id[3] { + case 15: + out.KeyUsage, err = parseKeyUsageExtension(e.Value) + if err != nil { + return err + } + case 19: + out.IsCA, out.MaxPathLen, err = parseBasicConstraintsExtension(e.Value) + if err != nil { + return err + } + out.BasicConstraintsValid = true + out.MaxPathLenZero = out.MaxPathLen == 0 + case 17: + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value) + if err != nil { + return err + } + + if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 { + // If we didn't parse anything then we do the critical check, below. + unhandled = true + } + + case 30: + unhandled, err = parseNameConstraintsExtension(out, e) + if err != nil { + return err + } + + case 31: + // RFC 5280, 4.2.1.13 + + // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint + // + // DistributionPoint ::= SEQUENCE { + // distributionPoint [0] DistributionPointName OPTIONAL, + // reasons [1] ReasonFlags OPTIONAL, + // cRLIssuer [2] GeneralNames OPTIONAL } + // + // DistributionPointName ::= CHOICE { + // fullName [0] GeneralNames, + // nameRelativeToCRLIssuer [1] RelativeDistinguishedName } + val := cryptobyte.String(e.Value) + if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid CRL distribution points") + } + for !val.Empty() { + var dpDER cryptobyte.String + if !val.ReadASN1(&dpDER, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid CRL distribution point") + } + var dpNameDER cryptobyte.String + var dpNamePresent bool + if !dpDER.ReadOptionalASN1(&dpNameDER, &dpNamePresent, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + if !dpNamePresent { + continue + } + if !dpNameDER.ReadASN1(&dpNameDER, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + for !dpNameDER.Empty() { + if !dpNameDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) { + break + } + var uri cryptobyte.String + if !dpNameDER.ReadASN1(&uri, cryptobyte_asn1.Tag(6).ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + out.CRLDistributionPoints = append(out.CRLDistributionPoints, string(uri)) + } + } + + case 35: + out.AuthorityKeyId, err = parseAuthorityKeyIdentifier(e) + if err != nil { + return err + } + case 37: + out.ExtKeyUsage, out.UnknownExtKeyUsage, err = parseExtKeyUsageExtension(e.Value) + if err != nil { + return err + } + case 14: + // RFC 5280, 4.2.1.2 
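+				// SubjectKeyIdentifier ::= KeyIdentifier
+				// KeyIdentifier ::= OCTET STRING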
+ if e.Critical { + // Conforming CAs MUST mark this extension as non-critical + return errors.New("x509: subject key identifier incorrectly marked critical") + } + val := cryptobyte.String(e.Value) + var skid cryptobyte.String + if !val.ReadASN1(&skid, cryptobyte_asn1.OCTET_STRING) { + return errors.New("x509: invalid subject key identifier") + } + out.SubjectKeyId = skid + // case 32: + // out.Policies, err = parseCertificatePoliciesExtension(e.Value) + // if err != nil { + // return err + // } + // out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, 0, len(out.Policies)) + // for _, oid := range out.Policies { + // if oid, ok := oid.toASN1OID(); ok { + // out.PolicyIdentifiers = append(out.PolicyIdentifiers, oid) + // } + // } + default: + // Unknown extensions are recorded if critical. + unhandled = true + } + } else if e.Id.Equal(oidExtensionAuthorityInfoAccess) { + // RFC 5280 4.2.2.1: Authority Information Access + if e.Critical { + // Conforming CAs MUST mark this extension as non-critical + return errors.New("x509: authority info access incorrectly marked critical") + } + val := cryptobyte.String(e.Value) + if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid authority info access") + } + for !val.Empty() { + var aiaDER cryptobyte.String + if !val.ReadASN1(&aiaDER, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid authority info access") + } + var method asn1.ObjectIdentifier + if !aiaDER.ReadASN1ObjectIdentifier(&method) { + return errors.New("x509: invalid authority info access") + } + if !aiaDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) { + continue + } + if !aiaDER.ReadASN1(&aiaDER, cryptobyte_asn1.Tag(6).ContextSpecific()) { + return errors.New("x509: invalid authority info access") + } + switch { + case method.Equal(oidAuthorityInfoAccessOcsp): + out.OCSPServer = append(out.OCSPServer, string(aiaDER)) + case method.Equal(oidAuthorityInfoAccessIssuers): + out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(aiaDER)) + } + } + } else { + // Unknown extensions are recorded if critical. 
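+			// (The OID is appended to UnhandledCriticalExtensions at the
+			// bottom of the loop.)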
+ unhandled = true + } + + if e.Critical && unhandled { + out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id) + } + } + + return nil +} + +var x509negativeserial = legacyGodebugSetting(0) // replaces godebug.New("x509negativeserial") + +func parseCertificate(der []byte) (*stdx509.Certificate, error) { + cert := &stdx509.Certificate{} + + input := cryptobyte.String(der) + // we read the SEQUENCE including length and tag bytes so that + // we can populate Certificate.Raw, before unwrapping the + // SEQUENCE so it can be operated on + if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed certificate") + } + cert.Raw = input + if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed certificate") + } + + var tbs cryptobyte.String + // do the same trick again as above to extract the raw + // bytes for Certificate.RawTBSCertificate + if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed tbs certificate") + } + cert.RawTBSCertificate = tbs + if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed tbs certificate") + } + + if !tbs.ReadOptionalASN1Integer(&cert.Version, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific(), 0) { + return nil, errors.New("x509: malformed version") + } + if cert.Version < 0 { + return nil, errors.New("x509: malformed version") + } + // for backwards compat reasons Version is one-indexed, + // rather than zero-indexed as defined in 5280 + cert.Version++ + if cert.Version > 3 { + return nil, errors.New("x509: invalid version") + } + + serial := new(big.Int) + if !tbs.ReadASN1Integer(serial) { + return nil, errors.New("x509: malformed serial number") + } + if serial.Sign() == -1 { + if x509negativeserial.Value() != "1" { + return nil, errors.New("x509: negative serial number") + } else { + x509negativeserial.IncNonDefault() + } + } + cert.SerialNumber = serial + + var sigAISeq cryptobyte.String + if !tbs.ReadASN1(&sigAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed signature algorithm identifier") + } + // Before parsing the inner algorithm identifier, extract + // the outer algorithm identifier and make sure that they + // match. 
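+	// RFC 5280, Section 4.1.1.2 requires the two fields to be identical.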
+ var outerSigAISeq cryptobyte.String + if !input.ReadASN1(&outerSigAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed algorithm identifier") + } + if !bytes.Equal(outerSigAISeq, sigAISeq) { + return nil, errors.New("x509: inner and outer signature algorithm identifiers don't match") + } + sigAI, err := parseAI(sigAISeq) + if err != nil { + return nil, err + } + cert.SignatureAlgorithm = getSignatureAlgorithmFromAI(sigAI) + + var issuerSeq cryptobyte.String + if !tbs.ReadASN1Element(&issuerSeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed issuer") + } + cert.RawIssuer = issuerSeq + issuerRDNs, err := parseName(issuerSeq) + if err != nil { + return nil, err + } + cert.Issuer.FillFromRDNSequence(issuerRDNs) + + var validity cryptobyte.String + if !tbs.ReadASN1(&validity, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed validity") + } + cert.NotBefore, cert.NotAfter, err = parseValidity(validity) + if err != nil { + return nil, err + } + + var subjectSeq cryptobyte.String + if !tbs.ReadASN1Element(&subjectSeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed issuer") + } + cert.RawSubject = subjectSeq + subjectRDNs, err := parseName(subjectSeq) + if err != nil { + return nil, err + } + cert.Subject.FillFromRDNSequence(subjectRDNs) + + var spki cryptobyte.String + if !tbs.ReadASN1Element(&spki, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed spki") + } + cert.RawSubjectPublicKeyInfo = spki + if !spki.ReadASN1(&spki, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed spki") + } + var pkAISeq cryptobyte.String + if !spki.ReadASN1(&pkAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed public key algorithm identifier") + } + pkAI, err := parseAI(pkAISeq) + if err != nil { + return nil, err + } + cert.PublicKeyAlgorithm = getPublicKeyAlgorithmFromOID(pkAI.Algorithm) + var spk asn1.BitString + if !spki.ReadASN1BitString(&spk) { + return nil, errors.New("x509: malformed subjectPublicKey") + } + if cert.PublicKeyAlgorithm != stdx509.UnknownPublicKeyAlgorithm { + cert.PublicKey, err = parsePublicKey(&publicKeyInfo{ + Algorithm: pkAI, + PublicKey: spk, + }) + if err != nil { + return nil, err + } + } + + if cert.Version > 1 { + if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(1).ContextSpecific()) { + return nil, errors.New("x509: malformed issuerUniqueID") + } + if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(2).ContextSpecific()) { + return nil, errors.New("x509: malformed subjectUniqueID") + } + if cert.Version == 3 { + var extensions cryptobyte.String + var present bool + if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.Tag(3).Constructed().ContextSpecific()) { + return nil, errors.New("x509: malformed extensions") + } + if present { + seenExts := make(map[string]bool) + if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed extensions") + } + for !extensions.Empty() { + var extension cryptobyte.String + if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed extension") + } + ext, err := parseExtension(extension) + if err != nil { + return nil, err + } + oidStr := ext.Id.String() + if seenExts[oidStr] { + return nil, fmt.Errorf("x509: certificate contains duplicate extension with OID %q", oidStr) + } + seenExts[oidStr] = true + cert.Extensions = append(cert.Extensions, ext) + } + err = processExtensions(cert) + if err != nil { + 
return nil, err + } + } + } + } + + var signature asn1.BitString + if !input.ReadASN1BitString(&signature) { + return nil, errors.New("x509: malformed signature") + } + cert.Signature = signature.RightAlign() + + return cert, nil +} diff --git a/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go new file mode 100644 index 000000000..da3c38a4e --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package legacyx509 + +import ( + "math/big" +) + +// pkcs1PublicKey reflects the ASN.1 structure of a PKCS #1 public key. +type pkcs1PublicKey struct { + N *big.Int + E int +} diff --git a/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go new file mode 100644 index 000000000..901e3ba85 --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go @@ -0,0 +1,193 @@ +package legacyx509 + +import ( + "bytes" + "strings" +) + +// rfc2821Mailbox represents a “mailbox” (which is an email address to most +// people) by breaking it into the “local” (i.e. before the '@') and “domain” +// parts. +type rfc2821Mailbox struct { + local, domain string +} + +// parseRFC2821Mailbox parses an email address into local and domain parts, +// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280, +// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The +// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”. +func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { + if len(in) == 0 { + return mailbox, false + } + + localPartBytes := make([]byte, 0, len(in)/2) + + if in[0] == '"' { + // Quoted-string = DQUOTE *qcontent DQUOTE + // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127 + // qcontent = qtext / quoted-pair + // qtext = non-whitespace-control / + // %d33 / %d35-91 / %d93-126 + // quoted-pair = ("\" text) / obs-qp + // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text + // + // (Names beginning with “obs-” are the obsolete syntax from RFC 2822, + // Section 4. Since it has been 16 years, we no longer accept that.) + in = in[1:] + QuotedString: + for { + if len(in) == 0 { + return mailbox, false + } + c := in[0] + in = in[1:] + + switch { + case c == '"': + break QuotedString + + case c == '\\': + // quoted-pair + if len(in) == 0 { + return mailbox, false + } + if in[0] == 11 || + in[0] == 12 || + (1 <= in[0] && in[0] <= 9) || + (14 <= in[0] && in[0] <= 127) { + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + } else { + return mailbox, false + } + + case c == 11 || + c == 12 || + // Space (char 32) is not allowed based on the + // BNF, but RFC 3696 gives an example that + // assumes that it is. Several “verified” + // errata continue to argue about this point. + // We choose to accept it. + c == 32 || + c == 33 || + c == 127 || + (1 <= c && c <= 8) || + (14 <= c && c <= 31) || + (35 <= c && c <= 91) || + (93 <= c && c <= 126): + // qtext + localPartBytes = append(localPartBytes, c) + + default: + return mailbox, false + } + } + } else { + // Atom ("." 
Atom)*
+	NextChar:
+		for len(in) > 0 {
+			// atext from RFC 2822, Section 3.2.4
+			c := in[0]
+
+			switch {
+			case c == '\\':
+				// Examples given in RFC 3696 suggest that
+				// escaped characters can appear outside of a
+				// quoted string. Several “verified” errata
+				// continue to argue the point. We choose to
+				// accept it.
+				in = in[1:]
+				if len(in) == 0 {
+					return mailbox, false
+				}
+				fallthrough
+
+			case ('0' <= c && c <= '9') ||
+				('a' <= c && c <= 'z') ||
+				('A' <= c && c <= 'Z') ||
+				c == '!' || c == '#' || c == '$' || c == '%' ||
+				c == '&' || c == '\'' || c == '*' || c == '+' ||
+				c == '-' || c == '/' || c == '=' || c == '?' ||
+				c == '^' || c == '_' || c == '`' || c == '{' ||
+				c == '|' || c == '}' || c == '~' || c == '.':
+				localPartBytes = append(localPartBytes, in[0])
+				in = in[1:]
+
+			default:
+				break NextChar
+			}
+		}
+
+		if len(localPartBytes) == 0 {
+			return mailbox, false
+		}
+
+		// From RFC 3696, Section 3:
+		// “period (".") may also appear, but may not be used to start
+		// or end the local part, nor may two or more consecutive
+		// periods appear.”
+		twoDots := []byte{'.', '.'}
+		if localPartBytes[0] == '.' ||
+			localPartBytes[len(localPartBytes)-1] == '.' ||
+			bytes.Contains(localPartBytes, twoDots) {
+			return mailbox, false
+		}
+	}
+
+	if len(in) == 0 || in[0] != '@' {
+		return mailbox, false
+	}
+	in = in[1:]
+
+	// The RFC specifies a format for domains, but that's known to be
+	// violated in practice so we accept that anything after an '@' is the
+	// domain part.
+	if _, ok := domainToReverseLabels(in); !ok {
+		return mailbox, false
+	}
+
+	mailbox.local = string(localPartBytes)
+	mailbox.domain = in
+	return mailbox, true
+}
+
+// domainToReverseLabels converts a textual domain name like foo.example.com to
+// the list of labels in reverse order, e.g. ["com", "example", "foo"].
+func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
+	for len(domain) > 0 {
+		if i := strings.LastIndexByte(domain, '.'); i == -1 {
+			reverseLabels = append(reverseLabels, domain)
+			domain = ""
+		} else {
+			reverseLabels = append(reverseLabels, domain[i+1:])
+			domain = domain[:i]
+			if i == 0 { // domain == ""
+				// domain is prefixed with an empty label, append an empty
+				// string to reverseLabels to indicate this.
+				reverseLabels = append(reverseLabels, "")
+			}
+		}
+	}
+
+	if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 {
+		// An empty label at the end indicates an absolute value.
+		return nil, false
+	}
+
+	for _, label := range reverseLabels {
+		if len(label) == 0 {
+			// Empty labels are otherwise invalid.
+			return nil, false
+		}
+
+		for _, c := range label {
+			if c < 33 || c > 126 {
+				// Invalid character.
+				return nil, false
+			}
+		}
+	}
+
+	return reverseLabels, true
+}
diff --git a/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go
new file mode 100644
index 000000000..a4500bfb1
--- /dev/null
+++ b/tools/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go
@@ -0,0 +1,488 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package legacyx509 implements a subset of the X.509 standard.
+//
+// It allows parsing and generating certificates, certificate signing
+// requests, certificate revocation lists, and encoded public and private keys.
+// It provides a certificate verifier, complete with a chain builder.
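+// In this vendored legacy copy only the certificate-parsing subset of that
+// functionality is retained; see doc.go for why the copy exists.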
+// +// The package targets the X.509 technical profile defined by the IETF (RFC +// 2459/3280/5280), and as further restricted by the CA/Browser Forum Baseline +// Requirements. There is minimal support for features outside of these +// profiles, as the primary goal of the package is to provide compatibility +// with the publicly trusted TLS certificate ecosystem and its policies and +// constraints. +// +// On macOS and Windows, certificate verification is handled by system APIs, but +// the package aims to apply consistent validation rules across operating +// systems. +package legacyx509 + +import ( + "bytes" + "crypto" + "crypto/elliptic" + stdx509 "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + "strconv" + "unicode" + + // Explicitly import these for their crypto.RegisterHash init side-effects. + // Keep these as blank imports, even if they're imported above. + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +type publicKeyInfo struct { + Raw asn1.RawContent + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString +} + +type SignatureAlgorithm int + +const ( + UnknownSignatureAlgorithm SignatureAlgorithm = iota + + MD2WithRSA // Unsupported. + MD5WithRSA // Only supported for signing, not verification. + SHA1WithRSA // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses. + SHA256WithRSA + SHA384WithRSA + SHA512WithRSA + DSAWithSHA1 // Unsupported. + DSAWithSHA256 // Unsupported. + ECDSAWithSHA1 // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses. + ECDSAWithSHA256 + ECDSAWithSHA384 + ECDSAWithSHA512 + SHA256WithRSAPSS + SHA384WithRSAPSS + SHA512WithRSAPSS + PureEd25519 +) + +func (algo SignatureAlgorithm) String() string { + for _, details := range signatureAlgorithmDetails { + if details.algo == algo { + return details.name + } + } + return strconv.Itoa(int(algo)) +} + +type PublicKeyAlgorithm int + +const ( + UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota + RSA + DSA // Only supported for parsing. 
+ ECDSA + Ed25519 +) + +var publicKeyAlgoName = [...]string{ + RSA: "RSA", + DSA: "DSA", + ECDSA: "ECDSA", + Ed25519: "Ed25519", +} + +func (algo PublicKeyAlgorithm) String() string { + if 0 < algo && int(algo) < len(publicKeyAlgoName) { + return publicKeyAlgoName[algo] + } + return strconv.Itoa(int(algo)) +} + +// OIDs for signature algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } +// +// RFC 3279 2.2.1 RSA Signature Algorithms +// +// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 } +// +// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 } +// +// dsaWithSha1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 } +// +// RFC 3279 2.2.3 ECDSA Signature Algorithm +// +// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-x962(10045) +// signatures(4) ecdsa-with-SHA1(1)} +// +// RFC 4055 5 PKCS #1 Version 1.5 +// +// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 } +// +// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 } +// +// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 } +// +// RFC 5758 3.1 DSA Signature Algorithms +// +// dsaWithSha256 OBJECT IDENTIFIER ::= { +// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) +// csor(3) algorithms(4) id-dsa-with-sha2(3) 2} +// +// RFC 5758 3.2 ECDSA Signature Algorithm +// +// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } +// +// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } +// +// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 } +// +// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers +// +// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } +var ( + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + + oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8} + + // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA + // but it's specified by ISO. Microsoft's makecert.exe has been known + // to produce certificates with this OID. 
+ oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} +) + +var signatureAlgorithmDetails = []struct { + algo SignatureAlgorithm + name string + oid asn1.ObjectIdentifier + params asn1.RawValue + pubKeyAlgo PublicKeyAlgorithm + hash crypto.Hash + isRSAPSS bool +}{ + {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, asn1.NullRawValue, RSA, crypto.MD5, false}, + {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, asn1.NullRawValue, RSA, crypto.SHA1, false}, + {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, asn1.NullRawValue, RSA, crypto.SHA1, false}, + {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, asn1.NullRawValue, RSA, crypto.SHA256, false}, + {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, asn1.NullRawValue, RSA, crypto.SHA384, false}, + {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, asn1.NullRawValue, RSA, crypto.SHA512, false}, + {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, pssParametersSHA256, RSA, crypto.SHA256, true}, + {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, pssParametersSHA384, RSA, crypto.SHA384, true}, + {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, pssParametersSHA512, RSA, crypto.SHA512, true}, + {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, emptyRawValue, DSA, crypto.SHA1, false}, + {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, emptyRawValue, DSA, crypto.SHA256, false}, + {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, emptyRawValue, ECDSA, crypto.SHA1, false}, + {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, emptyRawValue, ECDSA, crypto.SHA256, false}, + {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, emptyRawValue, ECDSA, crypto.SHA384, false}, + {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, emptyRawValue, ECDSA, crypto.SHA512, false}, + {PureEd25519, "Ed25519", oidSignatureEd25519, emptyRawValue, Ed25519, crypto.Hash(0) /* no pre-hashing */, false}, +} + +var emptyRawValue = asn1.RawValue{} + +// DER encoded RSA PSS parameters for the +// SHA256, SHA384, and SHA512 hashes as defined in RFC 3447, Appendix A.2.3. +// The parameters contain the following values: +// - hashAlgorithm contains the associated hash identifier with NULL parameters +// - maskGenAlgorithm always contains the default mgf1SHA1 identifier +// - saltLength contains the length of the associated hash +// - trailerField always contains the default trailerFieldBC value +var ( + pssParametersSHA256 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 162, 3, 2, 1, 32}} + pssParametersSHA384 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 162, 3, 2, 1, 48}} + pssParametersSHA512 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 162, 3, 2, 1, 64}} +) + +// pssParameters reflects the parameters in an AlgorithmIdentifier that +// specifies RSA PSS. See RFC 3447, Appendix A.2.3. +type pssParameters struct { + // The following three fields are not marked as + // optional because the default values specify SHA-1, + // which is no longer suitable for use in signatures. 
+ Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"` + MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"` + SaltLength int `asn1:"explicit,tag:2"` + TrailerField int `asn1:"optional,explicit,tag:3,default:1"` +} + +func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) stdx509.SignatureAlgorithm { + if ai.Algorithm.Equal(oidSignatureEd25519) { + // RFC 8410, Section 3 + // > For all of the OIDs, the parameters MUST be absent. + if len(ai.Parameters.FullBytes) != 0 { + return stdx509.UnknownSignatureAlgorithm + } + } + + if !ai.Algorithm.Equal(oidSignatureRSAPSS) { + for _, details := range signatureAlgorithmDetails { + if ai.Algorithm.Equal(details.oid) { + return stdx509.SignatureAlgorithm(details.algo) + } + } + return stdx509.UnknownSignatureAlgorithm + } + + // RSA PSS is special because it encodes important parameters + // in the Parameters. + + var params pssParameters + if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, ¶ms); err != nil { + return stdx509.UnknownSignatureAlgorithm + } + + var mgf1HashFunc pkix.AlgorithmIdentifier + if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil { + return stdx509.UnknownSignatureAlgorithm + } + + // PSS is greatly overburdened with options. This code forces them into + // three buckets by requiring that the MGF1 hash function always match the + // message hash function (as recommended in RFC 3447, Section 8.1), that the + // salt length matches the hash length, and that the trailer field has the + // default value. + if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) || + !params.MGF.Algorithm.Equal(oidMGF1) || + !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || + (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) || + params.TrailerField != 1 { + return stdx509.UnknownSignatureAlgorithm + } + + switch { + case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32: + return stdx509.SHA256WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48: + return stdx509.SHA384WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64: + return stdx509.SHA512WithRSAPSS + } + + return stdx509.UnknownSignatureAlgorithm +} + +var ( + // RFC 3279, 2.3 Public Key Algorithms + // + // pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) + // rsadsi(113549) pkcs(1) 1 } + // + // rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } + // + // id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) + // x9-57(10040) x9cm(4) 1 } + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + // RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters + // + // id-ecPublicKey OBJECT IDENTIFIER ::= { + // iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + // RFC 8410, Section 3 + // + // id-X25519 OBJECT IDENTIFIER ::= { 1 3 101 110 } + // id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } + oidPublicKeyX25519 = asn1.ObjectIdentifier{1, 3, 101, 110} + oidPublicKeyEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} +) + +// getPublicKeyAlgorithmFromOID returns the exposed PublicKeyAlgorithm +// identifier for public key types supported in certificates and CSRs. Marshal +// and Parse functions may support a different set of public key types. 
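+//
+// As an illustrative sketch (not part of the vendored API surface), the
+// rsaEncryption OID maps to stdx509.RSA and unknown OIDs fall through to
+// stdx509.UnknownPublicKeyAlgorithm:
+//
+//	rsaOID := asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+//	_ = getPublicKeyAlgorithmFromOID(rsaOID)                         // stdx509.RSA
+//	_ = getPublicKeyAlgorithmFromOID(asn1.ObjectIdentifier{1, 2, 3}) // stdx509.UnknownPublicKeyAlgorithm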
+func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) stdx509.PublicKeyAlgorithm { + switch { + case oid.Equal(oidPublicKeyRSA): + return stdx509.RSA + case oid.Equal(oidPublicKeyDSA): + return stdx509.DSA + case oid.Equal(oidPublicKeyECDSA): + return stdx509.ECDSA + case oid.Equal(oidPublicKeyEd25519): + return stdx509.Ed25519 + } + return stdx509.UnknownPublicKeyAlgorithm +} + +// RFC 5480, 2.1.1.1. Named Curve +// +// secp224r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 33 } +// +// secp256r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 7 } +// +// secp384r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 34 } +// +// secp521r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 35 } +// +// NB: secp256r1 is equivalent to prime256v1 +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} +) + +func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve { + switch { + case oid.Equal(oidNamedCurveP224): + return elliptic.P224() + case oid.Equal(oidNamedCurveP256): + return elliptic.P256() + case oid.Equal(oidNamedCurveP384): + return elliptic.P384() + case oid.Equal(oidNamedCurveP521): + return elliptic.P521() + } + return nil +} + +// KeyUsage represents the set of actions that are valid for a given key. It's +// a bitmap of the KeyUsage* constants. +type KeyUsage int + +const ( + KeyUsageDigitalSignature KeyUsage = 1 << iota + KeyUsageContentCommitment + KeyUsageKeyEncipherment + KeyUsageDataEncipherment + KeyUsageKeyAgreement + KeyUsageCertSign + KeyUsageCRLSign + KeyUsageEncipherOnly + KeyUsageDecipherOnly +) + +// RFC 5280, 4.2.1.12 Extended Key Usage +// +// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } +// +// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } +// +// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } +// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } +// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } +// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } +// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } +// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } +var ( + oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} + oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} + oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} + oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} + oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} + oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} + oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} + oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} + oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} + oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} + oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} + oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} + oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22} + 
oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1} +) + +// ExtKeyUsage represents an extended set of actions that are valid for a given key. +// Each of the ExtKeyUsage* constants define a unique action. +type ExtKeyUsage int + +const ( + ExtKeyUsageAny ExtKeyUsage = iota + ExtKeyUsageServerAuth + ExtKeyUsageClientAuth + ExtKeyUsageCodeSigning + ExtKeyUsageEmailProtection + ExtKeyUsageIPSECEndSystem + ExtKeyUsageIPSECTunnel + ExtKeyUsageIPSECUser + ExtKeyUsageTimeStamping + ExtKeyUsageOCSPSigning + ExtKeyUsageMicrosoftServerGatedCrypto + ExtKeyUsageNetscapeServerGatedCrypto + ExtKeyUsageMicrosoftCommercialCodeSigning + ExtKeyUsageMicrosoftKernelCodeSigning +) + +// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. +var extKeyUsageOIDs = []struct { + extKeyUsage ExtKeyUsage + oid asn1.ObjectIdentifier +}{ + {ExtKeyUsageAny, oidExtKeyUsageAny}, + {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth}, + {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth}, + {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning}, + {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection}, + {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem}, + {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel}, + {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser}, + {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping}, + {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning}, + {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto}, + {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto}, + {ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning}, + {ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning}, +} + +func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) { + for _, pair := range extKeyUsageOIDs { + if oid.Equal(pair.oid) { + return pair.extKeyUsage, true + } + } + return +} + +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +var ( + oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1} +) + +var ( + oidAuthorityInfoAccessOcsp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1} + oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2} +) + +func isIA5String(s string) error { + for _, r := range s { + // Per RFC5280 "IA5String is limited to the set of ASCII characters" + if r > unicode.MaxASCII { + return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s) + } + } + + return nil +} diff --git a/tools/vendor/github.com/smallstep/pkcs7/pkcs7.go b/tools/vendor/github.com/smallstep/pkcs7/pkcs7.go new file mode 100644 index 000000000..dd5b18380 --- /dev/null +++ b/tools/vendor/github.com/smallstep/pkcs7/pkcs7.go @@ -0,0 +1,353 @@ +// Package pkcs7 implements parsing and generation of some PKCS#7 structures. +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "io" + "sort" + "sync" + + _ "crypto/sha1" // for crypto.SHA1 + + legacyx509 "github.com/smallstep/pkcs7/internal/legacy/x509" +) + +// PKCS7 Represents a PKCS7 structure +type PKCS7 struct { + Content []byte + Certificates []*x509.Certificate + CRLs []pkix.CertificateList + Signers []signerInfo + Hasher Hasher + raw interface{} +} + +// Hasher is an interface defining a custom hash calculator. 
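+//
+// A minimal illustrative implementation (hypothetical, not part of this
+// package) streams the content through the selected standard hash:
+//
+//	type streamHasher struct{}
+//
+//	func (streamHasher) Hash(h crypto.Hash, r io.Reader) ([]byte, error) {
+//		if !h.Available() {
+//			return nil, fmt.Errorf("hash function %v not available", h)
+//		}
+//		w := h.New()
+//		if _, err := io.Copy(w, r); err != nil {
+//			return nil, err
+//		}
+//		return w.Sum(nil), nil
+//	}
+//
+// Assigning such a value to the PKCS7 Hasher field makes calculateHash use it
+// instead of hashing the content in memory.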
+type Hasher interface {
+	Hash(crypto.Hash, io.Reader) ([]byte, error)
+}
+
+type contentInfo struct {
+	ContentType asn1.ObjectIdentifier
+	Content     asn1.RawValue `asn1:"explicit,optional,tag:0"`
+}
+
+// ErrUnsupportedContentType is returned when a PKCS7 content type is not supported.
+// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
+// Enveloped Data (1.2.840.113549.1.7.3), and Encrypted Data (1.2.840.113549.1.7.6)
+// are supported.
+var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")
+
+type unsignedData []byte
+
+var (
+	// Signed Data OIDs
+	OIDData                   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
+	OIDSignedData             = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
+	OIDEnvelopedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
+	OIDEncryptedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
+	OIDAttributeContentType   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
+	OIDAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
+	OIDAttributeSigningTime   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
+
+	// Digest Algorithms
+	OIDDigestAlgorithmSHA1   = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
+	OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
+	OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
+	OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
+	OIDDigestAlgorithmSHA224 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 4}
+
+	OIDDigestAlgorithmDSA     = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
+	OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+
+	OIDDigestAlgorithmECDSASHA1   = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+	OIDDigestAlgorithmECDSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+	OIDDigestAlgorithmECDSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+	OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+
+	// Signature Algorithms
+	OIDEncryptionAlgorithmRSAMD5    = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}  // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.4
+	OIDEncryptionAlgorithmRSASHA1   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}  // ditto
+	OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} // ditto
+	OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} // ditto
+	OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} // ditto
+	OIDEncryptionAlgorithmRSASHA224 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 14} // ditto
+
+	OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
+	OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
+	OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+
+	// Asymmetric Encryption Algorithms
+	OIDEncryptionAlgorithmRSA       = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.2
+	OIDEncryptionAlgorithmRSAESOAEP = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 7} // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.1
+
+	// Symmetric Encryption Algorithms
+	OIDEncryptionAlgorithmDESCBC     = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}       // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.1
+	OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.2
+	
OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} // see https://www.rfc-editor.org/rfc/rfc3565.html#section-4.1 + OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} // see https://www.rfc-editor.org/rfc/rfc5084.html#section-3.2 + OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.5 + OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} // see https://www.rfc-editor.org/rfc/rfc5084.html#section-3.2 +) + +func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) { + switch { + case oid.Equal(OIDDigestAlgorithmSHA1), oid.Equal(OIDDigestAlgorithmECDSASHA1), + oid.Equal(OIDDigestAlgorithmDSA), oid.Equal(OIDDigestAlgorithmDSASHA1), + oid.Equal(OIDEncryptionAlgorithmRSA): + return crypto.SHA1, nil + case oid.Equal(OIDDigestAlgorithmSHA256), oid.Equal(OIDDigestAlgorithmECDSASHA256): + return crypto.SHA256, nil + case oid.Equal(OIDDigestAlgorithmSHA384), oid.Equal(OIDDigestAlgorithmECDSASHA384): + return crypto.SHA384, nil + case oid.Equal(OIDDigestAlgorithmSHA512), oid.Equal(OIDDigestAlgorithmECDSASHA512): + return crypto.SHA512, nil + } + return crypto.Hash(0), ErrUnsupportedAlgorithm +} + +// getDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm +// and returns the corresponding OID digest algorithm +func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) { + switch digestAlg { + case x509.SHA1WithRSA, x509.ECDSAWithSHA1: + return OIDDigestAlgorithmSHA1, nil + case x509.SHA256WithRSA, x509.ECDSAWithSHA256: + return OIDDigestAlgorithmSHA256, nil + case x509.SHA384WithRSA, x509.ECDSAWithSHA384: + return OIDDigestAlgorithmSHA384, nil + case x509.SHA512WithRSA, x509.ECDSAWithSHA512: + return OIDDigestAlgorithmSHA512, nil + } + return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm") +} + +// getOIDForEncryptionAlgorithm takes the public or private key type of the signer and +// the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm +func getOIDForEncryptionAlgorithm(pkey interface{}, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { + switch k := pkey.(type) { + case *rsa.PrivateKey, *rsa.PublicKey: + switch { + default: + return OIDEncryptionAlgorithmRSA, nil + case OIDDigestAlg.Equal(OIDEncryptionAlgorithmRSA): + return OIDEncryptionAlgorithmRSA, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): + return OIDEncryptionAlgorithmRSASHA1, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): + return OIDEncryptionAlgorithmRSASHA256, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): + return OIDEncryptionAlgorithmRSASHA384, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): + return OIDEncryptionAlgorithmRSASHA512, nil + } + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + switch { + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): + return OIDDigestAlgorithmECDSASHA1, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): + return OIDDigestAlgorithmECDSASHA256, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): + return OIDDigestAlgorithmECDSASHA384, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): + return OIDDigestAlgorithmECDSASHA512, nil + } + case *dsa.PrivateKey, *dsa.PublicKey: + return OIDDigestAlgorithmDSA, nil + case crypto.Signer: + // This generic case is here to cover types from other packages. 
It + // was specifically added to handle the private keyRSA type in the + // github.com/go-piv/piv-go/piv package. + return getOIDForEncryptionAlgorithm(k.Public(), OIDDigestAlg) + } + return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown private key type %T", pkey) + +} + +// Parse decodes a DER encoded PKCS7 package +func Parse(data []byte) (p7 *PKCS7, err error) { + if len(data) == 0 { + return nil, errors.New("pkcs7: input data is empty") + } + var info contentInfo + der, err := ber2der(data) + if err != nil { + return nil, err + } + rest, err := asn1.Unmarshal(der, &info) + if len(rest) > 0 { + err = asn1.SyntaxError{Msg: "trailing data"} + return + } + if err != nil { + return + } + + // fmt.Printf("--> Content Type: %s", info.ContentType) + switch { + case info.ContentType.Equal(OIDSignedData): + return parseSignedData(info.Content.Bytes) + case info.ContentType.Equal(OIDEnvelopedData): + return parseEnvelopedData(info.Content.Bytes) + case info.ContentType.Equal(OIDEncryptedData): + return parseEncryptedData(info.Content.Bytes) + } + return nil, ErrUnsupportedContentType +} + +func parseEnvelopedData(data []byte) (*PKCS7, error) { + var ed envelopedData + if _, err := asn1.Unmarshal(data, &ed); err != nil { + return nil, err + } + return &PKCS7{ + raw: ed, + }, nil +} + +func parseEncryptedData(data []byte) (*PKCS7, error) { + var ed encryptedData + if _, err := asn1.Unmarshal(data, &ed); err != nil { + return nil, err + } + return &PKCS7{ + raw: ed, + }, nil +} + +// SetFallbackLegacyX509CertificateParserEnabled enables parsing certificates +// embedded in a PKCS7 message using the logic from crypto/x509 from before +// Go 1.23. Go 1.23 introduced a breaking change in case a certificate contains +// a critical authority key identifier, which is the correct thing to do based +// on RFC 5280, but it breaks Windows devices performing the Simple Certificate +// Enrolment Protocol (SCEP), as the certificates embedded in those requests +// apparently have authority key identifier extensions marked critical. +// +// See https://go-review.googlesource.com/c/go/+/562341 for the change in the +// Go source. +// +// When [SetFallbackLegacyX509CertificateParserEnabled] is called with true, it +// enables parsing using the legacy crypto/x509 certificate parser. It'll first +// try to parse the certificates using the regular Go crypto/x509 package, but +// if it fails on the above case, it'll retry parsing the certificates using a +// copy of the crypto/x509 package based on Go 1.23, but skips checking the +// authority key identifier extension being critical or not. 
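+//
+// A typical call site (an illustrative sketch; derBytes stands in for the raw
+// DER payload) enables the fallback once, before parsing:
+//
+//	SetFallbackLegacyX509CertificateParserEnabled(true)
+//	p7, err := Parse(derBytes)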
+func SetFallbackLegacyX509CertificateParserEnabled(v bool) {
+	legacyX509CertificateParser.Lock()
+	legacyX509CertificateParser.enabled = v
+	legacyX509CertificateParser.Unlock()
+}
+
+var legacyX509CertificateParser struct {
+	sync.RWMutex
+	enabled bool
+}
+
+func isLegacyX509ParserEnabled() bool {
+	legacyX509CertificateParser.RLock()
+	defer legacyX509CertificateParser.RUnlock()
+	return legacyX509CertificateParser.enabled
+}
+
+func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
+	if len(raw.Raw) == 0 {
+		return nil, nil
+	}
+
+	var val asn1.RawValue
+	if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
+		return nil, err
+	}
+
+	certificates, err := x509.ParseCertificates(val.Bytes)
+	if err != nil && err.Error() == "x509: authority key identifier incorrectly marked critical" {
+		if isLegacyX509ParserEnabled() {
+			certificates, err = legacyx509.ParseCertificates(val.Bytes)
+		}
+	}
+
+	return certificates, err
+}
+
+func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
+	return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Equal(cert.RawIssuer, ias.IssuerName.FullBytes)
+}
+
+// Attribute represents a key value pair attribute. Value must be marshalable by
+// `encoding/asn1`.
+type Attribute struct {
+	Type  asn1.ObjectIdentifier
+	Value interface{}
+}
+
+type attributes struct {
+	types  []asn1.ObjectIdentifier
+	values []interface{}
+}
+
+// Add adds the attribute, maintaining insertion order
+func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
+	attrs.types = append(attrs.types, attrType)
+	attrs.values = append(attrs.values, value)
+}
+
+type sortableAttribute struct {
+	SortKey   []byte
+	Attribute attribute
+}
+
+type attributeSet []sortableAttribute
+
+func (sa attributeSet) Len() int {
+	return len(sa)
+}
+
+func (sa attributeSet) Less(i, j int) bool {
+	return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
+}
+
+func (sa attributeSet) Swap(i, j int) {
+	sa[i], sa[j] = sa[j], sa[i]
+}
+
+func (sa attributeSet) Attributes() []attribute {
+	attrs := make([]attribute, len(sa))
+	for i, attr := range sa {
+		attrs[i] = attr.Attribute
+	}
+	return attrs
+}
+
+func (attrs *attributes) ForMarshalling() ([]attribute, error) {
+	sortables := make(attributeSet, len(attrs.types))
+	for i := range sortables {
+		attrType := attrs.types[i]
+		attrValue := attrs.values[i]
+		asn1Value, err := asn1.Marshal(attrValue)
+		if err != nil {
+			return nil, err
+		}
+		attr := attribute{
+			Type:  attrType,
+			Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
+		}
+		encoded, err := asn1.Marshal(attr)
+		if err != nil {
+			return nil, err
+		}
+		sortables[i] = sortableAttribute{
+			SortKey:   encoded,
+			Attribute: attr,
+		}
+	}
+	sort.Sort(sortables)
+	return sortables.Attributes(), nil
+}
diff --git a/tools/vendor/github.com/smallstep/pkcs7/sign.go b/tools/vendor/github.com/smallstep/pkcs7/sign.go
new file mode 100644
index 000000000..74ce50d80
--- /dev/null
+++ b/tools/vendor/github.com/smallstep/pkcs7/sign.go
@@ -0,0 +1,474 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"math/big"
+	"sync"
+	"time"
+)
+
+func init() {
+	defaultMessageDigestAlgorithm.oid = OIDDigestAlgorithmSHA1
+}
+
+var defaultMessageDigestAlgorithm struct {
+	sync.RWMutex
+	oid asn1.ObjectIdentifier
+}
+
+// SetDefaultDigestAlgorithm sets the default digest algorithm
+// to be used for signing operations on
[SignedData]. +// +// This must be called before creating a new instance of [SignedData] +// using [NewSignedData]. +// +// When this function is not called, the default digest algorithm is SHA1. +func SetDefaultDigestAlgorithm(d asn1.ObjectIdentifier) error { + defaultMessageDigestAlgorithm.Lock() + defer defaultMessageDigestAlgorithm.Unlock() + + switch { + case d.Equal(OIDDigestAlgorithmSHA1), + d.Equal(OIDDigestAlgorithmSHA224), d.Equal(OIDDigestAlgorithmSHA256), + d.Equal(OIDDigestAlgorithmSHA384), d.Equal(OIDDigestAlgorithmSHA512), + d.Equal(OIDDigestAlgorithmDSA), d.Equal(OIDDigestAlgorithmDSASHA1), + d.Equal(OIDDigestAlgorithmECDSASHA1), d.Equal(OIDDigestAlgorithmECDSASHA256), + d.Equal(OIDDigestAlgorithmECDSASHA384), d.Equal(OIDDigestAlgorithmECDSASHA512): + break + default: + return fmt.Errorf("unsupported message digest algorithm %v", d) + } + + defaultMessageDigestAlgorithm.oid = d + + return nil +} + +func defaultMessageDigestAlgorithmOID() asn1.ObjectIdentifier { + defaultMessageDigestAlgorithm.RLock() + defer defaultMessageDigestAlgorithm.RUnlock() + + return defaultMessageDigestAlgorithm.oid +} + +// SignedData is an opaque data structure for creating signed data payloads +type SignedData struct { + sd signedData + certs []*x509.Certificate + data, messageDigest []byte + digestOid asn1.ObjectIdentifier + encryptionOid asn1.ObjectIdentifier +} + +// NewSignedData takes data and initializes a PKCS7 SignedData struct that is +// ready to be signed via AddSigner. The digest algorithm is set to SHA1 by default +// and can be changed by calling SetDigestAlgorithm. +func NewSignedData(data []byte) (*SignedData, error) { + content, err := asn1.Marshal(data) + if err != nil { + return nil, err + } + ci := contentInfo{ + ContentType: OIDData, + Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, + } + sd := signedData{ + ContentInfo: ci, + Version: 1, + } + return &SignedData{sd: sd, data: data, digestOid: defaultMessageDigestAlgorithmOID()}, nil +} + +// SignerInfoConfig are optional values to include when adding a signer +type SignerInfoConfig struct { + ExtraSignedAttributes []Attribute + ExtraUnsignedAttributes []Attribute +} + +type signedData struct { + Version int `asn1:"default:1"` + DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"` + ContentInfo contentInfo + Certificates rawCertificates `asn1:"optional,tag:0"` + CRLs []pkix.CertificateList `asn1:"optional,tag:1"` + SignerInfos []signerInfo `asn1:"set"` +} + +type signerInfo struct { + Version int `asn1:"default:1"` + IssuerAndSerialNumber issuerAndSerial + DigestAlgorithm pkix.AlgorithmIdentifier + AuthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:0"` + DigestEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedDigest []byte + UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"` +} + +type attribute struct { + Type asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +func marshalAttributes(attrs []attribute) ([]byte, error) { + encodedAttributes, err := asn1.Marshal(struct { + A []attribute `asn1:"set"` + }{A: attrs}) + if err != nil { + return nil, err + } + + // Remove the leading sequence octets + var raw asn1.RawValue + asn1.Unmarshal(encodedAttributes, &raw) + return raw.Bytes, nil +} + +type rawCertificates struct { + Raw asn1.RawContent +} + +type issuerAndSerial struct { + IssuerName asn1.RawValue + SerialNumber *big.Int +} + +// SetDigestAlgorithm sets the digest algorithm to be used in the signing process. 
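+//
+// For example (an illustrative sketch; content, cert, and key are
+// placeholders):
+//
+//	sd, _ := NewSignedData(content)
+//	sd.SetDigestAlgorithm(OIDDigestAlgorithmSHA256)
+//	_ = sd.AddSigner(cert, key, SignerInfoConfig{})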
+//
+// This should be called before adding signers.
+func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) {
+	sd.digestOid = d
+}
+
+// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process.
+//
+// This should be called before adding signers.
+func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) {
+	sd.encryptionOid = d
+}
+
+// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent.
+func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
+	var parents []*x509.Certificate
+	return sd.AddSignerChain(ee, pkey, parents, config)
+}
+
+// AddSignerChain signs attributes about the content and adds certificates
+// and signer infos to the Signed Data. The certificate and private key
+// of the end-entity signer are used to issue the signature, and any
+// parents of that end-entity that need to be added to the list of
+// certificates can be specified in the parents slice.
+//
+// The signature algorithm used to hash the data is that of the end-entity
+// certificate.
+func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error {
+	// Following RFC 2315, 9.2 SignerInfo type, the distinguished name of
+	// the issuer of the end-entity signer is stored in the issuerAndSerialNumber
+	// section of the SignedData.SignerInfo, alongside the serial number of
+	// the end-entity.
+	var ias issuerAndSerial
+	ias.SerialNumber = ee.SerialNumber
+	if len(parents) == 0 {
+		// no parent, the issuer is the end-entity cert itself
+		ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
+	} else {
+		err := verifyPartialChain(ee, parents)
+		if err != nil {
+			return err
+		}
+		// the first parent is the issuer
+		ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject}
+	}
+	sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers,
+		pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+	)
+	hash, err := getHashForOID(sd.digestOid)
+	if err != nil {
+		return err
+	}
+	h := hash.New()
+	h.Write(sd.data)
+	sd.messageDigest = h.Sum(nil)
+	encryptionOid, err := getOIDForEncryptionAlgorithm(pkey, sd.digestOid)
+	if err != nil {
+		return err
+	}
+	attrs := &attributes{}
+	attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType)
+	attrs.Add(OIDAttributeMessageDigest, sd.messageDigest)
+	attrs.Add(OIDAttributeSigningTime, time.Now().UTC())
+	for _, attr := range config.ExtraSignedAttributes {
+		attrs.Add(attr.Type, attr.Value)
+	}
+	finalAttrs, err := attrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+	unsignedAttrs := &attributes{}
+	for _, attr := range config.ExtraUnsignedAttributes {
+		unsignedAttrs.Add(attr.Type, attr.Value)
+	}
+	finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+	// create signature of signed attributes
+	signature, err := signAttributes(finalAttrs, pkey, hash)
+	if err != nil {
+		return err
+	}
+	signer := signerInfo{
+		AuthenticatedAttributes:   finalAttrs,
+		UnauthenticatedAttributes: finalUnsignedAttrs,
+		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid},
+		IssuerAndSerialNumber:     ias,
+		EncryptedDigest:           signature,
+		Version:                   1,
+	}
+	sd.certs = append(sd.certs, ee)
+	if len(parents) > 0 {
+		sd.certs = append(sd.certs, parents...)
+	}
+	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
+	return nil
+}
+
+// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData.
+// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone
+// and does not include any signed attributes like timestamp and so on.
+//
+// This function is needed to sign old Android APKs, something you probably
+// shouldn't do unless you're maintaining backward compatibility for old
+// applications.
+func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
+	var signature []byte
+	sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid})
+	hash, err := getHashForOID(sd.digestOid)
+	if err != nil {
+		return err
+	}
+	h := hash.New()
+	h.Write(sd.data)
+	sd.messageDigest = h.Sum(nil)
+	switch pkey := pkey.(type) {
+	case *dsa.PrivateKey:
+		// dsa doesn't implement crypto.Signer so we make a special case
+		// https://github.com/golang/go/issues/27889
+		r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest)
+		if err != nil {
+			return err
+		}
+		signature, err = asn1.Marshal(dsaSignature{r, s})
+		if err != nil {
+			return err
+		}
+	default:
+		key, ok := pkey.(crypto.Signer)
+		if !ok {
+			return errors.New("pkcs7: private key does not implement crypto.Signer")
+		}
+		signature, err = key.Sign(rand.Reader, sd.messageDigest, hash)
+		if err != nil {
+			return err
+		}
+	}
+	var ias issuerAndSerial
+	ias.SerialNumber = ee.SerialNumber
+	// no parent, the issuer is the end-entity cert itself
+	ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
+	if sd.encryptionOid == nil {
+		// if the encryption algorithm wasn't set by SetEncryptionAlgorithm,
+		// infer it from the digest algorithm
+		sd.encryptionOid, err = getOIDForEncryptionAlgorithm(pkey, sd.digestOid)
+	}
+	if err != nil {
+		return err
+	}
+	signer := signerInfo{
+		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid},
+		IssuerAndSerialNumber:     ias,
+		EncryptedDigest:           signature,
+		Version:                   1,
+	}
+	// no signed attributes here; append the signer and its certificate
+	sd.certs = append(sd.certs, ee)
+	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
+	return nil
+}
+
+func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error {
+	unsignedAttrs := &attributes{}
+	for _, attr := range extraUnsignedAttrs {
+		unsignedAttrs.Add(attr.Type, attr.Value)
+	}
+	finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+
+	si.UnauthenticatedAttributes = finalUnsignedAttrs
+
+	return nil
+}
+
+// AddCertificate adds the certificate to the payload. Useful for parent certificates
+func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
+	sd.certs = append(sd.certs, cert)
+}
+
+// Detach removes content from the signed data struct to make it a detached signature.
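+// A detached-signature flow looks roughly like this (an illustrative sketch;
+// content, cert, and key are placeholders):
+//
+//	sd, _ := NewSignedData(content)
+//	_ = sd.AddSigner(cert, key, SignerInfoConfig{})
+//	sd.Detach()
+//	der, _ := sd.Finish()
+//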
+// This must be called right before Finish().
+func (sd *SignedData) Detach() {
+	sd.sd.ContentInfo = contentInfo{ContentType: OIDData}
+}
+
+// GetSignedData returns the private Signed Data
+func (sd *SignedData) GetSignedData() *signedData {
+	return &sd.sd
+}
+
+// Finish marshals the content and its signers
+func (sd *SignedData) Finish() ([]byte, error) {
+	sd.sd.Certificates = marshalCertificates(sd.certs)
+	inner, err := asn1.Marshal(sd.sd)
+	if err != nil {
+		return nil, err
+	}
+	outer := contentInfo{
+		ContentType: OIDSignedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
+	}
+	return asn1.Marshal(outer)
+}
+
+// RemoveAuthenticatedAttributes removes authenticated attributes from signedData
+// similar to OpenSSL's PKCS7_NOATTR or -noattr flags
+func (sd *SignedData) RemoveAuthenticatedAttributes() {
+	for i := range sd.sd.SignerInfos {
+		sd.sd.SignerInfos[i].AuthenticatedAttributes = nil
+	}
+}
+
+// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData
+func (sd *SignedData) RemoveUnauthenticatedAttributes() {
+	for i := range sd.sd.SignerInfos {
+		sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil
+	}
+}
+
+// verifyPartialChain checks that a given cert is issued by the first parent in the list,
+// then continues down the path. It doesn't require the last parent to be a root CA,
+// or to be trusted in any truststore. It simply verifies that the chain provided, albeit
+// partial, makes sense.
+func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error {
+	if len(parents) == 0 {
+		return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName)
+	}
+	err := cert.CheckSignatureFrom(parents[0])
+	if err != nil {
+		return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err)
+	}
+	if len(parents) == 1 {
+		// there is no more parent to check, return
+		return nil
+	}
+	return verifyPartialChain(parents[0], parents[1:])
+}
+
+func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
+	var ias issuerAndSerial
+	// The issuer RDNSequence has to match exactly the sequence in the certificate
+	// We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
+	ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer}
+	ias.SerialNumber = cert.SerialNumber
+
+	return ias, nil
+}
+
+// signAttributes signs the DER encoded form of the attributes with the private key
+func signAttributes(attrs []attribute, pkey crypto.PrivateKey, digestAlg crypto.Hash) ([]byte, error) {
+	attrBytes, err := marshalAttributes(attrs)
+	if err != nil {
+		return nil, err
+	}
+	h := digestAlg.New()
+	h.Write(attrBytes)
+	hash := h.Sum(nil)
+
+	// dsa doesn't implement crypto.Signer so we make a special case
+	// https://github.com/golang/go/issues/27889
+	switch pkey := pkey.(type) {
+	case *dsa.PrivateKey:
+		r, s, err := dsa.Sign(rand.Reader, pkey, hash)
+		if err != nil {
+			return nil, err
+		}
+		return asn1.Marshal(dsaSignature{r, s})
+	}
+
+	key, ok := pkey.(crypto.Signer)
+	if !ok {
+		return nil, errors.New("pkcs7: private key does not implement crypto.Signer")
+	}
+	return key.Sign(rand.Reader, hash, digestAlg)
+}
+
+type dsaSignature struct {
+	R, S *big.Int
+}
+
+// marshalCertificates concatenates and wraps the certificates in the RawValue structure
+func marshalCertificates(certs []*x509.Certificate) rawCertificates {
+	var buf bytes.Buffer
+	for _, cert := range certs {
+		buf.Write(cert.Raw)
+	}
+	rawCerts, _ := marshalCertificateBytes(buf.Bytes())
+	return rawCerts
+}
+
+// Even though the tag & length are stripped out during marshalling of the
+// RawContent, we have to encode it into the RawContent. If it's missing,
+// then `asn1.Marshal()` will strip out the certificate wrapper instead.
+func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
+	var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
+	b, err := asn1.Marshal(val)
+	if err != nil {
+		return rawCertificates{}, err
+	}
+	return rawCertificates{Raw: b}, nil
+}
+
+// DegenerateCertificate creates a signed data structure containing only the
+// provided certificate or certificate chain.
+func DegenerateCertificate(cert []byte) ([]byte, error) {
+	rawCert, err := marshalCertificateBytes(cert)
+	if err != nil {
+		return nil, err
+	}
+	emptyContent := contentInfo{ContentType: OIDData}
+	sd := signedData{
+		Version:      1,
+		ContentInfo:  emptyContent,
+		Certificates: rawCert,
+		CRLs:         []pkix.CertificateList{},
+	}
+	content, err := asn1.Marshal(sd)
+	if err != nil {
+		return nil, err
+	}
+	signedContent := contentInfo{
+		ContentType: OIDSignedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+	}
+	return asn1.Marshal(signedContent)
+}
diff --git a/tools/vendor/github.com/smallstep/pkcs7/verify.go b/tools/vendor/github.com/smallstep/pkcs7/verify.go
new file mode 100644
index 000000000..f9ad34bba
--- /dev/null
+++ b/tools/vendor/github.com/smallstep/pkcs7/verify.go
@@ -0,0 +1,385 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/subtle"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"time"
+)
+
+// Verify is a wrapper around VerifyWithChain() that initializes an empty
+// trust store, effectively disabling certificate verification when validating
+// a signature.
+func (p7 *PKCS7) Verify() (err error) {
+	return p7.VerifyWithChain(nil)
+}
+
+// VerifyWithChain checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to one of the roots in the
+// truststore. When the PKCS7 object includes the signing time
+// authenticated attribute, the chain is verified at that time;
+// otherwise it is verified at the current UTC time.
+func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) {
+	if len(p7.Signers) == 0 {
+		return errors.New("pkcs7: Message has no signers")
+	}
+	for _, signer := range p7.Signers {
+		if err := verifySignature(p7, signer, truststore); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// VerifyWithChainAtTime checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to a root in the truststore at
+// currentTime. It does not use the signing time authenticated
+// attribute.
+func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime time.Time) (err error) {
+	if len(p7.Signers) == 0 {
+		return errors.New("pkcs7: Message has no signers")
+	}
+	for _, signer := range p7.Signers {
+		if err := verifySignatureAtTime(p7, signer, truststore, currentTime); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SigningTimeNotValidError is returned when the signing time attribute
+// falls outside of the signer certificate validity.
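+//
+// Callers can detect this condition with errors.As (an illustrative sketch):
+//
+//	var stErr *SigningTimeNotValidError
+//	if err := p7.Verify(); errors.As(err, &stErr) {
+//		// reject: signed at stErr.SigningTime, outside the cert validity window
+//	}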
+type SigningTimeNotValidError struct { + SigningTime time.Time + NotBefore time.Time // NotBefore of signer + NotAfter time.Time // NotAfter of signer +} + +func (e *SigningTimeNotValidError) Error() string { + return fmt.Sprintf("pkcs7: signing time %q is outside of certificate validity %q to %q", + e.SigningTime.Format(time.RFC3339), + e.NotBefore.Format(time.RFC3339), + e.NotAfter.Format(time.RFC3339)) +} + +func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool, currentTime time.Time) (err error) { + signedData := p7.Content + ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) + if ee == nil { + return errors.New("pkcs7: No certificate for signer") + } + if len(signer.AuthenticatedAttributes) > 0 { + // TODO(fullsailor): First check the content type match + var ( + digest []byte + signingTime time.Time + ) + err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest) + if err != nil { + return err + } + hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) + if err != nil { + return err + } + computed, err := calculateHash(p7.Hasher, hash, p7.Content) + if err != nil { + return err + } + if subtle.ConstantTimeCompare(digest, computed) != 1 { + return &MessageDigestMismatchError{ + ExpectedDigest: digest, + ActualDigest: computed, + } + } + signedData, err = marshalAttributes(signer.AuthenticatedAttributes) + if err != nil { + return err + } + err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime) + if err == nil { + // signing time found, performing validity check + if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { + return &SigningTimeNotValidError{ + SigningTime: signingTime, + NotBefore: ee.NotBefore, + NotAfter: ee.NotAfter, + } + } + } + } + if truststore != nil { + _, err = verifyCertChain(ee, p7.Certificates, truststore, currentTime) + if err != nil { + return err + } + } + sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm) + if err != nil { + return err + } + return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest) +} + +func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (err error) { + signedData := p7.Content + ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) + if ee == nil { + return errors.New("pkcs7: No certificate for signer") + } + signingTime := time.Now().UTC() + if len(signer.AuthenticatedAttributes) > 0 { + // TODO(fullsailor): First check the content type match + var digest []byte + err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest) + if err != nil { + return err + } + hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) + if err != nil { + return err + } + computed, err := calculateHash(p7.Hasher, hash, p7.Content) + if err != nil { + return err + } + if subtle.ConstantTimeCompare(digest, computed) != 1 { + return &MessageDigestMismatchError{ + ExpectedDigest: digest, + ActualDigest: computed, + } + } + signedData, err = marshalAttributes(signer.AuthenticatedAttributes) + if err != nil { + return err + } + err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime) + if err == nil { + // signing time found, performing validity check + if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { + return &SigningTimeNotValidError{ + SigningTime: signingTime, + NotBefore: ee.NotBefore, 
+					NotAfter:    ee.NotAfter,
+				}
+			}
+		}
+	}
+	if truststore != nil {
+		_, err = verifyCertChain(ee, p7.Certificates, truststore, signingTime)
+		if err != nil {
+			return err
+		}
+	}
+	sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
+	if err != nil {
+		return err
+	}
+	return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
+}
+
+// GetOnlySigner returns an x509.Certificate for the first signer of the signed
+// data payload. If there are more or fewer than one signer, nil is returned.
+func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
+	if len(p7.Signers) != 1 {
+		return nil
+	}
+	signer := p7.Signers[0]
+	return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+}
+
+// UnmarshalSignedAttribute decodes a single attribute from the signer info
+func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
+	sd, ok := p7.raw.(signedData)
+	if !ok {
+		return errors.New("pkcs7: payload is not signedData content")
+	}
+	if len(sd.SignerInfos) < 1 {
+		return errors.New("pkcs7: payload has no signers")
+	}
+	attributes := sd.SignerInfos[0].AuthenticatedAttributes
+	return unmarshalAttribute(attributes, attributeType, out)
+}
+
+func parseSignedData(data []byte) (*PKCS7, error) {
+	var sd signedData
+	asn1.Unmarshal(data, &sd)
+	certs, err := sd.Certificates.Parse()
+	if err != nil {
+		return nil, err
+	}
+	// fmt.Printf("--> Signed Data Version %d\n", sd.Version)
+
+	var compound asn1.RawValue
+	var content unsignedData
+
+	// The Content.Bytes may be empty on PKI responses.
+	if len(sd.ContentInfo.Content.Bytes) > 0 {
+		if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
+			return nil, err
+		}
+	}
+	// Compound octet string
+	if compound.IsCompound {
+		if compound.Tag == 4 {
+			for len(compound.Bytes) > 0 {
+				var cdata asn1.RawValue
+				if _, err = asn1.Unmarshal(compound.Bytes, &cdata); err != nil {
+					return nil, err
+				}
+				content = append(content, cdata.Bytes...)
+				compound.Bytes = compound.Bytes[len(cdata.FullBytes):]
+			}
+		} else {
+			content = compound.Bytes
+		}
+	} else {
+		// assuming this is tag 04
+		content = compound.Bytes
+	}
+	return &PKCS7{
+		Content:      content,
+		Certificates: certs,
+		CRLs:         sd.CRLs,
+		Signers:      sd.SignerInfos,
+		raw:          sd}, nil
+}
+
+// verifyCertChain takes an end-entity cert, a list of potential intermediates and a
+// truststore, and builds all potential chains between the EE and a trusted root.
+//
+// When verifying chains that may have expired, currentTime can be set to a past date
+// to allow the verification to pass. If unset, currentTime is set to the current UTC time.
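+//
+// Callers reach this path through the exported API; for example (an
+// illustrative sketch; roots and signedAt are placeholders):
+//
+//	err := p7.VerifyWithChainAtTime(roots, signedAt)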
+func verifyCertChain(ee *x509.Certificate, certs []*x509.Certificate, truststore *x509.CertPool, currentTime time.Time) (chains [][]*x509.Certificate, err error) { + intermediates := x509.NewCertPool() + for _, intermediate := range certs { + intermediates.AddCert(intermediate) + } + verifyOptions := x509.VerifyOptions{ + Roots: truststore, + Intermediates: intermediates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + CurrentTime: currentTime, + } + chains, err = ee.Verify(verifyOptions) + if err != nil { + return chains, fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err) + } + return +} + +// MessageDigestMismatchError is returned when the signer data digest does not +// match the computed digest for the contained content +type MessageDigestMismatchError struct { + ExpectedDigest []byte + ActualDigest []byte +} + +func (err *MessageDigestMismatchError) Error() string { + return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest) +} + +func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x509.SignatureAlgorithm, error) { + switch { + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA1): + return x509.ECDSAWithSHA1, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA256): + return x509.ECDSAWithSHA256, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA384): + return x509.ECDSAWithSHA384, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA512): + return x509.ECDSAWithSHA512, nil + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSA), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1): + return x509.SHA1WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256): + return x509.SHA256WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384): + return x509.SHA384WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): + return x509.SHA512WithRSA, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA), + digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.DSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.DSAWithSHA256, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.ECDSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return 
x509.ECDSAWithSHA256, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return x509.ECDSAWithSHA384, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return x509.ECDSAWithSHA512, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + default: + return -1, fmt.Errorf("pkcs7: unsupported algorithm %q", + digestEncryption.Algorithm.String()) + } +} + +func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate { + for _, cert := range certs { + if isCertMatchForIssuerAndSerial(cert, ias) { + return cert + } + } + return nil +} + +func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error { + for _, attr := range attrs { + if attr.Type.Equal(attributeType) { + _, err := asn1.Unmarshal(attr.Value.Bytes, out) + return err + } + } + return errors.New("pkcs7: attribute type not in attributes") +} + +func calculateHash(hasher Hasher, hashFunc crypto.Hash, content []byte) (computed []byte, err error) { + if hasher != nil { + computed, err = hasher.Hash(hashFunc, bytes.NewReader(content)) + } else { + if !hashFunc.Available() { + return nil, fmt.Errorf("hash function %v not available", hashFunc) + } + + h := hashFunc.New() + _, _ = h.Write(content) + computed = h.Sum(nil) + } + + return +} diff --git a/tools/vendor/github.com/sourcegraph/conc/Makefile b/tools/vendor/github.com/sourcegraph/conc/Makefile new file mode 100644 index 000000000..3e0720a12 --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/Makefile @@ -0,0 +1,24 @@ +.DEFAULT_GOAL := help + +GO_BIN ?= $(shell go env GOPATH)/bin + +.PHONY: help +help: + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +$(GO_BIN)/golangci-lint: + @echo "==> Installing golangci-lint within "${GO_BIN}"" + @go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +.PHONY: lint +lint: $(GO_BIN)/golangci-lint ## Run linting on Go files + @echo "==> Linting Go source files" + @golangci-lint run -v --fix -c .golangci.yml ./... + +.PHONY: test +test: ## Run tests + go test -race -v ./... -coverprofile ./coverage.txt + +.PHONY: bench +bench: ## Run benchmarks. See https://pkg.go.dev/cmd/go#hdr-Testing_flags + go test ./... -bench . 
-benchtime 5s -timeout 0 -run=XXX -cpu 1 -benchmem diff --git a/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go b/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go deleted file mode 100644 index 7087e32a8..000000000 --- a/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !go1.20 -// +build !go1.20 - -package multierror - -import "go.uber.org/multierr" - -var ( - Join = multierr.Combine -) diff --git a/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go b/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go deleted file mode 100644 index 39cff829a..000000000 --- a/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.20 -// +build go1.20 - -package multierror - -import "errors" - -var ( - Join = errors.Join -) diff --git a/tools/vendor/github.com/sourcegraph/conc/iter/iter.go b/tools/vendor/github.com/sourcegraph/conc/iter/iter.go deleted file mode 100644 index 124b4f940..000000000 --- a/tools/vendor/github.com/sourcegraph/conc/iter/iter.go +++ /dev/null @@ -1,85 +0,0 @@ -package iter - -import ( - "runtime" - "sync/atomic" - - "github.com/sourcegraph/conc" -) - -// defaultMaxGoroutines returns the default maximum number of -// goroutines to use within this package. -func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) } - -// Iterator can be used to configure the behaviour of ForEach -// and ForEachIdx. The zero value is safe to use with reasonable -// defaults. -// -// Iterator is also safe for reuse and concurrent use. -type Iterator[T any] struct { - // MaxGoroutines controls the maximum number of goroutines - // to use on this Iterator's methods. - // - // If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0). - MaxGoroutines int -} - -// ForEach executes f in parallel over each element in input. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// ForEach always uses at most runtime.GOMAXPROCS goroutines. -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. For -// a configurable goroutine limit, use a custom Iterator. -func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) } - -// ForEach executes f in parallel over each element in input, -// using up to the Iterator's configured maximum number of -// goroutines. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. -func (iter Iterator[T]) ForEach(input []T, f func(*T)) { - iter.ForEachIdx(input, func(_ int, t *T) { - f(t) - }) -} - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. -func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) } - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. -func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) { - if iter.MaxGoroutines == 0 { - // iter is a value receiver and is hence safe to mutate - iter.MaxGoroutines = defaultMaxGoroutines() - } - - numInput := len(input) - if iter.MaxGoroutines > numInput { - // No more concurrent tasks than the number of input items. 
- iter.MaxGoroutines = numInput - } - - var idx atomic.Int64 - // Create the task outside the loop to avoid extra closure allocations. - task := func() { - i := int(idx.Add(1) - 1) - for ; i < numInput; i = int(idx.Add(1) - 1) { - f(i, &input[i]) - } - } - - var wg conc.WaitGroup - for i := 0; i < iter.MaxGoroutines; i++ { - wg.Go(task) - } - wg.Wait() -} diff --git a/tools/vendor/github.com/sourcegraph/conc/iter/map.go b/tools/vendor/github.com/sourcegraph/conc/iter/map.go deleted file mode 100644 index efbe6bfaf..000000000 --- a/tools/vendor/github.com/sourcegraph/conc/iter/map.go +++ /dev/null @@ -1,65 +0,0 @@ -package iter - -import ( - "sync" - - "github.com/sourcegraph/conc/internal/multierror" -) - -// Mapper is an Iterator with a result type R. It can be used to configure -// the behaviour of Map and MapErr. The zero value is safe to use with -// reasonable defaults. -// -// Mapper is also safe for reuse and concurrent use. -type Mapper[T, R any] Iterator[T] - -// Map applies f to each element of input, returning the mapped result. -// -// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable -// goroutine limit, use a custom Mapper. -func Map[T, R any](input []T, f func(*T) R) []R { - return Mapper[T, R]{}.Map(input, f) -} - -// Map applies f to each element of input, returning the mapped result. -// -// Map uses up to the configured Mapper's maximum number of goroutines. -func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R { - res := make([]R, len(input)) - Iterator[T](m).ForEachIdx(input, func(i int, t *T) { - res[i] = f(t) - }) - return res -} - -// MapErr applies f to each element of the input, returning the mapped result -// and a combined error of all returned errors. -// -// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable -// goroutine limit, use a custom Mapper. -func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) { - return Mapper[T, R]{}.MapErr(input, f) -} - -// MapErr applies f to each element of the input, returning the mapped result -// and a combined error of all returned errors. -// -// Map uses up to the configured Mapper's maximum number of goroutines. -func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) { - var ( - res = make([]R, len(input)) - errMux sync.Mutex - errs error - ) - Iterator[T](m).ForEachIdx(input, func(i int, t *T) { - var err error - res[i], err = f(t) - if err != nil { - errMux.Lock() - // TODO: use stdlib errors once multierrors land in go 1.20 - errs = multierror.Join(errs, err) - errMux.Unlock() - } - }) - return res, errs -} diff --git a/tools/vendor/github.com/sourcegraph/conc/pool/context_pool.go b/tools/vendor/github.com/sourcegraph/conc/pool/context_pool.go new file mode 100644 index 000000000..85c34e5ae --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/pool/context_pool.go @@ -0,0 +1,104 @@ +package pool + +import ( + "context" +) + +// ContextPool is a pool that runs tasks that take a context. +// A new ContextPool should be created with `New().WithContext(ctx)`. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +type ContextPool struct { + errorPool ErrorPool + + ctx context.Context + cancel context.CancelFunc + + cancelOnError bool +} + +// Go submits a task. If it returns an error, the error will be +// collected and returned by Wait(). If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. 
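+//
+// A minimal usage sketch (illustrative only; fetch is a hypothetical helper):
+//
+//	p := pool.New().WithContext(ctx).WithCancelOnError()
+//	for _, url := range urls {
+//		url := url
+//		p.Go(func(ctx context.Context) error { return fetch(ctx, url) })
+//	}
+//	err := p.Wait()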
+func (p *ContextPool) Go(f func(ctx context.Context) error) { + p.errorPool.Go(func() error { + if p.cancelOnError { + // If we are cancelling on error, then we also want to cancel if a + // panic is raised. To do this, we need to recover, cancel, and then + // re-throw the caught panic. + defer func() { + if r := recover(); r != nil { + p.cancel() + panic(r) + } + }() + } + + err := f(p.ctx) + if err != nil && p.cancelOnError { + // Leaky abstraction warning: We add the error directly because + // otherwise, canceling could cause another goroutine to exit and + // return an error before this error was added, which breaks the + // expectations of WithFirstError(). + p.errorPool.addErr(err) + p.cancel() + return nil + } + return err + }) +} + +// Wait cleans up all spawned goroutines, propagates any panics, and +// returns an error if any of the tasks errored. +func (p *ContextPool) Wait() error { + // Make sure we call cancel after pool is done to avoid memory leakage. + defer p.cancel() + return p.errorPool.Wait() +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +// This is particularly useful for (*ContextPool).WithCancelOnError(), +// where all errors after the first are likely to be context.Canceled. +func (p *ContextPool) WithFirstError() *ContextPool { + p.panicIfInitialized() + p.errorPool.WithFirstError() + return p +} + +// WithCancelOnError configures the pool to cancel its context as soon as +// any task returns an error or panics. By default, the pool's context is not +// canceled until the parent context is canceled. +// +// In this case, all errors returned from the pool after the first will +// likely be context.Canceled - you may want to also use +// (*ContextPool).WithFirstError() to configure the pool to only return +// the first error. +func (p *ContextPool) WithCancelOnError() *ContextPool { + p.panicIfInitialized() + p.cancelOnError = true + return p +} + +// WithFailFast is an alias for the combination of WithFirstError and +// WithCancelOnError. By default, the errors from all tasks are returned and +// the pool's context is not canceled until the parent context is canceled. +func (p *ContextPool) WithFailFast() *ContextPool { + p.panicIfInitialized() + p.WithFirstError() + p.WithCancelOnError() + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ContextPool) WithMaxGoroutines(n int) *ContextPool { + p.panicIfInitialized() + p.errorPool.WithMaxGoroutines(n) + return p +} + +func (p *ContextPool) panicIfInitialized() { + p.errorPool.panicIfInitialized() +} diff --git a/tools/vendor/github.com/sourcegraph/conc/pool/error_pool.go b/tools/vendor/github.com/sourcegraph/conc/pool/error_pool.go new file mode 100644 index 000000000..e1789e61b --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/pool/error_pool.go @@ -0,0 +1,100 @@ +package pool + +import ( + "context" + "errors" + "sync" +) + +// ErrorPool is a pool that runs tasks that may return an error. +// Errors are collected and returned by Wait(). +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +// +// A new ErrorPool should be created using `New().WithErrors()`. +type ErrorPool struct { + pool Pool + + onlyFirstError bool + + mu sync.Mutex + errs []error +} + +// Go submits a task to the pool. 
If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ErrorPool) Go(f func() error) { + p.pool.Go(func() { + p.addErr(f()) + }) +} + +// Wait cleans up any spawned goroutines, propagating any panics and +// returning any errors from tasks. +func (p *ErrorPool) Wait() error { + p.pool.Wait() + + errs := p.errs + p.errs = nil // reset errs + + if len(errs) == 0 { + return nil + } else if p.onlyFirstError { + return errs[0] + } else { + return errors.Join(errs...) + } +} + +// WithContext converts the pool to a ContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. +// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *ErrorPool) WithContext(ctx context.Context) *ContextPool { + p.panicIfInitialized() + ctx, cancel := context.WithCancel(ctx) + return &ContextPool{ + errorPool: p.deref(), + ctx: ctx, + cancel: cancel, + } +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +func (p *ErrorPool) WithFirstError() *ErrorPool { + p.panicIfInitialized() + p.onlyFirstError = true + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ErrorPool) WithMaxGoroutines(n int) *ErrorPool { + p.panicIfInitialized() + p.pool.WithMaxGoroutines(n) + return p +} + +// deref is a helper that creates a shallow copy of the pool with the same +// settings. We don't want to just dereference the pointer because that makes +// the copylock lint angry. +func (p *ErrorPool) deref() ErrorPool { + return ErrorPool{ + pool: p.pool.deref(), + onlyFirstError: p.onlyFirstError, + } +} + +func (p *ErrorPool) panicIfInitialized() { + p.pool.panicIfInitialized() +} + +func (p *ErrorPool) addErr(err error) { + if err != nil { + p.mu.Lock() + p.errs = append(p.errs, err) + p.mu.Unlock() + } +} diff --git a/tools/vendor/github.com/sourcegraph/conc/pool/pool.go b/tools/vendor/github.com/sourcegraph/conc/pool/pool.go new file mode 100644 index 000000000..8f4494efb --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/pool/pool.go @@ -0,0 +1,174 @@ +package pool + +import ( + "context" + "sync" + + "github.com/sourcegraph/conc" +) + +// New creates a new Pool. +func New() *Pool { + return &Pool{} +} + +// Pool is a pool of goroutines used to execute tasks concurrently. +// +// Tasks are submitted with Go(). Once all your tasks have been submitted, you +// must call Wait() to clean up any spawned goroutines and propagate any +// panics. +// +// Goroutines are started lazily, so creating a new pool is cheap. There will +// never be more goroutines spawned than there are tasks submitted. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +// +// Pool is efficient, but not zero cost. It should not be used for very short +// tasks. Startup and teardown come with an overhead of around 1µs, and each +// task has an overhead of around 300ns. +type Pool struct { + handle conc.WaitGroup + limiter limiter + tasks chan func() + initOnce sync.Once +} + +// Go submits a task to be run in the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. 
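+//
+// A minimal sketch of typical use (illustrative only; process and items are
+// hypothetical stand-ins for the caller's work):
+//
+//	p := pool.New().WithMaxGoroutines(4)
+//	for _, item := range items {
+//		item := item
+//		p.Go(func() { process(item) })
+//	}
+//	p.Wait()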
+func (p *Pool) Go(f func()) {
+	p.init()
+
+	if p.limiter == nil {
+		// No limit on the number of goroutines.
+		select {
+		case p.tasks <- f:
+			// A goroutine was available to handle the task.
+		default:
+			// No goroutine was available to handle the task.
+			// Spawn a new one and send it the task.
+			p.handle.Go(func() {
+				p.worker(f)
+			})
+		}
+	} else {
+		select {
+		case p.limiter <- struct{}{}:
+			// If we are below our limit, spawn a new worker rather
+			// than waiting for one to become available.
+			p.handle.Go(func() {
+				p.worker(f)
+			})
+		case p.tasks <- f:
+			// A worker is available and has accepted the task.
+			return
+		}
+	}
+}
+
+// Wait cleans up spawned goroutines, propagating any panics that were
+// raised by a task.
+func (p *Pool) Wait() {
+	p.init()
+
+	close(p.tasks)
+
+	// After Wait() returns, reset the struct so tasks will be reinitialized on
+	// next use. This better matches the behavior of sync.WaitGroup.
+	defer func() { p.initOnce = sync.Once{} }()
+
+	p.handle.Wait()
+}
+
+// MaxGoroutines returns the maximum size of the pool.
+func (p *Pool) MaxGoroutines() int {
+	return p.limiter.limit()
+}
+
+// WithMaxGoroutines limits the number of goroutines in a pool.
+// Defaults to unlimited. Panics if n < 1.
+func (p *Pool) WithMaxGoroutines(n int) *Pool {
+	p.panicIfInitialized()
+	if n < 1 {
+		panic("max goroutines in a pool must be greater than zero")
+	}
+	p.limiter = make(limiter, n)
+	return p
+}
+
+// init ensures that the pool is initialized before use. This makes the
+// zero value of the pool usable.
+func (p *Pool) init() {
+	p.initOnce.Do(func() {
+		p.tasks = make(chan func())
+	})
+}
+
+// panicIfInitialized will trigger a panic if a configuration method is called
+// after the pool has started any goroutines for the first time. In the case that
+// new settings are needed, a new pool should be created.
+func (p *Pool) panicIfInitialized() {
+	if p.tasks != nil {
+		panic("pool can not be reconfigured after calling Go() for the first time")
+	}
+}
+
+// WithErrors converts the pool to an ErrorPool so the submitted tasks can
+// return errors.
+func (p *Pool) WithErrors() *ErrorPool {
+	p.panicIfInitialized()
+	return &ErrorPool{
+		pool: p.deref(),
+	}
+}
+
+// deref is a helper that creates a shallow copy of the pool with the same
+// settings. We don't want to just dereference the pointer because that makes
+// the copylock lint angry.
+func (p *Pool) deref() Pool {
+	p.panicIfInitialized()
+	return Pool{
+		limiter: p.limiter,
+	}
+}
+
+// WithContext converts the pool to a ContextPool for tasks that should
+// run under the same context, such that they each respect shared cancellation.
+// For example, WithCancelOnError can be configured on the returned pool to
+// signal that all goroutines should be cancelled upon the first error.
+func (p *Pool) WithContext(ctx context.Context) *ContextPool {
+	p.panicIfInitialized()
+	ctx, cancel := context.WithCancel(ctx)
+	return &ContextPool{
+		errorPool: p.WithErrors().deref(),
+		ctx:       ctx,
+		cancel:    cancel,
+	}
+}
+
+func (p *Pool) worker(initialFunc func()) {
+	// The only time this matters is if the task panics.
+	// This makes it possible to spin up new workers in that case.
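+	// (The deferred release below runs even on panic, freeing the limiter slot.)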
+	defer p.limiter.release()
+
+	if initialFunc != nil {
+		initialFunc()
+	}
+
+	for f := range p.tasks {
+		f()
+	}
+}
+
+type limiter chan struct{}
+
+func (l limiter) limit() int {
+	return cap(l)
+}
+
+func (l limiter) release() {
+	if l != nil {
+		<-l
+	}
+}
diff --git a/tools/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go b/tools/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go
new file mode 100644
index 000000000..6bc30dd63
--- /dev/null
+++ b/tools/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go
@@ -0,0 +1,85 @@
+package pool
+
+import (
+	"context"
+)
+
+// ResultContextPool is a pool that runs tasks that take a context and return a
+// result. The context passed to the task will be canceled if any of the tasks
+// return an error, which makes its functionality different than just capturing
+// a context with the task closure.
+//
+// The configuration methods (With*) will panic if they are used after calling
+// Go() for the first time.
+type ResultContextPool[T any] struct {
+	contextPool    ContextPool
+	agg            resultAggregator[T]
+	collectErrored bool
+}
+
+// Go submits a task to the pool. If all goroutines in the pool
+// are busy, a call to Go() will block until the task can be started.
+func (p *ResultContextPool[T]) Go(f func(context.Context) (T, error)) {
+	idx := p.agg.nextIndex()
+	p.contextPool.Go(func(ctx context.Context) error {
+		res, err := f(ctx)
+		p.agg.save(idx, res, err != nil)
+		return err
+	})
+}
+
+// Wait cleans up all spawned goroutines, propagates any panics, and
+// returns an error if any of the tasks errored.
+func (p *ResultContextPool[T]) Wait() ([]T, error) {
+	err := p.contextPool.Wait()
+	results := p.agg.collect(p.collectErrored)
+	p.agg = resultAggregator[T]{}
+	return results, err
+}
+
+// WithCollectErrored configures the pool to still collect the result of a task
+// even if the task returned an error. By default, the results of tasks that
+// errored are ignored and only the errors are collected.
+func (p *ResultContextPool[T]) WithCollectErrored() *ResultContextPool[T] {
+	p.panicIfInitialized()
+	p.collectErrored = true
+	return p
+}
+
+// WithFirstError configures the pool to only return the first error
+// returned by a task. By default, Wait() will return a combined error.
+func (p *ResultContextPool[T]) WithFirstError() *ResultContextPool[T] {
+	p.panicIfInitialized()
+	p.contextPool.WithFirstError()
+	return p
+}
+
+// WithCancelOnError configures the pool to cancel its context as soon as
+// any task returns an error. By default, the pool's context is not
+// canceled until the parent context is canceled.
+func (p *ResultContextPool[T]) WithCancelOnError() *ResultContextPool[T] {
+	p.panicIfInitialized()
+	p.contextPool.WithCancelOnError()
+	return p
+}
+
+// WithFailFast is an alias for the combination of WithFirstError and
+// WithCancelOnError. By default, the errors from all tasks are returned and
+// the pool's context is not canceled until the parent context is canceled.
+func (p *ResultContextPool[T]) WithFailFast() *ResultContextPool[T] {
+	p.panicIfInitialized()
+	p.contextPool.WithFailFast()
+	return p
+}
+
+// WithMaxGoroutines limits the number of goroutines in a pool.
+// Defaults to unlimited. Panics if n < 1.
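+//
+// For example, a sketch combining results, context, and a concurrency cap
+// (illustrative only; NewWithResults is defined in result_pool.go):
+//
+//	p := pool.NewWithResults[int]().WithContext(ctx).WithMaxGoroutines(8)
+//	p.Go(func(ctx context.Context) (int, error) { return 42, nil })
+//	results, err := p.Wait()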
+func (p *ResultContextPool[T]) WithMaxGoroutines(n int) *ResultContextPool[T] {
+	p.panicIfInitialized()
+	p.contextPool.WithMaxGoroutines(n)
+	return p
+}
+
+func (p *ResultContextPool[T]) panicIfInitialized() {
+	p.contextPool.panicIfInitialized()
+}
diff --git a/tools/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go b/tools/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go
new file mode 100644
index 000000000..832cd9bb4
--- /dev/null
+++ b/tools/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go
@@ -0,0 +1,80 @@
+package pool
+
+import (
+	"context"
+)
+
+// ResultErrorPool is a pool that executes tasks that return a generic result
+// type and an error. Tasks are executed in the pool with Go(), then the
+// results of the tasks are returned by Wait().
+//
+// The order of the results is guaranteed to be the same as the order the
+// tasks were submitted.
+//
+// The configuration methods (With*) will panic if they are used after calling
+// Go() for the first time.
+type ResultErrorPool[T any] struct {
+	errorPool      ErrorPool
+	agg            resultAggregator[T]
+	collectErrored bool
+}
+
+// Go submits a task to the pool. If all goroutines in the pool
+// are busy, a call to Go() will block until the task can be started.
+func (p *ResultErrorPool[T]) Go(f func() (T, error)) {
+	idx := p.agg.nextIndex()
+	p.errorPool.Go(func() error {
+		res, err := f()
+		p.agg.save(idx, res, err != nil)
+		return err
+	})
+}
+
+// Wait cleans up any spawned goroutines, propagating any panics and
+// returning the results and any errors from tasks.
+func (p *ResultErrorPool[T]) Wait() ([]T, error) {
+	err := p.errorPool.Wait()
+	results := p.agg.collect(p.collectErrored)
+	p.agg = resultAggregator[T]{} // reset for reuse
+	return results, err
+}
+
+// WithCollectErrored configures the pool to still collect the result of a task
+// even if the task returned an error. By default, the results of tasks that
+// errored are ignored and only the errors are collected.
+func (p *ResultErrorPool[T]) WithCollectErrored() *ResultErrorPool[T] {
+	p.panicIfInitialized()
+	p.collectErrored = true
+	return p
+}
+
+// WithContext converts the pool to a ResultContextPool for tasks that should
+// run under the same context, such that they each respect shared cancellation.
+// For example, WithCancelOnError can be configured on the returned pool to
+// signal that all goroutines should be cancelled upon the first error.
+func (p *ResultErrorPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
+	p.panicIfInitialized()
+	return &ResultContextPool[T]{
+		contextPool: *p.errorPool.WithContext(ctx),
+	}
+}
+
+// WithFirstError configures the pool to only return the first error
+// returned by a task. By default, Wait() will return a combined error.
+func (p *ResultErrorPool[T]) WithFirstError() *ResultErrorPool[T] {
+	p.panicIfInitialized()
+	p.errorPool.WithFirstError()
+	return p
+}
+
+// WithMaxGoroutines limits the number of goroutines in a pool.
+// Defaults to unlimited. Panics if n < 1.
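+//
+// A short sketch (illustrative only):
+//
+//	p := pool.NewWithResults[string]().WithErrors().WithMaxGoroutines(2)
+//	p.Go(func() (string, error) { return "ok", nil })
+//	results, err := p.Wait()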
+func (p *ResultErrorPool[T]) WithMaxGoroutines(n int) *ResultErrorPool[T] {
+	p.panicIfInitialized()
+	p.errorPool.WithMaxGoroutines(n)
+	return p
+}
+
+func (p *ResultErrorPool[T]) panicIfInitialized() {
+	p.errorPool.panicIfInitialized()
+}
diff --git a/tools/vendor/github.com/sourcegraph/conc/pool/result_pool.go b/tools/vendor/github.com/sourcegraph/conc/pool/result_pool.go
new file mode 100644
index 000000000..f73a77261
--- /dev/null
+++ b/tools/vendor/github.com/sourcegraph/conc/pool/result_pool.go
@@ -0,0 +1,142 @@
+package pool
+
+import (
+	"context"
+	"sort"
+	"sync"
+)
+
+// NewWithResults creates a new ResultPool for tasks with a result of type T.
+//
+// The configuration methods (With*) will panic if they are used after calling
+// Go() for the first time.
+func NewWithResults[T any]() *ResultPool[T] {
+	return &ResultPool[T]{
+		pool: *New(),
+	}
+}
+
+// ResultPool is a pool that executes tasks that return a generic result type.
+// Tasks are executed in the pool with Go(), then the results of the tasks are
+// returned by Wait().
+//
+// The order of the results is guaranteed to be the same as the order the
+// tasks were submitted.
+type ResultPool[T any] struct {
+	pool Pool
+	agg  resultAggregator[T]
+}
+
+// Go submits a task to the pool. If all goroutines in the pool
+// are busy, a call to Go() will block until the task can be started.
+func (p *ResultPool[T]) Go(f func() T) {
+	idx := p.agg.nextIndex()
+	p.pool.Go(func() {
+		p.agg.save(idx, f(), false)
+	})
+}
+
+// Wait cleans up all spawned goroutines, propagating any panics, and returning
+// a slice of results from tasks that did not panic.
+func (p *ResultPool[T]) Wait() []T {
+	p.pool.Wait()
+	results := p.agg.collect(true)
+	p.agg = resultAggregator[T]{} // reset for reuse
+	return results
+}
+
+// MaxGoroutines returns the maximum size of the pool.
+func (p *ResultPool[T]) MaxGoroutines() int {
+	return p.pool.MaxGoroutines()
+}
+
+// WithErrors converts the pool to a ResultErrorPool so the submitted tasks
+// can return errors.
+func (p *ResultPool[T]) WithErrors() *ResultErrorPool[T] {
+	p.panicIfInitialized()
+	return &ResultErrorPool[T]{
+		errorPool: *p.pool.WithErrors(),
+	}
+}
+
+// WithContext converts the pool to a ResultContextPool for tasks that should
+// run under the same context, such that they each respect shared cancellation.
+// For example, WithCancelOnError can be configured on the returned pool to
+// signal that all goroutines should be cancelled upon the first error.
+func (p *ResultPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
+	p.panicIfInitialized()
+	return &ResultContextPool[T]{
+		contextPool: *p.pool.WithContext(ctx),
+	}
+}
+
+// WithMaxGoroutines limits the number of goroutines in a pool.
+// Defaults to unlimited. Panics if n < 1.
+func (p *ResultPool[T]) WithMaxGoroutines(n int) *ResultPool[T] {
+	p.panicIfInitialized()
+	p.pool.WithMaxGoroutines(n)
+	return p
+}
+
+func (p *ResultPool[T]) panicIfInitialized() {
+	p.pool.panicIfInitialized()
+}
+
+// resultAggregator is a utility type that lets us safely append from multiple
+// goroutines. The zero value is valid and ready to use.
+type resultAggregator[T any] struct {
+	mu      sync.Mutex
+	len     int
+	results []T
+	errored []int
+}
+
+// nextIndex reserves a slot for a result. The returned value should be passed
+// to save() when adding a result to the aggregator.
+func (r *resultAggregator[T]) nextIndex() int { + r.mu.Lock() + defer r.mu.Unlock() + + nextIdx := r.len + r.len += 1 + return nextIdx +} + +func (r *resultAggregator[T]) save(i int, res T, errored bool) { + r.mu.Lock() + defer r.mu.Unlock() + + if i >= len(r.results) { + old := r.results + r.results = make([]T, r.len) + copy(r.results, old) + } + + r.results[i] = res + + if errored { + r.errored = append(r.errored, i) + } +} + +// collect returns the set of aggregated results. +func (r *resultAggregator[T]) collect(collectErrored bool) []T { + if !r.mu.TryLock() { + panic("collect should not be called until all goroutines have exited") + } + + if collectErrored || len(r.errored) == 0 { + return r.results + } + + filtered := r.results[:0] + sort.Ints(r.errored) + for i, e := range r.errored { + if i == 0 { + filtered = append(filtered, r.results[:e]...) + } else { + filtered = append(filtered, r.results[r.errored[i-1]+1:e]...) + } + } + return filtered +} diff --git a/tools/vendor/github.com/spf13/afero/.editorconfig b/tools/vendor/github.com/spf13/afero/.editorconfig index 4492e9f9f..a85749f19 100644 --- a/tools/vendor/github.com/spf13/afero/.editorconfig +++ b/tools/vendor/github.com/spf13/afero/.editorconfig @@ -10,3 +10,6 @@ trim_trailing_whitespace = true [*.go] indent_style = tab + +[{*.yml,*.yaml}] +indent_size = 2 diff --git a/tools/vendor/github.com/spf13/afero/.golangci.yaml b/tools/vendor/github.com/spf13/afero/.golangci.yaml index 806289a25..4f359b81a 100644 --- a/tools/vendor/github.com/spf13/afero/.golangci.yaml +++ b/tools/vendor/github.com/spf13/afero/.golangci.yaml @@ -1,18 +1,48 @@ -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/afero) +version: "2" + +run: + timeout: 10m linters: - disable-all: true - enable: - - gci - - gofmt - - gofumpt - - staticcheck - -issues: - exclude-dirs: - - gcsfs/internal/stiface + enable: + - govet + - ineffassign + - misspell + - nolintlint + # - revive + - staticcheck + - unused + + disable: + - errcheck + # - staticcheck + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + + exclusions: + paths: + - gcsfs/internal/stiface + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + - golines + + settings: + gci: + sections: + - standard + - default + - localmodule + + exclusions: + paths: + - gcsfs/internal/stiface diff --git a/tools/vendor/github.com/spf13/afero/README.md b/tools/vendor/github.com/spf13/afero/README.md index 86f154554..ef67e9a77 100644 --- a/tools/vendor/github.com/spf13/afero/README.md +++ b/tools/vendor/github.com/spf13/afero/README.md @@ -1,479 +1,474 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) +afero logo-sm -A FileSystem Abstraction System for Go -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI) -[![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero?style=flat-square)](https://goreportcard.com/report/github.com/spf13/afero) -![Go 
Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square)
-[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero)
+[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI)
+[![GoDoc](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero)](https://goreportcard.com/report/github.com/spf13/afero)
+![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square)
-# Overview
-Afero is a filesystem framework providing a simple, uniform and universal API
-interacting with any filesystem, as an abstraction layer providing interfaces,
-types and methods. Afero has an exceptionally clean interface and simple design
-without needless constructors or initialization methods.
+# Afero: The Universal Filesystem Abstraction for Go
-Afero is also a library providing a base set of interoperable backend
-filesystems that make it easy to work with, while retaining all the power
-and benefit of the os and ioutil packages.
+Afero is a powerful and extensible filesystem abstraction system for Go. It provides a single, unified API for interacting with diverse filesystems, including the local disk, memory, archives, and network storage.
-Afero provides significant improvements over using the os package alone, most
-notably the ability to create mock and testing filesystems without relying on the disk.
+Afero acts as a drop-in replacement for the standard `os` package, enabling you to write modular code that is agnostic to the underlying storage, dramatically simplifying testing, and allowing for sophisticated architectural patterns through filesystem composition.
-It is suitable for use in any situation where you would consider using the OS
-package as it provides an additional abstraction that makes it easy to use a
-memory backed file system during testing. It also adds support for the http
-filesystem for full interoperability.
+## Why Afero?
+Afero elevates filesystem interaction beyond simple file reading and writing, offering solutions for testability, flexibility, and advanced architecture.
-## Afero Features
+🔑 **Key Features:**
-* A single consistent API for accessing a variety of filesystems
-* Interoperation between a variety of file system types
-* A set of interfaces to encourage and enforce interoperability between backends
-* An atomic cross platform memory backed file system
-* Support for compositional (union) file systems by combining multiple file systems acting as one
-* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
-* A set of utility functions ported from io, ioutil & hugo to be afero aware
-* Wrapper for go 1.16 filesystem abstraction `io/fs.FS`
+* **Universal API:** Write your code once. Run it against the local OS, in-memory storage, ZIP/TAR archives, or remote systems (SFTP, GCS).
+* **Ultimate Testability:** Utilize `MemMapFs`, a fully concurrent-safe, read/write in-memory filesystem. Write fast, isolated, and reliable unit tests without touching the physical disk or worrying about cleanup.
+* **Powerful Composition:** Afero's hidden superpower.
Layer filesystems on top of each other to create sophisticated behaviors: + * **Sandboxing:** Use `CopyOnWriteFs` to create temporary scratch spaces that isolate changes from the base filesystem. + * **Caching:** Use `CacheOnReadFs` to automatically layer a fast cache (like memory) over a slow backend (like a network drive). + * **Security Jails:** Use `BasePathFs` to restrict application access to a specific subdirectory (chroot). +* **`os` Package Compatibility:** Afero mirrors the functions in the standard `os` package, making adoption and refactoring seamless. +* **`io/fs` Compatibility:** Fully compatible with the Go standard library's `io/fs` interfaces. -# Using Afero +## Installation -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrapper for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero +```bash +go get github.com/spf13/afero +``` -Next include Afero in your application. ```go import "github.com/spf13/afero" ``` -## Step 2: Declare a backend +## Quick Start: The Power of Abstraction + +The core of Afero is the `afero.Fs` interface. By designing your functions to accept this interface rather than calling `os.*` functions directly, your code instantly becomes more flexible and testable. + +### 1. Refactor Your Code + +Change functions that rely on the `os` package to accept `afero.Fs`. -First define a package variable and set it to a pointer to a filesystem. ```go -var AppFs = afero.NewMemMapFs() +// Before: Coupled to the OS and difficult to test +// func ProcessConfiguration(path string) error { +// data, err := os.ReadFile(path) +// ... +// } -or +import "github.com/spf13/afero" -var AppFs = afero.NewOsFs() +// After: Decoupled, flexible, and testable +func ProcessConfiguration(fs afero.Fs, path string) error { + // Use Afero utility functions which mirror os/ioutil + data, err := afero.ReadFile(fs, path) + // ... process the data + return err +} ``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. -## Step 3: Use it like you would the OS package +### 2. Usage in Production -Throughout your application use any function and method like you normally -would. +In your production environment, inject the `OsFs` backend, which wraps the standard operating system calls. -So if my application before had: -```go -os.Open("/tmp/foo") -``` -We would replace it with: ```go -AppFs.Open("/tmp/foo") +func main() { + // Use the real OS filesystem + AppFs := afero.NewOsFs() + ProcessConfiguration(AppFs, "/etc/myapp.conf") +} ``` -`AppFs` being the variable we defined above. +### 3. Usage in Testing +In your tests, inject `MemMapFs`. This provides a blazing-fast, isolated, in-memory filesystem that requires no disk I/O and no cleanup. 
-## List of all available functions - -File System Methods Available: ```go -Chmod(name string, mode os.FileMode) : error -Chown(name string, uid, gid int) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error +func TestProcessConfiguration(t *testing.T) { + // Use the in-memory filesystem + AppFs := afero.NewMemMapFs() + + // Pre-populate the memory filesystem for the test + configPath := "/test/config.json" + afero.WriteFile(AppFs, configPath, []byte(`{"feature": true}`), 0644) + + // Run the test entirely in memory + err := ProcessConfiguration(AppFs, configPath) + if err != nil { + t.Fatal(err) + } +} ``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. -## Using Afero's utility functions +## Afero's Superpower: Composition -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. +Afero's most unique feature is its ability to combine filesystems. This allows you to build complex behaviors out of simple components, keeping your application logic clean. -The afero utilities support all afero compatible backends. +### Example 1: Sandboxing with Copy-on-Write -The list of utilities includes: +Create a temporary environment where an application can "modify" system files without affecting the actual disk. ```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) +// 1. The base layer is the real OS, made read-only for safety. +baseFs := afero.NewReadOnlyFs(afero.NewOsFs()) -They are available under two different approaches to use. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. +// 2. The overlay layer is a temporary in-memory filesystem for changes. +overlayFs := afero.NewMemMapFs() -### Calling utilities directly +// 3. Combine them. Reads fall through to the base; writes only hit the overlay. 
+sandboxFs := afero.NewCopyOnWriteFs(baseFs, overlayFs) -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs,"", "ioutil-test") +// The application can now "modify" /etc/hosts, but the changes are isolated in memory. +afero.WriteFile(sandboxFs, "/etc/hosts", []byte("127.0.0.1 sandboxed-app"), 0644) +// The real /etc/hosts on disk is untouched. ``` -### Calling via Afero +### Example 2: Caching a Slow Filesystem -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` +Improve performance by layering a fast cache (like memory) over a slow backend (like a network drive or cloud storage). -## Using Afero for Testing +```go +import "time" -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. +// Assume 'remoteFs' is a slow backend (e.g., SFTP or GCS) +var remoteFs afero.Fs -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far more easier to do -* No test cleanup needed +// 'cacheFs' is a fast in-memory backend +cacheFs := afero.NewMemMapFs() -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs() during testing you -can set it to afero.NewMemMapFs(). +// Create the caching layer. Cache items for 5 minutes upon first read. +cachedFs := afero.NewCacheOnReadFs(remoteFs, cacheFs, 5*time.Minute) -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that Tests are order -independent, with no test relying on the state left by an earlier test. +// The first read is slow (fetches from remote, then caches) +data1, _ := afero.ReadFile(cachedFs, "data.json") -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} +// The second read is instant (serves from memory cache) +data2, _ := afero.ReadFile(cachedFs, "data.json") ``` -# Available Backends +### Example 3: Security Jails (chroot) + +Restrict an application component's access to a specific subdirectory. -## Operating System Native +```go +osFs := afero.NewOsFs() -### OsFs +// Create a filesystem rooted at /home/user/public +// The application cannot access anything above this directory. +jailedFs := afero.NewBasePathFs(osFs, "/home/user/public") -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed. 
+// To the application, this is reading "/" +// In reality, it's reading "/home/user/public/" +dirInfo, err := afero.ReadDir(jailedFs, "/") -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) +// Attempts to access parent directories fail +_, err = jailedFs.Open("../secrets.txt") // Returns an error ``` -## Memory Backed Storage +## Real-World Use Cases -### MemMapFs +### Build Cloud-Agnostic Applications -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and to speed up unnecessary disk io when persistence isn’t -necessary. It is fully concurrent and will work within go routines -safely. +Write applications that seamlessly work with different storage backends: ```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` +type DocumentProcessor struct { + fs afero.Fs +} + +func NewDocumentProcessor(fs afero.Fs) *DocumentProcessor { + return &DocumentProcessor{fs: fs} +} -#### InMemoryFile +func (p *DocumentProcessor) Process(inputPath, outputPath string) error { + // This code works whether fs is local disk, cloud storage, or memory + content, err := afero.ReadFile(p.fs, inputPath) + if err != nil { + return err + } + + processed := processContent(content) + return afero.WriteFile(p.fs, outputPath, processed, 0644) +} -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. +// Use with local filesystem +processor := NewDocumentProcessor(afero.NewOsFs()) -## Network Interfaces +// Use with Google Cloud Storage +processor := NewDocumentProcessor(gcsFS) -### SftpFs +// Use with in-memory filesystem for testing +processor := NewDocumentProcessor(afero.NewMemMapFs()) +``` -Afero has experimental support for secure file transfer protocol (sftp). Which can -be used to perform file operations over a encrypted channel. +### Treating Archives as Filesystems -### GCSFs +Read files directly from `.zip` or `.tar` archives without unpacking them to disk first. -Afero has experimental support for Google Cloud Storage (GCS). You can either set the -`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in -`NewGcsFS` to configure access to your GCS bucket. +```go +import ( + "archive/zip" + "github.com/spf13/afero/zipfs" +) -Some known limitations of the existing implementation: -* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version. -* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that's is left for another version. -* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent. +// Assume 'zipReader' is a *zip.Reader initialized from a file or memory +var zipReader *zip.Reader +// Create a read-only ZipFs +archiveFS := zipfs.New(zipReader) -## Filtering Backends +// Read a file from within the archive using the standard Afero API +content, err := afero.ReadFile(archiveFS, "/docs/readme.md") +``` -### BasePathFs +### Serving Any Filesystem over HTTP -The BasePathFs restricts all operations to a given path within an Fs. -The given file name to the operations on this Fs will be prepended with -the base path before calling the source Fs. 
+Use `HttpFs` to expose any Afero filesystem—even one created dynamically in memory—through a standard Go web server. ```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` +import ( + "net/http" + "github.com/spf13/afero" +) -### ReadOnlyFs +func main() { + memFS := afero.NewMemMapFs() + afero.WriteFile(memFS, "index.html", []byte("

<h1>Hello from Memory!</h1>
    "), 0644) -A thin wrapper around the source Fs providing a read only view. + // Wrap the memory filesystem to make it compatible with http.FileServer. + httpFS := afero.NewHttpFs(memFS) -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM + http.Handle("/", http.FileServer(httpFS.Dir("/"))) + http.ListenAndServe(":8080", nil) +} ``` -# RegexpFs +### Testing Made Simple -A filtered view on file names, any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. +One of Afero's greatest strengths is making filesystem-dependent code easily testable: ```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` +func SaveUserData(fs afero.Fs, userID string, data []byte) error { + filename := fmt.Sprintf("users/%s.json", userID) + return afero.WriteFile(fs, filename, data, 0644) +} -### HttpFs +func TestSaveUserData(t *testing.T) { + // Create a clean, fast, in-memory filesystem for testing + testFS := afero.NewMemMapFs() + + userData := []byte(`{"name": "John", "email": "john@example.com"}`) + err := SaveUserData(testFS, "123", userData) + + if err != nil { + t.Fatalf("SaveUserData failed: %v", err) + } + + // Verify the file was saved correctly + saved, err := afero.ReadFile(testFS, "users/123.json") + if err != nil { + t.Fatalf("Failed to read saved file: %v", err) + } + + if string(saved) != string(userData) { + t.Errorf("Data mismatch: got %s, want %s", saved, userData) + } +} +``` -Afero provides an http compatible backend which can wrap any of the existing -backends. +**Benefits of testing with Afero:** +- ⚡ **Fast** - No disk I/O, tests run in memory +- 🔄 **Reliable** - Each test starts with a clean slate +- 🧹 **No cleanup** - Memory is automatically freed +- 🔒 **Safe** - Can't accidentally modify real files +- 🏃 **Parallel** - Tests can run concurrently without conflicts + +## Backend Reference + +| Type | Backend | Constructor | Description | Status | +| :--- | :--- | :--- | :--- | :--- | +| **Core** | **OsFs** | `afero.NewOsFs()` | Interacts with the real operating system filesystem. Use in production. | ✅ Official | +| | **MemMapFs** | `afero.NewMemMapFs()` | A fast, atomic, concurrent-safe, in-memory filesystem. Ideal for testing. | ✅ Official | +| **Composition** | **CopyOnWriteFs**| `afero.NewCopyOnWriteFs(base, overlay)` | A read-only base with a writable overlay. Ideal for sandboxing. | ✅ Official | +| | **CacheOnReadFs**| `afero.NewCacheOnReadFs(base, cache, ttl)` | Lazily caches files from a slow base into a fast layer on first read. | ✅ Official | +| | **BasePathFs** | `afero.NewBasePathFs(source, path)` | Restricts operations to a subdirectory (chroot/jail). | ✅ Official | +| | **ReadOnlyFs** | `afero.NewReadOnlyFs(source)` | Provides a read-only view, preventing any modifications. | ✅ Official | +| | **RegexpFs** | `afero.NewRegexpFs(source, regexp)` | Filters a filesystem, only showing files that match a regex. | ✅ Official | +| **Utility** | **HttpFs** | `afero.NewHttpFs(source)` | Wraps any Afero filesystem to be served via `http.FileServer`. | ✅ Official | +| **Archives** | **ZipFs** | `zipfs.New(zipReader)` | Read-only access to files within a ZIP archive. | ✅ Official | +| | **TarFs** | `tarfs.New(tarReader)` | Read-only access to files within a TAR archive. 
| ✅ Official | +| **Network** | **GcsFs** | `gcsfs.NewGcsFs(...)` | Google Cloud Storage backend. | ⚡ Experimental | +| | **SftpFs** | `sftpfs.New(...)` | SFTP backend. | ⚡ Experimental | +| **3rd Party Cloud** | **S3Fs** | [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) | Production-ready S3 backend built on official AWS SDK. | 🔹 3rd Party | +| | **MinioFs** | [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) | MinIO object storage backend with S3 compatibility. | 🔹 3rd Party | +| | **DriveFs** | [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) | Google Drive backend with streaming support. | 🔹 3rd Party | +| | **DropboxFs** | [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) | Dropbox backend with streaming support. | 🔹 3rd Party | +| **3rd Party Specialized** | **GitFs** | [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) | Git repository filesystem (read-only, Afero compatible). | 🔹 3rd Party | +| | **DockerFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Docker container filesystem access. | 🔹 3rd Party | +| | **GitHubFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | GitHub repository and releases filesystem. | 🔹 3rd Party | +| | **FilterFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Filesystem filtering with predicates. | 🔹 3rd Party | +| | **IgnoreFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | .gitignore-aware filtering filesystem. | 🔹 3rd Party | +| | **FUSEFs** | [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) | Generic FUSE implementation using any Afero backend. | 🔹 3rd Party | + +## Afero vs. `io/fs` (Go 1.16+) + +Go 1.16 introduced the `io/fs` package, which provides a standard abstraction for **read-only** filesystems. + +Afero complements `io/fs` by focusing on different needs: + +* **Use `io/fs` when:** You only need to read files and want to conform strictly to the standard library interfaces. +* **Use Afero when:** + * Your application needs to **create, write, modify, or delete** files. + * You need to test complex read/write interactions (e.g., renaming, concurrent writes). + * You need advanced compositional features (Copy-on-Write, Caching, etc.). + +Afero is fully compatible with `io/fs`. You can wrap any Afero filesystem to satisfy the `fs.FS` interface using `afero.NewIOFS`: -The Http package requires a slightly specific version of Open which -returns an http.File type. +```go +import "io/fs" -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. +// Create an Afero filesystem (writable) +var myAferoFs afero.Fs = afero.NewMemMapFs() -```go -httpFs := afero.NewHttpFs() -fileserver := http.FileServer(httpFs.Dir()) -http.Handle("/", fileserver) +// Convert it to a standard library fs.FS (read-only view) +var myIoFs fs.FS = afero.NewIOFS(myAferoFs) ``` -## Composite Backends +## Third-Party Backends & Ecosystem -Afero provides the ability have two filesystems (or more) act as a single -file system. +The Afero community has developed numerous backends and tools that extend the library's capabilities. Below are curated, well-maintained options organized by maturity and reliability. -### CacheOnReadFs +### Featured Community Backends -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. 
Subsequent reads will be pulled from the overlay
-directly permitting the request is within the cache duration of when it was
-created in the overlay.
+These are mature, reliable backends that we can confidently recommend for production use:
-If the base filesystem is writeable, any changes to files will be
-done first to the base, then to the overlay layer. Write calls to open file
-handles like `Write()` or `Truncate()` to the overlay first.
+#### **Amazon S3** - [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3)
+Production-ready S3 backend built on the official AWS SDK for Go.
-To writing files to the overlay only, you can use the overlay Fs directly (not
-via the union Fs).
+```go
+import "github.com/fclairamb/afero-s3"
-Cache files in the layer for the given time.Duration, a cache duration of 0
-means "forever" meaning the file will not be re-requested from the base ever.
+s3fs := s3.NewFs(bucket, session)
+```
-A read-only base will make the overlay also read-only but still copy files
-from the base to the overlay when they're not present (or outdated) in the
-caching layer.
+#### **MinIO** - [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio)
+MinIO backend providing S3-compatible object storage with deduplication and optimization features.
```go
-base := afero.NewOsFs()
-layer := afero.NewMemMapFs()
-ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+import miniofs "github.com/cpyun/afero-minio"
+
+minioFs := miniofs.NewMinioFs(ctx, "minio://endpoint/bucket")
```
-### CopyOnWriteFs()
+### Community & Specialized Backends
-The CopyOnWriteFs is a read only base file system with a potentially
-writeable layer on top.
+#### Cloud Storage
-Read operations will first look in the overlay and if not found there, will
-serve the file from the base.
+- **Google Drive** - [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive)
+  Streaming support; no write-seeking or POSIX permissions; no file-listing cache
-Changes to the file system will only be made in the overlay.
+- **Dropbox** - [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox)
+  Streaming support; no write-seeking or POSIX permissions
-Any attempt to modify a file found only in the base will copy the file to the
-overlay layer before modification (including opening a file with a writable
-handle).
+#### Version Control Systems
-Removing and Renaming files present only in the base layer is not currently
-permitted. If a file is present in the base layer and the overlay, only the
-overlay will be removed/renamed.
+- **Git Repositories** - [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs)
+  Read-only filesystem abstraction for Git repositories. Works with bare repositories and provides a filesystem view of any Git reference. Uses go-git for repository access.
-```go
- base := afero.NewOsFs()
- roBase := afero.NewReadOnlyFs(base)
- ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+#### Container and Remote Systems
- fh, _ = ufs.Create("/home/test/file2.txt")
- fh.WriteString("This is a test")
- fh.Close()
-```
+- **Docker Containers** - [`unmango/aferox`](https://github.com/unmango/aferox)
+  Access Docker container filesystems as if they were local filesystems
+
+- **GitHub API** - [`unmango/aferox`](https://github.com/unmango/aferox)
+  Turn GitHub repositories, releases, and assets into browsable filesystems
-In this example all write operations will only occur in memory (MemMapFs)
-leaving the base filesystem (OsFs) untouched.
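+
+Because every backend in these lists implements the standard `afero.Fs` interface, application code can stay backend-agnostic. The sketch below is illustrative only (the `syncReport` helper and its paths are our own invention, not part of any backend's API); any constructor from the sections above could supply `dst`:
+
+```go
+import "github.com/spf13/afero"
+
+// syncReport writes a report to any Afero backend: OsFs, MemMapFs,
+// or one of the community backends listed on this page.
+func syncReport(dst afero.Fs, data []byte) error {
+	// MkdirAll and WriteFile are part of the portable Afero API,
+	// so this function never needs to know which backend it uses.
+	if err := dst.MkdirAll("reports", 0755); err != nil {
+		return err
+	}
+	return afero.WriteFile(dst, "reports/latest.json", data, 0644)
+}
+```
+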
+#### FUSE Integration
+- **Generic FUSE** - [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem)
+  Mount any Afero filesystem as a FUSE filesystem, allowing any Afero backend to be used as a real mounted filesystem
-## Desired/possible backends
+#### Specialized Filesystems
-The following is a short list of possible backends we hope someone will
-implement:
+- **FAT32 Support** - [`aligator/GoFAT`](https://github.com/aligator/GoFAT)
+  Pure Go FAT filesystem implementation (currently read-only)
-* SSH
-* S3
+### Interface Adapters & Utilities
-# About the project
+**Cross-Interface Compatibility:**
+- [`jfontan/go-billy-desfacer`](https://github.com/jfontan/go-billy-desfacer) - Adapter between Afero and go-billy interfaces (for go-git compatibility)
+- [`Maldris/go-billy-afero`](https://github.com/Maldris/go-billy-afero) - Alternative wrapper for using Afero with go-billy
+- [`c4milo/afero2billy`](https://github.com/c4milo/afero2billy) - Another Afero to billy filesystem adapter
-## What's in the name
+**Working Directory Management:**
+- [`carolynvs/aferox`](https://github.com/carolynvs/aferox) - Working directory-aware filesystem wrapper
-Afero comes from the latin roots Ad-Facere.
+**Advanced Filtering:**
+- [`unmango/aferox`](https://github.com/unmango/aferox) includes multiple specialized filesystems:
+  - **FilterFs** - Predicate-based file filtering
+  - **IgnoreFs** - .gitignore-aware filtering
+  - **WriterFs** - Dump writes to io.Writer for debugging
-**"Ad"** is a prefix meaning "to".
+#### Developer Tools & Utilities
-**"Facere"** is a form of the root "faciō" making "make or do".
+**nhatthm Utility Suite** - Essential tools for Afero development:
+- [`nhatthm/aferocopy`](https://github.com/nhatthm/aferocopy) - Copy files between any Afero filesystems
+- [`nhatthm/aferomock`](https://github.com/nhatthm/aferomock) - Mocking toolkit for testing
+- [`nhatthm/aferoassert`](https://github.com/nhatthm/aferoassert) - Assertion helpers for filesystem testing
-The literal meaning of afero is "to make" or "to do" which seems very fitting
-for a library that allows one to make files and directories and do things with them.
+### Ecosystem Showcase
-The English word that shares the same roots as Afero is "affair". Affair shares
-the same concept but as a noun it means "something that is made or done" or "an
-object of a particular type".
+**Windows Virtual Drives** - [`balazsgrill/potatodrive`](https://github.com/balazsgrill/potatodrive)
+Mount any Afero filesystem as a Windows drive letter. Brilliant demonstration of Afero's power!
-It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
-Googles very well.
+### Modern Asset Embedding (Go 1.16+)
-## Release Notes
+Instead of third-party tools, use Go's native `//go:embed` with Afero:
-See the [Releases Page](https://github.com/spf13/afero/releases).
+```go
+import (
+	"embed"
+	"fmt"
+
+	"github.com/spf13/afero"
+)
+
+//go:embed assets/*
+var assetsFS embed.FS
+
+func main() {
+	// Convert the embedded files to an Afero filesystem.
+	// FromIOFS is a struct embedding an fs.FS, so it is built
+	// with a composite literal rather than a function call.
+	fs := afero.FromIOFS{FS: assetsFS}
+
+	// Use it like any other Afero filesystem
+	content, _ := afero.ReadFile(fs, "assets/config.json")
+	fmt.Println(string(content))
+}
+```
## Contributing
-1. Fork it
+We welcome contributions! The project is mature, but we are actively looking for contributors to help implement and stabilize network/cloud backends.
+ +* 🔥 **Microsoft Azure Blob Storage** +* 🔒 **Modern Encryption Backend** - Built on secure, contemporary crypto (not legacy EncFS) +* 🐙 **Canonical go-git Adapter** - Unified solution for Git integration +* 📡 **SSH/SCP Backend** - Secure remote file operations +* Stabilization of existing experimental backends (GCS, SFTP) + +To contribute: +1. Fork the repository 2. Create your feature branch (`git checkout -b my-new-feature`) 3. Commit your changes (`git commit -am 'Add some feature'`) 4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Releasing - -As of version 1.14.0, Afero moved implementations with third-party libraries to -their own submodules. - -Releasing a new version now requires a few steps: - -``` -VERSION=X.Y.Z -git tag -a v$VERSION -m "Release $VERSION" -git push origin v$VERSION - -cd gcsfs -go get github.com/spf13/afero@v$VERSION -go mod tidy -git commit -am "Update afero to v$VERSION" -git tag -a gcsfs/v$VERSION -m "Release gcsfs $VERSION" -git push origin gcsfs/v$VERSION -cd .. - -cd sftpfs -go get github.com/spf13/afero@v$VERSION -go mod tidy -git commit -am "Update afero to v$VERSION" -git tag -a sftpfs/v$VERSION -m "Release sftpfs $VERSION" -git push origin sftpfs/v$VERSION -cd .. - -git push -``` +5. Create a new Pull Request -TODO: move these instructions to a Makefile or something +## 📄 License -## Contributors +Afero is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) for details. -Names in no particular order: +## 🔗 Additional Resources -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) +- [📖 Full API Documentation](https://pkg.go.dev/github.com/spf13/afero) +- [🎯 Examples Repository](https://github.com/spf13/afero/tree/master/examples) +- [📋 Release Notes](https://github.com/spf13/afero/releases) +- [❓ GitHub Discussions](https://github.com/spf13/afero/discussions) -## License +--- -Afero is released under the Apache 2.0 license. See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) +*Afero comes from the Latin roots Ad-Facere, meaning "to make" or "to do" - fitting for a library that empowers you to make and do amazing things with filesystems.* diff --git a/tools/vendor/github.com/spf13/afero/copyOnWriteFs.go b/tools/vendor/github.com/spf13/afero/copyOnWriteFs.go index 184d6dd70..aba2879eb 100644 --- a/tools/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/tools/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -34,7 +34,8 @@ func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { _, err := u.base.Stat(name) if err != nil { if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || + oerr.Err == syscall.ENOTDIR { return false, nil } } @@ -237,7 +238,11 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, return u.layer.OpenFile(name, flag, perm) } - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + return nil, &os.PathError{ + Op: "open", + Path: name, + Err: syscall.ENOTDIR, + } // ...or os.ErrNotExist? 
} if b { return u.base.OpenFile(name, flag, perm) diff --git a/tools/vendor/github.com/spf13/afero/iofs.go b/tools/vendor/github.com/spf13/afero/iofs.go index b13155ca4..57ba5673e 100644 --- a/tools/vendor/github.com/spf13/afero/iofs.go +++ b/tools/vendor/github.com/spf13/afero/iofs.go @@ -137,7 +137,7 @@ type readDirFile struct { var _ fs.ReadDirFile = readDirFile{} func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { - items, err := r.File.Readdir(n) + items, err := r.Readdir(n) if err != nil { return nil, err } @@ -161,7 +161,12 @@ var _ Fs = FromIOFS{} func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } +func (f FromIOFS) Mkdir( + name string, + perm os.FileMode, +) error { + return notImplemented("mkdir", name) +} func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { return notImplemented("mkdirall", path) diff --git a/tools/vendor/github.com/spf13/afero/lstater.go b/tools/vendor/github.com/spf13/afero/lstater.go index 89c1bfc0a..2dcbdb1f0 100644 --- a/tools/vendor/github.com/spf13/afero/lstater.go +++ b/tools/vendor/github.com/spf13/afero/lstater.go @@ -19,9 +19,9 @@ import ( // Lstater is an optional interface in Afero. It is only implemented by the // filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. +// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem. // Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. +// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not. type Lstater interface { LstatIfPossible(name string) (os.FileInfo, bool, error) } diff --git a/tools/vendor/github.com/spf13/afero/mem/file.go b/tools/vendor/github.com/spf13/afero/mem/file.go index 62fe4498e..c77fcd40e 100644 --- a/tools/vendor/github.com/spf13/afero/mem/file.go +++ b/tools/vendor/github.com/spf13/afero/mem/file.go @@ -150,7 +150,11 @@ func (f *File) Sync() error { func (f *File) Readdir(count int) (res []os.FileInfo, err error) { if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + return nil, &os.PathError{ + Op: "readdir", + Path: f.fileData.name, + Err: errors.New("not a dir"), + } } var outLength int64 @@ -236,7 +240,11 @@ func (f *File) Truncate(size int64) error { return ErrFileClosed } if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + return &os.PathError{ + Op: "truncate", + Path: f.fileData.name, + Err: errors.New("file handle is read only"), + } } if size < 0 { return ErrOutOfRange @@ -273,7 +281,11 @@ func (f *File) Write(b []byte) (n int, err error) { return 0, ErrFileClosed } if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + return 0, &os.PathError{ + Op: "write", + Path: f.fileData.name, + Err: errors.New("file handle is read only"), + } } n = len(b) cur := atomic.LoadInt64(&f.at) @@ -285,7 +297,9 @@ func (f *File) Write(b []byte) (n int, err error) { tail = f.fileData.data[n+int(cur):] } if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) + f.fileData.data = append( + f.fileData.data, + append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) 
f.fileData.data = append(f.fileData.data, tail...) } else { f.fileData.data = append(f.fileData.data[:cur], b...) diff --git a/tools/vendor/github.com/spf13/afero/unionFile.go b/tools/vendor/github.com/spf13/afero/unionFile.go index 62dd6c93c..2e2253f55 100644 --- a/tools/vendor/github.com/spf13/afero/unionFile.go +++ b/tools/vendor/github.com/spf13/afero/unionFile.go @@ -92,7 +92,8 @@ func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { func (f *UnionFile) Write(s []byte) (n int, err error) { if f.Layer != nil { n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? + if err == nil && + f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? _, err = f.Base.Write(s) } return n, err @@ -157,7 +158,7 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err // return a single view of the overlayed directories. // At the end of the directory view, the error is io.EOF if c > 0. func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger + merge := f.Merger if merge == nil { merge = defaultUnionMergeDirsFn } diff --git a/tools/vendor/github.com/spf13/afero/util.go b/tools/vendor/github.com/spf13/afero/util.go index 9e4cba274..231768838 100644 --- a/tools/vendor/github.com/spf13/afero/util.go +++ b/tools/vendor/github.com/spf13/afero/util.go @@ -113,11 +113,11 @@ func GetTempDir(fs Fs, subPath string) string { if subPath != "" { // preserve windows backslash :-( if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) + subPath = strings.ReplaceAll(subPath, "\\", "____") } dir = dir + UnicodeSanitize((subPath)) if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) + dir = strings.ReplaceAll(dir, "____", "\\") } if exists, _ := Exists(fs, dir); exists { diff --git a/tools/vendor/github.com/spf13/cast/.editorconfig b/tools/vendor/github.com/spf13/cast/.editorconfig new file mode 100644 index 000000000..a85749f19 --- /dev/null +++ b/tools/vendor/github.com/spf13/cast/.editorconfig @@ -0,0 +1,15 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{*.yml,*.yaml}] +indent_size = 2 diff --git a/tools/vendor/github.com/spf13/cast/.golangci.yaml b/tools/vendor/github.com/spf13/cast/.golangci.yaml new file mode 100644 index 000000000..e00fd47aa --- /dev/null +++ b/tools/vendor/github.com/spf13/cast/.golangci.yaml @@ -0,0 +1,39 @@ +version: "2" + +run: + timeout: 10m + +linters: + enable: + - errcheck + - govet + - ineffassign + - misspell + - nolintlint + # - revive + - unused + + disable: + - staticcheck + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +formatters: + enable: + - gci + - gofmt + # - gofumpt + - goimports + # - golines + + settings: + gci: + sections: + - standard + - default + - localmodule diff --git a/tools/vendor/github.com/spf13/cast/README.md b/tools/vendor/github.com/spf13/cast/README.md index 1be666a45..c58eccb3f 100644 --- a/tools/vendor/github.com/spf13/cast/README.md +++ b/tools/vendor/github.com/spf13/cast/README.md @@ -1,9 +1,9 @@ # cast -[![GitHub Workflow 
Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/test.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/test.yaml) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/spf13/cast) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/spf13/cast?style=flat-square&color=61CFDD) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/spf13/cast/badge?style=flat-square)](https://deps.dev/go/github.com%252Fspf13%252Fcast) Easy and safe casting from one type to another in Go @@ -73,3 +73,7 @@ the code for a complete set. var eight interface{} = 8 cast.ToInt(eight) // 8 cast.ToInt(nil) // 0 + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/tools/vendor/github.com/spf13/cast/alias.go b/tools/vendor/github.com/spf13/cast/alias.go new file mode 100644 index 000000000..855d60005 --- /dev/null +++ b/tools/vendor/github.com/spf13/cast/alias.go @@ -0,0 +1,69 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. +package cast + +import ( + "reflect" + "slices" +) + +var kindNames = []string{ + reflect.String: "string", + reflect.Bool: "bool", + reflect.Int: "int", + reflect.Int8: "int8", + reflect.Int16: "int16", + reflect.Int32: "int32", + reflect.Int64: "int64", + reflect.Uint: "uint", + reflect.Uint8: "uint8", + reflect.Uint16: "uint16", + reflect.Uint32: "uint32", + reflect.Uint64: "uint64", + reflect.Float32: "float32", + reflect.Float64: "float64", +} + +var kinds = map[reflect.Kind]func(reflect.Value) any{ + reflect.String: func(v reflect.Value) any { return v.String() }, + reflect.Bool: func(v reflect.Value) any { return v.Bool() }, + reflect.Int: func(v reflect.Value) any { return int(v.Int()) }, + reflect.Int8: func(v reflect.Value) any { return int8(v.Int()) }, + reflect.Int16: func(v reflect.Value) any { return int16(v.Int()) }, + reflect.Int32: func(v reflect.Value) any { return int32(v.Int()) }, + reflect.Int64: func(v reflect.Value) any { return v.Int() }, + reflect.Uint: func(v reflect.Value) any { return uint(v.Uint()) }, + reflect.Uint8: func(v reflect.Value) any { return uint8(v.Uint()) }, + reflect.Uint16: func(v reflect.Value) any { return uint16(v.Uint()) }, + reflect.Uint32: func(v reflect.Value) any { return uint32(v.Uint()) }, + reflect.Uint64: func(v reflect.Value) any { return v.Uint() }, + reflect.Float32: func(v reflect.Value) any { return float32(v.Float()) }, + reflect.Float64: func(v reflect.Value) any { return v.Float() }, +} + +// resolveAlias attempts to resolve a named type to its underlying basic type (if possible). +// +// Pointers are expected to be indirected by this point. 
+func resolveAlias(i any) (any, bool) { + if i == nil { + return nil, false + } + + t := reflect.TypeOf(i) + + // Not a named type + if t.Name() == "" || slices.Contains(kindNames, t.Name()) { + return i, false + } + + resolve, ok := kinds[t.Kind()] + if !ok { // Not a supported kind + return i, false + } + + v := reflect.ValueOf(i) + + return resolve(v), true +} diff --git a/tools/vendor/github.com/spf13/cast/basic.go b/tools/vendor/github.com/spf13/cast/basic.go new file mode 100644 index 000000000..fa330e207 --- /dev/null +++ b/tools/vendor/github.com/spf13/cast/basic.go @@ -0,0 +1,131 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "fmt" + "html/template" + "strconv" + "time" +) + +// ToBoolE casts any value to a bool type. +func ToBoolE(i any) (bool, error) { + i, _ = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + return b != 0, nil + case int8: + return b != 0, nil + case int16: + return b != 0, nil + case int32: + return b != 0, nil + case int64: + return b != 0, nil + case uint: + return b != 0, nil + case uint8: + return b != 0, nil + case uint16: + return b != 0, nil + case uint32: + return b != 0, nil + case uint64: + return b != 0, nil + case float32: + return b != 0, nil + case float64: + return b != 0, nil + case time.Duration: + return b != 0, nil + case string: + return strconv.ParseBool(b) + case json.Number: + v, err := ToInt64E(b) + if err == nil { + return v != 0, nil + } + + return false, fmt.Errorf(errorMsg, i, i, false) + default: + if i, ok := resolveAlias(i); ok { + return ToBoolE(i) + } + + return false, fmt.Errorf(errorMsg, i, i, false) + } +} + +// ToStringE casts any value to a string type. 
+func ToStringE(i any) (string, error) { + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int32: + return strconv.FormatInt(int64(s), 10), nil + case int64: + return strconv.FormatInt(s, 10), nil + case uint: + return strconv.FormatUint(uint64(s), 10), nil + case uint8: + return strconv.FormatUint(uint64(s), 10), nil + case uint16: + return strconv.FormatUint(uint64(s), 10), nil + case uint32: + return strconv.FormatUint(uint64(s), 10), nil + case uint64: + return strconv.FormatUint(s, 10), nil + case json.Number: + return s.String(), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + if i, ok := indirect(i); ok { + return ToStringE(i) + } + + if i, ok := resolveAlias(i); ok { + return ToStringE(i) + } + + return "", fmt.Errorf(errorMsg, i, i, "") + } +} diff --git a/tools/vendor/github.com/spf13/cast/cast.go b/tools/vendor/github.com/spf13/cast/cast.go index 0cfe9418d..8d85539b3 100644 --- a/tools/vendor/github.com/spf13/cast/cast.go +++ b/tools/vendor/github.com/spf13/cast/cast.go @@ -8,169 +8,77 @@ package cast import "time" -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v -} - -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v -} - -func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { - v, _ := ToTimeInDefaultLocationE(i, location) - return v -} - -// ToDuration casts an interface to a time.Duration type. -func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} - -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. 
-func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. -func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. -func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. -func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. -func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMapInt casts an interface to a map[string]int type. -func ToStringMapInt(i interface{}) map[string]int { - v, _ := ToStringMapIntE(i) - return v -} - -// ToStringMapInt64 casts an interface to a map[string]int64 type. -func ToStringMapInt64(i interface{}) map[string]int64 { - v, _ := ToStringMapInt64E(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} +const errorMsg = "unable to cast %#v of type %T to %T" +const errorMsgWith = "unable to cast %#v of type %T to %T: %w" -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} +// Basic is a type parameter constraint for functions accepting basic types. +// +// It represents the supported basic types this package can cast to. +type Basic interface { + string | bool | Number | time.Time | time.Duration +} + +// ToE casts any value to a [Basic] type. 
+func ToE[T Basic](i any) (T, error) { + var t T + + var v any + var err error + + switch any(t).(type) { + case string: + v, err = ToStringE(i) + case bool: + v, err = ToBoolE(i) + case int: + v, err = toNumberE[int](i, parseInt[int]) + case int8: + v, err = toNumberE[int8](i, parseInt[int8]) + case int16: + v, err = toNumberE[int16](i, parseInt[int16]) + case int32: + v, err = toNumberE[int32](i, parseInt[int32]) + case int64: + v, err = toNumberE[int64](i, parseInt[int64]) + case uint: + v, err = toUnsignedNumberE[uint](i, parseUint[uint]) + case uint8: + v, err = toUnsignedNumberE[uint8](i, parseUint[uint8]) + case uint16: + v, err = toUnsignedNumberE[uint16](i, parseUint[uint16]) + case uint32: + v, err = toUnsignedNumberE[uint32](i, parseUint[uint32]) + case uint64: + v, err = toUnsignedNumberE[uint64](i, parseUint[uint64]) + case float32: + v, err = toNumberE[float32](i, parseFloat[float32]) + case float64: + v, err = toNumberE[float64](i, parseFloat[float64]) + case time.Time: + v, err = ToTimeE(i) + case time.Duration: + v, err = ToDurationE(i) + } + + if err != nil { + return t, err + } + + return v.(T), nil +} + +// Must is a helper that wraps a call to a cast function and panics if the error is non-nil. +func Must[T any](i any, err error) T { + if err != nil { + panic(err) + } + + return i.(T) +} + +// To casts any value to a [Basic] type. +func To[T Basic](i any) T { + v, _ := ToE[T](i) -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) return v } diff --git a/tools/vendor/github.com/spf13/cast/caste.go b/tools/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index 4181a2e75..000000000 --- a/tools/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1510 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -type float64EProvider interface { - Float64() (float64, error) -} - -type float64Provider interface { - Float64() float64 -} - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - return ToTimeInDefaultLocationE(i, time.UTC) -} - -// ToTimeInDefaultLocationE casts an empty interface to time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. -func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDateInDefaultLocation(v, location) - case json.Number: - s, err1 := ToInt64E(v) - if err1 != nil { - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } - return time.Unix(s, 0), nil - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. 
-func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - case float64EProvider: - var v float64 - v, err = s.Float64() - d = time.Duration(v) - return - case float64Provider: - d = time.Duration(s.Float64()) - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - return b != 0, nil - case int64: - return b != 0, nil - case int32: - return b != 0, nil - case int16: - return b != 0, nil - case int8: - return b != 0, nil - case uint: - return b != 0, nil - case uint64: - return b != 0, nil - case uint32: - return b != 0, nil - case uint16: - return b != 0, nil - case uint8: - return b != 0, nil - case float64: - return b != 0, nil - case float32: - return b != 0, nil - case time.Duration: - return b != 0, nil - case string: - return strconv.ParseBool(i.(string)) - case json.Number: - v, err := ToInt64E(b) - if err == nil { - return v != 0, nil - } - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. -func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float64(intv), nil - } - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64Provider: - return s.Float64(), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. 
-func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float32(intv), nil - } - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64Provider: - return float32(s.Float64()), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. -func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int64(intv), nil - } - - switch s := i.(type) { - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToInt64E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. -func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int32(intv), nil - } - - switch s := i.(type) { - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case json.Number: - return ToInt32E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. 
-func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int16(intv), nil - } - - switch s := i.(type) { - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case json.Number: - return ToInt16E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. -func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int8(intv), nil - } - - switch s := i.(type) { - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case json.Number: - return ToInt8E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. -func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return intv, nil - } - - switch s := i.(type) { - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToIntE(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. 
-func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - case json.Number: - return ToUintE(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. -func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint64(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - case json.Number: - return ToUint64E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. 
-func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint32(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - case json.Number: - return ToUint32E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. -func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint16(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - case json.Number: - return ToUint16E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. 
-func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint8(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - case json.Number: - return ToUint8E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - errorType := reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. 
-func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatUint(uint64(s), 10), nil - case uint64: - return strconv.FormatUint(uint64(s), 10), nil - case uint32: - return strconv.FormatUint(uint64(s), 10), nil - case uint16: - return strconv.FormatUint(uint64(s), 10), nil - case uint8: - return strconv.FormatUint(uint64(s), 10), nil - case json.Number: - return s.String(), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i interface{}) (map[string]string, error) { - m := map[string]string{} - - switch v := i.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) - } -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - m := map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - m := map[string]bool{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]bool: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) - } -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - m := map[string]interface{}{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = val - } - return m, nil - case map[string]interface{}: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) - } -} - -// ToStringMapIntE casts an interface to a map[string]int{} type. 
-func ToStringMapIntE(i interface{}) (map[string]int, error) { - m := map[string]int{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt(val) - } - return m, nil - case map[string]int: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToIntE(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToStringMapInt64E casts an interface to a map[string]int64{} type. -func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - m := map[string]int64{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt64(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt64(val) - } - return m, nil - case map[string]int64: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToInt64E(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToSliceE casts an interface to a []interface{} type. -func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -// ToBoolSliceE casts an interface to a []bool type. -func ToBoolSliceE(i interface{}) ([]bool, error) { - if i == nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - - switch v := i.(type) { - case []bool: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]bool, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToBoolE(s.Index(j).Interface()) - if err != nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - a[j] = val - } - return a, nil - default: - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } -} - -// ToStringSliceE casts an interface to a []string type. 
-func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case []int8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case string: - return strings.Fields(v), nil - case []error: - for _, err := range i.([]error) { - a = append(a, err.Error()) - } - return a, nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - if i == nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - - switch v := i.(type) { - case []int: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToIntE(s.Index(j).Interface()) - if err != nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - a[j] = val - } - return a, nil - default: - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } -} - -// ToDurationSliceE casts an interface to a []time.Duration type. -func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - if i == nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - - switch v := i.(type) { - case []time.Duration: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]time.Duration, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToDurationE(s.Index(j).Interface()) - if err != nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - a[j] = val - } - return a, nil - default: - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. -func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, time.UTC, timeFormats) -} - -// StringToDateInDefaultLocation casts an empty interface to a time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
-func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { - return parseDateWith(s, location, timeFormats) -} - -type timeFormatType int - -const ( - timeFormatNoTimezone timeFormatType = iota - timeFormatNamedTimezone - timeFormatNumericTimezone - timeFormatNumericAndNamedTimezone - timeFormatTimeOnly -) - -type timeFormat struct { - format string - typ timeFormatType -} - -func (f timeFormat) hasTimezone() bool { - // We don't include the formats with only named timezones, see - // https://github.com/golang/go/issues/19694#issuecomment-289103522 - return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone -} - -var timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, -} - -func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { - if d, e = time.Parse(format.format, s); e == nil { - - // Some time formats have a zone name, but no offset, so it gets - // put in that zone name (not the default one passed in to us), but - // without that zone's offset. So set the location manually. - if format.typ <= timeFormatNamedTimezone { - if location == nil { - location = time.Local - } - year, month, day := d.Date() - hour, min, sec := d.Clock() - d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) - } - - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. -func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} - -// toInt returns the int value of v if v or v's underlying type -// is an int. -// Note that this will return false for int64 etc. types. 
-func toInt(v interface{}) (int, bool) {
-	switch v := v.(type) {
-	case int:
-		return v, true
-	case time.Weekday:
-		return int(v), true
-	case time.Month:
-		return int(v), true
-	default:
-		return 0, false
-	}
-}
-
-func trimZeroDecimal(s string) string {
-	var foundZero bool
-	for i := len(s); i > 0; i-- {
-		switch s[i-1] {
-		case '.':
-			if foundZero {
-				return s[:i-1]
-			}
-		case '0':
-			foundZero = true
-		default:
-			return s
-		}
-	}
-	return s
-}
diff --git a/tools/vendor/github.com/spf13/cast/indirect.go b/tools/vendor/github.com/spf13/cast/indirect.go
new file mode 100644
index 000000000..093345f73
--- /dev/null
+++ b/tools/vendor/github.com/spf13/cast/indirect.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+	"reflect"
+)
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(i any) (any, bool) {
+	if i == nil {
+		return nil, false
+	}
+
+	if t := reflect.TypeOf(i); t.Kind() != reflect.Ptr {
+		// Avoid creating a reflect.Value if it's not a pointer.
+		return i, false
+	}
+
+	v := reflect.ValueOf(i)
+
+	for v.Kind() == reflect.Ptr || (v.Kind() == reflect.Interface && v.Elem().Kind() == reflect.Ptr) {
+		if v.IsNil() {
+			return nil, true
+		}
+
+		v = v.Elem()
+	}
+
+	return v.Interface(), true
+}
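The `indirect` helper above is what lets the rewritten cast conversions accept pointer inputs. A minimal sketch of the resulting behavior, assuming this vendored version of `github.com/spf13/cast` (the helper itself is unexported, so it is exercised through the public `To*` functions):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	n := 42
	p := &n
	pp := &p

	// toNumber calls indirect(i) first, so nested pointers are
	// dereferenced down to the base value before conversion.
	fmt.Println(cast.ToInt(p))  // 42
	fmt.Println(cast.ToInt(pp)) // 42

	// A nil pointer dereferences to nil, which converts to the zero value.
	var nilPtr *int
	fmt.Println(cast.ToInt(nilPtr)) // 0
}
```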
+ {"2006-01-02", TimeFormatNoTimezone}, + {time.RFC3339, TimeFormatNumericTimezone}, + {"2006-01-02T15:04:05", TimeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, TimeFormatNumericTimezone}, + {time.RFC1123, TimeFormatNamedTimezone}, + {time.RFC822Z, TimeFormatNumericTimezone}, + {time.RFC822, TimeFormatNamedTimezone}, + {time.RFC850, TimeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", TimeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", TimeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", TimeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", TimeFormatNoTimezone}, + {time.ANSIC, TimeFormatNoTimezone}, + {time.UnixDate, TimeFormatNamedTimezone}, + {time.RubyDate, TimeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", TimeFormatNumericTimezone}, + {"02 Jan 2006", TimeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", TimeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", TimeFormatNumericTimezone}, + {time.Kitchen, TimeFormatTimeOnly}, + {time.Stamp, TimeFormatTimeOnly}, + {time.StampMilli, TimeFormatTimeOnly}, + {time.StampMicro, TimeFormatTimeOnly}, + {time.StampNano, TimeFormatTimeOnly}, +} + +func ParseDateWith(s string, location *time.Location, formats []TimeFormat) (d time.Time, e error) { + for _, format := range formats { + if d, e = time.Parse(format.Format, s); e == nil { + + // Some time formats have a zone name, but no offset, so it gets + // put in that zone name (not the default one passed in to us), but + // without that zone's offset. So set the location manually. + if format.Typ <= TimeFormatNamedTimezone { + if location == nil { + location = time.Local + } + year, month, day := d.Date() + hour, min, sec := d.Clock() + d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) + } + + return + } + } + return d, fmt.Errorf("unable to parse date: %s", s) +} diff --git a/tools/vendor/github.com/spf13/cast/internal/timeformattype_string.go b/tools/vendor/github.com/spf13/cast/internal/timeformattype_string.go new file mode 100644 index 000000000..60a29a862 --- /dev/null +++ b/tools/vendor/github.com/spf13/cast/internal/timeformattype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=TimeFormatType"; DO NOT EDIT. + +package internal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TimeFormatNoTimezone-0] + _ = x[TimeFormatNamedTimezone-1] + _ = x[TimeFormatNumericTimezone-2] + _ = x[TimeFormatNumericAndNamedTimezone-3] + _ = x[TimeFormatTimeOnly-4] +} + +const _TimeFormatType_name = "TimeFormatNoTimezoneTimeFormatNamedTimezoneTimeFormatNumericTimezoneTimeFormatNumericAndNamedTimezoneTimeFormatTimeOnly" + +var _TimeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i TimeFormatType) String() string { + if i < 0 || i >= TimeFormatType(len(_TimeFormatType_index)-1) { + return "TimeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _TimeFormatType_name[_TimeFormatType_index[i]:_TimeFormatType_index[i+1]] +} diff --git a/tools/vendor/github.com/spf13/cast/map.go b/tools/vendor/github.com/spf13/cast/map.go new file mode 100644 index 000000000..858d4ee43 --- /dev/null +++ b/tools/vendor/github.com/spf13/cast/map.go @@ -0,0 +1,212 @@ +// Copyright © 2014 Steve Francia . 
+// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "fmt" + "reflect" +) + +func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (map[K]V, error) { + m := map[K]V{} + + if i == nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + + switch v := i.(type) { + case map[K]V: + return v, nil + + case map[K]any: + for k, val := range v { + m[k] = valFn(val) + } + + return m, nil + + case map[any]V: + for k, val := range v { + m[keyFn(k)] = val + } + + return m, nil + + case map[any]any: + for k, val := range v { + m[keyFn(k)] = valFn(val) + } + + return m, nil + + case string: + err := jsonStringToObject(v, &m) + return m, err + + default: + return m, fmt.Errorf(errorMsg, i, i, m) + } +} + +func toStringMapE[T any](i any, fn func(any) T) (map[string]T, error) { + return toMapE(i, ToString, fn) +} + +// ToStringMapStringE casts any value to a map[string]string type. +func ToStringMapStringE(i any) (map[string]string, error) { + return toStringMapE(i, ToString) +} + +// ToStringMapStringSliceE casts any value to a map[string][]string type. +func ToStringMapStringSliceE(i any) (map[string][]string, error) { + m := map[string][]string{} + + switch v := i.(type) { + case map[string][]string: + return v, nil + case map[string][]any: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[string]string: + for k, val := range v { + m[ToString(k)] = []string{val} + } + case map[string]any: + for k, val := range v { + switch vt := val.(type) { + case []any: + m[ToString(k)] = ToStringSlice(vt) + case []string: + m[ToString(k)] = vt + default: + m[ToString(k)] = []string{ToString(val)} + } + } + return m, nil + case map[any][]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any][]any: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any]any: + for k, val := range v { + key, err := ToStringE(k) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + value, err := ToStringSliceE(val) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + m[key] = value + } + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf(errorMsg, i, i, m) + } + + return m, nil +} + +// ToStringMapBoolE casts any value to a map[string]bool type. +func ToStringMapBoolE(i any) (map[string]bool, error) { + return toStringMapE(i, ToBool) +} + +// ToStringMapE casts any value to a map[string]any type. 
+func ToStringMapE(i any) (map[string]any, error) {
+	fn := func(i any) any { return i }
+
+	return toStringMapE(i, fn)
+}
+
+func toStringMapIntE[T int | int64](i any, fn func(any) T, fnE func(any) (T, error)) (map[string]T, error) {
+	m := map[string]T{}
+
+	if i == nil {
+		return nil, fmt.Errorf(errorMsg, i, i, m)
+	}
+
+	switch v := i.(type) {
+	case map[string]T:
+		return v, nil
+
+	case map[string]any:
+		for k, val := range v {
+			m[k] = fn(val)
+		}
+
+		return m, nil
+
+	case map[any]T:
+		for k, val := range v {
+			m[ToString(k)] = val
+		}
+
+		return m, nil
+
+	case map[any]any:
+		for k, val := range v {
+			m[ToString(k)] = fn(val)
+		}
+
+		return m, nil
+
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	}
+
+	if reflect.TypeOf(i).Kind() != reflect.Map {
+		return m, fmt.Errorf(errorMsg, i, i, m)
+	}
+
+	mVal := reflect.ValueOf(m)
+	v := reflect.ValueOf(i)
+
+	for _, keyVal := range v.MapKeys() {
+		val, err := fnE(v.MapIndex(keyVal).Interface())
+		if err != nil {
+			return m, fmt.Errorf(errorMsg, i, i, m)
+		}
+
+		mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+	}
+
+	return m, nil
+}
+
+// ToStringMapIntE casts any value to a map[string]int type.
+func ToStringMapIntE(i any) (map[string]int, error) {
+	return toStringMapIntE(i, ToInt, ToIntE)
+}
+
+// ToStringMapInt64E casts any value to a map[string]int64 type.
+func ToStringMapInt64E(i any) (map[string]int64, error) {
+	return toStringMapIntE(i, ToInt64, ToInt64E)
+}
+
+// jsonStringToObject attempts to unmarshal a string as JSON into
+// the object passed as pointer.
+func jsonStringToObject(s string, v any) error {
+	data := []byte(s)
+	return json.Unmarshal(data, v)
+}
diff --git a/tools/vendor/github.com/spf13/cast/number.go b/tools/vendor/github.com/spf13/cast/number.go
new file mode 100644
index 000000000..a58dc4d1e
--- /dev/null
+++ b/tools/vendor/github.com/spf13/cast/number.go
@@ -0,0 +1,549 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+type float64EProvider interface {
+	Float64() (float64, error)
+}
+
+type float64Provider interface {
+	Float64() float64
+}
+
+// Number is a type parameter constraint for functions accepting number types.
+//
+// It represents the supported number types this package can cast to.
+type Number interface {
+	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64
+}
+
+type integer interface {
+	int | int8 | int16 | int32 | int64
+}
+
+type unsigned interface {
+	uint | uint8 | uint16 | uint32 | uint64
+}
+
+type float interface {
+	float32 | float64
+}
+
+// ToNumberE casts any value to a [Number] type.
+func ToNumberE[T Number](i any) (T, error) { + var t T + + switch any(t).(type) { + case int: + return toNumberE[T](i, parseNumber[T]) + case int8: + return toNumberE[T](i, parseNumber[T]) + case int16: + return toNumberE[T](i, parseNumber[T]) + case int32: + return toNumberE[T](i, parseNumber[T]) + case int64: + return toNumberE[T](i, parseNumber[T]) + case uint: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint8: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint16: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint32: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint64: + return toUnsignedNumberE[T](i, parseNumber[T]) + case float32: + return toNumberE[T](i, parseNumber[T]) + case float64: + return toNumberE[T](i, parseNumber[T]) + default: + return 0, fmt.Errorf("unknown number type: %T", t) + } +} + +// ToNumber casts any value to a [Number] type. +func ToNumber[T Number](i any) T { + v, _ := ToNumberE[T](i) + + return v +} + +// toNumber's semantics differ from other "to" functions. +// It returns false as the second parameter if the conversion fails. +// This is to signal other callers that they should proceed with their own conversions. +func toNumber[T Number](i any) (T, bool) { + i, _ = indirect(i) + + switch s := i.(type) { + case T: + return s, true + case int: + return T(s), true + case int8: + return T(s), true + case int16: + return T(s), true + case int32: + return T(s), true + case int64: + return T(s), true + case uint: + return T(s), true + case uint8: + return T(s), true + case uint16: + return T(s), true + case uint32: + return T(s), true + case uint64: + return T(s), true + case float32: + return T(s), true + case float64: + return T(s), true + case bool: + if s { + return 1, true + } + + return 0, true + case nil: + return 0, true + case time.Weekday: + return T(s), true + case time.Month: + return T(s), true + } + + return 0, false +} + +func toNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) { + n, ok := toNumber[T](i) + if ok { + return n, nil + } + + i, _ = indirect(i) + + switch s := i.(type) { + case string: + if s == "" { + return 0, nil + } + + v, err := parseFn(s) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case json.Number: + if s == "" { + return 0, nil + } + + v, err := parseFn(string(s)) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case float64EProvider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v, err := s.Float64() + if err != nil { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + return T(v), nil + case float64Provider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + return T(s.Float64()), nil + default: + if i, ok := resolveAlias(i); ok { + return toNumberE(i, parseFn) + } + + return 0, fmt.Errorf(errorMsg, i, i, n) + } +} + +func toUnsignedNumber[T Number](i any) (T, bool, bool) { + i, _ = indirect(i) + + switch s := i.(type) { + case T: + return s, true, true + case int: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int8: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int16: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int32: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int64: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case uint: + return T(s), true, true 
+ case uint8: + return T(s), true, true + case uint16: + return T(s), true, true + case uint32: + return T(s), true, true + case uint64: + return T(s), true, true + case float32: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case float64: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case bool: + if s { + return 1, true, true + } + + return 0, true, true + case nil: + return 0, true, true + case time.Weekday: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case time.Month: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + } + + return 0, true, false +} + +func toUnsignedNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) { + n, valid, ok := toUnsignedNumber[T](i) + if ok { + return n, nil + } + + i, _ = indirect(i) + + if !valid { + return 0, errNegativeNotAllowed + } + + switch s := i.(type) { + case string: + if s == "" { + return 0, nil + } + + v, err := parseFn(s) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case json.Number: + if s == "" { + return 0, nil + } + + v, err := parseFn(string(s)) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case float64EProvider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v, err := s.Float64() + if err != nil { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + if v < 0 { + return 0, errNegativeNotAllowed + } + + return T(v), nil + case float64Provider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v := s.Float64() + + if v < 0 { + return 0, errNegativeNotAllowed + } + + return T(v), nil + default: + if i, ok := resolveAlias(i); ok { + return toUnsignedNumberE(i, parseFn) + } + + return 0, fmt.Errorf(errorMsg, i, i, n) + } +} + +func parseNumber[T Number](s string) (T, error) { + var t T + + switch any(t).(type) { + case int: + v, err := parseInt[int](s) + + return T(v), err + case int8: + v, err := parseInt[int8](s) + + return T(v), err + case int16: + v, err := parseInt[int16](s) + + return T(v), err + case int32: + v, err := parseInt[int32](s) + + return T(v), err + case int64: + v, err := parseInt[int64](s) + + return T(v), err + case uint: + v, err := parseUint[uint](s) + + return T(v), err + case uint8: + v, err := parseUint[uint8](s) + + return T(v), err + case uint16: + v, err := parseUint[uint16](s) + + return T(v), err + case uint32: + v, err := parseUint[uint32](s) + + return T(v), err + case uint64: + v, err := parseUint[uint64](s) + + return T(v), err + case float32: + v, err := strconv.ParseFloat(s, 32) + + return T(v), err + case float64: + v, err := strconv.ParseFloat(s, 64) + + return T(v), err + + default: + return 0, fmt.Errorf("unknown number type: %T", t) + } +} + +func parseInt[T integer](s string) (T, error) { + v, err := strconv.ParseInt(trimDecimal(s), 0, 0) + if err != nil { + return 0, err + } + + return T(v), nil +} + +func parseUint[T unsigned](s string) (T, error) { + v, err := strconv.ParseUint(strings.TrimLeft(trimDecimal(s), "+"), 0, 0) + if err != nil { + return 0, err + } + + return T(v), nil +} + +func parseFloat[T float](s string) (T, error) { + var t T + + var v any + var err error + + switch any(t).(type) { + case float32: + n, e := strconv.ParseFloat(s, 32) + + v = float32(n) + err = e + case float64: + n, e := strconv.ParseFloat(s, 64) + + v = float64(n) + err = e + } + + return v.(T), err +} + +// ToFloat64E casts an 
interface to a float64 type.
+func ToFloat64E(i any) (float64, error) {
+	return toNumberE[float64](i, parseFloat[float64])
+}
+
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i any) (float32, error) {
+	return toNumberE[float32](i, parseFloat[float32])
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i any) (int64, error) {
+	return toNumberE[int64](i, parseInt[int64])
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i any) (int32, error) {
+	return toNumberE[int32](i, parseInt[int32])
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i any) (int16, error) {
+	return toNumberE[int16](i, parseInt[int16])
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i any) (int8, error) {
+	return toNumberE[int8](i, parseInt[int8])
+}
+
+// ToIntE casts an interface to an int type.
+func ToIntE(i any) (int, error) {
+	return toNumberE[int](i, parseInt[int])
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i any) (uint, error) {
+	return toUnsignedNumberE[uint](i, parseUint[uint])
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i any) (uint64, error) {
+	return toUnsignedNumberE[uint64](i, parseUint[uint64])
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i any) (uint32, error) {
+	return toUnsignedNumberE[uint32](i, parseUint[uint32])
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i any) (uint16, error) {
+	return toUnsignedNumberE[uint16](i, parseUint[uint16])
+}
+
+// ToUint8E casts an interface to a uint8 type.
+func ToUint8E(i any) (uint8, error) {
+	return toUnsignedNumberE[uint8](i, parseUint[uint8])
+}
+
+func trimZeroDecimal(s string) string {
+	var foundZero bool
+	for i := len(s); i > 0; i-- {
+		switch s[i-1] {
+		case '.':
+			if foundZero {
+				return s[:i-1]
+			}
+		case '0':
+			foundZero = true
+		default:
+			return s
+		}
+	}
+	return s
+}
+
+var stringNumberRe = regexp.MustCompile(`^([-+]?\d*)(\.\d*)?$`)
+
+// See [BenchmarkDecimal] for details about the implementation.
+func trimDecimal(s string) string {
+	if !strings.Contains(s, ".") {
+		return s
+	}
+
+	matches := stringNumberRe.FindStringSubmatch(s)
+	if matches != nil {
+		// matches[1] is the captured integer part with sign
+		s = matches[1]
+
+		// handle special cases
+		switch s {
+		case "-", "+":
+			s += "0"
+		case "":
+			s = "0"
+		}
+
+		return s
+	}
+
+	return s
+}
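Because `parseInt` and `parseUint` run their input through `trimDecimal`, numeric strings with a fractional part are truncated toward zero rather than rejected. A small usage sketch, assuming this vendored version of `github.com/spf13/cast`:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// trimDecimal keeps the signed integer part, so these parse as ints.
	fmt.Println(cast.ToInt("8.31"))     // 8
	fmt.Println(cast.ToInt64("-3.999")) // -3

	// The generic entry point works for any type in the Number constraint.
	fmt.Println(cast.ToNumber[float32]("1.25")) // 1.25
}
```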
diff --git a/tools/vendor/github.com/spf13/cast/slice.go b/tools/vendor/github.com/spf13/cast/slice.go
new file mode 100644
index 000000000..e6a8328c6
--- /dev/null
+++ b/tools/vendor/github.com/spf13/cast/slice.go
@@ -0,0 +1,106 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// ToSliceE casts any value to a []any type.
+func ToSliceE(i any) ([]any, error) {
+	i, _ = indirect(i)
+
+	var s []any
+
+	switch v := i.(type) {
+	case []any:
+		// TODO: use slices.Clone
+		return append(s, v...), nil
+	case []map[string]any:
+		for _, u := range v {
+			s = append(s, u)
+		}
+
+		return s, nil
+	default:
+		return s, fmt.Errorf(errorMsg, i, i, s)
+	}
+}
+
+func toSliceE[T Basic](i any) ([]T, error) {
+	v, ok, err := toSliceEOk[T](i)
+	if err != nil {
+		return nil, err
+	}
+
+	if !ok {
+		return nil, fmt.Errorf(errorMsg, i, i, []T{})
+	}
+
+	return v, nil
+}
+
+func toSliceEOk[T Basic](i any) ([]T, bool, error) {
+	i, _ = indirect(i)
+	if i == nil {
+		return nil, true, fmt.Errorf(errorMsg, i, i, []T{})
+	}
+
+	switch v := i.(type) {
+	case []T:
+		// TODO: clone slice
+		return v, true, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]T, s.Len())
+
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToE[T](s.Index(j).Interface())
+			if err != nil {
+				return nil, true, fmt.Errorf(errorMsg, i, i, []T{})
+			}
+
+			a[j] = val
+		}
+
+		return a, true, nil
+	default:
+		return nil, false, nil
+	}
+}
+
+// ToStringSliceE casts any value to a []string type.
+func ToStringSliceE(i any) ([]string, error) {
+	if a, ok, err := toSliceEOk[string](i); ok {
+		if err != nil {
+			return nil, err
+		}
+
+		return a, nil
+	}
+
+	var a []string
+
+	switch v := i.(type) {
+	case string:
+		return strings.Fields(v), nil
+	case any:
+		str, err := ToStringE(v)
+		if err != nil {
+			return nil, fmt.Errorf(errorMsg, i, i, a)
+		}
+
+		return []string{str}, nil
+	default:
+		return nil, fmt.Errorf(errorMsg, i, i, a)
+	}
+}
diff --git a/tools/vendor/github.com/spf13/cast/time.go b/tools/vendor/github.com/spf13/cast/time.go
new file mode 100644
index 000000000..744cd5acc
--- /dev/null
+++ b/tools/vendor/github.com/spf13/cast/time.go
@@ -0,0 +1,116 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/spf13/cast/internal"
+)
+
+// ToTimeE casts any value to a [time.Time] type.
+func ToTimeE(i any) (time.Time, error) {
+	return ToTimeInDefaultLocationE(i, time.UTC)
+}
+
+// ToTimeInDefaultLocationE casts an empty interface to [time.Time],
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func ToTimeInDefaultLocationE(i any, location *time.Location) (tim time.Time, err error) {
+	i, _ = indirect(i)
+
+	switch v := i.(type) {
+	case time.Time:
+		return v, nil
+	case string:
+		return StringToDateInDefaultLocation(v, location)
+	case json.Number:
+		// Originally this used ToInt64E, but adding string float conversion broke ToTime.
+		// The behavior of ToTime would have changed if we continued using it.
+		// For now, using json.Number's own Int64 method should be good enough to preserve backwards compatibility.
+ v = json.Number(trimZeroDecimal(string(v))) + s, err1 := v.Int64() + if err1 != nil { + return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{}) + } + return time.Unix(s, 0), nil + case int: + return time.Unix(int64(v), 0), nil + case int32: + return time.Unix(int64(v), 0), nil + case int64: + return time.Unix(v, 0), nil + case uint: + return time.Unix(int64(v), 0), nil + case uint32: + return time.Unix(int64(v), 0), nil + case uint64: + return time.Unix(int64(v), 0), nil + case nil: + return time.Time{}, nil + default: + return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{}) + } +} + +// ToDurationE casts any value to a [time.Duration] type. +func ToDurationE(i any) (time.Duration, error) { + i, _ = indirect(i) + + switch s := i.(type) { + case time.Duration: + return s, nil + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + v, err := ToInt64E(s) + if err != nil { + // TODO: once there is better error handling, this should be easier + return 0, errors.New(strings.ReplaceAll(err.Error(), " int64", "time.Duration")) + } + + return time.Duration(v), nil + case float32, float64, float64EProvider, float64Provider: + v, err := ToFloat64E(s) + if err != nil { + // TODO: once there is better error handling, this should be easier + return 0, errors.New(strings.ReplaceAll(err.Error(), " float64", "time.Duration")) + } + + return time.Duration(v), nil + case string: + if !strings.ContainsAny(s, "nsuµmh") { + return time.ParseDuration(s + "ns") + } + + return time.ParseDuration(s) + case nil: + return time.Duration(0), nil + default: + if i, ok := resolveAlias(i); ok { + return ToDurationE(i) + } + + return 0, fmt.Errorf(errorMsg, i, i, time.Duration(0)) + } +} + +// StringToDate attempts to parse a string into a [time.Time] type using a +// predefined list of formats. +// +// If no suitable format is found, an error is returned. +func StringToDate(s string) (time.Time, error) { + return internal.ParseDateWith(s, time.UTC, internal.TimeFormats) +} + +// StringToDateInDefaultLocation casts an empty interface to a [time.Time], +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { + return internal.ParseDateWith(s, location, internal.TimeFormats) +} diff --git a/tools/vendor/github.com/spf13/cast/timeformattype_string.go b/tools/vendor/github.com/spf13/cast/timeformattype_string.go deleted file mode 100644 index 1524fc82c..000000000 --- a/tools/vendor/github.com/spf13/cast/timeformattype_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. - -package cast - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[timeFormatNoTimezone-0] - _ = x[timeFormatNamedTimezone-1] - _ = x[timeFormatNumericTimezone-2] - _ = x[timeFormatNumericAndNamedTimezone-3] - _ = x[timeFormatTimeOnly-4] -} - -const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" - -var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} - -func (i timeFormatType) String() string { - if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { - return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] -} diff --git a/tools/vendor/github.com/spf13/cast/zz_generated.go b/tools/vendor/github.com/spf13/cast/zz_generated.go new file mode 100644 index 000000000..ce3ec0f78 --- /dev/null +++ b/tools/vendor/github.com/spf13/cast/zz_generated.go @@ -0,0 +1,261 @@ +// Code generated by cast generator. DO NOT EDIT. + +package cast + +import "time" + +// ToBool casts any value to a(n) bool type. +func ToBool(i any) bool { + v, _ := ToBoolE(i) + return v +} + +// ToString casts any value to a(n) string type. +func ToString(i any) string { + v, _ := ToStringE(i) + return v +} + +// ToTime casts any value to a(n) time.Time type. +func ToTime(i any) time.Time { + v, _ := ToTimeE(i) + return v +} + +// ToTimeInDefaultLocation casts any value to a(n) time.Time type. +func ToTimeInDefaultLocation(i any, location *time.Location) time.Time { + v, _ := ToTimeInDefaultLocationE(i, location) + return v +} + +// ToDuration casts any value to a(n) time.Duration type. +func ToDuration(i any) time.Duration { + v, _ := ToDurationE(i) + return v +} + +// ToInt casts any value to a(n) int type. +func ToInt(i any) int { + v, _ := ToIntE(i) + return v +} + +// ToInt8 casts any value to a(n) int8 type. +func ToInt8(i any) int8 { + v, _ := ToInt8E(i) + return v +} + +// ToInt16 casts any value to a(n) int16 type. +func ToInt16(i any) int16 { + v, _ := ToInt16E(i) + return v +} + +// ToInt32 casts any value to a(n) int32 type. +func ToInt32(i any) int32 { + v, _ := ToInt32E(i) + return v +} + +// ToInt64 casts any value to a(n) int64 type. +func ToInt64(i any) int64 { + v, _ := ToInt64E(i) + return v +} + +// ToUint casts any value to a(n) uint type. +func ToUint(i any) uint { + v, _ := ToUintE(i) + return v +} + +// ToUint8 casts any value to a(n) uint8 type. +func ToUint8(i any) uint8 { + v, _ := ToUint8E(i) + return v +} + +// ToUint16 casts any value to a(n) uint16 type. +func ToUint16(i any) uint16 { + v, _ := ToUint16E(i) + return v +} + +// ToUint32 casts any value to a(n) uint32 type. +func ToUint32(i any) uint32 { + v, _ := ToUint32E(i) + return v +} + +// ToUint64 casts any value to a(n) uint64 type. +func ToUint64(i any) uint64 { + v, _ := ToUint64E(i) + return v +} + +// ToFloat32 casts any value to a(n) float32 type. +func ToFloat32(i any) float32 { + v, _ := ToFloat32E(i) + return v +} + +// ToFloat64 casts any value to a(n) float64 type. +func ToFloat64(i any) float64 { + v, _ := ToFloat64E(i) + return v +} + +// ToStringMapString casts any value to a(n) map[string]string type. +func ToStringMapString(i any) map[string]string { + v, _ := ToStringMapStringE(i) + return v +} + +// ToStringMapStringSlice casts any value to a(n) map[string][]string type. +func ToStringMapStringSlice(i any) map[string][]string { + v, _ := ToStringMapStringSliceE(i) + return v +} + +// ToStringMapBool casts any value to a(n) map[string]bool type. 
+func ToStringMapBool(i any) map[string]bool { + v, _ := ToStringMapBoolE(i) + return v +} + +// ToStringMapInt casts any value to a(n) map[string]int type. +func ToStringMapInt(i any) map[string]int { + v, _ := ToStringMapIntE(i) + return v +} + +// ToStringMapInt64 casts any value to a(n) map[string]int64 type. +func ToStringMapInt64(i any) map[string]int64 { + v, _ := ToStringMapInt64E(i) + return v +} + +// ToStringMap casts any value to a(n) map[string]any type. +func ToStringMap(i any) map[string]any { + v, _ := ToStringMapE(i) + return v +} + +// ToSlice casts any value to a(n) []any type. +func ToSlice(i any) []any { + v, _ := ToSliceE(i) + return v +} + +// ToBoolSlice casts any value to a(n) []bool type. +func ToBoolSlice(i any) []bool { + v, _ := ToBoolSliceE(i) + return v +} + +// ToStringSlice casts any value to a(n) []string type. +func ToStringSlice(i any) []string { + v, _ := ToStringSliceE(i) + return v +} + +// ToIntSlice casts any value to a(n) []int type. +func ToIntSlice(i any) []int { + v, _ := ToIntSliceE(i) + return v +} + +// ToInt64Slice casts any value to a(n) []int64 type. +func ToInt64Slice(i any) []int64 { + v, _ := ToInt64SliceE(i) + return v +} + +// ToUintSlice casts any value to a(n) []uint type. +func ToUintSlice(i any) []uint { + v, _ := ToUintSliceE(i) + return v +} + +// ToFloat64Slice casts any value to a(n) []float64 type. +func ToFloat64Slice(i any) []float64 { + v, _ := ToFloat64SliceE(i) + return v +} + +// ToDurationSlice casts any value to a(n) []time.Duration type. +func ToDurationSlice(i any) []time.Duration { + v, _ := ToDurationSliceE(i) + return v +} + +// ToBoolSliceE casts any value to a(n) []bool type. +func ToBoolSliceE(i any) ([]bool, error) { + return toSliceE[bool](i) +} + +// ToDurationSliceE casts any value to a(n) []time.Duration type. +func ToDurationSliceE(i any) ([]time.Duration, error) { + return toSliceE[time.Duration](i) +} + +// ToIntSliceE casts any value to a(n) []int type. +func ToIntSliceE(i any) ([]int, error) { + return toSliceE[int](i) +} + +// ToInt8SliceE casts any value to a(n) []int8 type. +func ToInt8SliceE(i any) ([]int8, error) { + return toSliceE[int8](i) +} + +// ToInt16SliceE casts any value to a(n) []int16 type. +func ToInt16SliceE(i any) ([]int16, error) { + return toSliceE[int16](i) +} + +// ToInt32SliceE casts any value to a(n) []int32 type. +func ToInt32SliceE(i any) ([]int32, error) { + return toSliceE[int32](i) +} + +// ToInt64SliceE casts any value to a(n) []int64 type. +func ToInt64SliceE(i any) ([]int64, error) { + return toSliceE[int64](i) +} + +// ToUintSliceE casts any value to a(n) []uint type. +func ToUintSliceE(i any) ([]uint, error) { + return toSliceE[uint](i) +} + +// ToUint8SliceE casts any value to a(n) []uint8 type. +func ToUint8SliceE(i any) ([]uint8, error) { + return toSliceE[uint8](i) +} + +// ToUint16SliceE casts any value to a(n) []uint16 type. +func ToUint16SliceE(i any) ([]uint16, error) { + return toSliceE[uint16](i) +} + +// ToUint32SliceE casts any value to a(n) []uint32 type. +func ToUint32SliceE(i any) ([]uint32, error) { + return toSliceE[uint32](i) +} + +// ToUint64SliceE casts any value to a(n) []uint64 type. +func ToUint64SliceE(i any) ([]uint64, error) { + return toSliceE[uint64](i) +} + +// ToFloat32SliceE casts any value to a(n) []float32 type. +func ToFloat32SliceE(i any) ([]float32, error) { + return toSliceE[float32](i) +} + +// ToFloat64SliceE casts any value to a(n) []float64 type. 
+func ToFloat64SliceE(i any) ([]float64, error) { + return toSliceE[float64](i) +} diff --git a/tools/vendor/github.com/spf13/cobra/.golangci.yml b/tools/vendor/github.com/spf13/cobra/.golangci.yml index 2c8f4808c..6acf8ab1e 100644 --- a/tools/vendor/github.com/spf13/cobra/.golangci.yml +++ b/tools/vendor/github.com/spf13/cobra/.golangci.yml @@ -12,14 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "2" + run: - deadline: 5m + timeout: 5m + +formatters: + enable: + - gofmt + - goimports linters: - disable-all: true + default: none enable: #- bodyclose - # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -30,28 +36,24 @@ linters: - goconst - gocritic #- gocyclo - - gofmt - - goimports - #- gomnd #- goprintffuncname - gosec - - gosimple - govet - ineffassign #- lll - misspell + #- mnd #- nakedret #- noctx - nolintlint #- rowserrcheck - #- scopelint - staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - - stylecheck - #- typecheck - unconvert #- unparam - unused - # - varcheck ! deprecated since v1.49.0; replaced by 'unused' #- whitespace - fast: false + exclusions: + presets: + - common-false-positives + - legacy + - std-error-handling diff --git a/tools/vendor/github.com/spf13/cobra/README.md b/tools/vendor/github.com/spf13/cobra/README.md index 71757151c..8416275f4 100644 --- a/tools/vendor/github.com/spf13/cobra/README.md +++ b/tools/vendor/github.com/spf13/cobra/README.md @@ -1,8 +1,14 @@ - -![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) +
+[cobra-logo]
+
 Cobra is a library for creating powerful modern CLI applications.
 
+Visit Cobra.dev for extensive documentation
+
 Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
 [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
 name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
 
@@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex
 [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
 [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
 [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
+
+Supported by:
+
+[Warp sponsorship](https://www.warp.dev/cobra)
+
+### [Warp, the AI terminal for devs](https://www.warp.dev/cobra)
+[Try Cobra in Warp today](https://www.warp.dev/cobra)
+
    # Overview diff --git a/tools/vendor/github.com/spf13/cobra/SECURITY.md b/tools/vendor/github.com/spf13/cobra/SECURITY.md new file mode 100644 index 000000000..54e60c28c --- /dev/null +++ b/tools/vendor/github.com/spf13/cobra/SECURITY.md @@ -0,0 +1,105 @@ +# Security Policy + +## Reporting a Vulnerability + +The `cobra` maintainers take security issues seriously and +we appreciate your efforts to _**responsibly**_ disclose your findings. +We will make every effort to swiftly respond and address concerns. + +To report a security vulnerability: + +1. **DO NOT** create a public GitHub issue for the vulnerability! +2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability! +3. Send an email to `cobra-security@googlegroups.com`. +4. Include the following details in your report: + - Description of the vulnerability + - Steps to reproduce + - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.) + - Any potential mitigations you've already identified +5. Allow up to 7 days for an initial response. + You should receive an acknowledgment of your report and an estimated timeline for a fix. +6. (Optional) If you have a fix and would like to contribute your patch, please work + directly with the maintainers via `cobra-security@googlegroups.com` to + coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change. + +## Response Process + +When a security vulnerability report is received, the `cobra` maintainers will: + +1. Confirm receipt of the vulnerability report within 7 days. +2. Assess the report to determine if it constitutes a security vulnerability. +3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it. +4. Develop and test a fix. +5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter. +6. Create a new GitHub Security Advisory to inform the broader Go ecosystem + +## Disclosure Policy + +The `cobra` maintainers follow a coordinated disclosure process: + +1. Security vulnerabilities will be addressed as quickly as possible. +2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities + that are within `cobra` itself. +3. Once a fix is ready, the maintainers will: + - Release a new version containing the fix. + - Update the security advisory with details about the vulnerability. + - Credit the reporter (unless they wish to remain anonymous). + - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter). + - Announce the vulnerability through appropriate channels + (GitHub Security Advisory, mailing lists, GitHub Releases, etc.) + +## Supported Versions + +Security fixes will typically only be released for the most recent major release. + +## Upstream Security Issues + +`cobra` generally will not accept vulnerability reports that originate in upstream +dependencies. I.e., if there is a problem in Go code that `cobra` depends on, +it is best to engage that project's maintainers and owners. + +This security policy primarily pertains only to `cobra` itself but if you believe you've +identified a problem that originates in an upstream dependency and is being widely +distributed by `cobra`, please follow the disclosure procedure above: the `cobra` +maintainers will work with you to determine the severity and ecosystem impact. 
+
+## Security Updates and CVEs
+
+Information about known security vulnerabilities and CVEs affecting `cobra` will
+be published as GitHub Security Advisories at
+https://github.com/spf13/cobra/security/advisories.
+
+All users are encouraged to watch the repository and upgrade promptly when
+security releases are published.
+
+## `cobra` Security Best Practices for Users
+
+When using `cobra` in your CLIs, the `cobra` maintainers recommend the following:
+
+1. Always use the latest version of `cobra`.
+2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management.
+3. Always use the latest possible version of Go.
+
+## Security Best Practices for Contributors
+
+When contributing to `cobra`:
+
+1. Be mindful of security implications when adding new features or modifying existing ones.
+2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI
+   (like Kubernetes, Docker, Prometheus, etc.)
+3. Write tests that explicitly cover edge cases and potential issues.
+4. If you discover a security issue while working on `cobra`, please report it
+   following the process above rather than opening a public pull request or issue that
+   addresses the vulnerability.
+5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa),
+   [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification),
+   etc.
+
+## Acknowledgments
+
+The `cobra` maintainers would like to thank all security researchers and
+community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!
+
+---
+
+*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.*
diff --git a/tools/vendor/github.com/spf13/cobra/command.go b/tools/vendor/github.com/spf13/cobra/command.go
index dbb2c298b..78088db69 100644
--- a/tools/vendor/github.com/spf13/cobra/command.go
+++ b/tools/vendor/github.com/spf13/cobra/command.go
@@ -39,7 +39,7 @@ const (
 )
 
 // FParseErrWhitelist configures Flag parse errors to be ignored
-type FParseErrWhitelist flag.ParseErrorsWhitelist
+type FParseErrWhitelist flag.ParseErrorsAllowlist
 
 // Group Structure to manage groups for commands
 type Group struct {
@@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`,
 		c.Printf("Unknown help topic %#q\n", args)
 		CheckErr(c.Root().Usage())
 	} else {
+		// Flow the context down to be used in help text
+		if cmd.ctx == nil {
+			cmd.ctx = c.ctx
+		}
+
 		cmd.InitDefaultHelpFlag()    // make possible 'help' flag to be shown
 		cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown
 		CheckErr(cmd.Help())
@@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error {
 	c.mergePersistentFlags()
 
 	// do it here after merging all flags and just before parse
-	c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+	c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist)
 
 	err := c.Flags().Parse(args)
 	// Print warnings if they occurred (e.g. deprecated flag messages).
@@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error {
 		fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages()))
 	}
 	if c.HasHelpSubCommands() {
-		fmt.Fprintf(w, "\n\nAdditional help topcis:")
+		fmt.Fprintf(w, "\n\nAdditional help topics:")
 		for _, subcmd := range c.Commands() {
 			if subcmd.IsAdditionalHelpTopicCommand() {
 				fmt.Fprintf(w, "\n  %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short)
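Since `FParseErrWhitelist` is now an alias of pflag's renamed `ParseErrorsAllowlist` type, existing cobra call sites keep compiling unchanged. A minimal sketch, assuming the `UnknownFlags` field carries over from the old `ParseErrorsWhitelist` struct:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "app",
		// Skip unknown flags instead of failing the parse; cobra copies
		// this into pflag's ParseErrorsAllowlist just before Parse runs.
		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("args:", args)
		},
	}

	_ = cmd.Execute()
}
```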
diff --git a/tools/vendor/github.com/spf13/cobra/completions.go b/tools/vendor/github.com/spf13/cobra/completions.go
index a1752f763..d3607c2d2 100644
--- a/tools/vendor/github.com/spf13/cobra/completions.go
+++ b/tools/vendor/github.com/spf13/cobra/completions.go
@@ -115,6 +115,13 @@ type CompletionOptions struct {
 	DisableDescriptions bool
 	// HiddenDefaultCmd makes the default 'completion' command hidden
 	HiddenDefaultCmd bool
+	// DefaultShellCompDirective sets the ShellCompDirective that is returned
+	// if no special directive can be determined
+	DefaultShellCompDirective *ShellCompDirective
+}
+
+func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) {
+	receiver.DefaultShellCompDirective = &directive
 }
 
 // Completion is a string that can be used for completions
@@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo
 	// Error while attempting to parse flags
 	if flagErr != nil {
 		// If error type is flagCompError and we don't want flagCompletion we should ignore the error
-		if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
+		if _, ok := flagErr.(*flagCompError); !ok || flagCompletion {
 			return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr
 		}
 	}
@@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo
 		}
 	} else {
 		directive = ShellCompDirectiveDefault
+		// check current and parent commands for a custom DefaultShellCompDirective
+		for cmd := finalCmd; cmd != nil; cmd = cmd.parent {
+			if cmd.CompletionOptions.DefaultShellCompDirective != nil {
+				directive = *cmd.CompletionOptions.DefaultShellCompDirective
+				break
+			}
+		}
+
 		if flag == nil {
 			foundLocalNonPersistentFlag := false
 			// If TraverseChildren is true on the root command we don't check for
@@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script.
 	// shell completion for it (prog __complete completion '')
 	subCmd, cmdArgs, err := c.Find(args)
 	if err != nil || subCmd.Name() != compCmdName &&
-		!(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) {
+		(subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) {
 		// The completion command is not being called or being completed so we remove it.
 		c.RemoveCommand(completionCmd)
 		return
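The new `DefaultShellCompDirective` option is consulted by the lookup loop above, walking from the final command up through its parents, so setting it once on the root changes the fallback for the whole command tree. A minimal sketch:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {},
	}

	// When no more specific directive can be determined, suppress file-name
	// completion instead of the usual ShellCompDirectiveDefault fallback.
	rootCmd.CompletionOptions.SetDefaultShellCompDirective(cobra.ShellCompDirectiveNoFileComp)

	_ = rootCmd.Execute()
}
```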
diff --git a/tools/vendor/github.com/spf13/pflag/README.md b/tools/vendor/github.com/spf13/pflag/README.md
index 7eacc5bdb..388c4e5ea 100644
--- a/tools/vendor/github.com/spf13/pflag/README.md
+++ b/tools/vendor/github.com/spf13/pflag/README.md
@@ -284,6 +284,33 @@ func main() {
 }
 ```
+### Using pflag with go test
+`pflag` does not parse the shorthand versions of go test's built-in flags (i.e., those starting with `-test.`).
+For more context, see issues [#63](https://github.com/spf13/pflag/issues/63) and [#238](https://github.com/spf13/pflag/issues/238).
+
+For example, if you use pflag in your `TestMain` function and call `pflag.Parse()` after defining your custom flags, running a test like this:
+```bash
+go test /your/tests -run ^YourTest -v --your-test-pflags
+```
+will result in the `-v` flag being ignored. This happens because of the way pflag handles flag parsing, skipping over go test's built-in shorthand flags.
+To work around this, you can use the `ParseSkippedFlags` function, which ensures that go test's flags are parsed separately using the standard flag package.
+
+**Example**: You want to parse go test flags that are otherwise ignored by `pflag.Parse()`
+```go
+import (
+	goflag "flag"
+	"os"
+
+	flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+	flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine)
+	flag.Parse()
+}
+```
+
 ## More info
 
 You can see the full reference documentation of the pflag package
diff --git a/tools/vendor/github.com/spf13/pflag/bool_func.go b/tools/vendor/github.com/spf13/pflag/bool_func.go
new file mode 100644
index 000000000..83d77afa8
--- /dev/null
+++ b/tools/vendor/github.com/spf13/pflag/bool_func.go
@@ -0,0 +1,40 @@
+package pflag
+
+// -- func Value
+type boolfuncValue func(string) error
+
+func (f boolfuncValue) Set(s string) error { return f(s) }
+
+func (f boolfuncValue) Type() string { return "boolfunc" }
+
+func (f boolfuncValue) String() string { return "" } // same behavior as stdlib 'flag' package
+
+func (f boolfuncValue) IsBoolFlag() bool { return true }
+
+// BoolFunc defines a func flag with specified name, callback function and usage string.
+//
+// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed
+// on the command line.
+func (f *FlagSet) BoolFunc(name string, usage string, fn func(string) error) {
+	f.BoolFuncP(name, "", usage, fn)
+}
+
+// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolFuncP(name, shorthand string, usage string, fn func(string) error) {
+	var val Value = boolfuncValue(fn)
+	flag := f.VarPF(val, name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// BoolFunc defines a func flag with specified name, callback function and usage string.
+//
+// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed
+// on the command line.
+func BoolFunc(name string, usage string, fn func(string) error) {
+	CommandLine.BoolFuncP(name, "", usage, fn)
+}
+
+// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash.
+func BoolFuncP(name, shorthand string, usage string, fn func(string) error) {
+	CommandLine.BoolFuncP(name, shorthand, usage, fn)
+}
diff --git a/tools/vendor/github.com/spf13/pflag/count.go b/tools/vendor/github.com/spf13/pflag/count.go
index a0b2679f7..d49c0143c 100644
--- a/tools/vendor/github.com/spf13/pflag/count.go
+++ b/tools/vendor/github.com/spf13/pflag/count.go
@@ -85,7 +85,7 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
 
 // Count defines a count flag with specified name, default value, and usage string.
 // The return value is the address of an int variable that stores the value of the flag.
-// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func Count(name string, usage string) *int { return CommandLine.CountP(name, "", usage) } diff --git a/tools/vendor/github.com/spf13/pflag/errors.go b/tools/vendor/github.com/spf13/pflag/errors.go new file mode 100644 index 000000000..ff11b66be --- /dev/null +++ b/tools/vendor/github.com/spf13/pflag/errors.go @@ -0,0 +1,149 @@ +package pflag + +import "fmt" + +// notExistErrorMessageType specifies which flavor of "flag does not exist" +// is printed by NotExistError. This allows the related errors to be grouped +// under a single NotExistError struct without making a breaking change to +// the error message text. +type notExistErrorMessageType int + +const ( + flagNotExistMessage notExistErrorMessageType = iota + flagNotDefinedMessage + flagNoSuchFlagMessage + flagUnknownFlagMessage + flagUnknownShorthandFlagMessage +) + +// NotExistError is the error returned when trying to access a flag that +// does not exist in the FlagSet. +type NotExistError struct { + name string + specifiedShorthands string + messageType notExistErrorMessageType +} + +// Error implements error. +func (e *NotExistError) Error() string { + switch e.messageType { + case flagNotExistMessage: + return fmt.Sprintf("flag %q does not exist", e.name) + + case flagNotDefinedMessage: + return fmt.Sprintf("flag accessed but not defined: %s", e.name) + + case flagNoSuchFlagMessage: + return fmt.Sprintf("no such flag -%v", e.name) + + case flagUnknownFlagMessage: + return fmt.Sprintf("unknown flag: --%s", e.name) + + case flagUnknownShorthandFlagMessage: + c := rune(e.name[0]) + return fmt.Sprintf("unknown shorthand flag: %q in -%s", c, e.specifiedShorthands) + } + + panic(fmt.Errorf("unknown flagNotExistErrorMessageType: %v", e.messageType)) +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *NotExistError) GetSpecifiedName() string { + return e.name +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *NotExistError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// ValueRequiredError is the error returned when a flag needs an argument but +// no argument was provided. +type ValueRequiredError struct { + flag *Flag + specifiedName string + specifiedShorthands string +} + +// Error implements error. +func (e *ValueRequiredError) Error() string { + if len(e.specifiedShorthands) > 0 { + c := rune(e.specifiedName[0]) + return fmt.Sprintf("flag needs an argument: %q in -%s", c, e.specifiedShorthands) + } + + return fmt.Sprintf("flag needs an argument: --%s", e.specifiedName) +} + +// GetFlag returns the flag for which the error occurred. +func (e *ValueRequiredError) GetFlag() *Flag { + return e.flag +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *ValueRequiredError) GetSpecifiedName() string { + return e.specifiedName +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. 
+func (e *ValueRequiredError) GetSpecifiedShortnames() string {
+	return e.specifiedShorthands
+}
+
+// InvalidValueError is the error returned when an invalid value is used
+// for a flag.
+type InvalidValueError struct {
+	flag  *Flag
+	value string
+	cause error
+}
+
+// Error implements error.
+func (e *InvalidValueError) Error() string {
+	flag := e.flag
+	var flagName string
+	if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+		flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+	} else {
+		flagName = fmt.Sprintf("--%s", flag.Name)
+	}
+	return fmt.Sprintf("invalid argument %q for %q flag: %v", e.value, flagName, e.cause)
+}
+
+// Unwrap implements errors.Unwrap.
+func (e *InvalidValueError) Unwrap() error {
+	return e.cause
+}
+
+// GetFlag returns the flag for which the error occurred.
+func (e *InvalidValueError) GetFlag() *Flag {
+	return e.flag
+}
+
+// GetValue returns the invalid value that was provided.
+func (e *InvalidValueError) GetValue() string {
+	return e.value
+}
+
+// InvalidSyntaxError is the error returned when a bad flag name is passed on
+// the command line.
+type InvalidSyntaxError struct {
+	specifiedFlag string
+}
+
+// Error implements error.
+func (e *InvalidSyntaxError) Error() string {
+	return fmt.Sprintf("bad flag syntax: %s", e.specifiedFlag)
+}
+
+// GetSpecifiedFlag returns the exact flag (with dashes) as it
+// appeared in the parsed arguments.
+func (e *InvalidSyntaxError) GetSpecifiedFlag() string {
+	return e.specifiedFlag
+}
diff --git a/tools/vendor/github.com/spf13/pflag/flag.go b/tools/vendor/github.com/spf13/pflag/flag.go
index 7c058de37..2fd3c5759 100644
--- a/tools/vendor/github.com/spf13/pflag/flag.go
+++ b/tools/vendor/github.com/spf13/pflag/flag.go
@@ -27,23 +27,32 @@ unaffected.
 Define flags using flag.String(), Bool(), Int(), etc.
 
 This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
 	var ip = flag.Int("flagname", 1234, "help message for flagname")
+
 If you like, you can bind the flag to a variable using the Var() functions.
+
 	var flagvar int
 	func init() {
 		flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
 	}
+
 Or you can create custom flags that satisfy the Value interface (with
 pointer receivers) and couple them to flag parsing by
+
 	flag.Var(&flagVal, "name", "help message for flagname")
+
 For such flags, the default value is just the initial value of the variable.
 
 After all flags are defined, call
+
 	flag.Parse()
+
 to parse the command line into the defined flags.
 
 Flags may then be used directly. If you're using the flags themselves,
 they are all pointers; if you bind to variables, they're values.
+
 	fmt.Println("ip has value ", *ip)
 	fmt.Println("flagvar has value ", flagvar)
 
@@ -54,22 +63,26 @@ The arguments are indexed from 0 through flag.NArg()-1.
 The pflag package also defines some new functions that are not in flag,
 that give one-letter shorthands for flags. You can use these by appending
 'P' to the name of any function that defines a flag.
+
 	var ip = flag.IntP("flagname", "f", 1234, "help message")
 	var flagvar bool
 	func init() {
 		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
 	}
	flag.VarP(&flagval, "varname", "v", "help message")
+
 Shorthand letters can be used with single dashes on the command line.
 Boolean shorthand flags can be combined with other shorthand flags.
 
 Command line flag syntax:
+
 	--flag    // boolean flags only
 	--flag=x
 
 Unlike the flag package, a single dash before an option means something
 different than a double dash.
Single dashes signify a series of shorthand letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags -f -abc @@ -124,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release. +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -145,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsAllowlist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -381,7 +404,7 @@ func (f *FlagSet) lookup(name NormalizedName) *Flag { func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { flag := f.Lookup(name) if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) + err := &NotExistError{name: name, messageType: flagNotDefinedMessage} return nil, err } @@ -411,7 +434,7 @@ func (f *FlagSet) ArgsLenAtDash() int { func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -427,7 +450,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -441,7 +464,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro func (f *FlagSet) MarkHidden(name string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } flag.Hidden = true return nil @@ -464,18 +487,16 @@ func (f *FlagSet) Set(name, value string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } err := flag.Value.Set(value) if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", 
flag.Name) + return &InvalidValueError{ + flag: flag, + value: value, + cause: err, } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } if !flag.Changed { @@ -501,7 +522,7 @@ func (f *FlagSet) SetAnnotation(name, key string, values []string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } if flag.Annotations == nil { flag.Annotations = map[string][]string{} @@ -538,7 +559,7 @@ func (f *FlagSet) PrintDefaults() { func (f *Flag) defaultIsZeroValue() bool { switch f.Value.(type) { case boolFlag: - return f.DefValue == "false" + return f.DefValue == "false" || f.DefValue == "" case *durationValue: // Beginning in Go 1.7, duration zero values are "0s" return f.DefValue == "0" || f.DefValue == "0s" @@ -551,7 +572,7 @@ func (f *Flag) defaultIsZeroValue() bool { case *intSliceValue, *stringSliceValue, *stringArrayValue: return f.DefValue == "[]" default: - switch f.Value.String() { + switch f.DefValue { case "false": return true case "": @@ -588,8 +609,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = flag.Value.Type() switch name { - case "bool": + case "bool", "boolfunc": name = "" + case "func": + name = "value" case "float64": name = "float" case "int64": @@ -707,7 +730,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { switch flag.Value.Type() { case "string": line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": + case "bool", "boolfunc": if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -911,12 +934,10 @@ func VarP(value Value, name, shorthand, usage string) { CommandLine.VarP(value, name, shorthand, usage) } -// failf prints to standard error a formatted error and usage message and +// fail prints an error message and usage message to standard error and // returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) +func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -934,9 +955,9 @@ func (f *FlagSet) usage() { } } -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) +// --unknown (args will be empty) +// --unknown --next-flag ... (args will be --next-flag ...) +// --unknown arg ... (args will be arg ...) func stripUnknownFlagValue(args []string) []string { if len(args) == 0 { //--unknown @@ -960,7 +981,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = args name := s[2:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) + err = f.fail(&InvalidSyntaxError{specifiedFlag: s}) return } @@ -974,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... 
// we do not want to lose arg in this case if len(split) >= 2 { @@ -982,7 +1005,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin return stripUnknownFlagValue(a), nil default: - err = f.failf("unknown flag: --%s", name) + err = f.fail(&NotExistError{name: name, messageType: flagUnknownFlagMessage}) return } } @@ -1000,13 +1023,16 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = a[1:] } else { // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: name, + }) return } err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1014,7 +1040,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { outArgs = args - if strings.HasPrefix(shorthands, "test.") { + if isGotestShorthandFlag(shorthands) { return } @@ -1029,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' // we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1039,7 +1067,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = stripUnknownFlagValue(outArgs) return default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + err = f.fail(&NotExistError{ + name: string(c), + specifiedShorthands: shorthands, + messageType: flagUnknownShorthandFlagMessage, + }) return } } @@ -1062,7 +1094,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = args[1:] } else { // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: string(c), + specifiedShorthands: shorthands, + }) return } @@ -1072,7 +1108,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1135,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true - if len(arguments) < 0 { + f.args = make([]string, 0, len(arguments)) + + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1151,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1177,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/tools/vendor/github.com/spf13/pflag/func.go b/tools/vendor/github.com/spf13/pflag/func.go new file mode 100644 index 000000000..9f4d88f27 --- /dev/null +++ b/tools/vendor/github.com/spf13/pflag/func.go @@ -0,0 +1,37 @@ +package pflag + +// -- func Value +type funcValue func(string) error + +func (f funcValue) Set(s string) error { return f(s) } + 
+func (f funcValue) Type() string { return "func" } + +func (f funcValue) String() string { return "" } // same behavior as stdlib 'flag' package + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func (f *FlagSet) Func(name string, usage string, fn func(string) error) { + f.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) FuncP(name string, shorthand string, usage string, fn func(string) error) { + var val Value = funcValue(fn) + f.VarP(val, name, shorthand, usage) +} + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func Func(name string, usage string, fn func(string) error) { + CommandLine.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func FuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.FuncP(name, shorthand, usage, fn) +} diff --git a/tools/vendor/github.com/spf13/pflag/golangflag.go b/tools/vendor/github.com/spf13/pflag/golangflag.go index d3dd72b7f..e62eab538 100644 --- a/tools/vendor/github.com/spf13/pflag/golangflag.go +++ b/tools/vendor/github.com/spf13/pflag/golangflag.go @@ -8,8 +8,18 @@ import ( goflag "flag" "reflect" "strings" + "time" ) +// go test flags prefixes +func isGotestFlag(flag string) bool { + return strings.HasPrefix(flag, "-test.") +} + +func isGotestShorthandFlag(flag string) bool { + return strings.HasPrefix(flag, "test.") +} + // flagValueWrapper implements pflag.Value around a flag.Value. The main // difference here is the addition of the Type method that returns a string // name of the type. As this is generally unknown, we approximate that with @@ -103,3 +113,49 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { } f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } + +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + +// ParseSkippedFlags explicitly Parses go test flags (i.e. 
those starting with '-test.') with goflag.Parse(),
+// since by default those are skipped by pflag.Parse().
+// Typical usage example: `ParseSkippedFlags(os.Args[1:], goflag.CommandLine)`
+func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error {
+	var skippedFlags []string
+	for _, f := range osArgs {
+		if isGotestFlag(f) {
+			skippedFlags = append(skippedFlags, f)
+		}
+	}
+	return goFlagSet.Parse(skippedFlags)
+}
+
diff --git a/tools/vendor/github.com/spf13/pflag/ipnet_slice.go b/tools/vendor/github.com/spf13/pflag/ipnet_slice.go
index 6b541aa87..c6e89da18 100644
--- a/tools/vendor/github.com/spf13/pflag/ipnet_slice.go
+++ b/tools/vendor/github.com/spf13/pflag/ipnet_slice.go
@@ -73,7 +73,7 @@ func (s *ipNetSliceValue) String() string {
 
 func ipNetSliceConv(val string) (interface{}, error) {
 	val = strings.Trim(val, "[]")
-	// Emtpy string would cause a slice with one (empty) entry
+	// Empty string would cause a slice with one (empty) entry
 	if len(val) == 0 {
 		return []net.IPNet{}, nil
 	}
diff --git a/tools/vendor/github.com/spf13/pflag/string_to_string.go b/tools/vendor/github.com/spf13/pflag/string_to_string.go
index 890a01afc..1d1e3bf91 100644
--- a/tools/vendor/github.com/spf13/pflag/string_to_string.go
+++ b/tools/vendor/github.com/spf13/pflag/string_to_string.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/csv"
 	"fmt"
+	"sort"
 	"strings"
 )
 
@@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string {
 }
 
 func (s *stringToStringValue) String() string {
+	keys := make([]string, 0, len(*s.value))
+	for k := range *s.value {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
 	records := make([]string, 0, len(*s.value)>>1)
-	for k, v := range *s.value {
+	for _, k := range keys {
+		v := (*s.value)[k]
 		records = append(records, k+"="+v)
 	}
 
diff --git a/tools/vendor/github.com/spf13/pflag/text.go b/tools/vendor/github.com/spf13/pflag/text.go
new file mode 100644
index 000000000..886d5a3d8
--- /dev/null
+++ b/tools/vendor/github.com/spf13/pflag/text.go
@@ -0,0 +1,81 @@
+package pflag
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+)
+
+// following is copied from go 1.23.4 flag.go
+type textValue struct{ p encoding.TextUnmarshaler }
+
+func newTextValue(val encoding.TextMarshaler, p encoding.TextUnmarshaler) textValue {
+	ptrVal := reflect.ValueOf(p)
+	if ptrVal.Kind() != reflect.Ptr {
+		panic("variable value type must be a pointer")
+	}
+	defVal := reflect.ValueOf(val)
+	if defVal.Kind() == reflect.Ptr {
+		defVal = defVal.Elem()
+	}
+	if defVal.Type() != ptrVal.Type().Elem() {
+		panic(fmt.Sprintf("default type does not match variable type: %v != %v", defVal.Type(), ptrVal.Type().Elem()))
+	}
+	ptrVal.Elem().Set(defVal)
+	return textValue{p}
+}
+
+func (v textValue) Set(s string) error {
+	return v.p.UnmarshalText([]byte(s))
+}
+
+func (v textValue) Get() interface{} {
+	return v.p
+}
+
+func (v textValue) String() string {
+	if m, ok := v.p.(encoding.TextMarshaler); ok {
+		if b, err := m.MarshalText(); err == nil {
+			return string(b)
+		}
+	}
+	return ""
+}
+
+//end of copy
+
+func (v textValue) Type() string {
+	return reflect.ValueOf(v.p).Type().Name()
+}
+
+// GetText sets out, which must implement encoding.TextUnmarshaler, to the value of the flag with the given name
+func (f *FlagSet) GetText(name string, out encoding.TextUnmarshaler) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag accessed but not defined: %s", name)
+	}
+	if flag.Value.Type() != reflect.TypeOf(out).Name() {
+		return fmt.Errorf("trying to get %s value of flag of type %s",
reflect.TypeOf(out).Name(), flag.Value.Type()) + } + return out.UnmarshalText([]byte(flag.Value.String())) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func (f *FlagSet) TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, shorthand, usage) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, shorthand, usage) +} diff --git a/tools/vendor/github.com/spf13/pflag/time.go b/tools/vendor/github.com/spf13/pflag/time.go new file mode 100644 index 000000000..3dee42479 --- /dev/null +++ b/tools/vendor/github.com/spf13/pflag/time.go @@ -0,0 +1,124 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// TimeValue adapts time.Time for use as a flag. +type timeValue struct { + *time.Time + formats []string +} + +func newTimeValue(val time.Time, p *time.Time, formats []string) *timeValue { + *p = val + return &timeValue{ + Time: p, + formats: formats, + } +} + +// Set time.Time value from string based on accepted formats. +func (d *timeValue) Set(s string) error { + s = strings.TrimSpace(s) + for _, f := range d.formats { + v, err := time.Parse(f, s) + if err != nil { + continue + } + *d.Time = v + return nil + } + + formatsString := "" + for i, f := range d.formats { + if i > 0 { + formatsString += ", " + } + formatsString += fmt.Sprintf("`%s`", f) + } + + return fmt.Errorf("invalid time format `%s` must be one of: %s", s, formatsString) +} + +// Type name for time.Time flags. 
+func (d *timeValue) Type() string {
+	return "time"
+}
+
+func (d *timeValue) String() string {
+	if d.Time.IsZero() {
+		return ""
+	} else {
+		return d.Time.Format(time.RFC3339Nano)
+	}
+}
+
+// GetTime returns the time value of the flag with the given name
+func (f *FlagSet) GetTime(name string) (time.Time, error) {
+	flag := f.Lookup(name)
+	if flag == nil {
+		err := fmt.Errorf("flag accessed but not defined: %s", name)
+		return time.Time{}, err
+	}
+
+	if flag.Value.Type() != "time" {
+		err := fmt.Errorf("trying to get %s value of flag of type %s", "time", flag.Value.Type())
+		return time.Time{}, err
+	}
+
+	val, ok := flag.Value.(*timeValue)
+	if !ok {
+		return time.Time{}, fmt.Errorf("value %s is not a time", flag.Value)
+	}
+
+	return *val.Time, nil
+}
+
+// TimeVar defines a time.Time flag with specified name, default value, and usage string.
+// The argument p points to a time.Time variable in which to store the value of the flag.
+func (f *FlagSet) TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) {
+	f.TimeVarP(p, name, "", value, formats, usage)
+}
+
+// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) {
+	f.VarP(newTimeValue(value, p, formats), name, shorthand, usage)
+}
+
+// TimeVar defines a time.Time flag with specified name, default value, and usage string.
+// The argument p points to a time.Time variable in which to store the value of the flag.
+func TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) {
+	CommandLine.TimeVarP(p, name, "", value, formats, usage)
+}
+
+// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash.
+func TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) {
+	CommandLine.VarP(newTimeValue(value, p, formats), name, shorthand, usage)
+}
+
+// Time defines a time.Time flag with specified name, default value, and usage string.
+// The return value is the address of a time.Time variable that stores the value of the flag.
+func (f *FlagSet) Time(name string, value time.Time, formats []string, usage string) *time.Time {
+	return f.TimeP(name, "", value, formats, usage)
+}
+
+// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time {
+	p := new(time.Time)
+	f.TimeVarP(p, name, shorthand, value, formats, usage)
+	return p
+}
+
+// Time defines a time.Time flag with specified name, default value, and usage string.
+// The return value is the address of a time.Time variable that stores the value of the flag.
+func Time(name string, value time.Time, formats []string, usage string) *time.Time {
+	return CommandLine.TimeP(name, "", value, formats, usage)
+}
+
+// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash.
+func TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, shorthand, value, formats, usage) +} diff --git a/tools/vendor/github.com/spf13/viper/.editorconfig b/tools/vendor/github.com/spf13/viper/.editorconfig index 1f664d13a..faef0c91e 100644 --- a/tools/vendor/github.com/spf13/viper/.editorconfig +++ b/tools/vendor/github.com/spf13/viper/.editorconfig @@ -16,3 +16,6 @@ indent_style = tab [*.nix] indent_size = 2 + +[.golangci.yaml] +indent_size = 2 diff --git a/tools/vendor/github.com/spf13/viper/.golangci.yaml b/tools/vendor/github.com/spf13/viper/.golangci.yaml index 474f41633..bed0b83ec 100644 --- a/tools/vendor/github.com/spf13/viper/.golangci.yaml +++ b/tools/vendor/github.com/spf13/viper/.golangci.yaml @@ -1,105 +1,118 @@ -run: - timeout: 5m +version: "2" -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/viper) - gocritic: - # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage. - enabled-tags: - - diagnostic - - experimental - - opinionated - - style - disabled-checks: - - importShadow - - unnamedResult - goimports: - local-prefixes: github.com/spf13/viper +run: + timeout: 5m linters: - disable-all: true - enable: - - bodyclose - - dogsled - - dupl - - durationcheck - - exhaustive - - gci - - gocritic - - godot - - gofmt - - gofumpt - - goimports - - gomoddirectives - - goprintffuncname - - govet - - importas - - ineffassign - - makezero - - misspell - - nakedret - - nilerr - - noctx - - nolintlint - - prealloc - - predeclared - - revive - - rowserrcheck - - sqlclosecheck - - staticcheck - - stylecheck - - tparallel - - typecheck - - unconvert - - unparam - - unused - - wastedassign - - whitespace + enable: + - bodyclose + - dogsled + - dupl + - durationcheck + - exhaustive + - gocritic + - godot + - gomoddirectives + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - tparallel + - unconvert + - unparam + - unused + - wastedassign + - whitespace - # fixme - # - cyclop - # - errcheck - # - errorlint - # - exhaustivestruct - # - forbidigo - # - forcetypeassert - # - gochecknoglobals - # - gochecknoinits - # - gocognit - # - goconst - # - gocyclo - # - gosec - # - gosimple - # - ifshort - # - lll - # - nlreturn - # - paralleltest - # - scopelint - # - thelper - # - wrapcheck + # fixme + # - cyclop + # - errcheck + # - errorlint + # - exhaustivestruct + # - forbidigo + # - forcetypeassert + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocyclo + # - gosec + # - gosimple + # - ifshort + # - lll + # - nlreturn + # - paralleltest + # - scopelint + # - thelper + # - wrapcheck - # unused - # - depguard - # - goheader - # - gomodguard + # unused + # - depguard + # - goheader + # - gomodguard - # deprecated - # - deadcode - # - structcheck - # - varcheck + # don't enable: + # - asciicheck + # - funlen + # - godox + # - goerr113 + # - gomnd + # - interfacer + # - maligned + # - nestif + # - testpackage + # - wsl - # don't enable: - # - asciicheck - # - funlen - # - godox - # - goerr113 - # - gomnd - # - interfacer - # - maligned - # - nestif - # - testpackage - # - wsl + exclusions: + rules: + - linters: + - errcheck + - noctx + path: _test.go + presets: + - comments + - std-error-handling + + settings: + misspell: + locale: US + 
nolintlint:
+      allow-unused: false # report any unused nolint directives
+      require-specific: false # don't require nolint directives to be specific about which linter is being skipped
+    gocritic:
+      # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage.
+      enabled-tags:
+        - diagnostic
+        - experimental
+        - opinionated
+        - style
+      disabled-checks:
+        - importShadow
+        - unnamedResult
+
+formatters:
+  enable:
+    - gci
+    - gofmt
+    - gofumpt
+    - goimports
+    # - golines
+
+  settings:
+    gci:
+      sections:
+        - standard
+        - default
+        - localmodule
diff --git a/tools/vendor/github.com/spf13/viper/README.md b/tools/vendor/github.com/spf13/viper/README.md
index 769a5d900..7a4c0fc30 100644
--- a/tools/vendor/github.com/spf13/viper/README.md
+++ b/tools/vendor/github.com/spf13/viper/README.md
@@ -12,7 +12,7 @@
 [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI)
 [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper)
-![Go Version](https://img.shields.io/badge/go%20version-%3E=1.21-61CFDD.svg?style=flat-square)
+![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square)
 [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper)
 
 **Go configuration with fangs!**
@@ -821,7 +821,7 @@ You can use your favorite format's marshaller with the config returned by `AllSe
 
 ```go
 import (
-	yaml "gopkg.in/yaml.v2"
+	yaml "go.yaml.in/yaml/v3"
 	// ...
 )
 
diff --git a/tools/vendor/github.com/spf13/viper/UPDATES.md b/tools/vendor/github.com/spf13/viper/UPGRADE.md
similarity index 79%
rename from tools/vendor/github.com/spf13/viper/UPDATES.md
rename to tools/vendor/github.com/spf13/viper/UPGRADE.md
index ccf413ed7..a33c965a4 100644
--- a/tools/vendor/github.com/spf13/viper/UPDATES.md
+++ b/tools/vendor/github.com/spf13/viper/UPGRADE.md
@@ -83,6 +83,27 @@ v := viper.NewWithOptions(
 )
 ```
 
+### BREAKING: "github.com/mitchellh/mapstructure" dependency replaced
+
+The original [mapstructure](https://github.com/mitchellh/mapstructure) has been [archived](https://github.com/mitchellh/mapstructure/issues/349) and was replaced with a [fork](https://github.com/go-viper/mapstructure) maintained by Viper ([#1723](https://github.com/spf13/viper/pull/1723)).
+
+As a result, the package import path needs to be changed in cases where `mapstructure` is directly referenced in your code.
+ +For example, when providing a custom decoder config: + +```go +err := viper.Unmarshal(&appConfig, func(config *mapstructure.DecoderConfig) { + config.TagName = "yaml" +}) +``` + +The change is fairly straightforward, just replace all occurrences of the import path `github.com/mitchellh/mapstructure` with `github.com/go-viper/mapstructure/v2`: + +```diff +- import "github.com/mitchellh/mapstructure" ++ import "github.com/go-viper/mapstructure/v2" +``` + ### BREAKING: HCL, Java properties, INI removed from core In order to reduce third-party dependencies, Viper dropped support for the following formats from the core: diff --git a/tools/vendor/github.com/spf13/viper/flake.lock b/tools/vendor/github.com/spf13/viper/flake.lock index d76dfbddd..0b8cfb5a8 100644 --- a/tools/vendor/github.com/spf13/viper/flake.lock +++ b/tools/vendor/github.com/spf13/viper/flake.lock @@ -2,30 +2,32 @@ "nodes": { "cachix": { "inputs": { - "devenv": "devenv_2", + "devenv": [ + "devenv" + ], "flake-compat": [ + "devenv" + ], + "git-hooks": [ "devenv", - "flake-compat" + "git-hooks" ], "nixpkgs": [ "devenv", "nixpkgs" - ], - "pre-commit-hooks": [ - "devenv", - "pre-commit-hooks" ] }, "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "lastModified": 1748883665, + "narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=", "owner": "cachix", "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "rev": "f707778d902af4d62d8dd92c269f8e70de09acbe", "type": "github" }, "original": { "owner": "cachix", + "ref": "latest", "repo": "cachix", "type": "github" } @@ -33,52 +35,21 @@ "devenv": { "inputs": { "cachix": "cachix", - "flake-compat": "flake-compat_2", - "nix": "nix_2", - "nixpkgs": "nixpkgs_2", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1724763216, - "narHash": "sha256-oW2bwCrJpIzibCNK6zfIDaIQw765yMAuMSG2gyZfGv0=", - "owner": "cachix", - "repo": "devenv", - "rev": "1e4ef61205b9aa20fe04bf1c468b6a316281c4f1", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "flake-compat": [ - "devenv", - "cachix", - "flake-compat" - ], + "flake-compat": "flake-compat", + "git-hooks": "git-hooks", "nix": "nix", - "nixpkgs": "nixpkgs", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ - "devenv", - "cachix", - "pre-commit-hooks" - ] + "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "lastModified": 1755257397, + "narHash": "sha256-VU+OHexL2y6y7yrpEc6bZvYYwoQg6aZK1b4YxT0yZCk=", "owner": "cachix", "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "rev": "6f9c3d4722aa253631644329f7bda60b1d3d1b97", "type": "github" }, "original": { "owner": "cachix", - "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -86,27 +57,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": 
"sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { @@ -117,14 +72,18 @@ }, "flake-parts": { "inputs": { - "nixpkgs-lib": "nixpkgs-lib" + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] }, "locked": { - "lastModified": 1722555600, - "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", + "lastModified": 1733312601, + "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", + "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", "type": "github" }, "original": { @@ -133,39 +92,47 @@ "type": "github" } }, - "flake-utils": { + "flake-parts_2": { "inputs": { - "systems": "systems" + "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "lastModified": 1754487366, + "narHash": "sha256-pHYj8gUBapuUzKV/kN/tR3Zvqc7o6gdFB9XKXIp1SQ8=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "af66ad14b28a127c5c0f3bbb298218fc63528a18", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "hercules-ci", + "repo": "flake-parts", "type": "github" } }, - "flake-utils_2": { + "git-hooks": { "inputs": { - "systems": "systems_2" + "flake-compat": [ + "devenv", + "flake-compat" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] }, "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "lastModified": 1750779888, + "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "cachix", + "repo": "git-hooks.nix", "type": "github" } }, @@ -173,7 +140,7 @@ "inputs": { "nixpkgs": [ "devenv", - "pre-commit-hooks", + "git-hooks", "nixpkgs" ] }, @@ -192,165 +159,49 @@ } }, "nix": { - "inputs": { - "flake-compat": "flake-compat", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], + "flake-parts": "flake-parts", + "git-hooks-nix": [ + "devenv", + "git-hooks" + ], 
"nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression_2" + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" + ] }, "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", + "lastModified": 1755029779, + "narHash": "sha256-3+GHIYGg4U9XKUN4rg473frIVNn8YD06bjwxKS1IPrU=", + "owner": "cachix", "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "rev": "b0972b0eee6726081d10b1199f54de6d2917f861", "type": "github" }, "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", + "owner": "cachix", + "ref": "devenv-2.30", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1722555339, - "narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" - }, - "original": { - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1710695816, - "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "614b4613980a522ba49f0d194531beddbb7220d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1713361204, - "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "lastModified": 1750441195, + "narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=", "owner": "cachix", "repo": "devenv-nixpkgs", - "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "rev": "0ceffe312871b443929ff3006960d29b120dc627", "type": "github" }, "original": { @@ -360,110 +211,42 @@ "type": "github" } }, - "nixpkgs_3": { - "locked": { - "lastModified": 1724748588, - "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", - "nixpkgs": 
[ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, + "nixpkgs-lib": { "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "lastModified": 1753579242, + "narHash": "sha256-zvaMGVn14/Zz8hnp4VWT9xVnhc8vuL3TStRqwk22biA=", "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "repo": "nixpkgs.lib", + "rev": "0f36c44e01a6129be94e3ade315a5883f0228a6e", "type": "github" }, "original": { "owner": "nix-community", - "repo": "poetry2nix", + "repo": "nixpkgs.lib", "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils_2", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, + "nixpkgs_2": { "locked": { - "lastModified": 1713775815, - "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "lastModified": 1755268003, + "narHash": "sha256-nNaeJjo861wFR0tjHDyCnHs1rbRtrMgxAKMoig9Sj/w=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "32f313e49e42f715491e1ea7b306a87c16fe0388", "type": "github" }, "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_3" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" + "flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_2" } } }, diff --git a/tools/vendor/github.com/spf13/viper/flake.nix b/tools/vendor/github.com/spf13/viper/flake.nix index 52ad7d581..a16b2e3a7 100644 --- a/tools/vendor/github.com/spf13/viper/flake.nix +++ b/tools/vendor/github.com/spf13/viper/flake.nix @@ -7,51 +7,55 @@ devenv.url = "github:cachix/devenv"; }; - outputs = inputs@{ flake-parts, ... }: + outputs = + inputs@{ flake-parts, ... }: flake-parts.lib.mkFlake { inherit inputs; } { imports = [ inputs.devenv.flakeModule ]; - systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.go_1_23; - }; - - pre-commit.hooks = { - nixpkgs-fmt.enable = true; - yamllint.enable = true; - }; - - packages = with pkgs; [ - gnumake - - golangci-lint - yamllint - ]; + systems = [ + "x86_64-linux" + "x86_64-darwin" + "aarch64-darwin" + ]; - scripts = { - versions.exec = '' - go version - golangci-lint version + perSystem = + { pkgs, ... 
}: + { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + git-hooks.hooks = { + nixpkgs-fmt.enable = true; + yamllint.enable = true; + }; + + packages = with pkgs; [ + gnumake + + golangci-lint + yamllint + ]; + + scripts = { + versions.exec = '' + go version + golangci-lint version + ''; + }; + + enterShell = '' + versions ''; - }; - - enterShell = '' - versions - ''; - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; }; - - ci = devenv.shells.default; }; - }; }; } diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go index 036879249..a7a839fd9 100644 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ b/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -1,6 +1,6 @@ package yaml -import "gopkg.in/yaml.v3" +import "go.yaml.in/yaml/v3" // Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. type Codec struct{} diff --git a/tools/vendor/github.com/spf13/viper/remote.go b/tools/vendor/github.com/spf13/viper/remote.go index bdde7de26..46f26721d 100644 --- a/tools/vendor/github.com/spf13/viper/remote.go +++ b/tools/vendor/github.com/spf13/viper/remote.go @@ -219,7 +219,10 @@ func (v *Viper) watchKeyValueConfigOnChannel() error { for { b := <-rc reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) + err := v.unmarshalReader(reader, v.kvstore) + if err != nil { + v.logger.Error(fmt.Errorf("failed to unmarshal remote config: %w", err).Error()) + } } }(respc) return nil diff --git a/tools/vendor/github.com/spf13/viper/util.go b/tools/vendor/github.com/spf13/viper/util.go index 2a08074bc..d08ed4621 100644 --- a/tools/vendor/github.com/spf13/viper/util.go +++ b/tools/vendor/github.com/spf13/viper/util.go @@ -174,10 +174,7 @@ func parseSizeInBytes(sizeStr string) uint { } } - size := cast.ToInt(sizeStr) - if size < 0 { - size = 0 - } + size := max(cast.ToInt(sizeStr), 0) return safeMul(uint(size), multiplier) } diff --git a/tools/vendor/github.com/spf13/viper/viper.go b/tools/vendor/github.com/spf13/viper/viper.go index f900e58b1..34a94798b 100644 --- a/tools/vendor/github.com/spf13/viper/viper.go +++ b/tools/vendor/github.com/spf13/viper/viper.go @@ -376,7 +376,12 @@ func (v *Viper) WatchConfig() { } } }() - watcher.Add(configDir) + err = watcher.Add(configDir) + if err != nil { + v.logger.Error(fmt.Sprintf("failed to add watcher: %s", err)) + initWG.Done() + return + } initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on... eventsWG.Wait() // now, wait for event loop to end in this go-routine... 
}() @@ -1181,11 +1186,26 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) any { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return res + case "boolSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToBoolSlice(res) case "intSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "uintSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToUintSlice(res) + case "float64Slice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToFloat64Slice(res) case "durationSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") @@ -1268,11 +1288,26 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) any { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return res + case "boolSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToBoolSlice(res) case "intSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "uintSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToUintSlice(res) + case "float64Slice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToFloat64Slice(res) case "stringToString": return stringToStringConv(flag.ValueString()) case "stringToInt": @@ -1535,27 +1570,29 @@ func (v *Viper) MergeInConfig() error { func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } func (v *Viper) ReadConfig(in io.Reader) error { - if v.configType == "" { - return errors.New("cannot decode configuration: config type is not set") + config := make(map[string]any) + + err := v.unmarshalReader(in, config) + if err != nil { + return err } - v.config = make(map[string]any) - return v.unmarshalReader(in, v.config) + v.config = config + + return nil } // MergeConfig merges a new configuration with an existing config. func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } func (v *Viper) MergeConfig(in io.Reader) error { - if v.configType == "" { - return errors.New("cannot decode configuration: config type is not set") - } + config := make(map[string]any) - cfg := make(map[string]any) - if err := v.unmarshalReader(in, cfg); err != nil { + if err := v.unmarshalReader(in, config); err != nil { return err } - return v.MergeConfigMap(cfg) + + return v.MergeConfigMap(config) } // MergeConfigMap merges the configuration from the map given with an existing config. 
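The `ReadConfig`/`MergeConfig` hunks above drop the up-front "config type is not set" check and stop clobbering `v.config` when decoding fails; the type check now lives in `unmarshalReader`, shown in the next hunk. A minimal sketch of the calling pattern this supports; the YAML payload and key name are illustrative, not taken from the patch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// The config type must still be determinable; a raw reader carries
	// no file extension, so set it explicitly.
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewBufferString("port: 8080\n")); err != nil {
		panic(err) // with this patch, v.config is left untouched on decode errors
	}
	fmt.Println(v.GetInt("port"))
}
```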
@@ -1662,15 +1699,24 @@ func (v *Viper) writeConfig(filename string, force bool) error { } func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { - buf := new(bytes.Buffer) - buf.ReadFrom(in) - format := strings.ToLower(v.getConfigType()) + if format == "" { + return errors.New("cannot decode configuration: unable to determine config type") + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(in) + if err != nil { + return fmt.Errorf("failed to read configuration from input: %w", err) + } + // TODO: remove this once SupportedExts is deprecated/removed if !slices.Contains(SupportedExts, format) { return UnsupportedConfigError(format) } + // TODO: return [UnsupportedConfigError] if the registry does not contain the format + // TODO: consider deprecating this error type decoder, err := v.decoderRegistry.Decoder(format) if err != nil { return ConfigParseError{err} diff --git a/tools/vendor/github.com/stefanberger/go-pkcs11uri/.gitignore b/tools/vendor/github.com/stefanberger/go-pkcs11uri/.gitignore new file mode 100644 index 000000000..1823f5d48 --- /dev/null +++ b/tools/vendor/github.com/stefanberger/go-pkcs11uri/.gitignore @@ -0,0 +1,2 @@ +*~ +pkcs11uri diff --git a/tools/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml b/tools/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml new file mode 100644 index 000000000..45c00cb9c --- /dev/null +++ b/tools/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml @@ -0,0 +1,25 @@ +dist: bionic +language: go + +os: +- linux + +go: + - "1.19.x" + +matrix: + include: + - os: linux + +addons: + apt: + packages: + - softhsm2 + +install: + - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.53.2 + +script: + - make + - make check + - make test diff --git a/tools/vendor/github.com/docker/go-metrics/LICENSE b/tools/vendor/github.com/stefanberger/go-pkcs11uri/LICENSE similarity index 94% rename from tools/vendor/github.com/docker/go-metrics/LICENSE rename to tools/vendor/github.com/stefanberger/go-pkcs11uri/LICENSE index 8f3fee627..49cc83d2e 100644 --- a/tools/vendor/github.com/docker/go-metrics/LICENSE +++ b/tools/vendor/github.com/stefanberger/go-pkcs11uri/LICENSE @@ -175,17 +175,3 @@ of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/tools/vendor/github.com/stefanberger/go-pkcs11uri/Makefile b/tools/vendor/github.com/stefanberger/go-pkcs11uri/Makefile new file mode 100644 index 000000000..1a1051524 --- /dev/null +++ b/tools/vendor/github.com/stefanberger/go-pkcs11uri/Makefile @@ -0,0 +1,28 @@ +# Copyright IBM Corporation, 2020 + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: check build test
+
+all: build
+
+FORCE:
+
+check:
+	golangci-lint run
+
+build:
+	go build ./...
+
+test:
+	go test ./... -test.v
diff --git a/tools/vendor/github.com/stefanberger/go-pkcs11uri/README.md b/tools/vendor/github.com/stefanberger/go-pkcs11uri/README.md
new file mode 100644
index 000000000..c1fc6e911
--- /dev/null
+++ b/tools/vendor/github.com/stefanberger/go-pkcs11uri/README.md
@@ -0,0 +1,99 @@
+# go-pkcs11uri
+
+Welcome to the go-pkcs11uri library. The implementation follows [RFC 7512](https://tools.ietf.org/html/rfc7512) and this [errata](https://www.rfc-editor.org/errata/rfc7512).
+
+# Example usage:
+
+The following example builds on the library [here](https://github.com/miekg/pkcs11) and uses softhsm2 on Fedora.
+
+## Example
+
+This example program extends the one found [here](https://github.com/miekg/pkcs11/blob/master/README.md#examples):
+
+```
+package main
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	"github.com/miekg/pkcs11"
+	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
+)
+
+func main() {
+	if len(os.Args) < 2 {
+		panic("Missing pkcs11 URI argument")
+	}
+	uristr := os.Args[1]
+
+	uri := pkcs11uri.New()
+	err := uri.Parse(uristr)
+	if err != nil {
+		panic(err)
+	}
+
+	module, err := uri.GetModule()
+	if err != nil {
+		panic(err)
+	}
+
+	slot, ok := uri.GetPathAttribute("slot-id", false)
+	if !ok {
+		panic("No slot-id in pkcs11 URI")
+	}
+	slotid, err := strconv.Atoi(slot)
+	if err != nil {
+		panic(err)
+	}
+
+	pin, err := uri.GetPIN()
+	if err != nil {
+		panic(err)
+	}
+
+	p := pkcs11.New(module)
+	err = p.Initialize()
+	if err != nil {
+		panic(err)
+	}
+
+	defer p.Destroy()
+	defer p.Finalize()
+
+	session, err := p.OpenSession(uint(slotid), pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+	if err != nil {
+		panic(err)
+	}
+	defer p.CloseSession(session)
+
+	err = p.Login(session, pkcs11.CKU_USER, pin)
+	if err != nil {
+		panic(err)
+	}
+	defer p.Logout(session)
+
+	p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)})
+	hash, err := p.Digest(session, []byte("this is a string"))
+	if err != nil {
+		panic(err)
+	}
+
+	for _, d := range hash {
+		fmt.Printf("%x", d)
+	}
+	fmt.Println()
+}
+```
+
+## Example Usage
+
+```
+$ sudo softhsm2-util --init-token --slot 1 --label test --pin 1234 --so-pin 1234
+The token has been initialized and is reassigned to slot 2053753261
+$ go build ./...
+$ sudo ./pkcs11-example 'pkcs11:slot-id=2053753261?module-path=/usr/lib64/pkcs11/libsofthsm2.so&pin-value=1234'
+517592df8fec3ad146a79a9af153db2a4d784ec5
+```
+
diff --git a/tools/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go b/tools/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go
new file mode 100644
index 000000000..82c32e3c8
--- /dev/null
+++ b/tools/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go
@@ -0,0 +1,484 @@
+/*
+   (c) Copyright IBM Corporation, 2020
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11uri
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Pkcs11URI holds a pkcs11 URI object
+type Pkcs11URI struct {
+	// path and query attributes may have custom attributes that either
+	// have to be in the query or in the path part, so we use two maps
+	pathAttributes  map[string]string
+	queryAttributes map[string]string
+	// directories to search for pkcs11 modules
+	moduleDirectories []string
+	// file paths of allowed pkcs11 modules
+	allowedModulePaths []string
+	// whether any module is allowed to be loaded
+	allowAnyModule bool
+	// A map of environment variables needed by the pkcs11 module using this URI.
+	// This map is not needed by this implementation but is there for convenience.
+	env map[string]string
+}
+
+// upper character hex digits needed for pct-encoding
+const hex = "0123456789ABCDEF"
+
+// escapeAll pct-escapes all characters in the string
+func escapeAll(s string) string {
+	res := make([]byte, len(s)*3)
+	j := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		res[j] = '%'
+		res[j+1] = hex[c>>4]
+		res[j+2] = hex[c&0xf]
+		j += 3
+	}
+	return string(res)
+}
+
+// escape pct-escapes the path and query part of the pkcs11 URI following the different rules of the
+// path and query part as described in RFC 7512 sec. 2.3
+func escape(s string, isPath bool) string {
+	res := make([]byte, len(s)*3)
+	j := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		// unreserved per RFC 3986 sec. 2.3
+		if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') {
+			res[j] = c
+		} else if isPath && c == '&' {
+			res[j] = c
+		} else if !isPath && (c == '/' || c == '?' || c == '|') {
+			res[j] = c
+		} else {
+			switch c {
+			case '-', '.', '_', '~': // unreserved per RFC 3986 sec. 2.3
+				res[j] = c
+			case ':', '[', ']', '@', '!', '$', '\'', '(', ')', '*', '+', ',', '=':
+				res[j] = c
+			default:
+				res[j] = '%'
+				res[j+1] = hex[c>>4]
+				res[j+2] = hex[c&0xf]
+				j += 2
+			}
+		}
+		j++
+	}
+	return string(res[:j])
+}
+
+// New creates a new Pkcs11URI object
+func New() *Pkcs11URI {
+	return &Pkcs11URI{
+		pathAttributes:  make(map[string]string),
+		queryAttributes: make(map[string]string),
+		env:             make(map[string]string),
+	}
+}
+
+func (uri *Pkcs11URI) setAttribute(attrMap map[string]string, name, value string) error {
+	v, err := url.PathUnescape(value)
+	if err != nil {
+		return err
+	}
+	attrMap[name] = v
+	return nil
+}
+
+// GetPathAttribute returns the value of a path attribute in unescaped form or
+// pct-encoded form
+func (uri *Pkcs11URI) GetPathAttribute(name string, pctencode bool) (string, bool) {
+	v, ok := uri.pathAttributes[name]
+	if ok && pctencode {
+		v = escape(v, true)
+	}
+	return v, ok
+}
+
+// SetPathAttribute sets the value for a path attribute; this function may return an error
+// if the given value cannot be pct-unescaped
+func (uri *Pkcs11URI) SetPathAttribute(name, value string) error {
+	return uri.setAttribute(uri.pathAttributes, name, value)
+}
+
+// SetPathAttributeUnencoded sets the value for a path attribute given as byte[].
+// The value must not have been pct-encoded already.
+func (uri *Pkcs11URI) SetPathAttributeUnencoded(name string, value []byte) {
+	uri.pathAttributes[name] = string(value)
+}
+
+// AddPathAttribute adds a path attribute; it returns an error if an attribute with the same
+// name already existed or if the given value cannot be pct-unescaped
+func (uri *Pkcs11URI) AddPathAttribute(name, value string) error {
+	if _, ok := uri.pathAttributes[name]; ok {
+		return errors.New("duplicate path attribute")
+	}
+	return uri.SetPathAttribute(name, value)
+}
+
+// AddPathAttributeUnencoded adds a path attribute given as byte[] which must not already be pct-encoded;
+// it returns an error if an attribute with the same name already existed
+func (uri *Pkcs11URI) AddPathAttributeUnencoded(name string, value []byte) error {
+	if _, ok := uri.pathAttributes[name]; ok {
+		return errors.New("duplicate path attribute")
+	}
+	uri.SetPathAttributeUnencoded(name, value)
+	return nil
+}
+
+// RemovePathAttribute removes a path attribute
+func (uri *Pkcs11URI) RemovePathAttribute(name string) {
+	delete(uri.pathAttributes, name)
+}
+
+// AddEnv adds an environment variable for the pkcs11 module
+func (uri *Pkcs11URI) AddEnv(name, value string) {
+	uri.env[name] = value
+}
+
+// SetEnvMap sets the environment variables for the pkcs11 module
+func (uri *Pkcs11URI) SetEnvMap(env map[string]string) {
+	uri.env = env
+}
+
+// GetEnvMap returns the map of environment variables
+func (uri *Pkcs11URI) GetEnvMap() map[string]string {
+	return uri.env
+}
+
+// GetQueryAttribute returns the value of a query attribute in unescaped or
+// pct-encoded form
+func (uri *Pkcs11URI) GetQueryAttribute(name string, pctencode bool) (string, bool) {
+	v, ok := uri.queryAttributes[name]
+	if ok && pctencode {
+		v = escape(v, false)
+	}
+	return v, ok
+}
+
+// SetQueryAttribute sets the value for a query attribute; this function may return an error
+// if the given value cannot be pct-unescaped
+func (uri *Pkcs11URI) SetQueryAttribute(name, value string) error {
+	return uri.setAttribute(uri.queryAttributes, name, value)
+}
+
+// SetQueryAttributeUnencoded sets the value for a query attribute given as byte[].
+// The value must not have been pct-encoded already.
+func (uri *Pkcs11URI) SetQueryAttributeUnencoded(name string, value []byte) {
+	uri.queryAttributes[name] = string(value)
+}
+
+// AddQueryAttribute adds a query attribute; it returns an error if an attribute with the same
+// name already existed or if the given value cannot be pct-unescaped
+func (uri *Pkcs11URI) AddQueryAttribute(name, value string) error {
+	if _, ok := uri.queryAttributes[name]; ok {
+		return errors.New("duplicate query attribute")
+	}
+	return uri.SetQueryAttribute(name, value)
+}
+
+// AddQueryAttributeUnencoded adds a query attribute given as byte[] which must not already be pct-encoded;
+// it returns an error if an attribute with the same name already existed
+func (uri *Pkcs11URI) AddQueryAttributeUnencoded(name string, value []byte) error {
+	if _, ok := uri.queryAttributes[name]; ok {
+		return errors.New("duplicate query attribute")
+	}
+	uri.SetQueryAttributeUnencoded(name, value)
+	return nil
+}
+
+// RemoveQueryAttribute removes a query attribute
+func (uri *Pkcs11URI) RemoveQueryAttribute(name string) {
+	delete(uri.queryAttributes, name)
+}
+
+// Validate validates a Pkcs11URI object's attributes following RFC 7512 rules and proper formatting of
+// their values
+func (uri *Pkcs11URI) Validate() error {
+	/* RFC 7512: 2.3 */
+	/* slot-id should be DIGIT, but we go for number */
+	if v, ok := uri.pathAttributes["slot-id"]; ok {
+		if _, err := strconv.Atoi(v); err != nil {
+			return fmt.Errorf("slot-id must be a number: %s", v)
+		}
+	}
+
+	/* library-version should be 1*DIGIT [ "." 1*DIGIT ]; allow NUMBERS for DIGIT */
+	if v, ok := uri.pathAttributes["library-version"]; ok {
+		m, err := regexp.Match("^[0-9]+(\\.[0-9]+)?$", []byte(v))
+		if err != nil || !m {
+			return fmt.Errorf("Invalid format for library-version '%s'", v)
+		}
+	}
+
+	if v, ok := uri.pathAttributes["type"]; ok {
+		m, err := regexp.Match("^(public|private|cert|secret-key|data)?$", []byte(v))
+		if err != nil || !m {
+			return fmt.Errorf("Invalid type '%s'", v)
+		}
+	}
+
+	/* RFC 7512: 2.4 */
+	_, ok1 := uri.queryAttributes["pin-source"]
+	_, ok2 := uri.queryAttributes["pin-value"]
+	if ok1 && ok2 {
+		return errors.New("URI must not contain pin-source and pin-value")
+	}
+
+	if v, ok := uri.queryAttributes["module-path"]; ok {
+		if !filepath.IsAbs(v) {
+			return fmt.Errorf("path %s of module-path attribute must be absolute", v)
+		}
+	}
+
+	return nil
+}
+
+// HasPIN allows the user to check whether a PIN has been provided either by the pin-value or the pin-source
+// attributes. It should be called before GetPIN(), which may still fail, e.g. when getting the PIN from a file.
+func (uri *Pkcs11URI) HasPIN() bool {
+	_, ok := uri.queryAttributes["pin-value"]
+	if ok {
+		return true
+	}
+	_, ok = uri.queryAttributes["pin-source"]
+	return ok
+}
+
+// GetPIN gets the PIN from either the pin-value or pin-source attribute; a user may want to call HasPIN()
+// before calling this function to determine whether a PIN has been provided at all so that an error code
+// returned by this function indicates that the PIN value could not be retrieved.
+func (uri *Pkcs11URI) GetPIN() (string, error) {
+	if v, ok := uri.queryAttributes["pin-value"]; ok {
+		return v, nil
+	}
+	if v, ok := uri.queryAttributes["pin-source"]; ok {
+		pinuri, err := url.ParseRequestURI(v)
+		if err != nil {
+			return "", fmt.Errorf("Could not parse pin-source: %s ", err)
+		}
+		switch pinuri.Scheme {
+		case "", "file":
+			if !filepath.IsAbs(pinuri.Path) {
+				return "", fmt.Errorf("PIN URI path '%s' is not absolute", pinuri.Path)
+			}
+			pin, err := os.ReadFile(pinuri.Path)
+			if err != nil {
+				return "", fmt.Errorf("Could not open PIN file: %s", err)
+			}
+			return string(pin), nil
+		default:
+			return "", fmt.Errorf("PIN URI scheme %s is not supported", pinuri.Scheme)
+		}
+	}
+	return "", fmt.Errorf("Neither pin-source nor pin-value are available")
+}
+
+// Parse parses a pkcs11: URI string
+func (uri *Pkcs11URI) Parse(uristring string) error {
+	if !strings.HasPrefix(uristring, "pkcs11:") {
+		return errors.New("Malformed pkcs11 URI: missing pkcs11: prefix")
+	}
+
+	parts := strings.SplitN(uristring[7:], "?", 2)
+
+	uri.pathAttributes = make(map[string]string)
+	uri.queryAttributes = make(map[string]string)
+
+	if len(parts[0]) > 0 {
+		/* parse path part */
+		for _, part := range strings.Split(parts[0], ";") {
+			p := strings.SplitN(part, "=", 2)
+			if len(p) != 2 {
+				return errors.New("Malformed pkcs11 URI: malformed path attribute")
+			}
+			if err := uri.AddPathAttribute(p[0], p[1]); err != nil {
+				return fmt.Errorf("Malformed pkcs11 URI: %s", err)
+			}
+		}
+	}
+
+	if len(parts) == 2 {
+		/* parse query part */
+		for _, part := range strings.Split(parts[1], "&") {
+			p := strings.SplitN(part, "=", 2)
+			if len(p) != 2 {
+				return errors.New("Malformed pkcs11 URI: malformed query attribute")
+			}
+			if err := uri.AddQueryAttribute(p[0], p[1]); err != nil {
+				return fmt.Errorf("Malformed pkcs11 URI: %s", err)
+			}
+		}
+	}
+	return uri.Validate()
+}
+
+// formatAttributes formats attributes and escapes their values as needed
+func formatAttributes(attrMap map[string]string, ispath bool) string {
+	res := ""
+	for key, value := range attrMap {
+		switch key {
+		case "id":
+			/* id is always pct-encoded */
+			value = escapeAll(value)
+		default:
+			if ispath {
+				value = escape(value, true)
+			} else {
+				value = escape(value, false)
+			}
+		}
+		if len(res) > 0 {
+			if ispath {
+				res += ";"
+			} else {
+				res += "&"
+			}
+		}
+		res += key + "=" + value
+	}
+	return res
+}
+
+// Format formats a Pkcs11URI to its string representation
+func (uri *Pkcs11URI) Format() (string, error) {
+	if err := uri.Validate(); err != nil {
+		return "", err
+	}
+	result := "pkcs11:" + formatAttributes(uri.pathAttributes, true)
+	if len(uri.queryAttributes) > 0 {
+		result += "?" + formatAttributes(uri.queryAttributes, false)
+	}
+	return result, nil
+}
+
+// SetModuleDirectories sets the search directories for pkcs11 modules
+func (uri *Pkcs11URI) SetModuleDirectories(moduleDirectories []string) {
+	uri.moduleDirectories = moduleDirectories
+}
+
+// GetModuleDirectories gets the search directories for pkcs11 modules
+func (uri *Pkcs11URI) GetModuleDirectories() []string {
+	return uri.moduleDirectories
+}
+
+// SetAllowedModulePaths sets allowed module paths to restrict access to modules.
+// Directory entries must end with a '/', all other ones are assumed to be file entries.
+// Allowed modules are filtered by string matching.
+func (uri *Pkcs11URI) SetAllowedModulePaths(allowedModulePaths []string) {
+	uri.allowedModulePaths = allowedModulePaths
+}
+
+// SetAllowAnyModule allows any module to be loaded; by default this is not allowed
+func (uri *Pkcs11URI) SetAllowAnyModule(allowAnyModule bool) {
+	uri.allowAnyModule = allowAnyModule
+}
+
+func (uri *Pkcs11URI) isAllowedPath(path string, allowedPaths []string) bool {
+	if uri.allowAnyModule {
+		return true
+	}
+	for _, allowedPath := range allowedPaths {
+		if allowedPath == path {
+			// exact filename match
+			return true
+		}
+		if allowedPath[len(allowedPath)-1] == '/' && strings.HasPrefix(path, allowedPath) {
+			// allowedPath is a directory entry; no match in a subdirectory is allowed
+			idx := strings.IndexRune(path[len(allowedPath):], os.PathSeparator)
+			if idx < 0 {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// GetModule returns the module to use or an error in case no module could be found.
+// First the module-path is checked for whether it holds an absolute path that can be read
+// by the current user. If this is the case the module is returned. Otherwise either the module-path
+// is used or the user-provided module path is used to match a module containing what is set in the
+// attribute module-name.
+func (uri *Pkcs11URI) GetModule() (string, error) {
+	var searchdirs []string
+	v, ok := uri.queryAttributes["module-path"]
+
+	if ok {
+		info, err := os.Stat(v)
+		if err != nil {
+			return "", fmt.Errorf("module-path '%s' is not accessible", v)
+		}
+		if err == nil && info.Mode().IsRegular() {
+			// it's a file
+			if uri.isAllowedPath(v, uri.allowedModulePaths) {
+				return v, nil
+			}
+			return "", fmt.Errorf("module-path '%s' is not allowed by policy", v)
+		}
+		if !info.IsDir() {
+			return "", fmt.Errorf("module-path '%s' points to an invalid file type", v)
+		}
+		// v is a directory
+		searchdirs = []string{v}
+	} else {
+		searchdirs = uri.GetModuleDirectories()
+	}
+
+	moduleName, ok := uri.queryAttributes["module-name"]
+	if !ok {
+		return "", fmt.Errorf("module-name attribute is not set")
+	}
+	moduleName = strings.ToLower(moduleName)
+
+	for _, dir := range searchdirs {
+		files, err := os.ReadDir(dir)
+		if err != nil {
+			continue
+		}
+		for _, file := range files {
+			fileLower := strings.ToLower(file.Name())
+
+			i := strings.Index(fileLower, moduleName)
+			if i < 0 {
+				continue
+			}
+			// we require that fileLower ends with moduleName or that
+			// a suffix follows so that softhsm will not match libsofthsm2.so but only
+			// libsofthsm.so
+			if len(fileLower) == i+len(moduleName) || fileLower[i+len(moduleName)] == '.'
{ + f := filepath.Join(dir, file.Name()) + if uri.isAllowedPath(f, uri.allowedModulePaths) { + return f, nil + } + return "", fmt.Errorf("module '%s' is not allowed by policy", f) + } + } + } + return "", fmt.Errorf("No module could be found") +} diff --git a/tools/vendor/github.com/stoewer/go-strcase/.golangci.yml b/tools/vendor/github.com/stoewer/go-strcase/.golangci.yml index 7f98d55c4..0e75d86ae 100644 --- a/tools/vendor/github.com/stoewer/go-strcase/.golangci.yml +++ b/tools/vendor/github.com/stoewer/go-strcase/.golangci.yml @@ -1,26 +1,19 @@ -run: - deadline: 10m +version: "2" linters: enable: - - dupl - - goconst - - gocyclo - - godox - - gosec - - interfacer - - lll - - maligned - - misspell - - prealloc - - stylecheck - - unconvert - - unparam - - errcheck - - golint - - gofmt - disable: [] - fast: false + - dupl + - goconst + - gocyclo + - godox + - gosec + - lll + - misspell + - prealloc + - staticcheck + - unconvert + - unparam -issues: - exclude-use-default: false +formatters: + enable: + - gofmt diff --git a/tools/vendor/github.com/stoewer/go-strcase/camel.go b/tools/vendor/github.com/stoewer/go-strcase/camel.go index ff9e66e0c..7a9bec7c1 100644 --- a/tools/vendor/github.com/stoewer/go-strcase/camel.go +++ b/tools/vendor/github.com/stoewer/go-strcase/camel.go @@ -30,6 +30,9 @@ func camelCase(s string, upper bool) string { } else if isUpper(prev) && isUpper(curr) && isLower(next) { // Assume a case like "R" for "XRequestId" buffer = append(buffer, curr) + } else if isUpper(curr) && isDigit(prev) { + // Preserve uppercase letters after numbers + buffer = append(buffer, curr) } else { buffer = append(buffer, toLower(curr)) } diff --git a/tools/vendor/github.com/stoewer/go-strcase/helper.go b/tools/vendor/github.com/stoewer/go-strcase/helper.go index ecad58914..96e79d6e1 100644 --- a/tools/vendor/github.com/stoewer/go-strcase/helper.go +++ b/tools/vendor/github.com/stoewer/go-strcase/helper.go @@ -38,6 +38,12 @@ func isSpace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } +// isDigit checks if a character is a digit. More precisely it evaluates if it is +// in the range of ASCII characters '0' to '9'. +func isDigit(ch rune) bool { + return ch >= '0' && ch <= '9' +} + // isDelimiter checks if a character is some kind of whitespace or '_' or '-'. func isDelimiter(ch rune) bool { return ch == '-' || ch == '_' || isSpace(ch) diff --git a/tools/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/tools/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 7e19eba09..ffb24e8e3 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/tools/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) 
} // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) } // Less asserts that the first element is less than the second @@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) } // Positive asserts that the specified element is positive @@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { @@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) 
} return true diff --git a/tools/vendor/github.com/stretchr/testify/assert/assertion_format.go b/tools/vendor/github.com/stretchr/testify/assert/assertion_format.go index 190634165..c592f6ad5 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/tools/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. +// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/tools/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/tools/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 21629087b..58db92845 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/tools/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". 
+// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg return NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). 
+// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/tools/vendor/github.com/stretchr/testify/assert/assertion_order.go b/tools/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1d2f71824..2fdf80fdd 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/tools/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { diff --git a/tools/vendor/github.com/stretchr/testify/assert/assertions.go b/tools/vendor/github.com/stretchr/testify/assert/assertions.go index 4e91332bb..de8de0cb6 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/tools/vendor/github.com/stretchr/testify/assert/assertions.go @@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/ // of each stack frame leading from the current test to the assert call that // failed. func CallerInfo() []string { - var pc uintptr - var ok bool var file string var line int var name string + const stackFrameBufferSize = 10 + pcs := make([]uintptr, stackFrameBufferSize) + callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } + offset := 1 - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } + for { + n := runtime.Callers(offset, pcs) - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. 
- if name == "testing.tRunner" { + if n == 0 { break } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + frames := runtime.CallersFrames(pcs[:n]) + + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break } - } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break + } } + + // Next batch + offset += cap(pcs) } return callers @@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. +// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. 
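The IsNotType assertion added above is the negative counterpart of IsType: both compare the dynamic types of their two arguments, so a value and a pointer to that value count as different types. A brief usage sketch, with MyStruct and NotMyStruct as placeholder types:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type MyStruct struct{}
type NotMyStruct struct{}

func TestTypeAssertions(t *testing.T) {
	assert.IsType(t, &MyStruct{}, &MyStruct{})       // passes: same dynamic type
	assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) // passes: different types
	assert.IsNotType(t, MyStruct{}, &MyStruct{})     // passes: value vs. pointer type
}
```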
@@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b if !same { // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) } return true @@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} same, ok := samePointers(expected, actual) if !ok { - //fails when the arguments are not pointers + // fails when the arguments are not pointers return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) } if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) } return true } @@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false, false //not both are pointers + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) @@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) + return isEmptyValue(reflect.ValueOf(object)) +} +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. +func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty switch objValue.Kind() { // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // non-nil pointers are empty if the value they point to is empty case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) + return isEmptyValue(objValue.Elem()) } + return false } -// Empty asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. @@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. @@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { default: return r.MatchString(fmt.Sprint(v)) } - } // Regexp asserts that a specified regexp matches a string. @@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } @@ -1964,6 +2028,9 @@ type CollectT struct { errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) @@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time var lastFinishedTickErrs []error ch := make(chan *CollectT, 1) + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } + timer := time.NewTimer(waitFor) defer timer.Stop() ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) 
- case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect - }() - condition(collect) - }() + case <-tickC: + tickC = nil + go checkCond() case collect := <-ch: if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. lastFinishedTickErrs = collect.errors - tick = ticker.C + tickC = ticker.C } } } @@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) + } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } @@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa return true } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", target, chain, + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) 
+ } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/tools/vendor/github.com/stretchr/testify/assert/doc.go b/tools/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d3..a0b953aa5 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/doc.go +++ b/tools/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. +// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/tools/vendor/github.com/stretchr/testify/assert/http_assertions.go b/tools/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7c..5a6bb75f2 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/tools/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go index baa0cc7d7..5a74c4f4d 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go +++ b/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -1,5 +1,4 @@ //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default -// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default // Package yaml is an implementation of YAML functions that calls a pluggable implementation. // diff --git a/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go index b83c6cf64..0bae80e34 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go +++ b/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -1,5 +1,4 @@ //go:build !testify_yaml_fail && !testify_yaml_custom -// +build !testify_yaml_fail,!testify_yaml_custom // Package yaml is just an indirection to handle YAML deserialization. 
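The new `unwrapAll` walks both single-wrap (`Unwrap() error`) and multi-wrap (`Unwrap() []error`, e.g. `errors.Join`) chains, which `buildErrorChainString` then renders one entry per line with optional `%T` type information. A standalone sketch of the traversal using only the standard library:

```go
package main

import (
	"errors"
	"fmt"
)

// unwrapAll mirrors the helper above: it flattens an error and everything
// it wraps, recursing through both unwrap interfaces.
func unwrapAll(err error) (errs []error) {
	errs = append(errs, err)
	switch x := err.(type) {
	case interface{ Unwrap() error }:
		if inner := x.Unwrap(); inner != nil {
			errs = append(errs, unwrapAll(inner)...)
		}
	case interface{ Unwrap() []error }:
		for _, inner := range x.Unwrap() {
			errs = append(errs, unwrapAll(inner)...)
		}
	}
	return
}

func main() {
	base := errors.New("disk full")
	joined := errors.Join(fmt.Errorf("write failed: %w", base), errors.New("fsync failed"))

	// Print each chain entry the way buildErrorChainString does with withType=true.
	for _, e := range unwrapAll(joined) {
		fmt.Printf("%q (%T)\n", e.Error(), e)
	}
}
```

The old implementation used `errors.Unwrap` in a loop, which silently stopped at the first joined error; recursing through `Unwrap() []error` is what makes `ErrorIs`/`ErrorAs` failure messages show the full tree.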
// diff --git a/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go index e78f7dfe6..8041803fd 100644 --- a/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go +++ b/tools/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -1,5 +1,4 @@ //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default -// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default // Package yaml is an implementation of YAML functions that always fail. // diff --git a/tools/vendor/github.com/stretchr/testify/require/doc.go b/tools/vendor/github.com/stretchr/testify/require/doc.go index 968434724..c8e3f94a8 100644 --- a/tools/vendor/github.com/stretchr/testify/require/doc.go +++ b/tools/vendor/github.com/stretchr/testify/require/doc.go @@ -23,6 +23,8 @@ // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. +// A consequence of this is that it must be called from the goroutine running +// the test function, not from other goroutines created during the test. // // Every assertion function also takes an optional string message as the final argument, // allowing custom error messages to be appended to the message the assertion method outputs. diff --git a/tools/vendor/github.com/stretchr/testify/require/require.go b/tools/vendor/github.com/stretchr/testify/require/require.go index d8921950d..2d02f9bce 100644 --- a/tools/vendor/github.com/stretchr/testify/require/require.go +++ b/tools/vendor/github.com/stretchr/testify/require/require.go @@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string t.FailNow() } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). 
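The rewritten `Empty` documentation above enumerates several distinct rules: zero values, all-zero arrays, zero-length slices/maps/channels, and pointers that are nil or point at an "empty" value. A small illustrative test, assuming no behavior beyond what those comments state:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Sketch of the "empty" rules documented above.
func TestEmptyRules(t *testing.T) {
	require.Empty(t, "")                // zero value
	require.Empty(t, 0)                 // zero value
	require.Empty(t, []int{})           // zero-length slice
	require.Empty(t, map[string]int{})  // zero-length map
	require.Empty(t, [2]int{0, 0})      // array whose elements are all zero

	zero := 0
	require.Empty(t, &zero) // pointer to an "empty" value

	require.NotEmpty(t, []int{0}) // non-zero length, so NOT empty
}
```

Note the goroutine caveat added to the `require` package docs above also applies here: because `require` calls `t.FailNow()`, this test must run these assertions from the test goroutine itself.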
// -// actualObj, err := SomeFunction() -// if require.Error(t, err) { -// require.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// require.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if require.Errorf(t, err, "error message %s", "formatted") { -// require.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// require.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf t.FailNow() } +// IsNotType asserts that the specified objects are not of the same type. +// +// require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotType(t, theType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotTypef(t, theType, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. +// +// require.IsType(t, &MyStruct{}, &MyStruct{}) func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs } // IsTypef asserts that the specified objects are of the same type. +// +// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str t.FailNow() } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if require.NotEmpty(t, obj) { // require.Equal(t, "two", obj[1]) @@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if require.NotEmptyf(t, obj, "error message %s", "formatted") { // require.Equal(t, "two", obj[1]) @@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// require.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// require.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/tools/vendor/github.com/stretchr/testify/require/require_forward.go b/tools/vendor/github.com/stretchr/testify/require/require_forward.go index 1bd87304f..e6f7e9446 100644 --- a/tools/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/tools/vendor/github.com/stretchr/testify/require/require_forward.go @@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
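`IsNotType`/`IsNotTypef` are new in this release and, like `IsType`, compare dynamic types, so pointer and value types are distinct. A minimal sketch with hypothetical struct names:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type MyStruct struct{}
type NotMyStruct struct{}

// Sketch of the new type assertions: comparison is by dynamic type.
func TestTypeAssertions(t *testing.T) {
	require.IsType(t, &MyStruct{}, &MyStruct{})      // same type: passes
	require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) // different structs: passes
	require.IsNotType(t, MyStruct{}, &MyStruct{})     // value vs. pointer differ
}
```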
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/tools/vendor/github.com/syndtr/gocapability/capability/capability.go b/tools/vendor/github.com/syndtr/gocapability/capability/capability.go deleted file mode 100644 index 61a90775e..000000000 --- a/tools/vendor/github.com/syndtr/gocapability/capability/capability.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package capability provides utilities for manipulating POSIX capabilities. -package capability - -type Capabilities interface { - // Get check whether a capability present in the given - // capabilities set. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Get(which CapType, what Cap) bool - - // Empty check whether all capability bits of the given capabilities - // set are zero. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Empty(which CapType) bool - - // Full check whether all capability bits of the given capabilities - // set are one. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Full(which CapType) bool - - // Set sets capabilities of the given capabilities sets. The - // 'which' value should be one or combination (OR'ed) of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Set(which CapType, caps ...Cap) - - // Unset unsets capabilities of the given capabilities sets. The - // 'which' value should be one or combination (OR'ed) of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Unset(which CapType, caps ...Cap) - - // Fill sets all bits of the given capabilities kind to one. The - // 'kind' value should be one or combination (OR'ed) of CAPS, - // BOUNDS or AMBS. - Fill(kind CapType) - - // Clear sets all bits of the given capabilities kind to zero. The - // 'kind' value should be one or combination (OR'ed) of CAPS, - // BOUNDS or AMBS. - Clear(kind CapType) - - // String return current capabilities state of the given capabilities - // set as string. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE BOUNDING or AMBIENT - StringCap(which CapType) string - - // String return current capabilities state as string. - String() string - - // Load load actual capabilities value. This will overwrite all - // outstanding changes. - Load() error - - // Apply apply the capabilities settings, so all changes will take - // effect. - Apply(kind CapType) error -} - -// NewPid initializes a new Capabilities object for given pid when -// it is nonzero, or for the current process if pid is 0. -// -// Deprecated: Replace with NewPid2. For example, replace: -// -// c, err := NewPid(0) -// if err != nil { -// return err -// } -// -// with: -// -// c, err := NewPid2(0) -// if err != nil { -// return err -// } -// err = c.Load() -// if err != nil { -// return err -// } -func NewPid(pid int) (Capabilities, error) { - c, err := newPid(pid) - if err != nil { - return c, err - } - err = c.Load() - return c, err -} - -// NewPid2 initializes a new Capabilities object for given pid when -// it is nonzero, or for the current process if pid is 0. This -// does not load the process's current capabilities; to do that you -// must call Load explicitly. -func NewPid2(pid int) (Capabilities, error) { - return newPid(pid) -} - -// NewFile initializes a new Capabilities object for given file path. -// -// Deprecated: Replace with NewFile2. 
For example, replace: -// -// c, err := NewFile(path) -// if err != nil { -// return err -// } -// -// with: -// -// c, err := NewFile2(path) -// if err != nil { -// return err -// } -// err = c.Load() -// if err != nil { -// return err -// } -func NewFile(path string) (Capabilities, error) { - c, err := newFile(path) - if err != nil { - return c, err - } - err = c.Load() - return c, err -} - -// NewFile2 creates a new initialized Capabilities object for given -// file path. This does not load the process's current capabilities; -// to do that you must call Load explicitly. -func NewFile2(path string) (Capabilities, error) { - return newFile(path) -} diff --git a/tools/vendor/github.com/syndtr/gocapability/capability/capability_linux.go b/tools/vendor/github.com/syndtr/gocapability/capability/capability_linux.go deleted file mode 100644 index 1567dc810..000000000 --- a/tools/vendor/github.com/syndtr/gocapability/capability/capability_linux.go +++ /dev/null @@ -1,642 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package capability - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "strings" - "syscall" -) - -var errUnknownVers = errors.New("unknown capability version") - -const ( - linuxCapVer1 = 0x19980330 - linuxCapVer2 = 0x20071026 - linuxCapVer3 = 0x20080522 -) - -var ( - capVers uint32 - capLastCap Cap -) - -func init() { - var hdr capHeader - capget(&hdr, nil) - capVers = hdr.version - - if initLastCap() == nil { - CAP_LAST_CAP = capLastCap - if capLastCap > 31 { - capUpperMask = (uint32(1) << (uint(capLastCap) - 31)) - 1 - } else { - capUpperMask = 0 - } - } -} - -func initLastCap() error { - if capLastCap != 0 { - return nil - } - - f, err := os.Open("/proc/sys/kernel/cap_last_cap") - if err != nil { - return err - } - defer f.Close() - - var b []byte = make([]byte, 11) - _, err = f.Read(b) - if err != nil { - return err - } - - fmt.Sscanf(string(b), "%d", &capLastCap) - - return nil -} - -func mkStringCap(c Capabilities, which CapType) (ret string) { - for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ { - if !c.Get(which, i) { - continue - } - if first { - first = false - } else { - ret += ", " - } - ret += i.String() - } - return -} - -func mkString(c Capabilities, max CapType) (ret string) { - ret = "{" - for i := CapType(1); i <= max; i <<= 1 { - ret += " " + i.String() + "=\"" - if c.Empty(i) { - ret += "empty" - } else if c.Full(i) { - ret += "full" - } else { - ret += c.StringCap(i) - } - ret += "\"" - } - ret += " }" - return -} - -func newPid(pid int) (c Capabilities, err error) { - switch capVers { - case linuxCapVer1: - p := new(capsV1) - p.hdr.version = capVers - p.hdr.pid = int32(pid) - c = p - case linuxCapVer2, linuxCapVer3: - p := new(capsV3) - p.hdr.version = capVers - p.hdr.pid = int32(pid) - c = p - default: - err = errUnknownVers - return - } - return -} - -type capsV1 struct { - hdr capHeader - data capData -} - -func (c *capsV1) Get(which CapType, what Cap) bool { - if what > 32 { - return false - } - - switch which { - case EFFECTIVE: - return (1< 32 { - continue - } - - if which&EFFECTIVE != 0 { - c.data.effective |= 1 << uint(what) - } - if which&PERMITTED != 0 { - c.data.permitted |= 1 << uint(what) - } - if which&INHERITABLE != 0 { - c.data.inheritable |= 1 << uint(what) - } - } -} - -func (c *capsV1) Unset(which CapType, caps ...Cap) { - for _, what := range caps { - if what > 32 { - continue - } - - if 
which&EFFECTIVE != 0 { - c.data.effective &= ^(1 << uint(what)) - } - if which&PERMITTED != 0 { - c.data.permitted &= ^(1 << uint(what)) - } - if which&INHERITABLE != 0 { - c.data.inheritable &= ^(1 << uint(what)) - } - } -} - -func (c *capsV1) Fill(kind CapType) { - if kind&CAPS == CAPS { - c.data.effective = 0x7fffffff - c.data.permitted = 0x7fffffff - c.data.inheritable = 0 - } -} - -func (c *capsV1) Clear(kind CapType) { - if kind&CAPS == CAPS { - c.data.effective = 0 - c.data.permitted = 0 - c.data.inheritable = 0 - } -} - -func (c *capsV1) StringCap(which CapType) (ret string) { - return mkStringCap(c, which) -} - -func (c *capsV1) String() (ret string) { - return mkString(c, BOUNDING) -} - -func (c *capsV1) Load() (err error) { - return capget(&c.hdr, &c.data) -} - -func (c *capsV1) Apply(kind CapType) error { - if kind&CAPS == CAPS { - return capset(&c.hdr, &c.data) - } - return nil -} - -type capsV3 struct { - hdr capHeader - data [2]capData - bounds [2]uint32 - ambient [2]uint32 -} - -func (c *capsV3) Get(which CapType, what Cap) bool { - var i uint - if what > 31 { - i = uint(what) >> 5 - what %= 32 - } - - switch which { - case EFFECTIVE: - return (1< 31 { - i = uint(what) >> 5 - what %= 32 - } - - if which&EFFECTIVE != 0 { - c.data[i].effective |= 1 << uint(what) - } - if which&PERMITTED != 0 { - c.data[i].permitted |= 1 << uint(what) - } - if which&INHERITABLE != 0 { - c.data[i].inheritable |= 1 << uint(what) - } - if which&BOUNDING != 0 { - c.bounds[i] |= 1 << uint(what) - } - if which&AMBIENT != 0 { - c.ambient[i] |= 1 << uint(what) - } - } -} - -func (c *capsV3) Unset(which CapType, caps ...Cap) { - for _, what := range caps { - var i uint - if what > 31 { - i = uint(what) >> 5 - what %= 32 - } - - if which&EFFECTIVE != 0 { - c.data[i].effective &= ^(1 << uint(what)) - } - if which&PERMITTED != 0 { - c.data[i].permitted &= ^(1 << uint(what)) - } - if which&INHERITABLE != 0 { - c.data[i].inheritable &= ^(1 << uint(what)) - } - if which&BOUNDING != 0 { - c.bounds[i] &= ^(1 << uint(what)) - } - if which&AMBIENT != 0 { - c.ambient[i] &= ^(1 << uint(what)) - } - } -} - -func (c *capsV3) Fill(kind CapType) { - if kind&CAPS == CAPS { - c.data[0].effective = 0xffffffff - c.data[0].permitted = 0xffffffff - c.data[0].inheritable = 0 - c.data[1].effective = 0xffffffff - c.data[1].permitted = 0xffffffff - c.data[1].inheritable = 0 - } - - if kind&BOUNDS == BOUNDS { - c.bounds[0] = 0xffffffff - c.bounds[1] = 0xffffffff - } - if kind&AMBS == AMBS { - c.ambient[0] = 0xffffffff - c.ambient[1] = 0xffffffff - } -} - -func (c *capsV3) Clear(kind CapType) { - if kind&CAPS == CAPS { - c.data[0].effective = 0 - c.data[0].permitted = 0 - c.data[0].inheritable = 0 - c.data[1].effective = 0 - c.data[1].permitted = 0 - c.data[1].inheritable = 0 - } - - if kind&BOUNDS == BOUNDS { - c.bounds[0] = 0 - c.bounds[1] = 0 - } - if kind&AMBS == AMBS { - c.ambient[0] = 0 - c.ambient[1] = 0 - } -} - -func (c *capsV3) StringCap(which CapType) (ret string) { - return mkStringCap(c, which) -} - -func (c *capsV3) String() (ret string) { - return mkString(c, BOUNDING) -} - -func (c *capsV3) Load() (err error) { - err = capget(&c.hdr, &c.data[0]) - if err != nil { - return - } - - var status_path string - - if c.hdr.pid == 0 { - status_path = fmt.Sprintf("/proc/self/status") - } else { - status_path = fmt.Sprintf("/proc/%d/status", c.hdr.pid) - } - - f, err := os.Open(status_path) - if err != nil { - return - } - b := bufio.NewReader(f) - for { - line, e := b.ReadString('\n') - if e != nil { - if e != io.EOF { - 
err = e - } - break - } - if strings.HasPrefix(line, "CapB") { - fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0]) - continue - } - if strings.HasPrefix(line, "CapA") { - fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0]) - continue - } - } - f.Close() - - return -} - -func (c *capsV3) Apply(kind CapType) (err error) { - if kind&BOUNDS == BOUNDS { - var data [2]capData - err = capget(&c.hdr, &data[0]) - if err != nil { - return - } - if (1< 31 { - if c.data.version == 1 { - return false - } - i = uint(what) >> 5 - what %= 32 - } - - switch which { - case EFFECTIVE: - return (1< 31 { - if c.data.version == 1 { - continue - } - i = uint(what) >> 5 - what %= 32 - } - - if which&EFFECTIVE != 0 { - c.data.effective[i] |= 1 << uint(what) - } - if which&PERMITTED != 0 { - c.data.data[i].permitted |= 1 << uint(what) - } - if which&INHERITABLE != 0 { - c.data.data[i].inheritable |= 1 << uint(what) - } - } -} - -func (c *capsFile) Unset(which CapType, caps ...Cap) { - for _, what := range caps { - var i uint - if what > 31 { - if c.data.version == 1 { - continue - } - i = uint(what) >> 5 - what %= 32 - } - - if which&EFFECTIVE != 0 { - c.data.effective[i] &= ^(1 << uint(what)) - } - if which&PERMITTED != 0 { - c.data.data[i].permitted &= ^(1 << uint(what)) - } - if which&INHERITABLE != 0 { - c.data.data[i].inheritable &= ^(1 << uint(what)) - } - } -} - -func (c *capsFile) Fill(kind CapType) { - if kind&CAPS == CAPS { - c.data.effective[0] = 0xffffffff - c.data.data[0].permitted = 0xffffffff - c.data.data[0].inheritable = 0 - if c.data.version == 2 { - c.data.effective[1] = 0xffffffff - c.data.data[1].permitted = 0xffffffff - c.data.data[1].inheritable = 0 - } - } -} - -func (c *capsFile) Clear(kind CapType) { - if kind&CAPS == CAPS { - c.data.effective[0] = 0 - c.data.data[0].permitted = 0 - c.data.data[0].inheritable = 0 - if c.data.version == 2 { - c.data.effective[1] = 0 - c.data.data[1].permitted = 0 - c.data.data[1].inheritable = 0 - } - } -} - -func (c *capsFile) StringCap(which CapType) (ret string) { - return mkStringCap(c, which) -} - -func (c *capsFile) String() (ret string) { - return mkString(c, INHERITABLE) -} - -func (c *capsFile) Load() (err error) { - return getVfsCap(c.path, &c.data) -} - -func (c *capsFile) Apply(kind CapType) (err error) { - if kind&CAPS == CAPS { - return setVfsCap(c.path, &c.data) - } - return -} diff --git a/tools/vendor/github.com/syndtr/gocapability/capability/capability_noop.go b/tools/vendor/github.com/syndtr/gocapability/capability/capability_noop.go deleted file mode 100644 index 9bb3070c5..000000000 --- a/tools/vendor/github.com/syndtr/gocapability/capability/capability_noop.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !linux - -package capability - -import "errors" - -func newPid(pid int) (Capabilities, error) { - return nil, errors.New("not supported") -} - -func newFile(path string) (Capabilities, error) { - return nil, errors.New("not supported") -} diff --git a/tools/vendor/github.com/syndtr/gocapability/capability/enum.go b/tools/vendor/github.com/syndtr/gocapability/capability/enum.go deleted file mode 100644 index ad1078531..000000000 --- a/tools/vendor/github.com/syndtr/gocapability/capability/enum.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package capability - -type CapType uint - -func (c CapType) String() string { - switch c { - case EFFECTIVE: - return "effective" - case PERMITTED: - return "permitted" - case INHERITABLE: - return "inheritable" - case BOUNDING: - return "bounding" - case CAPS: - return "caps" - case AMBIENT: - return "ambient" - } - return "unknown" -} - -const ( - EFFECTIVE CapType = 1 << iota - PERMITTED - INHERITABLE - BOUNDING - AMBIENT - - CAPS = EFFECTIVE | PERMITTED | INHERITABLE - BOUNDS = BOUNDING - AMBS = AMBIENT -) - -//go:generate go run enumgen/gen.go -type Cap int - -// POSIX-draft defined capabilities and Linux extensions. -// -// Defined in https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h -const ( - // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this - // overrides the restriction of changing file ownership and group - // ownership. - CAP_CHOWN = Cap(0) - - // Override all DAC access, including ACL execute access if - // [_POSIX_ACL] is defined. Excluding DAC access covered by - // CAP_LINUX_IMMUTABLE. - CAP_DAC_OVERRIDE = Cap(1) - - // Overrides all DAC restrictions regarding read and search on files - // and directories, including ACL restrictions if [_POSIX_ACL] is - // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE. - CAP_DAC_READ_SEARCH = Cap(2) - - // Overrides all restrictions about allowed operations on files, where - // file owner ID must be equal to the user ID, except where CAP_FSETID - // is applicable. It doesn't override MAC and DAC restrictions. - CAP_FOWNER = Cap(3) - - // Overrides the following restrictions that the effective user ID - // shall match the file owner ID when setting the S_ISUID and S_ISGID - // bits on that file; that the effective group ID (or one of the - // supplementary group IDs) shall match the file owner ID when setting - // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are - // cleared on successful return from chown(2) (not implemented). - CAP_FSETID = Cap(4) - - // Overrides the restriction that the real or effective user ID of a - // process sending a signal must match the real or effective user ID - // of the process receiving the signal. - CAP_KILL = Cap(5) - - // Allows setgid(2) manipulation - // Allows setgroups(2) - // Allows forged gids on socket credentials passing. - CAP_SETGID = Cap(6) - - // Allows set*uid(2) manipulation (including fsuid). - // Allows forged pids on socket credentials passing. 
- CAP_SETUID = Cap(7) - - // Linux-specific capabilities - - // Without VFS support for capabilities: - // Transfer any capability in your permitted set to any pid, - // remove any capability in your permitted set from any pid - // With VFS support for capabilities (neither of above, but) - // Add any capability from current's capability bounding set - // to the current process' inheritable set - // Allow taking bits out of capability bounding set - // Allow modification of the securebits for a process - CAP_SETPCAP = Cap(8) - - // Allow modification of S_IMMUTABLE and S_APPEND file attributes - CAP_LINUX_IMMUTABLE = Cap(9) - - // Allows binding to TCP/UDP sockets below 1024 - // Allows binding to ATM VCIs below 32 - CAP_NET_BIND_SERVICE = Cap(10) - - // Allow broadcasting, listen to multicast - CAP_NET_BROADCAST = Cap(11) - - // Allow interface configuration - // Allow administration of IP firewall, masquerading and accounting - // Allow setting debug option on sockets - // Allow modification of routing tables - // Allow setting arbitrary process / process group ownership on - // sockets - // Allow binding to any address for transparent proxying (also via NET_RAW) - // Allow setting TOS (type of service) - // Allow setting promiscuous mode - // Allow clearing driver statistics - // Allow multicasting - // Allow read/write of device-specific registers - // Allow activation of ATM control sockets - CAP_NET_ADMIN = Cap(12) - - // Allow use of RAW sockets - // Allow use of PACKET sockets - // Allow binding to any address for transparent proxying (also via NET_ADMIN) - CAP_NET_RAW = Cap(13) - - // Allow locking of shared memory segments - // Allow mlock and mlockall (which doesn't really have anything to do - // with IPC) - CAP_IPC_LOCK = Cap(14) - - // Override IPC ownership checks - CAP_IPC_OWNER = Cap(15) - - // Insert and remove kernel modules - modify kernel without limit - CAP_SYS_MODULE = Cap(16) - - // Allow ioperm/iopl access - // Allow sending USB messages to any device via /proc/bus/usb - CAP_SYS_RAWIO = Cap(17) - - // Allow use of chroot() - CAP_SYS_CHROOT = Cap(18) - - // Allow ptrace() of any process - CAP_SYS_PTRACE = Cap(19) - - // Allow configuration of process accounting - CAP_SYS_PACCT = Cap(20) - - // Allow configuration of the secure attention key - // Allow administration of the random device - // Allow examination and configuration of disk quotas - // Allow setting the domainname - // Allow setting the hostname - // Allow calling bdflush() - // Allow mount() and umount(), setting up new smb connection - // Allow some autofs root ioctls - // Allow nfsservctl - // Allow VM86_REQUEST_IRQ - // Allow to read/write pci config on alpha - // Allow irix_prctl on mips (setstacksize) - // Allow flushing all cache on m68k (sys_cacheflush) - // Allow removing semaphores - // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores - // and shared memory - // Allow locking/unlocking of shared memory segment - // Allow turning swap on/off - // Allow forged pids on socket credentials passing - // Allow setting readahead and flushing buffers on block devices - // Allow setting geometry in floppy driver - // Allow turning DMA on/off in xd driver - // Allow administration of md devices (mostly the above, but some - // extra ioctls) - // Allow tuning the ide driver - // Allow access to the nvram device - // Allow administration of apm_bios, serial and bttv (TV) device - // Allow manufacturer commands in isdn CAPI support driver - // Allow reading non-standardized portions of pci 
configuration space - // Allow DDI debug ioctl on sbpcd driver - // Allow setting up serial ports - // Allow sending raw qic-117 commands - // Allow enabling/disabling tagged queuing on SCSI controllers and sending - // arbitrary SCSI commands - // Allow setting encryption key on loopback filesystem - // Allow setting zone reclaim policy - // Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility - CAP_SYS_ADMIN = Cap(21) - - // Allow use of reboot() - CAP_SYS_BOOT = Cap(22) - - // Allow raising priority and setting priority on other (different - // UID) processes - // Allow use of FIFO and round-robin (realtime) scheduling on own - // processes and setting the scheduling algorithm used by another - // process. - // Allow setting cpu affinity on other processes - CAP_SYS_NICE = Cap(23) - - // Override resource limits. Set resource limits. - // Override quota limits. - // Override reserved space on ext2 filesystem - // Modify data journaling mode on ext3 filesystem (uses journaling - // resources) - // NOTE: ext2 honors fsuid when checking for resource overrides, so - // you can override using fsuid too - // Override size restrictions on IPC message queues - // Allow more than 64hz interrupts from the real-time clock - // Override max number of consoles on console allocation - // Override max number of keymaps - // Control memory reclaim behavior - CAP_SYS_RESOURCE = Cap(24) - - // Allow manipulation of system clock - // Allow irix_stime on mips - // Allow setting the real-time clock - CAP_SYS_TIME = Cap(25) - - // Allow configuration of tty devices - // Allow vhangup() of tty - CAP_SYS_TTY_CONFIG = Cap(26) - - // Allow the privileged aspects of mknod() - CAP_MKNOD = Cap(27) - - // Allow taking of leases on files - CAP_LEASE = Cap(28) - - CAP_AUDIT_WRITE = Cap(29) - CAP_AUDIT_CONTROL = Cap(30) - CAP_SETFCAP = Cap(31) - - // Override MAC access. - // The base kernel enforces no MAC policy. - // An LSM may enforce a MAC policy, and if it does and it chooses - // to implement capability based overrides of that policy, this is - // the capability it should use to do so. - CAP_MAC_OVERRIDE = Cap(32) - - // Allow MAC configuration or state changes. - // The base kernel requires no MAC configuration. - // An LSM may enforce a MAC policy, and if it does and it chooses - // to implement capability based checks on modifications to that - // policy or the data required to maintain it, this is the - // capability it should use to do so. 
- CAP_MAC_ADMIN = Cap(33) - - // Allow configuring the kernel's syslog (printk behaviour) - CAP_SYSLOG = Cap(34) - - // Allow triggering something that will wake the system - CAP_WAKE_ALARM = Cap(35) - - // Allow preventing system suspends - CAP_BLOCK_SUSPEND = Cap(36) - - // Allow reading the audit log via multicast netlink socket - CAP_AUDIT_READ = Cap(37) - - // Allow system performance and observability privileged operations - // using perf_events, i915_perf and other kernel subsystems - CAP_PERFMON = Cap(38) - - // CAP_BPF allows the following BPF operations: - // - Creating all types of BPF maps - // - Advanced verifier features - // - Indirect variable access - // - Bounded loops - // - BPF to BPF function calls - // - Scalar precision tracking - // - Larger complexity limits - // - Dead code elimination - // - And potentially other features - // - Loading BPF Type Format (BTF) data - // - Retrieve xlated and JITed code of BPF programs - // - Use bpf_spin_lock() helper - // - // CAP_PERFMON relaxes the verifier checks further: - // - BPF progs can use of pointer-to-integer conversions - // - speculation attack hardening measures are bypassed - // - bpf_probe_read to read arbitrary kernel memory is allowed - // - bpf_trace_printk to print kernel memory is allowed - // - // CAP_SYS_ADMIN is required to use bpf_probe_write_user. - // - // CAP_SYS_ADMIN is required to iterate system wide loaded - // programs, maps, links, BTFs and convert their IDs to file descriptors. - // - // CAP_PERFMON and CAP_BPF are required to load tracing programs. - // CAP_NET_ADMIN and CAP_BPF are required to load networking programs. - CAP_BPF = Cap(39) - - // Allow checkpoint/restore related operations. - // Introduced in kernel 5.9 - CAP_CHECKPOINT_RESTORE = Cap(40) -) - -var ( - // Highest valid capability of the running kernel. 
- CAP_LAST_CAP = Cap(63) - - capUpperMask = ^uint32(0) -) diff --git a/tools/vendor/github.com/syndtr/gocapability/capability/enum_gen.go b/tools/vendor/github.com/syndtr/gocapability/capability/enum_gen.go deleted file mode 100644 index 2ff9bf4d8..000000000 --- a/tools/vendor/github.com/syndtr/gocapability/capability/enum_gen.go +++ /dev/null @@ -1,138 +0,0 @@ -// generated file; DO NOT EDIT - use go generate in directory with source - -package capability - -func (c Cap) String() string { - switch c { - case CAP_CHOWN: - return "chown" - case CAP_DAC_OVERRIDE: - return "dac_override" - case CAP_DAC_READ_SEARCH: - return "dac_read_search" - case CAP_FOWNER: - return "fowner" - case CAP_FSETID: - return "fsetid" - case CAP_KILL: - return "kill" - case CAP_SETGID: - return "setgid" - case CAP_SETUID: - return "setuid" - case CAP_SETPCAP: - return "setpcap" - case CAP_LINUX_IMMUTABLE: - return "linux_immutable" - case CAP_NET_BIND_SERVICE: - return "net_bind_service" - case CAP_NET_BROADCAST: - return "net_broadcast" - case CAP_NET_ADMIN: - return "net_admin" - case CAP_NET_RAW: - return "net_raw" - case CAP_IPC_LOCK: - return "ipc_lock" - case CAP_IPC_OWNER: - return "ipc_owner" - case CAP_SYS_MODULE: - return "sys_module" - case CAP_SYS_RAWIO: - return "sys_rawio" - case CAP_SYS_CHROOT: - return "sys_chroot" - case CAP_SYS_PTRACE: - return "sys_ptrace" - case CAP_SYS_PACCT: - return "sys_pacct" - case CAP_SYS_ADMIN: - return "sys_admin" - case CAP_SYS_BOOT: - return "sys_boot" - case CAP_SYS_NICE: - return "sys_nice" - case CAP_SYS_RESOURCE: - return "sys_resource" - case CAP_SYS_TIME: - return "sys_time" - case CAP_SYS_TTY_CONFIG: - return "sys_tty_config" - case CAP_MKNOD: - return "mknod" - case CAP_LEASE: - return "lease" - case CAP_AUDIT_WRITE: - return "audit_write" - case CAP_AUDIT_CONTROL: - return "audit_control" - case CAP_SETFCAP: - return "setfcap" - case CAP_MAC_OVERRIDE: - return "mac_override" - case CAP_MAC_ADMIN: - return "mac_admin" - case CAP_SYSLOG: - return "syslog" - case CAP_WAKE_ALARM: - return "wake_alarm" - case CAP_BLOCK_SUSPEND: - return "block_suspend" - case CAP_AUDIT_READ: - return "audit_read" - case CAP_PERFMON: - return "perfmon" - case CAP_BPF: - return "bpf" - case CAP_CHECKPOINT_RESTORE: - return "checkpoint_restore" - } - return "unknown" -} - -// List returns list of all supported capabilities -func List() []Cap { - return []Cap{ - CAP_CHOWN, - CAP_DAC_OVERRIDE, - CAP_DAC_READ_SEARCH, - CAP_FOWNER, - CAP_FSETID, - CAP_KILL, - CAP_SETGID, - CAP_SETUID, - CAP_SETPCAP, - CAP_LINUX_IMMUTABLE, - CAP_NET_BIND_SERVICE, - CAP_NET_BROADCAST, - CAP_NET_ADMIN, - CAP_NET_RAW, - CAP_IPC_LOCK, - CAP_IPC_OWNER, - CAP_SYS_MODULE, - CAP_SYS_RAWIO, - CAP_SYS_CHROOT, - CAP_SYS_PTRACE, - CAP_SYS_PACCT, - CAP_SYS_ADMIN, - CAP_SYS_BOOT, - CAP_SYS_NICE, - CAP_SYS_RESOURCE, - CAP_SYS_TIME, - CAP_SYS_TTY_CONFIG, - CAP_MKNOD, - CAP_LEASE, - CAP_AUDIT_WRITE, - CAP_AUDIT_CONTROL, - CAP_SETFCAP, - CAP_MAC_OVERRIDE, - CAP_MAC_ADMIN, - CAP_SYSLOG, - CAP_WAKE_ALARM, - CAP_BLOCK_SUSPEND, - CAP_AUDIT_READ, - CAP_PERFMON, - CAP_BPF, - CAP_CHECKPOINT_RESTORE, - } -} diff --git a/tools/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go b/tools/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go deleted file mode 100644 index 3d2bf6927..000000000 --- a/tools/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package capability - -import ( - "syscall" - "unsafe" -) - -type capHeader struct { - version uint32 - pid int32 -} - -type capData struct { - effective uint32 - permitted uint32 - inheritable uint32 -} - -func capget(hdr *capHeader, data *capData) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = e1 - } - return -} - -func capset(hdr *capHeader, data *capData) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = e1 - } - return -} - -// not yet in syscall -const ( - pr_CAP_AMBIENT = 47 - pr_CAP_AMBIENT_IS_SET = uintptr(1) - pr_CAP_AMBIENT_RAISE = uintptr(2) - pr_CAP_AMBIENT_LOWER = uintptr(3) - pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4) -) - -func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { - _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) - if e1 != 0 { - err = e1 - } - return -} - -const ( - vfsXattrName = "security.capability" - - vfsCapVerMask = 0xff000000 - vfsCapVer1 = 0x01000000 - vfsCapVer2 = 0x02000000 - - vfsCapFlagMask = ^vfsCapVerMask - vfsCapFlageffective = 0x000001 - - vfscapDataSizeV1 = 4 * (1 + 2*1) - vfscapDataSizeV2 = 4 * (1 + 2*2) -) - -type vfscapData struct { - magic uint32 - data [2]struct { - permitted uint32 - inheritable uint32 - } - effective [2]uint32 - version int8 -} - -var ( - _vfsXattrName *byte -) - -func init() { - _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName) -} - -func getVfsCap(path string, dest *vfscapData) (err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0) - if e1 != 0 { - if e1 == syscall.ENODATA { - dest.version = 2 - return - } - err = e1 - } - switch dest.magic & vfsCapVerMask { - case vfsCapVer1: - dest.version = 1 - if r0 != vfscapDataSizeV1 { - return syscall.EINVAL - } - dest.data[1].permitted = 0 - dest.data[1].inheritable = 0 - case vfsCapVer2: - dest.version = 2 - if r0 != vfscapDataSizeV2 { - return syscall.EINVAL - } - default: - return syscall.EINVAL - } - if dest.magic&vfsCapFlageffective != 0 { - dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable - dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable - } else { - dest.effective[0] = 0 - dest.effective[1] = 0 - } - return -} - -func setVfsCap(path string, data *vfscapData) (err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(path) - if err != nil { - return - } - var size uintptr - if data.version == 1 { - data.magic = vfsCapVer1 - size = vfscapDataSizeV1 - } else if data.version == 2 { - data.magic = vfsCapVer2 - if data.effective[0] != 0 || data.effective[1] != 0 { - data.magic |= vfsCapFlageffective - } - size = vfscapDataSizeV2 - } else { - return syscall.EINVAL - } - _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/tools/vendor/github.com/titanous/rocacheck/LICENSE b/tools/vendor/github.com/titanous/rocacheck/LICENSE new file mode 100644 index 
000000000..7bdce481f --- /dev/null +++ b/tools/vendor/github.com/titanous/rocacheck/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2017, Jonathan Rudenberg +Copyright (c) 2017, CRoCS, EnigmaBridge Ltd. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/tools/vendor/github.com/titanous/rocacheck/README.md b/tools/vendor/github.com/titanous/rocacheck/README.md new file mode 100644 index 000000000..b8e765ea9 --- /dev/null +++ b/tools/vendor/github.com/titanous/rocacheck/README.md @@ -0,0 +1,7 @@ +# rocacheck [![GoDoc](https://godoc.org/github.com/titanous/rocacheck?status.svg)](https://godoc.org/github.com/titanous/rocacheck) + +Package rocacheck is a Go implementation of the [key fingerprint +algorithm](https://github.com/crocs-muni/roca) that checks if an RSA key was +generated by broken Infineon code and is vulnerable to factorization via the +[Return of Coppersmith's Attack +(ROCA)](https://crocs.fi.muni.cz/public/papers/rsa_ccs17) / CVE-2017-15361. diff --git a/tools/vendor/github.com/titanous/rocacheck/rocacheck.go b/tools/vendor/github.com/titanous/rocacheck/rocacheck.go new file mode 100644 index 000000000..e813579bb --- /dev/null +++ b/tools/vendor/github.com/titanous/rocacheck/rocacheck.go @@ -0,0 +1,52 @@ +// Package rocacheck checks if a key was generated by broken Infineon code and +// is vulnerable to factorization via the Return of Coppersmith's Attack (ROCA) +// / CVE-2017-15361. +package rocacheck + +import ( + "crypto/rsa" + "math/big" +) + +type test struct { + Prime *big.Int + Fingerprints map[int64]struct{} +} + +var tests = make([]test, 17) + +func init() { + bigOne := big.NewInt(1) + n := &big.Int{} + // relations table from https://github.com/crocs-muni/roca/pull/40 + for i, r := range [][2]int64{ + {2, 11}, {6, 13}, {8, 17}, {9, 19}, {3, 37}, {26, 53}, {20, 61}, + {35, 71}, {24, 73}, {13, 79}, {6, 97}, {51, 103}, {53, 107}, + {54, 109}, {42, 127}, {50, 151}, {78, 157}, + } { + fps := make(map[int64]struct{}) + bp := big.NewInt(r[1]) + br := big.NewInt(r[0]) + for j := int64(0); j < r[1]; j++ { + if n.Exp(big.NewInt(j), br, bp).Cmp(bigOne) == 0 { + fps[j] = struct{}{} + } + } + tests[i] = test{ + Prime: big.NewInt(r[1]), + Fingerprints: fps, + } + } +} + +// IsWeak returns true if a RSA public key is vulnerable to Return of +// Coppersmith's Attack (ROCA). 
+func IsWeak(k *rsa.PublicKey) bool { + tmp := &big.Int{} + for _, t := range tests { + if _, ok := t.Fingerprints[tmp.Mod(k.N, t.Prime).Int64()]; !ok { + return false + } + } + return true +} diff --git a/tools/vendor/github.com/ulikunitz/xz/.gitignore b/tools/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 000000000..eb3d5f517 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,28 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* + +# file generated by example +example.xz \ No newline at end of file diff --git a/tools/vendor/github.com/ulikunitz/xz/LICENSE b/tools/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 000000000..8a7f0877d --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2022 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tools/vendor/github.com/ulikunitz/xz/README.md b/tools/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 000000000..56d49275a --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,88 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It also includes a gxz command for compressing and +decompressing data. The package is written entirely in Go and has no +dependency on any C code. + +The package is currently under development. There might be bugs, and the +APIs are not considered stable. At this time the package cannot compete +with the xz tool regarding compression speed and size. The algorithms +there have been developed over a long time and are highly optimized. +However, a number of improvements are planned, and I'm very optimistic +about parallel compression and decompression. Stay tuned! + +## Using the API + +The following example program shows how to use the API.
+ +```go +package main + +import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" +) + +func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) + } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } +} +``` + +## Documentation + +You can find the full documentation at [pkg.go.dev](https://pkg.go.dev/github.com/ulikunitz/xz). + +## Using the gxz compression tool + +The package includes a gxz command line utility for compression and +decompression. + +Use the following command for installation: + + $ go get github.com/ulikunitz/xz/cmd/gxz + +To test it, call the following command. + + $ gxz bigfile + +After some time a much smaller file bigfile.xz will replace bigfile. +To decompress it, use the following command. + + $ gxz -d bigfile.xz + +## Security & Vulnerabilities + +The security policy is documented in [SECURITY.md](SECURITY.md). + +The software is not affected by the supply chain attack on the original xz +implementation, [CVE-2024-3094](https://nvd.nist.gov/vuln/detail/CVE-2024-3094). +This implementation doesn't share any files with the original xz implementation +and no patches or pull requests are accepted without a review. + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). diff --git a/tools/vendor/github.com/ulikunitz/xz/SECURITY.md b/tools/vendor/github.com/ulikunitz/xz/SECURITY.md new file mode 100644 index 000000000..1bdc88878 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +Currently the last minor version v0.5.x is supported. + +## Reporting a Vulnerability + +You can privately report a vulnerability following this +[procedure](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +Alternatively you can create a GitHub issue at +<https://github.com/ulikunitz/xz/issues>. + +In both cases, allow at least 7 days for a response. + +## Security Advisories + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). diff --git a/tools/vendor/github.com/ulikunitz/xz/TODO.md b/tools/vendor/github.com/ulikunitz/xz/TODO.md new file mode 100644 index 000000000..8f9650c13 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/TODO.md @@ -0,0 +1,386 @@ +# TODO list + +## Release v0.6 + +1. Review encoder and check for lzma improvements under xz. +2. Fix binary tree matcher. +3. Compare compression ratio with the xz tool using comparable parameters and optimize them. +4. Rename operation action and make it a simple type of size 8. +5. Make maxMatches and wordSize parameters. +6. Stop searching after a certain length is found (parameter sweetLen). + +## Release v0.7 + +1. Optimize code +2.
Do statistical analysis to get linear presets. +3. Test sync.Pool compatibility for xz and lzma Writer and Reader. +4. Fuzz optimized code. + +## Release v0.8 + +1. Support parallel goroutines for writing and reading xz files. +2. Support a ReaderAt interface for xz files with small block sizes. +3. Improve compatibility between gxz and xz. +4. Provide a manual page for gxz. + +## Release v0.9 + +1. Improve documentation. +2. Fuzz again. + +## Release v1.0 + +1. Fully functioning gxz. +2. Add godoc URL to README.md (godoc.org). +3. Resolve all issues. +4. Define release candidates. +5. Public announcement. + +## Package lzma + +### v0.6 + +* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including + * simple scan at the dictionary head for the same byte + * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer) + +## Optimizations + +* There may be a lot of false sharing in lzma.State; check whether this + can be improved by reorganizing its internal structure. + +* Check whether batching encoding and decoding improves speed. + +### DAG optimizations + +* Use full buffer to create minimal bit-length above range encoder. +* Might be too slow (see v0.4) + +### Different match finders + +* hashes with 2, 3 characters in addition to 4 characters +* binary trees with 2-7 characters (uint64 as key, use uint32 as + pointers into an array) +* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + into an array with bit-stealing for the colors) + +## Release Procedure + +* Execute goch -l for all packages; probably with a lower param like 0.5. +* Check orthography with gospell. +* Write release notes in doc/relnotes. +* Update README.md. +* Run xb copyright . in the xz directory to ensure all new files have a copyright header. +* `VERSION= go generate github.com/ulikunitz/xz/...` to update version files. +* Execute tests for Linux/amd64, Linux/x86 and Windows/amd64. +* Update TODO.md and write a short log entry. +* `git checkout master && git merge dev` +* `git tag -a ` +* `git push` + +## Log + +### 2025-08-28 + +Release v0.5.14 addresses the security vulnerability CVE-2025-58058. If you put +bytes in front of an LZMA stream, the header might not be read correctly and +memory for the dictionary buffer might be allocated. I have implemented +mitigations for the problem. + +### 2025-08-20 + +Release v0.5.13 addressed issue #61 regarding the handling of multiple +WriteClosers together, so I added a new package xio with a WriteCloserStack. + +### 2024-04-03 + +Release v0.5.12 updates README.md and SECURITY.md to address the supply chain +attack on the original xz implementation. + +### 2022-12-12 + +Matt Dantay (@bodgit) reported an issue with the LZMA reader. The implementation +returned an error if the dictionary size was less than 4096 bytes, but the +recommendation stated the actually used window size should be set to 4096 bytes +in that case. It was actually pull request +[#52](https://github.com/ulikunitz/xz/pull/52). The new patch v0.5.11 fixes it. + +### 2021-02-02 + +Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The +function allocated a slice of records immediately after reading the value +without further checks. Since the number was too large, the make function +panicked. The fix is to check the number against the expected number of records +before allocating them. + +### 2020-12-17 + +Release v0.5.9 fixes warnings and a typo, and adds SECURITY.md.
+ +One fix is interesting. + +```go +const ( + a byte = 0x1 + b = 0x2 +) +``` + +The constants a and b don't have the same type. Correct is: + +```go +const ( + a byte = 0x1 + b byte = 0x2 +) +``` + +### 2020-08-19 + +Release v0.5.8 fixes +[issue #35](https://github.com/ulikunitz/xz/issues/35). + +### 2020-02-24 + +Release v0.5.7 supports the check-ID None and fixes +[issue #27](https://github.com/ulikunitz/xz/issues/27). + +### 2019-02-20 + +Release v0.5.6 supports the go.mod file. + +### 2018-10-28 + +Release v0.5.5 fixes issue #19, which observed ErrLimit outputs. + +### 2017-06-05 + +Release v0.5.4 fixes issue #15, another problem with the padding size +check for the xz block header. I removed the check completely. + +### 2017-02-15 + +Release v0.5.3 fixes issue #12 regarding the decompression of an empty +XZ stream. Many thanks to Tomasz Kłak, who reported the issue. + +### 2016-12-02 + +Release v0.5.2 became necessary to allow the decoding of xz files with +4-byte padding in the block header. Many thanks to Greg, who reported +the issue. + +### 2016-07-23 + +Release v0.5.1 became necessary to fix problems with 32-bit platforms. +Many thanks to Bruno Brigas, who reported the issue. + +### 2016-07-04 + +Release v0.5 provides improvements to the compressor and support for +the decompression of xz files with multiple xz streams. + +### 2016-01-31 + +Another compression rate increase by checking the byte at the length of +the best match first, before checking the whole prefix (see the sketch +below). This makes the compressor even faster. We now have a large time +budget to beat the compression ratio of the xz tool. For enwik8 we now +have over 40 seconds to reduce the compressed file size by another 7 MiB. + +### 2016-01-30 + +I simplified the encoder. Speed and compression rate increased +dramatically. A high compression rate also affects the decompression +speed. The approach with the buffer and optimizing for operation +compression rate has not been successful. Going for the maximum length +appears to be the best approach. + +### 2016-01-28 + +The release v0.4 is ready. It provides a working xz implementation, +which is rather slow, but works and is interoperable with the xz tool. +It is an important milestone. + +### 2016-01-10 + +I have the first working implementation of an xz reader and writer. I'm +happy about reaching this milestone. + +### 2015-12-02 + +I'm now ready to implement xz because I have a working LZMA2 +implementation. I decided today that v0.4 will use the slow encoder +with the operations buffer, to be able to go back if I intend to do so. + +### 2015-10-21 + +I have restarted the work on the library. While trying to implement +LZMA2, I discovered that I need to simplify the encoder and decoder +functions again. The option approach is too complicated. Using a limited +byte writer, not caring about written bytes at all, and not trying to +handle uncompressed data simplifies the LZMA encoder and decoder a lot. +Processing uncompressed data and handling limits is a feature of the +LZMA2 format, not of LZMA. + +I learned an interesting method from the LZO format. If the last copy is +too far away, they move the head by 2 bytes instead of 1 byte to reduce +processing times. + +### 2015-08-26 + +I have now reimplemented the lzma package. The code is reasonably fast, +but can still be optimized. The next step is to implement LZMA2 and then +xz. + +### 2015-07-05 + +Created release v0.3. The version is the foundation for a full xz +implementation that is the target of v0.4.
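+ +A minimal sketch of the check-the-decisive-byte-first idea from the +2016-01-31 entry above; the function name and calling convention are +hypothetical, not the actual encoder API: + +```go +// matchLen compares a candidate match against the search data. It probes +// the byte at index best first: unless that byte matches, the candidate +// cannot become longer than the current best match, and the full prefix +// comparison is skipped. +func matchLen(data, candidate []byte, best int) int { + if best < len(data) && best < len(candidate) && data[best] != candidate[best] { + return 0 // cannot beat the current best match + } + n := 0 + for n < len(data) && n < len(candidate) && data[n] == candidate[n] { + n++ + } + return n +} +```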
+ +### 2015-06-11 + +The gflag package has been developed because I couldn't use flag and +pflag for fully compatible support of gzip's and lzma's options. It +now seems to work quite nicely. + +### 2015-06-05 + +The overflow issue was interesting to research, however Henry S. Warren +Jr.'s Hacker's Delight book was very helpful as usual and explained the +issue perfectly. Fefe's information on his website was based on the +C FAQ and quite bad, because it didn't address the issue of -MININT == +MININT. + +### 2015-06-04 + +It has been a productive day. I improved the interface of lzma.Reader +and lzma.Writer and fixed the error handling. + +### 2015-06-01 + +By computing the bit length of the LZMA operations I was able to +improve the greedy algorithm implementation. By using an 8 MByte buffer +the compression rate was not as good as for xz but already better than +the gzip default. + +Compression is currently slow, but this is something we will be able to +improve over time. + +### 2015-05-26 + +Checked the license of ogier/pflag. The lzmago binary should include +the license terms for the pflag library. + +I added the endorsement clause, as used by Google for the Go sources, +to the LICENSE file. + +### 2015-05-22 + +The package lzb now contains the basic implementation for creating or +reading LZMA byte streams. It allows support for the implementation +of the DAG-shortest-path algorithm for the compression function. + +### 2015-04-23 + +Yesterday I completed the lzbase classes. I'm a little bit concerned +that using the components may require too much code, but on the other +hand there is a lot of flexibility. + +### 2015-04-22 + +Implemented Reader and Writer during the Bayern game against Porto. The +second half gave me enough time. + +### 2015-04-21 + +While showering this morning I discovered that the design for OpEncoder +and OpDecoder doesn't work, because encoding/decoding might depend on +the current status of the dictionary. This is not exactly the right way +to start the day. + +Therefore we need to keep the Reader and Writer design. This time around +we simplify it by ignoring size limits. These can be added by wrappers +around the Reader and Writer interfaces. The Parameters type isn't +needed anymore. + +However I will implement a ReaderState and WriterState type to use +static typing to ensure the right State object is combined with the +right lzbase.Reader and lzbase.Writer. + +As a start I have implemented ReaderState and WriterState to ensure +that the state for reading is only used by readers and the state for +writing only by writers. + +### 2015-04-20 + +Today I implemented the OpDecoder and tested OpEncoder and OpDecoder. + +### 2015-04-08 + +Came up with a new simplified design for lzbase. I have already +implemented the type State that replaces OpCodec. + +### 2015-04-06 + +The new lzma package is now fully usable, and lzmago now uses it. The +old lzma package has been completely removed. + +### 2015-04-05 + +Implemented lzma.Reader and tested it. + +### 2015-04-04 + +Implemented baseReader by adapting code from lzma.Reader. + +### 2015-04-03 + +The opCodec was copied yesterday to lzma2. opCodec has a high +number of dependencies on other files in lzma2. Therefore I had to copy +almost all files from lzma. + +### 2015-03-31 + +Removed only a TODO item. + +However, in Francesco Campoy's presentation "Go for Javaneros +(Javaïstes?)" there is the idea that using an embedded field E, all the +methods of E will be defined on T.
If E is an interface, T satisfies E. + + + +I have never used this, but it seems to be a cool idea. + +### 2015-03-30 + +Finished the type writerDict and wrote a simple test. + +### 2015-03-25 + +I started to implement the writerDict. + +### 2015-03-24 + +After thinking long about the LZMA2 code and several false starts, I +now have a plan to create a self-sufficient lzma2 package that supports +the classic LZMA format as well as LZMA2. The core idea is to support a +baseReader and baseWriter type that support the basic LZMA stream +without any headers. Both types must support the reuse of dictionaries +and the opCodec. + +### 2015-01-10 + +1. Implemented simple lzmago tool +2. Tested tool against large 4.4 GB file + * compression worked correctly; tested decompression with lzma + * decompression hits a full buffer condition +3. Fixed a bug in the compressor and wrote a test for it +4. Executed full cycle for 4.4 GB file; performance can be improved ;-) + +### 2015-01-11 + +* Release v0.2 because of the working LZMA encoder and decoder diff --git a/tools/vendor/github.com/ulikunitz/xz/bits.go b/tools/vendor/github.com/ulikunitz/xz/bits.go new file mode 100644 index 000000000..b30f1ec97 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/bits.go @@ -0,0 +1,79 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "io" +) + +// putUint32LE puts the little-endian representation of x into the first +// four bytes of p. +func putUint32LE(p []byte, x uint32) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) +} + +// putUint64LE puts the little-endian representation of x into the first +// eight bytes of p. +func putUint64LE(p []byte, x uint64) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) + p[4] = byte(x >> 32) + p[5] = byte(x >> 40) + p[6] = byte(x >> 48) + p[7] = byte(x >> 56) +} + +// uint32LE converts a little endian representation to an uint32 value. +func uint32LE(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | + uint32(p[3])<<24 +} + +// putUvarint puts a uvarint representation of x into the byte slice. +func putUvarint(p []byte, x uint64) int { + i := 0 + for x >= 0x80 { + p[i] = byte(x) | 0x80 + x >>= 7 + i++ + } + p[i] = byte(x) + return i + 1 +} + +// errOverflowU64 indicates an overflow of the 64-bit unsigned integer. +var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") + +// readUvarint reads a uvarint from the given byte reader. +func readUvarint(r io.ByteReader) (x uint64, n int, err error) { + const maxUvarintLen = 10 + + var s uint + i := 0 + for { + b, err := r.ReadByte() + if err != nil { + return x, i, err + } + i++ + if i > maxUvarintLen { + return x, i, errOverflowU64 + } + if b < 0x80 { + if i == maxUvarintLen && b > 1 { + return x, i, errOverflowU64 + } + return x | uint64(b)<<s, i, nil + } + x |= uint64(b&0x7f) << s + s += 7 + } +} diff --git a/tools/vendor/github.com/ulikunitz/xz/format.go b/tools/vendor/github.com/ulikunitz/xz/format.go new file mode 100644 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/format.go +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// allZeros checks whether the given byte slice contains only zeros. +func allZeros(data []byte) bool { + for _, c := range data { + if c != 0 { + return false + } + } + return true +} + +// padLen returns the length of the padding required for the given +// argument. +func padLen(n int64) int { + k := int(n % 4) + if k > 0 { + k = 4 - k + } + return k +} + +/*** Header ***/ + +// headerMagic stores the magic bytes for the header. +var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00} + +// HeaderLen provides the length of the xz file header. +const HeaderLen = 12 + +// Constants for the checksum methods supported by xz. +const ( + None byte = 0x0 + CRC32 byte = 0x1 + CRC64 byte = 0x4 + SHA256 byte = 0xa +) + +// errInvalidFlags indicates that flags are invalid.
+var errInvalidFlags = errors.New("xz: invalid flags") + +// verifyFlags returns the error errInvalidFlags if the value is +// invalid. +func verifyFlags(flags byte) error { + switch flags { + case None, CRC32, CRC64, SHA256: + return nil + default: + return errInvalidFlags + } +} + +// flagstrings maps flag values to strings. +var flagstrings = map[byte]string{ + None: "None", + CRC32: "CRC-32", + CRC64: "CRC-64", + SHA256: "SHA-256", +} + +// flagString returns the string representation for the given flags. +func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case None: + newHash = newNoneHash + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. +var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. +func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. 
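+// +// Layout written below: bytes 0-3 hold a CRC32 over bytes 4-9, bytes 4-7 +// the backward size ((indexSize / 4) - 1, little endian), byte 8 stays +// zero, byte 9 holds the stream flags and bytes 10-11 the magic 'YZ'.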
+func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. 
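+// Fields that are not present are reported as -1, the convention +// blockHeader uses for absent sizes.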
+func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
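+// +// Layout written below: a size byte (total length / 4 - 1), a flags byte +// (bits 0-1 encode the filter count minus one, 0x40 and 0x80 mark the +// optional sizes), the optional uvarint sizes, the marshalled filters, +// zero padding to a multiple of four bytes and a trailing CRC32 over +// everything before it.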
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must be set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // CRC placeholder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // filter id + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record.
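+// A record is encoded as two uvarints: the unpadded size followed by +// the uncompressed size.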
+func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
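+// +// The index consists of the indicator byte 0x00 (already consumed here, +// but still fed into the checksum), a uvarint record count, the records +// themselves, zero padding to four-byte alignment and a little-endian +// CRC32 over all of it.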
+func readIndexBody(r io.Reader, expectedRecordLen int) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + if recLen != expectedRecordLen { + return nil, n, fmt.Errorf( + "xz: index length is %d; want %d", + recLen, expectedRecordLen) + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/tools/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/tools/vendor/github.com/ulikunitz/xz/fox-check-none.xz new file mode 100644 index 0000000000000000000000000000000000000000..46043f7dc89b610dc3badb9db3426620c4c97462 GIT binary patch literal 96 zcmexsUKJ6=z`*cd=%ynRgCe6CkX@qxbTK1?PDnLRM*R tL9s%9S!$6&2~avGv8qxbB|lw{3#g5Ofzej?!NQIFY(?{`7{LOOQ2>-O93KDx literal 0 HcmV?d00001 diff --git a/tools/vendor/github.com/ulikunitz/xz/fox.xz b/tools/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 0000000000000000000000000000000000000000..4b820bd5a16e83fe5db4fb315639a4337f862483 GIT binary patch literal 104 zcmexsUKJ6=z`*kC+7>q^21Q0O1_p)_{ill=8FWH2QWXkIGn2Cwl8W-n^AytZD-^Oy za|?dFO$zmVVdxt0+m!4eq- E0K@hlng9R* literal 0 HcmV?d00001 diff --git a/tools/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/tools/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 000000000..dae159db5 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to the right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete hash +// value becomes available after at least Len() bytes have been hashed.
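+// +// The rolling scheme below works on table values: every byte is mapped +// through the fixed hash table, and h is the xor of these values, each +// rotated right once per later byte. A full window removes the oldest +// contribution, ror(table[oldest], Len()-1), before rotating and mixing +// in the new byte.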
+func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. +var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 
0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/tools/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/tools/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 000000000..b4cf8b75e --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/tools/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/tools/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 000000000..5322342ee --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. 
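+// (This is the Rabin-Karp constant: RollByte below maintains +// h = p[0]*A^n + p[1]*A^(n-1) + ... + p[n-1]*A (mod 2^64) and rolls the +// window by subtracting the oldest term p[0]*A^n, using aOldest = A^n, +// then adding the new byte and multiplying by A.)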
+const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will be +// used. +func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. +func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/tools/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/tools/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 000000000..a98983356 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after RollByte has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/tools/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/tools/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 000000000..f4627ea11 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,456 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows disabling +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. The package also has a predefined +// 'standard' Logger, accessible through the helper functions +// Print[f|ln], Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln], +// which are easier to use than creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags.
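+// +// A minimal usage sketch (everything named here is defined in this +// package; Lstdflags includes Lnodebug, so the Debugf call below is +// suppressed): +// +// xlog.SetFlags(xlog.Lstdflags) +// xlog.Warnf("%d warnings", 2) // printed to standard error +// xlog.Debugf("%d details", 3) // suppressed by Lnodebug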
+// +// The Fatal functions call os.Exit(1) after the message is output, +// unless suppressed by the flags. The Panic functions call panic +// after writing the log message, unless suppressed. +package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation, if not suppressed, +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative width will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...)
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) + } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) 
+ s := fmt.Sprintf(format, v...)
+ panic(s)
+}
+
+// Panicln prints the message like Println and calls panic. The printing
+// might be suppressed by the flag Lnopanic.
+func (l *Logger) Panicln(v ...interface{}) {
+ l.Outputln(2, Lnopanic, v...)
+ s := fmt.Sprintln(v...)
+ panic(s)
+}
+
+// Panicln prints the message like Println and calls panic. The printing
+// might be suppressed by the flag Lnopanic.
+func Panicln(v ...interface{}) {
+ std.Outputln(2, Lnopanic, v...)
+ s := fmt.Sprintln(v...)
+ panic(s)
+}
+
+// Fatal prints the message like Print and calls os.Exit(1). The
+// printing might be suppressed by the flag Lnofatal.
+func (l *Logger) Fatal(v ...interface{}) {
+ l.Output(2, Lnofatal, v...)
+ os.Exit(1)
+}
+
+// Fatal prints the message like Print and calls os.Exit(1). The
+// printing might be suppressed by the flag Lnofatal.
+func Fatal(v ...interface{}) {
+ std.Output(2, Lnofatal, v...)
+ os.Exit(1)
+}
+
+// Fatalf prints the message like Printf and calls os.Exit(1). The
+// printing might be suppressed by the flag Lnofatal.
+func (l *Logger) Fatalf(format string, v ...interface{}) {
+ l.Outputf(2, Lnofatal, format, v...)
+ os.Exit(1)
+}
+
+// Fatalf prints the message like Printf and calls os.Exit(1). The
+// printing might be suppressed by the flag Lnofatal.
+func Fatalf(format string, v ...interface{}) {
+ std.Outputf(2, Lnofatal, format, v...)
+ os.Exit(1)
+}
+
+// Fatalln prints the message like Println and calls os.Exit(1). The
+// printing might be suppressed by the flag Lnofatal.
+func (l *Logger) Fatalln(v ...interface{}) {
+ l.Outputln(2, Lnofatal, v...)
+ os.Exit(1)
+}
+
+// Fatalln prints the message like Println and calls os.Exit(1). The
+// printing might be suppressed by the flag Lnofatal.
+func Fatalln(v ...interface{}) {
+ std.Outputln(2, Lnofatal, v...)
+ os.Exit(1)
+}
+
+// Warn prints the message like Print. The printing might be suppressed
+// by the flag Lnowarn.
+func (l *Logger) Warn(v ...interface{}) {
+ l.Output(2, Lnowarn, v...)
+}
+
+// Warn prints the message like Print. The printing might be suppressed
+// by the flag Lnowarn.
+func Warn(v ...interface{}) {
+ std.Output(2, Lnowarn, v...)
+}
+
+// Warnf prints the message like Printf. The printing might be suppressed
+// by the flag Lnowarn.
+func (l *Logger) Warnf(format string, v ...interface{}) {
+ l.Outputf(2, Lnowarn, format, v...)
+}
+
+// Warnf prints the message like Printf. The printing might be suppressed
+// by the flag Lnowarn.
+func Warnf(format string, v ...interface{}) {
+ std.Outputf(2, Lnowarn, format, v...)
+}
+
+// Warnln prints the message like Println. The printing might be suppressed
+// by the flag Lnowarn.
+func (l *Logger) Warnln(v ...interface{}) {
+ l.Outputln(2, Lnowarn, v...)
+}
+
+// Warnln prints the message like Println. The printing might be suppressed
+// by the flag Lnowarn.
+func Warnln(v ...interface{}) {
+ std.Outputln(2, Lnowarn, v...)
+}
+
+// Print prints the message like fmt.Print. The printing might be suppressed
+// by the flag Lnoprint.
+func (l *Logger) Print(v ...interface{}) {
+ l.Output(2, Lnoprint, v...)
+}
+
+// Print prints the message like fmt.Print. The printing might be suppressed
+// by the flag Lnoprint.
+func Print(v ...interface{}) {
+ std.Output(2, Lnoprint, v...)
+}
+
+// Printf prints the message like fmt.Printf. The printing might be suppressed
+// by the flag Lnoprint.
+func (l *Logger) Printf(format string, v ...interface{}) {
+ l.Outputf(2, Lnoprint, format, v...)
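+ // Editor's note: calldepth 2 plus the frame added inside Outputf makes
+ // runtime.Caller skip output, Outputf and Printf itself, so the
+ // reported file:line is the caller of Printf.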
+} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/tools/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 000000000..2b39da6f7 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,522 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "unicode" +) + +// node represents a node in the binary tree. 
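+// Editor's note: p, l and r are indices into binTree.node rather than
+// pointers; the sentinel null (defined below) marks an absent link.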
+type node struct {
+ // x is the search value
+ x uint32
+ // p parent node
+ p uint32
+ // l left child
+ l uint32
+ // r right child
+ r uint32
+}
+
+// wordLen is the number of bytes represented by the x field of a node.
+const wordLen = 4
+
+// binTree supports the identification of the next operation based on a
+// binary tree.
+//
+// Nodes will be identified by their index into the ring buffer.
+type binTree struct {
+ dict *encoderDict
+ // ring buffer of nodes
+ node []node
+ // absolute offset of the entry for the next node; the position
+ // is 4 bytes larger.
+ hoff int64
+ // front position in the node ring buffer
+ front uint32
+ // index of the root node
+ root uint32
+ // current x value
+ x uint32
+ // preallocated array
+ data []byte
+}
+
+// null represents the nonexistent index. We can't use zero because it
+// would always exist or we would need to decrease the index for each
+// reference.
+const null uint32 = 1<<32 - 1
+
+// newBinTree initializes the binTree structure. The capacity defines
+// the size of the buffer and the maximum distance for which matches
+// will be found.
+func newBinTree(capacity int) (t *binTree, err error) {
+ if capacity < 1 {
+ return nil, errors.New(
+ "newBinTree: capacity must be larger than zero")
+ }
+ if int64(capacity) >= int64(null) {
+ return nil, errors.New(
+ "newBinTree: capacity must be less than 2^32-1")
+ }
+ t = &binTree{
+ node: make([]node, capacity),
+ hoff: -int64(wordLen),
+ root: null,
+ data: make([]byte, maxMatchLen),
+ }
+ return t, nil
+}
+
+func (t *binTree) SetDict(d *encoderDict) { t.dict = d }
+
+// WriteByte writes a single byte into the binary tree.
+func (t *binTree) WriteByte(c byte) error {
+ t.x = (t.x << 8) | uint32(c)
+ t.hoff++
+ if t.hoff < 0 {
+ return nil
+ }
+ v := t.front
+ if int64(v) < t.hoff {
+ // We are overwriting old nodes stored in the tree.
+ t.remove(v)
+ }
+ t.node[v].x = t.x
+ t.add(v)
+ t.front++
+ if int64(t.front) >= int64(len(t.node)) {
+ t.front = 0
+ }
+ return nil
+}
+
+// Write writes a sequence of bytes into the binTree structure.
+func (t *binTree) Write(p []byte) (n int, err error) {
+ for _, c := range p {
+ t.WriteByte(c)
+ }
+ return len(p), nil
+}
+
+// add puts the node v into the tree. The node must not be part of the
+// tree before.
+func (t *binTree) add(v uint32) {
+ vn := &t.node[v]
+ // Set left and right to null indices.
+ vn.l, vn.r = null, null
+ // If the binary tree is empty make v the root.
+ if t.root == null {
+ t.root = v
+ vn.p = null
+ return
+ }
+ x := vn.x
+ p := t.root
+ // Search for the right leaf link and add the new node.
+ for {
+ pn := &t.node[p]
+ if x <= pn.x {
+ if pn.l == null {
+ pn.l = v
+ vn.p = p
+ return
+ }
+ p = pn.l
+ } else {
+ if pn.r == null {
+ pn.r = v
+ vn.p = p
+ return
+ }
+ p = pn.r
+ }
+ }
+}
+
+// parent returns the parent node index of v and the pointer to the
+// link pointing to v in the parent.
+func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
+ if t.root == v {
+ return null, &t.root
+ }
+ p = t.node[v].p
+ if t.node[p].l == v {
+ ptr = &t.node[p].l
+ } else {
+ ptr = &t.node[p].r
+ }
+ return
+}
+
+// Remove node v.
+func (t *binTree) remove(v uint32) {
+ vn := &t.node[v]
+ p, ptr := t.parent(v)
+ l, r := vn.l, vn.r
+ if l == null {
+ // Move the right child up.
+ *ptr = r
+ if r != null {
+ t.node[r].p = p
+ }
+ return
+ }
+ if r == null {
+ // Move the left child up.
+ *ptr = l
+ t.node[l].p = p
+ return
+ }
+
+ // Search the in-order predecessor u.
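+ // Editor's note: this is the standard BST deletion for a node with
+ // two children: locate the in-order predecessor (the maximum of the
+ // left subtree) and splice it into v's place.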
+ un := &t.node[l]
+ ur := un.r
+ if ur == null {
+ // The in-order predecessor is l. Move it up.
+ un.r = r
+ t.node[r].p = l
+ un.p = p
+ *ptr = l
+ return
+ }
+ var u uint32
+ for {
+ // Look for the max value in the tree where l is root.
+ u = ur
+ ur = t.node[u].r
+ if ur == null {
+ break
+ }
+ }
+ // replace u with ul
+ un = &t.node[u]
+ ul := un.l
+ up := un.p
+ t.node[up].r = ul
+ if ul != null {
+ t.node[ul].p = up
+ }
+
+ // replace v by u
+ un.l, un.r = l, r
+ t.node[l].p = u
+ t.node[r].p = u
+ *ptr = u
+ un.p = p
+}
+
+// search looks for the node that has the value x or for the nodes that
+// brace it. The node highest in the tree with the value x will be
+// returned. All other nodes with the same value live in the left
+// subtree of the returned node.
+func (t *binTree) search(v uint32, x uint32) (a, b uint32) {
+ a, b = null, null
+ if v == null {
+ return
+ }
+ for {
+ vn := &t.node[v]
+ if x <= vn.x {
+ if x == vn.x {
+ return v, v
+ }
+ b = v
+ if vn.l == null {
+ return
+ }
+ v = vn.l
+ } else {
+ a = v
+ if vn.r == null {
+ return
+ }
+ v = vn.r
+ }
+ }
+}
+
+// max returns the node with the maximum value in the subtree with v as
+// root.
+func (t *binTree) max(v uint32) uint32 {
+ if v == null {
+ return null
+ }
+ for {
+ r := t.node[v].r
+ if r == null {
+ return v
+ }
+ v = r
+ }
+}
+
+// min returns the node with the minimum value in the subtree with v as
+// root.
+func (t *binTree) min(v uint32) uint32 {
+ if v == null {
+ return null
+ }
+ for {
+ l := t.node[v].l
+ if l == null {
+ return v
+ }
+ v = l
+ }
+}
+
+// pred returns the in-order predecessor of node v.
+func (t *binTree) pred(v uint32) uint32 {
+ if v == null {
+ return null
+ }
+ u := t.max(t.node[v].l)
+ if u != null {
+ return u
+ }
+ for {
+ p := t.node[v].p
+ if p == null {
+ return null
+ }
+ if t.node[p].r == v {
+ return p
+ }
+ v = p
+ }
+}
+
+// succ returns the in-order successor of node v.
+func (t *binTree) succ(v uint32) uint32 {
+ if v == null {
+ return null
+ }
+ u := t.min(t.node[v].r)
+ if u != null {
+ return u
+ }
+ for {
+ p := t.node[v].p
+ if p == null {
+ return null
+ }
+ if t.node[p].l == v {
+ return p
+ }
+ v = p
+ }
+}
+
+// xval converts the first four bytes of a into a 32-bit unsigned
+// integer in big-endian order.
+func xval(a []byte) uint32 {
+ var x uint32
+ switch len(a) {
+ default:
+ x |= uint32(a[3])
+ fallthrough
+ case 3:
+ x |= uint32(a[2]) << 8
+ fallthrough
+ case 2:
+ x |= uint32(a[1]) << 16
+ fallthrough
+ case 1:
+ x |= uint32(a[0]) << 24
+ case 0:
+ }
+ return x
+}
+
+// dumpX converts the value x into a four-letter string.
+func dumpX(x uint32) string {
+ a := make([]byte, 4)
+ for i := 0; i < 4; i++ {
+ c := byte(x >> uint((3-i)*8))
+ if unicode.IsGraphic(rune(c)) {
+ a[i] = c
+ } else {
+ a[i] = '.'
+ }
+ }
+ return string(a)
+}
+
+/*
+// dumpNode writes a representation of the node v into the io.Writer.
+func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
+ if v == null {
+ return
+ }
+
+ vn := &t.node[v]
+
+ t.dumpNode(w, vn.r, indent+2)
+
+ for i := 0; i < indent; i++ {
+ fmt.Fprint(w, " ")
+ }
+ if vn.p == null {
+ fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x))
+ } else {
+ fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p)
+ }
+
+ t.dumpNode(w, vn.l, indent+2)
+}
+
+// dump prints a representation of the binary tree into the writer.
+func (t *binTree) dump(w io.Writer) error {
+ bw := bufio.NewWriter(w)
+ t.dumpNode(bw, t.root, 0)
+ return bw.Flush()
+}
+*/
+
+func (t *binTree) distance(v uint32) int {
+ dist := int(t.front) - int(v)
+ if dist <= 0 {
+ dist += len(t.node)
+ }
+ return dist
+}
+
+type matchParams struct {
+ rep [4]uint32
+ // length when match will be accepted
+ nAccept int
+ // nodes to check
+ check int
+ // finish if length gets shorter
+ stopShorter bool
+}
+
+func (t *binTree) match(m match, distIter func() (int, bool), p matchParams,
+) (r match, checked int, accepted bool) {
+ buf := &t.dict.buf
+ for {
+ if checked >= p.check {
+ return m, checked, true
+ }
+ dist, ok := distIter()
+ if !ok {
+ return m, checked, false
+ }
+ checked++
+ if m.n > 0 {
+ i := buf.rear - dist + m.n - 1
+ if i < 0 {
+ i += len(buf.data)
+ } else if i >= len(buf.data) {
+ i -= len(buf.data)
+ }
+ if buf.data[i] != t.data[m.n-1] {
+ if p.stopShorter {
+ return m, checked, false
+ }
+ continue
+ }
+ }
+ n := buf.matchLen(dist, t.data)
+ switch n {
+ case 0:
+ if p.stopShorter {
+ return m, checked, false
+ }
+ continue
+ case 1:
+ if uint32(dist-minDistance) != p.rep[0] {
+ continue
+ }
+ }
+ if n < m.n || (n == m.n && int64(dist) >= m.distance) {
+ continue
+ }
+ m = match{int64(dist), n}
+ if n >= p.nAccept {
+ return m, checked, true
+ }
+ }
+}
+
+func (t *binTree) NextOp(rep [4]uint32) operation {
+ // retrieve maxMatchLen data
+ n, _ := t.dict.buf.Peek(t.data[:maxMatchLen])
+ if n == 0 {
+ panic("no data in buffer")
+ }
+ t.data = t.data[:n]
+
+ var (
+ m match
+ x, u, v uint32
+ iterPred, iterSucc func() (int, bool)
+ )
+ p := matchParams{
+ rep: rep,
+ nAccept: maxMatchLen,
+ check: 32,
+ }
+ i := 4
+ iterSmall := func() (dist int, ok bool) {
+ i--
+ if i <= 0 {
+ return 0, false
+ }
+ return i, true
+ }
+ m, checked, accepted := t.match(m, iterSmall, p)
+ if accepted {
+ goto end
+ }
+ p.check -= checked
+ x = xval(t.data)
+ u, v = t.search(t.root, x)
+ if u == v && len(t.data) == 4 {
+ iter := func() (dist int, ok bool) {
+ if u == null {
+ return 0, false
+ }
+ dist = t.distance(u)
+ u, v = t.search(t.node[u].l, x)
+ if u != v {
+ u = null
+ }
+ return dist, true
+ }
+ m, _, _ = t.match(m, iter, p)
+ goto end
+ }
+ p.stopShorter = true
+ iterSucc = func() (dist int, ok bool) {
+ if v == null {
+ return 0, false
+ }
+ dist = t.distance(v)
+ v = t.succ(v)
+ return dist, true
+ }
+ m, checked, accepted = t.match(m, iterSucc, p)
+ if accepted {
+ goto end
+ }
+ p.check -= checked
+ iterPred = func() (dist int, ok bool) {
+ if u == null {
+ return 0, false
+ }
+ dist = t.distance(u)
+ u = t.pred(u)
+ return dist, true
+ }
+ m, _, _ = t.match(m, iterPred, p)
+end:
+ if m.n == 0 {
+ return lit{t.data[0]}
+ }
+ return m
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/tools/vendor/github.com/ulikunitz/xz/lzma/bitops.go
new file mode 100644
index 000000000..201091709
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/bitops.go
@@ -0,0 +1,47 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+/* Naming conventions follow the CodeReviewComments in the Go Wiki. */
+
+// ntz32Const is used by the functions ntz32 and nlz32.
+const ntz32Const = 0x04d7651f
+
+// ntz32Table is a helper table for the de Bruijn algorithm by Danny Dubé.
+// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26.
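+// Editor's note: the de Bruijn trick isolates the lowest set bit with
+// x&-x, multiplies by ntz32Const so that the five leading bits of the
+// product enumerate all 32 bit positions, and uses those five bits as
+// the index into this table.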
+var ntz32Table = [32]int8{
+ 0, 1, 2, 24, 3, 19, 6, 25,
+ 22, 4, 20, 10, 16, 7, 12, 26,
+ 31, 23, 18, 5, 21, 9, 15, 11,
+ 30, 17, 8, 14, 29, 13, 28, 27,
+}
+
+/*
+// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
+func ntz32(x uint32) int {
+ if x == 0 {
+ return 32
+ }
+ x = (x & -x) * ntz32Const
+ return int(ntz32Table[x>>27])
+}
+*/
+
+// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
+func nlz32(x uint32) int {
+ // Smear the leftmost bit to the right
+ x |= x >> 1
+ x |= x >> 2
+ x |= x >> 4
+ x |= x >> 8
+ x |= x >> 16
+ // Use ntz mechanism to calculate nlz.
+ x++
+ if x == 0 {
+ return 0
+ }
+ x *= ntz32Const
+ return 32 - int(ntz32Table[x>>27])
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/breader.go b/tools/vendor/github.com/ulikunitz/xz/lzma/breader.go
new file mode 100644
index 000000000..9dfdf28b2
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/breader.go
@@ -0,0 +1,39 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+ "io"
+)
+
+// breader provides the ReadByte function for a Reader. It doesn't read
+// more data from the reader than absolutely necessary.
+type breader struct {
+ io.Reader
+ // helper slice to save allocations
+ p []byte
+}
+
+// ByteReader converts an io.Reader into an io.ByteReader.
+func ByteReader(r io.Reader) io.ByteReader {
+ br, ok := r.(io.ByteReader)
+ if !ok {
+ return &breader{r, make([]byte, 1)}
+ }
+ return br
+}
+
+// ReadByte reads a single byte from the underlying reader.
+func (r *breader) ReadByte() (c byte, err error) {
+ n, err := r.Reader.Read(r.p)
+ if n < 1 {
+ if err == nil {
+ err = errors.New("breader.ReadByte: no data")
+ }
+ return 0, err
+ }
+ return r.p[0], nil
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/tools/vendor/github.com/ulikunitz/xz/lzma/buffer.go
new file mode 100644
index 000000000..af41d5b2d
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/buffer.go
@@ -0,0 +1,171 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+)
+
+// buffer provides a circular buffer of bytes. If the front index equals
+// the rear index the buffer is empty. As a consequence front cannot be
+// equal to rear for a full buffer. So a full buffer has a length that is
+// one byte less than the length of the data slice.
+type buffer struct {
+ data []byte
+ front int
+ rear int
+}
+
+// newBuffer creates a buffer with the given size.
+func newBuffer(size int) *buffer {
+ return &buffer{data: make([]byte, size+1)}
+}
+
+// Cap returns the capacity of the buffer.
+func (b *buffer) Cap() int {
+ return len(b.data) - 1
+}
+
+// Reset resets the buffer. The front and rear index are set to zero.
+func (b *buffer) Reset() {
+ b.front = 0
+ b.rear = 0
+}
+
+// Buffered returns the number of bytes buffered.
+func (b *buffer) Buffered() int {
+ delta := b.front - b.rear
+ if delta < 0 {
+ delta += len(b.data)
+ }
+ return delta
+}
+
+// Available returns the number of bytes available for writing.
+func (b *buffer) Available() int {
+ delta := b.rear - 1 - b.front
+ if delta < 0 {
+ delta += len(b.data)
+ }
+ return delta
+}
+
+// addIndex adds a non-negative integer to the index i and returns the
+// resulting index. The function takes care of wrapping the index as
+// well as potential overflow situations.
+func (b *buffer) addIndex(i int, n int) int {
+ // subtraction of len(b.data) prevents overflow
+ i += n - len(b.data)
+ if i < 0 {
+ i += len(b.data)
+ }
+ return i
+}
+
+// Read reads bytes from the buffer into p and returns the number of
+// bytes read. The function never returns an error but might return less
+// data than requested.
+func (b *buffer) Read(p []byte) (n int, err error) {
+ n, err = b.Peek(p)
+ b.rear = b.addIndex(b.rear, n)
+ return n, err
+}
+
+// Peek reads bytes from the buffer into p without changing the buffer.
+// Peek will never return an error but might return less data than
+// requested.
+func (b *buffer) Peek(p []byte) (n int, err error) {
+ m := b.Buffered()
+ n = len(p)
+ if m < n {
+ n = m
+ p = p[:n]
+ }
+ k := copy(p, b.data[b.rear:])
+ if k < n {
+ copy(p[k:], b.data)
+ }
+ return n, nil
+}
+
+// Discard skips the next n bytes to read from the buffer, returning the
+// number of bytes discarded.
+//
+// If Discard skips fewer than n bytes, it returns an error.
+func (b *buffer) Discard(n int) (discarded int, err error) {
+ if n < 0 {
+ return 0, errors.New("buffer.Discard: negative argument")
+ }
+ m := b.Buffered()
+ if m < n {
+ n = m
+ err = errors.New(
+ "buffer.Discard: discarded fewer bytes than requested")
+ }
+ b.rear = b.addIndex(b.rear, n)
+ return n, err
+}
+
+// ErrNoSpace indicates that there is insufficient space for the Write
+// operation.
+var ErrNoSpace = errors.New("insufficient space")
+
+// Write puts data into the buffer. If fewer bytes are written than
+// requested ErrNoSpace is returned.
+func (b *buffer) Write(p []byte) (n int, err error) {
+ m := b.Available()
+ n = len(p)
+ if m < n {
+ n = m
+ p = p[:m]
+ err = ErrNoSpace
+ }
+ k := copy(b.data[b.front:], p)
+ if k < n {
+ copy(b.data, p[k:])
+ }
+ b.front = b.addIndex(b.front, n)
+ return n, err
+}
+
+// WriteByte writes a single byte into the buffer. The error ErrNoSpace
+// is returned if no space for a single byte is available in the buffer.
+func (b *buffer) WriteByte(c byte) error {
+ if b.Available() < 1 {
+ return ErrNoSpace
+ }
+ b.data[b.front] = c
+ b.front = b.addIndex(b.front, 1)
+ return nil
+}
+
+// prefixLen returns the length of the common prefix of a and b.
+func prefixLen(a, b []byte) int {
+ if len(a) > len(b) {
+ a, b = b, a
+ }
+ for i, c := range a {
+ if b[i] != c {
+ return i
+ }
+ }
+ return len(a)
+}
+
+// matchLen returns the length of the common prefix for the given
+// distance from the rear and the byte slice p.
+func (b *buffer) matchLen(distance int, p []byte) int {
+ var n int
+ i := b.rear - distance
+ if i < 0 {
+ if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
+ return n
+ }
+ p = p[n:]
+ i = 0
+ }
+ n += prefixLen(p, b.data[i:])
+ return n
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/tools/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 000000000..f27e31a4a
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+ "io"
+)
+
+// ErrLimit indicates that the limit of the LimitedByteWriter has been
+// reached.
+var ErrLimit = errors.New("limit reached")
+
+// LimitedByteWriter provides a byte writer that can be written until a
+// limit is reached. 
The field N provides the number of remaining +// bytes. +type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/tools/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 000000000..3765484e6 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
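+//
+// Editor's sketch (not part of the vendored file) of the decision tree
+// the probability models below implement:
+//
+//  isMatch 0 -> literal
+//  isMatch 1, isRep 0 -> simple match with a newly coded distance
+//  isRep 1, isRepG0 0, isRepG0Long 0 -> short rep: length 1 at rep[0]
+//  isRepG0 0, isRepG0Long 1 -> rep match at rep[0]
+//  isRepG1 0 -> rep match at rep[1]; isRepG2 selects rep[2] or rep[3]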
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
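+//
+// Editor's note: the loop below only runs while at least maxMatchLen
+// bytes can still be written into the dictionary, so any single decoded
+// operation is guaranteed to fit.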
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + // break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/tools/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 000000000..d5b814f0a --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,128 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is
+// non-positive or exceeds the current length of the dictionary the zero
+// byte is returned.
+func (d *decoderDict) byteAt(dist int) byte {
+ if !(0 < dist && dist <= d.dictLen()) {
+ return 0
+ }
+ i := d.buf.front - dist
+ if i < 0 {
+ i += len(d.buf.data)
+ }
+ return d.buf.data[i]
+}
+
+// writeMatch writes the match at the top of the dictionary. The given
+// distance must point into the current dictionary and the length must
+// not exceed the maximum length 273 supported in LZMA.
+//
+// The error value ErrNoSpace indicates that no space is available in
+// the dictionary for writing. You need to read from the dictionary
+// first.
+func (d *decoderDict) writeMatch(dist int64, length int) error {
+ if !(0 < dist && dist <= int64(d.dictLen())) {
+ return errors.New("writeMatch: distance out of range")
+ }
+ if !(0 < length && length <= maxMatchLen) {
+ return errors.New("writeMatch: length out of range")
+ }
+ if length > d.buf.Available() {
+ return ErrNoSpace
+ }
+ d.head += int64(length)
+
+ i := d.buf.front - int(dist)
+ if i < 0 {
+ i += len(d.buf.data)
+ }
+ for length > 0 {
+ var p []byte
+ if i >= d.buf.front {
+ p = d.buf.data[i:]
+ i = 0
+ } else {
+ p = d.buf.data[i:d.buf.front]
+ i = d.buf.front
+ }
+ if len(p) > length {
+ p = p[:length]
+ }
+ if _, err := d.buf.Write(p); err != nil {
+ panic(fmt.Errorf("d.buf.Write returned error %s", err))
+ }
+ length -= len(p)
+ }
+ return nil
+}
+
+// Write writes the given bytes into the dictionary and advances the
+// head.
+func (d *decoderDict) Write(p []byte) (n int, err error) {
+ n, err = d.buf.Write(p)
+ d.head += int64(n)
+ return n, err
+}
+
+// Available returns the number of available bytes for writing into the
+// decoder dictionary.
+func (d *decoderDict) Available() int { return d.buf.Available() }
+
+// Read reads data from the buffer contained in the decoder dictionary.
+func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/tools/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
new file mode 100644
index 000000000..76b713106
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
@@ -0,0 +1,38 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// directCodec allows the encoding and decoding of values with a fixed number
+// of bits. The number of bits must be in the range [1,32].
+type directCodec byte
+
+// Bits returns the number of bits supported by this codec.
+func (dc directCodec) Bits() int {
+ return int(dc)
+}
+
+// Encode uses the range encoder to encode a value with the fixed number of
+// bits. The most-significant bit is encoded first.
+func (dc directCodec) Encode(e *rangeEncoder, v uint32) error {
+ for i := int(dc) - 1; i >= 0; i-- {
+ if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Decode uses the range decoder to decode a value with the given number of
+// bits. The most-significant bit is decoded first.
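+//
+// Editor's example (not from the vendored source): with directCodec(3)
+// the decoded bit sequence 1, 0, 1 yields v = ((1<<1|0)<<1)|1 = 5.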
+func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+ for i := int(dc) - 1; i >= 0; i-- {
+ x, err := d.DirectDecodeBit()
+ if err != nil {
+ return 0, err
+ }
+ v = (v << 1) | x
+ }
+ return v, nil
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/tools/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
new file mode 100644
index 000000000..b447d8ec4
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
@@ -0,0 +1,140 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// Constants used by the distance codec.
+const (
+ // minimum supported distance
+ minDistance = 1
+ // maximum supported distance, value is used for the eos marker.
+ maxDistance = 1 << 32
+ // number of the supported len states
+ lenStates = 4
+ // start for the position models
+ startPosModel = 4
+ // first index with align bits support
+ endPosModel = 14
+ // bits for the position slots
+ posSlotBits = 6
+ // number of align bits
+ alignBits = 4
+)
+
+// distCodec provides encoding and decoding of distance values.
+type distCodec struct {
+ posSlotCodecs [lenStates]treeCodec
+ posModel [endPosModel - startPosModel]treeReverseCodec
+ alignCodec treeReverseCodec
+}
+
+// deepcopy initializes dc as a deep copy of the source.
+func (dc *distCodec) deepcopy(src *distCodec) {
+ if dc == src {
+ return
+ }
+ for i := range dc.posSlotCodecs {
+ dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i])
+ }
+ for i := range dc.posModel {
+ dc.posModel[i].deepcopy(&src.posModel[i])
+ }
+ dc.alignCodec.deepcopy(&src.alignCodec)
+}
+
+// init initializes the distance codec.
+func (dc *distCodec) init() {
+ for i := range dc.posSlotCodecs {
+ dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits)
+ }
+ for i := range dc.posModel {
+ posSlot := startPosModel + i
+ bits := (posSlot >> 1) - 1
+ dc.posModel[i] = makeTreeReverseCodec(bits)
+ }
+ dc.alignCodec = makeTreeReverseCodec(alignBits)
+}
+
+// lenState converts the value l to a supported lenState value.
+func lenState(l uint32) uint32 {
+ if l >= lenStates {
+ l = lenStates - 1
+ }
+ return l
+}
+
+// Encode encodes the distance using the parameter l. Dist can have values from
+// the full range of uint32 values. To get the distance offset the actual match
+// distance has to be decreased by 1. A distance offset of 0xffffffff (eos)
+// indicates the end of the stream.
+func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) {
+ // Compute the posSlot using nlz32
+ var posSlot uint32
+ var bits uint32
+ if dist < startPosModel {
+ posSlot = dist
+ } else {
+ bits = uint32(30 - nlz32(dist))
+ posSlot = startPosModel - 2 + (bits << 1)
+ posSlot += (dist >> uint(bits)) & 1
+ }
+
+ if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil {
+ return
+ }
+
+ switch {
+ case posSlot < startPosModel:
+ return nil
+ case posSlot < endPosModel:
+ tc := &dc.posModel[posSlot-startPosModel]
+ return tc.Encode(dist, e)
+ }
+ dic := directCodec(bits - alignBits)
+ if err = dic.Encode(e, dist>>alignBits); err != nil {
+ return
+ }
+ return dc.alignCodec.Encode(dist, e)
+}
+
+// Decode decodes the distance offset using the parameter l. The dist value
+// 0xffffffff (eos) indicates the end of the stream. Add one to the distance
+// offset to get the actual match distance.
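+//
+// Editor's worked example (not from the vendored source): the distance
+// offset 1000 is coded with posSlot 19, so bits = (19>>1)-1 = 8 and the
+// base below is (2|1)<<8 = 768; the remainder 232 = 14<<4 | 8 comes from
+// four direct bits (14) shifted by alignBits plus four reverse-coded
+// align bits (8).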
+func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/tools/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 000000000..e40938318 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. +func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. 
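+// Editor's note (assumption, callers are outside this excerpt): reusing
+// the dictionary while restarting the range coder is what chunked
+// framings such as LZMA2 need, so one encoder can emit several
+// independently range-coded segments over the same history.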
+func (e *encoder) Reopen(bw io.ByteWriter) error {
+ var err error
+ if e.re, err = newRangeEncoder(bw); err != nil {
+ return err
+ }
+ e.start = e.dict.Pos()
+ e.limit = false
+ return nil
+}
+
+// writeLiteral writes a literal into the LZMA stream.
+func (e *encoder) writeLiteral(l lit) error {
+ var err error
+ state, state2, _ := e.state.states(e.dict.Pos())
+ if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil {
+ return err
+ }
+ litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos())
+ match := e.dict.ByteAt(int(e.state.rep[0]) + 1)
+ err = e.state.litCodec.Encode(e.re, l.b, state, match, litState)
+ if err != nil {
+ return err
+ }
+ e.state.updateStateLiteral()
+ return nil
+}
+
+// iverson implements the Iverson operator as proposed by Donald Knuth in his
+// book Concrete Mathematics.
+func iverson(ok bool) uint32 {
+ if ok {
+ return 1
+ }
+ return 0
+}
+
+// writeMatch writes a match operation into the operation stream.
+func (e *encoder) writeMatch(m match) error {
+ var err error
+ if !(minDistance <= m.distance && m.distance <= maxDistance) {
+ panic(fmt.Errorf("match distance %d out of range", m.distance))
+ }
+ dist := uint32(m.distance - minDistance)
+ if !(minMatchLen <= m.n && m.n <= maxMatchLen) &&
+ !(dist == e.state.rep[0] && m.n == 1) {
+ panic(fmt.Errorf(
+ "match length %d out of range; dist %d rep[0] %d",
+ m.n, dist, e.state.rep[0]))
+ }
+ state, state2, posState := e.state.states(e.dict.Pos())
+ if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil {
+ return err
+ }
+ g := 0
+ for ; g < 4; g++ {
+ if e.state.rep[g] == dist {
+ break
+ }
+ }
+ b := iverson(g < 4)
+ if err = e.state.isRep[state].Encode(e.re, b); err != nil {
+ return err
+ }
+ n := uint32(m.n - minMatchLen)
+ if b == 0 {
+ // simple match
+ e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] =
+ e.state.rep[2], e.state.rep[1], e.state.rep[0], dist
+ e.state.updateStateMatch()
+ if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil {
+ return err
+ }
+ return e.state.distCodec.Encode(e.re, dist, n)
+ }
+ b = iverson(g != 0)
+ if err = e.state.isRepG0[state].Encode(e.re, b); err != nil {
+ return err
+ }
+ if b == 0 {
+ // g == 0
+ b = iverson(m.n != 1)
+ if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil {
+ return err
+ }
+ if b == 0 {
+ e.state.updateStateShortRep()
+ return nil
+ }
+ } else {
+ // g in {1,2,3}
+ b = iverson(g != 1)
+ if err = e.state.isRepG1[state].Encode(e.re, b); err != nil {
+ return err
+ }
+ if b == 1 {
+ // g in {2,3}
+ b = iverson(g != 2)
+ err = e.state.isRepG2[state].Encode(e.re, b)
+ if err != nil {
+ return err
+ }
+ if b == 1 {
+ e.state.rep[3] = e.state.rep[2]
+ }
+ e.state.rep[2] = e.state.rep[1]
+ }
+ e.state.rep[1] = e.state.rep[0]
+ e.state.rep[0] = dist
+ }
+ e.state.updateStateRep()
+ return e.state.repLenCodec.Encode(e.re, n, posState)
+}
+
+// writeOp writes a single operation to the range encoder. The function
+// checks whether there is enough space available to close the LZMA
+// stream.
+func (e *encoder) writeOp(op operation) error {
+ if e.re.Available() < int64(e.margin) {
+ return ErrLimit
+ }
+ switch x := op.(type) {
+ case lit:
+ return e.writeLiteral(x)
+ case match:
+ return e.writeMatch(x)
+ default:
+ panic("unexpected operation")
+ }
+}
+
+// compress compresses data from the dictionary buffer. If the flag all
+// is set, all data in the dictionary buffer will be compressed. The
+// function returns ErrLimit if the underlying writer has reached its
+// limit.
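+//
+// Editor's note: unless the all flag is set, maxMatchLen-1 bytes are
+// deliberately kept in the buffer so that the matcher always has a full
+// window of lookahead when it picks the next operation.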
+func (e *encoder) compress(flags compressFlags) error {
+ n := 0
+ if flags&all == 0 {
+ n = maxMatchLen - 1
+ }
+ d := e.dict
+ m := d.m
+ for d.Buffered() > n {
+ op := m.NextOp(e.state.rep)
+ if err := e.writeOp(op); err != nil {
+ return err
+ }
+ d.Discard(op.Len())
+ }
+ return nil
+}
+
+// eosMatch is a pseudo operation that indicates the end of the stream.
+var eosMatch = match{distance: maxDistance, n: minMatchLen}
+
+// Close terminates the LZMA stream. If requested the end-of-stream
+// marker will be written. If the byte writer limit has been or will be
+// reached during compression of the remaining data in the buffer the
+// LZMA stream will be closed and data will remain in the buffer.
+func (e *encoder) Close() error {
+ err := e.compress(all)
+ if err != nil && err != ErrLimit {
+ return err
+ }
+ if e.marker {
+ if err := e.writeMatch(eosMatch); err != nil {
+ return err
+ }
+ }
+ err = e.re.Close()
+ return err
+}
+
+// Compressed returns the number of bytes of the input data that have
+// been compressed.
+func (e *encoder) Compressed() int64 {
+ return e.dict.Pos() - e.start
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/tools/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
new file mode 100644
index 000000000..4b3916eab
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
@@ -0,0 +1,149 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// matcher is an interface that supports the identification of the next
+// operation.
+type matcher interface {
+ io.Writer
+ SetDict(d *encoderDict)
+ NextOp(rep [4]uint32) operation
+}
+
+// encoderDict provides the dictionary of the encoder. It includes an
+// additional buffer on top of the actual dictionary.
+type encoderDict struct {
+ buf buffer
+ m matcher
+ head int64
+ capacity int
+ // preallocated array
+ data [maxMatchLen]byte
+}
+
+// newEncoderDict creates the encoder dictionary. The argument bufSize
+// defines the size of the additional buffer.
+func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
+ if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
+ return nil, errors.New(
+ "lzma: dictionary capacity out of range")
+ }
+ if bufSize < 1 {
+ return nil, errors.New(
+ "lzma: buffer size must be larger than zero")
+ }
+ d = &encoderDict{
+ buf: *newBuffer(dictCap + bufSize),
+ capacity: dictCap,
+ m: m,
+ }
+ m.SetDict(d)
+ return d, nil
+}
+
+// Discard discards n bytes. Note that n must not be larger than
+// maxMatchLen.
+func (d *encoderDict) Discard(n int) {
+ p := d.data[:n]
+ k, _ := d.buf.Read(p)
+ if k < n {
+ panic(fmt.Errorf("lzma: can't discard %d bytes", n))
+ }
+ d.head += int64(n)
+ d.m.Write(p)
+}
+
+// Len returns the number of bytes of data available in the encoder
+// dictionary.
+func (d *encoderDict) Len() int {
+ n := d.buf.Available()
+ if int64(n) > d.head {
+ return int(d.head)
+ }
+ return n
+}
+
+// DictLen returns the actual length of data in the dictionary.
+func (d *encoderDict) DictLen() int {
+ if d.head < int64(d.capacity) {
+ return int(d.head)
+ }
+ return d.capacity
+}
+
+// Available returns the number of bytes that can be written by a
+// following Write call.
+func (d *encoderDict) Available() int {
+ return d.buf.Available() - d.DictLen()
+}
+
+// Write writes data into the dictionary buffer. 
Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/tools/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 0000000000000000000000000000000000000000..5edad633266eb5173a7c39761dc8b9e71efbfe80 GIT binary patch literal 67 zcma!LU}#|Y4+RWbQXGqzRntCtR~%i$`d{za%}WYWYfXMUl6~Q5_UjH?=5CuO0w(I5 UuQ#VXelz{mI_3ZW`W7$%0HEw6g#Z8m literal 0 HcmV?d00001 diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/tools/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 000000000..f66e9cdd9 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. 
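+//
+// Editor's sketch (not from the vendored file): t[h&mask] holds pos+1 of
+// the most recent word with hash h, and data[i] holds the delta back to
+// the previous occurrence; getMatches walks a chain by subtracting
+// deltas until a delta of 0 or a position before the buffer is hit.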
+type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. +func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. 
+func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
+ if t.hoff < 0 || len(positions) == 0 {
+ return 0
+ }
+ buffered := t.buffered()
+ tailPos := t.hoff + 1 - int64(buffered)
+ rear := t.front - buffered
+ if rear >= 0 {
+ rear -= len(t.data)
+ }
+ // get the slot for the hash
+ pos := t.t[h&t.mask] - 1
+ delta := pos - tailPos
+ for {
+ if delta < 0 {
+ return n
+ }
+ positions[n] = tailPos + delta
+ n++
+ if n >= len(positions) {
+ return n
+ }
+ i := rear + int(delta)
+ if i < 0 {
+ i += len(t.data)
+ }
+ u := t.data[i]
+ if u == 0 {
+ return n
+ }
+ delta -= int64(u)
+ }
+}
+
+// hash computes the rolling hash for the word stored in p. For correct
+// results its length must be equal to t.wordLen.
+func (t *hashTable) hash(p []byte) uint64 {
+ var h uint64
+ for _, b := range p {
+ h = t.hr.RollByte(b)
+ }
+ return h
+}
+
+// Matches fills the positions slice with potential matches. The
+// function returns the number of positions filled into positions. The
+// byte slice p must have the word length of the hash table.
+func (t *hashTable) Matches(p []byte, positions []int64) int {
+ if len(p) != t.wordLen {
+ panic(fmt.Errorf(
+ "byte slice must have length %d", t.wordLen))
+ }
+ h := t.hash(p)
+ return t.getMatches(h, positions)
+}
+
+// NextOp identifies the next operation using the hash table.
+//
+// TODO: Use all repetitions to find matches.
+func (t *hashTable) NextOp(rep [4]uint32) operation {
+ // get positions
+ data := t.dict.data[:maxMatchLen]
+ n, _ := t.dict.buf.Peek(data)
+ data = data[:n]
+ var p []int64
+ if n < t.wordLen {
+ p = t.p[:0]
+ } else {
+ p = t.p[:maxMatches]
+ n = t.Matches(data[:t.wordLen], p)
+ p = p[:n]
+ }
+
+ // convert positions into potential distances
+ head := t.dict.head
+ dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
+ for _, pos := range p {
+ dis := int(head - pos)
+ if dis > shortDists {
+ dists = append(dists, dis)
+ }
+ }
+
+ // check distances
+ var m match
+ dictLen := t.dict.DictLen()
+ for _, dist := range dists {
+ if dist > dictLen {
+ continue
+ }
+
+ // Here comes a trick. We are only interested in matches
+ // that are longer than the matches we have found
+ // before. So before we test the whole byte sequence at
+ // the given distance, we test the first byte that would
+ // make the match longer. If it doesn't match the byte
+ // to be matched, we don't need to care any longer.
+ i := t.dict.buf.rear - dist + m.n
+ if i < 0 {
+ i += len(t.dict.buf.data)
+ }
+ if t.dict.buf.data[i] != data[m.n] {
+ // We can't get a longer match. Jump to the next
+ // distance.
+ continue
+ }
+
+ n := t.dict.buf.matchLen(dist, data)
+ switch n {
+ case 0:
+ continue
+ case 1:
+ if uint32(dist-minDistance) != rep[0] {
+ continue
+ }
+ }
+ if n > m.n {
+ m = match{int64(dist), n}
+ if n == len(data) {
+ // No better match will be found.
+ break
+ }
+ }
+ }
+
+ if m.n == 0 {
+ return lit{data[0]}
+ }
+ return m
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/header.go b/tools/vendor/github.com/ulikunitz/xz/lzma/header.go
new file mode 100644
index 000000000..34aa097e1
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/header.go
@@ -0,0 +1,170 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+ "fmt"
+)
+
+// uint32LE reads a uint32 integer from a byte slice.
+func uint32LE(b []byte) uint32 {
+ x := uint32(b[3]) << 24
+ x |= uint32(b[2]) << 16
+ x |= uint32(b[1]) << 8
+ x |= uint32(b[0])
+ return x
+}
+
+// uint64LE converts the uint64 value stored as little endian to a uint64
+// value.
+func uint64LE(b []byte) uint64 {
+ x := uint64(b[7]) << 56
+ x |= uint64(b[6]) << 48
+ x |= uint64(b[5]) << 40
+ x |= uint64(b[4]) << 32
+ x |= uint64(b[3]) << 24
+ x |= uint64(b[2]) << 16
+ x |= uint64(b[1]) << 8
+ x |= uint64(b[0])
+ return x
+}
+
+// putUint32LE puts a uint32 integer into a byte slice that must have at least
+// a length of 4 bytes.
+func putUint32LE(b []byte, x uint32) {
+ b[0] = byte(x)
+ b[1] = byte(x >> 8)
+ b[2] = byte(x >> 16)
+ b[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the uint64 value into the byte slice as little endian
+// value. The byte slice b must have at least place for 8 bytes.
+func putUint64LE(b []byte, x uint64) {
+ b[0] = byte(x)
+ b[1] = byte(x >> 8)
+ b[2] = byte(x >> 16)
+ b[3] = byte(x >> 24)
+ b[4] = byte(x >> 32)
+ b[5] = byte(x >> 40)
+ b[6] = byte(x >> 48)
+ b[7] = byte(x >> 56)
+}
+
+// noHeaderSize defines the value of the length field in the LZMA header.
+const noHeaderSize uint64 = 1<<64 - 1
+
+// HeaderLen provides the length of the LZMA file header.
+const HeaderLen = 13
+
+// Header represents the Header of an LZMA file.
+type Header struct {
+ Properties Properties
+ DictSize uint32
+ // uncompressed Size; negative value if no Size is given
+ Size int64
+}
+
+// marshalBinary marshals the header.
+func (h *Header) marshalBinary() (data []byte, err error) {
+ if err = h.Properties.verify(); err != nil {
+ return nil, err
+ }
+ if !(h.DictSize <= MaxDictCap) {
+ return nil, fmt.Errorf("lzma: DictSize %d out of range",
+ h.DictSize)
+ }
+
+ data = make([]byte, 13)
+
+ // property byte
+ data[0] = h.Properties.Code()
+
+ // dictionary capacity
+ putUint32LE(data[1:5], uint32(h.DictSize))
+
+ // uncompressed size
+ var s uint64
+ if h.Size > 0 {
+ s = uint64(h.Size)
+ } else {
+ s = noHeaderSize
+ }
+ putUint64LE(data[5:], s)
+
+ return data, nil
+}
+
+// unmarshalBinary unmarshals the header.
+func (h *Header) unmarshalBinary(data []byte) error {
+ if len(data) != HeaderLen {
+ return errors.New("lzma.unmarshalBinary: data has wrong length")
+ }
+
+ // properties
+ var err error
+ if h.Properties, err = PropertiesForCode(data[0]); err != nil {
+ return err
+ }
+
+ // dictionary capacity
+ h.DictSize = uint32LE(data[1:])
+ if int(h.DictSize) < 0 {
+ return errors.New(
+ "LZMA header: dictionary capacity exceeds maximum " +
+ "integer")
+ }
+
+ // uncompressed size
+ s := uint64LE(data[5:])
+ if s == noHeaderSize {
+ h.Size = -1
+ } else {
+ h.Size = int64(s)
+ if h.Size < 0 {
+ return errors.New(
+ "LZMA header: uncompressed size " +
+ "out of int64 range")
+ }
+ }
+
+ return nil
+}
+
+// validDictSize checks whether the dictionary capacity is correct. This
+// is used to weed out wrong file headers.
+func validDictSize(dictcap int) bool {
+ if int64(dictcap) == MaxDictCap {
+ return true
+ }
+ for n := uint(10); n < 32; n++ {
+ if dictcap == 1<<n {
+ return true
+ }
+ if dictcap == 1<<n+1<<(n-1) {
+ return true
+ }
+ }
+ return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
+//
+// This function should be disregarded because there is no guarantee that
+// LZMA files follow the constraints.
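+//
+// A hedged usage sketch for probing a stream (r, io and error handling
+// are assumptions outside this file):
+//
+// buf := make([]byte, HeaderLen)
+// if _, err := io.ReadFull(r, buf); err == nil && ValidHeader(buf) {
+// // buf plausibly starts an LZMA stream.
+// }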
+func ValidHeader(data []byte) bool {
+ var h Header
+ if err := h.unmarshalBinary(data); err != nil {
+ return false
+ }
+ if !validDictSize(int(h.DictSize)) {
+ return false
+ }
+ return h.Size < 0 || h.Size <= 1<<38
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/header2.go b/tools/vendor/github.com/ulikunitz/xz/lzma/header2.go
new file mode 100644
index 000000000..081fc840b
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/header2.go
@@ -0,0 +1,398 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+const (
+ // maximum size of compressed data in a chunk
+ maxCompressed = 1 << 16
+ // maximum size of uncompressed data in a chunk
+ maxUncompressed = 1 << 21
+)
+
+// chunkType represents the type of an LZMA2 chunk. Note that this
+// value is an internal representation and not the actual encoding of an
+// LZMA2 chunk header.
+type chunkType byte
+
+// Possible values for the chunk type.
+const (
+ // end of stream
+ cEOS chunkType = iota
+ // uncompressed; reset dictionary
+ cUD
+ // uncompressed; no reset of dictionary
+ cU
+ // LZMA compressed; no reset
+ cL
+ // LZMA compressed; reset state
+ cLR
+ // LZMA compressed; reset state; new property value
+ cLRN
+ // LZMA compressed; reset state; new property value; reset dictionary
+ cLRND
+)
+
+// chunkTypeStrings provide a string representation for the chunk types.
+var chunkTypeStrings = [...]string{
+ cEOS: "EOS",
+ cU: "U",
+ cUD: "UD",
+ cL: "L",
+ cLR: "LR",
+ cLRN: "LRN",
+ cLRND: "LRND",
+}
+
+// String returns a string representation of the chunk type.
+func (c chunkType) String() string {
+ if !(cEOS <= c && c <= cLRND) {
+ return "unknown"
+ }
+ return chunkTypeStrings[c]
+}
+
+// The actual encodings of the chunk types in the header byte. Note that
+// the high uncompressed size bits are stored in the header byte
+// additionally.
+const (
+ hEOS = 0
+ hUD = 1
+ hU = 2
+ hL = 1 << 7
+ hLR = 1<<7 | 1<<5
+ hLRN = 1<<7 | 1<<6
+ hLRND = 1<<7 | 1<<6 | 1<<5
+)
+
+// errHeaderByte indicates an unsupported value for the chunk header
+// byte. This byte starts the variable-length chunk header.
+var errHeaderByte = errors.New("lzma: unsupported chunk header byte")
+
+// headerChunkType converts the header byte into a chunk type. It
+// ignores the uncompressed size bits in the chunk header byte.
+func headerChunkType(h byte) (c chunkType, err error) {
+ if h&hL == 0 {
+ // no compression
+ switch h {
+ case hEOS:
+ c = cEOS
+ case hUD:
+ c = cUD
+ case hU:
+ c = cU
+ default:
+ return 0, errHeaderByte
+ }
+ return
+ }
+ switch h & hLRND {
+ case hL:
+ c = cL
+ case hLR:
+ c = cLR
+ case hLRN:
+ c = cLRN
+ case hLRND:
+ c = cLRND
+ default:
+ return 0, errHeaderByte
+ }
+ return
+}
+
+// uncompressedHeaderLen provides the length of an uncompressed chunk
+// header.
+const uncompressedHeaderLen = 3
+
+// headerLen returns the length of the LZMA2 header for a given chunk
+// type.
+func headerLen(c chunkType) int {
+ switch c {
+ case cEOS:
+ return 1
+ case cU, cUD:
+ return uncompressedHeaderLen
+ case cL, cLR:
+ return 5
+ case cLRN, cLRND:
+ return 6
+ }
+ panic(fmt.Errorf("unsupported chunk type %d", c))
+}
+
+// chunkHeader represents the contents of a chunk header.
+type chunkHeader struct {
+ ctype chunkType
+ uncompressed uint32
+ compressed uint16
+ props Properties
+}
+
+// String returns a string representation of the chunk header.
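+//
+// For example, a cLRND chunk carrying 4096 uncompressed and 1024
+// compressed bytes with default properties prints as
+// "LRND 4095 1023 LC 3 LP 0 PB 2", since the fields hold the
+// size-minus-one values used on the wire.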
+func (h *chunkHeader) String() string {
+ return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed,
+ h.compressed, &h.props)
+}
+
+// UnmarshalBinary reads the content of the chunk header from the data
+// slice. The slice must have the correct length.
+func (h *chunkHeader) UnmarshalBinary(data []byte) error {
+ if len(data) == 0 {
+ return errors.New("no data")
+ }
+ c, err := headerChunkType(data[0])
+ if err != nil {
+ return err
+ }
+
+ n := headerLen(c)
+ if len(data) < n {
+ return errors.New("incomplete data")
+ }
+ if len(data) > n {
+ return errors.New("invalid data length")
+ }
+
+ *h = chunkHeader{ctype: c}
+ if c == cEOS {
+ return nil
+ }
+
+ h.uncompressed = uint32(uint16BE(data[1:3]))
+ if c <= cU {
+ return nil
+ }
+ h.uncompressed |= uint32(data[0]&^hLRND) << 16
+
+ h.compressed = uint16BE(data[3:5])
+ if c <= cLR {
+ return nil
+ }
+
+ h.props, err = PropertiesForCode(data[5])
+ return err
+}
+
+// MarshalBinary encodes the chunk header value. The function checks
+// whether the content of the chunk header is correct.
+func (h *chunkHeader) MarshalBinary() (data []byte, err error) {
+ if h.ctype > cLRND {
+ return nil, errors.New("invalid chunk type")
+ }
+ if err = h.props.verify(); err != nil {
+ return nil, err
+ }
+
+ data = make([]byte, headerLen(h.ctype))
+
+ switch h.ctype {
+ case cEOS:
+ return data, nil
+ case cUD:
+ data[0] = hUD
+ case cU:
+ data[0] = hU
+ case cL:
+ data[0] = hL
+ case cLR:
+ data[0] = hLR
+ case cLRN:
+ data[0] = hLRN
+ case cLRND:
+ data[0] = hLRND
+ }
+
+ putUint16BE(data[1:3], uint16(h.uncompressed))
+ if h.ctype <= cU {
+ return data, nil
+ }
+ data[0] |= byte(h.uncompressed>>16) &^ hLRND
+
+ putUint16BE(data[3:5], h.compressed)
+ if h.ctype <= cLR {
+ return data, nil
+ }
+
+ data[5] = h.props.Code()
+ return data, nil
+}
+
+// readChunkHeader reads the chunk header from the IO reader.
+func readChunkHeader(r io.Reader) (h *chunkHeader, err error) {
+ p := make([]byte, 1, 6)
+ if _, err = io.ReadFull(r, p); err != nil {
+ return
+ }
+ c, err := headerChunkType(p[0])
+ if err != nil {
+ return
+ }
+ p = p[:headerLen(c)]
+ if _, err = io.ReadFull(r, p[1:]); err != nil {
+ return
+ }
+ h = new(chunkHeader)
+ if err = h.UnmarshalBinary(p); err != nil {
+ return nil, err
+ }
+ return h, nil
+}
+
+// uint16BE converts a big-endian uint16 representation to a uint16
+// value.
+func uint16BE(p []byte) uint16 {
+ return uint16(p[0])<<8 | uint16(p[1])
+}
+
+// putUint16BE puts the big-endian uint16 representation into the given
+// slice.
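+//
+// For example, putUint16BE(p, 0x1234) sets p[0] to 0x12 and p[1] to
+// 0x34.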
+func putUint16BE(p []byte, x uint16) {
+ p[0] = byte(x >> 8)
+ p[1] = byte(x)
+}
+
+// chunkState is used to manage the state of the chunks.
+type chunkState byte
+
+// start and stop define the initial and terminating state of the chunk
+// state.
+const (
+ start chunkState = 'S'
+ stop chunkState = 'T'
+)
+
+// errors for the chunk state handling
+var (
+ errChunkType = errors.New("lzma: unexpected chunk type")
+ errState = errors.New("lzma: wrong chunk state")
+)
+
+// next transitions the state based on the chunk type input.
+func (c *chunkState) next(ctype chunkType) error {
+ switch *c {
+ // start state
+ case 'S':
+ switch ctype {
+ case cEOS:
+ *c = 'T'
+ case cUD:
+ *c = 'R'
+ case cLRND:
+ *c = 'L'
+ default:
+ return errChunkType
+ }
+ // normal LZMA mode
+ case 'L':
+ switch ctype {
+ case cEOS:
+ *c = 'T'
+ case cUD:
+ *c = 'R'
+ case cU:
+ *c = 'U'
+ case cL, cLR, cLRN, cLRND:
+ break
+ default:
+ return errChunkType
+ }
+ // reset required
+ case 'R':
+ switch ctype {
+ case cEOS:
+ *c = 'T'
+ case cUD, cU:
+ break
+ case cLRN, cLRND:
+ *c = 'L'
+ default:
+ return errChunkType
+ }
+ // uncompressed
+ case 'U':
+ switch ctype {
+ case cEOS:
+ *c = 'T'
+ case cUD:
+ *c = 'R'
+ case cU:
+ break
+ case cL, cLR, cLRN, cLRND:
+ *c = 'L'
+ default:
+ return errChunkType
+ }
+ // terminal state
+ case 'T':
+ return errChunkType
+ default:
+ return errState
+ }
+ return nil
+}
+
+// defaultChunkType returns the default chunk type for each chunk state.
+func (c chunkState) defaultChunkType() chunkType {
+ switch c {
+ case 'S':
+ return cLRND
+ case 'L', 'U':
+ return cL
+ case 'R':
+ return cLRN
+ default:
+ // no error
+ return cEOS
+ }
+}
+
+// maxDictCap defines the maximum dictionary capacity supported by the
+// LZMA2 dictionary capacity encoding.
+const maxDictCap = 1<<32 - 1
+
+// maxDictCapCode defines the maximum dictionary capacity code.
+const maxDictCapCode = 40
+
+// The function decodes the dictionary capacity byte, but doesn't check
+// that the given byte is in the correct range.
+func decodeDictCap(c byte) int64 {
+ return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
+}
+
+// DecodeDictCap decodes the encoded dictionary capacity. The function
+// returns an error if the code is out of range.
+func DecodeDictCap(c byte) (n int64, err error) {
+ if c >= maxDictCapCode {
+ if c == maxDictCapCode {
+ return maxDictCap, nil
+ }
+ return 0, errors.New("lzma: invalid dictionary size code")
+ }
+ return decodeDictCap(c), nil
+}
+
+// EncodeDictCap encodes a dictionary capacity. The function returns the
+// code for the capacity that is greater than or equal to n. If n exceeds
+// the maximum supported dictionary capacity, the maximum value is
+// returned.
+func EncodeDictCap(n int64) byte {
+ a, b := byte(0), byte(40)
+ for a < b {
+ c := a + (b-a)>>1
+ m := decodeDictCap(c)
+ if n <= m {
+ if n == m {
+ return c
+ }
+ b = c
+ } else {
+ a = c + 1
+ }
+ }
+ return a
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/tools/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
new file mode 100644
index 000000000..1ea5320a0
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
@@ -0,0 +1,115 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// maxPosBits defines the number of bits of the position value that are used
+// to compute the posState value.
The value is used to select the tree codec
+// for length encoding and decoding.
+const maxPosBits = 4
+
+// minMatchLen and maxMatchLen give the minimum and maximum values for
+// encoding and decoding length values. minMatchLen is also used as base
+// for the encoded length values.
+const (
+ minMatchLen = 2
+ maxMatchLen = minMatchLen + 16 + 256 - 1
+)
+
+// lengthCodec supports the encoding of the length value.
+type lengthCodec struct {
+ choice [2]prob
+ low [1 << maxPosBits]treeCodec
+ mid [1 << maxPosBits]treeCodec
+ high treeCodec
+}
+
+// deepcopy initializes the lc value as deep copy of the source value.
+func (lc *lengthCodec) deepcopy(src *lengthCodec) {
+ if lc == src {
+ return
+ }
+ lc.choice = src.choice
+ for i := range lc.low {
+ lc.low[i].deepcopy(&src.low[i])
+ }
+ for i := range lc.mid {
+ lc.mid[i].deepcopy(&src.mid[i])
+ }
+ lc.high.deepcopy(&src.high)
+}
+
+// init initializes a new length codec.
+func (lc *lengthCodec) init() {
+ for i := range lc.choice {
+ lc.choice[i] = probInit
+ }
+ for i := range lc.low {
+ lc.low[i] = makeTreeCodec(3)
+ }
+ for i := range lc.mid {
+ lc.mid[i] = makeTreeCodec(3)
+ }
+ lc.high = makeTreeCodec(8)
+}
+
+// Encode encodes the length offset. The length offset l can be computed
+// by subtracting minMatchLen (2) from the actual length.
+//
+// l = length - minMatchLen
+func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
+) (err error) {
+ if l > maxMatchLen-minMatchLen {
+ return errors.New("lengthCodec.Encode: l out of range")
+ }
+ if l < 8 {
+ if err = lc.choice[0].Encode(e, 0); err != nil {
+ return
+ }
+ return lc.low[posState].Encode(e, l)
+ }
+ if err = lc.choice[0].Encode(e, 1); err != nil {
+ return
+ }
+ if l < 16 {
+ if err = lc.choice[1].Encode(e, 0); err != nil {
+ return
+ }
+ return lc.mid[posState].Encode(e, l-8)
+ }
+ if err = lc.choice[1].Encode(e, 1); err != nil {
+ return
+ }
+ if err = lc.high.Encode(e, l-16); err != nil {
+ return
+ }
+ return nil
+}
+
+// Decode reads the length offset. Add minMatchLen to the length offset l
+// to compute the actual length.
+func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
+) (l uint32, err error) {
+ var b uint32
+ if b, err = lc.choice[0].Decode(d); err != nil {
+ return
+ }
+ if b == 0 {
+ l, err = lc.low[posState].Decode(d)
+ return
+ }
+ if b, err = lc.choice[1].Decode(d); err != nil {
+ return
+ }
+ if b == 0 {
+ l, err = lc.mid[posState].Decode(d)
+ l += 8
+ return
+ }
+ l, err = lc.high.Decode(d)
+ l += 16
+ return
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/tools/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
new file mode 100644
index 000000000..e4ef5fc59
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
@@ -0,0 +1,125 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// literalCodec supports the encoding of literals. It provides 768
+// probability values per literal state. The upper 512 probabilities are
+// used with the context of a match bit.
+type literalCodec struct {
+ probs []prob
+}
+
+// deepcopy initializes literal codec c as a deep copy of the source.
+func (c *literalCodec) deepcopy(src *literalCodec) {
+ if c == src {
+ return
+ }
+ c.probs = make([]prob, len(src.probs))
+ copy(c.probs, src.probs)
+}
+
+// init initializes the literal codec.
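+// The number of allocated probability values is 0x300 << (lc+lp); for
+// the default lc=3, lp=0 this is 0x300 << 3 = 6144 values.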
+func (c *literalCodec) init(lc, lp int) {
+ switch {
+ case !(minLC <= lc && lc <= maxLC):
+ panic("lc out of range")
+ case !(minLP <= lp && lp <= maxLP):
+ panic("lp out of range")
+ }
+ c.probs = make([]prob, 0x300<<uint(lc+lp))
+ for i := range c.probs {
+ c.probs[i] = probInit
+ }
+}
+
+// Encode encodes the byte s using a range encoder as well as the current
+// LZMA encoder state, a match byte and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, s byte,
+ state uint32, match byte, litState uint32,
+) (err error) {
+ k := litState * 0x300
+ probs := c.probs[k : k+0x300]
+ symbol := uint32(1)
+ r := uint32(s)
+ if state >= 7 {
+ m := uint32(match)
+ for {
+ matchBit := (m >> 7) & 1
+ m <<= 1
+ bit := (r >> 7) & 1
+ r <<= 1
+ i := ((1 + matchBit) << 8) | symbol
+ if err = probs[i].Encode(e, bit); err != nil {
+ return
+ }
+ symbol = (symbol << 1) | bit
+ if matchBit != bit {
+ break
+ }
+ if symbol >= 0x100 {
+ break
+ }
+ }
+ }
+ for symbol < 0x100 {
+ bit := (r >> 7) & 1
+ r <<= 1
+ if err = probs[symbol].Encode(e, bit); err != nil {
+ return
+ }
+ symbol = (symbol << 1) | bit
+ }
+ return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+ state uint32, match byte, litState uint32,
+) (s byte, err error) {
+ k := litState * 0x300
+ probs := c.probs[k : k+0x300]
+ symbol := uint32(1)
+ if state >= 7 {
+ m := uint32(match)
+ for {
+ matchBit := (m >> 7) & 1
+ m <<= 1
+ i := ((1 + matchBit) << 8) | symbol
+ bit, err := d.DecodeBit(&probs[i])
+ if err != nil {
+ return 0, err
+ }
+ symbol = (symbol << 1) | bit
+ if matchBit != bit {
+ break
+ }
+ if symbol >= 0x100 {
+ break
+ }
+ }
+ }
+ for symbol < 0x100 {
+ bit, err := d.DecodeBit(&probs[symbol])
+ if err != nil {
+ return 0, err
+ }
+ symbol = (symbol << 1) | bit
+ }
+ s = byte(symbol - 0x100)
+ return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+ minLC = 0
+ maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+ minLP = 0
+ maxLP = 4
+)
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/tools/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 000000000..02dfb8bf5
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+ HashTable4 MatchAlgorithm = iota
+ BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+ HashTable4: "HashTable4",
+ BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the Matcher.
+func (a MatchAlgorithm) String() string {
+ if s, ok := maStrings[a]; ok {
+ return s
+ }
+ return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+ "lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
+func (a MatchAlgorithm) verify() error {
+ if _, ok := maStrings[a]; !ok {
+ return errUnsupportedMatchAlgorithm
+ }
+ return nil
+}
+
+func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
+ switch a {
+ case HashTable4:
+ return newHashTable(dictCap, 4)
+ case BinaryTree:
+ return newBinTree(dictCap)
+ }
+ return nil, errUnsupportedMatchAlgorithm
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/operation.go b/tools/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 000000000..7b7eddc3d
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,55 @@
+// Copyright 2014-2022 Ulrich Kunitz.
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "fmt"
+ "unicode"
+)
+
+// operation represents an operation on the dictionary during encoding or
+// decoding.
+type operation interface {
+ Len() int
+}
+
+// match represents a match at the given distance with the given length.
+type match struct {
+ // supports all possible distance values, including the eos marker
+ distance int64
+ // length
+ n int
+}
+
+// Len returns the number of bytes matched.
+func (m match) Len() int {
+ return m.n
+}
+
+// String returns a string representation for the match.
+func (m match) String() string {
+ return fmt.Sprintf("M{%d,%d}", m.distance, m.n)
+}
+
+// lit represents a single byte literal.
+type lit struct {
+ b byte
+}
+
+// Len returns 1 for the single byte literal.
+func (l lit) Len() int {
+ return 1
+}
+
+// String returns a string representation for the literal.
+func (l lit) String() string {
+ var c byte
+ if unicode.IsPrint(rune(l.b)) {
+ c = l.b
+ } else {
+ c = '.'
+ }
+ return fmt.Sprintf("L{%c/%02x}", c, l.b)
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/prob.go b/tools/vendor/github.com/ulikunitz/xz/lzma/prob.go
new file mode 100644
index 000000000..2feccba11
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/prob.go
@@ -0,0 +1,53 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// movebits defines the number of bits used for the updates of probability
+// values.
+const movebits = 5
+
+// probbits defines the number of bits of a probability value.
+const probbits = 11
+
+// probInit defines 0.5 as initial value for prob values.
+const probInit prob = 1 << (probbits - 1)
+
+// Type prob represents probabilities. The type can also be used to encode and
+// decode single bits.
+type prob uint16
+
+// dec decreases the probability. The decrease is proportional to the
+// probability value.
+func (p *prob) dec() {
+ *p -= *p >> movebits
+}
+
+// inc increases the probability. The increase is proportional to the
+// difference of 1 and the probability value.
+func (p *prob) inc() {
+ *p += ((1 << probbits) - *p) >> movebits
+}
+
+// bound computes the new bound for a given range using the probability value.
+func (p prob) bound(r uint32) uint32 {
+ return (r >> probbits) * uint32(p)
+}
+
+// Bits returns 1. One is the number of bits that can be encoded or decoded
+// with a single prob value.
+func (p prob) Bits() int {
+ return 1
+}
+
+// Encode encodes the least-significant bit of v. Note that the p value will be
+// changed.
+func (p *prob) Encode(e *rangeEncoder, v uint32) error {
+ return e.EncodeBit(v, p)
+}
+
+// Decode decodes a single bit. Note that the p value will change.
+func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) {
+ return d.DecodeBit(p)
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/properties.go b/tools/vendor/github.com/ulikunitz/xz/lzma/properties.go
new file mode 100644
index 000000000..15b754ccb
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/properties.go
@@ -0,0 +1,69 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+ "fmt"
+)
+
+// maximum and minimum values for the LZMA properties.
+const (
+ minPB = 0
+ maxPB = 4
+)
+
+// maxPropertyCode is the maximum possible value of a properties code byte.
+const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1
+
+// Properties contains the parameters LC, LP and PB. The parameter LC
+// defines the number of literal context bits; parameter LP the number
+// of literal position bits and PB the number of position bits.
+type Properties struct {
+ LC int
+ LP int
+ PB int
+}
+
+// String returns the properties in a string representation.
+func (p *Properties) String() string {
+ return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB)
+}
+
+// PropertiesForCode converts a properties code byte into a Properties value.
+func PropertiesForCode(code byte) (p Properties, err error) {
+ if code > maxPropertyCode {
+ return p, errors.New("lzma: invalid properties code")
+ }
+ p.LC = int(code % 9)
+ code /= 9
+ p.LP = int(code % 5)
+ code /= 5
+ p.PB = int(code % 5)
+ return p, err
+}
+
+// verify checks the properties for correctness.
+func (p *Properties) verify() error {
+ if p == nil {
+ return errors.New("lzma: properties are nil")
+ }
+ if !(minLC <= p.LC && p.LC <= maxLC) {
+ return errors.New("lzma: lc out of range")
+ }
+ if !(minLP <= p.LP && p.LP <= maxLP) {
+ return errors.New("lzma: lp out of range")
+ }
+ if !(minPB <= p.PB && p.PB <= maxPB) {
+ return errors.New("lzma: pb out of range")
+ }
+ return nil
+}
+
+// Code converts the properties to a byte. The function assumes that
+// the properties components are all in range.
+func (p Properties) Code() byte {
+ return byte((p.PB*5+p.LP)*9 + p.LC)
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/tools/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
new file mode 100644
index 000000000..4b0fee3ff
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
@@ -0,0 +1,222 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+ "io"
+)
+
+// rangeEncoder implements range encoding of single bits. The low value can
+// overflow, therefore we need uint64. The cache value is used to handle
+// overflows.
+type rangeEncoder struct {
+ lbw *LimitedByteWriter
+ nrange uint32
+ low uint64
+ cacheLen int64
+ cache byte
+}
+
+// maxInt64 provides the maximum value of the int64 type.
+const maxInt64 = 1<<63 - 1
+
+// newRangeEncoder creates a new range encoder.
+func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) {
+ lbw, ok := bw.(*LimitedByteWriter)
+ if !ok {
+ lbw = &LimitedByteWriter{BW: bw, N: maxInt64}
+ }
+ return &rangeEncoder{
+ lbw: lbw,
+ nrange: 0xffffffff,
+ cacheLen: 1}, nil
+}
+
+// Available returns the number of bytes that can still be written. The
+// method takes into account the bytes that Close will still write.
+func (e *rangeEncoder) Available() int64 {
+ return e.lbw.N - (e.cacheLen + 4)
+}
+
+// writeByte writes a single byte to the underlying writer. An error is
+// returned if the limit is reached. The written byte will be counted if
+// the underlying writer doesn't return an error.
+func (e *rangeEncoder) writeByte(c byte) error {
+ if e.Available() < 1 {
+ return ErrLimit
+ }
+ return e.lbw.WriteByte(c)
+}
+
+// DirectEncodeBit encodes the least-significant bit of b with probability 1/2.
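+//
+// For example, starting from nrange = 0xffffffff, encoding the bit 1
+// halves nrange to 0x7fffffff and adds that amount to low, while
+// encoding the bit 0 only halves nrange.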
+func (e *rangeEncoder) DirectEncodeBit(b uint32) error {
+ e.nrange >>= 1
+ e.low += uint64(e.nrange) & (0 - (uint64(b) & 1))
+
+ // normalize
+ const top = 1 << 24
+ if e.nrange >= top {
+ return nil
+ }
+ e.nrange <<= 8
+ return e.shiftLow()
+}
+
+// EncodeBit encodes the least significant bit of b. The p value will be
+// updated by the function depending on the bit encoded.
+func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error {
+ bound := p.bound(e.nrange)
+ if b&1 == 0 {
+ e.nrange = bound
+ p.inc()
+ } else {
+ e.low += uint64(bound)
+ e.nrange -= bound
+ p.dec()
+ }
+
+ // normalize
+ const top = 1 << 24
+ if e.nrange >= top {
+ return nil
+ }
+ e.nrange <<= 8
+ return e.shiftLow()
+}
+
+// Close writes a complete copy of the low value.
+func (e *rangeEncoder) Close() error {
+ for i := 0; i < 5; i++ {
+ if err := e.shiftLow(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// shiftLow shifts the low value by 8 bits. The shifted byte is written into
+// the byte writer. The cache value is used to handle overflows.
+func (e *rangeEncoder) shiftLow() error {
+ if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 {
+ tmp := e.cache
+ for {
+ err := e.writeByte(tmp + byte(e.low>>32))
+ if err != nil {
+ return err
+ }
+ tmp = 0xff
+ e.cacheLen--
+ if e.cacheLen <= 0 {
+ if e.cacheLen < 0 {
+ panic("negative cacheLen")
+ }
+ break
+ }
+ }
+ e.cache = byte(uint32(e.low) >> 24)
+ }
+ e.cacheLen++
+ e.low = uint64(uint32(e.low) << 8)
+ return nil
+}
+
+// rangeDecoder decodes single bits of the range encoding stream.
+type rangeDecoder struct {
+ br io.ByteReader
+ nrange uint32
+ code uint32
+}
+
+// newRangeDecoder initializes a range decoder. It reads five bytes from the
+// reader and therefore may return an error.
+func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {
+ d = &rangeDecoder{br: br, nrange: 0xffffffff}
+
+ b, err := d.br.ReadByte()
+ if err != nil {
+ return nil, err
+ }
+ if b != 0 {
+ return nil, errors.New("newRangeDecoder: first byte not zero")
+ }
+
+ for i := 0; i < 4; i++ {
+ if err = d.updateCode(); err != nil {
+ return nil, err
+ }
+ }
+
+ if d.code >= d.nrange {
+ return nil, errors.New("newRangeDecoder: d.code >= d.nrange")
+ }
+
+ return d, nil
+}
+
+// possiblyAtEnd checks whether the decoder may be at the end of the stream.
+func (d *rangeDecoder) possiblyAtEnd() bool {
+ return d.code == 0
+}
+
+// DirectDecodeBit decodes a bit with probability 1/2. The return value b will
+// contain the bit at the least-significant position. All other bits will be
+// zero.
+func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) {
+ d.nrange >>= 1
+ d.code -= d.nrange
+ t := 0 - (d.code >> 31)
+ d.code += d.nrange & t
+ b = (t + 1) & 1
+
+ // d.code will stay less than d.nrange
+
+ // normalize
+ // assume d.code < d.nrange
+ const top = 1 << 24
+ if d.nrange >= top {
+ return b, nil
+ }
+ d.nrange <<= 8
+ // d.code < d.nrange will be maintained
+ return b, d.updateCode()
+}
+
+// DecodeBit decodes a single bit. The bit will be returned at the
+// least-significant position. All other bits will be zero. The probability
+// value will be updated.
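+//
+// The decision mirrors EncodeBit: with bound = (nrange>>11) * p, a code
+// value below bound decodes as bit 0 and increases p, otherwise the bit
+// is 1 and p is decreased.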
+func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/reader.go b/tools/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 000000000..eef6bea76 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,193 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and does not rely on any external +// library. +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + // Since v0.5.14 this parameter sets an upper limit for a .lzma file's + // dictionary size. This helps to mitigate problems with mangled + // headers. + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + // set an upper limit of 2 GiB-1 for dictionary capacity + // to address the zero prefix security issue. + c.DictCap = (1 << 31) - 1 + // original: c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. +// +// # Security concerns +// +// Note that LZMA format doesn't support a magic marker in the header. So +// [NewReader] cannot determine whether it reads the actual header. For instance +// the LZMA stream might have a zero byte in front of the reader, leading to +// larger dictionary sizes and file sizes. The code will detect later that there +// are problems with the stream, but the dictionary has already been allocated +// and this might consume a lot of memory. +// +// Version 0.5.14 introduces built-in mitigations: +// +// - The [ReaderConfig] DictCap field is now interpreted as a limit for the +// dictionary size. +// - The default is 2 Gigabytes minus 1 byte (2^31-1 bytes). +// - Users can check with the [Reader.Header] method what the actual values are in +// their LZMA files and set a smaller limit using [ReaderConfig]. +// - The dictionary size doesn't exceed the larger of the file size and +// the minimum dictionary size. This is another measure to prevent huge +// memory allocations for the dictionary. +// - The code supports stream sizes only up to a pebibyte (1024^5). 
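+//
+// A minimal decoding sketch (bufio, io and os are standard-library
+// assumptions; error handling elided):
+//
+// r, err := lzma.NewReader(bufio.NewReader(f))
+// _, err = io.Copy(os.Stdout, r)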
+type Reader struct { + lzma io.Reader + header Header + // headerOrig stores the original header read from the stream. + headerOrig Header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// ErrDictSize reports about an error of the dictionary size. +type ErrDictSize struct { + ConfigDictCap int + HeaderDictSize uint32 + Message string +} + +// Error returns the error message. +func (e *ErrDictSize) Error() string { + return e.Message +} + +func newErrDictSize(messageformat string, + configDictCap int, headerDictSize uint32, + args ...interface{}) *ErrDictSize { + newArgs := make([]interface{}, len(args)+2) + newArgs[0] = configDictCap + newArgs[1] = headerDictSize + copy(newArgs[2:], args) + return &ErrDictSize{ + ConfigDictCap: configDictCap, + HeaderDictSize: headerDictSize, + Message: fmt.Sprintf(messageformat, newArgs...), + } +} + +// We support only files not larger than 1 << 50 bytes (a pebibyte, 1024^5). +const maxStreamSize = 1 << 50 + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.header.unmarshalBinary(data); err != nil { + return nil, err + } + r.headerOrig = r.header + dictSize := int64(r.header.DictSize) + if int64(c.DictCap) < dictSize { + return nil, newErrDictSize( + "lzma: header dictionary size %[2]d exceeds configured dictionary capacity %[1]d", + c.DictCap, uint32(dictSize), + ) + } + if dictSize < MinDictCap { + dictSize = MinDictCap + } + // original code: disabled this because there is no point in increasing + // the dictionary above what is stated in the file. + /* + if int64(c.DictCap) > int64(dictSize) { + dictSize = int64(c.DictCap) + } + */ + size := r.header.Size + if size >= 0 && size < dictSize { + dictSize = size + } + // Protect against modified or malicious headers. + if size > maxStreamSize { + return nil, fmt.Errorf( + "lzma: stream size %d exceeds a pebibyte (1024^5)", + size) + } + if dictSize < MinDictCap { + dictSize = MinDictCap + } + + r.header.DictSize = uint32(dictSize) + + state := newState(r.header.Properties) + dict, err := newDecoderDict(int(dictSize)) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.header.Size) + if err != nil { + return nil, err + } + return r, nil +} + +// Header returns the header as read from the LZMA stream. It is intended to +// allow the user to understand what parameters are typically provided in the +// headers of the LZMA files and set the DictCap field in [ReaderConfig] +// accordingly. +func (r *Reader) Header() (h Header, ok bool) { + return r.headerOrig, r.d != nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. 
+func (r *Reader) Read(p []byte) (n int, err error) {
+ return r.d.Read(p)
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/tools/vendor/github.com/ulikunitz/xz/lzma/reader2.go
new file mode 100644
index 000000000..f36e26505
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/reader2.go
@@ -0,0 +1,231 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "errors"
+ "io"
+
+ "github.com/ulikunitz/xz/internal/xlog"
+)
+
+// Reader2Config stores the parameters for the LZMA2 reader.
+type Reader2Config struct {
+ DictCap int
+}
+
+// fill converts the zero values of the configuration to the default values.
+func (c *Reader2Config) fill() {
+ if c.DictCap == 0 {
+ c.DictCap = 8 * 1024 * 1024
+ }
+}
+
+// Verify checks the reader configuration for errors. Zero configuration values
+// will be replaced by default values.
+func (c *Reader2Config) Verify() error {
+ c.fill()
+ if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+ return errors.New("lzma: dictionary capacity is out of range")
+ }
+ return nil
+}
+
+// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
+// first chunk should have a dictionary reset and the first compressed
+// chunk a properties reset. The chunk sequence might not be terminated by
+// an end-of-stream chunk.
+type Reader2 struct {
+ r io.Reader
+ err error
+
+ dict *decoderDict
+ ur *uncompressedReader
+ decoder *decoder
+ chunkReader io.Reader
+
+ cstate chunkState
+}
+
+// NewReader2 creates a reader for an LZMA2 chunk sequence.
+func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
+ return Reader2Config{}.NewReader2(lzma2)
+}
+
+// NewReader2 creates an LZMA2 reader using the given configuration.
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
+ if err = c.Verify(); err != nil {
+ return nil, err
+ }
+ r = &Reader2{r: lzma2, cstate: start}
+ r.dict, err = newDecoderDict(c.DictCap)
+ if err != nil {
+ return nil, err
+ }
+ if err = r.startChunk(); err != nil {
+ r.err = err
+ }
+ return r, nil
+}
+
+// uncompressed tests whether the chunk type specifies an uncompressed
+// chunk.
+func uncompressed(ctype chunkType) bool {
+ return ctype == cU || ctype == cUD
+}
+
+// startChunk parses a new chunk.
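+// For example, an uncompressed chunk without dictionary reset (cU)
+// starts with the header byte 0x02 followed by the big-endian 16-bit
+// value size-1, while a cLRND chunk starts with
+// 0xE0 | byte((size-1)>>16).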
+func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. +func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/state.go b/tools/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 000000000..34779c513 --- /dev/null +++ b/tools/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,145 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package lzma
+
+// states defines the overall state count.
+const states = 12
+
+// state maintains the full state of the operation encoding or decoding
+// process.
+type state struct {
+ rep [4]uint32
+ isMatch [states << maxPosBits]prob
+ isRepG0Long [states << maxPosBits]prob
+ isRep [states]prob
+ isRepG0 [states]prob
+ isRepG1 [states]prob
+ isRepG2 [states]prob
+ litCodec literalCodec
+ lenCodec lengthCodec
+ repLenCodec lengthCodec
+ distCodec distCodec
+ state uint32
+ posBitMask uint32
+ Properties Properties
+}
+
+// initProbSlice initializes a slice of probabilities.
+func initProbSlice(p []prob) {
+ for i := range p {
+ p[i] = probInit
+ }
+}
+
+// Reset sets all state information to the original values.
+func (s *state) Reset() {
+ p := s.Properties
+ *s = state{
+ Properties: p,
+ // dict: s.dict,
+ posBitMask: (uint32(1) << uint(p.PB)) - 1,
+ }
+ initProbSlice(s.isMatch[:])
+ initProbSlice(s.isRep[:])
+ initProbSlice(s.isRepG0[:])
+ initProbSlice(s.isRepG1[:])
+ initProbSlice(s.isRepG2[:])
+ initProbSlice(s.isRepG0Long[:])
+ s.litCodec.init(p.LC, p.LP)
+ s.lenCodec.init()
+ s.repLenCodec.init()
+ s.distCodec.init()
+}
+
+// newState creates a new state from the given Properties.
+func newState(p Properties) *state {
+ s := &state{Properties: p}
+ s.Reset()
+ return s
+}
+
+// deepcopy initializes s as a deep copy of the source.
+func (s *state) deepcopy(src *state) {
+ if s == src {
+ return
+ }
+ s.rep = src.rep
+ s.isMatch = src.isMatch
+ s.isRepG0Long = src.isRepG0Long
+ s.isRep = src.isRep
+ s.isRepG0 = src.isRepG0
+ s.isRepG1 = src.isRepG1
+ s.isRepG2 = src.isRepG2
+ s.litCodec.deepcopy(&src.litCodec)
+ s.lenCodec.deepcopy(&src.lenCodec)
+ s.repLenCodec.deepcopy(&src.repLenCodec)
+ s.distCodec.deepcopy(&src.distCodec)
+ s.state = src.state
+ s.posBitMask = src.posBitMask
+ s.Properties = src.Properties
+}
+
+// cloneState creates a new clone of the given state.
+func cloneState(src *state) *state {
+ s := new(state)
+ s.deepcopy(src)
+ return s
+}
+
+// updateStateLiteral updates the state for a literal.
+func (s *state) updateStateLiteral() {
+ switch {
+ case s.state < 4:
+ s.state = 0
+ return
+ case s.state < 10:
+ s.state -= 3
+ return
+ }
+ s.state -= 6
+}
+
+// updateStateMatch updates the state for a match.
+func (s *state) updateStateMatch() {
+ if s.state < 7 {
+ s.state = 7
+ } else {
+ s.state = 10
+ }
+}
+
+// updateStateRep updates the state for a repetition.
+func (s *state) updateStateRep() {
+ if s.state < 7 {
+ s.state = 8
+ } else {
+ s.state = 11
+ }
+}
+
+// updateStateShortRep updates the state for a short repetition.
+func (s *state) updateStateShortRep() {
+ if s.state < 7 {
+ s.state = 9
+ } else {
+ s.state = 11
+ }
+}
+
+// states computes the states of the operation codec.
+func (s *state) states(dictHead int64) (state1, state2, posState uint32) {
+ state1 = s.state
+ posState = uint32(dictHead) & s.posBitMask
+ state2 = (s.state << maxPosBits) | posState
+ return
+}
+
+// litState computes the literal state.
+func (s *state) litState(prev byte, dictHead int64) uint32 {
+ lp, lc := uint(s.Properties.LP), uint(s.Properties.LC)
+ litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) |
+ (uint32(prev) >> (8 - lc))
+ return litState
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/tools/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
new file mode 100644
index 000000000..36b29b598
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
@@ -0,0 +1,133 @@
+// Copyright 2014-2022 Ulrich Kunitz.
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// treeCodec encodes or decodes values with a fixed bit size. It uses a
+// tree of probability values. The root of the tree is the most-significant
+// bit.
+type treeCodec struct {
+ probTree
+}
+
+// makeTreeCodec makes a tree codec. The bits value must be inside the range
+// [1,32].
+func makeTreeCodec(bits int) treeCodec {
+ return treeCodec{makeProbTree(bits)}
+}
+
+// deepcopy initializes tc as a deep copy of the source.
+func (tc *treeCodec) deepcopy(src *treeCodec) {
+ tc.probTree.deepcopy(&src.probTree)
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value.
+func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
+ m := uint32(1)
+ for i := int(tc.bits) - 1; i >= 0; i-- {
+ b := (v >> uint(i)) & 1
+ if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+ return err
+ }
+ m = (m << 1) | b
+ }
+ return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors may
+// be caused by the range decoder.
+func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+ m := uint32(1)
+ for j := 0; j < int(tc.bits); j++ {
+ b, err := d.DecodeBit(&tc.probs[m])
+ if err != nil {
+ return 0, err
+ }
+ m = (m << 1) | b
+ }
+ return m - (1 << uint(tc.bits)), nil
+}
+
+// treeReverseCodec is another tree codec, where the least-significant bit is
+// the start of the probability tree.
+type treeReverseCodec struct {
+ probTree
+}
+
+// deepcopy initializes the treeReverseCodec as a deep copy of the
+// source.
+func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
+ tc.probTree.deepcopy(&src.probTree)
+}
+
+// makeTreeReverseCodec creates a treeReverseCodec value. The bits argument
+// must be in the range [1,32].
+func makeTreeReverseCodec(bits int) treeReverseCodec {
+ return treeReverseCodec{makeProbTree(bits)}
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value. The range
+// encoder may cause errors.
+func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
+ m := uint32(1)
+ for i := uint(0); i < uint(tc.bits); i++ {
+ b := (v >> i) & 1
+ if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+ return err
+ }
+ m = (m << 1) | b
+ }
+ return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors
+// returned by the range decoder will be returned.
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+ m := uint32(1)
+ for j := uint(0); j < uint(tc.bits); j++ {
+ b, err := d.DecodeBit(&tc.probs[m])
+ if err != nil {
+ return 0, err
+ }
+ m = (m << 1) | b
+ v |= b << j
+ }
+ return v, nil
+}
+
+// probTree stores enough probability values to be used by the treeEncode and
+// treeDecode methods of the range coder types.
+type probTree struct {
+ probs []prob
+ bits byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+ if t == src {
+ return
+ }
+ t.probs = make([]prob, len(src.probs))
+ copy(t.probs, src.probs)
+ t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+ if !(1 <= bits && bits <= 32) {
+ panic("bits outside of range [1,32]")
+ }
+ t := probTree{
+ bits: byte(bits),
+ probs: make([]prob, 1<<uint(bits)),
+ }
+ for i := range t.probs {
+ t.probs[i] = probInit
+ }
+ return t
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/writer.go b/tools/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "bufio"
+ "errors"
+ "io"
+)
+
+// WriterConfig defines the configuration parameters for a writer.
+type WriterConfig struct {
+ // Properties for the encoding. If it is nil the value
+ // {LC: 3, LP: 0, PB: 2} will be chosen.
+ Properties *Properties
+ // The capacity of the dictionary. If DictCap is zero, the value
+ // 8 MiB will be chosen.
+ DictCap int
+ // Size of the lookahead buffer; value 0 indicates default size
+ // 4096
+ BufSize int
+ // Match algorithm
+ Matcher MatchAlgorithm
+ // SizeInHeader indicates that the header will contain an
+ // explicit size.
+ SizeInHeader bool
+ // Size of the data to be encoded. A positive value will imply
+ // that an explicit size will be set in the header.
+ Size int64
+ // EOSMarker requests whether the EOSMarker needs to be written.
+ // If no explicit size is given the EOSMarker will be set
+ // automatically.
+ EOSMarker bool
+}
+
+// fill converts zero-value fields to default values.
+func (c *WriterConfig) fill() {
+ if c.Properties == nil {
+ c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+ }
+ if c.DictCap == 0 {
+ c.DictCap = 8 * 1024 * 1024
+ }
+ if c.BufSize == 0 {
+ c.BufSize = 4096
+ }
+ if c.Size > 0 {
+ c.SizeInHeader = true
+ }
+ if !c.SizeInHeader {
+ c.EOSMarker = true
+ }
+}
+
+// Verify checks WriterConfig for errors.
Verify will replace zero +// values with default values. +func (c *WriterConfig) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.SizeInHeader { + if c.Size < 0 { + return errors.New("lzma: negative size not supported") + } + } else if !c.EOSMarker { + return errors.New("lzma: EOS marker is required") + } + if err = c.Matcher.verify(); err != nil { + return err + } + + return nil +} + +// header returns the header structure for this configuration. +func (c *WriterConfig) header() Header { + h := Header{ + Properties: *c.Properties, + DictSize: uint32(c.DictCap), + Size: -1, + } + if c.SizeInHeader { + h.Size = c.Size + } + return h +} + +// Writer writes an LZMA stream in the classic format. +type Writer struct { + h Header + bw io.ByteWriter + buf *bufio.Writer + e *encoder +} + +// NewWriter creates a new LZMA writer for the classic format. The +// method will write the header to the underlying stream. +func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{h: c.header()} + + var ok bool + w.bw, ok = lzma.(io.ByteWriter) + if !ok { + w.buf = bufio.NewWriter(lzma) + w.bw = w.buf + } + state := newState(w.h.Properties) + m, err := c.Matcher.new(int(w.h.DictSize)) + if err != nil { + return nil, err + } + dict, err := newEncoderDict(int(w.h.DictSize), c.BufSize, m) + if err != nil { + return nil, err + } + var flags encoderFlags + if c.EOSMarker { + flags = eosMarker + } + if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { + return nil, err + } + + if err = w.writeHeader(); err != nil { + return nil, err + } + return w, nil +} + +// NewWriter creates a new LZMA writer using the classic format. The +// function writes the header to the underlying stream. +func NewWriter(lzma io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(lzma) +} + +// writeHeader writes the LZMA header into the stream. +func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.Size >= 0 { + m := w.h.Size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. 
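+//
+// A minimal compression sketch (io is a standard-library assumption;
+// error handling elided):
+//
+// w, err := lzma.NewWriter(f)
+// _, err = io.Copy(w, src)
+// err = w.Close()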
+func (w *Writer) Close() error {
+ if w.h.Size >= 0 {
+ n := w.e.Compressed() + int64(w.e.dict.Buffered())
+ if n != w.h.Size {
+ return errSize
+ }
+ }
+ err := w.e.Close()
+ if w.buf != nil {
+ ferr := w.buf.Flush()
+ if err == nil {
+ err = ferr
+ }
+ }
+ return err
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/tools/vendor/github.com/ulikunitz/xz/lzma/writer2.go
new file mode 100644
index 000000000..97bbafa11
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzma/writer2.go
@@ -0,0 +1,305 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// Writer2Config is used to create a Writer2 using parameters.
+type Writer2Config struct {
+ // The properties for the encoding. If it is nil the value
+ // {LC: 3, LP: 0, PB: 2} will be chosen.
+ Properties *Properties
+ // The capacity of the dictionary. If DictCap is zero, the value
+ // 8 MiB will be chosen.
+ DictCap int
+ // Size of the lookahead buffer; value 0 indicates default size
+ // 4096
+ BufSize int
+ // Match algorithm
+ Matcher MatchAlgorithm
+}
+
+// fill replaces zero values with default values.
+func (c *Writer2Config) fill() {
+ if c.Properties == nil {
+ c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+ }
+ if c.DictCap == 0 {
+ c.DictCap = 8 * 1024 * 1024
+ }
+ if c.BufSize == 0 {
+ c.BufSize = 4096
+ }
+}
+
+// Verify checks the Writer2Config for correctness. Zero values will be
+// replaced by default values.
+func (c *Writer2Config) Verify() error {
+ c.fill()
+ var err error
+ if c == nil {
+ return errors.New("lzma: Writer2Config is nil")
+ }
+ if c.Properties == nil {
+ return errors.New("lzma: Writer2Config has no Properties set")
+ }
+ if err = c.Properties.verify(); err != nil {
+ return err
+ }
+ if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+ return errors.New("lzma: dictionary capacity is out of range")
+ }
+ if !(maxMatchLen <= c.BufSize) {
+ return errors.New("lzma: lookahead buffer size too small")
+ }
+ if c.Properties.LC+c.Properties.LP > 4 {
+ return errors.New("lzma: sum of lc and lp exceeds 4")
+ }
+ if err = c.Matcher.verify(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Writer2 supports the creation of an LZMA2 stream. But note that
+// written data is buffered, so call Flush or Close to write data to the
+// underlying writer. The Close method writes the end-of-stream marker
+// to the stream. So you may be able to concatenate the output of two
+// writers as long as the output of the first writer has only been flushed
+// but not closed.
+//
+// Any change to the fields Properties and DictCap must be done before the
+// first call to Write, Flush or Close.
+type Writer2 struct {
+ w io.Writer
+
+ start *state
+ encoder *encoder
+
+ cstate chunkState
+ ctype chunkType
+
+ buf bytes.Buffer
+ lbw LimitedByteWriter
+}
+
+// NewWriter2 creates an LZMA2 chunk sequence writer with the default
+// parameters and options.
+func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
+ return Writer2Config{}.NewWriter2(lzma2)
+}
+
+// NewWriter2 creates a new LZMA2 writer using the given configuration.
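+//
+// A hedged sketch with an explicit dictionary capacity (error handling
+// elided):
+//
+// w, err := lzma.Writer2Config{DictCap: 16 << 20}.NewWriter2(f)
+// _, err = w.Write(data)
+// err = w.Close()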
+func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer2{
+		w:      lzma2,
+		start:  newState(*c.Properties),
+		cstate: start,
+		ctype:  start.defaultChunkType(),
+	}
+	w.buf.Grow(maxCompressed)
+	w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed}
+	m, err := c.Matcher.new(c.DictCap)
+	if err != nil {
+		return nil, err
+	}
+	d, err := newEncoderDict(c.DictCap, c.BufSize, m)
+	if err != nil {
+		return nil, err
+	}
+	w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0)
+	if err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// written returns the number of bytes written to the current chunk.
+func (w *Writer2) written() int {
+	if w.encoder == nil {
+		return 0
+	}
+	return int(w.encoder.Compressed()) + w.encoder.dict.Buffered()
+}
+
+// errClosed indicates that the writer is closed.
+var errClosed = errors.New("lzma: writer closed")
+
+// Write writes data to the LZMA2 stream. Note that written data will
+// be buffered. Use Flush or Close to ensure that data is written to
+// the underlying writer.
+func (w *Writer2) Write(p []byte) (n int, err error) {
+	if w.cstate == stop {
+		return 0, errClosed
+	}
+	for n < len(p) {
+		m := maxUncompressed - w.written()
+		if m <= 0 {
+			panic("lzma: maxUncompressed reached")
+		}
+		var q []byte
+		if n+m < len(p) {
+			q = p[n : n+m]
+		} else {
+			q = p[n:]
+		}
+		k, err := w.encoder.Write(q)
+		n += k
+		if err != nil && err != ErrLimit {
+			return n, err
+		}
+		if err == ErrLimit || k == m {
+			if err = w.flushChunk(); err != nil {
+				return n, err
+			}
+		}
+	}
+	return n, nil
+}
+
+// writeUncompressedChunk writes an uncompressed chunk to the LZMA2
+// stream.
+func (w *Writer2) writeUncompressedChunk() error {
+	u := w.encoder.Compressed()
+	if u <= 0 {
+		return errors.New("lzma: can't write empty uncompressed chunk")
+	}
+	if u > maxUncompressed {
+		panic("overrun of uncompressed data limit")
+	}
+	switch w.ctype {
+	case cLRND:
+		w.ctype = cUD
+	default:
+		w.ctype = cU
+	}
+	w.encoder.state = w.start
+
+	header := chunkHeader{
+		ctype:        w.ctype,
+		uncompressed: uint32(u - 1),
+	}
+	hdata, err := header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.w.Write(hdata); err != nil {
+		return err
+	}
+	_, err = w.encoder.dict.CopyN(w.w, int(u))
+	return err
+}
+
+// writeCompressedChunk writes a compressed chunk to the underlying
+// writer.
+func (w *Writer2) writeCompressedChunk() error {
+	if w.ctype == cU || w.ctype == cUD {
+		panic("chunk type uncompressed")
+	}
+
+	u := w.encoder.Compressed()
+	if u <= 0 {
+		return errors.New("writeCompressedChunk: empty chunk")
+	}
+	if u > maxUncompressed {
+		panic("overrun of uncompressed data limit")
+	}
+	c := w.buf.Len()
+	if c <= 0 {
+		panic("no compressed data")
+	}
+	if c > maxCompressed {
+		panic("overrun of compressed data limit")
+	}
+	header := chunkHeader{
+		ctype:        w.ctype,
+		uncompressed: uint32(u - 1),
+		compressed:   uint16(c - 1),
+		props:        w.encoder.state.Properties,
+	}
+	hdata, err := header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.w.Write(hdata); err != nil {
+		return err
+	}
+	_, err = io.Copy(w.w, &w.buf)
+	return err
+}
+
+// writeChunk writes a single chunk to the underlying writer.
+func (w *Writer2) writeChunk() error {
+	u := int(uncompressedHeaderLen + w.encoder.Compressed())
+	c := headerLen(w.ctype) + w.buf.Len()
+	if u < c {
+		return w.writeUncompressedChunk()
+	}
+	return w.writeCompressedChunk()
+}
+
+// flushChunk terminates the current chunk.
The encoder will be reset
+// to support the next chunk.
+func (w *Writer2) flushChunk() error {
+	if w.written() == 0 {
+		return nil
+	}
+	var err error
+	if err = w.encoder.Close(); err != nil {
+		return err
+	}
+	if err = w.writeChunk(); err != nil {
+		return err
+	}
+	w.buf.Reset()
+	w.lbw.N = maxCompressed
+	if err = w.encoder.Reopen(&w.lbw); err != nil {
+		return err
+	}
+	if err = w.cstate.next(w.ctype); err != nil {
+		return err
+	}
+	w.ctype = w.cstate.defaultChunkType()
+	w.start = cloneState(w.encoder.state)
+	return nil
+}
+
+// Flush writes all buffered data out to the underlying stream. This
+// may result in multiple chunks being created.
+func (w *Writer2) Flush() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	for w.written() > 0 {
+		if err := w.flushChunk(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close terminates the LZMA2 stream with an EOS chunk.
+func (w *Writer2) Close() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	// write zero byte EOS chunk
+	_, err := w.w.Write([]byte{0})
+	if err != nil {
+		return err
+	}
+	w.cstate = stop
+	return nil
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/lzmafilter.go b/tools/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 000000000..bd5f42ee8
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// LZMA filter constants.
+const (
+	lzmaFilterID  = 0x21
+	lzmaFilterLen = 3
+)
+
+// lzmaFilter declares the LZMA2 filter information stored in an xz
+// block header.
+type lzmaFilter struct {
+	dictCap int64
+}
+
+// String returns a representation of the LZMA filter.
+func (f lzmaFilter) String() string {
+	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
+}
+
+// id returns the ID for the LZMA2 filter.
+func (f lzmaFilter) id() uint64 { return lzmaFilterID }
+
+// MarshalBinary converts the lzmaFilter into its encoded
+// representation.
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
+	c := lzma.EncodeDictCap(f.dictCap)
+	return []byte{lzmaFilterID, 1, c}, nil
+}
+
+// UnmarshalBinary unmarshals the given data representation of the LZMA2
+// filter.
+func (f *lzmaFilter) UnmarshalBinary(data []byte) error {
+	if len(data) != lzmaFilterLen {
+		return errors.New("xz: data for LZMA2 filter has wrong length")
+	}
+	if data[0] != lzmaFilterID {
+		return errors.New("xz: wrong LZMA2 filter id")
+	}
+	if data[1] != 1 {
+		return errors.New("xz: wrong LZMA2 filter size")
+	}
+	dc, err := lzma.DecodeDictCap(data[2])
+	if err != nil {
+		return errors.New("xz: wrong LZMA2 dictionary size property")
+	}
+
+	f.dictCap = dc
+	return nil
+}
+
+// reader creates a new reader for the LZMA2 filter.
+func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader,
+	err error) {
+
+	config := new(lzma.Reader2Config)
+	if c != nil {
+		config.DictCap = c.DictCap
+	}
+	dc := int(f.dictCap)
+	if dc < 1 {
+		return nil, errors.New("xz: LZMA2 filter parameter " +
+			"dictionary capacity overflow")
+	}
+	if dc > config.DictCap {
+		config.DictCap = dc
+	}
+
+	fr, err = config.NewReader2(r)
+	if err != nil {
+		return nil, err
+	}
+	return fr, nil
+}
+
+// writeCloser creates an io.WriteCloser for the LZMA2 filter.
+func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig,
+) (fw io.WriteCloser, err error) {
+	config := new(lzma.Writer2Config)
+	if c != nil {
+		*config = lzma.Writer2Config{
+			Properties: c.Properties,
+			DictCap:    c.DictCap,
+			BufSize:    c.BufSize,
+			Matcher:    c.Matcher,
+		}
+	}
+
+	dc := int(f.dictCap)
+	if dc < 1 {
+		return nil, errors.New("xz: LZMA2 filter parameter " +
+			"dictionary capacity overflow")
+	}
+	if dc > config.DictCap {
+		config.DictCap = dc
+	}
+
+	fw, err = config.NewWriter2(w)
+	if err != nil {
+		return nil, err
+	}
+	return fw, nil
+}
+
+// last returns true, because an LZMA2 filter must be the last filter in
+// the filter list.
+func (f lzmaFilter) last() bool { return true }
diff --git a/tools/vendor/github.com/ulikunitz/xz/make-docs b/tools/vendor/github.com/ulikunitz/xz/make-docs
new file mode 100644
index 000000000..a8c612ce1
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/make-docs
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+set -x
+pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md
+pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md
diff --git a/tools/vendor/github.com/ulikunitz/xz/none-check.go b/tools/vendor/github.com/ulikunitz/xz/none-check.go
new file mode 100644
index 000000000..6a56a2612
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/none-check.go
@@ -0,0 +1,23 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import "hash"
+
+type noneHash struct{}
+
+func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil }
+
+func (h noneHash) Sum(b []byte) []byte { return b }
+
+func (h noneHash) Reset() {}
+
+func (h noneHash) Size() int { return 0 }
+
+func (h noneHash) BlockSize() int { return 0 }
+
+func newNoneHash() hash.Hash {
+	return &noneHash{}
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/reader.go b/tools/vendor/github.com/ulikunitz/xz/reader.go
new file mode 100644
index 000000000..bde1412cf
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/reader.go
@@ -0,0 +1,359 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xz supports the compression and decompression of xz files. It
+// supports version 1.0.4 of the specification without the non-LZMA2
+// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt
+package xz
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/ulikunitz/xz/internal/xlog"
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// ReaderConfig defines the parameters for the xz reader. The
+// SingleStream parameter requests the reader to assume that the
+// underlying stream contains only a single stream.
+type ReaderConfig struct {
+	DictCap      int
+	SingleStream bool
+}
+
+// Verify checks the reader parameters for validity. Zero values will be
+// replaced by default values.
+func (c *ReaderConfig) Verify() error {
+	if c == nil {
+		return errors.New("xz: reader parameters are nil")
+	}
+	lc := lzma.Reader2Config{DictCap: c.DictCap}
+	if err := lc.Verify(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Reader supports the reading of one or multiple xz streams.
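+//
+// A minimal round trip might look like this (sketch; error handling
+// elided for brevity):
+//
+//	var buf bytes.Buffer
+//	w, _ := NewWriter(&buf)
+//	w.Write([]byte("data"))
+//	w.Close()
+//	r, _ := NewReader(&buf)
+//	io.Copy(os.Stdout, r)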
+type Reader struct {
+	ReaderConfig
+
+	xz io.Reader
+	sr *streamReader
+}
+
+// streamReader decodes a single xz stream.
+type streamReader struct {
+	ReaderConfig
+
+	xz      io.Reader
+	br      *blockReader
+	newHash func() hash.Hash
+	h       header
+	index   []record
+}
+
+// NewReader creates a new xz reader using the default parameters.
+// The function reads and checks the header of the first xz stream. The
+// reader will process multiple streams including padding.
+func NewReader(xz io.Reader) (r *Reader, err error) {
+	return ReaderConfig{}.NewReader(xz)
+}
+
+// NewReader creates an xz stream reader. The created reader will be
+// able to process multiple streams and padding unless SingleStream
+// has been set in the reader configuration c.
+func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	r = &Reader{
+		ReaderConfig: c,
+		xz:           xz,
+	}
+	if r.sr, err = c.newStreamReader(xz); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return nil, err
+	}
+	return r, nil
+}
+
+var errUnexpectedData = errors.New("xz: unexpected data after stream")
+
+// Read reads uncompressed data from the stream.
+func (r *Reader) Read(p []byte) (n int, err error) {
+	for n < len(p) {
+		if r.sr == nil {
+			if r.SingleStream {
+				data := make([]byte, 1)
+				_, err = io.ReadFull(r.xz, data)
+				if err != io.EOF {
+					return n, errUnexpectedData
+				}
+				return n, io.EOF
+			}
+			for {
+				r.sr, err = r.ReaderConfig.newStreamReader(r.xz)
+				if err != errPadding {
+					break
+				}
+			}
+			if err != nil {
+				return n, err
+			}
+		}
+		k, err := r.sr.Read(p[n:])
+		n += k
+		if err != nil {
+			if err == io.EOF {
+				r.sr = nil
+				continue
+			}
+			return n, err
+		}
+	}
+	return n, nil
+}
+
+var errPadding = errors.New("xz: padding (4 zero bytes) encountered")
+
+// newStreamReader creates a new xz stream reader using the given
+// configuration parameters. It reads and checks the header of the xz
+// stream.
+func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	data := make([]byte, HeaderLen)
+	if _, err := io.ReadFull(xz, data[:4]); err != nil {
+		return nil, err
+	}
+	if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) {
+		return nil, errPadding
+	}
+	if _, err = io.ReadFull(xz, data[4:]); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return nil, err
+	}
+	r = &streamReader{
+		ReaderConfig: c,
+		xz:           xz,
+		index:        make([]record, 0, 4),
+	}
+	if err = r.h.UnmarshalBinary(data); err != nil {
+		return nil, err
+	}
+	xlog.Debugf("xz header %s", r.h)
+	if r.newHash, err = newHashFunc(r.h.flags); err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// readTail reads the index body and the xz footer.
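+// It also checks that the records in the index match the records
+// collected while the blocks were read.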
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz, len(r.index)) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + if br.hash.Size() != 0 { + br.r = io.TeeReader(fr, br.hash) + } else { + br.r = fr + } + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// Read reads data from the block. 
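+// When the end of the block is reached, it verifies the size fields,
+// the block padding and the check sum before returning io.EOF.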
+func (br *blockReader) Read(p []byte) (n int, err error) {
+	n, err = br.r.Read(p)
+	br.n += int64(n)
+
+	u := br.header.uncompressedSize
+	if u >= 0 && br.uncompressedSize() > u {
+		return n, errors.New("xz: wrong uncompressed size for block")
+	}
+	c := br.header.compressedSize
+	if c >= 0 && br.compressedSize() > c {
+		return n, errors.New("xz: wrong compressed size for block")
+	}
+	if err != io.EOF {
+		return n, err
+	}
+	if br.uncompressedSize() < u || br.compressedSize() < c {
+		return n, io.ErrUnexpectedEOF
+	}
+
+	s := br.hash.Size()
+	k := padLen(br.lxz.n)
+	q := make([]byte, k+s, k+2*s)
+	if _, err = io.ReadFull(br.lxz.r, q); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return n, err
+	}
+	if !allZeros(q[:k]) {
+		return n, errors.New("xz: non-zero block padding")
+	}
+	checkSum := q[k:]
+	computedSum := br.hash.Sum(checkSum[s:])
+	if !bytes.Equal(checkSum, computedSum) {
+		return n, errors.New("xz: checksum error for block")
+	}
+	return n, io.EOF
+}
+
+func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader,
+	err error) {
+
+	if err = verifyFilters(f); err != nil {
+		return nil, err
+	}
+
+	fr = r
+	for i := len(f) - 1; i >= 0; i-- {
+		fr, err = f[i].reader(fr, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fr, nil
+}
diff --git a/tools/vendor/github.com/ulikunitz/xz/writer.go b/tools/vendor/github.com/ulikunitz/xz/writer.go
new file mode 100644
index 000000000..f693e0aef
--- /dev/null
+++ b/tools/vendor/github.com/ulikunitz/xz/writer.go
@@ -0,0 +1,399 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// WriterConfig describes the parameters for an xz writer.
+type WriterConfig struct {
+	Properties *lzma.Properties
+	DictCap    int
+	BufSize    int
+	BlockSize  int64
+	// checksum method: CRC32, CRC64 or SHA256 (default: CRC64)
+	CheckSum byte
+	// Forces NoChecksum (default: false)
+	NoCheckSum bool
+	// match algorithm
+	Matcher lzma.MatchAlgorithm
+}
+
+// fill replaces zero values with default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.BlockSize == 0 {
+		c.BlockSize = maxInt64
+	}
+	if c.CheckSum == 0 {
+		c.CheckSum = CRC64
+	}
+	if c.NoCheckSum {
+		c.CheckSum = None
+	}
+}
+
+// Verify checks the configuration for errors. Zero values will be
+// replaced by default values.
+func (c *WriterConfig) Verify() error {
+	if c == nil {
+		return errors.New("xz: writer configuration is nil")
+	}
+	c.fill()
+	lc := lzma.Writer2Config{
+		Properties: c.Properties,
+		DictCap:    c.DictCap,
+		BufSize:    c.BufSize,
+		Matcher:    c.Matcher,
+	}
+	if err := lc.Verify(); err != nil {
+		return err
+	}
+	if c.BlockSize <= 0 {
+		return errors.New("xz: block size out of range")
+	}
+	if err := verifyFlags(c.CheckSum); err != nil {
+		return err
+	}
+	return nil
+}
+
+// filters creates the filter list for the given parameters.
+func (c *WriterConfig) filters() []filter {
+	return []filter{&lzmaFilter{int64(c.DictCap)}}
+}
+
+// maxInt64 defines the maximum 64-bit signed integer.
+const maxInt64 = 1<<63 - 1
+
+// verifyFilters checks the filter list for the length and the right
+// sequence of filters.
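+// A chain consists of one to four filters, and only the last filter in
+// the list may be a "last" filter such as the LZMA2 filter.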
+func verifyFilters(f []filter) error {
+	if len(f) == 0 {
+		return errors.New("xz: no filters")
+	}
+	if len(f) > 4 {
+		return errors.New("xz: more than four filters")
+	}
+	for _, g := range f[:len(f)-1] {
+		if g.last() {
+			return errors.New("xz: last filter is not last")
+		}
+	}
+	if !f[len(f)-1].last() {
+		return errors.New("xz: wrong last filter")
+	}
+	return nil
+}
+
+// newFilterWriteCloser converts a filter list into a WriteCloser that
+// can be used by a blockWriter.
+func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) {
+	if err = verifyFilters(f); err != nil {
+		return nil, err
+	}
+	fw = nopWriteCloser(w)
+	for i := len(f) - 1; i >= 0; i-- {
+		fw, err = f[i].writeCloser(fw, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fw, nil
+}
+
+// nopWCloser implements a WriteCloser with a Close method not doing
+// anything.
+type nopWCloser struct {
+	io.Writer
+}
+
+// Close returns nil and doesn't do anything else.
+func (c nopWCloser) Close() error {
+	return nil
+}
+
+// nopWriteCloser converts the Writer into a WriteCloser with a Close
+// function that does nothing beside returning nil.
+func nopWriteCloser(w io.Writer) io.WriteCloser {
+	return nopWCloser{w}
+}
+
+// Writer compresses data written to it. It is an io.WriteCloser.
+type Writer struct {
+	WriterConfig
+
+	xz      io.Writer
+	bw      *blockWriter
+	newHash func() hash.Hash
+	h       header
+	index   []record
+	closed  bool
+}
+
+// newBlockWriter creates a new block writer and writes the header out.
+func (w *Writer) newBlockWriter() error {
+	var err error
+	w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash())
+	if err != nil {
+		return err
+	}
+	if err = w.bw.writeHeader(w.xz); err != nil {
+		return err
+	}
+	return nil
+}
+
+// closeBlockWriter closes a block writer and records the sizes in the
+// index.
+func (w *Writer) closeBlockWriter() error {
+	var err error
+	if err = w.bw.Close(); err != nil {
+		return err
+	}
+	w.index = append(w.index, w.bw.record())
+	return nil
+}
+
+// NewWriter creates a new xz writer using default parameters.
+func NewWriter(xz io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(xz)
+}
+
+// NewWriter creates a new Writer using the given configuration parameters.
+func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{
+		WriterConfig: c,
+		xz:           xz,
+		h:            header{c.CheckSum},
+		index:        make([]record, 0, 4),
+	}
+	if w.newHash, err = newHashFunc(c.CheckSum); err != nil {
+		return nil, err
+	}
+	data, err := w.h.MarshalBinary()
+	if err != nil {
+		return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err)
+	}
+	if _, err = xz.Write(data); err != nil {
+		return nil, err
+	}
+	if err = w.newBlockWriter(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// Write compresses the uncompressed data provided.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.closed {
+		return 0, errClosed
+	}
+	for {
+		k, err := w.bw.Write(p[n:])
+		n += k
+		if err != errNoSpace {
+			return n, err
+		}
+		if err = w.closeBlockWriter(); err != nil {
+			return n, err
+		}
+		if err = w.newBlockWriter(); err != nil {
+			return n, err
+		}
+	}
+}
+
+// Close closes the writer and adds the footer to the Writer. Close
+// doesn't close the underlying writer.
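+//
+// A typical sequence might look like this (sketch; f is any
+// io.WriteCloser and data is a []byte; error handling elided):
+//
+//	w, _ := NewWriter(f)
+//	w.Write(data)
+//	w.Close()
+//	f.Close() // the underlying writer is closed separately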
+func (w *Writer) Close() error {
+	if w.closed {
+		return errClosed
+	}
+	w.closed = true
+	var err error
+	if err = w.closeBlockWriter(); err != nil {
+		return err
+	}
+
+	f := footer{flags: w.h.flags}
+	if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
+		return err
+	}
+	data, err := f.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.xz.Write(data); err != nil {
+		return err
+	}
+	return nil
+}
+
+// countingWriter is a writer that counts all data written to it.
+type countingWriter struct {
+	w io.Writer
+	n int64
+}
+
+// Write writes data to the countingWriter.
+func (cw *countingWriter) Write(p []byte) (n int, err error) {
+	n, err = cw.w.Write(p)
+	cw.n += int64(n)
+	if err == nil && cw.n < 0 {
+		return n, errors.New("xz: counter overflow")
+	}
+	return
+}
+
+// blockWriter writes a single block.
+type blockWriter struct {
+	cxz countingWriter
+	// mw combines io.WriteCloser w and the hash.
+	mw        io.Writer
+	w         io.WriteCloser
+	n         int64
+	blockSize int64
+	closed    bool
+	headerLen int
+
+	filters []filter
+	hash    hash.Hash
+}
+
+// newBlockWriter creates a new block writer.
+func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
+	bw = &blockWriter{
+		cxz:       countingWriter{w: xz},
+		blockSize: c.BlockSize,
+		filters:   c.filters(),
+		hash:      hash,
+	}
+	bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
+	if err != nil {
+		return nil, err
+	}
+	if bw.hash.Size() != 0 {
+		bw.mw = io.MultiWriter(bw.w, bw.hash)
+	} else {
+		bw.mw = bw.w
+	}
+	return bw, nil
+}
+
+// writeHeader writes the header. If the function is called after Close
+// the compressedSize and uncompressedSize fields will be filled.
+func (bw *blockWriter) writeHeader(w io.Writer) error {
+	h := blockHeader{
+		compressedSize:   -1,
+		uncompressedSize: -1,
+		filters:          bw.filters,
+	}
+	if bw.closed {
+		h.compressedSize = bw.compressedSize()
+		h.uncompressedSize = bw.uncompressedSize()
+	}
+	data, err := h.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	bw.headerLen = len(data)
+	return nil
+}
+
+// compressedSize returns the amount of data written to the underlying
+// stream.
+func (bw *blockWriter) compressedSize() int64 {
+	return bw.cxz.n
+}
+
+// uncompressedSize returns the amount of data written to the
+// blockWriter.
+func (bw *blockWriter) uncompressedSize() int64 {
+	return bw.n
+}
+
+// unpaddedSize returns the sum of the header length, the compressed
+// size of the block and the hash size.
+func (bw *blockWriter) unpaddedSize() int64 {
+	if bw.headerLen <= 0 {
+		panic("xz: block header not written")
+	}
+	n := int64(bw.headerLen)
+	n += bw.compressedSize()
+	n += int64(bw.hash.Size())
+	return n
+}
+
+// record returns the record for the current stream. Call Close before
+// calling this method.
+func (bw *blockWriter) record() record {
+	return record{bw.unpaddedSize(), bw.uncompressedSize()}
+}
+
+var errClosed = errors.New("xz: writer already closed")
+
+var errNoSpace = errors.New("xz: no space")
+
+// Write writes uncompressed data to the block writer.
+func (bw *blockWriter) Write(p []byte) (n int, err error) {
+	if bw.closed {
+		return 0, errClosed
+	}
+
+	t := bw.blockSize - bw.n
+	if int64(len(p)) > t {
+		err = errNoSpace
+		p = p[:t]
+	}
+
+	var werr error
+	n, werr = bw.mw.Write(p)
+	bw.n += int64(n)
+	if werr != nil {
+		return n, werr
+	}
+	return n, err
+}
+
+// Close closes the writer.
+func (bw *blockWriter) Close() error {
+	if bw.closed {
+		return errClosed
+	}
+	bw.closed = true
+	if err := bw.w.Close(); err != nil {
+		return err
+	}
+	s := bw.hash.Size()
+	k := padLen(bw.cxz.n)
+	p := make([]byte, k+s)
+	bw.hash.Sum(p[k:k])
+	if _, err := bw.cxz.w.Write(p); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/tools/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/tools/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
index e80498d03..893eac00a 100644
--- a/tools/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
+++ b/tools/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
@@ -199,6 +199,9 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
 		flag = TypeXHeader
 	}
 	data := buf.String()
+	if len(data) > maxSpecialFileSize {
+		return ErrFieldTooLong
+	}
 	if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
 		return err // Global headers return here
 	}
diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/asm/README.md b/tools/vendor/github.com/vbatts/tar-split/tar/asm/README.md
new file mode 100644
index 000000000..2a3a5b56a
--- /dev/null
+++ b/tools/vendor/github.com/vbatts/tar-split/tar/asm/README.md
@@ -0,0 +1,44 @@
+asm
+===
+
+This library is for assembly and disassembly of tar archives, facilitated by
+`github.com/vbatts/tar-split/tar/storage`.
+
+
+Concerns
+--------
+
+For completely safe assembly/disassembly, there will need to be a Content
+Addressable Storage (CAS) directory that maps to a checksum in the
+`storage.Entry` of `storage.FileType`.
+
+This is due to the fact that tar archives _can_ allow multiple records for the
+same path, where the last one effectively wins, even if the prior records had
+a different payload.
+
+In this way, when assembling an archive from relative paths, if the archive has
+multiple entries for the same path, then all payloads read in from a relative
+path would be identical.
+
+
+Thoughts
+--------
+
+Have a look-aside directory or storage. This way, when a clobbering record is
+encountered in the tar stream, the payload of the prior/existing file is
+stored to the CAS. This way the clobbering record's file payload can be
+extracted, but we'll have preserved the payload needed to reassemble a precise
+tar archive.
+
+clobbered/path/to/file.[0-N]
+
+*alternatively*
+
+We could just _not_ support tar streams that have clobbering file paths.
+Appending records to an archive is not especially common, and doesn't happen
+by default for most implementations. Not supporting them wouldn't be a
+security concern either: if it did occur, we would reassemble an archive that
+doesn't validate its signature/checksum, so it shouldn't be trusted anyway.
+
+Otherwise, this will allow us to defer support for appended files as a FUTURE
+FEATURE.
diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go b/tools/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
new file mode 100644
index 000000000..3eb32ab61
--- /dev/null
+++ b/tools/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
@@ -0,0 +1,132 @@
+package asm
+
+import (
+	"bytes"
+	"fmt"
+	"hash"
+	"hash/crc64"
+	"io"
+	"sync"
+
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
+// stream.
+//
+// It takes a storage.FileGetter, for mapping the file payloads that are to be
+// read in, and a storage.Unpacker, which has access to the raw bytes and file
+// order metadata.
With the combination of these two
+// items, a precisely assembled tar archive is possible.
+func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {
+	// ... Since these are interfaces, this is possible, so let's not have a nil pointer
+	if fg == nil || up == nil {
+		return nil
+	}
+	pr, pw := io.Pipe()
+	go func() {
+		err := WriteOutputTarStream(fg, up, pw)
+		if err != nil {
+			pw.CloseWithError(err)
+		} else {
+			pw.Close()
+		}
+	}()
+	return pr
+}
+
+// WriteOutputTarStream writes an assembled tar archive to a writer.
+func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer) error {
+	// ... Since these are interfaces, this is possible, so let's not have a nil pointer
+	if fg == nil || up == nil {
+		return nil
+	}
+	var copyBuffer []byte
+	var crcHash hash.Hash
+	var crcSum []byte
+	var multiWriter io.Writer
+	for {
+		entry, err := up.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+		switch entry.Type {
+		case storage.SegmentType:
+			if _, err := w.Write(entry.Payload); err != nil {
+				return err
+			}
+		case storage.FileType:
+			if entry.Size == 0 {
+				continue
+			}
+			fh, err := fg.Get(entry.GetName())
+			if err != nil {
+				return err
+			}
+			if crcHash == nil {
+				crcHash = crc64.New(storage.CRCTable)
+				crcSum = make([]byte, 8)
+				multiWriter = io.MultiWriter(w, crcHash)
+				copyBuffer = byteBufferPool.Get().([]byte)
+				// TODO once we have some benchmark or memory profile then we can experiment with using *bytes.Buffer
+				//nolint:staticcheck // SA6002 not going to do a pointer here
+				defer byteBufferPool.Put(copyBuffer)
+			} else {
+				crcHash.Reset()
+			}
+
+			if _, err := copyWithBuffer(multiWriter, fh, copyBuffer); err != nil {
+				fh.Close()
+				return err
+			}
+
+			if !bytes.Equal(crcHash.Sum(crcSum[:0]), entry.Payload) {
+				// I would rather this be a comparable ErrInvalidChecksum or such,
+				// but since it's coming through the PipeReader, the context of
+				// _which_ file would be lost...
+				fh.Close()
+				return fmt.Errorf("file integrity checksum failed for %q", entry.GetName())
+			}
+			fh.Close()
+		}
+	}
+}
+
+var byteBufferPool = &sync.Pool{
+	New: func() interface{} {
+		return make([]byte, 32*1024)
+	},
+}
+
+// copyWithBuffer is taken from stdlib io.Copy implementation
+// https://github.com/golang/go/blob/go1.5.1/src/io/io.go#L367
+func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+	for {
+		nr, er := src.Read(buf)
+		if nr > 0 {
+			nw, ew := dst.Write(buf[0:nr])
+			if nw > 0 {
+				written += int64(nw)
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er == io.EOF {
+			break
+		}
+		if er != nil {
+			err = er
+			break
+		}
+	}
+	return written, err
+}
diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/tools/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
new file mode 100644
index 000000000..80c2522af
--- /dev/null
+++ b/tools/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
@@ -0,0 +1,156 @@
+package asm
+
+import (
+	"io"
+
+	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewInputTarStream wraps the Reader stream of a tar archive and provides a
+// Reader stream of the same.
+//
+// In the middle it will pack the segments and file metadata to storage.Packer
+// `p`.
+//
+// The storage.FilePutter is where payloads of files in the stream are
+// stashed. If this stashing is not needed, you can provide a nil
+// storage.FilePutter.
Since checksumming is still needed, a default
+// NewDiscardFilePutter will be used internally.
+func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) {
+	// What to do here... folks will want their own access to the Reader that is
+	// their tar archive stream, but we'll need that same stream to use our
+	// forked 'archive/tar'.
+	// Perhaps do an io.TeeReader that hands back an io.Reader for them to read
+	// from, and we'll MITM the stream to store metadata.
+	// We'll need a storage.FilePutter too ...
+
+	// Another concern, whether to do any storage.FilePutter operations, such that we
+	// don't extract any amount of the archive. But then again, we're not making
+	// files/directories, hardlinks, etc. Just writing the io to the storage.FilePutter.
+	// Perhaps we have a DiscardFilePutter that is a bit bucket.
+
+	// we'll return the pipe reader, since TeeReader does not buffer and will
+	// only read what the outputRdr Read's. Since Tar archives have padding on
+	// the end, we want to be the one reading the padding, even if the user's
+	// `archive/tar` doesn't care.
+	pR, pW := io.Pipe()
+	outputRdr := io.TeeReader(r, pW)
+
+	// we need a putter that will generate the crc64 sums of file payloads
+	if fp == nil {
+		fp = storage.NewDiscardFilePutter()
+	}
+
+	go func() {
+		tr := tar.NewReader(outputRdr)
+		tr.RawAccounting = true
+		for {
+			hdr, err := tr.Next()
+			if err != nil {
+				if err != io.EOF {
+					pW.CloseWithError(err)
+					return
+				}
+				// even when an EOF is reached, there are often 1024 null bytes at
+				// the end of an archive. Collect them too.
+				if b := tr.RawBytes(); len(b) > 0 {
+					_, err := p.AddEntry(storage.Entry{
+						Type:    storage.SegmentType,
+						Payload: b,
+					})
+					if err != nil {
+						pW.CloseWithError(err)
+						return
+					}
+				}
+				break // not return. We need the end of the reader.
+			}
+			if hdr == nil {
+				break // not return. We need the end of the reader.
+			}
+
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err := p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+
+			var csum []byte
+			if hdr.Size > 0 {
+				var err error
+				_, csum, err = fp.Put(hdr.Name, tr)
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+
+			entry := storage.Entry{
+				Type:    storage.FileType,
+				Size:    hdr.Size,
+				Payload: csum,
+			}
+			// For proper marshalling of non-utf8 characters
+			entry.SetName(hdr.Name)
+
+			// File entries added, regardless of size
+			_, err = p.AddEntry(entry)
+			if err != nil {
+				pW.CloseWithError(err)
+				return
+			}
+
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err = p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+		}
+
+		// It is allowable, and not uncommon, that there is further padding at
+		// the end of an archive, apart from the expected 1024 null bytes. We
+		// read this in chunks rather than in one go to avoid cases where a
+		// maliciously crafted tar file tries to trick us into reading many
+		// GBs into memory.
+ const paddingChunkSize = 1024 * 1024 + var paddingChunk [paddingChunkSize]byte + for { + var isEOF bool + n, err := outputRdr.Read(paddingChunk[:]) + if err != nil { + if err != io.EOF { + pW.CloseWithError(err) + return + } + isEOF = true + } + if n != 0 { + _, err = p.AddEntry(storage.Entry{ + Type: storage.SegmentType, + Payload: paddingChunk[:n], + }) + if err != nil { + pW.CloseWithError(err) + return + } + } + if isEOF { + break + } + } + pW.Close() + }() + + return pR, nil +} diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/asm/doc.go b/tools/vendor/github.com/vbatts/tar-split/tar/asm/doc.go new file mode 100644 index 000000000..4367b9022 --- /dev/null +++ b/tools/vendor/github.com/vbatts/tar-split/tar/asm/doc.go @@ -0,0 +1,9 @@ +/* +Package asm provides the API for streaming assembly and disassembly of tar +archives. + +Using the `github.com/vbatts/tar-split/tar/storage` for Packing/Unpacking the +metadata for a stream, as well as an implementation of Getting/Putting the file +entries' payload. +*/ +package asm diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/asm/iterate.go b/tools/vendor/github.com/vbatts/tar-split/tar/asm/iterate.go new file mode 100644 index 000000000..8a65887cf --- /dev/null +++ b/tools/vendor/github.com/vbatts/tar-split/tar/asm/iterate.go @@ -0,0 +1,57 @@ +package asm + +import ( + "bytes" + "fmt" + "io" + + "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/storage" +) + +// IterateHeaders calls handler for each tar header provided by Unpacker +func IterateHeaders(unpacker storage.Unpacker, handler func(hdr *tar.Header) error) error { + // We assume about NewInputTarStream: + // - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions + // - (There is a FileType entry for every tar header, we ignore it) + // - Trailing padding of a file, if any, is included in the next SegmentType entry + // - At the end, there may be SegmentType entries just for the terminating zero blocks. + + var pendingPadding int64 = 0 + for { + tsEntry, err := unpacker.Next() + if err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("reading tar-split entries: %w", err) + } + switch tsEntry.Type { + case storage.SegmentType: + payload := tsEntry.Payload + if int64(len(payload)) < pendingPadding { + return fmt.Errorf("expected %d bytes of padding after previous file, but next SegmentType only has %d bytes", pendingPadding, len(payload)) + } + payload = payload[pendingPadding:] + pendingPadding = 0 + + tr := tar.NewReader(bytes.NewReader(payload)) + hdr, err := tr.Next() + if err != nil { + if err == io.EOF { // Probably the last entry, but let’s let the unpacker drive that. + break + } + return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err) + } + if err := handler(hdr); err != nil { + return err + } + pendingPadding = tr.ExpectedPadding() + + case storage.FileType: + // Nothing + default: + return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type) + } + } +} diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/storage/doc.go b/tools/vendor/github.com/vbatts/tar-split/tar/storage/doc.go new file mode 100644 index 000000000..83f7089ff --- /dev/null +++ b/tools/vendor/github.com/vbatts/tar-split/tar/storage/doc.go @@ -0,0 +1,12 @@ +/* +Package storage is for metadata of a tar archive. + +Packing and unpacking the Entries of the stream. 
The types of entries are
+either segments of raw bytes (for the raw headers and various padding) or
+markers for a file payload.
+
+The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
+the file payload marker includes the name of the file, its size, and a crc64
+checksum (for basic file integrity).
+*/
+package storage
diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/storage/entry.go b/tools/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
new file mode 100644
index 000000000..c91e7ea1e
--- /dev/null
+++ b/tools/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
@@ -0,0 +1,78 @@
+package storage
+
+import "unicode/utf8"
+
+// Entries is for sorting by Position
+type Entries []Entry
+
+func (e Entries) Len() int           { return len(e) }
+func (e Entries) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
+
+// Type of Entry
+type Type int
+
+const (
+	// FileType represents a file payload from the tar stream.
+	//
+	// This will be used to map to relative paths on disk. Only entries with
+	// Size > 0 will get read into a resulting output stream (due to
+	// hardlinks).
+	FileType Type = 1 + iota
+	// SegmentType represents a raw bytes segment from the archive stream.
+	// These raw byte segments consist of the raw headers and various
+	// padding.
+	//
+	// Its payload is to be marshalled base64 encoded.
+	SegmentType
)

+// Entry is the structure for packing and unpacking the information read from
+// the Tar archive.
+//
+// FileType Payload checksum is using `hash/crc64` for basic file integrity,
+// _not_ for cryptography.
+// From http://www.backplane.com/matt/crc64.html, CRC32 has almost 40,000
+// collisions in a sample of 18.2 million, CRC64 had none.
+type Entry struct {
+	Type     Type   `json:"type"`
+	Name     string `json:"name,omitempty"`
+	NameRaw  []byte `json:"name_raw,omitempty"`
+	Size     int64  `json:"size,omitempty"`
+	Payload  []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
+	Position int    `json:"position"`
+}
+
+// SetName checks whether name is a valid UTF-8 string and sets the
+// appropriate field. See https://github.com/vbatts/tar-split/issues/17
+func (e *Entry) SetName(name string) {
+	if utf8.ValidString(name) {
+		e.Name = name
+	} else {
+		e.NameRaw = []byte(name)
+	}
+}
+
+// SetNameBytes checks whether name is a valid UTF-8 string and sets the
+// appropriate field.
+func (e *Entry) SetNameBytes(name []byte) {
+	if utf8.Valid(name) {
+		e.Name = string(name)
+	} else {
+		e.NameRaw = name
+	}
+}
+
+// GetName returns the string for the entry's name, regardless of which
+// field it is stored in.
+func (e *Entry) GetName() string {
+	if len(e.NameRaw) > 0 {
+		return string(e.NameRaw)
+	}
+	return e.Name
+}
+
+// GetNameBytes returns the bytes for the entry's name, regardless of which
+// field it is stored in.
+func (e *Entry) GetNameBytes() []byte {
+	if len(e.NameRaw) > 0 {
+		return e.NameRaw
+	}
+	return []byte(e.Name)
+}
diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/storage/getter.go b/tools/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
new file mode 100644
index 000000000..9fed24aa8
--- /dev/null
+++ b/tools/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
@@ -0,0 +1,105 @@
+package storage
+
+import (
+	"bytes"
+	"errors"
+	"hash/crc64"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// FileGetter is the interface for getting a stream of a file payload,
+// addressed by name/filename. Presumably, the names will be scoped to
+// relative file paths.
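+//
+// For example, NewPathFileGetter("/some/root") (defined below) returns a
+// FileGetter whose Get("a/b") opens the file at "/some/root/a/b".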
+type FileGetter interface { + // Get returns a stream for the provided file path + Get(filename string) (output io.ReadCloser, err error) +} + +// FilePutter is the interface for storing a stream of a file payload, +// addressed by name/filename. +type FilePutter interface { + // Put returns the size of the stream received, and the crc64 checksum for + // the provided stream + Put(filename string, input io.Reader) (size int64, checksum []byte, err error) +} + +// FileGetPutter is the interface that groups both Getting and Putting file +// payloads. +type FileGetPutter interface { + FileGetter + FilePutter +} + +// NewPathFileGetter returns a FileGetter that is for files relative to path +// relpath. +func NewPathFileGetter(relpath string) FileGetter { + return &pathFileGetter{root: relpath} +} + +type pathFileGetter struct { + root string +} + +func (pfg pathFileGetter) Get(filename string) (io.ReadCloser, error) { + return os.Open(filepath.Join(pfg.root, filename)) +} + +type bufferFileGetPutter struct { + files map[string][]byte +} + +func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) { + if _, ok := bfgp.files[name]; !ok { + return nil, errors.New("no such file") + } + b := bytes.NewBuffer(bfgp.files[name]) + return &readCloserWrapper{b}, nil +} + +func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) { + crc := crc64.New(CRCTable) + buf := bytes.NewBuffer(nil) + cw := io.MultiWriter(crc, buf) + i, err := io.Copy(cw, r) + if err != nil { + return 0, nil, err + } + bfgp.files[name] = buf.Bytes() + return i, crc.Sum(nil), nil +} + +type readCloserWrapper struct { + io.Reader +} + +func (w *readCloserWrapper) Close() error { return nil } + +// NewBufferFileGetPutter is a simple in-memory FileGetPutter +// +// Implication is this is memory intensive... +// Probably best for testing or light weight cases. +func NewBufferFileGetPutter() FileGetPutter { + return &bufferFileGetPutter{ + files: map[string][]byte{}, + } +} + +// NewDiscardFilePutter is a bit bucket FilePutter +func NewDiscardFilePutter() FilePutter { + return &bitBucketFilePutter{} +} + +type bitBucketFilePutter struct { + buffer [32 * 1024]byte // 32 kB is the buffer size currently used by io.Copy, as of August 2021. 
+}
+
+func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
+	c := crc64.New(CRCTable)
+	i, err := io.CopyBuffer(c, r, bbfp.buffer[:])
+	return i, c.Sum(nil), err
+}
+
+// CRCTable is the default table used for crc64 sum calculations
+var CRCTable = crc64.MakeTable(crc64.ISO)
diff --git a/tools/vendor/github.com/vbatts/tar-split/tar/storage/packer.go b/tools/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
new file mode 100644
index 000000000..4ba62d9b7
--- /dev/null
+++ b/tools/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
@@ -0,0 +1,110 @@
+package storage
+
+import (
+	"encoding/json"
+	"errors"
+	"io"
+	"path/filepath"
+	"unicode/utf8"
+)
+
+// ErrDuplicatePath occurs when a tar archive has more than one entry for the
+// same file path
+var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
+
+// Packer describes the methods to pack Entries to a storage destination
+type Packer interface {
+	// AddEntry packs the Entry and returns its position
+	AddEntry(e Entry) (int, error)
+}
+
+// Unpacker describes the methods to read Entries from a source
+type Unpacker interface {
+	// Next returns the next Entry being unpacked, or error, until io.EOF
+	Next() (*Entry, error)
+}
+
+type jsonUnpacker struct {
+	seen seenNames
+	dec  *json.Decoder
+}
+
+func (jup *jsonUnpacker) Next() (*Entry, error) {
+	var e Entry
+	err := jup.dec.Decode(&e)
+	if err != nil {
+		return nil, err
+	}
+
+	// check for dup name
+	if e.Type == FileType {
+		cName := filepath.Clean(e.GetName())
+		if _, ok := jup.seen[cName]; ok {
+			return nil, ErrDuplicatePath
+		}
+		jup.seen[cName] = struct{}{}
+	}
+
+	return &e, err
+}
+
+// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
+// FileType) as a json document.
+//
+// Each Entry read is expected to be delimited by a new line.
+func NewJSONUnpacker(r io.Reader) Unpacker {
+	return &jsonUnpacker{
+		dec:  json.NewDecoder(r),
+		seen: seenNames{},
+	}
+}
+
+type jsonPacker struct {
+	w    io.Writer
+	e    *json.Encoder
+	pos  int
+	seen seenNames
+}
+
+type seenNames map[string]struct{}
+
+func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
+	// if Name is not valid utf8, switch it to raw first.
+	if e.Name != "" {
+		if !utf8.ValidString(e.Name) {
+			e.NameRaw = []byte(e.Name)
+			e.Name = ""
+		}
+	}
+
+	// check early for dup name
+	if e.Type == FileType {
+		cName := filepath.Clean(e.GetName())
+		if _, ok := jp.seen[cName]; ok {
+			return -1, ErrDuplicatePath
+		}
+		jp.seen[cName] = struct{}{}
+	}
+
+	e.Position = jp.pos
+	err := jp.e.Encode(e)
+	if err != nil {
+		return -1, err
+	}
+
+	// made it this far, increment now
+	jp.pos++
+	return e.Position, nil
+}
+
+// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
+// FileType) as a json document.
+//
+// The Entries are delimited by a new line.
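+//
+// A minimal pack/unpack cycle might look like this (sketch; `raw` stands
+// for a header segment's bytes; error handling elided):
+//
+//	var buf bytes.Buffer
+//	p := NewJSONPacker(&buf)
+//	p.AddEntry(Entry{Type: SegmentType, Payload: raw})
+//	up := NewJSONUnpacker(&buf)
+//	entry, _ := up.Next()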
+func NewJSONPacker(w io.Writer) Packer { + return &jsonPacker{ + w: w, + e: json.NewEncoder(w), + seen: seenNames{}, + } +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/.gitignore b/tools/vendor/github.com/vbauerster/mpb/v8/.gitignore new file mode 100644 index 000000000..63bd91672 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/.gitignore @@ -0,0 +1,5 @@ +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING b/tools/vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING new file mode 100644 index 000000000..6ca54533c --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING @@ -0,0 +1,15 @@ +When contributing your first changes, please include an empty commit for +copyright waiver using the following message (replace 'John Doe' with +your name or nickname): + + John Doe Copyright Waiver + + I dedicate any and all copyright interest in this software to the + public domain. I make this dedication for the benefit of the public at + large and to the detriment of my heirs and successors. I intend this + dedication to be an overt act of relinquishment in perpetuity of all + present and future rights to this software under copyright law. + +The command to create an empty commit from the command-line is: + + git commit --allow-empty diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/README.md b/tools/vendor/github.com/vbauerster/mpb/v8/README.md new file mode 100644 index 000000000..05088f246 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/README.md @@ -0,0 +1,117 @@ +# Multi Progress Bar + +[![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v8) +[![Test status](https://github.com/vbauerster/mpb/actions/workflows/test.yml/badge.svg)](https://github.com/vbauerster/mpb/actions/workflows/test.yml) +[![Lint status](https://github.com/vbauerster/mpb/actions/workflows/golangci-lint.yml/badge.svg)](https://github.com/vbauerster/mpb/actions/workflows/golangci-lint.yml) + +**mpb** is a Go lib for rendering progress bars in terminal applications. 
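+
+To add it to a module, the standard Go toolchain applies (the module path
+matches the imports used in the examples below):
+
+```
+go get github.com/vbauerster/mpb/v8
+```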
+ +## Features + +- **Multiple Bars**: Multiple progress bars are supported +- **Dynamic Total**: Set total while bar is running +- **Dynamic Add/Remove**: Dynamically add or remove bars +- **Cancellation**: Cancel whole rendering process +- **Predefined Decorators**: Elapsed time, [ewma](https://github.com/VividCortex/ewma) based ETA, Percentage, Bytes counter +- **Decorator's width sync**: Synchronized decorator's width among multiple bars + +## Usage + +#### [Rendering single bar](_examples/singleBar/main.go) + +```go +package main + +import ( + "math/rand" + "time" + + "github.com/vbauerster/mpb/v8" + "github.com/vbauerster/mpb/v8/decor" +) + +func main() { + // initialize progress container, with custom width + p := mpb.New(mpb.WithWidth(64)) + + total := 100 + name := "Single Bar:" + // create a single bar, which will inherit container's width + bar := p.New(int64(total), + // BarFillerBuilder with custom style + mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"), + mpb.PrependDecorators( + // display our name with one space on the right + decor.Name(name, decor.WC{C: decor.DindentRight | decor.DextraSpace}), + // replace ETA decorator with "done" message, OnComplete event + decor.OnComplete(decor.AverageETA(decor.ET_STYLE_GO), "done"), + ), + mpb.AppendDecorators(decor.Percentage()), + ) + // simulating some work + max := 100 * time.Millisecond + for i := 0; i < total; i++ { + time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10) + bar.Increment() + } + // wait for our bar to complete and flush + p.Wait() +} +``` + +#### [Rendering multiple bars](_examples/multiBars/main.go) + +```go + var wg sync.WaitGroup + // passed wg will be accounted at p.Wait() call + p := mpb.New(mpb.WithWaitGroup(&wg)) + total, numBars := 100, 3 + wg.Add(numBars) + + for i := 0; i < numBars; i++ { + name := fmt.Sprintf("Bar#%d:", i) + bar := p.AddBar(int64(total), + mpb.PrependDecorators( + // simple name decorator + decor.Name(name), + // decor.DSyncWidth bit enables column width synchronization + decor.Percentage(decor.WCSyncSpace), + ), + mpb.AppendDecorators( + // replace ETA decorator with "done" message, OnComplete event + decor.OnComplete( + // ETA decorator with ewma age of 30 + decor.EwmaETA(decor.ET_STYLE_GO, 30, decor.WCSyncWidth), "done", + ), + ), + ) + // simulating some work + go func() { + defer wg.Done() + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + max := 100 * time.Millisecond + for i := 0; i < total; i++ { + // start variable is solely for EWMA calculation + // EWMA's unit of measure is an iteration's duration + start := time.Now() + time.Sleep(time.Duration(rng.Intn(10)+1) * max / 10) + // we need to call EwmaIncrement to fulfill ewma decorator's contract + bar.EwmaIncrement(time.Since(start)) + } + }() + } + // wait for passed wg and for all bars to complete and flush + p.Wait() +``` + +#### [dynTotal example](_examples/dynTotal/main.go) + +![dynTotal](_svg/godEMrCZmJkHYH1X9dN4Nm0U7.svg) + +#### [complex example](_examples/complex/main.go) + +![complex](_svg/wHzf1M7sd7B3zVa2scBMnjqRf.svg) + +#### [io example](_examples/io/main.go) + +![io](_svg/hIpTa3A5rQz65ssiVuRJu87X6.svg) diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/UNLICENSE b/tools/vendor/github.com/vbauerster/mpb/v8/UNLICENSE new file mode 100644 index 000000000..68a49daad --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/UNLICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. 
+ +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/bar.go b/tools/vendor/github.com/vbauerster/mpb/v8/bar.go new file mode 100644 index 000000000..db4f99c29 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -0,0 +1,582 @@ +package mpb + +import ( + "bytes" + "context" + "io" + "strings" + "sync" + "time" + + "github.com/acarl005/stripansi" + "github.com/mattn/go-runewidth" + "github.com/vbauerster/mpb/v8/decor" +) + +// Bar represents a progress bar. +type Bar struct { + index int // used by heap + priority int // used by heap + frameCh chan *renderFrame + operateState chan func(*bState) + container *Progress + bs *bState + bsOk chan struct{} + ctx context.Context + cancel func() +} + +type syncTable [2][]chan int +type extenderFunc func(decor.Statistics, ...io.Reader) ([]io.Reader, error) + +// bState is actual bar's state. +type bState struct { + id int + priority int + reqWidth int + shutdown int + total int64 + current int64 + refill int64 + trimSpace bool + aborted bool + triggerComplete bool + rmOnComplete bool + noPop bool + autoRefresh bool + buffers [3]*bytes.Buffer + decorGroups [2][]decor.Decorator + ewmaDecorators []decor.EwmaDecorator + filler BarFiller + extender extenderFunc + renderReq chan<- time.Time + waitBar *Bar // key for (*pState).queueBars +} + +type renderFrame struct { + rows []io.Reader + shutdown int + rmOnComplete bool + noPop bool + err error +} + +func newBar(ctx context.Context, container *Progress, bs *bState) *Bar { + ctx, cancel := context.WithCancel(ctx) + + bar := &Bar{ + priority: bs.priority, + frameCh: make(chan *renderFrame, 1), + operateState: make(chan func(*bState)), + bsOk: make(chan struct{}), + container: container, + ctx: ctx, + cancel: cancel, + } + + container.bwg.Add(1) + go bar.serve(bs) + return bar +} + +// ProxyReader wraps io.Reader with metrics required for progress +// tracking. If `r` is 'unknown total/size' reader it's mandatory +// to call `(*Bar).SetTotal(-1, true)` after the wrapper returns +// `io.EOF`. If bar is already completed or aborted, returns nil. +// Panics if `r` is nil. 
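+//
+// A typical use might look like this (sketch; resp is assumed to be an
+// *http.Response):
+//
+//	body := bar.ProxyReader(resp.Body)
+//	defer body.Close()
+//	_, err := io.Copy(dst, body)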
+func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
+	if r == nil {
+		panic("expected non nil io.Reader")
+	}
+	result := make(chan io.ReadCloser)
+	select {
+	case b.operateState <- func(s *bState) {
+		result <- newProxyReader(r, b, len(s.ewmaDecorators) != 0)
+	}:
+		return <-result
+	case <-b.ctx.Done():
+		return nil
+	}
+}
+
+// ProxyWriter wraps io.Writer with metrics required for progress tracking.
+// If the bar is already completed or aborted, it returns nil.
+// Panics if `w` is nil.
+func (b *Bar) ProxyWriter(w io.Writer) io.WriteCloser {
+	if w == nil {
+		panic("expected non nil io.Writer")
+	}
+	result := make(chan io.WriteCloser)
+	select {
+	case b.operateState <- func(s *bState) {
+		result <- newProxyWriter(w, b, len(s.ewmaDecorators) != 0)
+	}:
+		return <-result
+	case <-b.ctx.Done():
+		return nil
+	}
+}
+
+// ID returns the id of the bar.
+func (b *Bar) ID() int {
+	result := make(chan int)
+	select {
+	case b.operateState <- func(s *bState) { result <- s.id }:
+		return <-result
+	case <-b.bsOk:
+		return b.bs.id
+	}
+}
+
+// Current returns bar's current value, in other words the sum of all increments.
+func (b *Bar) Current() int64 {
+	result := make(chan int64)
+	select {
+	case b.operateState <- func(s *bState) { result <- s.current }:
+		return <-result
+	case <-b.bsOk:
+		return b.bs.current
+	}
+}
+
+// SetRefill sets the refill flag with the specified amount. The underlying
+// BarFiller will change its visual representation to indicate a refill
+// event. A refill event may correspond to a retry operation, for example.
+func (b *Bar) SetRefill(amount int64) {
+	select {
+	case b.operateState <- func(s *bState) { s.refill = min(amount, s.current) }:
+	case <-b.ctx.Done():
+	}
+}
+
+// TraverseDecorators traverses available decorators and calls `cb`
+// on each unwrapped one.
+func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
+	select {
+	case b.operateState <- func(s *bState) {
+		for _, group := range s.decorGroups {
+			for _, d := range group {
+				cb(unwrap(d))
+			}
+		}
+	}:
+	case <-b.ctx.Done():
+	}
+}
+
+// EnableTriggerComplete enables triggering of the complete event. It's
+// effective only for bars which were constructed with `total <= 0`. If
+// `current >= total` at the moment of the call, the complete event is
+// triggered right away.
+func (b *Bar) EnableTriggerComplete() {
+	select {
+	case b.operateState <- func(s *bState) {
+		if s.triggerComplete {
+			return
+		}
+		if s.current >= s.total {
+			s.current = s.total
+			s.triggerCompletion(b)
+		} else {
+			s.triggerComplete = true
+		}
+	}:
+	case <-b.ctx.Done():
+	}
+}
+
+// SetTotal sets total to an arbitrary value. It's effective only for bars
+// which were constructed with `total <= 0`. Setting total to a negative
+// value is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster.
+// If `complete` is true, the complete event is triggered right away.
+// Calling `(*Bar).EnableTriggerComplete` beforehand makes this method
+// non-operational.
+func (b *Bar) SetTotal(total int64, complete bool) {
+	select {
+	case b.operateState <- func(s *bState) {
+		if s.triggerComplete {
+			return
+		}
+		if total < 0 {
+			s.total = s.current
+		} else {
+			s.total = total
+		}
+		if complete {
+			s.current = s.total
+			s.triggerCompletion(b)
+		}
+	}:
+	case <-b.ctx.Done():
+	}
+}
+
+// SetCurrent sets progress' current to an arbitrary value.
+func (b *Bar) SetCurrent(current int64) { + if current < 0 { + return + } + select { + case b.operateState <- func(s *bState) { + s.current = current + if s.triggerComplete && s.current >= s.total { + s.current = s.total + s.triggerCompletion(b) + } + }: + case <-b.ctx.Done(): + } +} + +// Increment is a shorthand for b.IncrInt64(1). +func (b *Bar) Increment() { + b.IncrInt64(1) +} + +// IncrBy is a shorthand for b.IncrInt64(int64(n)). +func (b *Bar) IncrBy(n int) { + b.IncrInt64(int64(n)) +} + +// IncrInt64 increments progress by amount of n. +func (b *Bar) IncrInt64(n int64) { + select { + case b.operateState <- func(s *bState) { + s.current += n + if s.triggerComplete && s.current >= s.total { + s.current = s.total + s.triggerCompletion(b) + } + }: + case <-b.ctx.Done(): + } +} + +// EwmaIncrement is a shorthand for b.EwmaIncrInt64(1, iterDur). +func (b *Bar) EwmaIncrement(iterDur time.Duration) { + b.EwmaIncrInt64(1, iterDur) +} + +// EwmaIncrBy is a shorthand for b.EwmaIncrInt64(int64(n), iterDur). +func (b *Bar) EwmaIncrBy(n int, iterDur time.Duration) { + b.EwmaIncrInt64(int64(n), iterDur) +} + +// EwmaIncrInt64 increments progress by amount of n and updates EWMA based +// decorators by dur of a single iteration. +func (b *Bar) EwmaIncrInt64(n int64, iterDur time.Duration) { + select { + case b.operateState <- func(s *bState) { + var wg sync.WaitGroup + wg.Add(len(s.ewmaDecorators)) + for _, d := range s.ewmaDecorators { + // d := d // NOTE: uncomment for Go < 1.22, see /doc/faq#closures_and_goroutines + go func() { + defer wg.Done() + d.EwmaUpdate(n, iterDur) + }() + } + s.current += n + if s.triggerComplete && s.current >= s.total { + s.current = s.total + s.triggerCompletion(b) + } + wg.Wait() + }: + case <-b.ctx.Done(): + } +} + +// EwmaSetCurrent sets progress' current to an arbitrary value and updates +// EWMA based decorators by dur of a single iteration. +func (b *Bar) EwmaSetCurrent(current int64, iterDur time.Duration) { + if current < 0 { + return + } + select { + case b.operateState <- func(s *bState) { + n := current - s.current + var wg sync.WaitGroup + wg.Add(len(s.ewmaDecorators)) + for _, d := range s.ewmaDecorators { + // d := d // NOTE: uncomment for Go < 1.22, see /doc/faq#closures_and_goroutines + go func() { + defer wg.Done() + d.EwmaUpdate(n, iterDur) + }() + } + s.current = current + if s.triggerComplete && s.current >= s.total { + s.current = s.total + s.triggerCompletion(b) + } + wg.Wait() + }: + case <-b.ctx.Done(): + } +} + +// DecoratorAverageAdjust adjusts decorators implementing decor.AverageDecorator interface. +// Call if there is need to set start time after decorators have been constructed. +func (b *Bar) DecoratorAverageAdjust(start time.Time) { + b.TraverseDecorators(func(d decor.Decorator) { + if d, ok := d.(decor.AverageDecorator); ok { + d.AverageAdjust(start) + } + }) +} + +// SetPriority changes bar's order among multiple bars. Zero is highest +// priority, i.e. bar will be on top. If you don't need to set priority +// dynamically, better use BarPriority option. +func (b *Bar) SetPriority(priority int) { + b.container.UpdateBarPriority(b, priority, false) +} + +// Abort interrupts bar's running goroutine. Abort won't be engaged +// if bar is already in complete state. If drop is true bar will be +// removed as well. To make sure that bar has been removed call +// `(*Bar).Wait()` method. 
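+//
+// A small sketch (assuming `bar` is an active *Bar):
+//
+//	bar.Abort(true) // abort and drop the bar from output
+//	bar.Wait()      // block until the bar is actually shut down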
+func (b *Bar) Abort(drop bool) { + select { + case b.operateState <- func(s *bState) { + if s.aborted || s.completed() { + return + } + s.aborted = true + s.rmOnComplete = drop + s.triggerCompletion(b) + }: + case <-b.ctx.Done(): + } +} + +// Aborted reports whether the bar is in aborted state. +func (b *Bar) Aborted() bool { + result := make(chan bool) + select { + case b.operateState <- func(s *bState) { result <- s.aborted }: + return <-result + case <-b.bsOk: + return b.bs.aborted + } +} + +// Completed reports whether the bar is in completed state. +func (b *Bar) Completed() bool { + result := make(chan bool) + select { + case b.operateState <- func(s *bState) { result <- s.completed() }: + return <-result + case <-b.bsOk: + return b.bs.completed() + } +} + +// IsRunning reports whether the bar is in running state. +func (b *Bar) IsRunning() bool { + select { + case <-b.ctx.Done(): + return false + default: + return true + } +} + +// Wait blocks until bar is completed or aborted. +func (b *Bar) Wait() { + <-b.bsOk +} + +func (b *Bar) serve(bs *bState) { + defer b.container.bwg.Done() + decoratorsOnShutdown := func(group []decor.Decorator) { + for _, d := range group { + if d, ok := unwrap(d).(decor.ShutdownListener); ok { + b.container.bwg.Add(1) + go func() { + defer b.container.bwg.Done() + d.OnShutdown() + }() + } + } + } + for { + select { + case op := <-b.operateState: + op(bs) + case <-b.ctx.Done(): + decoratorsOnShutdown(bs.decorGroups[0]) + decoratorsOnShutdown(bs.decorGroups[1]) + // bar can be aborted by canceling parent ctx without calling b.Abort + bs.aborted = !bs.completed() + b.bs = bs + close(b.bsOk) + return + } + } +} + +func (b *Bar) render(tw int) { + fn := func(s *bState) { + frame := new(renderFrame) + stat := s.newStatistics(tw) + r, err := s.draw(stat) + if err != nil { + for _, buf := range s.buffers { + buf.Reset() + } + frame.err = err + b.frameCh <- frame + return + } + frame.rows, frame.err = s.extender(stat, r) + if s.aborted || s.completed() { + frame.shutdown = s.shutdown + frame.rmOnComplete = s.rmOnComplete + frame.noPop = s.noPop + // post increment makes sure OnComplete decorators are rendered + s.shutdown++ + } + b.frameCh <- frame + } + select { + case b.operateState <- fn: + case <-b.bsOk: + fn(b.bs) + } +} + +func (b *Bar) tryEarlyRefresh(renderReq chan<- time.Time) { + var otherRunning int + b.container.traverseBars(func(bar *Bar) bool { + if b != bar && bar.IsRunning() { + otherRunning++ + return false // stop traverse + } + return true // continue traverse + }) + if otherRunning == 0 { + for { + select { + case renderReq <- time.Now(): + case <-b.ctx.Done(): + return + } + } + } +} + +func (b *Bar) wSyncTable() syncTable { + result := make(chan syncTable) + select { + case b.operateState <- func(s *bState) { result <- s.wSyncTable() }: + return <-result + case <-b.bsOk: + return b.bs.wSyncTable() + } +} + +func (s *bState) draw(stat decor.Statistics) (_ io.Reader, err error) { + decorFiller := func(buf *bytes.Buffer, group []decor.Decorator) (err error) { + for _, d := range group { + // need to call Decor in any case because of width synchronization + str, width := d.Decor(stat) + if err != nil { + continue + } + if w := stat.AvailableWidth - width; w >= 0 { + _, err = buf.WriteString(str) + stat.AvailableWidth = w + } else if stat.AvailableWidth > 0 { + trunc := runewidth.Truncate(stripansi.Strip(str), stat.AvailableWidth, "…") + _, err = buf.WriteString(trunc) + stat.AvailableWidth = 0 + } + } + return err + } + + for i, buf := range 
s.buffers[:2] { + err = decorFiller(buf, s.decorGroups[i]) + if err != nil { + return nil, err + } + } + + spaces := []io.Reader{ + strings.NewReader(" "), + strings.NewReader(" "), + } + if s.trimSpace || stat.AvailableWidth < 2 { + for _, r := range spaces { + _, _ = io.Copy(io.Discard, r) + } + } else { + stat.AvailableWidth -= 2 + } + + err = s.filler.Fill(s.buffers[2], stat) + if err != nil { + return nil, err + } + + return io.MultiReader( + s.buffers[0], + spaces[0], + s.buffers[2], + spaces[1], + s.buffers[1], + strings.NewReader("\n"), + ), nil +} + +func (s *bState) wSyncTable() (table syncTable) { + var start int + var row []chan int + + for i, group := range s.decorGroups { + for _, d := range group { + if ch, ok := d.Sync(); ok { + row = append(row, ch) + } + } + table[i], start = row[start:], len(row) + } + return table +} + +func (s *bState) triggerCompletion(b *Bar) { + s.triggerComplete = true + if s.autoRefresh { + // Technically this call isn't required, but if refresh rate is set to + // one hour for example and bar completes within a few minutes p.Wait() + // will wait for one hour. This call helps to avoid unnecessary waiting. + go b.tryEarlyRefresh(s.renderReq) + } else { + b.cancel() + } +} + +func (s bState) completed() bool { + return s.triggerComplete && s.current == s.total +} + +func (s bState) newStatistics(tw int) decor.Statistics { + return decor.Statistics{ + AvailableWidth: tw, + RequestedWidth: s.reqWidth, + ID: s.id, + Total: s.total, + Current: s.current, + Refill: s.refill, + Completed: s.completed(), + Aborted: s.aborted, + } +} + +func unwrap(d decor.Decorator) decor.Decorator { + if d, ok := d.(decor.Wrapper); ok { + return unwrap(d.Unwrap()) + } + return d +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler.go b/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler.go new file mode 100644 index 000000000..379cfeaba --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler.go @@ -0,0 +1,31 @@ +package mpb + +import ( + "io" + + "github.com/vbauerster/mpb/v8/decor" +) + +// BarFiller interface. +// Bar (without decorators) renders itself by calling BarFiller's Fill method. +type BarFiller interface { + Fill(io.Writer, decor.Statistics) error +} + +// BarFillerBuilder interface. +// Default implementations are: +// +// BarStyle() +// SpinnerStyle() +// NopStyle() +type BarFillerBuilder interface { + Build() BarFiller +} + +// BarFillerFunc is function type adapter to convert compatible function +// into BarFiller interface. +type BarFillerFunc func(io.Writer, decor.Statistics) error + +func (f BarFillerFunc) Fill(w io.Writer, stat decor.Statistics) error { + return f(w, stat) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go b/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go new file mode 100644 index 000000000..4dca113d0 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go @@ -0,0 +1,274 @@ +package mpb + +import ( + "io" + + "github.com/mattn/go-runewidth" + "github.com/vbauerster/mpb/v8/decor" + "github.com/vbauerster/mpb/v8/internal" +) + +const ( + iLbound = iota + iRefiller + iFiller + iTip + iPadding + iRbound + iLen +) + +var defaultBarStyle = [iLen]string{"[", "+", "=", ">", "-", "]"} + +// BarStyleComposer interface. 
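+//
+// A composition sketch (hedged; mirrors the README usage and assumes an
+// existing *Progress p):
+//
+//	filler := mpb.BarStyle().Lbound("[").Filler("=").Tip(">").Padding("-").Rbound("]")
+//	bar := p.New(100, filler)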
+type BarStyleComposer interface { + BarFillerBuilder + Lbound(string) BarStyleComposer + LboundMeta(func(string) string) BarStyleComposer + Rbound(string) BarStyleComposer + RboundMeta(func(string) string) BarStyleComposer + Filler(string) BarStyleComposer + FillerMeta(func(string) string) BarStyleComposer + Refiller(string) BarStyleComposer + RefillerMeta(func(string) string) BarStyleComposer + Padding(string) BarStyleComposer + PaddingMeta(func(string) string) BarStyleComposer + Tip(frames ...string) BarStyleComposer + TipMeta(func(string) string) BarStyleComposer + TipOnComplete() BarStyleComposer + Reverse() BarStyleComposer +} + +type component struct { + width int + bytes []byte +} + +type barSection struct { + meta func(string) string + bytes []byte +} + +type barSections [iLen]barSection + +type barFiller struct { + components [iLen]component + metas [iLen]func(string) string + flushOp func(barSections, io.Writer) error + tip struct { + onComplete bool + count uint + frames []component + } +} + +type barStyle struct { + style [iLen]string + metas [iLen]func(string) string + tipFrames []string + tipOnComplete bool + rev bool +} + +// BarStyle constructs default bar style which can be altered via +// BarStyleComposer interface. +func BarStyle() BarStyleComposer { + bs := barStyle{ + style: defaultBarStyle, + tipFrames: []string{defaultBarStyle[iTip]}, + } + return bs +} + +func (s barStyle) Lbound(bound string) BarStyleComposer { + s.style[iLbound] = bound + return s +} + +func (s barStyle) LboundMeta(fn func(string) string) BarStyleComposer { + s.metas[iLbound] = fn + return s +} + +func (s barStyle) Rbound(bound string) BarStyleComposer { + s.style[iRbound] = bound + return s +} + +func (s barStyle) RboundMeta(fn func(string) string) BarStyleComposer { + s.metas[iRbound] = fn + return s +} + +func (s barStyle) Filler(filler string) BarStyleComposer { + s.style[iFiller] = filler + return s +} + +func (s barStyle) FillerMeta(fn func(string) string) BarStyleComposer { + s.metas[iFiller] = fn + return s +} + +func (s barStyle) Refiller(refiller string) BarStyleComposer { + s.style[iRefiller] = refiller + return s +} + +func (s barStyle) RefillerMeta(fn func(string) string) BarStyleComposer { + s.metas[iRefiller] = fn + return s +} + +func (s barStyle) Padding(padding string) BarStyleComposer { + s.style[iPadding] = padding + return s +} + +func (s barStyle) PaddingMeta(fn func(string) string) BarStyleComposer { + s.metas[iPadding] = fn + return s +} + +func (s barStyle) Tip(frames ...string) BarStyleComposer { + if len(frames) != 0 { + s.tipFrames = frames + } + return s +} + +func (s barStyle) TipMeta(fn func(string) string) BarStyleComposer { + s.metas[iTip] = fn + return s +} + +func (s barStyle) TipOnComplete() BarStyleComposer { + s.tipOnComplete = true + return s +} + +func (s barStyle) Reverse() BarStyleComposer { + s.rev = true + return s +} + +func (s barStyle) Build() BarFiller { + bf := &barFiller{metas: s.metas} + bf.components[iLbound] = component{ + width: runewidth.StringWidth(s.style[iLbound]), + bytes: []byte(s.style[iLbound]), + } + bf.components[iRbound] = component{ + width: runewidth.StringWidth(s.style[iRbound]), + bytes: []byte(s.style[iRbound]), + } + bf.components[iFiller] = component{ + width: runewidth.StringWidth(s.style[iFiller]), + bytes: []byte(s.style[iFiller]), + } + bf.components[iRefiller] = component{ + width: runewidth.StringWidth(s.style[iRefiller]), + bytes: []byte(s.style[iRefiller]), + } + bf.components[iPadding] = component{ + width: 
runewidth.StringWidth(s.style[iPadding]), + bytes: []byte(s.style[iPadding]), + } + bf.tip.onComplete = s.tipOnComplete + bf.tip.frames = make([]component, 0, len(s.tipFrames)) + for _, t := range s.tipFrames { + bf.tip.frames = append(bf.tip.frames, component{ + width: runewidth.StringWidth(t), + bytes: []byte(t), + }) + } + if s.rev { + bf.flushOp = barSections.flushRev + } else { + bf.flushOp = barSections.flush + } + return bf +} + +func (s *barFiller) Fill(w io.Writer, stat decor.Statistics) error { + width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) + // don't count brackets as progress + width -= (s.components[iLbound].width + s.components[iRbound].width) + if width < 0 { + return nil + } + + var tip component + var refilling, filling, padding []byte + var fillCount int + curWidth := int(internal.PercentageRound(stat.Total, stat.Current, uint(width))) + + if curWidth != 0 { + if !stat.Completed || s.tip.onComplete { + tip = s.tip.frames[s.tip.count%uint(len(s.tip.frames))] + s.tip.count++ + fillCount += tip.width + } + switch refWidth := 0; { + case stat.Refill != 0: + refWidth = int(internal.PercentageRound(stat.Total, stat.Refill, uint(width))) + curWidth -= refWidth + refWidth += curWidth + fallthrough + default: + for w := s.components[iFiller].width; curWidth-fillCount >= w; fillCount += w { + filling = append(filling, s.components[iFiller].bytes...) + } + for w := s.components[iRefiller].width; refWidth-fillCount >= w; fillCount += w { + refilling = append(refilling, s.components[iRefiller].bytes...) + } + } + } + + for w := s.components[iPadding].width; width-fillCount >= w; fillCount += w { + padding = append(padding, s.components[iPadding].bytes...) + } + + for w := 1; width-fillCount >= w; fillCount += w { + padding = append(padding, "…"...) + } + + return s.flushOp(barSections{ + {s.metas[iLbound], s.components[iLbound].bytes}, + {s.metas[iRefiller], refilling}, + {s.metas[iFiller], filling}, + {s.metas[iTip], tip.bytes}, + {s.metas[iPadding], padding}, + {s.metas[iRbound], s.components[iRbound].bytes}, + }, w) +} + +func (s barSection) flush(w io.Writer) (err error) { + if s.meta != nil { + _, err = io.WriteString(w, s.meta(string(s.bytes))) + } else { + _, err = w.Write(s.bytes) + } + return err +} + +func (bb barSections) flush(w io.Writer) error { + for _, s := range bb { + err := s.flush(w) + if err != nil { + return err + } + } + return nil +} + +func (bb barSections) flushRev(w io.Writer) error { + bb[0], bb[len(bb)-1] = bb[len(bb)-1], bb[0] + for i := len(bb) - 1; i >= 0; i-- { + err := bb[i].flush(w) + if err != nil { + return err + } + } + return nil +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go b/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go new file mode 100644 index 000000000..a23c61b7d --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go @@ -0,0 +1,24 @@ +package mpb + +import ( + "io" + + "github.com/vbauerster/mpb/v8/decor" +) + +// barFillerBuilderFunc is function type adapter to convert compatible +// function into BarFillerBuilder interface. +type barFillerBuilderFunc func() BarFiller + +func (f barFillerBuilderFunc) Build() BarFiller { + return f() +} + +// NopStyle provides BarFillerBuilder which builds NOP BarFiller. 
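+//
+// A sketch of a decorators-only bar (illustrative; assumes an existing
+// *Progress p):
+//
+//	bar := p.New(0, mpb.NopStyle(), mpb.AppendDecorators(decor.Name("working...")))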
+func NopStyle() BarFillerBuilder { + return barFillerBuilderFunc(func() BarFiller { + return BarFillerFunc(func(io.Writer, decor.Statistics) error { + return nil + }) + }) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go b/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go new file mode 100644 index 000000000..56075810c --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go @@ -0,0 +1,103 @@ +package mpb + +import ( + "io" + "strings" + + "github.com/mattn/go-runewidth" + "github.com/vbauerster/mpb/v8/decor" + "github.com/vbauerster/mpb/v8/internal" +) + +const ( + positionLeft = 1 + iota + positionRight +) + +var defaultSpinnerStyle = [...]string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} + +// SpinnerStyleComposer interface. +type SpinnerStyleComposer interface { + BarFillerBuilder + PositionLeft() SpinnerStyleComposer + PositionRight() SpinnerStyleComposer + Meta(func(string) string) SpinnerStyleComposer +} + +type spinnerFiller struct { + frames []string + count uint + meta func(string) string + position func(string, int) string +} + +type spinnerStyle struct { + position uint + frames []string + meta func(string) string +} + +// SpinnerStyle constructs default spinner style which can be altered via +// SpinnerStyleComposer interface. +func SpinnerStyle(frames ...string) SpinnerStyleComposer { + var ss spinnerStyle + if len(frames) != 0 { + ss.frames = frames + } else { + ss.frames = defaultSpinnerStyle[:] + } + return ss +} + +func (s spinnerStyle) PositionLeft() SpinnerStyleComposer { + s.position = positionLeft + return s +} + +func (s spinnerStyle) PositionRight() SpinnerStyleComposer { + s.position = positionRight + return s +} + +func (s spinnerStyle) Meta(fn func(string) string) SpinnerStyleComposer { + s.meta = fn + return s +} + +func (s spinnerStyle) Build() BarFiller { + sf := &spinnerFiller{frames: s.frames} + switch s.position { + case positionLeft: + sf.position = func(frame string, padWidth int) string { + return frame + strings.Repeat(" ", padWidth) + } + case positionRight: + sf.position = func(frame string, padWidth int) string { + return strings.Repeat(" ", padWidth) + frame + } + default: + sf.position = func(frame string, padWidth int) string { + return strings.Repeat(" ", padWidth/2) + frame + strings.Repeat(" ", padWidth/2+padWidth%2) + } + } + if s.meta != nil { + sf.meta = s.meta + } else { + sf.meta = func(s string) string { return s } + } + return sf +} + +func (s *spinnerFiller) Fill(w io.Writer, stat decor.Statistics) error { + width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) + frame := s.frames[s.count%uint(len(s.frames))] + frameWidth := runewidth.StringWidth(frame) + s.count++ + + if width < frameWidth { + return nil + } + + _, err := io.WriteString(w, s.position(s.meta(frame), width-frameWidth)) + return err +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/bar_option.go b/tools/vendor/github.com/vbauerster/mpb/v8/bar_option.go new file mode 100644 index 000000000..6c1b7e6f1 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/bar_option.go @@ -0,0 +1,221 @@ +package mpb + +import ( + "bytes" + "io" + + "github.com/vbauerster/mpb/v8/decor" +) + +// BarOption is a func option to alter default behavior of a bar. +type BarOption func(*bState) + +// PrependDecorators let you inject decorators to the bar's left side. 
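+//
+// For example (a sketch; assumes an existing *Progress p):
+//
+//	bar := p.AddBar(100,
+//		mpb.PrependDecorators(decor.Name("task:"), decor.Percentage()),
+//	)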
+func PrependDecorators(decorators ...decor.Decorator) BarOption { + var group []decor.Decorator + for _, decorator := range decorators { + if decorator != nil { + group = append(group, decorator) + } + } + return func(s *bState) { + s.decorGroups[0] = group + } +} + +// AppendDecorators let you inject decorators to the bar's right side. +func AppendDecorators(decorators ...decor.Decorator) BarOption { + var group []decor.Decorator + for _, decorator := range decorators { + if decorator != nil { + group = append(group, decorator) + } + } + return func(s *bState) { + s.decorGroups[1] = group + } +} + +// BarID sets bar id. +func BarID(id int) BarOption { + return func(s *bState) { + s.id = id + } +} + +// BarWidth sets bar width independent of the container. +func BarWidth(width int) BarOption { + return func(s *bState) { + s.reqWidth = width + } +} + +// BarQueueAfter puts this (being constructed) bar into the queue. +// BarPriority will be inherited from the argument bar. +// When argument bar completes or aborts queued bar replaces its place. +func BarQueueAfter(bar *Bar) BarOption { + return func(s *bState) { + s.waitBar = bar + } +} + +// BarRemoveOnComplete removes both bar's filler and its decorators +// on complete event. +func BarRemoveOnComplete() BarOption { + return func(s *bState) { + s.rmOnComplete = true + } +} + +// BarFillerClearOnComplete clears bar's filler on complete event. +// It's shortcut for BarFillerOnComplete(""). +func BarFillerClearOnComplete() BarOption { + return BarFillerOnComplete("") +} + +// BarFillerOnComplete replaces bar's filler with message, on complete event. +func BarFillerOnComplete(message string) BarOption { + return BarFillerMiddleware(func(base BarFiller) BarFiller { + return BarFillerFunc(func(w io.Writer, st decor.Statistics) error { + if st.Completed { + _, err := io.WriteString(w, message) + return err + } + return base.Fill(w, st) + }) + }) +} + +// BarFillerClearOnAbort clears bar's filler on abort event. +// It's shortcut for BarFillerOnAbort(""). +func BarFillerClearOnAbort() BarOption { + return BarFillerOnAbort("") +} + +// BarFillerOnAbort replaces bar's filler with message, on abort event. +func BarFillerOnAbort(message string) BarOption { + return BarFillerMiddleware(func(base BarFiller) BarFiller { + return BarFillerFunc(func(w io.Writer, st decor.Statistics) error { + if st.Aborted { + _, err := io.WriteString(w, message) + return err + } + return base.Fill(w, st) + }) + }) +} + +// BarFillerMiddleware provides a way to augment the underlying BarFiller. +func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption { + if middle == nil { + return nil + } + return func(s *bState) { + s.filler = middle(s.filler) + } +} + +// BarPriority sets bar's priority. Zero is highest priority, i.e. bar +// will be on top. This option isn't effective with `BarQueueAfter` option. +func BarPriority(priority int) BarOption { + return func(s *bState) { + s.priority = priority + } +} + +// BarExtender extends bar with arbitrary lines. Provided BarFiller will be +// called at each render/flush cycle. Any lines written to the underlying +// io.Writer will extend the bar either in above (rev = true) or below +// (rev = false) direction. 
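+//
+// A hedged sketch which extends the bar with one static line below it;
+// note that each extension line must end with '\n' in order to be flushed:
+//
+//	extender := mpb.BarFillerFunc(func(w io.Writer, st decor.Statistics) error {
+//		_, err := io.WriteString(w, "some details\n")
+//		return err
+//	})
+//	bar := p.AddBar(100, mpb.BarExtender(extender, false))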
+func BarExtender(filler BarFiller, rev bool) BarOption { + if filler == nil { + return nil + } + if f, ok := filler.(BarFillerFunc); ok && f == nil { + return nil + } + fn := makeExtenderFunc(filler, rev) + return func(s *bState) { + s.extender = fn + } +} + +func makeExtenderFunc(filler BarFiller, rev bool) extenderFunc { + buf := new(bytes.Buffer) + base := func(stat decor.Statistics, rows ...io.Reader) ([]io.Reader, error) { + err := filler.Fill(buf, stat) + if err != nil { + buf.Reset() + return rows, err + } + for { + line, err := buf.ReadBytes('\n') + if err != nil { + buf.Reset() + break + } + rows = append(rows, bytes.NewReader(line)) + } + return rows, err + } + if !rev { + return base + } + return func(stat decor.Statistics, rows ...io.Reader) ([]io.Reader, error) { + rows, err := base(stat, rows...) + if err != nil { + return rows, err + } + for left, right := 0, len(rows)-1; left < right; left, right = left+1, right-1 { + rows[left], rows[right] = rows[right], rows[left] + } + return rows, err + } +} + +// BarFillerTrim removes leading and trailing space around the underlying BarFiller. +func BarFillerTrim() BarOption { + return func(s *bState) { + s.trimSpace = true + } +} + +// BarNoPop disables bar pop out of container. Effective when +// PopCompletedMode of container is enabled. +func BarNoPop() BarOption { + return func(s *bState) { + s.noPop = true + } +} + +// BarOptional will return provided option only when cond is true. +func BarOptional(option BarOption, cond bool) BarOption { + if cond { + return option + } + return nil +} + +// BarOptOn will return provided option only when predicate evaluates to true. +func BarOptOn(option BarOption, predicate func() bool) BarOption { + if predicate() { + return option + } + return nil +} + +// BarFuncOptional will call option and return its value only when cond is true. +func BarFuncOptional(option func() BarOption, cond bool) BarOption { + if cond { + return option() + } + return nil +} + +// BarFuncOptOn will call option and return its value only when predicate evaluates to true. +func BarFuncOptOn(option func() BarOption, predicate func() bool) BarOption { + if predicate() { + return option() + } + return nil +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/container_option.go b/tools/vendor/github.com/vbauerster/mpb/v8/container_option.go new file mode 100644 index 000000000..85e12f225 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/container_option.go @@ -0,0 +1,146 @@ +package mpb + +import ( + "io" + "sync" + "time" +) + +// ContainerOption is a func option to alter default behavior of a bar +// container. Container term refers to a Progress struct which can +// hold one or more Bars. +type ContainerOption func(*pState) + +// WithWaitGroup provides means to have a single joint point. If +// *sync.WaitGroup is provided, you can safely call just p.Wait() +// without calling Wait() on provided *sync.WaitGroup. Makes sense +// when there are more than one bar to render. +func WithWaitGroup(wg *sync.WaitGroup) ContainerOption { + return func(s *pState) { + s.uwg = wg + } +} + +// WithWidth sets container width. If not set it defaults to terminal +// width. A bar added to the container will inherit its width, unless +// overridden by `func BarWidth(int) BarOption`. +func WithWidth(width int) ContainerOption { + return func(s *pState) { + s.reqWidth = width + } +} + +// WithQueueLen sets buffer size of heap manager channel. 
Ideally it should be
+// kept at MAX, where MAX is the number of bars to be rendered at the same
+// time. If len < MAX then backpressure on the scheduler increases, because
+// MAX-len extra goroutines are launched at each render cycle.
+// The default queue length is 128.
+func WithQueueLen(len int) ContainerOption {
+	return func(s *pState) {
+		s.hmQueueLen = len
+	}
+}
+
+// WithRefreshRate overrides the default 150ms refresh rate.
+func WithRefreshRate(d time.Duration) ContainerOption {
+	return func(s *pState) {
+		s.refreshRate = d
+	}
+}
+
+// WithManualRefresh disables the internal auto refresh time.Ticker.
+// Refresh will occur upon receiving a value from the provided ch.
+func WithManualRefresh(ch <-chan interface{}) ContainerOption {
+	return func(s *pState) {
+		s.manualRC = ch
+	}
+}
+
+// WithRenderDelay delays rendering. By default rendering starts as
+// soon as a bar is added; with this option it's possible to delay the
+// rendering process by keeping the provided chan unclosed. In other
+// words, rendering will start as soon as the provided chan is closed.
+func WithRenderDelay(ch <-chan struct{}) ContainerOption {
+	return func(s *pState) {
+		s.delayRC = ch
+	}
+}
+
+// WithShutdownNotifier arranges for a value of type `[]*mpb.Bar` to be
+// sent into the provided channel upon container shutdown.
+func WithShutdownNotifier(ch chan<- interface{}) ContainerOption {
+	return func(s *pState) {
+		s.shutdownNotifier = ch
+	}
+}
+
+// WithOutput overrides the default os.Stdout output. If the underlying
+// io.Writer is not a terminal then auto refresh is disabled unless the
+// WithAutoRefresh option is set.
+func WithOutput(w io.Writer) ContainerOption {
+	if w == nil {
+		w = io.Discard
+	}
+	return func(s *pState) {
+		s.output = w
+	}
+}
+
+// WithDebugOutput sets debug output.
+func WithDebugOutput(w io.Writer) ContainerOption {
+	if w == nil {
+		w = io.Discard
+	}
+	return func(s *pState) {
+		s.debugOut = w
+	}
+}
+
+// WithAutoRefresh forces auto refresh regardless of what output is set to.
+// Applicable only if WithManualRefresh is not set.
+func WithAutoRefresh() ContainerOption {
+	return func(s *pState) {
+		s.autoRefresh = true
+	}
+}
+
+// PopCompletedMode pops completed bars out of the progress container.
+// In this mode completed bars get moved to the top and stop
+// participating in the rendering cycle.
+func PopCompletedMode() ContainerOption {
+	return func(s *pState) {
+		s.popCompleted = true
+	}
+}
+
+// ContainerOptional will return the provided option only when cond is true.
+func ContainerOptional(option ContainerOption, cond bool) ContainerOption {
+	if cond {
+		return option
+	}
+	return nil
+}
+
+// ContainerOptOn will return the provided option only when the predicate
+// evaluates to true.
+func ContainerOptOn(option ContainerOption, predicate func() bool) ContainerOption {
+	if predicate() {
+		return option
+	}
+	return nil
+}
+
+// ContainerFuncOptional will call option and return its value only when cond is true.
+func ContainerFuncOptional(option func() ContainerOption, cond bool) ContainerOption {
+	if cond {
+		return option()
+	}
+	return nil
+}
+
+// ContainerFuncOptOn will call option and return its value only when the
+// predicate evaluates to true.
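+//
+// For example (a sketch; `verbose` is an assumed caller-side flag):
+//
+//	p := mpb.New(mpb.ContainerFuncOptOn(
+//		func() mpb.ContainerOption { return mpb.WithDebugOutput(os.Stderr) },
+//		func() bool { return verbose },
+//	))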
+func ContainerFuncOptOn(option func() ContainerOption, predicate func() bool) ContainerOption { + if predicate() { + return option() + } + return nil +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/doc.go b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/doc.go new file mode 100644 index 000000000..93c8f8268 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/doc.go @@ -0,0 +1,2 @@ +// Package cwriter is a console writer abstraction for the underlying OS. +package cwriter diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go new file mode 100644 index 000000000..215643b45 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go @@ -0,0 +1,7 @@ +//go:build darwin || dragonfly || freebsd || netbsd || openbsd + +package cwriter + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go new file mode 100644 index 000000000..7d0e76123 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go @@ -0,0 +1,7 @@ +//go:build aix || linux + +package cwriter + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go new file mode 100644 index 000000000..981f574f4 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go @@ -0,0 +1,7 @@ +//go:build solaris + +package cwriter + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETA diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go new file mode 100644 index 000000000..5daf003a3 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go @@ -0,0 +1,7 @@ +//go:build zos + +package cwriter + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go new file mode 100644 index 000000000..23a72d3ec --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go @@ -0,0 +1,59 @@ +package cwriter + +import ( + "bytes" + "errors" + "io" + "os" + "strconv" +) + +// https://github.com/dylanaraps/pure-sh-bible#cursor-movement +const ( + escOpen = "\x1b[" + cuuAndEd = "A\x1b[J" +) + +// ErrNotTTY not a TeleTYpewriter error. +var ErrNotTTY = errors.New("not a terminal") + +// New returns a new Writer with defaults. +func New(out io.Writer) *Writer { + w := &Writer{ + Buffer: new(bytes.Buffer), + out: out, + termSize: func(_ int) (int, int, error) { + return -1, -1, ErrNotTTY + }, + } + if f, ok := out.(*os.File); ok { + w.fd = int(f.Fd()) + if IsTerminal(w.fd) { + w.terminal = true + w.termSize = func(fd int) (int, int, error) { + return GetSize(fd) + } + } + } + bb := make([]byte, 16) + w.ew = escWriter(bb[:copy(bb, []byte(escOpen))]) + return w +} + +// IsTerminal tells whether underlying io.Writer is terminal. +func (w *Writer) IsTerminal() bool { + return w.terminal +} + +// GetTermSize returns WxH of underlying terminal. 
+func (w *Writer) GetTermSize() (width, height int, err error) { + return w.termSize(w.fd) +} + +type escWriter []byte + +func (b escWriter) ansiCuuAndEd(out io.Writer, n int) error { + b = strconv.AppendInt(b, int64(n), 10) + _, err := out.Write(append(b, []byte(cuuAndEd)...)) + return err +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go new file mode 100644 index 000000000..e80d757af --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go @@ -0,0 +1,48 @@ +//go:build !windows + +package cwriter + +import ( + "bytes" + "io" + + "golang.org/x/sys/unix" +) + +// Writer is a buffered terminal writer, which moves cursor N lines up +// on each flush except the first one, where N is a number of lines of +// a previous flush. +type Writer struct { + *bytes.Buffer + out io.Writer + ew escWriter + fd int + terminal bool + termSize func(int) (int, int, error) +} + +// Flush flushes the underlying buffer. +// It's caller's responsibility to pass correct number of lines. +func (w *Writer) Flush(lines int) error { + _, err := w.WriteTo(w.out) + // some terminals interpret 'cursor up 0' as 'cursor up 1' + if err == nil && lines > 0 { + err = w.ew.ansiCuuAndEd(w, lines) + } + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return -1, -1, err + } + return int(ws.Col), int(ws.Row), nil +} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go new file mode 100644 index 000000000..44293f26a --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go @@ -0,0 +1,101 @@ +//go:build windows + +package cwriter + +import ( + "bytes" + "io" + "unsafe" + + "golang.org/x/sys/windows" +) + +var kernel32 = windows.NewLazySystemDLL("kernel32.dll") + +var ( + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") +) + +// Writer is a buffered terminal writer, which moves cursor N lines up +// on each flush except the first one, where N is a number of lines of +// a previous flush. +type Writer struct { + *bytes.Buffer + out io.Writer + ew escWriter + lines int + fd int + terminal bool + termSize func(int) (int, int, error) +} + +// Flush flushes the underlying buffer. +// It's caller's responsibility to pass correct number of lines. 
+func (w *Writer) Flush(lines int) error { + if w.lines > 0 { + err := w.clearLines(w.lines) + if err != nil { + return err + } + } + w.lines = lines + _, err := w.WriteTo(w.out) + return err +} + +func (w *Writer) clearLines(n int) error { + if !w.terminal { + // hope it's cygwin or similar + return w.ew.ansiCuuAndEd(w.out, n) + } + + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(w.fd), &info); err != nil { + return err + } + + info.CursorPosition.Y -= int16(n) + if info.CursorPosition.Y < 0 { + info.CursorPosition.Y = 0 + } + _, _, _ = procSetConsoleCursorPosition.Call( + uintptr(w.fd), + uintptr(uint32(uint16(info.CursorPosition.Y))<<16|uint32(uint16(info.CursorPosition.X))), + ) + + // clear the lines + cursor := &windows.Coord{ + X: info.Window.Left, + Y: info.CursorPosition.Y, + } + count := uint32(info.Size.X) * uint32(n) + _, _, _ = procFillConsoleOutputCharacter.Call( + uintptr(w.fd), + uintptr(' '), + uintptr(count), + *(*uintptr)(unsafe.Pointer(cursor)), + uintptr(unsafe.Pointer(new(uint32))), + ) + return nil +} + +// GetSize returns the visible dimensions of the given terminal. +// These dimensions don't include any scrollback buffer height. +func GetSize(fd int) (width, height int, err error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return 0, 0, err + } + // terminal.GetSize from crypto/ssh adds "+ 1" to both width and height: + // https://go.googlesource.com/crypto/+/refs/heads/release-branch.go1.14/ssh/terminal/util_windows.go#75 + // but looks like this is a root cause of issue #66, so removing both "+ 1" have fixed it. + return int(info.Window.Right - info.Window.Left), int(info.Window.Bottom - info.Window.Top), nil +} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &st) + return err == nil +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/any.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/any.go new file mode 100644 index 000000000..ca208d8ac --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/any.go @@ -0,0 +1,21 @@ +package decor + +var _ Decorator = any{} + +// Any decorator. +// Converts DecorFunc into Decorator. +// +// `fn` DecorFunc callback +// `wcc` optional WC config +func Any(fn DecorFunc, wcc ...WC) Decorator { + return any{initWC(wcc...), fn} +} + +type any struct { + WC + fn DecorFunc +} + +func (d any) Decor(s Statistics) (string, int) { + return d.Format(d.fn(s)) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/counters.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/counters.go new file mode 100644 index 000000000..042027578 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/counters.go @@ -0,0 +1,253 @@ +package decor + +import ( + "fmt" +) + +// CountersNoUnit is a wrapper around Counters with no unit param. +func CountersNoUnit(pairFmt string, wcc ...WC) Decorator { + return Counters(0, pairFmt, wcc...) +} + +// CountersKibiByte is a wrapper around Counters with predefined unit +// as SizeB1024(0). +func CountersKibiByte(pairFmt string, wcc ...WC) Decorator { + return Counters(SizeB1024(0), pairFmt, wcc...) +} + +// CountersKiloByte is a wrapper around Counters with predefined unit +// as SizeB1000(0). 
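+//
+// For example (a sketch; assumes an existing *Progress p and int64 total):
+//
+//	bar := p.AddBar(total, mpb.AppendDecorators(decor.CountersKiloByte("% d / % d")))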
+func CountersKiloByte(pairFmt string, wcc ...WC) Decorator { + return Counters(SizeB1000(0), pairFmt, wcc...) +} + +// Counters decorator with dynamic unit measure adjustment. +// +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] +// +// `pairFmt` printf compatible verbs for current and total +// +// `wcc` optional WC config +// +// pairFmt example if unit=SizeB1000(0): +// +// pairFmt="%d / %d" output: "1MB / 12MB" +// pairFmt="% d / % d" output: "1 MB / 12 MB" +// pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB" +// pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB" +// pairFmt="%f / %f" output: "1.000000MB / 12.000000MB" +// pairFmt="% f / % f" output: "1.000000 MB / 12.000000 MB" +func Counters(unit interface{}, pairFmt string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if pairFmt == "" { + pairFmt = "% d / % d" + } + return func(s Statistics) string { + return fmt.Sprintf(pairFmt, SizeB1024(s.Current), SizeB1024(s.Total)) + } + case SizeB1000: + if pairFmt == "" { + pairFmt = "% d / % d" + } + return func(s Statistics) string { + return fmt.Sprintf(pairFmt, SizeB1000(s.Current), SizeB1000(s.Total)) + } + default: + if pairFmt == "" { + pairFmt = "%d / %d" + } + return func(s Statistics) string { + return fmt.Sprintf(pairFmt, s.Current, s.Total) + } + } + } + return Any(producer(), wcc...) +} + +// TotalNoUnit is a wrapper around Total with no unit param. +func TotalNoUnit(format string, wcc ...WC) Decorator { + return Total(0, format, wcc...) +} + +// TotalKibiByte is a wrapper around Total with predefined unit +// as SizeB1024(0). +func TotalKibiByte(format string, wcc ...WC) Decorator { + return Total(SizeB1024(0), format, wcc...) +} + +// TotalKiloByte is a wrapper around Total with predefined unit +// as SizeB1000(0). +func TotalKiloByte(format string, wcc ...WC) Decorator { + return Total(SizeB1000(0), format, wcc...) +} + +// Total decorator with dynamic unit measure adjustment. +// +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] +// +// `format` printf compatible verb for Total +// +// `wcc` optional WC config +// +// format example if unit=SizeB1024(0): +// +// format="%d" output: "12MiB" +// format="% d" output: "12 MiB" +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func Total(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, SizeB1024(s.Total)) + } + case SizeB1000: + if format == "" { + format = "% d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, SizeB1000(s.Total)) + } + default: + if format == "" { + format = "%d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, s.Total) + } + } + } + return Any(producer(), wcc...) +} + +// CurrentNoUnit is a wrapper around Current with no unit param. +func CurrentNoUnit(format string, wcc ...WC) Decorator { + return Current(0, format, wcc...) +} + +// CurrentKibiByte is a wrapper around Current with predefined unit +// as SizeB1024(0). +func CurrentKibiByte(format string, wcc ...WC) Decorator { + return Current(SizeB1024(0), format, wcc...) +} + +// CurrentKiloByte is a wrapper around Current with predefined unit +// as SizeB1000(0). +func CurrentKiloByte(format string, wcc ...WC) Decorator { + return Current(SizeB1000(0), format, wcc...) 
+} + +// Current decorator with dynamic unit measure adjustment. +// +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] +// +// `format` printf compatible verb for Current +// +// `wcc` optional WC config +// +// format example if unit=SizeB1024(0): +// +// format="%d" output: "12MiB" +// format="% d" output: "12 MiB" +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func Current(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, SizeB1024(s.Current)) + } + case SizeB1000: + if format == "" { + format = "% d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, SizeB1000(s.Current)) + } + default: + if format == "" { + format = "%d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, s.Current) + } + } + } + return Any(producer(), wcc...) +} + +// InvertedCurrentNoUnit is a wrapper around InvertedCurrent with no unit param. +func InvertedCurrentNoUnit(format string, wcc ...WC) Decorator { + return InvertedCurrent(0, format, wcc...) +} + +// InvertedCurrentKibiByte is a wrapper around InvertedCurrent with predefined unit +// as SizeB1024(0). +func InvertedCurrentKibiByte(format string, wcc ...WC) Decorator { + return InvertedCurrent(SizeB1024(0), format, wcc...) +} + +// InvertedCurrentKiloByte is a wrapper around InvertedCurrent with predefined unit +// as SizeB1000(0). +func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator { + return InvertedCurrent(SizeB1000(0), format, wcc...) +} + +// InvertedCurrent decorator with dynamic unit measure adjustment. +// +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] +// +// `format` printf compatible verb for InvertedCurrent +// +// `wcc` optional WC config +// +// format example if unit=SizeB1024(0): +// +// format="%d" output: "12MiB" +// format="% d" output: "12 MiB" +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func InvertedCurrent(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, SizeB1024(s.Total-s.Current)) + } + case SizeB1000: + if format == "" { + format = "% d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, SizeB1000(s.Total-s.Current)) + } + default: + if format == "" { + format = "%d" + } + return func(s Statistics) string { + return fmt.Sprintf(format, s.Total-s.Current) + } + } + } + return Any(producer(), wcc...) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go new file mode 100644 index 000000000..6bec1151b --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go @@ -0,0 +1,183 @@ +package decor + +import ( + "fmt" + "time" + + "github.com/mattn/go-runewidth" +) + +const ( + // DindentRight sets indentation from right to left. + // + // |foo |b | DindentRight is set + // | foo| b| DindentRight is not set + DindentRight = 1 << iota + + // DextraSpace bit adds extra indentation space. + DextraSpace + + // DSyncWidth bit enables same column width synchronization. 
+	// Effective with multiple bars only.
+	DSyncWidth
+
+	// DSyncWidthR is shortcut for DSyncWidth|DindentRight
+	DSyncWidthR = DSyncWidth | DindentRight
+
+	// DSyncSpace is shortcut for DSyncWidth|DextraSpace
+	DSyncSpace = DSyncWidth | DextraSpace
+
+	// DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DindentRight
+	DSyncSpaceR = DSyncWidth | DextraSpace | DindentRight
+)
+
+// TimeStyle enum.
+type TimeStyle int
+
+// TimeStyle kinds.
+const (
+	ET_STYLE_GO TimeStyle = iota
+	ET_STYLE_HHMMSS
+	ET_STYLE_HHMM
+	ET_STYLE_MMSS
+)
+
+// Statistics contains fields which are necessary for implementing
+// `decor.Decorator` and `mpb.BarFiller` interfaces.
+type Statistics struct {
+	AvailableWidth int // calculated width initially equal to terminal width
+	RequestedWidth int // width set by `mpb.WithWidth`
+	ID             int
+	Total          int64
+	Current        int64
+	Refill         int64
+	Completed      bool
+	Aborted        bool
+}
+
+// Decorator interface.
+// Most of the time there is no need to implement this interface
+// manually, as the decor package already provides a wide range of
+// decorators which implement it. If however the built-in decorators
+// don't meet your needs, you're free to implement your own by
+// implementing this particular interface. The easy way to go is to
+// convert a `DecorFunc` into a `Decorator` interface by using the
+// provided `func Any(DecorFunc, ...WC) Decorator`.
+type Decorator interface {
+	Synchronizer
+	Formatter
+	Decor(Statistics) (str string, viewWidth int)
+}
+
+// DecorFunc func type.
+// To be used with `func Any(DecorFunc, ...WC) Decorator`.
+type DecorFunc func(Statistics) string
+
+// Synchronizer interface.
+// All decorators implement this interface implicitly. Its Sync
+// method exposes a width sync channel, if the DSyncWidth bit is set.
+type Synchronizer interface {
+	Sync() (chan int, bool)
+}
+
+// Formatter interface.
+// The Format method needs to be called from within the Decorator.Decor
+// method in order to format the string according to decor.WC settings.
+// No need to implement it manually as long as decor.WC is embedded.
+type Formatter interface {
+	Format(string) (_ string, width int)
+}
+
+// Wrapper interface.
+// If you're implementing a custom Decorator by wrapping a built-in one,
+// it is necessary to implement this interface to retain the functionality
+// of the built-in Decorator.
+type Wrapper interface {
+	Unwrap() Decorator
+}
+
+// EwmaDecorator interface.
+// EWMA based decorators should implement this one.
+type EwmaDecorator interface {
+	EwmaUpdate(int64, time.Duration)
+}
+
+// AverageDecorator interface.
+// Average decorators should implement this interface to provide a start
+// time adjustment facility, for resumable tasks.
+type AverageDecorator interface {
+	AverageAdjust(time.Time)
+}
+
+// ShutdownListener interface.
+// If a decorator needs to be notified once upon a bar's shutdown event,
+// this is the right interface to implement.
+type ShutdownListener interface {
+	OnShutdown()
+}
+
+// Global convenience instances of WC with the sync width bit set.
+// To be used with multiple bars only, i.e. not effective for single bar usage.
+var (
+	WCSyncWidth  = WC{C: DSyncWidth}
+	WCSyncWidthR = WC{C: DSyncWidthR}
+	WCSyncSpace  = WC{C: DSyncSpace}
+	WCSyncSpaceR = WC{C: DSyncSpaceR}
+)
+
+// WC is a struct with two public fields W and C, both of int type.
+// W represents width and C represents a bit set of width related config.
+// A decorator should embed WC to enable width synchronization.
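+//
+// A sketch of a custom decorator embedding WC (illustrative only; the
+// embedded WC must be initialized via its Init method, as the built-in
+// decorators do through initWC):
+//
+//	type countDecor struct {
+//		decor.WC
+//	}
+//
+//	func (d countDecor) Decor(s decor.Statistics) (string, int) {
+//		// Format handles width sync; the string itself is up to us
+//		return d.Format(fmt.Sprintf("%d/%d", s.Current, s.Total))
+//	}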
+type WC struct { + W int + C int + fill func(s string, w int) string + wsync chan int +} + +// Format should be called by any Decorator implementation. +// Returns formatted string and its view (visual) width. +func (wc WC) Format(str string) (string, int) { + width := runewidth.StringWidth(str) + if wc.W > width { + width = wc.W + } else if (wc.C & DextraSpace) != 0 { + width++ + } + if (wc.C & DSyncWidth) != 0 { + wc.wsync <- width + width = <-wc.wsync + } + return wc.fill(str, width), width +} + +// Init initializes width related config. +func (wc *WC) Init() WC { + if (wc.C & DindentRight) != 0 { + wc.fill = runewidth.FillRight + } else { + wc.fill = runewidth.FillLeft + } + if (wc.C & DSyncWidth) != 0 { + // it's deliberate choice to override wsync on each Init() call, + // this way globals like WCSyncSpace can be reused + wc.wsync = make(chan int) + } + return *wc +} + +// Sync is implementation of Synchronizer interface. +func (wc WC) Sync() (chan int, bool) { + if (wc.C&DSyncWidth) != 0 && wc.wsync == nil { + panic(fmt.Sprintf("%T is not initialized", wc)) + } + return wc.wsync, (wc.C & DSyncWidth) != 0 +} + +func initWC(wcc ...WC) WC { + var wc WC + for _, nwc := range wcc { + wc = nwc + } + return wc.Init() +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/doc.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/doc.go new file mode 100644 index 000000000..d41aa5061 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/doc.go @@ -0,0 +1,19 @@ +// Package decor provides common decorators for "github.com/vbauerster/mpb/v8" module. +// +// Some decorators returned by this package might have a closure state. It is ok to use +// decorators concurrently, unless you share the same decorator among multiple +// *mpb.Bar instances. To avoid data races, create new decorator per *mpb.Bar instance. +// +// Don't: +// +// p := mpb.New() +// name := decor.Name("bar") +// p.AddBar(100, mpb.AppendDecorators(name)) +// p.AddBar(100, mpb.AppendDecorators(name)) +// +// Do: +// +// p := mpb.New() +// p.AddBar(100, mpb.AppendDecorators(decor.Name("bar1"))) +// p.AddBar(100, mpb.AppendDecorators(decor.Name("bar2"))) +package decor diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go new file mode 100644 index 000000000..f3ed7a8e6 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go @@ -0,0 +1,33 @@ +package decor + +import ( + "time" +) + +// Elapsed decorator. It's wrapper of NewElapsed. +// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `wcc` optional WC config +func Elapsed(style TimeStyle, wcc ...WC) Decorator { + return NewElapsed(style, time.Now(), wcc...) +} + +// NewElapsed returns elapsed time decorator. +// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `start` start time +// +// `wcc` optional WC config +func NewElapsed(style TimeStyle, start time.Time, wcc ...WC) Decorator { + var msg string + producer := chooseTimeProducer(style) + fn := func(s Statistics) string { + if !s.Completed && !s.Aborted { + msg = producer(time.Since(start)) + } + return msg + } + return Any(fn, wcc...) 
+} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/eta.go new file mode 100644 index 000000000..d8fcc2b92 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -0,0 +1,226 @@ +package decor + +import ( + "fmt" + "math" + "time" + + "github.com/VividCortex/ewma" +) + +var ( + _ Decorator = (*movingAverageETA)(nil) + _ EwmaDecorator = (*movingAverageETA)(nil) + _ Decorator = (*averageETA)(nil) + _ AverageDecorator = (*averageETA)(nil) +) + +// TimeNormalizer interface. Implementers could be passed into +// MovingAverageETA, in order to affect i.e. normalize its output. +type TimeNormalizer interface { + Normalize(time.Duration) time.Duration +} + +// TimeNormalizerFunc is function type adapter to convert function +// into TimeNormalizer. +type TimeNormalizerFunc func(time.Duration) time.Duration + +func (f TimeNormalizerFunc) Normalize(src time.Duration) time.Duration { + return f(src) +} + +// EwmaETA exponential-weighted-moving-average based ETA decorator. For this +// decorator to work correctly you have to measure each iteration's duration +// and pass it to one of the (*Bar).EwmaIncr... family methods. +func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { + return EwmaNormalizedETA(style, age, nil, wcc...) +} + +// EwmaNormalizedETA same as EwmaETA but with TimeNormalizer option. +func EwmaNormalizedETA(style TimeStyle, age float64, normalizer TimeNormalizer, wcc ...WC) Decorator { + var average ewma.MovingAverage + if age == 0 { + average = ewma.NewMovingAverage() + } else { + average = ewma.NewMovingAverage(age) + } + return MovingAverageETA(style, average, normalizer, wcc...) +} + +// MovingAverageETA decorator relies on MovingAverage implementation to calculate its average. +// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `average` implementation of MovingAverage interface +// +// `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] +// +// `wcc` optional WC config +func MovingAverageETA(style TimeStyle, average ewma.MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { + if average == nil { + average = NewMedian() + } + d := &movingAverageETA{ + WC: initWC(wcc...), + producer: chooseTimeProducer(style), + average: average, + normalizer: normalizer, + } + return d +} + +type movingAverageETA struct { + WC + producer func(time.Duration) string + average ewma.MovingAverage + normalizer TimeNormalizer + zDur time.Duration +} + +func (d *movingAverageETA) Decor(s Statistics) (string, int) { + v := math.Round(d.average.Value()) + remaining := time.Duration((s.Total - s.Current) * int64(v)) + if d.normalizer != nil { + remaining = d.normalizer.Normalize(remaining) + } + return d.Format(d.producer(remaining)) +} + +func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { + if n <= 0 { + d.zDur += dur + return + } + durPerItem := float64(d.zDur+dur) / float64(n) + if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { + d.zDur += dur + return + } + d.zDur = 0 + d.average.Add(durPerItem) +} + +// AverageETA decorator. It's wrapper of NewAverageETA. +// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `wcc` optional WC config +func AverageETA(style TimeStyle, wcc ...WC) Decorator { + return NewAverageETA(style, time.Now(), nil, wcc...) +} + +// NewAverageETA decorator with user provided start time. 
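+// For example (illustrative; `savedStart` is a hypothetical time.Time
+// persisted by a previous run):
+//
+//	d := decor.NewAverageETA(decor.ET_STYLE_GO, savedStart, nil)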
+// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `start` start time +// +// `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] +// +// `wcc` optional WC config +func NewAverageETA(style TimeStyle, start time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator { + d := &averageETA{ + WC: initWC(wcc...), + start: start, + normalizer: normalizer, + producer: chooseTimeProducer(style), + } + return d +} + +type averageETA struct { + WC + start time.Time + normalizer TimeNormalizer + producer func(time.Duration) string +} + +func (d *averageETA) Decor(s Statistics) (string, int) { + var remaining time.Duration + if s.Current != 0 { + durPerItem := float64(time.Since(d.start)) / float64(s.Current) + durPerItem = math.Round(durPerItem) + remaining = time.Duration((s.Total - s.Current) * int64(durPerItem)) + if d.normalizer != nil { + remaining = d.normalizer.Normalize(remaining) + } + } + return d.Format(d.producer(remaining)) +} + +func (d *averageETA) AverageAdjust(start time.Time) { + d.start = start +} + +// MaxTolerateTimeNormalizer returns implementation of TimeNormalizer. +func MaxTolerateTimeNormalizer(maxTolerate time.Duration) TimeNormalizer { + var normalized time.Duration + var lastCall time.Time + return TimeNormalizerFunc(func(remaining time.Duration) time.Duration { + if diff := normalized - remaining; diff <= 0 || diff > maxTolerate || remaining < time.Minute { + normalized = remaining + lastCall = time.Now() + return remaining + } + normalized -= time.Since(lastCall) + lastCall = time.Now() + if normalized > 0 { + return normalized + } + return remaining + }) +} + +// FixedIntervalTimeNormalizer returns implementation of TimeNormalizer. +func FixedIntervalTimeNormalizer(updInterval int) TimeNormalizer { + var normalized time.Duration + var lastCall time.Time + var count int + return TimeNormalizerFunc(func(remaining time.Duration) time.Duration { + if count == 0 || remaining < time.Minute { + count = updInterval + normalized = remaining + lastCall = time.Now() + return remaining + } + count-- + normalized -= time.Since(lastCall) + lastCall = time.Now() + if normalized > 0 { + return normalized + } + return remaining + }) +} + +func chooseTimeProducer(style TimeStyle) func(time.Duration) string { + switch style { + case ET_STYLE_HHMMSS: + return func(remaining time.Duration) string { + hours := int64(remaining/time.Hour) % 60 + minutes := int64(remaining/time.Minute) % 60 + seconds := int64(remaining/time.Second) % 60 + return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + } + case ET_STYLE_HHMM: + return func(remaining time.Duration) string { + hours := int64(remaining/time.Hour) % 60 + minutes := int64(remaining/time.Minute) % 60 + return fmt.Sprintf("%02d:%02d", hours, minutes) + } + case ET_STYLE_MMSS: + return func(remaining time.Duration) string { + hours := int64(remaining/time.Hour) % 60 + minutes := int64(remaining/time.Minute) % 60 + seconds := int64(remaining/time.Second) % 60 + if hours > 0 { + return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + } + return fmt.Sprintf("%02d:%02d", minutes, seconds) + } + default: + return func(remaining time.Duration) string { + return remaining.Truncate(time.Second).String() + } + } +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/meta.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/meta.go new file mode 100644 index 000000000..0045a31f4 --- /dev/null +++ 
b/tools/vendor/github.com/vbauerster/mpb/v8/decor/meta.go @@ -0,0 +1,34 @@ +package decor + +var ( + _ Decorator = metaWrapper{} + _ Wrapper = metaWrapper{} +) + +// Meta wrap decorator. +// Provided fn is supposed to wrap output of given decorator +// with meta information like ANSI escape codes for example. +// Primary usage intention is to set SGR display attributes. +// +// `decorator` Decorator to wrap +// `fn` func to apply meta information +func Meta(decorator Decorator, fn func(string) string) Decorator { + if decorator == nil { + return nil + } + return metaWrapper{decorator, fn} +} + +type metaWrapper struct { + Decorator + fn func(string) string +} + +func (d metaWrapper) Decor(s Statistics) (string, int) { + str, width := d.Decorator.Decor(s) + return d.fn(str), width +} + +func (d metaWrapper) Unwrap() Decorator { + return d.Decorator +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go new file mode 100644 index 000000000..165ef1eb5 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go @@ -0,0 +1,74 @@ +package decor + +import ( + "sort" + "sync" + + "github.com/VividCortex/ewma" +) + +var ( + _ ewma.MovingAverage = (*threadSafeMovingAverage)(nil) + _ ewma.MovingAverage = (*medianWindow)(nil) + _ sort.Interface = (*medianWindow)(nil) +) + +type threadSafeMovingAverage struct { + ewma.MovingAverage + mu sync.Mutex +} + +func (s *threadSafeMovingAverage) Add(value float64) { + s.mu.Lock() + s.MovingAverage.Add(value) + s.mu.Unlock() +} + +func (s *threadSafeMovingAverage) Value() float64 { + s.mu.Lock() + defer s.mu.Unlock() + return s.MovingAverage.Value() +} + +func (s *threadSafeMovingAverage) Set(value float64) { + s.mu.Lock() + s.MovingAverage.Set(value) + s.mu.Unlock() +} + +// NewThreadSafeMovingAverage converts provided ewma.MovingAverage +// into thread safe ewma.MovingAverage. +func NewThreadSafeMovingAverage(average ewma.MovingAverage) ewma.MovingAverage { + if tsma, ok := average.(*threadSafeMovingAverage); ok { + return tsma + } + return &threadSafeMovingAverage{MovingAverage: average} +} + +type medianWindow [3]float64 + +func (s *medianWindow) Len() int { return len(s) } +func (s *medianWindow) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s *medianWindow) Less(i, j int) bool { return s[i] < s[j] } + +func (s *medianWindow) Add(value float64) { + s[0], s[1] = s[1], s[2] + s[2] = value +} + +func (s *medianWindow) Value() float64 { + tmp := *s + sort.Sort(&tmp) + return tmp[1] +} + +func (s *medianWindow) Set(value float64) { + for i := 0; i < len(s); i++ { + s[i] = value + } +} + +// NewMedian is fixed last 3 samples median MovingAverage. +func NewMedian() ewma.MovingAverage { + return new(medianWindow) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/name.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/name.go new file mode 100644 index 000000000..31ac123b5 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/name.go @@ -0,0 +1,11 @@ +package decor + +// Name decorator displays text that is set once and can't be changed +// during decorator's lifetime. +// +// `str` string to display +// +// `wcc` optional WC config +func Name(str string, wcc ...WC) Decorator { + return Any(func(Statistics) string { return str }, wcc...) 
+} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go new file mode 100644 index 000000000..3e35ddfd8 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go @@ -0,0 +1,68 @@ +package decor + +var ( + _ Decorator = onAbortWrapper{} + _ Wrapper = onAbortWrapper{} + _ Decorator = onAbortMetaWrapper{} + _ Wrapper = onAbortMetaWrapper{} +) + +// OnAbort wrap decorator. +// Displays provided message on abort event. +// Has no effect if bar.Abort(true) is called. +// +// `decorator` Decorator to wrap +// `message` message to display +func OnAbort(decorator Decorator, message string) Decorator { + if decorator == nil { + return nil + } + return onAbortWrapper{decorator, message} +} + +type onAbortWrapper struct { + Decorator + msg string +} + +func (d onAbortWrapper) Decor(s Statistics) (string, int) { + if s.Aborted { + return d.Format(d.msg) + } + return d.Decorator.Decor(s) +} + +func (d onAbortWrapper) Unwrap() Decorator { + return d.Decorator +} + +// OnAbortMeta wrap decorator. +// Provided fn is supposed to wrap output of given decorator +// with meta information like ANSI escape codes for example. +// Primary usage intention is to set SGR display attributes. +// +// `decorator` Decorator to wrap +// `fn` func to apply meta information +func OnAbortMeta(decorator Decorator, fn func(string) string) Decorator { + if decorator == nil { + return nil + } + return onAbortMetaWrapper{decorator, fn} +} + +type onAbortMetaWrapper struct { + Decorator + fn func(string) string +} + +func (d onAbortMetaWrapper) Decor(s Statistics) (string, int) { + if s.Aborted { + str, width := d.Decorator.Decor(s) + return d.fn(str), width + } + return d.Decorator.Decor(s) +} + +func (d onAbortMetaWrapper) Unwrap() Decorator { + return d.Decorator +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go new file mode 100644 index 000000000..f9ca8416a --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go @@ -0,0 +1,21 @@ +package decor + +// OnCompleteOrOnAbort wrap decorator. +// Displays provided message on complete or on abort event. +// +// `decorator` Decorator to wrap +// `message` message to display +func OnCompleteOrOnAbort(decorator Decorator, message string) Decorator { + return OnComplete(OnAbort(decorator, message), message) +} + +// OnCompleteMetaOrOnAbortMeta wrap decorator. +// Provided fn is supposed to wrap output of given decorator +// with meta information like ANSI escape codes for example. +// Primary usage intention is to set SGR display attributes. +// +// `decorator` Decorator to wrap +// `fn` func to apply meta information +func OnCompleteMetaOrOnAbortMeta(decorator Decorator, fn func(string) string) Decorator { + return OnCompleteMeta(OnAbortMeta(decorator, fn), fn) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go new file mode 100644 index 000000000..f18b5a60c --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go @@ -0,0 +1,67 @@ +package decor + +var ( + _ Decorator = onCompleteWrapper{} + _ Wrapper = onCompleteWrapper{} + _ Decorator = onCompleteMetaWrapper{} + _ Wrapper = onCompleteMetaWrapper{} +) + +// OnComplete wrap decorator. +// Displays provided message on complete event. 
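+// For example (illustrative), OnComplete(decor.Percentage(), "done")
+// renders a percentage while the bar is running and "done" once it
+// completes.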
+// +// `decorator` Decorator to wrap +// `message` message to display +func OnComplete(decorator Decorator, message string) Decorator { + if decorator == nil { + return nil + } + return onCompleteWrapper{decorator, message} +} + +type onCompleteWrapper struct { + Decorator + msg string +} + +func (d onCompleteWrapper) Decor(s Statistics) (string, int) { + if s.Completed { + return d.Format(d.msg) + } + return d.Decorator.Decor(s) +} + +func (d onCompleteWrapper) Unwrap() Decorator { + return d.Decorator +} + +// OnCompleteMeta wrap decorator. +// Provided fn is supposed to wrap output of given decorator +// with meta information like ANSI escape codes for example. +// Primary usage intention is to set SGR display attributes. +// +// `decorator` Decorator to wrap +// `fn` func to apply meta information +func OnCompleteMeta(decorator Decorator, fn func(string) string) Decorator { + if decorator == nil { + return nil + } + return onCompleteMetaWrapper{decorator, fn} +} + +type onCompleteMetaWrapper struct { + Decorator + fn func(string) string +} + +func (d onCompleteMetaWrapper) Decor(s Statistics) (string, int) { + if s.Completed { + str, width := d.Decorator.Decor(s) + return d.fn(str), width + } + return d.Decorator.Decor(s) +} + +func (d onCompleteMetaWrapper) Unwrap() Decorator { + return d.Decorator +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go new file mode 100644 index 000000000..f4626c33b --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go @@ -0,0 +1,51 @@ +package decor + +// OnCondition applies decorator only if a condition is true. +// +// `decorator` Decorator +// +// `cond` bool +func OnCondition(decorator Decorator, cond bool) Decorator { + return Conditional(cond, decorator, nil) +} + +// OnPredicate applies decorator only if a predicate evaluates to true. +// +// `decorator` Decorator +// +// `predicate` func() bool +func OnPredicate(decorator Decorator, predicate func() bool) Decorator { + return Predicative(predicate, decorator, nil) +} + +// Conditional returns decorator `a` if condition is true, otherwise +// decorator `b`. +// +// `cond` bool +// +// `a` Decorator +// +// `b` Decorator +func Conditional(cond bool, a, b Decorator) Decorator { + if cond { + return a + } else { + return b + } +} + +// Predicative returns decorator `a` if predicate evaluates to true, +// otherwise decorator `b`. 
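+// For example (illustrative; `verbose` is a hypothetical flag):
+//
+//	d := decor.Predicative(func() bool { return verbose }, decor.Percentage(), nil)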
+// +// `predicate` func() bool +// +// `a` Decorator +// +// `b` Decorator +func Predicative(predicate func() bool, a, b Decorator) Decorator { + if predicate() { + return a + } else { + return b + } +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go new file mode 100644 index 000000000..547117b25 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go @@ -0,0 +1,68 @@ +package decor + +import ( + "fmt" + "strconv" + + "github.com/vbauerster/mpb/v8/internal" +) + +var _ fmt.Formatter = percentageType(0) + +type percentageType float64 + +func (s percentageType) Format(st fmt.State, verb rune) { + prec := -1 + switch verb { + case 'f', 'e', 'E': + prec = 6 // default prec of fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': + if p, ok := st.Precision(); ok { + prec = p + } + default: + verb, prec = 'f', 0 + } + + b := strconv.AppendFloat(make([]byte, 0, 16), float64(s), byte(verb), prec, 64) + if st.Flag(' ') { + b = append(b, ' ', '%') + } else { + b = append(b, '%') + } + _, err := st.Write(b) + if err != nil { + panic(err) + } +} + +// Percentage returns percentage decorator. It's a wrapper of NewPercentage. +func Percentage(wcc ...WC) Decorator { + return NewPercentage("% d", wcc...) +} + +// NewPercentage percentage decorator with custom format string. +// +// `format` printf compatible verb +// +// `wcc` optional WC config +// +// format examples: +// +// format="%d" output: "1%" +// format="% d" output: "1 %" +// format="%.1f" output: "1.0%" +// format="% .1f" output: "1.0 %" +// format="%f" output: "1.000000%" +// format="% f" output: "1.000000 %" +func NewPercentage(format string, wcc ...WC) Decorator { + if format == "" { + format = "% d" + } + f := func(s Statistics) string { + p := internal.Percentage(uint(s.Total), uint(s.Current), 100) + return fmt.Sprintf(format, percentageType(p)) + } + return Any(f, wcc...) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go new file mode 100644 index 000000000..90ecda688 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go @@ -0,0 +1,120 @@ +//go:generate go tool stringer -type=SizeB1024 -trimprefix=_i +//go:generate go tool stringer -type=SizeB1000 -trimprefix=_ + +package decor + +import ( + "fmt" + "strconv" +) + +var ( + _ fmt.Formatter = SizeB1024(0) + _ fmt.Stringer = SizeB1024(0) + _ fmt.Formatter = SizeB1000(0) + _ fmt.Stringer = SizeB1000(0) +) + +const ( + _ib SizeB1024 = iota + 1 + _iKiB SizeB1024 = 1 << (iota * 10) + _iMiB + _iGiB + _iTiB +) + +// SizeB1024 named type, which implements fmt.Formatter interface. It +// adjusts its value according to byte size multiple by 1024 and appends +// appropriate size marker (KiB, MiB, GiB, TiB). 
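+//
+// For example (illustrative):
+//
+//	fmt.Printf("%.1f", SizeB1024(2048))  // output: "2.0KiB"
+//	fmt.Printf("% .1f", SizeB1024(2048)) // output: "2.0 KiB"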
+type SizeB1024 int64 + +func (s SizeB1024) Format(f fmt.State, verb rune) { + prec := -1 + switch verb { + case 'f', 'e', 'E': + prec = 6 // default prec of fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': + if p, ok := f.Precision(); ok { + prec = p + } + default: + verb, prec = 'f', 0 + } + + var unit SizeB1024 + switch { + case s < _iKiB: + unit = _ib + case s < _iMiB: + unit = _iKiB + case s < _iGiB: + unit = _iMiB + case s < _iTiB: + unit = _iGiB + default: + unit = _iTiB + } + + b := strconv.AppendFloat(make([]byte, 0, 24), float64(s)/float64(unit), byte(verb), prec, 64) + if f.Flag(' ') { + b = append(b, ' ') + } + b = append(b, []byte(unit.String())...) + _, err := f.Write(b) + if err != nil { + panic(err) + } +} + +const ( + _b SizeB1000 = 1 + _KB SizeB1000 = _b * 1000 + _MB SizeB1000 = _KB * 1000 + _GB SizeB1000 = _MB * 1000 + _TB SizeB1000 = _GB * 1000 +) + +// SizeB1000 named type, which implements fmt.Formatter interface. It +// adjusts its value according to byte size multiple by 1000 and appends +// appropriate size marker (KB, MB, GB, TB). +type SizeB1000 int64 + +func (s SizeB1000) Format(f fmt.State, verb rune) { + prec := -1 + switch verb { + case 'f', 'e', 'E': + prec = 6 // default prec of fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': + if p, ok := f.Precision(); ok { + prec = p + } + default: + verb, prec = 'f', 0 + } + + var unit SizeB1000 + switch { + case s < _KB: + unit = _b + case s < _MB: + unit = _KB + case s < _GB: + unit = _MB + case s < _TB: + unit = _GB + default: + unit = _TB + } + + b := strconv.AppendFloat(make([]byte, 0, 24), float64(s)/float64(unit), byte(verb), prec, 64) + if f.Flag(' ') { + b = append(b, ' ') + } + b = append(b, []byte(unit.String())...) + _, err := f.Write(b) + if err != nil { + panic(err) + } +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1000_string.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1000_string.go new file mode 100644 index 000000000..3f32ef715 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1000_string.go @@ -0,0 +1,41 @@ +// Code generated by "stringer -type=SizeB1000 -trimprefix=_"; DO NOT EDIT. + +package decor + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[_b-1] + _ = x[_KB-1000] + _ = x[_MB-1000000] + _ = x[_GB-1000000000] + _ = x[_TB-1000000000000] +} + +const ( + _SizeB1000_name_0 = "b" + _SizeB1000_name_1 = "KB" + _SizeB1000_name_2 = "MB" + _SizeB1000_name_3 = "GB" + _SizeB1000_name_4 = "TB" +) + +func (i SizeB1000) String() string { + switch { + case i == 1: + return _SizeB1000_name_0 + case i == 1000: + return _SizeB1000_name_1 + case i == 1000000: + return _SizeB1000_name_2 + case i == 1000000000: + return _SizeB1000_name_3 + case i == 1000000000000: + return _SizeB1000_name_4 + default: + return "SizeB1000(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1024_string.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1024_string.go new file mode 100644 index 000000000..9fca66cc7 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1024_string.go @@ -0,0 +1,41 @@ +// Code generated by "stringer -type=SizeB1024 -trimprefix=_i"; DO NOT EDIT. 
+ +package decor + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[_ib-1] + _ = x[_iKiB-1024] + _ = x[_iMiB-1048576] + _ = x[_iGiB-1073741824] + _ = x[_iTiB-1099511627776] +} + +const ( + _SizeB1024_name_0 = "b" + _SizeB1024_name_1 = "KiB" + _SizeB1024_name_2 = "MiB" + _SizeB1024_name_3 = "GiB" + _SizeB1024_name_4 = "TiB" +) + +func (i SizeB1024) String() string { + switch { + case i == 1: + return _SizeB1024_name_0 + case i == 1024: + return _SizeB1024_name_1 + case i == 1048576: + return _SizeB1024_name_2 + case i == 1073741824: + return _SizeB1024_name_3 + case i == 1099511627776: + return _SizeB1024_name_4 + default: + return "SizeB1024(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/speed.go new file mode 100644 index 000000000..b643e10fb --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -0,0 +1,185 @@ +package decor + +import ( + "fmt" + "io" + "math" + "time" + + "github.com/VividCortex/ewma" +) + +var ( + _ Decorator = (*movingAverageSpeed)(nil) + _ EwmaDecorator = (*movingAverageSpeed)(nil) + _ Decorator = (*averageSpeed)(nil) + _ AverageDecorator = (*averageSpeed)(nil) +) + +// FmtAsSpeed adds "/s" to the end of the input formatter. To be +// used with SizeB1000 or SizeB1024 types, for example: +// +// fmt.Printf("%.1f", FmtAsSpeed(SizeB1024(2048))) +func FmtAsSpeed(input fmt.Formatter) fmt.Formatter { + return &speedFormatter{input} +} + +type speedFormatter struct { + fmt.Formatter +} + +func (s *speedFormatter) Format(st fmt.State, verb rune) { + s.Formatter.Format(st, verb) + _, err := io.WriteString(st, "/s") + if err != nil { + panic(err) + } +} + +// EwmaSpeed exponential-weighted-moving-average based speed decorator. +// For this decorator to work correctly you have to measure each iteration's +// duration and pass it to one of the (*Bar).EwmaIncr... family methods. +func EwmaSpeed(unit interface{}, format string, age float64, wcc ...WC) Decorator { + var average ewma.MovingAverage + if age == 0 { + average = ewma.NewMovingAverage() + } else { + average = ewma.NewMovingAverage(age) + } + return MovingAverageSpeed(unit, format, average, wcc...) +} + +// MovingAverageSpeed decorator relies on MovingAverage implementation +// to calculate its average. 
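+// For example (a minimal sketch; any ewma.MovingAverage implementation
+// can be supplied):
+//
+//	avg := ewma.NewMovingAverage(15)
+//	d := decor.MovingAverageSpeed(decor.SizeB1024(0), "% .1f", avg, nil)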
+// +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] +// +// `format` printf compatible verb for value, like "%f" or "%d" +// +// `average` MovingAverage implementation +// +// `wcc` optional WC config +// +// format examples: +// +// unit=SizeB1024(0), format="%.1f" output: "1.0MiB/s" +// unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s" +// unit=SizeB1000(0), format="%.1f" output: "1.0MB/s" +// unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s" +func MovingAverageSpeed(unit interface{}, format string, average ewma.MovingAverage, wcc ...WC) Decorator { + d := &movingAverageSpeed{ + WC: initWC(wcc...), + producer: chooseSpeedProducer(unit, format), + average: average, + } + return d +} + +type movingAverageSpeed struct { + WC + producer func(float64) string + average ewma.MovingAverage + zDur time.Duration +} + +func (d *movingAverageSpeed) Decor(_ Statistics) (string, int) { + var str string + // ewma implementation may return 0 before accumulating certain number of samples + if v := d.average.Value(); v != 0 { + str = d.producer(1e9 / v) + } else { + str = d.producer(0) + } + return d.Format(str) +} + +func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { + if n <= 0 { + d.zDur += dur + return + } + durPerByte := float64(d.zDur+dur) / float64(n) + if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) { + d.zDur += dur + return + } + d.zDur = 0 + d.average.Add(durPerByte) +} + +// AverageSpeed decorator with dynamic unit measure adjustment. It's +// a wrapper of NewAverageSpeed. +func AverageSpeed(unit interface{}, format string, wcc ...WC) Decorator { + return NewAverageSpeed(unit, format, time.Now(), wcc...) +} + +// NewAverageSpeed decorator with dynamic unit measure adjustment and +// user provided start time. +// +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] +// +// `format` printf compatible verb for value, like "%f" or "%d" +// +// `start` start time +// +// `wcc` optional WC config +// +// format examples: +// +// unit=SizeB1024(0), format="%.1f" output: "1.0MiB/s" +// unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s" +// unit=SizeB1000(0), format="%.1f" output: "1.0MB/s" +// unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s" +func NewAverageSpeed(unit interface{}, format string, start time.Time, wcc ...WC) Decorator { + d := &averageSpeed{ + WC: initWC(wcc...), + start: start, + producer: chooseSpeedProducer(unit, format), + } + return d +} + +type averageSpeed struct { + WC + start time.Time + producer func(float64) string + msg string +} + +func (d *averageSpeed) Decor(s Statistics) (string, int) { + if !s.Completed { + speed := float64(s.Current) / float64(time.Since(d.start)) + d.msg = d.producer(speed * 1e9) + } + return d.Format(d.msg) +} + +func (d *averageSpeed) AverageAdjust(start time.Time) { + d.start = start +} + +func chooseSpeedProducer(unit interface{}, format string) func(float64) string { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } + return func(speed float64) string { + return fmt.Sprintf(format, FmtAsSpeed(SizeB1024(math.Round(speed)))) + } + case SizeB1000: + if format == "" { + format = "% d" + } + return func(speed float64) string { + return fmt.Sprintf(format, FmtAsSpeed(SizeB1000(math.Round(speed)))) + } + default: + if format == "" { + format = "%f" + } + return func(speed float64) string { + return fmt.Sprintf(format, speed) + } + } +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go b/tools/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go new file mode 100644 
index 000000000..9d2f89094 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go @@ -0,0 +1,21 @@ +package decor + +var defaultSpinnerStyle = [...]string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} + +// Spinner returns spinner decorator. +// +// `frames` spinner frames, if nil or len==0, default is used +// +// `wcc` optional WC config +func Spinner(frames []string, wcc ...WC) Decorator { + if len(frames) == 0 { + frames = defaultSpinnerStyle[:] + } + var count uint + f := func(s Statistics) string { + frame := frames[count%uint(len(frames))] + count++ + return frame + } + return Any(f, wcc...) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/doc.go b/tools/vendor/github.com/vbauerster/mpb/v8/doc.go new file mode 100644 index 000000000..5ada71774 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/doc.go @@ -0,0 +1,2 @@ +// Package mpb is a library for rendering progress bars in terminal applications. +package mpb diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/heap_manager.go b/tools/vendor/github.com/vbauerster/mpb/v8/heap_manager.go new file mode 100644 index 000000000..88efb4823 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/heap_manager.go @@ -0,0 +1,177 @@ +package mpb + +import "container/heap" + +type heapManager chan heapRequest + +type heapCmd int + +const ( + h_sync heapCmd = iota + h_push + h_iter + h_fix + h_state + h_end +) + +type heapRequest struct { + cmd heapCmd + data interface{} +} + +type iterData struct { + drop <-chan struct{} + iter chan<- *Bar + iterPop chan<- *Bar +} + +type pushData struct { + bar *Bar + sync bool +} + +type fixData struct { + bar *Bar + priority int + lazy bool +} + +func (m heapManager) run() { + var bHeap priorityQueue + var pMatrix, aMatrix map[int][]chan int + + var l int + var sync bool + + for req := range m { + switch req.cmd { + case h_push: + data := req.data.(pushData) + heap.Push(&bHeap, data.bar) + sync = sync || data.sync + case h_sync: + if sync || l != bHeap.Len() { + pMatrix = make(map[int][]chan int) + aMatrix = make(map[int][]chan int) + for _, b := range bHeap { + table := b.wSyncTable() + for i, ch := range table[0] { + pMatrix[i] = append(pMatrix[i], ch) + } + for i, ch := range table[1] { + aMatrix[i] = append(aMatrix[i], ch) + } + } + sync = false + l = bHeap.Len() + } + drop := req.data.(<-chan struct{}) + syncWidth(pMatrix, drop) + syncWidth(aMatrix, drop) + case h_iter: + data := req.data.(iterData) + loop: // unordered iteration + for _, b := range bHeap { + select { + case data.iter <- b: + case <-data.drop: + data.iterPop = nil + break loop + } + } + close(data.iter) + if data.iterPop == nil { + break + } + loop_pop: // ordered iteration + for bHeap.Len() != 0 { + bar := heap.Pop(&bHeap).(*Bar) + select { + case data.iterPop <- bar: + case <-data.drop: + heap.Push(&bHeap, bar) + break loop_pop + } + } + close(data.iterPop) + case h_fix: + data := req.data.(fixData) + if data.bar.index < 0 { + break + } + data.bar.priority = data.priority + if !data.lazy { + heap.Fix(&bHeap, data.bar.index) + } + case h_state: + ch := req.data.(chan<- bool) + ch <- sync || l != bHeap.Len() + case h_end: + ch := req.data.(chan<- interface{}) + if ch != nil { + go func() { + ch <- []*Bar(bHeap) + }() + } + close(m) + } + } +} + +func (m heapManager) sync(drop <-chan struct{}) { + m <- heapRequest{cmd: h_sync, data: drop} +} + +func (m heapManager) push(b *Bar, sync bool) { + data := pushData{b, sync} + req := heapRequest{cmd: h_push, data: data} + select { + case m 
<- req: + default: + go func() { + m <- req + }() + } +} + +func (m heapManager) iter(drop <-chan struct{}, iter, iterPop chan<- *Bar) { + data := iterData{drop, iter, iterPop} + m <- heapRequest{cmd: h_iter, data: data} +} + +func (m heapManager) fix(b *Bar, priority int, lazy bool) { + data := fixData{b, priority, lazy} + m <- heapRequest{cmd: h_fix, data: data} +} + +func (m heapManager) state(ch chan<- bool) { + m <- heapRequest{cmd: h_state, data: ch} +} + +func (m heapManager) end(ch chan<- interface{}) { + m <- heapRequest{cmd: h_end, data: ch} +} + +func syncWidth(matrix map[int][]chan int, drop <-chan struct{}) { + for _, column := range matrix { + go maxWidthDistributor(column, drop) + } +} + +func maxWidthDistributor(column []chan int, drop <-chan struct{}) { + var maxWidth int + for _, ch := range column { + select { + case w := <-ch: + if w > maxWidth { + maxWidth = w + } + case <-drop: + return + } + } + for _, ch := range column { + ch <- maxWidth + } +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go b/tools/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go new file mode 100644 index 000000000..e25cf9992 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go @@ -0,0 +1,22 @@ +package internal + +import "math" + +// Percentage is a helper function, to calculate percentage. +func Percentage(total, current, width uint) float64 { + if total == 0 { + return 0 + } + if current >= total { + return float64(width) + } + return float64(width*current) / float64(total) +} + +// PercentageRound same as Percentage but with math.Round. +func PercentageRound(total, current int64, width uint) float64 { + if total < 0 || current < 0 { + return 0 + } + return math.Round(Percentage(uint(total), uint(current), width)) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/internal/width.go b/tools/vendor/github.com/vbauerster/mpb/v8/internal/width.go new file mode 100644 index 000000000..842e811f0 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/internal/width.go @@ -0,0 +1,10 @@ +package internal + +// CheckRequestedWidth checks that requested width doesn't overflow +// available width +func CheckRequestedWidth(requested, available int) int { + if requested < 1 || requested > available { + return available + } + return requested +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/priority_queue.go b/tools/vendor/github.com/vbauerster/mpb/v8/priority_queue.go new file mode 100644 index 000000000..c2f657db0 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/priority_queue.go @@ -0,0 +1,37 @@ +package mpb + +import "container/heap" + +var _ heap.Interface = (*priorityQueue)(nil) + +type priorityQueue []*Bar + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { + // greater priority pops first + return pq[i].priority > pq[j].priority +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *priorityQueue) Push(x interface{}) { + s := *pq + b := x.(*Bar) + b.index = len(s) + *pq = append(s, b) +} + +func (pq *priorityQueue) Pop() interface{} { + var b *Bar + s := *pq + i := len(s) - 1 + b, s[i] = s[i], nil // nil to avoid memory leak + b.index = -1 // for safety + *pq = s[:i] + return b +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/progress.go b/tools/vendor/github.com/vbauerster/mpb/v8/progress.go new file mode 100644 index 000000000..851083c40 --- /dev/null +++ 
b/tools/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -0,0 +1,463 @@ +package mpb + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "os" + "sync" + "time" + + "github.com/vbauerster/mpb/v8/cwriter" + "github.com/vbauerster/mpb/v8/decor" +) + +const defaultRefreshRate = 150 * time.Millisecond +const defaultHmQueueLength = 128 + +// ErrDone represents use after `(*Progress).Wait()` error. +var ErrDone = fmt.Errorf("%T instance can't be reused after %[1]T.Wait()", (*Progress)(nil)) + +// Progress represents a container that renders one or more progress bars. +type Progress struct { + uwg *sync.WaitGroup + pwg, bwg sync.WaitGroup + operateState chan func(*pState) + interceptIO chan func(io.Writer) + done <-chan struct{} + cancel func() +} + +// pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine. +type pState struct { + ctx context.Context + hm heapManager + iterDrop chan struct{} + renderReq chan time.Time + idCount int + popPriority int + + // following are provided/overrode by user + hmQueueLen int + reqWidth int + refreshRate time.Duration + popCompleted bool + autoRefresh bool + delayRC <-chan struct{} + manualRC <-chan interface{} + shutdownNotifier chan<- interface{} + queueBars map[*Bar]*Bar + output io.Writer + debugOut io.Writer + uwg *sync.WaitGroup +} + +// New creates new Progress container instance. It's not possible to +// reuse instance after `(*Progress).Wait` method has been called. +func New(options ...ContainerOption) *Progress { + return NewWithContext(context.Background(), options...) +} + +// NewWithContext creates new Progress container instance with provided +// context. It's not possible to reuse instance after `(*Progress).Wait` +// method has been called. +func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { + if ctx == nil { + ctx = context.Background() + } + ctx, cancel := context.WithCancel(ctx) + s := &pState{ + ctx: ctx, + hmQueueLen: defaultHmQueueLength, + iterDrop: make(chan struct{}), + renderReq: make(chan time.Time), + popPriority: math.MinInt32, + refreshRate: defaultRefreshRate, + queueBars: make(map[*Bar]*Bar), + output: os.Stdout, + debugOut: io.Discard, + } + + for _, opt := range options { + if opt != nil { + opt(s) + } + } + + s.hm = make(heapManager, s.hmQueueLen) + + p := &Progress{ + uwg: s.uwg, + operateState: make(chan func(*pState)), + interceptIO: make(chan func(io.Writer)), + cancel: cancel, + } + + cw := cwriter.New(s.output) + if s.manualRC != nil { + done := make(chan struct{}) + p.done = done + s.autoRefresh = false + go s.manualRefreshListener(done) + } else if cw.IsTerminal() || s.autoRefresh { + done := make(chan struct{}) + p.done = done + s.autoRefresh = true + go s.autoRefreshListener(done) + } else { + p.done = ctx.Done() + s.autoRefresh = false + } + + p.pwg.Add(1) + go p.serve(s, cw) + go s.hm.run() + return p +} + +// AddBar creates a bar with default bar filler. +func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { + return p.New(total, BarStyle(), options...) +} + +// AddSpinner creates a bar with default spinner filler. +func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { + return p.New(total, SpinnerStyle(), options...) +} + +// New creates a bar by calling `Build` method on provided `BarFillerBuilder`. +func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { + if builder == nil { + return p.MustAdd(total, nil, options...) 
+	}
+	return p.MustAdd(total, builder.Build(), options...)
+}
+
+// MustAdd creates a bar which renders itself by the provided BarFiller.
+// If `total <= 0`, triggering of the complete event by increment methods
+// is disabled. Panics if called after `(*Progress).Wait()`.
+func (p *Progress) MustAdd(total int64, filler BarFiller, options ...BarOption) *Bar {
+	bar, err := p.Add(total, filler, options...)
+	if err != nil {
+		panic(err)
+	}
+	return bar
+}
+
+// Add creates a bar which renders itself by the provided BarFiller.
+// If `total <= 0`, triggering of the complete event by increment methods
+// is disabled. If called after `(*Progress).Wait()` then
+// `(nil, ErrDone)` is returned.
+func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Bar, error) {
+	if filler == nil {
+		filler = NopStyle().Build()
+	} else if f, ok := filler.(BarFillerFunc); ok && f == nil {
+		filler = NopStyle().Build()
+	}
+	ch := make(chan *Bar)
+	select {
+	case p.operateState <- func(ps *pState) {
+		bs := ps.makeBarState(total, filler, options...)
+		bar := newBar(ps.ctx, p, bs)
+		if bs.waitBar != nil {
+			ps.queueBars[bs.waitBar] = bar
+		} else {
+			ps.hm.push(bar, true)
+		}
+		ps.idCount++
+		ch <- bar
+	}:
+		return <-ch, nil
+	case <-p.done:
+		return nil, ErrDone
+	}
+}
+
+func (p *Progress) traverseBars(cb func(b *Bar) bool) {
+	drop, iter := make(chan struct{}), make(chan *Bar)
+	select {
+	case p.operateState <- func(s *pState) { s.hm.iter(drop, iter, nil) }:
+		for b := range iter {
+			if !cb(b) {
+				close(drop)
+				break
+			}
+		}
+	case <-p.done:
+	}
+}
+
+// UpdateBarPriority updates bar's priority either immediately or lazily.
+// With the lazy flag set, order is updated after the next refresh cycle.
+// If you don't care about laziness, just use `(*Bar).SetPriority(int)`.
+func (p *Progress) UpdateBarPriority(b *Bar, priority int, lazy bool) {
+	if b == nil {
+		return
+	}
+	select {
+	case p.operateState <- func(s *pState) { s.hm.fix(b, priority, lazy) }:
+	case <-p.done:
+	}
+}
+
+// Write is an implementation of io.Writer.
+// Writing to `*Progress` will print lines above a running bar.
+// Writes aren't flushed immediately, but at the next refresh cycle.
+// If called after `(*Progress).Wait()` then `(0, ErrDone)` is returned.
+func (p *Progress) Write(b []byte) (int, error) {
+	type result struct {
+		n   int
+		err error
+	}
+	ch := make(chan result)
+	select {
+	case p.interceptIO <- func(w io.Writer) {
+		n, err := w.Write(b)
+		ch <- result{n, err}
+	}:
+		res := <-ch
+		return res.n, res.err
+	case <-p.done:
+		return 0, ErrDone
+	}
+}
+
+// Wait waits for all bars to complete and finally shuts down the container.
+// After this method has been called, there is no way to reuse the `*Progress`
+// instance.
+func (p *Progress) Wait() {
+	p.bwg.Wait()
+	p.Shutdown()
+	// wait for user wg, if any
+	if p.uwg != nil {
+		p.uwg.Wait()
+	}
+}
+
+// Shutdown cancels any running bar immediately and then shuts down the
+// `*Progress` instance. Normally this method shouldn't be called unless you
+// know what you are doing. The proper way to shut down is to call
+// `(*Progress).Wait()` instead.
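+//
+// For example (illustrative), an early-exit error path might abort its bar
+// and shut the container down directly:
+//
+//	bar.Abort(true)
+//	p.Shutdown()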
+func (p *Progress) Shutdown() { + p.cancel() + p.pwg.Wait() +} + +func (p *Progress) serve(s *pState, cw *cwriter.Writer) { + defer p.pwg.Done() + var err error + var w *cwriter.Writer + renderReq := s.renderReq + operateState := p.operateState + interceptIO := p.interceptIO + + if s.delayRC != nil { + w = cwriter.New(io.Discard) + } else { + w, cw = cw, nil + } + + for { + select { + case <-s.delayRC: + w, cw = cw, nil + s.delayRC = nil + case op := <-operateState: + op(s) + case fn := <-interceptIO: + fn(w) + case <-renderReq: + err = s.render(w) + if err != nil { + // (*pState).(autoRefreshListener|manualRefreshListener) may block + // if not launching following short lived goroutine + go func() { + for { + select { + case <-s.renderReq: + case <-p.done: + return + } + } + }() + p.cancel() // cancel all bars + renderReq = nil + operateState = nil + interceptIO = nil + } + case <-p.done: + if err != nil { + _, _ = fmt.Fprintln(s.debugOut, err.Error()) + } else if s.autoRefresh { + update := make(chan bool) + for i := 0; i == 0 || <-update; i++ { + if err := s.render(w); err != nil { + _, _ = fmt.Fprintln(s.debugOut, err.Error()) + break + } + s.hm.state(update) + } + } + s.hm.end(s.shutdownNotifier) + return + } + } +} + +func (s *pState) autoRefreshListener(done chan struct{}) { + ticker := time.NewTicker(s.refreshRate) + defer ticker.Stop() + for { + select { + case t := <-ticker.C: + s.renderReq <- t + case <-s.ctx.Done(): + close(done) + return + } + } +} + +func (s *pState) manualRefreshListener(done chan struct{}) { + for { + select { + case x := <-s.manualRC: + if t, ok := x.(time.Time); ok { + s.renderReq <- t + } else { + s.renderReq <- time.Now() + } + case <-s.ctx.Done(): + close(done) + return + } + } +} + +func (s *pState) render(cw *cwriter.Writer) (err error) { + iter, iterPop := make(chan *Bar), make(chan *Bar) + s.hm.sync(s.iterDrop) + s.hm.iter(s.iterDrop, iter, iterPop) + + var width, height int + if cw.IsTerminal() { + width, height, err = cw.GetTermSize() + if err != nil { + close(s.iterDrop) + return err + } + } else { + if s.reqWidth > 0 { + width = s.reqWidth + } else { + width = 80 + } + height = width + } + + var barCount int + for b := range iter { + barCount++ + go b.render(width) + } + + return s.flush(cw, height, barCount, iterPop) +} + +func (s *pState) flush(cw *cwriter.Writer, height, barCount int, iter <-chan *Bar) error { + var total, popCount int + rows := make([][]io.Reader, 0, barCount) + + for b := range iter { + frame := <-b.frameCh + if frame.err != nil { + close(s.iterDrop) + b.cancel() + return frame.err // b.frameCh is buffered it's ok to return here + } + var discarded int + for i := len(frame.rows) - 1; i >= 0; i-- { + if total < height { + total++ + } else { + _, _ = io.Copy(io.Discard, frame.rows[i]) // Found IsInBounds + discarded++ + } + } + rows = append(rows, frame.rows) + + switch frame.shutdown { + case 1: + b.cancel() + if qb, ok := s.queueBars[b]; ok { + delete(s.queueBars, b) + qb.priority = b.priority + s.hm.push(qb, true) + } else if s.popCompleted && !frame.noPop { + b.priority = s.popPriority + s.popPriority++ + s.hm.push(b, false) + } else if !frame.rmOnComplete { + s.hm.push(b, false) + } + case 2: + if s.popCompleted && !frame.noPop { + popCount += len(frame.rows) - discarded + continue + } + fallthrough + default: + s.hm.push(b, false) + } + } + + for i := len(rows) - 1; i >= 0; i-- { + for _, r := range rows[i] { + _, err := cw.ReadFrom(r) + if err != nil { + return err + } + } + } + + return cw.Flush(total - popCount) +} 
+ +func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState { + bs := &bState{ + id: s.idCount, + priority: s.idCount, + reqWidth: s.reqWidth, + total: total, + filler: filler, + renderReq: s.renderReq, + autoRefresh: s.autoRefresh, + extender: func(_ decor.Statistics, rows ...io.Reader) ([]io.Reader, error) { + return rows, nil + }, + } + + if total > 0 { + bs.triggerComplete = true + } + + for _, opt := range options { + if opt != nil { + opt(bs) + } + } + + for _, group := range bs.decorGroups { + for _, d := range group { + if d, ok := unwrap(d).(decor.EwmaDecorator); ok { + bs.ewmaDecorators = append(bs.ewmaDecorators, d) + } + } + } + + bs.buffers[0] = bytes.NewBuffer(make([]byte, 0, 128)) // prepend + bs.buffers[1] = bytes.NewBuffer(make([]byte, 0, 128)) // append + bs.buffers[2] = bytes.NewBuffer(make([]byte, 0, 256)) // filler + + return bs +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/proxyreader.go b/tools/vendor/github.com/vbauerster/mpb/v8/proxyreader.go new file mode 100644 index 000000000..8c324f894 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/proxyreader.go @@ -0,0 +1,73 @@ +package mpb + +import ( + "io" + "time" +) + +type proxyReader struct { + io.ReadCloser + bar *Bar +} + +func (x proxyReader) Read(p []byte) (int, error) { + n, err := x.ReadCloser.Read(p) + x.bar.IncrBy(n) + return n, err +} + +type proxyWriterTo struct { + proxyReader +} + +func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) { + n, err := x.ReadCloser.(io.WriterTo).WriteTo(w) + x.bar.IncrInt64(n) + return n, err +} + +type ewmaProxyReader struct { + io.ReadCloser + bar *Bar +} + +func (x ewmaProxyReader) Read(p []byte) (int, error) { + start := time.Now() + n, err := x.ReadCloser.Read(p) + x.bar.EwmaIncrBy(n, time.Since(start)) + return n, err +} + +type ewmaProxyWriterTo struct { + ewmaProxyReader +} + +func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { + start := time.Now() + n, err := x.ReadCloser.(io.WriterTo).WriteTo(w) + x.bar.EwmaIncrInt64(n, time.Since(start)) + return n, err +} + +func newProxyReader(r io.Reader, b *Bar, hasEwma bool) io.ReadCloser { + rc := toReadCloser(r) + if hasEwma { + epr := ewmaProxyReader{rc, b} + if _, ok := r.(io.WriterTo); ok { + return ewmaProxyWriterTo{epr} + } + return epr + } + pr := proxyReader{rc, b} + if _, ok := r.(io.WriterTo); ok { + return proxyWriterTo{pr} + } + return pr +} + +func toReadCloser(r io.Reader) io.ReadCloser { + if rc, ok := r.(io.ReadCloser); ok { + return rc + } + return io.NopCloser(r) +} diff --git a/tools/vendor/github.com/vbauerster/mpb/v8/proxywriter.go b/tools/vendor/github.com/vbauerster/mpb/v8/proxywriter.go new file mode 100644 index 000000000..f260dafa8 --- /dev/null +++ b/tools/vendor/github.com/vbauerster/mpb/v8/proxywriter.go @@ -0,0 +1,96 @@ +package mpb + +import ( + "io" + "time" +) + +type proxyWriter struct { + io.WriteCloser + bar *Bar +} + +func (x proxyWriter) Write(p []byte) (int, error) { + n, err := x.WriteCloser.Write(p) + x.bar.IncrBy(n) + return n, err +} + +type proxyReaderFrom struct { + proxyWriter +} + +func (x proxyReaderFrom) ReadFrom(r io.Reader) (int64, error) { + n, err := x.WriteCloser.(io.ReaderFrom).ReadFrom(r) + x.bar.IncrInt64(n) + return n, err +} + +type ewmaProxyWriter struct { + io.WriteCloser + bar *Bar +} + +func (x ewmaProxyWriter) Write(p []byte) (int, error) { + start := time.Now() + n, err := x.WriteCloser.Write(p) + x.bar.EwmaIncrBy(n, time.Since(start)) + return n, err +} + +type ewmaProxyReaderFrom 
struct { + ewmaProxyWriter +} + +func (x ewmaProxyReaderFrom) ReadFrom(r io.Reader) (int64, error) { + start := time.Now() + n, err := x.WriteCloser.(io.ReaderFrom).ReadFrom(r) + x.bar.EwmaIncrInt64(n, time.Since(start)) + return n, err +} + +func newProxyWriter(w io.Writer, b *Bar, hasEwma bool) io.WriteCloser { + wc := toWriteCloser(w) + if hasEwma { + epw := ewmaProxyWriter{wc, b} + if _, ok := w.(io.ReaderFrom); ok { + return ewmaProxyReaderFrom{epw} + } + return epw + } + pw := proxyWriter{wc, b} + if _, ok := w.(io.ReaderFrom); ok { + return proxyReaderFrom{pw} + } + return pw +} + +func toWriteCloser(w io.Writer) io.WriteCloser { + if wc, ok := w.(io.WriteCloser); ok { + return wc + } + return toNopWriteCloser(w) +} + +func toNopWriteCloser(w io.Writer) io.WriteCloser { + if _, ok := w.(io.ReaderFrom); ok { + return nopWriteCloserReaderFrom{w} + } + return nopWriteCloser{w} +} + +type nopWriteCloser struct { + io.Writer +} + +func (nopWriteCloser) Close() error { return nil } + +type nopWriteCloserReaderFrom struct { + io.Writer +} + +func (nopWriteCloserReaderFrom) Close() error { return nil } + +func (c nopWriteCloserReaderFrom) ReadFrom(r io.Reader) (int64, error) { + return c.Writer.(io.ReaderFrom).ReadFrom(r) +} diff --git a/tools/vendor/github.com/xeipuuv/gojsonpointer/README.md b/tools/vendor/github.com/xeipuuv/gojsonpointer/README.md deleted file mode 100644 index a4f5f1458..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonpointer/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# gojsonpointer -An implementation of JSON Pointer - Go language - -## Usage - jsonText := `{ - "name": "Bobby B", - "occupation": { - "title" : "King", - "years" : 15, - "heir" : "Joffrey B" - } - }` - - var jsonDocument map[string]interface{} - json.Unmarshal([]byte(jsonText), &jsonDocument) - - //create a JSON pointer - pointerString := "/occupation/title" - pointer, _ := NewJsonPointer(pointerString) - - //SET a new value for the "title" in the document - pointer.Set(jsonDocument, "Supreme Leader of Westeros") - - //GET the new "title" from the document - title, _, _ := pointer.Get(jsonDocument) - fmt.Println(title) //outputs "Supreme Leader of Westeros" - - //DELETE the "heir" from the document - deletePointer := NewJsonPointer("/occupation/heir") - deletePointer.Delete(jsonDocument) - - b, _ := json.Marshal(jsonDocument) - fmt.Println(string(b)) - //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}` - - -## References -https://tools.ietf.org/html/rfc6901 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/tools/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/tools/vendor/github.com/xeipuuv/gojsonpointer/pointer.go deleted file mode 100644 index 798c1f1c5..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonpointer/pointer.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. -// -// created 25-02-2013 - -package gojsonpointer - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" -) - -const ( - const_empty_pointer = `` - const_pointer_separator = `/` - - const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` -) - -type implStruct struct { - mode string // "SET" or "GET" - - inDocument interface{} - - setInValue interface{} - - getOutNode interface{} - getOutKind reflect.Kind - outError error -} - -type JsonPointer struct { - referenceTokens []string -} - -// NewJsonPointer parses the given string JSON pointer and returns an object -func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { - - // Pointer to the root of the document - if len(jsonPointerString) == 0 { - // Keep referenceTokens nil - return - } - if jsonPointerString[0] != '/' { - return p, errors.New(const_invalid_start) - } - - p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) - return -} - -// Uses the pointer to retrieve a value from a JSON document -func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { - - is := &implStruct{mode: "GET", inDocument: document} - p.implementation(is) - return is.getOutNode, is.getOutKind, is.outError - -} - -// Uses the pointer to update a value from a JSON document -func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { - - is := &implStruct{mode: "SET", inDocument: document, setInValue: value} - p.implementation(is) - return document, is.outError - -} - -// Uses the pointer to delete a value from a JSON document -func (p *JsonPointer) Delete(document interface{}) (interface{}, error) { - is := &implStruct{mode: "DEL", inDocument: document} - p.implementation(is) - return document, is.outError -} - -// Both Get and Set functions use the same implementation to avoid code duplication -func (p *JsonPointer) implementation(i *implStruct) { - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - i.getOutNode = i.inDocument - i.outError = nil - i.getOutKind = kind - i.outError = nil - return - } - - node := i.inDocument - - previousNodes := make([]interface{}, len(p.referenceTokens)) - previousTokens := make([]string, len(p.referenceTokens)) - - for ti, token := range p.referenceTokens { - - isLastToken := ti == len(p.referenceTokens)-1 - previousNodes[ti] = node - previousTokens[ti] = token - - switch v := node.(type) { - - case map[string]interface{}: - decodedToken := decodeReferenceToken(token) - if _, ok := v[decodedToken]; ok { - node = v[decodedToken] - if isLastToken && i.mode == "SET" { - v[decodedToken] = i.setInValue - } else if isLastToken && i.mode == "DEL" { - delete(v, decodedToken) - } - } else if isLastToken && i.mode == "SET" { - v[decodedToken] = i.setInValue - } else { - i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) - i.getOutKind = reflect.Map - i.getOutNode = nil - return - } - - case []interface{}: - tokenIndex, err := strconv.Atoi(token) - if err != nil { - i.outError = fmt.Errorf("Invalid array index '%s'", token) - i.getOutKind = reflect.Slice - i.getOutNode = nil 
- return - } - if tokenIndex < 0 || tokenIndex >= len(v) { - i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex) - i.getOutKind = reflect.Slice - i.getOutNode = nil - return - } - - node = v[tokenIndex] - if isLastToken && i.mode == "SET" { - v[tokenIndex] = i.setInValue - } else if isLastToken && i.mode == "DEL" { - v[tokenIndex] = v[len(v)-1] - v[len(v)-1] = nil - v = v[:len(v)-1] - previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v - } - - default: - i.outError = fmt.Errorf("Invalid token reference '%s'", token) - i.getOutKind = reflect.ValueOf(node).Kind() - i.getOutNode = nil - return - } - - } - - i.getOutNode = node - i.getOutKind = reflect.ValueOf(node).Kind() - i.outError = nil -} - -// Pointer to string representation function -func (p *JsonPointer) String() string { - - if len(p.referenceTokens) == 0 { - return const_empty_pointer - } - - pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) - - return pointerString -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... and vice versa - -func decodeReferenceToken(token string) string { - step1 := strings.Replace(token, `~1`, `/`, -1) - step2 := strings.Replace(step1, `~0`, `~`, -1) - return step2 -} - -func encodeReferenceToken(token string) string { - step1 := strings.Replace(token, `~`, `~0`, -1) - step2 := strings.Replace(step1, `/`, `~1`, -1) - return step2 -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonreference/README.md b/tools/vendor/github.com/xeipuuv/gojsonreference/README.md deleted file mode 100644 index 9ab6e1eb1..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonreference/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# gojsonreference -An implementation of JSON Reference - Go language - -## Dependencies -https://github.com/xeipuuv/gojsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/tools/vendor/github.com/xeipuuv/gojsonreference/reference.go b/tools/vendor/github.com/xeipuuv/gojsonreference/reference.go deleted file mode 100644 index 645729130..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonreference/reference.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. 
-// -// created 26-02-2013 - -package gojsonreference - -import ( - "errors" - "net/url" - "path/filepath" - "runtime" - "strings" - - "github.com/xeipuuv/gojsonpointer" -) - -const ( - const_fragment_char = `#` -) - -func NewJsonReference(jsonReferenceString string) (JsonReference, error) { - - var r JsonReference - err := r.parse(jsonReferenceString) - return r, err - -} - -type JsonReference struct { - referenceUrl *url.URL - referencePointer gojsonpointer.JsonPointer - - HasFullUrl bool - HasUrlPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -func (r *JsonReference) GetUrl() *url.URL { - return r.referenceUrl -} - -func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { - return &r.referencePointer -} - -func (r *JsonReference) String() string { - - if r.referenceUrl != nil { - return r.referenceUrl.String() - } - - if r.HasFragmentOnly { - return const_fragment_char + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -func (r *JsonReference) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) -} - -// "Constructor", parses the given string JSON reference -func (r *JsonReference) parse(jsonReferenceString string) (err error) { - - r.referenceUrl, err = url.Parse(jsonReferenceString) - if err != nil { - return - } - refUrl := r.referenceUrl - - if refUrl.Scheme != "" && refUrl.Host != "" { - r.HasFullUrl = true - } else { - if refUrl.Path != "" { - r.HasUrlPathOnly = true - } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { - r.HasFragmentOnly = true - } - } - - r.HasFileScheme = refUrl.Scheme == "file" - if runtime.GOOS == "windows" { - // on Windows, a file URL may have an extra leading slash, and if it - // doesn't then its first component will be treated as the host by the - // Go runtime - if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { - r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) - } else { - r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) - } - } else { - r.HasFullFilePath = filepath.IsAbs(refUrl.Path) - } - - // invalid json-pointer error means url has no json-pointer fragment. simply ignore error - r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) - - return -} - -// Creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { - if child.GetUrl() == nil { - return nil, errors.New("childUrl is nil!") - } - - if r.GetUrl() == nil { - return nil, errors.New("parentUrl is nil!") - } - - // Get a copy of the parent url to make sure we do not modify the original. - // URL reference resolving fails if the fragment of the child is empty, but the parent's is not. - // The fragment of the child must be used, so the fragment of the parent is manually removed. 
- parentUrl := *r.GetUrl() - parentUrl.Fragment = "" - - ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) - if err != nil { - return nil, err - } - return &ref, err -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/tools/vendor/github.com/xeipuuv/gojsonschema/.gitignore deleted file mode 100644 index 68e993ce3..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.sw[nop] -*.iml -.vscode/ diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/tools/vendor/github.com/xeipuuv/gojsonschema/.travis.yml deleted file mode 100644 index 3289001cd..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - "1.11" - - "1.12" - - "1.13" -before_install: - - go get github.com/xeipuuv/gojsonreference - - go get github.com/xeipuuv/gojsonpointer - - go get github.com/stretchr/testify/assert diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/README.md b/tools/vendor/github.com/xeipuuv/gojsonschema/README.md deleted file mode 100644 index 758f26df0..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/README.md +++ /dev/null @@ -1,466 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) -[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) -[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema) - -# gojsonschema - -## Description - -An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07. - -References : - -* http://json-schema.org -* http://json-schema.org/latest/json-schema-core.html -* http://json-schema.org/latest/json-schema-validation.html - -## Installation - -``` -go get github.com/xeipuuv/gojsonschema -``` - -Dependencies : -* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer) -* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference) -* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package) - -## Usage - -### Example - -```go - -package main - -import ( - "fmt" - "github.com/xeipuuv/gojsonschema" -) - -func main() { - - schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") - documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json") - - result, err := gojsonschema.Validate(schemaLoader, documentLoader) - if err != nil { - panic(err.Error()) - } - - if result.Valid() { - fmt.Printf("The document is valid\n") - } else { - fmt.Printf("The document is not valid. see errors :\n") - for _, desc := range result.Errors() { - fmt.Printf("- %s\n", desc) - } - } -} - - -``` - -#### Loaders - -There are various ways to load your JSON data. -In order to load your schemas and documents, -first declare an appropriate loader : - -* Web / HTTP, using a reference : - -```go -loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json") -``` - -* Local file, using a reference : - -```go -loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") -``` - -References use the URI scheme, the prefix (file://) and a full path to the file are required. 
-
-* JSON strings :
-
-```go
-loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
-```
-
-* Custom Go types :
-
-```go
-m := map[string]interface{}{"type": "string"}
-loader := gojsonschema.NewGoLoader(m)
-```
-
-And
-
-```go
-type Root struct {
-	Users []User `json:"users"`
-}
-
-type User struct {
-	Name string `json:"name"`
-}
-
-...
-
-data := Root{}
-data.Users = append(data.Users, User{"John"})
-data.Users = append(data.Users, User{"Sophia"})
-data.Users = append(data.Users, User{"Bill"})
-
-loader := gojsonschema.NewGoLoader(data)
-```
-
-#### Validation
-
-Once the loaders are set, validation is easy :
-
-```go
-result, err := gojsonschema.Validate(schemaLoader, documentLoader)
-```
-
-Alternatively, you might want to load a schema only once and reuse it for multiple validations :
-
-```go
-schema, err := gojsonschema.NewSchema(schemaLoader)
-...
-result1, err := schema.Validate(documentLoader1)
-...
-result2, err := schema.Validate(documentLoader2)
-...
-// etc ...
-```
-
-To check the result :
-
-```go
-	if result.Valid() {
-		fmt.Printf("The document is valid\n")
-	} else {
-		fmt.Printf("The document is not valid. see errors :\n")
-		for _, err := range result.Errors() {
-			// Err implements the ResultError interface
-			fmt.Printf("- %s\n", err)
-		}
-	}
-```
-
-
-## Loading local schemas
-
-By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
-
-```go
-	sl := gojsonschema.NewSchemaLoader()
-	loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
-	err := sl.AddSchema("http://some_host.com/string.json", loader1)
-```
-
-Alternatively, if your schema already has an `$id`, you can use the `AddSchemas` function :
-```go
-	loader2 := gojsonschema.NewStringLoader(`{
-		"$id" : "http://some_host.com/maxlength.json",
-		"maxLength" : 5
-	}`)
-	err = sl.AddSchemas(loader2)
-```
-
-The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
-```go
-	loader3 := gojsonschema.NewStringLoader(`{
-		"$id" : "http://some_host.com/main.json",
-		"allOf" : [
-			{ "$ref" : "http://some_host.com/string.json" },
-			{ "$ref" : "http://some_host.com/maxlength.json" }
-		]
-	}`)
-
-	schema, err := sl.Compile(loader3)
-
-	documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
-
-	result, err := schema.Validate(documentLoader)
-```
-
-It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
-
-```go
-err = sl.AddSchemas(loader3)
-schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
-```
-
-Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
-
-## Using a specific draft
-By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
-
-Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
-
-```go
-sl := gojsonschema.NewSchemaLoader()
-sl.Draft = gojsonschema.Draft7
-sl.AutoDetect = false
-```
-
-If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas.
-
-## Meta-schema validation
-Schemas that are added using the `AddSchema`, `AddSchemas` and `Compile` functions can be validated against their meta-schema by setting the `Validate` property.
-
-The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
-
-```go
-sl := gojsonschema.NewSchemaLoader()
-sl.Validate = true
-err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
-	"$id" : "http://some_host.com/invalid.json",
-	"$schema": "http://json-schema.org/draft-07/schema#",
-	"multipleOf" : true
-}`))
-```
-
-Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
-
-Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the draft in use is applied.
-
-
-## Working with Errors
-
-The library handles string error codes which you can customize by creating your own `gojsonschema.locale` and setting it :
-```go
-gojsonschema.Locale = YourCustomLocale{}
-```
-
-However, each error contains additional contextual information.
-
-Newer versions of `gojsonschema` may add new errors, so code that uses a custom locale will need to be updated when this happens.
-
-**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type-check; see below.
-
-Note: an error of type RequiredError has an err.Type() return value of "required"
-
-    "required": RequiredError
-    "invalid_type": InvalidTypeError
-    "number_any_of": NumberAnyOfError
-    "number_one_of": NumberOneOfError
-    "number_all_of": NumberAllOfError
-    "number_not": NumberNotError
-    "missing_dependency": MissingDependencyError
-    "internal": InternalError
-    "const": ConstError
-    "enum": EnumError
-    "array_no_additional_items": ArrayNoAdditionalItemsError
-    "array_min_items": ArrayMinItemsError
-    "array_max_items": ArrayMaxItemsError
-    "unique": ItemsMustBeUniqueError
-    "contains": ArrayContainsError
-    "array_min_properties": ArrayMinPropertiesError
-    "array_max_properties": ArrayMaxPropertiesError
-    "additional_property_not_allowed": AdditionalPropertyNotAllowedError
-    "invalid_property_pattern": InvalidPropertyPatternError
-    "invalid_property_name": InvalidPropertyNameError
-    "string_gte": StringLengthGTEError
-    "string_lte": StringLengthLTEError
-    "pattern": DoesNotMatchPatternError
-    "multiple_of": MultipleOfError
-    "number_gte": NumberGTEError
-    "number_gt": NumberGTError
-    "number_lte": NumberLTEError
-    "number_lt": NumberLTError
-    "condition_then": ConditionThenError
-    "condition_else": ConditionElseError
-
-**err.Value()**: *interface{}* Returns the value given
-
-**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
-
-**err.Field()**: *string* Returns the field name in the format firstName, or, for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
-
-**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
-
-**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors to the result afterwards.
-
-**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
-
-Note that in most cases err.Details() is used to generate replacement strings in your locales rather than being used directly. These strings follow the text/template format, e.g. :
-```
-{{.field}} must be greater than or equal to {{.min}}
-```
-
-The library allows you to specify custom template functions, should you require more complex error message handling.
-```go
-gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
-	"allcaps": func(s string) string {
-		return strings.ToUpper(s)
-	},
-}
-```
-
-Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
-```
-{{allcaps .field}} must be greater than or equal to {{.min}}
-```
-
-The above error message would then be rendered with the `field` value in capital letters. For example:
-```
-"PASSWORD must be greater than or equal to 8"
-```
-
-Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
-
-## Formats
-JSON Schema allows for an optional "format" property to validate instances against well-known formats. gojsonschema ships with most of the formats defined in the spec, which you can use like this:
-
-````json
-{"type": "string", "format": "email"}
-````
-
-Not all formats defined in draft-07 are available. Implemented formats are:
-
-* `date`
-* `time`
-* `date-time`
-* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames.
-* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
-* `idn-email`. Same caveat as `email`.
-* `ipv4`
-* `ipv6`
-* `uri`. Includes unicode support.
-* `uri-reference`. Includes unicode support.
-* `iri`
-* `iri-reference`
-* `uri-template`
-* `uuid`
-* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
-* `json-pointer`
-* `relative-json-pointer`
-
-`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific unicode-enabled formats for the sake of interoperability, as other implementations might not support unicode in the regular formats.
-
-The validation code for `uri`, `idn-email` and their relatives uses mostly standard library code.
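-
-Putting the built-in formats above to use, here is a minimal, self-contained sketch; the schema and sample values are illustrative, and only the `NewStringLoader` and `Validate` calls already shown in this README are used:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/xeipuuv/gojsonschema"
-)
-
-func main() {
-	// Validate two sample documents against the "email" format.
-	schemaLoader := gojsonschema.NewStringLoader(`{"type": "string", "format": "email"}`)
-
-	for _, doc := range []string{`"alice@example.com"`, `"not-an-email"`} {
-		result, err := gojsonschema.Validate(schemaLoader, gojsonschema.NewStringLoader(doc))
-		if err != nil {
-			panic(err)
-		}
-		fmt.Printf("%s valid: %v\n", doc, result.Valid())
-	}
-}
-```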
-
-For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
-
-```go
-// Define the format checker
-type RoleFormatChecker struct{}
-
-// Ensure it meets the gojsonschema.FormatChecker interface
-func (f RoleFormatChecker) IsFormat(input interface{}) bool {
-
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	return strings.HasPrefix(asString, "ROLE_")
-}
-
-// Add it to the library
-gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
-```
-
-Now to use in your json schema:
-```json
-{"type": "string", "format": "role"}
-```
-
-Another example would be to check if the provided integer matches an ID in the database:
-
-JSON schema:
-```json
-{"type": "integer", "format": "ValidUserId"}
-```
-
-```go
-// Define the format checker
-type ValidUserIdFormatChecker struct{}
-
-// Ensure it meets the gojsonschema.FormatChecker interface
-func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
-
-	asFloat64, ok := input.(float64) // Numbers are always float64 here
-	if !ok {
-		return false
-	}
-
-	// Look up int(asFloat64) in the database here.
-	_ = int(asFloat64)
-
-	return true
-}
-
-// Add it to the library
-gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
-```
-
-Formats can also be removed, for example if you want to override one of the formats that is defined by default.
-
-```go
-gojsonschema.FormatCheckers.Remove("hostname")
-```
-
-
-## Additional custom validation
-After the validation has run and you have the results, you may add additional
-errors using `Result.AddError`. This is useful for keeping your own errors in the same
-format as the rest of the result set instead of handling them as special cases. Below is an example.
-
-```go
-type AnswerInvalidError struct {
-	gojsonschema.ResultErrorFields
-}
-
-func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
-	err := AnswerInvalidError{}
-	err.SetContext(context)
-	err.SetType("custom_invalid_error")
-	// It is important to use SetDescriptionFormat() here: the format is parsed and then used
-	// to set the description, so a description set directly on err would be overridden.
-	err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
-	err.SetValue(value)
-	err.SetDetails(details)
-
-	return &err
-}
-
-func main() {
-	// ...
-	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
-	if err != nil {
-		// handle the error
-	}
-
-	if true { // some validation
-		jsonContext := gojsonschema.NewJsonContext("question", nil)
-		errDetail := gojsonschema.ErrorDetails{
-			"answer": 42,
-		}
-		result.AddError(
-			newAnswerInvalidError(
-				gojsonschema.NewJsonContext("answer", jsonContext),
-				52,
-				errDetail,
-			),
-			errDetail,
-		)
-	}
-
-	// result now contains the custom error alongside any schema validation errors
-}
-```
-
-This is especially useful if you want to add validation beyond what the
-json schema drafts can provide, such as business-specific logic.
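-
-Since custom errors added this way flow through the same `Result`, downstream code can handle built-in and custom errors uniformly. A short illustrative fragment, in the same style as the snippets above (it assumes `result` comes from one of the `Validate` calls shown earlier and that `fmt` is imported; the branching is an example, not prescribed usage):
-
-```go
-for _, resultErr := range result.Errors() {
-	switch resultErr.Type() {
-	case "required":
-		// Details() carries per-error data; "property" is set for required errors
-		fmt.Printf("missing property: %v\n", resultErr.Details()["property"])
-	case "custom_invalid_error":
-		fmt.Printf("custom check failed: %s\n", resultErr.Description())
-	default:
-		fmt.Printf("%s: %s\n", resultErr.Field(), resultErr.Description())
-	}
-}
-```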
- -## Uses - -gojsonschema uses the following test suite : - -https://github.com/json-schema/JSON-Schema-Test-Suite diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/draft.go b/tools/vendor/github.com/xeipuuv/gojsonschema/draft.go deleted file mode 100644 index 61298e7aa..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/draft.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2018 johandorland ( https://github.com/johandorland ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gojsonschema - -import ( - "errors" - "math" - "reflect" - - "github.com/xeipuuv/gojsonreference" -) - -// Draft is a JSON-schema draft version -type Draft int - -// Supported Draft versions -const ( - Draft4 Draft = 4 - Draft6 Draft = 6 - Draft7 Draft = 7 - Hybrid Draft = math.MaxInt32 -) - -type draftConfig struct { - Version Draft - MetaSchemaURL string - MetaSchema string -} -type draftConfigs []draftConfig - -var drafts draftConfigs - -func init() { - drafts = []draftConfig{ - { - Version: Draft4, - MetaSchemaURL: "http://json-schema.org/draft-04/schema", - MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProp
erties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`, - }, - { - Version: Draft6, - MetaSchemaURL: "http://json-schema.org/draft-06/schema", - MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`, - }, - { - Version: Draft7, - MetaSchemaURL: "http://json-schema.org/draft-07/schema", - MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`, - }, - } -} - -func (dc draftConfigs) GetMetaSchema(url string) string { - for _, config := range dc { - if config.MetaSchemaURL == url { - return config.MetaSchema - } - } - return "" -} -func (dc draftConfigs) GetDraftVersion(url string) *Draft { - for _, config := range dc { - if config.MetaSchemaURL == url { - return &config.Version - } - } - return nil -} -func (dc draftConfigs) GetSchemaURL(draft Draft) string { - for _, config := range dc { - if config.Version == draft { - return config.MetaSchemaURL - } - } - return "" -} - -func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { - - if isKind(documentNode, reflect.Bool) { - return "", nil, nil - } - - if !isKind(documentNode, reflect.Map) { - return "", nil, errors.New("schema is invalid") - } - - m := documentNode.(map[string]interface{}) - - if existsMapKey(m, KEY_SCHEMA) { - if !isKind(m[KEY_SCHEMA], 
reflect.String) { - return "", nil, errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": KEY_SCHEMA, - "type": TYPE_STRING, - }, - )) - } - - schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string)) - - if err != nil { - return "", nil, err - } - - schema := schemaReference.String() - - return schema, drafts.GetDraftVersion(schema), nil - } - - return "", nil, nil -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/errors.go b/tools/vendor/github.com/xeipuuv/gojsonschema/errors.go deleted file mode 100644 index e4e9814f3..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/errors.go +++ /dev/null @@ -1,364 +0,0 @@ -package gojsonschema - -import ( - "bytes" - "sync" - "text/template" -) - -var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}} - -// template.Template is not thread-safe for writing, so some locking is done -// sync.RWMutex is used for efficiently locking when new templates are created -type errorTemplate struct { - *template.Template - sync.RWMutex -} - -type ( - - // FalseError. ErrorDetails: - - FalseError struct { - ResultErrorFields - } - - // RequiredError indicates that a required field is missing - // ErrorDetails: property string - RequiredError struct { - ResultErrorFields - } - - // InvalidTypeError indicates that a field has the incorrect type - // ErrorDetails: expected, given - InvalidTypeError struct { - ResultErrorFields - } - - // NumberAnyOfError is produced in case of a failing "anyOf" validation - // ErrorDetails: - - NumberAnyOfError struct { - ResultErrorFields - } - - // NumberOneOfError is produced in case of a failing "oneOf" validation - // ErrorDetails: - - NumberOneOfError struct { - ResultErrorFields - } - - // NumberAllOfError is produced in case of a failing "allOf" validation - // ErrorDetails: - - NumberAllOfError struct { - ResultErrorFields - } - - // NumberNotError is produced if a "not" validation failed - // ErrorDetails: - - NumberNotError struct { - ResultErrorFields - } - - // MissingDependencyError is produced in case of a "missing dependency" problem - // ErrorDetails: dependency - MissingDependencyError struct { - ResultErrorFields - } - - // InternalError indicates an internal error - // ErrorDetails: error - InternalError struct { - ResultErrorFields - } - - // ConstError indicates a const error - // ErrorDetails: allowed - ConstError struct { - ResultErrorFields - } - - // EnumError indicates an enum error - // ErrorDetails: allowed - EnumError struct { - ResultErrorFields - } - - // ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed - // ErrorDetails: - - ArrayNoAdditionalItemsError struct { - ResultErrorFields - } - - // ArrayMinItemsError is produced if an array contains less items than the allowed minimum - // ErrorDetails: min - ArrayMinItemsError struct { - ResultErrorFields - } - - // ArrayMaxItemsError is produced if an array contains more items than the allowed maximum - // ErrorDetails: max - ArrayMaxItemsError struct { - ResultErrorFields - } - - // ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items - // ErrorDetails: type, i, j - ItemsMustBeUniqueError struct { - ResultErrorFields - } - - // ArrayContainsError is produced if an array contains invalid items - // ErrorDetails: - ArrayContainsError struct { - ResultErrorFields - } - - // ArrayMinPropertiesError is produced if an object contains less properties than the allowed minimum - // 
ErrorDetails: min - ArrayMinPropertiesError struct { - ResultErrorFields - } - - // ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum - // ErrorDetails: max - ArrayMaxPropertiesError struct { - ResultErrorFields - } - - // AdditionalPropertyNotAllowedError is produced if an object has additional properties, but not allowed - // ErrorDetails: property - AdditionalPropertyNotAllowedError struct { - ResultErrorFields - } - - // InvalidPropertyPatternError is produced if an pattern was found - // ErrorDetails: property, pattern - InvalidPropertyPatternError struct { - ResultErrorFields - } - - // InvalidPropertyNameError is produced if an invalid-named property was found - // ErrorDetails: property - InvalidPropertyNameError struct { - ResultErrorFields - } - - // StringLengthGTEError is produced if a string is shorter than the minimum required length - // ErrorDetails: min - StringLengthGTEError struct { - ResultErrorFields - } - - // StringLengthLTEError is produced if a string is longer than the maximum allowed length - // ErrorDetails: max - StringLengthLTEError struct { - ResultErrorFields - } - - // DoesNotMatchPatternError is produced if a string does not match the defined pattern - // ErrorDetails: pattern - DoesNotMatchPatternError struct { - ResultErrorFields - } - - // DoesNotMatchFormatError is produced if a string does not match the defined format - // ErrorDetails: format - DoesNotMatchFormatError struct { - ResultErrorFields - } - - // MultipleOfError is produced if a number is not a multiple of the defined multipleOf - // ErrorDetails: multiple - MultipleOfError struct { - ResultErrorFields - } - - // NumberGTEError is produced if a number is lower than the allowed minimum - // ErrorDetails: min - NumberGTEError struct { - ResultErrorFields - } - - // NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set - // ErrorDetails: min - NumberGTError struct { - ResultErrorFields - } - - // NumberLTEError is produced if a number is higher than the allowed maximum - // ErrorDetails: max - NumberLTEError struct { - ResultErrorFields - } - - // NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set - // ErrorDetails: max - NumberLTError struct { - ResultErrorFields - } - - // ConditionThenError is produced if a condition's "then" validation is invalid - // ErrorDetails: - - ConditionThenError struct { - ResultErrorFields - } - - // ConditionElseError is produced if a condition's "else" condition is invalid - // ErrorDetails: - - ConditionElseError struct { - ResultErrorFields - } -) - -// newError takes a ResultError type and sets the type, context, description, details, value, and field -func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) { - var t string - var d string - switch err.(type) { - case *FalseError: - t = "false" - d = locale.False() - case *RequiredError: - t = "required" - d = locale.Required() - case *InvalidTypeError: - t = "invalid_type" - d = locale.InvalidType() - case *NumberAnyOfError: - t = "number_any_of" - d = locale.NumberAnyOf() - case *NumberOneOfError: - t = "number_one_of" - d = locale.NumberOneOf() - case *NumberAllOfError: - t = "number_all_of" - d = locale.NumberAllOf() - case *NumberNotError: - t = "number_not" - d = locale.NumberNot() - case *MissingDependencyError: - t = "missing_dependency" - d = locale.MissingDependency() - 
case *InternalError: - t = "internal" - d = locale.Internal() - case *ConstError: - t = "const" - d = locale.Const() - case *EnumError: - t = "enum" - d = locale.Enum() - case *ArrayNoAdditionalItemsError: - t = "array_no_additional_items" - d = locale.ArrayNoAdditionalItems() - case *ArrayMinItemsError: - t = "array_min_items" - d = locale.ArrayMinItems() - case *ArrayMaxItemsError: - t = "array_max_items" - d = locale.ArrayMaxItems() - case *ItemsMustBeUniqueError: - t = "unique" - d = locale.Unique() - case *ArrayContainsError: - t = "contains" - d = locale.ArrayContains() - case *ArrayMinPropertiesError: - t = "array_min_properties" - d = locale.ArrayMinProperties() - case *ArrayMaxPropertiesError: - t = "array_max_properties" - d = locale.ArrayMaxProperties() - case *AdditionalPropertyNotAllowedError: - t = "additional_property_not_allowed" - d = locale.AdditionalPropertyNotAllowed() - case *InvalidPropertyPatternError: - t = "invalid_property_pattern" - d = locale.InvalidPropertyPattern() - case *InvalidPropertyNameError: - t = "invalid_property_name" - d = locale.InvalidPropertyName() - case *StringLengthGTEError: - t = "string_gte" - d = locale.StringGTE() - case *StringLengthLTEError: - t = "string_lte" - d = locale.StringLTE() - case *DoesNotMatchPatternError: - t = "pattern" - d = locale.DoesNotMatchPattern() - case *DoesNotMatchFormatError: - t = "format" - d = locale.DoesNotMatchFormat() - case *MultipleOfError: - t = "multiple_of" - d = locale.MultipleOf() - case *NumberGTEError: - t = "number_gte" - d = locale.NumberGTE() - case *NumberGTError: - t = "number_gt" - d = locale.NumberGT() - case *NumberLTEError: - t = "number_lte" - d = locale.NumberLTE() - case *NumberLTError: - t = "number_lt" - d = locale.NumberLT() - case *ConditionThenError: - t = "condition_then" - d = locale.ConditionThen() - case *ConditionElseError: - t = "condition_else" - d = locale.ConditionElse() - } - - err.SetType(t) - err.SetContext(context) - err.SetValue(value) - err.SetDetails(details) - err.SetDescriptionFormat(d) - details["field"] = err.Field() - - if _, exists := details["context"]; !exists && context != nil { - details["context"] = context.String() - } - - err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) -} - -// formatErrorDescription takes a string in the default text/template -// format and converts it to a string with replacements. The fields come -// from the ErrorDetails struct and vary for each type of error. 
-func formatErrorDescription(s string, details ErrorDetails) string { - - var tpl *template.Template - var descrAsBuffer bytes.Buffer - var err error - - errorTemplates.RLock() - tpl = errorTemplates.Lookup(s) - errorTemplates.RUnlock() - - if tpl == nil { - errorTemplates.Lock() - tpl = errorTemplates.New(s) - - if ErrorTemplateFuncs != nil { - tpl.Funcs(ErrorTemplateFuncs) - } - - tpl, err = tpl.Parse(s) - errorTemplates.Unlock() - - if err != nil { - return err.Error() - } - } - - err = tpl.Execute(&descrAsBuffer, details) - if err != nil { - return err.Error() - } - - return descrAsBuffer.String() -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/tools/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go deleted file mode 100644 index 873ffc7d7..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go +++ /dev/null @@ -1,368 +0,0 @@ -package gojsonschema - -import ( - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "sync" - "time" -) - -type ( - // FormatChecker is the interface all formatters added to FormatCheckerChain must implement - FormatChecker interface { - // IsFormat checks if input has the correct format and type - IsFormat(input interface{}) bool - } - - // FormatCheckerChain holds the formatters - FormatCheckerChain struct { - formatters map[string]FormatChecker - } - - // EmailFormatChecker verifies email address formats - EmailFormatChecker struct{} - - // IPV4FormatChecker verifies IP addresses in the IPv4 format - IPV4FormatChecker struct{} - - // IPV6FormatChecker verifies IP addresses in the IPv6 format - IPV6FormatChecker struct{} - - // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6 - // - // Valid formats: - // Partial Time: HH:MM:SS - // Full Date: YYYY-MM-DD - // Full Time: HH:MM:SSZ-07:00 - // Date Time: YYYY-MM-DDTHH:MM:SSZ-0700 - // - // Where - // YYYY = 4DIGIT year - // MM = 2DIGIT month ; 01-12 - // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year - // HH = 2DIGIT hour ; 00-23 - // MM = 2DIGIT ; 00-59 - // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules - // T = Literal - // Z = Literal - // - // Note: Nanoseconds are also suported in all formats - // - // http://tools.ietf.org/html/rfc3339#section-5.6 - DateTimeFormatChecker struct{} - - // DateFormatChecker verifies date formats - // - // Valid format: - // Full Date: YYYY-MM-DD - // - // Where - // YYYY = 4DIGIT year - // MM = 2DIGIT month ; 01-12 - // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year - DateFormatChecker struct{} - - // TimeFormatChecker verifies time formats - // - // Valid formats: - // Partial Time: HH:MM:SS - // Full Time: HH:MM:SSZ-07:00 - // - // Where - // HH = 2DIGIT hour ; 00-23 - // MM = 2DIGIT ; 00-59 - // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules - // T = Literal - // Z = Literal - TimeFormatChecker struct{} - - // URIFormatChecker validates a URI with a valid Scheme per RFC3986 - URIFormatChecker struct{} - - // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986 - URIReferenceFormatChecker struct{} - - // URITemplateFormatChecker validates a URI template per RFC6570 - URITemplateFormatChecker struct{} - - // HostnameFormatChecker validates a hostname is in the correct format - HostnameFormatChecker struct{} - - // UUIDFormatChecker validates a UUID is in the correct format - UUIDFormatChecker struct{} - - // RegexFormatChecker validates a regex is in the correct format - RegexFormatChecker struct{} - - // 
JSONPointerFormatChecker validates a JSON Pointer per RFC6901
-	JSONPointerFormatChecker struct{}
-
-	// RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format
-	RelativeJSONPointerFormatChecker struct{}
-)
-
-var (
-	// FormatCheckers holds the valid formatters, and is a public variable
-	// so library users can add custom formatters
-	FormatCheckers = FormatCheckerChain{
-		formatters: map[string]FormatChecker{
-			"date":                  DateFormatChecker{},
-			"time":                  TimeFormatChecker{},
-			"date-time":             DateTimeFormatChecker{},
-			"hostname":              HostnameFormatChecker{},
-			"email":                 EmailFormatChecker{},
-			"idn-email":             EmailFormatChecker{},
-			"ipv4":                  IPV4FormatChecker{},
-			"ipv6":                  IPV6FormatChecker{},
-			"uri":                   URIFormatChecker{},
-			"uri-reference":         URIReferenceFormatChecker{},
-			"iri":                   URIFormatChecker{},
-			"iri-reference":         URIReferenceFormatChecker{},
-			"uri-template":          URITemplateFormatChecker{},
-			"uuid":                  UUIDFormatChecker{},
-			"regex":                 RegexFormatChecker{},
-			"json-pointer":          JSONPointerFormatChecker{},
-			"relative-json-pointer": RelativeJSONPointerFormatChecker{},
-		},
-	}
-
-	// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
-	rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
-
-	// Use a regex to make sure curly brackets are balanced properly after validating it as a URI
-	rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$")
-
-	rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
-
-	rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$")
-
-	rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$")
-
-	lock = new(sync.RWMutex)
-)
-
-// Add adds a FormatChecker to the FormatCheckerChain
-// The name used will be the value used for the format key in your json schema
-func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
-	lock.Lock()
-	c.formatters[name] = f
-	lock.Unlock()
-
-	return c
-}
-
-// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
-func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
-	lock.Lock()
-	delete(c.formatters, name)
-	lock.Unlock()
-
-	return c
-}
-
-// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
-func (c *FormatCheckerChain) Has(name string) bool {
-	lock.RLock()
-	_, ok := c.formatters[name]
-	lock.RUnlock()
-
-	return ok
-}
-
-// IsFormat will check an input against a FormatChecker with the given name
-// to see if it is the correct format
-func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
-	lock.RLock()
-	f, ok := c.formatters[name]
-	lock.RUnlock()
-
-	// If a format is unrecognized it should always pass validation
-	if !ok {
-		return true
-	}
-
-	return f.IsFormat(input)
-}
-
-// IsFormat checks if input is a correctly formatted e-mail address
-func (f EmailFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	_, err := mail.ParseAddress(asString)
-	return err == nil
-}
-
-// IsFormat checks if input is a correctly formatted IPv4 address
-func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	// Credit: https://github.com/asaskevich/govalidator
-	ip := net.ParseIP(asString)
-	return ip != nil && strings.Contains(asString, ".")
-}
-
-// IsFormat checks if input is a correctly formatted IPv6 address
-func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	// Credit: https://github.com/asaskevich/govalidator
-	ip := net.ParseIP(asString)
-	return ip != nil && strings.Contains(asString, ":")
-}
-
-// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
-func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	formats := []string{
-		"15:04:05",
-		"15:04:05Z07:00",
-		"2006-01-02",
-		time.RFC3339,
-		time.RFC3339Nano,
-	}
-
-	for _, format := range formats {
-		if _, err := time.Parse(format, asString); err == nil {
-			return true
-		}
-	}
-
-	return false
-}
-
-// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
-func (f DateFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-	_, err := time.Parse("2006-01-02", asString)
-	return err == nil
-}
-
-// IsFormat checks if input is a correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00)
-func (f TimeFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	if _, err := time.Parse("15:04:05Z07:00", asString); err == nil {
-		return true
-	}
-
-	_, err := time.Parse("15:04:05", asString)
-	return err == nil
-}
-
-// IsFormat checks if input is a correctly formatted URI with a valid Scheme per RFC3986
-func (f URIFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	u, err := url.Parse(asString)
-
-	if err != nil || u.Scheme == "" {
-		return false
-	}
-
-	return !strings.Contains(asString, `\`)
-}
-
-// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
-func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	_, err := url.Parse(asString)
-	return err == nil && !strings.Contains(asString, `\`)
-}
-
-// IsFormat checks if input is a correctly formatted URI template per RFC6570
-func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	u, err := url.Parse(asString)
-	if err != nil || strings.Contains(asString, `\`) {
-		return false
-	}
-
-	return rxURITemplate.MatchString(u.Path)
-}
-
-// IsFormat checks if input is a correctly formatted hostname
-func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	return rxHostname.MatchString(asString) && len(asString) < 256
-}
-
-// IsFormat checks if input is a correctly formatted UUID
-func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	return rxUUID.MatchString(asString)
-}
-
-// IsFormat checks if input is a correctly formatted regular expression
-func (f RegexFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	if asString == "" {
-		return true
-	}
-	_, err := regexp.Compile(asString)
-	return err == nil
-}
-
-// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
-func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
-	asString, ok := input.(string)
-	if !ok {
-		return false
-	}
-
-	return rxJSONPointer.MatchString(asString)
-}
-
-// IsFormat checks if 
input is a correctly formatted relative JSON Pointer -func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxRelJSONPointer.MatchString(asString) -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/tools/vendor/github.com/xeipuuv/gojsonschema/glide.yaml deleted file mode 100644 index ab6fb867c..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/glide.yaml +++ /dev/null @@ -1,13 +0,0 @@ -package: github.com/xeipuuv/gojsonschema -license: Apache 2.0 -import: -- package: github.com/xeipuuv/gojsonschema - -- package: github.com/xeipuuv/gojsonpointer - -- package: github.com/xeipuuv/gojsonreference - -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/tools/vendor/github.com/xeipuuv/gojsonschema/internalLog.go deleted file mode 100644 index 4ef7a8d03..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/internalLog.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Very simple log wrapper. -// Used for debugging/testing purposes. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "log" -) - -const internalLogEnabled = false - -func internalLog(format string, v ...interface{}) { - log.Printf(format, v...) -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/tools/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go deleted file mode 100644 index 0e979707b..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2013 MongoDB, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author tolsen -// author-github https://github.com/tolsen -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
-// -// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context -// -// created 04-09-2013 - -package gojsonschema - -import "bytes" - -// JsonContext implements a persistent linked-list of strings -type JsonContext struct { - head string - tail *JsonContext -} - -// NewJsonContext creates a new JsonContext -func NewJsonContext(head string, tail *JsonContext) *JsonContext { - return &JsonContext{head, tail} -} - -// String displays the context in reverse. -// This plays well with the data structure's persistent nature with -// Cons and a json document's tree structure. -func (c *JsonContext) String(del ...string) string { - byteArr := make([]byte, 0, c.stringLen()) - buf := bytes.NewBuffer(byteArr) - c.writeStringToBuffer(buf, del) - - return buf.String() -} - -func (c *JsonContext) stringLen() int { - length := 0 - if c.tail != nil { - length = c.tail.stringLen() + 1 // add 1 for "." - } - - length += len(c.head) - return length -} - -func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { - if c.tail != nil { - c.tail.writeStringToBuffer(buf, del) - - if len(del) > 0 { - buf.WriteString(del[0]) - } else { - buf.WriteString(".") - } - } - - buf.WriteString(c.head) -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/tools/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go deleted file mode 100644 index 5d88af263..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Different strategies to load JSON files. -// Includes References (file and HTTP), JSON strings and Go types. 
-// -// created 01-02-2015 - -package gojsonschema - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/xeipuuv/gojsonreference" -) - -var osFS = osFileSystem(os.Open) - -// JSONLoader defines the JSON loader interface -type JSONLoader interface { - JsonSource() interface{} - LoadJSON() (interface{}, error) - JsonReference() (gojsonreference.JsonReference, error) - LoaderFactory() JSONLoaderFactory -} - -// JSONLoaderFactory defines the JSON loader factory interface -type JSONLoaderFactory interface { - // New creates a new JSON loader for the given source - New(source string) JSONLoader -} - -// DefaultJSONLoaderFactory is the default JSON loader factory -type DefaultJSONLoaderFactory struct { -} - -// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem -type FileSystemJSONLoaderFactory struct { - fs http.FileSystem -} - -// New creates a new JSON loader for the given source -func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -// New creates a new JSON loader for the given source -func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: f.fs, - source: source, - } -} - -// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. -type osFileSystem func(string) (*os.File, error) - -// Opens a file with the given name -func (o osFileSystem) Open(name string) (http.File, error) { - return o(name) -} - -// JSON Reference loader -// references are used to load JSONs from files and HTTP - -type jsonReferenceLoader struct { - fs http.FileSystem - source string -} - -func (l *jsonReferenceLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference(l.JsonSource().(string)) -} - -func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { - return &FileSystemJSONLoaderFactory{ - fs: l.fs, - } -} - -// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. -func NewReferenceLoader(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
-func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader { - return &jsonReferenceLoader{ - fs: fs, - source: source, - } -} - -func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { - - var err error - - reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) - if err != nil { - return nil, err - } - - refToURL := reference - refToURL.GetUrl().Fragment = "" - - var document interface{} - - if reference.HasFileScheme { - - filename := strings.TrimPrefix(refToURL.String(), "file://") - filename, err = url.QueryUnescape(filename) - - if err != nil { - return nil, err - } - - if runtime.GOOS == "windows" { - // on Windows, a file URL may have an extra leading slash, use slashes - // instead of backslashes, and have spaces escaped - filename = strings.TrimPrefix(filename, "/") - filename = filepath.FromSlash(filename) - } - - document, err = l.loadFromFile(filename) - if err != nil { - return nil, err - } - - } else { - - document, err = l.loadFromHTTP(refToURL.String()) - if err != nil { - return nil, err - } - - } - - return document, nil - -} - -func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { - - // returned cached versions for metaschemas for drafts 4, 6 and 7 - // for performance and allow for easier offline use - if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" { - return decodeJSONUsingNumber(strings.NewReader(metaSchema)) - } - - resp, err := http.Get(address) - if err != nil { - return nil, err - } - - // must return HTTP Status 200 OK - if resp.StatusCode != http.StatusOK { - return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) - } - - bodyBuff, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) -} - -func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { - f, err := l.fs.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - bodyBuff, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) - -} - -// JSON string loader - -type jsonStringLoader struct { - source string -} - -func (l *jsonStringLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewStringLoader creates a new JSONLoader, taking a string as source -func NewStringLoader(source string) JSONLoader { - return &jsonStringLoader{source: source} -} - -func (l *jsonStringLoader) LoadJSON() (interface{}, error) { - - return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string))) - -} - -// JSON bytes loader - -type jsonBytesLoader struct { - source []byte -} - -func (l *jsonBytesLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source -func NewBytesLoader(source []byte) JSONLoader { - return &jsonBytesLoader{source: source} -} - -func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { - return 
decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) -} - -// JSON Go (types) loader -// used to load JSONs from the code as maps, interface{}, structs ... - -type jsonGoLoader struct { - source interface{} -} - -func (l *jsonGoLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewGoLoader creates a new JSONLoader from a given Go struct -func NewGoLoader(source interface{}) JSONLoader { - return &jsonGoLoader{source: source} -} - -func (l *jsonGoLoader) LoadJSON() (interface{}, error) { - - // convert it to a compliant JSON first to avoid types "mismatches" - - jsonBytes, err := json.Marshal(l.JsonSource()) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(jsonBytes)) - -} - -type jsonIOLoader struct { - buf *bytes.Buffer -} - -// NewReaderLoader creates a new JSON loader using the provided io.Reader -func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) -} - -// NewWriterLoader creates a new JSON loader using the provided io.Writer -func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) -} - -func (l *jsonIOLoader) JsonSource() interface{} { - return l.buf.String() -} - -func (l *jsonIOLoader) LoadJSON() (interface{}, error) { - return decodeJSONUsingNumber(l.buf) -} - -func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// JSON raw loader -// In case the JSON is already marshalled to interface{} use this loader -// This is used for testing as otherwise there is no guarantee the JSON is marshalled -// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber -type jsonRawLoader struct { - source interface{} -} - -// NewRawLoader creates a new JSON raw loader for the given source -func NewRawLoader(source interface{}) JSONLoader { - return &jsonRawLoader{source: source} -} -func (l *jsonRawLoader) JsonSource() interface{} { - return l.source -} -func (l *jsonRawLoader) LoadJSON() (interface{}, error) { - return l.source, nil -} -func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} -func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { - - var document interface{} - - decoder := json.NewDecoder(r) - decoder.UseNumber() - - err := decoder.Decode(&document) - if err != nil { - return nil, err - } - - return document, nil - -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/locales.go b/tools/vendor/github.com/xeipuuv/gojsonschema/locales.go deleted file mode 100644 index a416225cd..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/locales.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
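Every loader above funnels into decodeJSONUsingNumber, whose json.Decoder.UseNumber call is what keeps large integers exact instead of silently rounding them through float64. A minimal sketch of that behavior using only the standard library:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Mirrors the deleted decodeJSONUsingNumber helper: UseNumber keeps
	// the value as a json.Number string instead of a lossy float64.
	dec := json.NewDecoder(strings.NewReader(`{"id": 12345678901234567890}`))
	dec.UseNumber()

	var doc interface{}
	if err := dec.Decode(&doc); err != nil {
		panic(err)
	}
	id := doc.(map[string]interface{})["id"]
	fmt.Printf("%T %v\n", id, id) // json.Number 12345678901234567890
}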
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const string and messages. -// -// created 01-01-2015 - -package gojsonschema - -type ( - // locale is an interface for defining custom error strings - locale interface { - - // False returns a format-string for "false" schema validation errors - False() string - - // Required returns a format-string for "required" schema validation errors - Required() string - - // InvalidType returns a format-string for "invalid type" schema validation errors - InvalidType() string - - // NumberAnyOf returns a format-string for "anyOf" schema validation errors - NumberAnyOf() string - - // NumberOneOf returns a format-string for "oneOf" schema validation errors - NumberOneOf() string - - // NumberAllOf returns a format-string for "allOf" schema validation errors - NumberAllOf() string - - // NumberNot returns a format-string to format a NumberNotError - NumberNot() string - - // MissingDependency returns a format-string for "missing dependency" schema validation errors - MissingDependency() string - - // Internal returns a format-string for internal errors - Internal() string - - // Const returns a format-string to format a ConstError - Const() string - - // Enum returns a format-string to format an EnumError - Enum() string - - // ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema - ArrayNotEnoughItems() string - - // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError - ArrayNoAdditionalItems() string - - // ArrayMinItems returns a format-string to format an ArrayMinItemsError - ArrayMinItems() string - - // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError - ArrayMaxItems() string - - // Unique returns a format-string to format an ItemsMustBeUniqueError - Unique() string - - // ArrayContains returns a format-string to format an ArrayContainsError - ArrayContains() string - - // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError - ArrayMinProperties() string - - // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError - ArrayMaxProperties() string - - // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError - AdditionalPropertyNotAllowed() string - - // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError - InvalidPropertyPattern() string - - // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError - InvalidPropertyName() string - - // StringGTE returns a format-string to format an StringLengthGTEError - StringGTE() string - - // StringLTE returns a format-string to format an StringLengthLTEError - StringLTE() string - - // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError - DoesNotMatchPattern() string 
- - // DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError - DoesNotMatchFormat() string - - // MultipleOf returns a format-string to format an MultipleOfError - MultipleOf() string - - // NumberGTE returns a format-string to format an NumberGTEError - NumberGTE() string - - // NumberGT returns a format-string to format an NumberGTError - NumberGT() string - - // NumberLTE returns a format-string to format an NumberLTEError - NumberLTE() string - - // NumberLT returns a format-string to format an NumberLTError - NumberLT() string - - // Schema validations - - // RegexPattern returns a format-string to format a regex-pattern error - RegexPattern() string - - // GreaterThanZero returns a format-string to format an error where a number must be greater than zero - GreaterThanZero() string - - // MustBeOfA returns a format-string to format an error where a value is of the wrong type - MustBeOfA() string - - // MustBeOfAn returns a format-string to format an error where a value is of the wrong type - MustBeOfAn() string - - // CannotBeUsedWithout returns a format-string to format a "cannot be used without" error - CannotBeUsedWithout() string - - // CannotBeGT returns a format-string to format an error where a value are greater than allowed - CannotBeGT() string - - // MustBeOfType returns a format-string to format an error where a value does not match the required type - MustBeOfType() string - - // MustBeValidRegex returns a format-string to format an error where a regex is invalid - MustBeValidRegex() string - - // MustBeValidFormat returns a format-string to format an error where a value does not match the expected format - MustBeValidFormat() string - - // MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 - MustBeGTEZero() string - - // KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed - KeyCannotBeGreaterThan() string - - // KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type - KeyItemsMustBeOfType() string - - // KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique - KeyItemsMustBeUnique() string - - // ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error - ReferenceMustBeCanonical() string - - // NotAValidType returns a format-string to format an invalid type error - NotAValidType() string - - // Duplicated returns a format-string to format an error where types are duplicated - Duplicated() string - - // HttpBadStatus returns a format-string for errors when loading a schema using HTTP - HttpBadStatus() string - - // ParseError returns a format-string for JSON parsing errors - ParseError() string - - // ConditionThen returns a format-string for ConditionThenError errors - ConditionThen() string - - // ConditionElse returns a format-string for ConditionElseError errors - ConditionElse() string - - // ErrorFormat returns a format string for errors - ErrorFormat() string - } - - // DefaultLocale is the default locale for this package - DefaultLocale struct{} -) - -// False returns a format-string for "false" schema validation errors -func (l DefaultLocale) False() string { - return "False always fails validation" -} - -// Required returns a format-string for "required" schema validation errors -func (l DefaultLocale) Required() string { - return `{{.property}} is required` -} - -// InvalidType returns a format-string for 
"invalid type" schema validation errors -func (l DefaultLocale) InvalidType() string { - return `Invalid type. Expected: {{.expected}}, given: {{.given}}` -} - -// NumberAnyOf returns a format-string for "anyOf" schema validation errors -func (l DefaultLocale) NumberAnyOf() string { - return `Must validate at least one schema (anyOf)` -} - -// NumberOneOf returns a format-string for "oneOf" schema validation errors -func (l DefaultLocale) NumberOneOf() string { - return `Must validate one and only one schema (oneOf)` -} - -// NumberAllOf returns a format-string for "allOf" schema validation errors -func (l DefaultLocale) NumberAllOf() string { - return `Must validate all the schemas (allOf)` -} - -// NumberNot returns a format-string to format a NumberNotError -func (l DefaultLocale) NumberNot() string { - return `Must not validate the schema (not)` -} - -// MissingDependency returns a format-string for "missing dependency" schema validation errors -func (l DefaultLocale) MissingDependency() string { - return `Has a dependency on {{.dependency}}` -} - -// Internal returns a format-string for internal errors -func (l DefaultLocale) Internal() string { - return `Internal Error {{.error}}` -} - -// Const returns a format-string to format a ConstError -func (l DefaultLocale) Const() string { - return `{{.field}} does not match: {{.allowed}}` -} - -// Enum returns a format-string to format an EnumError -func (l DefaultLocale) Enum() string { - return `{{.field}} must be one of the following: {{.allowed}}` -} - -// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError -func (l DefaultLocale) ArrayNoAdditionalItems() string { - return `No additional items allowed on array` -} - -// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema -func (l DefaultLocale) ArrayNotEnoughItems() string { - return `Not enough items on array to match positional list of schema` -} - -// ArrayMinItems returns a format-string to format an ArrayMinItemsError -func (l DefaultLocale) ArrayMinItems() string { - return `Array must have at least {{.min}} items` -} - -// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError -func (l DefaultLocale) ArrayMaxItems() string { - return `Array must have at most {{.max}} items` -} - -// Unique returns a format-string to format an ItemsMustBeUniqueError -func (l DefaultLocale) Unique() string { - return `{{.type}} items[{{.i}},{{.j}}] must be unique` -} - -// ArrayContains returns a format-string to format an ArrayContainsError -func (l DefaultLocale) ArrayContains() string { - return `At least one of the items must match` -} - -// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError -func (l DefaultLocale) ArrayMinProperties() string { - return `Must have at least {{.min}} properties` -} - -// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError -func (l DefaultLocale) ArrayMaxProperties() string { - return `Must have at most {{.max}} properties` -} - -// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError -func (l DefaultLocale) AdditionalPropertyNotAllowed() string { - return `Additional property {{.property}} is not allowed` -} - -// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError -func (l DefaultLocale) InvalidPropertyPattern() string { - return `Property "{{.property}}" does not match pattern {{.pattern}}` -} 
- -// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError -func (l DefaultLocale) InvalidPropertyName() string { - return `Property name of "{{.property}}" does not match` -} - -// StringGTE returns a format-string to format an StringLengthGTEError -func (l DefaultLocale) StringGTE() string { - return `String length must be greater than or equal to {{.min}}` -} - -// StringLTE returns a format-string to format an StringLengthLTEError -func (l DefaultLocale) StringLTE() string { - return `String length must be less than or equal to {{.max}}` -} - -// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError -func (l DefaultLocale) DoesNotMatchPattern() string { - return `Does not match pattern '{{.pattern}}'` -} - -// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError -func (l DefaultLocale) DoesNotMatchFormat() string { - return `Does not match format '{{.format}}'` -} - -// MultipleOf returns a format-string to format an MultipleOfError -func (l DefaultLocale) MultipleOf() string { - return `Must be a multiple of {{.multiple}}` -} - -// NumberGTE returns the format string to format a NumberGTEError -func (l DefaultLocale) NumberGTE() string { - return `Must be greater than or equal to {{.min}}` -} - -// NumberGT returns the format string to format a NumberGTError -func (l DefaultLocale) NumberGT() string { - return `Must be greater than {{.min}}` -} - -// NumberLTE returns the format string to format a NumberLTEError -func (l DefaultLocale) NumberLTE() string { - return `Must be less than or equal to {{.max}}` -} - -// NumberLT returns the format string to format a NumberLTError -func (l DefaultLocale) NumberLT() string { - return `Must be less than {{.max}}` -} - -// Schema validators - -// RegexPattern returns a format-string to format a regex-pattern error -func (l DefaultLocale) RegexPattern() string { - return `Invalid regex pattern '{{.pattern}}'` -} - -// GreaterThanZero returns a format-string to format an error where a number must be greater than zero -func (l DefaultLocale) GreaterThanZero() string { - return `{{.number}} must be strictly greater than 0` -} - -// MustBeOfA returns a format-string to format an error where a value is of the wrong type -func (l DefaultLocale) MustBeOfA() string { - return `{{.x}} must be of a {{.y}}` -} - -// MustBeOfAn returns a format-string to format an error where a value is of the wrong type -func (l DefaultLocale) MustBeOfAn() string { - return `{{.x}} must be of an {{.y}}` -} - -// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error -func (l DefaultLocale) CannotBeUsedWithout() string { - return `{{.x}} cannot be used without {{.y}}` -} - -// CannotBeGT returns a format-string to format an error where a value are greater than allowed -func (l DefaultLocale) CannotBeGT() string { - return `{{.x}} cannot be greater than {{.y}}` -} - -// MustBeOfType returns a format-string to format an error where a value does not match the required type -func (l DefaultLocale) MustBeOfType() string { - return `{{.key}} must be of type {{.type}}` -} - -// MustBeValidRegex returns a format-string to format an error where a regex is invalid -func (l DefaultLocale) MustBeValidRegex() string { - return `{{.key}} must be a valid regex` -} - -// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format -func (l DefaultLocale) MustBeValidFormat() string { - return `{{.key}} must be a valid format 
{{.given}}` -} - -// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 -func (l DefaultLocale) MustBeGTEZero() string { - return `{{.key}} must be greater than or equal to 0` -} - -// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed -func (l DefaultLocale) KeyCannotBeGreaterThan() string { - return `{{.key}} cannot be greater than {{.y}}` -} - -// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type -func (l DefaultLocale) KeyItemsMustBeOfType() string { - return `{{.key}} items must be {{.type}}` -} - -// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique -func (l DefaultLocale) KeyItemsMustBeUnique() string { - return `{{.key}} items must be unique` -} - -// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error -func (l DefaultLocale) ReferenceMustBeCanonical() string { - return `Reference {{.reference}} must be canonical` -} - -// NotAValidType returns a format-string to format an invalid type error -func (l DefaultLocale) NotAValidType() string { - return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` -} - -// Duplicated returns a format-string to format an error where types are duplicated -func (l DefaultLocale) Duplicated() string { - return `{{.type}} type is duplicated` -} - -// HttpBadStatus returns a format-string for errors when loading a schema using HTTP -func (l DefaultLocale) HttpBadStatus() string { - return `Could not read schema from HTTP, response status is {{.status}}` -} - -// ErrorFormat returns a format string for errors -// Replacement options: field, description, context, value -func (l DefaultLocale) ErrorFormat() string { - return `{{.field}}: {{.description}}` -} - -// ParseError returns a format-string for JSON parsing errors -func (l DefaultLocale) ParseError() string { - return `Expected: {{.expected}}, given: Invalid JSON` -} - -// ConditionThen returns a format-string for ConditionThenError errors -// If/Else -func (l DefaultLocale) ConditionThen() string { - return `Must validate "then" as "if" was valid` -} - -// ConditionElse returns a format-string for ConditionElseError errors -func (l DefaultLocale) ConditionElse() string { - return `Must validate "else" as "if" was not valid` -} - -// constants -const ( - STRING_NUMBER = "number" - STRING_ARRAY_OF_STRINGS = "array of strings" - STRING_ARRAY_OF_SCHEMAS = "array of schemas" - STRING_SCHEMA = "valid schema" - STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" - STRING_PROPERTIES = "properties" - STRING_DEPENDENCY = "dependency" - STRING_PROPERTY = "property" - STRING_UNDEFINED = "undefined" - STRING_CONTEXT_ROOT = "(root)" - STRING_ROOT_SCHEMA_PROPERTY = "(root)" -) diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/result.go b/tools/vendor/github.com/xeipuuv/gojsonschema/result.go deleted file mode 100644 index 0a0179148..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/result.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
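Because Locale is an exported package variable, callers can swap in their own messages by embedding DefaultLocale and overriding individual methods. A usage sketch, with the terseLocale type being a hypothetical example; the printed field/description shape follows the ErrorFormat template above:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

// terseLocale inherits every DefaultLocale message and
// overrides only the "required" one.
type terseLocale struct {
	gojsonschema.DefaultLocale
}

func (terseLocale) Required() string { return "missing {{.property}}" }

func main() {
	gojsonschema.Locale = terseLocale{}

	schema := gojsonschema.NewStringLoader(`{"required": ["name"]}`)
	doc := gojsonschema.NewStringLoader(`{}`)

	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		panic(err)
	}
	for _, e := range result.Errors() {
		fmt.Println(e) // e.g. (root): missing name
	}
}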
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Result and ResultError implementations. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "fmt" - "strings" -) - -type ( - // ErrorDetails is a map of details specific to each error. - // While the values will vary, every error will contain a "field" value - ErrorDetails map[string]interface{} - - // ResultError is the interface that library errors must implement - ResultError interface { - // Field returns the field name without the root context - // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName - Field() string - // SetType sets the error-type - SetType(string) - // Type returns the error-type - Type() string - // SetContext sets the JSON-context for the error - SetContext(*JsonContext) - // Context returns the JSON-context of the error - Context() *JsonContext - // SetDescription sets a description for the error - SetDescription(string) - // Description returns the description of the error - Description() string - // SetDescriptionFormat sets the format for the description in the default text/template format - SetDescriptionFormat(string) - // DescriptionFormat returns the format for the description in the default text/template format - DescriptionFormat() string - // SetValue sets the value related to the error - SetValue(interface{}) - // Value returns the value related to the error - Value() interface{} - // SetDetails sets the details specific to the error - SetDetails(ErrorDetails) - // Details returns details about the error - Details() ErrorDetails - // String returns a string representation of the error - String() string - } - - // ResultErrorFields holds the fields for each ResultError implementation. - // ResultErrorFields implements the ResultError interface, so custom errors - // can be defined by just embedding this type - ResultErrorFields struct { - errorType string // A string with the type of error (i.e. invalid_type) - context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... - description string // A human readable error message - descriptionFormat string // A format for human readable error message - value interface{} // Value given by the JSON file that is the source of the error - details ErrorDetails - } - - // Result holds the result of a validation - Result struct { - errors []ResultError - // Scores how well the validation matched. Useful in generating - // better error messages for anyOf and oneOf. - score int - } -) - -// Field returns the field name without the root context -// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName -func (v *ResultErrorFields) Field() string { - return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") -} - -// SetType sets the error-type -func (v *ResultErrorFields) SetType(errorType string) { - v.errorType = errorType -} - -// Type returns the error-type -func (v *ResultErrorFields) Type() string { - return v.errorType -} - -// SetContext sets the JSON-context for the error -func (v *ResultErrorFields) SetContext(context *JsonContext) { - v.context = context -} - -// Context returns the JSON-context of the error -func (v *ResultErrorFields) Context() *JsonContext { - return v.context -} - -// SetDescription sets a description for the error -func (v *ResultErrorFields) SetDescription(description string) { - v.description = description -} - -// Description returns the description of the error -func (v *ResultErrorFields) Description() string { - return v.description -} - -// SetDescriptionFormat sets the format for the description in the default text/template format -func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) { - v.descriptionFormat = descriptionFormat -} - -// DescriptionFormat returns the format for the description in the default text/template format -func (v *ResultErrorFields) DescriptionFormat() string { - return v.descriptionFormat -} - -// SetValue sets the value related to the error -func (v *ResultErrorFields) SetValue(value interface{}) { - v.value = value -} - -// Value returns the value related to the error -func (v *ResultErrorFields) Value() interface{} { - return v.value -} - -// SetDetails sets the details specific to the error -func (v *ResultErrorFields) SetDetails(details ErrorDetails) { - v.details = details -} - -// Details returns details about the error -func (v *ResultErrorFields) Details() ErrorDetails { - return v.details -} - -// String returns a string representation of the error -func (v ResultErrorFields) String() string { - // as a fallback, the value is displayed go style - valueString := fmt.Sprintf("%v", v.value) - - // marshal the go value value to json - if v.value == nil { - valueString = TYPE_NULL - } else { - if vs, err := marshalToJSONString(v.value); err == nil { - if vs == nil { - valueString = TYPE_NULL - } else { - valueString = *vs - } - } - } - - return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ - "context": v.context.String(), - "description": v.description, - "value": valueString, - "field": v.Field(), - }) -} - -// Valid indicates if no errors were found -func (v *Result) Valid() bool { - return len(v.errors) == 0 -} - -// Errors returns the errors that were found -func (v *Result) Errors() []ResultError { - return v.errors -} - -// AddError appends a fully filled error to the error set -// SetDescription() will be called with the result of the parsed err.DescriptionFormat() -func (v *Result) AddError(err ResultError, details ErrorDetails) { - if _, exists := details["context"]; !exists && err.Context() != nil { - details["context"] = err.Context().String() - } - - err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) - - v.errors = append(v.errors, err) -} - -func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) { - newError(err, context, value, Locale, details) - v.errors = append(v.errors, err) - v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function -} 
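In typical use, callers only touch the Result API above: check Valid(), then walk Errors() and read each ResultError's accessors. A short sketch against the package's public entry point:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
		"type": "object",
		"properties": {"age": {"type": "integer", "minimum": 0}}
	}`)
	doc := gojsonschema.NewStringLoader(`{"age": -3}`)

	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		panic(err) // loader/schema error, not a validation failure
	}
	if !result.Valid() {
		for _, e := range result.Errors() {
			// Field() strips the "(root)." prefix, e.g. "age".
			fmt.Printf("%s [%s]: %s (value %v)\n",
				e.Field(), e.Type(), e.Description(), e.Value())
		}
	}
}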
- -// Used to copy errors from a sub-schema to the main one -func (v *Result) mergeErrors(otherResult *Result) { - v.errors = append(v.errors, otherResult.Errors()...) - v.score += otherResult.score -} - -func (v *Result) incrementScore() { - v.score++ -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/schema.go b/tools/vendor/github.com/xeipuuv/gojsonschema/schema.go deleted file mode 100644 index 9e93cd795..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/schema.go +++ /dev/null @@ -1,1087 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines Schema, the main entry to every subSchema. -// Contains the parsing logic and error checking. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "errors" - "math/big" - "reflect" - "regexp" - "text/template" - - "github.com/xeipuuv/gojsonreference" -) - -var ( - // Locale is the default locale to use - // Library users can overwrite with their own implementation - Locale locale = DefaultLocale{} - - // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. - ErrorTemplateFuncs template.FuncMap -) - -// NewSchema instances a schema using the given JSONLoader -func NewSchema(l JSONLoader) (*Schema, error) { - return NewSchemaLoader().Compile(l) -} - -// Schema holds a schema -type Schema struct { - documentReference gojsonreference.JsonReference - rootSchema *subSchema - pool *schemaPool - referencePool *schemaReferencePool -} - -func (d *Schema) parse(document interface{}, draft Draft) error { - d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft} - return d.parseSchema(document, d.rootSchema) -} - -// SetRootSchemaName sets the root-schema name -func (d *Schema) SetRootSchemaName(name string) { - d.rootSchema.property = name -} - -// Parses a subSchema -// -// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring -// Not much magic involved here, most of the job is to validate the key names and their values, -// then the values are copied into subSchema struct -// -func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { - - if currentSchema.draft == nil { - if currentSchema.parent == nil { - return errors.New("Draft not set") - } - currentSchema.draft = currentSchema.parent.draft - } - - // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}" - if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) { - b := documentNode.(bool) - currentSchema.pass = &b - return nil - } - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.ParseError(), - ErrorDetails{ - "expected": STRING_SCHEMA, - }, - )) - } - - m := documentNode.(map[string]interface{}) - - if currentSchema.parent == nil { - currentSchema.ref = &d.documentReference - currentSchema.id = &d.documentReference - } - - if currentSchema.id == nil && currentSchema.parent != nil { - currentSchema.id = currentSchema.parent.id - } - - // In draft 6 the id keyword was renamed to $id - // Hybrid mode uses the old id by default - var keyID string - - switch *currentSchema.draft { - case Draft4: - keyID = KEY_ID - case Hybrid: - keyID = KEY_ID_NEW - if existsMapKey(m, KEY_ID) { - keyID = KEY_ID - } - default: - keyID = KEY_ID_NEW - } - if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": keyID, - }, - )) - } - if k, ok := m[keyID].(string); ok { - jsonReference, err := gojsonreference.NewJsonReference(k) - if err != nil { - return err - } - if currentSchema == d.rootSchema { - currentSchema.id = &jsonReference - } else { - ref, err := currentSchema.parent.id.Inherits(jsonReference) - if err != nil { - return err - } - currentSchema.id = ref - } - } - - // definitions - if existsMapKey(m, KEY_DEFINITIONS) { - if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) { - for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { - if isKind(dv, reflect.Map, reflect.Bool) { - - newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema} - - err := d.parseSchema(dv, newSchema) - - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - - } - - // title - if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_TITLE, - }, - )) - } - if k, ok := m[KEY_TITLE].(string); ok { - currentSchema.title = &k - } - - // description - if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_DESCRIPTION, - }, - )) - } - if k, ok := m[KEY_DESCRIPTION].(string); ok { - currentSchema.description = &k - } - - // $ref - if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { - return errors.New(formatErrorDescription( - 
Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_REF, - }, - )) - } - - if k, ok := m[KEY_REF].(string); ok { - - jsonReference, err := gojsonreference.NewJsonReference(k) - if err != nil { - return err - } - - currentSchema.ref = &jsonReference - - if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { - currentSchema.refSchema = sch - } else { - err := d.parseReference(documentNode, currentSchema) - - if err != nil { - return err - } - - return nil - } - } - - // type - if existsMapKey(m, KEY_TYPE) { - if isKind(m[KEY_TYPE], reflect.String) { - if k, ok := m[KEY_TYPE].(string); ok { - err := currentSchema.types.Add(k) - if err != nil { - return err - } - } - } else { - if isKind(m[KEY_TYPE], reflect.Slice) { - arrayOfTypes := m[KEY_TYPE].([]interface{}) - for _, typeInArray := range arrayOfTypes { - if reflect.ValueOf(typeInArray).Kind() != reflect.String { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } - if err := currentSchema.types.Add(typeInArray.(string)); err != nil { - return err - } - } - - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } - } - } - - // properties - if existsMapKey(m, KEY_PROPERTIES) { - err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) - if err != nil { - return err - } - } - - // additionalProperties - if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { - if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { - currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) - } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalProperties = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_PROPERTIES, - }, - )) - } - } - - // patternProperties - if existsMapKey(m, KEY_PATTERN_PROPERTIES) { - if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { - patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) - if len(patternPropertiesMap) > 0 { - currentSchema.patternProperties = make(map[string]*subSchema) - for k, v := range patternPropertiesMap { - _, err := regexp.MatchString(k, "") - if err != nil { - return errors.New(formatErrorDescription( - Locale.RegexPattern(), - ErrorDetails{"pattern": k}, - )) - } - newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err = d.parseSchema(v, newSchema) - if err != nil { - return errors.New(err.Error()) - } - currentSchema.patternProperties[k] = newSchema - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA, - "given": KEY_PATTERN_PROPERTIES, - }, - )) - } - } - - // propertyNames - if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 { - if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref} - currentSchema.propertyNames = newSchema - err := 
d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA, - "given": KEY_PATTERN_PROPERTIES, - }, - )) - } - } - - // dependencies - if existsMapKey(m, KEY_DEPENDENCIES) { - err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) - if err != nil { - return err - } - } - - // items - if existsMapKey(m, KEY_ITEMS) { - if isKind(m[KEY_ITEMS], reflect.Slice) { - for _, itemElement := range m[KEY_ITEMS].([]interface{}) { - if isKind(itemElement, reflect.Map, reflect.Bool) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) - err := d.parseSchema(itemElement, newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - currentSchema.itemsChildrenIsSingleSchema = false - } - } else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) - err := d.parseSchema(m[KEY_ITEMS], newSchema) - if err != nil { - return err - } - currentSchema.itemsChildrenIsSingleSchema = true - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - } - - // additionalItems - if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { - if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { - currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) - } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalItems = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_ITEMS, - }, - )) - } - } - - // validation : number / integer - - if existsMapKey(m, KEY_MULTIPLE_OF) { - multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) - if multipleOfValue == nil { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_NUMBER, - "given": KEY_MULTIPLE_OF, - }, - )) - } - if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 { - return errors.New(formatErrorDescription( - Locale.GreaterThanZero(), - ErrorDetails{"number": KEY_MULTIPLE_OF}, - )) - } - currentSchema.multipleOf = multipleOfValue - } - - if existsMapKey(m, KEY_MINIMUM) { - minimumValue := mustBeNumber(m[KEY_MINIMUM]) - if minimumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.minimum = minimumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { - switch *currentSchema.draft { - case Draft4: - if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - 
)) - } - if currentSchema.minimum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MINIMUM].(bool) { - currentSchema.exclusiveMinimum = currentSchema.minimum - currentSchema.minimum = nil - } - case Hybrid: - if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { - if currentSchema.minimum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MINIMUM].(bool) { - currentSchema.exclusiveMinimum = currentSchema.minimum - currentSchema.minimum = nil - } - } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { - currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - )) - } - default: - if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { - currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - )) - } - } - } - - if existsMapKey(m, KEY_MAXIMUM) { - maximumValue := mustBeNumber(m[KEY_MAXIMUM]) - if maximumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.maximum = maximumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { - switch *currentSchema.draft { - case Draft4: - if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - if currentSchema.maximum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { - currentSchema.exclusiveMaximum = currentSchema.maximum - currentSchema.maximum = nil - } - case Hybrid: - if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { - if currentSchema.maximum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { - currentSchema.exclusiveMaximum = currentSchema.maximum - currentSchema.maximum = nil - } - } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { - currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - default: - if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { - currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - } - } - - // validation : string - - if existsMapKey(m, KEY_MIN_LENGTH) { - minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) - if minLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, - )) - } - if 
*minLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_LENGTH}, - )) - } - currentSchema.minLength = minLengthIntegerValue - } - - if existsMapKey(m, KEY_MAX_LENGTH) { - maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) - if maxLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, - )) - } - if *maxLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_LENGTH}, - )) - } - currentSchema.maxLength = maxLengthIntegerValue - } - - if currentSchema.minLength != nil && currentSchema.maxLength != nil { - if *currentSchema.minLength > *currentSchema.maxLength { - return errors.New(formatErrorDescription( - Locale.CannotBeGT(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, - )) - } - } - - if existsMapKey(m, KEY_PATTERN) { - if isKind(m[KEY_PATTERN], reflect.String) { - regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) - if err != nil { - return errors.New(formatErrorDescription( - Locale.MustBeValidRegex(), - ErrorDetails{"key": KEY_PATTERN}, - )) - } - currentSchema.pattern = regexpObject - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, - )) - } - } - - if existsMapKey(m, KEY_FORMAT) { - formatString, ok := m[KEY_FORMAT].(string) - if !ok { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING}, - )) - } - currentSchema.format = formatString - } - - // validation : object - - if existsMapKey(m, KEY_MIN_PROPERTIES) { - minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) - if minPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *minPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_PROPERTIES}, - )) - } - currentSchema.minProperties = minPropertiesIntegerValue - } - - if existsMapKey(m, KEY_MAX_PROPERTIES) { - maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) - if maxPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *maxPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_PROPERTIES}, - )) - } - currentSchema.maxProperties = maxPropertiesIntegerValue - } - - if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { - if *currentSchema.minProperties > *currentSchema.maxProperties { - return errors.New(formatErrorDescription( - Locale.KeyCannotBeGreaterThan(), - ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, - )) - } - } - - if existsMapKey(m, KEY_REQUIRED) { - if isKind(m[KEY_REQUIRED], reflect.Slice) { - requiredValues := m[KEY_REQUIRED].([]interface{}) - for _, requiredValue := range requiredValues { - if isKind(requiredValue, reflect.String) { - if isStringInSlice(currentSchema.required, requiredValue.(string)) { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeUnique(), - ErrorDetails{"key": KEY_REQUIRED}, - )) - } - currentSchema.required = append(currentSchema.required, requiredValue.(string)) - } else 
{ - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeOfType(), - ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, - )) - } - } - - // validation : array - - if existsMapKey(m, KEY_MIN_ITEMS) { - minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) - if minItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *minItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_ITEMS}, - )) - } - currentSchema.minItems = minItemsIntegerValue - } - - if existsMapKey(m, KEY_MAX_ITEMS) { - maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) - if maxItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *maxItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_ITEMS}, - )) - } - currentSchema.maxItems = maxItemsIntegerValue - } - - if existsMapKey(m, KEY_UNIQUE_ITEMS) { - if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { - currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, - )) - } - } - - if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 { - newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.contains = newSchema - err := d.parseSchema(m[KEY_CONTAINS], newSchema) - if err != nil { - return err - } - } - - // validation : all - - if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 { - is, err := marshalWithoutNumber(m[KEY_CONST]) - if err != nil { - return err - } - currentSchema._const = is - } - - if existsMapKey(m, KEY_ENUM) { - if isKind(m[KEY_ENUM], reflect.Slice) { - for _, v := range m[KEY_ENUM].([]interface{}) { - is, err := marshalWithoutNumber(v) - if err != nil { - return err - } - if isStringInSlice(currentSchema.enum, *is) { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeUnique(), - ErrorDetails{"key": KEY_ENUM}, - )) - } - currentSchema.enum = append(currentSchema.enum, *is) - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, - )) - } - } - - // validation : subSchema - - if existsMapKey(m, KEY_ONE_OF) { - if isKind(m[KEY_ONE_OF], reflect.Slice) { - for _, v := range m[KEY_ONE_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.oneOf = append(currentSchema.oneOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ANY_OF) { - if isKind(m[KEY_ANY_OF], reflect.Slice) { - for _, v := range m[KEY_ANY_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.anyOf = append(currentSchema.anyOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return 
errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ALL_OF) { - if isKind(m[KEY_ALL_OF], reflect.Slice) { - for _, v := range m[KEY_ALL_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.allOf = append(currentSchema.allOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_NOT) { - if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} - currentSchema.not = newSchema - err := d.parseSchema(m[KEY_NOT], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, - )) - } - } - - if *currentSchema.draft >= Draft7 { - if existsMapKey(m, KEY_IF) { - if isKind(m[KEY_IF], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref} - currentSchema._if = newSchema - err := d.parseSchema(m[KEY_IF], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT}, - )) - } - } - - if existsMapKey(m, KEY_THEN) { - if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref} - currentSchema._then = newSchema - err := d.parseSchema(m[KEY_THEN], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT}, - )) - } - } - - if existsMapKey(m, KEY_ELSE) { - if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref} - currentSchema._else = newSchema - err := d.parseSchema(m[KEY_ELSE], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT}, - )) - } - } - } - - return nil -} - -func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error { - var ( - refdDocumentNode interface{} - dsp *schemaPoolDocument - err error - ) - - newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} - - d.referencePool.Add(currentSchema.ref.String(), newSchema) - - dsp, err = d.pool.GetDocument(*currentSchema.ref) - if err != nil { - return err - } - newSchema.id = currentSchema.ref - - refdDocumentNode = dsp.Document - newSchema.draft = dsp.Draft - - if err != nil { - return err - } - - if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, - )) - } - - err = d.parseSchema(refdDocumentNode, newSchema) - if err != nil { - return err - } - - currentSchema.refSchema = newSchema - - return nil - -} - -func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - 
ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - for k := range m { - schemaProperty := k - newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} - currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema) - err := d.parseSchema(m[k], newSchema) - if err != nil { - return err - } - } - - return nil -} - -func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - currentSchema.dependencies = make(map[string]interface{}) - - for k := range m { - switch reflect.ValueOf(m[k]).Kind() { - - case reflect.Slice: - values := m[k].([]interface{}) - var valuesToRegister []string - - for _, value := range values { - if !isKind(value, reflect.String) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } - valuesToRegister = append(valuesToRegister, value.(string)) - currentSchema.dependencies[k] = valuesToRegister - } - - case reflect.Map, reflect.Bool: - depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err := d.parseSchema(m[k], depSchema) - if err != nil { - return err - } - currentSchema.dependencies[k] = depSchema - - default: - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } - - } - - return nil -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/tools/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go deleted file mode 100644 index 20db0c1f9..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2018 johandorland ( https://github.com/johandorland ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package gojsonschema - -import ( - "bytes" - "errors" - - "github.com/xeipuuv/gojsonreference" -) - -// SchemaLoader is used to load schemas -type SchemaLoader struct { - pool *schemaPool - AutoDetect bool - Validate bool - Draft Draft -} - -// NewSchemaLoader creates a new SchemaLoader -func NewSchemaLoader() *SchemaLoader { - - ps := &SchemaLoader{ - pool: &schemaPool{ - schemaPoolDocuments: make(map[string]*schemaPoolDocument), - }, - AutoDetect: true, - Validate: false, - Draft: Hybrid, - } - ps.pool.autoDetect = &ps.AutoDetect - - return ps -} - -func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error { - - var ( - schema string - err error - ) - if sl.AutoDetect { - schema, _, err = parseSchemaURL(documentNode) - if err != nil { - return err - } - } - - // If no explicit "$schema" is used, use the default metaschema associated with the draft used - if schema == "" { - if sl.Draft == Hybrid { - return nil - } - schema = drafts.GetSchemaURL(sl.Draft) - } - - // Disable validation when loading the metaschema to prevent an infinite recursive loop - sl.Validate = false - - metaSchema, err := sl.Compile(NewReferenceLoader(schema)) - - if err != nil { - return err - } - - sl.Validate = true - - result := metaSchema.validateDocument(documentNode) - - if !result.Valid() { - var res bytes.Buffer - for _, err := range result.Errors() { - res.WriteString(err.String()) - res.WriteString("\n") - } - return errors.New(res.String()) - } - - return nil -} - -// AddSchemas adds an arbitrary number of schemas to the schema cache. As this function does not require -// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema -func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error { - emptyRef, _ := gojsonreference.NewJsonReference("") - - for _, loader := range loaders { - doc, err := loader.LoadJSON() - - if err != nil { - return err - } - - if sl.Validate { - if err := sl.validateMetaschema(doc); err != nil { - return err - } - } - - // Directly use the Recursive function, so that it gets added to the schema pool only by $id - // and not by the ref of the document, as that is empty - if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil { - return err - } - } - - return nil -} - -// AddSchema adds a schema under the provided URL to the schema cache -func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error { - - ref, err := gojsonreference.NewJsonReference(url) - - if err != nil { - return err - } - - doc, err := loader.LoadJSON() - - if err != nil { - return err - } - - if sl.Validate { - if err := sl.validateMetaschema(doc); err != nil { - return err - } - } - - return sl.pool.parseReferences(doc, ref, true) -} - -// Compile loads and compiles a schema -func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) { - - ref, err := rootSchema.JsonReference() - - if err != nil { - return nil, err - } - - d := Schema{} - d.pool = sl.pool - d.pool.jsonLoaderFactory = rootSchema.LoaderFactory() - d.documentReference = ref - d.referencePool = newSchemaReferencePool() - - var doc interface{} - if ref.String() != "" { - // Get document from schema pool - spd, err := d.pool.GetDocument(d.documentReference) - if err != nil { - return nil, err - } - doc = spd.Document - } else { - // Load JSON directly - doc, err = rootSchema.LoadJSON() - if err != nil { - return nil, err - } - // References need only be parsed if loading JSON directly - // as pool.GetDocument already does this for us if loading by 
reference - err = sl.pool.parseReferences(doc, ref, true) - if err != nil { - return nil, err - } - } - - if sl.Validate { - if err := sl.validateMetaschema(doc); err != nil { - return nil, err - } - } - - draft := sl.Draft - if sl.AutoDetect { - _, detectedDraft, err := parseSchemaURL(doc) - if err != nil { - return nil, err - } - if detectedDraft != nil { - draft = *detectedDraft - } - } - - err = d.parse(doc, draft) - if err != nil { - return nil, err - } - - return &d, nil -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/tools/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go deleted file mode 100644 index 35b1cc630..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines resources pooling. -// Eases referencing and avoids downloading the same resource twice. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "errors" - "fmt" - "reflect" - - "github.com/xeipuuv/gojsonreference" -) - -type schemaPoolDocument struct { - Document interface{} - Draft *Draft -} - -type schemaPool struct { - schemaPoolDocuments map[string]*schemaPoolDocument - jsonLoaderFactory JSONLoaderFactory - autoDetect *bool -} - -func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { - - var ( - draft *Draft - err error - reference = ref.String() - ) - // Only the root document should be added to the schema pool if pooled is true - if _, ok := p.schemaPoolDocuments[reference]; pooled && ok { - return fmt.Errorf("Reference already exists: \"%s\"", reference) - } - - if *p.autoDetect { - _, draft, err = parseSchemaURL(document) - if err != nil { - return err - } - } - - err = p.parseReferencesRecursive(document, ref, draft) - - if pooled { - p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft} - } - - return err -} - -func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { - // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. - // For $ref references it takes into account the $id scope it is in and replaces - // the reference by the absolute resolved reference - - // When encountering errors it fails silently. Error handling is done when the schema - // is syntactically parsed and any error encountered here should also come up there. 
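// Illustrative sketch (example values, not from the original source): given a
// root schema declaring "$id": "http://example.com/root.json" that contains
// {"$ref": "item.json#/definitions/positive"}, the $ref value is rewritten in
// place to the absolute form "http://example.com/item.json#/definitions/positive",
// and any sub-document declaring its own $id is registered in the pool under
// that resolved identifier so later lookups can find it.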
- switch m := document.(type) { - case []interface{}: - for _, v := range m { - p.parseReferencesRecursive(v, ref, draft) - } - case map[string]interface{}: - localRef := &ref - - keyID := KEY_ID_NEW - if existsMapKey(m, KEY_ID) { - keyID = KEY_ID - } - if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) { - jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string)) - if err == nil { - localRef, err = ref.Inherits(jsonReference) - if err == nil { - if _, ok := p.schemaPoolDocuments[localRef.String()]; ok { - return fmt.Errorf("Reference already exists: \"%s\"", localRef.String()) - } - p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft} - } - } - } - - if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) { - jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string)) - if err == nil { - absoluteRef, err := localRef.Inherits(jsonReference) - if err == nil { - m[KEY_REF] = absoluteRef.String() - } - } - } - - for k, v := range m { - // const and enums should be interpreted literally, so ignore them - if k == KEY_CONST || k == KEY_ENUM { - continue - } - // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc. - // Therefore don't treat it like a schema. - if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES { - if child, ok := v.(map[string]interface{}); ok { - for _, v := range child { - p.parseReferencesRecursive(v, *localRef, draft) - } - } - } else { - p.parseReferencesRecursive(v, *localRef, draft) - } - } - } - return nil -} - -func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { - - var ( - spd *schemaPoolDocument - draft *Draft - ok bool - err error - ) - - if internalLogEnabled { - internalLog("Get Document ( %s )", reference.String()) - } - - // Create a deep copy, so we can remove the fragment part later on without altering the original - refToURL, _ := gojsonreference.NewJsonReference(reference.String()) - - // First check if the given fragment is a location-independent identifier - // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3 - - if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok { - if internalLogEnabled { - internalLog(" From pool") - } - return spd, nil - } - - // If the given reference is not a location-independent identifier, - // strip the fragment and look for a document with its base URI - - refToURL.GetUrl().Fragment = "" - - if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok { - document, _, err := reference.GetPointer().Get(cachedSpd.Document) - - if err != nil { - return nil, err - } - - if internalLogEnabled { - internalLog(" From pool") - } - - spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft} - p.schemaPoolDocuments[reference.String()] = spd - - return spd, nil - } - - // It is not possible to load anything remotely that is not canonical... 
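// For illustration (an assumption about gojsonreference semantics, not stated
// in this file): a canonical reference carries a full URL or an absolute file
// path, e.g. "http://example.com/schema.json#/definitions/x"; a bare fragment
// such as "#/definitions/x" is not canonical and can only be resolved against
// a document that is already present in the pool.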
- if !reference.IsCanonical() { - return nil, errors.New(formatErrorDescription( - Locale.ReferenceMustBeCanonical(), - ErrorDetails{"reference": reference.String()}, - )) - } - - jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) - document, err := jsonReferenceLoader.LoadJSON() - - if err != nil { - return nil, err - } - - // add the whole document to the pool for potential re-use - p.parseReferences(document, refToURL, true) - - _, draft, _ = parseSchemaURL(document) - - // resolve the potential fragment and also cache it - document, _, err = reference.GetPointer().Get(document) - - if err != nil { - return nil, err - } - - return &schemaPoolDocument{Document: document, Draft: draft}, nil -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/tools/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go deleted file mode 100644 index 6e5e1b5cd..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Pool of referenced schemas. -// -// created 25-06-2013 - -package gojsonschema - -import ( - "fmt" -) - -type schemaReferencePool struct { - documents map[string]*subSchema -} - -func newSchemaReferencePool() *schemaReferencePool { - - p := &schemaReferencePool{} - p.documents = make(map[string]*subSchema) - - return p -} - -func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) - } - - if sch, ok := p.documents[ref]; ok { - if internalLogEnabled { - internalLog(fmt.Sprintf(" From pool")) - } - return sch, true - } - - return nil, false -} - -func (p *schemaReferencePool) Add(ref string, sch *subSchema) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) - } - if _, ok := p.documents[ref]; !ok { - p.documents[ref] = sch - } -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/tools/vendor/github.com/xeipuuv/gojsonschema/schemaType.go deleted file mode 100644 index 36b447a29..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/schemaType.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Helper structure to handle schema types, and the combination of them. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "errors" - "fmt" - "strings" -) - -type jsonSchemaType struct { - types []string -} - -// Is the schema typed ? that is containing at least one type -// When not typed, the schema does not need any type validation -func (t *jsonSchemaType) IsTyped() bool { - return len(t.types) > 0 -} - -func (t *jsonSchemaType) Add(etype string) error { - - if !isStringInSlice(JSON_TYPES, etype) { - return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) - } - - if t.Contains(etype) { - return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) - } - - t.types = append(t.types, etype) - - return nil -} - -func (t *jsonSchemaType) Contains(etype string) bool { - - for _, v := range t.types { - if v == etype { - return true - } - } - - return false -} - -func (t *jsonSchemaType) String() string { - - if len(t.types) == 0 { - return STRING_UNDEFINED // should never happen - } - - // Displayed as a list [type1,type2,...] - if len(t.types) > 1 { - return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) - } - - // Only one type: name only - return t.types[0] -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/tools/vendor/github.com/xeipuuv/gojsonschema/subSchema.go deleted file mode 100644 index ec779812c..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/subSchema.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines the structure of a sub-subSchema. -// A sub-subSchema can contain other sub-schemas. 
-// -// created 27-02-2013 - -package gojsonschema - -import ( - "github.com/xeipuuv/gojsonreference" - "math/big" - "regexp" -) - -// Constants -const ( - KEY_SCHEMA = "$schema" - KEY_ID = "id" - KEY_ID_NEW = "$id" - KEY_REF = "$ref" - KEY_TITLE = "title" - KEY_DESCRIPTION = "description" - KEY_TYPE = "type" - KEY_ITEMS = "items" - KEY_ADDITIONAL_ITEMS = "additionalItems" - KEY_PROPERTIES = "properties" - KEY_PATTERN_PROPERTIES = "patternProperties" - KEY_ADDITIONAL_PROPERTIES = "additionalProperties" - KEY_PROPERTY_NAMES = "propertyNames" - KEY_DEFINITIONS = "definitions" - KEY_MULTIPLE_OF = "multipleOf" - KEY_MINIMUM = "minimum" - KEY_MAXIMUM = "maximum" - KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" - KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" - KEY_MIN_LENGTH = "minLength" - KEY_MAX_LENGTH = "maxLength" - KEY_PATTERN = "pattern" - KEY_FORMAT = "format" - KEY_MIN_PROPERTIES = "minProperties" - KEY_MAX_PROPERTIES = "maxProperties" - KEY_DEPENDENCIES = "dependencies" - KEY_REQUIRED = "required" - KEY_MIN_ITEMS = "minItems" - KEY_MAX_ITEMS = "maxItems" - KEY_UNIQUE_ITEMS = "uniqueItems" - KEY_CONTAINS = "contains" - KEY_CONST = "const" - KEY_ENUM = "enum" - KEY_ONE_OF = "oneOf" - KEY_ANY_OF = "anyOf" - KEY_ALL_OF = "allOf" - KEY_NOT = "not" - KEY_IF = "if" - KEY_THEN = "then" - KEY_ELSE = "else" -) - -type subSchema struct { - draft *Draft - - // basic subSchema meta properties - id *gojsonreference.JsonReference - title *string - description *string - - property string - - // Quick pass/fail for boolean schemas - pass *bool - - // Types associated with the subSchema - types jsonSchemaType - - // Reference url - ref *gojsonreference.JsonReference - // Schema referenced - refSchema *subSchema - - // hierarchy - parent *subSchema - itemsChildren []*subSchema - itemsChildrenIsSingleSchema bool - propertiesChildren []*subSchema - - // validation : number / integer - multipleOf *big.Rat - maximum *big.Rat - exclusiveMaximum *big.Rat - minimum *big.Rat - exclusiveMinimum *big.Rat - - // validation : string - minLength *int - maxLength *int - pattern *regexp.Regexp - format string - - // validation : object - minProperties *int - maxProperties *int - required []string - - dependencies map[string]interface{} - additionalProperties interface{} - patternProperties map[string]*subSchema - propertyNames *subSchema - - // validation : array - minItems *int - maxItems *int - uniqueItems bool - contains *subSchema - - additionalItems interface{} - - // validation : all - _const *string //const is a golang keyword - enum []string - - // validation : subSchema - oneOf []*subSchema - anyOf []*subSchema - allOf []*subSchema - not *subSchema - _if *subSchema // if/else are golang keywords - _then *subSchema - _else *subSchema -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/types.go b/tools/vendor/github.com/xeipuuv/gojsonschema/types.go deleted file mode 100644 index 0e6fd5173..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/types.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const types for schema and JSON. -// -// created 28-02-2013 - -package gojsonschema - -// Type constants -const ( - TYPE_ARRAY = `array` - TYPE_BOOLEAN = `boolean` - TYPE_INTEGER = `integer` - TYPE_NUMBER = `number` - TYPE_NULL = `null` - TYPE_OBJECT = `object` - TYPE_STRING = `string` -) - -// JSON_TYPES hosts the list of types that are supported in JSON -var JSON_TYPES []string - -// SCHEMA_TYPES hosts the list of types that are supported in schemas -var SCHEMA_TYPES []string - -func init() { - JSON_TYPES = []string{ - TYPE_ARRAY, - TYPE_BOOLEAN, - TYPE_INTEGER, - TYPE_NUMBER, - TYPE_NULL, - TYPE_OBJECT, - TYPE_STRING} - - SCHEMA_TYPES = []string{ - TYPE_ARRAY, - TYPE_BOOLEAN, - TYPE_INTEGER, - TYPE_NUMBER, - TYPE_OBJECT, - TYPE_STRING} -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/utils.go b/tools/vendor/github.com/xeipuuv/gojsonschema/utils.go deleted file mode 100644 index a17d22e3b..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/utils.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Various utility functions. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "encoding/json" - "math/big" - "reflect" -) - -func isKind(what interface{}, kinds ...reflect.Kind) bool { - target := what - if isJSONNumber(what) { - // JSON Numbers are strings! - target = *mustBeNumber(what) - } - targetKind := reflect.ValueOf(target).Kind() - for _, kind := range kinds { - if targetKind == kind { - return true - } - } - return false } - -func existsMapKey(m map[string]interface{}, k string) bool { - _, ok := m[k] - return ok } - -func isStringInSlice(s []string, what string) bool { - for i := range s { - if s[i] == what { - return true - } - } - return false } - -// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s. 
-func indexStringInSlice(s []string, what string) int { - for i := range s { - if s[i] == what { - return i - } - } - return -1 -} - -func marshalToJSONString(value interface{}) (*string, error) { - - mBytes, err := json.Marshal(value) - if err != nil { - return nil, err - } - - sBytes := string(mBytes) - return &sBytes, nil -} - -func marshalWithoutNumber(value interface{}) (*string, error) { - - // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber - // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 - // One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber - // so that these differences in representation are removed - - jsonString, err := marshalToJSONString(value) - if err != nil { - return nil, err - } - - var document interface{} - - err = json.Unmarshal([]byte(*jsonString), &document) - if err != nil { - return nil, err - } - - return marshalToJSONString(document) -} - -func isJSONNumber(what interface{}) bool { - - switch what.(type) { - - case json.Number: - return true - } - - return false -} - -func checkJSONInteger(what interface{}) (isInt bool) { - - jsonNumber := what.(json.Number) - - bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber)) - - return isValidNumber && bigFloat.IsInt() - -} - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 -) - -func mustBeInteger(what interface{}) *int { - - if isJSONNumber(what) { - - number := what.(json.Number) - - isInt := checkJSONInteger(number) - - if isInt { - - int64Value, err := number.Int64() - if err != nil { - return nil - } - - int32Value := int(int64Value) - return &int32Value - } - - } - - return nil -} - -func mustBeNumber(what interface{}) *big.Rat { - - if isJSONNumber(what) { - number := what.(json.Number) - float64Value, success := new(big.Rat).SetString(string(number)) - if success { - return float64Value - } - } - - return nil - -} - -func convertDocumentNode(val interface{}) interface{} { - - if lval, ok := val.([]interface{}); ok { - - res := []interface{}{} - for _, v := range lval { - res = append(res, convertDocumentNode(v)) - } - - return res - - } - - if mval, ok := val.(map[interface{}]interface{}); ok { - - res := map[string]interface{}{} - - for k, v := range mval { - res[k.(string)] = convertDocumentNode(v) - } - - return res - - } - - return val -} diff --git a/tools/vendor/github.com/xeipuuv/gojsonschema/validation.go b/tools/vendor/github.com/xeipuuv/gojsonschema/validation.go deleted file mode 100644 index 74091bca1..000000000 --- a/tools/vendor/github.com/xeipuuv/gojsonschema/validation.go +++ /dev/null @@ -1,858 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Extends Schema and subSchema, implements the validation phase. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "encoding/json" - "math/big" - "reflect" - "regexp" - "strconv" - "strings" - "unicode/utf8" -) - -// Validate loads and validates a JSON schema -func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { - // load schema - schema, err := NewSchema(ls) - if err != nil { - return nil, err - } - return schema.Validate(ld) -} - -// Validate loads and validates a JSON document -func (v *Schema) Validate(l JSONLoader) (*Result, error) { - root, err := l.LoadJSON() - if err != nil { - return nil, err - } - return v.validateDocument(root), nil -} - -func (v *Schema) validateDocument(root interface{}) *Result { - result := &Result{} - context := NewJsonContext(STRING_CONTEXT_ROOT, nil) - v.rootSchema.validateRecursive(v.rootSchema, root, result, context) - return result -} - -func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result { - result := &Result{} - v.validateRecursive(v, document, result, context) - return result -} - -// Walker function to validate the json recursively against the subSchema -func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateRecursive %s", context.String()) - internalLog(" %v", currentNode) - } - - // Handle true/false schema as early as possible as all other fields will be nil - if currentSubSchema.pass != nil { - if !*currentSubSchema.pass { - result.addInternalError( - new(FalseError), - context, - currentNode, - ErrorDetails{}, - ) - } - return - } - - // Handle referenced schemas, returns directly when a $ref is found - if currentSubSchema.refSchema != nil { - v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) - return - } - - // Check for null value - if currentNode == nil { - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_NULL, - }, - ) - return - } - - currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) - v.validateCommon(currentSubSchema, currentNode, result, context) - - } else { // Not a null value - - if isJSONNumber(currentNode) { - - value := currentNode.(json.Number) - - isInt := checkJSONInteger(value) - - validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER)) - - if currentSubSchema.types.IsTyped() && !validType { - - givenType := TYPE_INTEGER - if !isInt { - givenType = TYPE_NUMBER - } - - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": givenType, - }, - ) - return - } - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - } else { - - rValue := reflect.ValueOf(currentNode) - rKind := rValue.Kind() 
- - switch rKind { - - // Slice => JSON array - - case reflect.Slice: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_ARRAY, - }, - ) - return - } - - castCurrentNode := currentNode.([]interface{}) - - currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) - - v.validateArray(currentSubSchema, castCurrentNode, result, context) - v.validateCommon(currentSubSchema, castCurrentNode, result, context) - - // Map => JSON object - - case reflect.Map: - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_OBJECT, - }, - ) - return - } - - castCurrentNode, ok := currentNode.(map[string]interface{}) - if !ok { - castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) - } - - currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) - - v.validateObject(currentSubSchema, castCurrentNode, result, context) - v.validateCommon(currentSubSchema, castCurrentNode, result, context) - - for _, pSchema := range currentSubSchema.propertiesChildren { - nextNode, ok := castCurrentNode[pSchema.property] - if ok { - subContext := NewJsonContext(pSchema.property, context) - v.validateRecursive(pSchema, nextNode, result, subContext) - } - } - - // Simple JSON values : string, number, boolean - - case reflect.Bool: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_BOOLEAN, - }, - ) - return - } - - value := currentNode.(bool) - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - case reflect.String: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_STRING, - }, - ) - return - } - - value := currentNode.(string) - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - } - - } - - } - - result.incrementScore() -} - -// Different kinds of validation there, subSchema / common / array / object / string... 
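// Illustrative example of the combinators handled below (sample schema, not
// from the original source): against {"oneOf": [{"multipleOf": 3},
// {"multipleOf": 5}]} the value 9 is valid (exactly one branch matches),
// while 15 fails (both branches match) and 7 fails (neither does); "anyOf"
// would accept both 9 and 15, and "allOf" would accept only 15.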
-func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateSchema %s", context.String()) - internalLog(" %v", currentNode) - } - - if len(currentSubSchema.anyOf) > 0 { - - validatedAnyOf := false - var bestValidationResult *Result - - for _, anyOfSchema := range currentSubSchema.anyOf { - if !validatedAnyOf { - validationResult := anyOfSchema.subValidateWithContext(currentNode, context) - validatedAnyOf = validationResult.Valid() - - if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { - bestValidationResult = validationResult - } - } - } - if !validatedAnyOf { - - result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) - - if bestValidationResult != nil { - // add error messages of closest matching subSchema as - // that's probably the one the user was trying to match - result.mergeErrors(bestValidationResult) - } - } - } - - if len(currentSubSchema.oneOf) > 0 { - - nbValidated := 0 - var bestValidationResult *Result - - for _, oneOfSchema := range currentSubSchema.oneOf { - validationResult := oneOfSchema.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - nbValidated++ - } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { - bestValidationResult = validationResult - } - } - - if nbValidated != 1 { - - result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) - - if nbValidated == 0 { - // add error messages of closest matching subSchema as - // that's probably the one the user was trying to match - result.mergeErrors(bestValidationResult) - } - } - - } - - if len(currentSubSchema.allOf) > 0 { - nbValidated := 0 - - for _, allOfSchema := range currentSubSchema.allOf { - validationResult := allOfSchema.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - nbValidated++ - } - result.mergeErrors(validationResult) - } - - if nbValidated != len(currentSubSchema.allOf) { - result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) - } - } - - if currentSubSchema.not != nil { - validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{}) - } - } - - if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { - if isKind(currentNode, reflect.Map) { - for elementKey := range currentNode.(map[string]interface{}) { - if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { - switch dependency := dependency.(type) { - - case []string: - for _, dependOnKey := range dependency { - if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { - result.addInternalError( - new(MissingDependencyError), - context, - currentNode, - ErrorDetails{"dependency": dependOnKey}, - ) - } - } - - case *subSchema: - dependency.validateRecursive(dependency, currentNode, result, context) - } - } - } - } - } - - if currentSubSchema._if != nil { - validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context) - if currentSubSchema._then != nil && validationResultIf.Valid() { - validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context) - if !validationResultThen.Valid() { - 
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{}) - result.mergeErrors(validationResultThen) - } - } - if currentSubSchema._else != nil && !validationResultIf.Valid() { - validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context) - if !validationResultElse.Valid() { - result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{}) - result.mergeErrors(validationResultElse) - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateCommon %s", context.String()) - internalLog(" %v", value) - } - - // const: - if currentSubSchema._const != nil { - vString, err := marshalWithoutNumber(value) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) - } - if *vString != *currentSubSchema._const { - result.addInternalError(new(ConstError), - context, - value, - ErrorDetails{ - "allowed": *currentSubSchema._const, - }, - ) - } - } - - // enum: - if len(currentSubSchema.enum) > 0 { - vString, err := marshalWithoutNumber(value) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) - } - if !isStringInSlice(currentSubSchema.enum, *vString) { - result.addInternalError( - new(EnumError), - context, - value, - ErrorDetails{ - "allowed": strings.Join(currentSubSchema.enum, ", "), - }, - ) - } - } - - result.incrementScore() -} - -func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateArray %s", context.String()) - internalLog(" %v", value) - } - - nbValues := len(value) - - // TODO explain - if currentSubSchema.itemsChildrenIsSingleSchema { - for i := range value { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } else { - if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { - - nbItems := len(currentSubSchema.itemsChildren) - - // while we have both schemas and values, check them against each other - for i := 0; i != nbItems && i != nbValues; i++ { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - - if nbItems < nbValues { - // we have less schemas than elements in the instance array, - // but that might be ok if "additionalItems" is specified. 
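// Hypothetical example: with "items": [{"type": "string"}, {"type": "number"}]
// and the instance ["a", 1, true], the third element is checked only against
// "additionalItems": a value of false rejects it outright, while a schema such
// as {"type": "boolean"} validates it.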
- - switch currentSubSchema.additionalItems.(type) { - case bool: - if !currentSubSchema.additionalItems.(bool) { - result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) - } - case *subSchema: - additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) - for i := nbItems; i != nbValues; i++ { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } - } - } - } - - // minItems & maxItems - if currentSubSchema.minItems != nil { - if nbValues < int(*currentSubSchema.minItems) { - result.addInternalError( - new(ArrayMinItemsError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minItems}, - ) - } - } - if currentSubSchema.maxItems != nil { - if nbValues > int(*currentSubSchema.maxItems) { - result.addInternalError( - new(ArrayMaxItemsError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxItems}, - ) - } - } - - // uniqueItems: - if currentSubSchema.uniqueItems { - var stringifiedItems = make(map[string]int) - for j, v := range value { - vString, err := marshalWithoutNumber(v) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err}) - } - if i, ok := stringifiedItems[*vString]; ok { - result.addInternalError( - new(ItemsMustBeUniqueError), - context, - value, - ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j}, - ) - } - stringifiedItems[*vString] = j - } - } - - // contains: - - if currentSubSchema.contains != nil { - validatedOne := false - var bestValidationResult *Result - - for i, v := range value { - subContext := NewJsonContext(strconv.Itoa(i), context) - - validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext) - if validationResult.Valid() { - validatedOne = true - break - } else { - if bestValidationResult == nil || validationResult.score > bestValidationResult.score { - bestValidationResult = validationResult - } - } - } - if !validatedOne { - result.addInternalError( - new(ArrayContainsError), - context, - value, - ErrorDetails{}, - ) - if bestValidationResult != nil { - result.mergeErrors(bestValidationResult) - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateObject %s", context.String()) - internalLog(" %v", value) - } - - // minProperties & maxProperties: - if currentSubSchema.minProperties != nil { - if len(value) < int(*currentSubSchema.minProperties) { - result.addInternalError( - new(ArrayMinPropertiesError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minProperties}, - ) - } - } - if currentSubSchema.maxProperties != nil { - if len(value) > int(*currentSubSchema.maxProperties) { - result.addInternalError( - new(ArrayMaxPropertiesError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxProperties}, - ) - } - } - - // required: - for _, requiredProperty := range currentSubSchema.required { - _, ok := value[requiredProperty] - if ok { - result.incrementScore() - } else { - result.addInternalError( - new(RequiredError), - context, - value, - ErrorDetails{"property": requiredProperty}, - ) - } - } - - // additionalProperty & patternProperty: - for pk := range value { - - // Check whether this property is described by "properties" - found := false - for _, spValue := range 
currentSubSchema.propertiesChildren { - if pk == spValue.property { - found = true - } - } - - // Check whether this property is described by "patternProperties" - ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) - - // If it is described by neither "properties" nor "patternProperties", it must pass "additionalProperties" - if !found && !ppMatch { - switch ap := currentSubSchema.additionalProperties.(type) { - case bool: - // Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema - if !ap { - result.addInternalError( - new(AdditionalPropertyNotAllowedError), - context, - value[pk], - ErrorDetails{"property": pk}, - ) - - } - case *subSchema: - validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context)) - result.mergeErrors(validationResult) - } - } - } - - // propertyNames: - if currentSubSchema.propertyNames != nil { - for pk := range value { - validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context) - if !validationResult.Valid() { - result.addInternalError(new(InvalidPropertyNameError), - context, - value, ErrorDetails{ - "property": pk, - }) - result.mergeErrors(validationResult) - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool { - - if internalLogEnabled { - internalLog("validatePatternProperty %s", context.String()) - internalLog(" %s %v", key, value) - } - - validated := false - - for pk, pv := range currentSubSchema.patternProperties { - if matches, _ := regexp.MatchString(pk, key); matches { - validated = true - subContext := NewJsonContext(key, context) - validationResult := pv.subValidateWithContext(value, subContext) - result.mergeErrors(validationResult) - } - } - - if !validated { - return false - } - - result.incrementScore() - return true -} - -func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { - - // Ignore JSON numbers - if isJSONNumber(value) { - return - } - - // Ignore non-strings - if !isKind(value, reflect.String) { - return - } - - if internalLogEnabled { - internalLog("validateString %s", context.String()) - internalLog(" %v", value) - } - - stringValue := value.(string) - - // minLength & maxLength: - if currentSubSchema.minLength != nil { - if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { - result.addInternalError( - new(StringLengthGTEError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minLength}, - ) - } - } - if currentSubSchema.maxLength != nil { - if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { - result.addInternalError( - new(StringLengthLTEError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxLength}, - ) - } - } - - // pattern: - if currentSubSchema.pattern != nil { - if !currentSubSchema.pattern.MatchString(stringValue) { - result.addInternalError( - new(DoesNotMatchPatternError), - context, - value, - ErrorDetails{"pattern": currentSubSchema.pattern}, - ) - - } - } - - // format - if currentSubSchema.format != "" { - if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { - result.addInternalError( - new(DoesNotMatchFormatError), - context, - value, - ErrorDetails{"format": currentSubSchema.format}, - ) - } - } - - result.incrementScore() -} - -func (v *subSchema) 
validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { - - // Ignore non numbers - if !isJSONNumber(value) { - return - } - - if internalLogEnabled { - internalLog("validateNumber %s", context.String()) - internalLog(" %v", value) - } - - number := value.(json.Number) - float64Value, _ := new(big.Rat).SetString(string(number)) - - // multipleOf: - if currentSubSchema.multipleOf != nil { - if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() { - result.addInternalError( - new(MultipleOfError), - context, - number, - ErrorDetails{ - "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf), - }, - ) - } - } - - //maximum & exclusiveMaximum: - if currentSubSchema.maximum != nil { - if float64Value.Cmp(currentSubSchema.maximum) == 1 { - result.addInternalError( - new(NumberLTEError), - context, - number, - ErrorDetails{ - "max": new(big.Float).SetRat(currentSubSchema.maximum), - }, - ) - } - } - if currentSubSchema.exclusiveMaximum != nil { - if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 { - result.addInternalError( - new(NumberLTError), - context, - number, - ErrorDetails{ - "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum), - }, - ) - } - } - - //minimum & exclusiveMinimum: - if currentSubSchema.minimum != nil { - if float64Value.Cmp(currentSubSchema.minimum) == -1 { - result.addInternalError( - new(NumberGTEError), - context, - number, - ErrorDetails{ - "min": new(big.Float).SetRat(currentSubSchema.minimum), - }, - ) - } - } - if currentSubSchema.exclusiveMinimum != nil { - if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 { - result.addInternalError( - new(NumberGTError), - context, - number, - ErrorDetails{ - "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum), - }, - ) - } - } - - // format - if currentSubSchema.format != "" { - if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { - result.addInternalError( - new(DoesNotMatchFormatError), - context, - value, - ErrorDetails{"format": currentSubSchema.format}, - ) - } - } - - result.incrementScore() -} diff --git a/tools/vendor/go.etcd.io/bbolt/.gitignore b/tools/vendor/go.etcd.io/bbolt/.gitignore index 9fa948ebf..ed4d259db 100644 --- a/tools/vendor/go.etcd.io/bbolt/.gitignore +++ b/tools/vendor/go.etcd.io/bbolt/.gitignore @@ -6,5 +6,7 @@ cover.out cover-*.out /.idea *.iml +/bbolt /cmd/bbolt/bbolt +.DS_Store diff --git a/tools/vendor/go.etcd.io/bbolt/.go-version b/tools/vendor/go.etcd.io/bbolt/.go-version index 013173af5..7bdcec52d 100644 --- a/tools/vendor/go.etcd.io/bbolt/.go-version +++ b/tools/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.22.6 +1.23.12 diff --git a/tools/vendor/go.etcd.io/bbolt/Makefile b/tools/vendor/go.etcd.io/bbolt/Makefile index 214077974..f5a6703a0 100644 --- a/tools/vendor/go.etcd.io/bbolt/Makefile +++ b/tools/vendor/go.etcd.io/bbolt/Makefile @@ -1,6 +1,7 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" +GOFILES = $(shell find . 
-name \*.go) TESTFLAGS_RACE=-race=false ifdef ENABLE_RACE @@ -13,9 +14,26 @@ ifdef CPU endif TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) +TESTFLAGS_TIMEOUT=30m +ifdef TIMEOUT + TESTFLAGS_TIMEOUT=$(TIMEOUT) +endif + +TESTFLAGS_ENABLE_STRICT_MODE=false +ifdef ENABLE_STRICT_MODE + TESTFLAGS_ENABLE_STRICT_MODE=$(ENABLE_STRICT_MODE) +endif + +.EXPORT_ALL_VARIABLES: +TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE} + .PHONY: fmt fmt: - !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + @echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh" + @!(gofmt -l -s -d ${GOFILES} | grep '[a-z]') + + @echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh" + @!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]') .PHONY: lint lint: @@ -24,21 +42,23 @@ lint: .PHONY: test test: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt .PHONY: coverage coverage: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ + TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-hashmap.out -covermode atomic @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v -timeout 30m \ + TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-array.out -covermode atomic BOLT_CMD=bbolt @@ -55,7 +75,7 @@ gofail-enable: install-gofail gofail enable . .PHONY: gofail-disable -gofail-disable: +gofail-disable: install-gofail gofail disable . 
.PHONY: install-gofail @@ -65,12 +85,24 @@ install-gofail: .PHONY: test-failpoint test-failpoint: @echo "[failpoint] hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint @echo "[failpoint] array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint - -.PHONY: test-robustness # Running robustness tests requires root permission -test-robustness: - go test -v ${TESTFLAGS} ./tests/dmflakey -test.root - go test -v ${TESTFLAGS} ./tests/robustness -test.root + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + +.PHONY: test-robustness # Running robustness tests requires root permission for now +# TODO: Remove sudo once we fully migrate to the prow infrastructure +test-robustness: gofail-enable build + sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root + +.PHONY: test-benchmark-compare +# Runs benchmark tests on the current git ref and the given REF, and compares +# the two. +test-benchmark-compare: install-benchstat + @git fetch + ./scripts/compare_benchmarks.sh $(REF) + +.PHONY: install-benchstat +install-benchstat: + go install golang.org/x/perf/cmd/benchstat@latest diff --git a/tools/vendor/go.etcd.io/bbolt/OWNERS b/tools/vendor/go.etcd.io/bbolt/OWNERS new file mode 100644 index 000000000..91f168a79 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - ahrtr # Benjamin Wang + - serathius # Marek Siarkowicz + - ptabor # Piotr Tabor + - spzala # Sahdev Zala +reviewers: + - fuweid # Wei Fu + - tjungblu # Thomas Jungblut diff --git a/tools/vendor/go.etcd.io/bbolt/README.md b/tools/vendor/go.etcd.io/bbolt/README.md index 495a93ef8..f365e51e3 100644 --- a/tools/vendor/go.etcd.io/bbolt/README.md +++ b/tools/vendor/go.etcd.io/bbolt/README.md @@ -1,10 +1,8 @@ bbolt ===== -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Go Report Card](https://goreportcard.com/badge/go.etcd.io/bbolt?style=flat-square)](https://goreportcard.com/report/go.etcd.io/bbolt) +[![Go Reference](https://pkg.go.dev/badge/go.etcd.io/bbolt.svg)](https://pkg.go.dev/go.etcd.io/bbolt) [![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) [![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) @@ -71,13 +69,14 @@ New minor versions may add additional features to the API. 
- [LMDB](#lmdb) - [Caveats & Limitations](#caveats--limitations) - [Reading the Source](#reading-the-source) + - [Known Issues](#known-issues) - [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started ### Installing -To start using Bolt, install Go and run `go get`: +To start using `bbolt`, install Go and run `go get`: ```sh $ go get go.etcd.io/bbolt@latest ``` @@ -103,7 +102,7 @@ To use bbolt as an embedded key-value store, import as: ```go import bolt "go.etcd.io/bbolt" -db, err := bolt.Open(path, 0666, nil) +db, err := bolt.Open(path, 0600, nil) if err != nil { return err } @@ -298,6 +297,17 @@ db.Update(func(tx *bolt.Tx) error { }) ``` +You can retrieve an existing bucket using the `Tx.Bucket()` function: +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + if b == nil { + return errors.New("bucket does not exist") + } + return nil +}) +``` + You can also create a bucket only if it doesn't exist by using the `Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this function for all your top-level buckets after you open your database so you can @@ -305,6 +315,17 @@ guarantee that they exist for future transactions. To delete a bucket, simply call the `Tx.DeleteBucket()` function. +You can also iterate over all existing top-level buckets with `Tx.ForEach()`: + +```go +db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, b *bolt.Bucket) error { + fmt.Println(string(name)) + return nil + }) + return nil +}) +``` ### Using key/value pairs @@ -336,7 +357,17 @@ exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. -Use the `Bucket.Delete()` function to delete a key from the bucket. +Use the `Bucket.Delete()` function to delete a key from the bucket: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Delete([]byte("answer")) + return err +}) +``` + +This will delete the key `answer` from the bucket `MyBucket`. Please note that values returned from `Get()` are only valid while the transaction is open. If you need to use a value outside of the transaction @@ -654,7 +685,7 @@ uses a shared lock to allow multiple processes to read from the database but it will block any processes from opening the database in read-write mode. ```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true}) if err != nil { log.Fatal(err) } @@ -890,7 +921,7 @@ The best places to start are the main entry points into Bolt: - `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket + where the key & value will be written. Once the position is found, the bucket materializes the underlying page and the page's parent pages into memory as "nodes". These nodes are where mutations occur during read-write transactions. These changes get flushed to disk during commit. @@ -919,6 +950,21 @@ The best places to start are the main entry points into Bolt: If you have additional notes that could be helpful for others, please submit them via pull request.
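To make the `Get()` semantics described above concrete, here is a minimal sketch (it assumes `db` is an open `*bolt.DB` as in the earlier examples; the bucket and key names are only illustrative): per the README text, a key stored with a zero-length value reads back as an empty, non-nil slice, while a missing key reads back as `nil`.

```go
db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
	if err != nil {
		return err
	}
	// Store a key with a zero-length value; the key exists afterwards.
	return b.Put([]byte("present"), []byte{})
})

db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	v := b.Get([]byte("present"))
	fmt.Println(v != nil, len(v))                // true 0: key exists with an empty value
	fmt.Println(b.Get([]byte("missing")) == nil) // true: key does not exist
	return nil
})
```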
+## Known Issues + +- bbolt might run into a data corruption issue on Linux when the feature + [ext4: fast commit](https://lwn.net/Articles/842385/), which was introduced in + Linux kernel version v5.10, is enabled. The fixes to the issue were included in + Linux kernel version v5.17; please refer to the links below: + + * [ext4: fast commit may miss tracking unwritten range during ftruncate](https://lore.kernel.org/linux-ext4/20211223032337.5198-3-yinxin.x@bytedance.com/) + * [ext4: fast commit may not fallback for ineligible commit](https://lore.kernel.org/lkml/202201091544.W5HHEXAp-lkp@intel.com/T/#ma0768815e4b5f671e9e451d578256ef9a76fe30e) + * [ext4 updates for 5.17](https://lore.kernel.org/lkml/YdyxjTFaLWif6BCM@mit.edu/) + + Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562. + +- Writing a value with a length of 0 will always result in reading back an empty `[]byte{}` value. + Please refer to [issues/726#issuecomment-2061694802](https://github.com/etcd-io/bbolt/issues/726#issuecomment-2061694802). ## Other Projects Using Bolt @@ -934,13 +980,16 @@ Below is a list of public, open source projects that use Bolt: * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. * [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [bstore](https://github.com/mjl-/bstore) - Database library storing Go values, with referential/unique/nonzero constraints, indices, automatic schema management with struct tags, and a query API. * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. +* [Buildkit](https://github.com/moby/buildkit) - concurrent, cache-efficient, and Dockerfile-agnostic builder toolkit * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Containerd](https://github.com/containerd/containerd) - An open and reliable container runtime * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. @@ -964,6 +1013,7 @@ Below is a list of public, open source projects that use Bolt: * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
+* [Portainer](https://github.com/portainer/portainer) - A lightweight service delivery platform for containerized applications that can be used to manage Docker, Swarm, Kubernetes and ACI environments. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_386.go b/tools/vendor/go.etcd.io/bbolt/bolt_386.go deleted file mode 100644 index aee25960f..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/tools/vendor/go.etcd.io/bbolt/bolt_aix.go similarity index 94% rename from tools/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go rename to tools/vendor/go.etcd.io/bbolt/bolt_aix.go index babad6578..596e54060 100644 --- a/tools/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go +++ b/tools/vendor/go.etcd.io/bbolt/bolt_aix.go @@ -1,3 +1,5 @@ +//go:build aix + package bbolt import ( @@ -7,6 +9,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. @@ -67,7 +71,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_amd64.go b/tools/vendor/go.etcd.io/bbolt/bolt_amd64.go deleted file mode 100644 index 5dd8f3f2a..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_android.go b/tools/vendor/go.etcd.io/bbolt/bolt_android.go new file mode 100644 index 000000000..ac64fcf5b --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/bolt_android.go @@ -0,0 +1,92 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain the lock (shared or exclusive, per lockType). + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = unix.Madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_arm.go b/tools/vendor/go.etcd.io/bbolt/bolt_arm.go deleted file mode 100644 index aee25960f..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_arm64.go b/tools/vendor/go.etcd.io/bbolt/bolt_arm64.go deleted file mode 100644 index 447bc1973..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build arm64 -// +build arm64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_loong64.go b/tools/vendor/go.etcd.io/bbolt/bolt_loong64.go deleted file mode 100644 index 31c17c1d0..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_loong64.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build loong64 -// +build loong64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/tools/vendor/go.etcd.io/bbolt/bolt_mips64x.go deleted file mode 100644 index a9385beb6..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build mips64 || mips64le -// +build mips64 mips64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x8000000000 // 512GB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/tools/vendor/go.etcd.io/bbolt/bolt_mipsx.go deleted file mode 100644 index ed734ff7f..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build mips || mipsle -// +build mips mipsle - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x40000000 // 1GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_ppc.go b/tools/vendor/go.etcd.io/bbolt/bolt_ppc.go deleted file mode 100644 index e403f57d8..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build ppc -// +build ppc - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/tools/vendor/go.etcd.io/bbolt/bolt_ppc64.go deleted file mode 100644 index fcd86529f..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build ppc64 -// +build ppc64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/tools/vendor/go.etcd.io/bbolt/bolt_ppc64le.go deleted file mode 100644 index 20234aca4..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build ppc64le -// +build ppc64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/tools/vendor/go.etcd.io/bbolt/bolt_riscv64.go deleted file mode 100644 index 060f30c73..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build riscv64 -// +build riscv64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_s390x.go b/tools/vendor/go.etcd.io/bbolt/bolt_s390x.go deleted file mode 100644 index 92d2755ad..000000000 --- a/tools/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build s390x -// +build s390x - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/tools/vendor/go.etcd.io/bbolt/bolt_solaris.go similarity index 95% rename from tools/vendor/go.etcd.io/bbolt/bolt_unix_aix.go rename to tools/vendor/go.etcd.io/bbolt/bolt_solaris.go index 6dea4294d..56b2ccab4 100644 --- a/tools/vendor/go.etcd.io/bbolt/bolt_unix_aix.go +++ b/tools/vendor/go.etcd.io/bbolt/bolt_solaris.go @@ -1,6 +1,3 @@ -//go:build aix -// +build aix - package bbolt import ( @@ -10,6 +7,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. @@ -70,7 +69,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_unix.go b/tools/vendor/go.etcd.io/bbolt/bolt_unix.go index 757ae4d1a..f68e721f5 100644 --- a/tools/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/tools/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -1,5 +1,4 @@ -//go:build !windows && !plan9 && !solaris && !aix -// +build !windows,!plan9,!solaris,!aix +//go:build !windows && !plan9 && !solaris && !aix && !android package bbolt @@ -10,6 +9,9 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. @@ -36,7 +38,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. @@ -66,7 +68,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/tools/vendor/go.etcd.io/bbolt/bolt_windows.go b/tools/vendor/go.etcd.io/bbolt/bolt_windows.go index e5dde2745..e99a0d621 100644 --- a/tools/vendor/go.etcd.io/bbolt/bolt_windows.go +++ b/tools/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -8,6 +8,9 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // fdatasync flushes written data to a file descriptor. @@ -42,7 +45,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. @@ -70,7 +73,7 @@ func mmap(db *DB, sz int) error { return fmt.Errorf("truncate: %s", err) } sizehi = uint32(sz >> 32) - sizelo = uint32(sz) & 0xffffffff + sizelo = uint32(sz) } // Open a file mapping handle. @@ -93,7 +96,7 @@ func mmap(db *DB, sz int) error { } // Convert to a byte array.
- db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(addr)) db.datasz = sz return nil diff --git a/tools/vendor/go.etcd.io/bbolt/boltsync_unix.go b/tools/vendor/go.etcd.io/bbolt/boltsync_unix.go index 81e09a531..27face752 100644 --- a/tools/vendor/go.etcd.io/bbolt/boltsync_unix.go +++ b/tools/vendor/go.etcd.io/bbolt/boltsync_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 && !linux && !openbsd -// +build !windows,!plan9,!linux,!openbsd package bbolt diff --git a/tools/vendor/go.etcd.io/bbolt/bucket.go b/tools/vendor/go.etcd.io/bbolt/bucket.go index f3533d344..6371ace97 100644 --- a/tools/vendor/go.etcd.io/bbolt/bucket.go +++ b/tools/vendor/go.etcd.io/bbolt/bucket.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "unsafe" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) const ( @@ -14,8 +17,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - const ( minFillPercent = 0.1 maxFillPercent = 1.0 @@ -27,12 +28,12 @@ const DefaultFillPercent = 0.5 // Bucket represents a collection of key/value pairs inside the database. type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache + *common.InBucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *common.Page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[common.Pgid]*node // node cache // Sets the threshold for filling nodes when they split. By default, // the bucket will fill to 50% but it can be useful to increase this @@ -42,21 +43,12 @@ type Bucket struct { FillPercent float64 } -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - // newBucket returns a new bucket associated with a transaction. func newBucket(tx *Tx) Bucket { var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} if tx.writable { b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) + b.nodes = make(map[common.Pgid]*node) } return b } @@ -67,8 +59,8 @@ func (b *Bucket) Tx() *Tx { } // Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root +func (b *Bucket) Root() common.Pgid { + return b.RootPage() } // Writable returns whether the bucket is writable. @@ -105,7 +97,7 @@ func (b *Bucket) Bucket(name []byte) *Bucket { k, v, flags := c.seek(name) // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + if !bytes.Equal(name, k) || (flags&common.BucketLeafFlag) == 0 { return nil } @@ -125,8 +117,8 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // Unaligned access requires a copy to be made. 
const unalignedMask = unsafe.Alignof(struct { - bucket - page + common.InBucket + common.Page }{}) - 1 unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 if unaligned { @@ -136,15 +128,15 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = &common.InBucket{} + *child.InBucket = *(*common.InBucket)(unsafe.Pointer(&value[0])) } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = (*common.InBucket)(unsafe.Pointer(&value[0])) } // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + if child.RootPage() == 0 { + child.page = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) } return &child @@ -153,13 +145,23 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { +func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket %q successfully", key) + } + }() + } if b.tx.db == nil { - return nil, ErrTxClosed + return nil, errors.ErrTxClosed } else if !b.tx.writable { - return nil, ErrTxNotWritable + return nil, errors.ErrTxNotWritable } else if len(key) == 0 { - return nil, ErrBucketNameRequired + return nil, errors.ErrBucketNameRequired } // Insert into node. @@ -173,21 +175,21 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // Return an error if there is an existing key. if bytes.Equal(newKey, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists + if (flags & common.BucketLeafFlag) != 0 { + return nil, errors.ErrBucketExists } - return nil, ErrIncompatibleValue + return nil, errors.ErrIncompatibleValue } // Create empty, inline bucket. var bucket = Bucket{ - bucket: &bucket{}, + InBucket: &common.InBucket{}, rootNode: &node{isLeaf: true}, FillPercent: DefaultFillPercent, } var value = bucket.write() - c.node().put(newKey, newKey, value, 0, bucketLeafFlag) + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket @@ -200,39 +202,108 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. 
-func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err +func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket if not exist %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket if not exist %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket if not exist %q successfully", key) + } + }() + } + + if b.tx.db == nil { + return nil, errors.ErrTxClosed + } else if !b.tx.writable { + return nil, errors.ErrTxNotWritable + } else if len(key) == 0 { + return nil, errors.ErrBucketNameRequired + } + + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, which would prevent it from being allocated on the stack. + newKey := cloneBytes(key) + + if b.buckets != nil { + if child := b.buckets[string(newKey)]; child != nil { + return child, nil + } + } + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if there is an existing non-bucket key. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(newKey)] = child + } + + return child, nil + } + return nil, errors.ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + InBucket: &common.InBucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, } - return child, nil + var value = bucket.write() + + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(newKey), nil } // DeleteBucket deletes a bucket at the given key. // Returns an error if the bucket does not exist, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { +func (b *Bucket) DeleteBucket(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting bucket %q failed: %v", key, err) + } else { + lg.Debugf("Deleting bucket %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + return errors.ErrIncompatibleValue } // Recursively delete all child buckets.
- child := b.Bucket(key) - err := child.ForEachBucket(func(k []byte) error { + child := b.Bucket(newKey) + err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } @@ -243,7 +314,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { } // Remove cached copy. - delete(b.buckets, string(key)) + delete(b.buckets, string(newKey)) // Release all bucket pages to freelist. child.nodes = nil @@ -251,19 +322,119 @@ func (b *Bucket) DeleteBucket(key []byte) error { child.free() // Delete the node if we have a matching key. - c.node().del(key) + c.node().del(newKey) return nil } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. the key already exists in the destination bucket; +// 3. the key represents a non-bucket value; +// 4. or the source and destination buckets are the same. +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { + lg := b.tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Moving bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", key, err) + } else { + lg.Debugf("Moving bucket %q successfully", key) + } + }() + } + + if b.tx.db == nil || dstBucket.tx.db == nil { + return errors.ErrTxClosed + } else if !b.Writable() || !dstBucket.Writable() { + return errors.ErrTxNotWritable + } + + if b.tx.db.Path() != dstBucket.tx.db.Path() || b.tx != dstBucket.tx { + lg.Errorf("The source and target buckets are not in the same db file, source bucket in %s and target bucket in %s", b.tx.db.Path(), dstBucket.tx.db.Path()) + return errors.ErrDifferentDB + } + + newKey := cloneBytes(key) + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + lg.Errorf("An incompatible key %s exists in the source bucket", newKey) + return errors.ErrIncompatibleValue + } + + // Return an error if the source bucket and the + // destination bucket are actually the same bucket. + if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b, dstBucket) + return errors.ErrSameBuckets + } + + // check whether the key already exists in the destination bucket + curDst := dstBucket.Cursor() + k, _, flags = curDst.seek(newKey) + + // Return an error if there is an existing key in the destination bucket. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrBucketExists + } + lg.Errorf("An incompatible key %s exists in the target bucket", newKey) + return errors.ErrIncompatibleValue + } + + // remove the sub-bucket from the source bucket + delete(b.buckets, string(newKey)) + c.node().del(newKey) + + // add the sub-bucket to the destination bucket + newValue := cloneBytes(v) + curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) + + return nil +} + +// Inspect returns the structure of the bucket.
+func (b *Bucket) Inspect() BucketStructure { + return b.recursivelyInspect([]byte("root")) +} + +func (b *Bucket) recursivelyInspect(name []byte) BucketStructure { + bs := BucketStructure{Name: string(name)} + + keyN := 0 + c := b.Cursor() + for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { + if flags&common.BucketLeafFlag != 0 { + childBucket := b.Bucket(k) + childBS := childBucket.recursivelyInspect(k) + bs.Children = append(bs.Children, childBS) + } else { + keyN++ + } + } + bs.KeyN = keyN + + return bs +} + // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. +// The returned memory is owned by bbolt and must never be modified; writing to this memory might corrupt the database. func (b *Bucket) Get(key []byte) []byte { k, v, flags := b.Cursor().seek(key) // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { + if (flags & common.BucketLeafFlag) != 0 { return nil } @@ -278,17 +449,27 @@ func (b *Bucket) Get(key []byte) []byte { // If the key exists then its previous value will be overwritten. // Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { +func (b *Bucket) Put(key []byte, value []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Putting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Putting key %q failed: %v", key, err) + } else { + lg.Debugf("Putting key %q successfully", key) + } + }() + } if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } else if len(key) == 0 { - return ErrKeyRequired + return errors.ErrKeyRequired } else if len(key) > MaxKeySize { - return ErrKeyTooLarge + return errors.ErrKeyTooLarge } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge + return errors.ErrValueTooLarge } // Insert into node. @@ -301,8 +482,8 @@ func (b *Bucket) Put(key []byte, value []byte) error { k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value. - if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if bytes.Equal(newKey, k) && (flags&common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // gofail: var beforeBucketPut struct{} @@ -315,11 +496,22 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Delete removes a key from the bucket. // If the key does not exist then nothing is done and a nil error is returned. // Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { +func (b *Bucket) Delete(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting key %q failed: %v", key, err) + } else { + lg.Debugf("Deleting key %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Move cursor to correct position.
@@ -332,8 +524,8 @@ func (b *Bucket) Delete(key []byte) error { } // Return an error if there is an existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // Delete the node if we have a matching key. @@ -343,44 +535,46 @@ func (b *Bucket) Delete(key []byte) error { } // Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } +func (b *Bucket) Sequence() uint64 { + return b.InSequence() +} // SetSequence updates the sequence number for the bucket. func (b *Bucket) SetSequence(v uint64) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Set the sequence. - b.bucket.sequence = v + b.SetInSequence(v) return nil } // NextSequence returns an autoincrementing integer for the bucket. func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { - return 0, ErrTxClosed + return 0, errors.ErrTxClosed } else if !b.Writable() { - return 0, ErrTxNotWritable + return 0, errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil + b.IncSequence() + return b.Sequence(), nil } // ForEach executes a function for each key/value pair in a bucket. @@ -390,7 +584,7 @@ func (b *Bucket) NextSequence() (uint64, error) { // the bucket; this will result in undefined behavior. func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -403,11 +597,11 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error { func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { - if flags&bucketLeafFlag != 0 { + if flags&common.BucketLeafFlag != 0 { if err := fn(k); err != nil { return err } @@ -421,64 +615,64 @@ func (b *Bucket) Stats() BucketStats { var s, subStats BucketStats pageSize := b.tx.db.pageSize s.BucketN += 1 - if b.root == 0 { + if b.RootPage() == 0 { s.InlineBucketN += 1 } - b.forEachPage(func(p *page, depth int, pgstack []pgid) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) + b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) { + if p.IsLeafPage() { + s.KeyN += int(p.Count()) // used totals the used bytes for the page - used := pageHeaderSize + used := common.PageHeaderSize - if p.count != 0 { + if p.Count() != 0 { // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) + used += common.LeafPageElementSize * uintptr(p.Count()-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position // of the last element's key/value equals the total of the sizes // of all previous elements' keys and values.
// It also includes the last element's header. - lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + lastElement := p.LeafPageElement(p.Count() - 1) + used += uintptr(lastElement.Pos() + lastElement.Ksize() + lastElement.Vsize()) } - if b.root == 0 { + if b.RootPage() == 0 { // For inlined bucket just update the inline stats s.InlineBucketInuse += int(used) } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) + s.LeafOverflowN += int(p.Overflow()) // Collect stats from sub-buckets. // Do that by iterating over all element headers // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { + for i := uint16(0); i < p.Count(); i++ { + e := p.LeafPageElement(i) + if (e.Flags() & common.BucketLeafFlag) != 0 { // For any bucket element, open the element value // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) + subStats.Add(b.openBucket(e.Value()).Stats()) } } } - } else if (p.flags & branchPageFlag) != 0 { + } else if p.IsBranchPage() { s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) + lastElement := p.BranchPageElement(p.Count() - 1) // used totals the used bytes for the page // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + used := common.PageHeaderSize + (common.BranchPageElementSize * uintptr(p.Count()-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals // the total of key, value sizes of all previous elements. - used += uintptr(lastElement.pos + lastElement.ksize) + used += uintptr(lastElement.Pos() + lastElement.Ksize()) s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) + s.BranchOverflowN += int(p.Overflow()) } // Keep track of maximum page depth. @@ -499,29 +693,29 @@ func (b *Bucket) Stats() BucketStats { } // forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { +func (b *Bucket) forEachPage(fn func(*common.Page, int, []common.Pgid)) { // If we have an inline page then just use that. if b.page != nil { - fn(b.page, 0, []pgid{b.root}) + fn(b.page, 0, []common.Pgid{b.RootPage()}) return } // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, fn) + b.tx.forEachPage(b.RootPage(), fn) } // forEachPageNode iterates over every page (or node) in a bucket. // This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { +func (b *Bucket) forEachPageNode(fn func(*common.Page, *node, int)) { // If we have an inline page or root node then just use that. if b.page != nil { fn(b.page, nil, 0) return } - b._forEachPageNode(b.root, 0, fn) + b._forEachPageNode(b.RootPage(), 0, fn) } -func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { +func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.Page, *node, int)) { var p, n = b.pageNode(pgId) // Execute function. @@ -529,16 +723,16 @@ func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, in // Recursively loop over children.
if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + b._forEachPageNode(elem.Pgid(), depth+1, fn) } } } else { if !n.isLeaf { for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) + b._forEachPageNode(inode.Pgid(), depth+1, fn) } } } @@ -561,9 +755,9 @@ func (b *Bucket) spill() error { } // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket + value = make([]byte, unsafe.Sizeof(common.InBucket{})) + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *child.InBucket } // Skip writing the bucket if there are no materialized nodes. @@ -577,10 +771,10 @@ func (b *Bucket) spill() error { if !bytes.Equal([]byte(name), k) { panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) } - if flags&bucketLeafFlag == 0 { + if flags&common.BucketLeafFlag == 0 { panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + c.node().put([]byte(name), []byte(name), value, 0, common.BucketLeafFlag) } // Ignore if there's not a materialized root node. @@ -595,16 +789,16 @@ func (b *Bucket) spill() error { b.rootNode = b.rootNode.root() // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + if b.rootNode.pgid >= b.tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.Pgid())) } - b.root = b.rootNode.pgid + b.SetRootPage(b.rootNode.pgid) return nil } // inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. +// and if it contains no subbuckets. Otherwise, returns false. func (b *Bucket) inlineable() bool { var n = b.rootNode @@ -615,11 +809,11 @@ func (b *Bucket) inlineable() bool { // Bucket is not inlineable if it contains subbuckets or if it goes beyond // our threshold for inline bucket size. - var size = pageHeaderSize + var size = common.PageHeaderSize for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + size += common.LeafPageElementSize + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) - if inode.flags&bucketLeafFlag != 0 { + if inode.Flags()&common.BucketLeafFlag != 0 { return false } else if size > b.maxInlineBucketSize() { return false @@ -638,14 +832,14 @@ func (b *Bucket) maxInlineBucketSize() uintptr { func (b *Bucket) write() []byte { // Allocate the appropriate size. var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) + var value = make([]byte, common.BucketHeaderSize+n.size()) // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *b.InBucket // Convert byte slice to a fake page and write the root node. 
- var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + var p = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) n.write(p) return value @@ -662,8 +856,8 @@ func (b *Bucket) rebalance() { } // node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgId pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") +func (b *Bucket) node(pgId common.Pgid, parent *node) *node { + common.Assert(b.nodes != nil, "nodes map expected") // Retrieve node if it's already been created. if n := b.nodes[pgId]; n != nil { @@ -682,6 +876,12 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { var p = b.page if p == nil { p = b.tx.page(pgId) + } else { + // if p isn't nil, then it's an inline bucket. + // The pgId must be 0 in this case. + common.Verify(func() { + common.Assert(pgId == 0, "The page ID (%d) isn't 0 for an inline bucket", pgId) + }) } // Read the page into the node and cache it. @@ -696,19 +896,19 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { // free recursively frees all pages in the bucket. func (b *Bucket) free() { - if b.root == 0 { + if b.RootPage() == 0 { return } var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { + b.forEachPageNode(func(p *common.Page, n *node, _ int) { if p != nil { - tx.db.freelist.free(tx.meta.txid, p) + tx.db.freelist.Free(tx.meta.Txid(), p) } else { n.free() } }) - b.root = 0 + b.SetRootPage(0) } // dereference removes all references to the old mmap. @@ -723,11 +923,11 @@ func (b *Bucket) dereference() { } // pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { +// Otherwise, returns the underlying page. +func (b *Bucket) pageNode(id common.Pgid) (*common.Page, *node) { // Inline buckets have a fake page embedded in their value so treat them // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { + if b.RootPage() == 0 { if id != 0 { panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) } @@ -797,3 +997,9 @@ func cloneBytes(v []byte) []byte { copy(clone, v) return clone } + +type BucketStructure struct { + Name string `json:"name"` // name of the bucket + KeyN int `json:"keyN"` // number of key/value pairs + Children []BucketStructure `json:"buckets,omitempty"` // child buckets +} diff --git a/tools/vendor/go.etcd.io/bbolt/cursor.go b/tools/vendor/go.etcd.io/bbolt/cursor.go index bbfd92a9b..0c1e28c10 100644 --- a/tools/vendor/go.etcd.io/bbolt/cursor.go +++ b/tools/vendor/go.etcd.io/bbolt/cursor.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "sort" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // Cursor represents an iterator that can traverse over all key/value pairs in a bucket @@ -30,9 +33,9 @@ func (c *Cursor) Bucket() *Bucket { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. 
func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.first() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -40,7 +43,7 @@ func (c *Cursor) First() (key []byte, value []byte) { func (c *Cursor) first() (key []byte, value []byte, flags uint32) { c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.goToFirstElementOnTheStack() @@ -51,7 +54,7 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil, flags } return k, v, flags @@ -61,9 +64,9 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) ref := elemRef{page: p, node: n} ref.index = ref.count() - 1 c.stack = append(c.stack, ref) @@ -80,7 +83,7 @@ func (c *Cursor) Last() (key []byte, value []byte) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -90,9 +93,9 @@ func (c *Cursor) Last() (key []byte, value []byte) { // If the cursor is at the end of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -102,9 +105,9 @@ func (c *Cursor) Next() (key []byte, value []byte) { // If the cursor is at the beginning of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.prev() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -115,7 +118,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) { // follow, a nil key is returned. // The returned key and value are only valid for the life of the transaction. 
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.seek(seek) @@ -126,7 +129,7 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { if k == nil { return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { + } else if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -136,15 +139,15 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { // Delete fails if current key/value is a bucket or if the transaction is not writable. func (c *Cursor) Delete() error { if c.bucket.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !c.bucket.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } key, _, flags := c.keyValue() // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } c.node().del(key) @@ -156,7 +159,7 @@ func (c *Cursor) Delete() error { func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] - c.search(seek, c.bucket.root) + c.search(seek, c.bucket.RootPage()) // If this is a bucket then return a nil value. return c.keyValue() @@ -172,11 +175,11 @@ func (c *Cursor) goToFirstElementOnTheStack() { } // Keep adding pages pointing to the first element to the stack. - var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) @@ -193,11 +196,11 @@ func (c *Cursor) last() { } // Keep adding pages pointing to the last element in the stack. - var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) @@ -277,10 +280,10 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { } // search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgId pgid) { +func (c *Cursor) search(key []byte, pgId common.Pgid) { p, n := c.bucket.pageNode(pgId) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + if p != nil && !p.IsBranchPage() && !p.IsLeafPage() { + panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags())) } e := elemRef{page: p, node: n} c.stack = append(c.stack, e) @@ -303,7 +306,7 @@ func (c *Cursor) searchNode(key []byte, n *node) { index := sort.Search(len(n.inodes), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. 
- ret := bytes.Compare(n.inodes[i].key, key) + ret := bytes.Compare(n.inodes[i].Key(), key) if ret == 0 { exact = true } @@ -315,18 +318,18 @@ func (c *Cursor) searchNode(key []byte, n *node) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) + c.search(key, n.inodes[index].Pgid()) } -func (c *Cursor) searchPage(key []byte, p *page) { +func (c *Cursor) searchPage(key []byte, p *common.Page) { // Binary search for the correct range. - inodes := p.branchPageElements() + inodes := p.BranchPageElements() var exact bool - index := sort.Search(int(p.count), func(i int) bool { + index := sort.Search(int(p.Count()), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) + ret := bytes.Compare(inodes[i].Key(), key) if ret == 0 { exact = true } @@ -338,7 +341,7 @@ func (c *Cursor) searchPage(key []byte, p *page) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, inodes[index].pgid) + c.search(key, inodes[index].Pgid()) } // nsearch searches the leaf node on the top of the stack for a key. @@ -349,16 +352,16 @@ func (c *Cursor) nsearch(key []byte) { // If we have a node then search its inodes. if n != nil { index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 + return bytes.Compare(n.inodes[i].Key(), key) != -1 }) e.index = index return } // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 + inodes := p.LeafPageElements() + index := sort.Search(int(p.Count()), func(i int) bool { + return bytes.Compare(inodes[i].Key(), key) != -1 }) e.index = index } @@ -375,17 +378,17 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) { // Retrieve value from node. if ref.node != nil { inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags + return inode.Key(), inode.Value(), inode.Flags() } // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags + elem := ref.page.LeafPageElement(uint16(ref.index)) + return elem.Key(), elem.Value(), elem.Flags() } // node returns the node that the cursor is currently positioned on. func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") // If the top of the stack is a leaf node then just return it. if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { @@ -395,19 +398,19 @@ func (c *Cursor) node() *node { // Start from root and traverse down the hierarchy. var n = c.stack[0].node if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) + n = c.bucket.node(c.stack[0].page.Id(), nil) } for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") + common.Assert(!n.isLeaf, "expected branch node") n = n.childAt(ref.index) } - _assert(n.isLeaf, "expected leaf node") + common.Assert(n.isLeaf, "expected leaf node") return n } // elemRef represents a reference to an element on a given page/node. 
type elemRef struct { - page *page + page *common.Page node *node index int } @@ -417,7 +420,7 @@ func (r *elemRef) isLeaf() bool { if r.node != nil { return r.node.isLeaf } - return (r.page.flags & leafPageFlag) != 0 + return r.page.IsLeafPage() } // count returns the number of inodes or page elements. @@ -425,5 +428,5 @@ func (r *elemRef) count() int { if r.node != nil { return len(r.node.inodes) } - return int(r.page.count) + return int(r.page.Count()) } diff --git a/tools/vendor/go.etcd.io/bbolt/db.go b/tools/vendor/go.etcd.io/bbolt/db.go index 822798e41..622947d9c 100644 --- a/tools/vendor/go.etcd.io/bbolt/db.go +++ b/tools/vendor/go.etcd.io/bbolt/db.go @@ -3,49 +3,28 @@ package bbolt import ( "errors" "fmt" - "hash/fnv" "io" "os" "runtime" - "sort" "sync" "time" "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" + fl "go.etcd.io/bbolt/internal/freelist" ) -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond // FreelistType is the type of the freelist backend type FreelistType string +// TODO(ahrtr): eventually we should (step by step) +// 1. default to `FreelistMapType`; +// 2. remove the `FreelistArrayType`, do not export `FreelistMapType` +// and remove field `FreelistType' from both `DB` and `Options`; const ( // FreelistArrayType indicates backend freelist type is array FreelistArrayType = FreelistType("array") @@ -137,6 +116,8 @@ type DB struct { // Supported only on Unix via mlock/munlock syscalls. Mlock bool + logger Logger + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -144,17 +125,16 @@ type DB struct { // always fails on Windows platform. //nolint dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte + data *[common.MaxMapSize]byte datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta + meta0 *common.Meta + meta1 *common.Meta pageSize int opened bool rwtx *Tx txs []*Tx - freelist *freelist + freelist fl.Interface freelistLoad sync.Once pagePool sync.Pool @@ -191,13 +171,15 @@ func (db *DB) String() string { return fmt.Sprintf("DB<%q>", db.path) } -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. +// Open creates and opens a database at the given path with a given file mode. +// If the file does not exist then it will be created automatically with a given file mode. // Passing in nil options will cause Bolt to open the database with the default options. 
-func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ +// Note: For read/write transactions, ensure the owner has write permission on the created/opened database file, e.g. 0600 +func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { + db = &DB{ opened: true, } + // Set default options if no options are provided. if options == nil { options = DefaultOptions @@ -211,9 +193,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.Mlock = options.Mlock // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize + db.MaxBatchSize = common.DefaultMaxBatchSize + db.MaxBatchDelay = common.DefaultMaxBatchDelay + db.AllocSize = common.DefaultAllocSize + + if options.Logger == nil { + db.logger = getDiscardLogger() + } else { + db.logger = options.Logger + } + + lg := db.Logger() + if lg != discardLogger { + lg.Infof("Opening db file (%s) with mode %s and with options: %s", path, mode, options) + defer func() { + if err != nil { + lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) + } else { + lg.Infof("Opening bbolt db (%s) successfully", path) + } + }() + } flag := os.O_RDWR if options.ReadOnly { @@ -222,6 +222,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // always load free pages in write mode db.PreLoadFreelist = true + flag |= os.O_CREATE } db.openFile = options.OpenFile @@ -230,9 +231,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Open data file and separate sync handler for metadata writes. - var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + if db.file, err = db.openFile(path, flag, mode); err != nil { _ = db.close() + lg.Errorf("failed to open db file (%s): %v", path, err) return nil, err } db.path = db.file.Name() @@ -244,8 +245,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, !db.readOnly, options.Timeout); err != nil { + if err = flock(db, !db.readOnly, options.Timeout); err != nil { _ = db.close() + lg.Errorf("failed to lock db file (%s), readonly: %t, error: %v", path, db.readOnly, err) return nil, err } @@ -254,27 +256,28 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if db.pageSize = options.PageSize; db.pageSize == 0 { // Set the default page size to the OS page size. - db.pageSize = defaultPageSize + db.pageSize = common.DefaultPageSize } // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { + if info, statErr := db.file.Stat(); statErr != nil { _ = db.close() - return nil, err + lg.Errorf("failed to get db file's stats (%s): %v", path, err) + return nil, statErr } else if info.Size() == 0 { // Initialize new files with meta pages. 
- if err := db.init(); err != nil { + if err = db.init(); err != nil { // clean up file descriptor on initialization fail _ = db.close() + lg.Errorf("failed to initialize db file (%s): %v", path, err) return nil, err } } else { // try to get the page size from the metadata pages - if pgSize, err := db.getPageSize(); err == nil { - db.pageSize = pgSize - } else { + if db.pageSize, err = db.getPageSize(); err != nil { _ = db.close() - return nil, ErrInvalid + lg.Errorf("failed to get page size from db file (%s): %v", path, err) + return nil, err } } @@ -286,8 +289,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { + if err = db.mmap(options.InitialMmapSize); err != nil { _ = db.close() + lg.Errorf("failed to map db file (%s): %v", path, err) return nil, err } @@ -302,13 +306,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Flush freelist when transitioning from no sync to sync so // NoFreelistSync unaware boltdb can open the db later. if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) + tx, txErr := db.Begin(true) if tx != nil { - err = tx.Commit() + txErr = tx.Commit() } - if err != nil { + if txErr != nil { + lg.Errorf("starting readwrite transaction failed: %v", txErr) _ = db.close() - return nil, err + return nil, txErr } } @@ -352,7 +357,7 @@ func (db *DB) getPageSize() (int, error) { return db.pageSize, nil } - return 0, ErrInvalid + return 0, berrors.ErrInvalid } // getPageSizeFromFirstMeta reads the pageSize from the first meta page @@ -361,11 +366,11 @@ func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { var metaCanRead bool if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // getPageSizeFromSecondMeta reads the pageSize from the second meta page @@ -397,13 +402,13 @@ func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { bw, err := db.file.ReadAt(buf[:], pos) if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // loadFreelist reads the freelist if it is synced, or reconstructs it @@ -414,17 +419,29 @@ func (db *DB) loadFreelist() { db.freelist = newFreelist(db.FreelistType) if !db.hasSyncedFreelist() { // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) + db.freelist.Init(db.freepages()) } else { // Read free list from freelist page. 
- db.freelist.read(db.page(db.meta().freelist)) + db.freelist.Read(db.page(db.meta().Freelist())) } - db.stats.FreePageN = db.freelist.free_count() + db.stats.FreePageN = db.freelist.FreeCount() }) } func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist + return db.meta().Freelist() != common.PgidNoFreelist +} + +func (db *DB) fileSize() (int, error) { + info, err := db.file.Stat() + if err != nil { + return 0, fmt.Errorf("file stat error: %w", err) + } + sz := int(info.Size()) + if sz < db.pageSize*2 { + return 0, fmt.Errorf("file size too small %d", sz) + } + return sz, nil } // mmap opens the underlying memory-mapped file and initializes the meta references. @@ -433,21 +450,22 @@ func (db *DB) mmap(minsz int) (err error) { db.mmaplock.Lock() defer db.mmaplock.Unlock() - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } + lg := db.Logger() // Ensure the size is at least the minimum size. - fileSize := int(info.Size()) + var fileSize int + fileSize, err = db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } var size = fileSize if size < minsz { size = minsz } size, err = db.mmapSize(size) if err != nil { + lg.Errorf("getting map size failed: %w", err) return err } @@ -472,6 +490,7 @@ func (db *DB) mmap(minsz int) (err error) { // gofail: var mapError string // return errors.New(mapError) if err = mmap(db, size); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] mmap failed, size: %d, error: %v", runtime.GOOS, runtime.GOARCH, size, err) return err } @@ -493,15 +512,16 @@ func (db *DB) mmap(minsz int) (err error) { } // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() + db.meta0 = db.page(0).Meta() + db.meta1 = db.page(1).Meta() // Validate the meta pages. We only return an error if both meta pages fail // validation, since meta0 failing validation means that it wasn't saved // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() + err0 := db.meta0.Validate() + err1 := db.meta1.Validate() if err0 != nil && err1 != nil { + lg.Errorf("both meta pages are invalid, meta0: %v, meta1: %v", err0, err1) return err0 } @@ -524,6 +544,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("unmap error: %v", err.Error()) } @@ -542,14 +563,14 @@ func (db *DB) mmapSize(size int) (int, error) { } // Verify the requested size is not above the maximum allowed. - if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") + if size > common.MaxMapSize { + return 0, errors.New("mmap too large") } // If larger than 1GB then grow by 1GB at a time. sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder + if remainder := sz % int64(common.MaxMmapStep); remainder > 0 { + sz += int64(common.MaxMmapStep) - remainder } // Ensure that the mmap size is a multiple of the page size. @@ -560,8 +581,8 @@ func (db *DB) mmapSize(size int) (int, error) { } // If we've exceeded the max size then only grow up to the max size. 
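The mmapSize hunk above rounds a requested size in three steps: past 1GB it grows in 1GB increments (common.MaxMmapStep), then aligns up to a page-size multiple, then caps at common.MaxMapSize. A minimal standalone sketch of just that rounding, leaving out the small-size doubling path that precedes it in the full function (all names here are illustrative, not bbolt's API):

package main

import (
	"errors"
	"fmt"
)

// roundMmapSize mirrors the tail of db.mmapSize as shown in the hunk above.
func roundMmapSize(size, pageSize, maxMmapStep, maxMapSize int64) (int64, error) {
	if size > maxMapSize {
		return 0, errors.New("mmap too large")
	}
	sz := size
	if remainder := sz % maxMmapStep; remainder > 0 {
		sz += maxMmapStep - remainder // grow by 1GB at a time
	}
	if sz%pageSize != 0 {
		sz = ((sz / pageSize) + 1) * pageSize // keep a page-size multiple
	}
	if sz > maxMapSize {
		sz = maxMapSize // only grow up to the max size
	}
	return sz, nil
}

func main() {
	fmt.Println(roundMmapSize(3<<29, 4096, 1<<30, 1<<34)) // 1.5GB rounds up to 2GB
}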
- if sz > maxMapSize { - sz = maxMapSize + if sz > common.MaxMapSize { + sz = common.MaxMapSize } return int(sz), nil @@ -571,6 +592,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("munlock error: %v", err.Error()) } return nil @@ -580,6 +602,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("mlock error: %v", err.Error()) } return nil @@ -600,42 +623,43 @@ func (db *DB) init() error { // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf, pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag + p := db.pageInBuffer(buf, common.Pgid(i)) + p.SetId(common.Pgid(i)) + p.SetFlags(common.MetaPageFlag) // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() + m := p.Meta() + m.SetMagic(common.Magic) + m.SetVersion(common.Version) + m.SetPageSize(uint32(db.pageSize)) + m.SetFreelist(2) + m.SetRootBucket(common.NewInBucket(3, 0)) + m.SetPgid(4) + m.SetTxid(common.Txid(i)) + m.SetChecksum(m.Sum64()) } // Write an empty freelist at page 3. - p := db.pageInBuffer(buf, pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 + p := db.pageInBuffer(buf, common.Pgid(2)) + p.SetId(2) + p.SetFlags(common.FreelistPageFlag) + p.SetCount(0) // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf, pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 + p = db.pageInBuffer(buf, common.Pgid(3)) + p.SetId(3) + p.SetFlags(common.LeafPageFlag) + p.SetCount(0) // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { + db.Logger().Errorf("writeAt failed: %w", err) return err } if err := fdatasync(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } - db.filesz = len(buf) return nil } @@ -716,13 +740,31 @@ func (db *DB) close() error { // // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { +func (db *DB) Begin(writable bool) (t *Tx, err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debugf("Starting a new transaction [writable: %t]", writable) + defer func() { + if err != nil { + lg.Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err) + } else { + lg.Debugf("Starting a new transaction [writable: %t] successfully", writable) + } + }() + } + if writable { return db.beginRWTx() } return db.beginTx() } +func (db *DB) Logger() Logger { + if db == nil || db.logger == nil { + return getDiscardLogger() + } + return db.logger +} + func (db *DB) beginTx() (*Tx, error) { // Lock the meta pages while we initialize the transaction. 
We obtain // the meta lock before the mmap lock because that's the order that the @@ -738,14 +780,14 @@ func (db *DB) beginTx() (*Tx, error) { if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. @@ -755,6 +797,9 @@ func (db *DB) beginTx() (*Tx, error) { // Keep track of transaction until it closes. db.txs = append(db.txs, t) n := len(db.txs) + if db.freelist != nil { + db.freelist.AddReadonlyTXID(t.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -771,7 +816,7 @@ func (db *DB) beginTx() (*Tx, error) { func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. if db.readOnly { - return nil, ErrDatabaseReadOnly + return nil, berrors.ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. @@ -786,49 +831,23 @@ func (db *DB) beginRWTx() (*Tx, error) { // Exit if the database is not open yet. if !db.opened { db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.rwlock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. t := &Tx{writable: true} t.init(db) db.rwtx = t - db.freePages() + db.freelist.ReleasePendingPages() return t, nil } -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -848,6 +867,9 @@ func (db *DB) removeTx(tx *Tx) { } } n := len(db.txs) + if db.freelist != nil { + db.freelist.RemoveReadonlyTXID(tx.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -1056,7 +1078,20 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. 
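Begin above (and Sync just below) share one idiom: a named return value plus a deferred closure, so a single defer can report success or failure after the operation runs. Reduced to a sketch, assuming only that bbolt's Logger exposes Debugf/Errorf as the hunks use (logged, stdLogger, and op are illustrative names, not bbolt API):

package main

import "log"

// logger captures the two methods the hunks above call on bbolt's Logger.
type logger interface {
	Debugf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
}

type stdLogger struct{}

func (stdLogger) Debugf(f string, a ...interface{}) { log.Printf("DEBUG "+f, a...) }
func (stdLogger) Errorf(f string, a ...interface{}) { log.Printf("ERROR "+f, a...) }

// logged wraps an operation in the named-return/deferred-closure idiom:
// announce, run, then report the outcome by inspecting the named return err.
func logged(lg logger, name string, op func() error) (err error) {
	lg.Debugf("Starting %s", name)
	defer func() {
		if err != nil {
			lg.Errorf("%s failed: %v", name, err)
		} else {
			lg.Debugf("%s succeeded", name)
		}
	}()
	return op()
}

func main() {
	_ = logged(stdLogger{}, "demo op", func() error { return nil })
}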
-func (db *DB) Sync() error { return fdatasync(db) } +func (db *DB) Sync() (err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debugf("Syncing bbolt db (%s)", db.path) + defer func() { + if err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) + } else { + lg.Debugf("Syncing bbolt db (%s) successfully", db.path) + } + }() + } + + return fdatasync(db) +} // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. @@ -1069,37 +1104,37 @@ func (db *DB) Stats() Stats { // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. func (db *DB) Info() *Info { - _assert(db.data != nil, "database file isn't correctly mapped") + common.Assert(db.data != nil, "database file isn't correctly mapped") return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } // page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) +func (db *DB) page(id common.Pgid) *common.Page { + pos := id * common.Pgid(db.pageSize) + return (*common.Page)(unsafe.Pointer(&db.data[pos])) } // pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +func (db *DB) pageInBuffer(b []byte, id common.Pgid) *common.Page { + return (*common.Page)(unsafe.Pointer(&b[id*common.Pgid(db.pageSize)])) } // meta retrieves the current meta page reference. -func (db *DB) meta() *meta { +func (db *DB) meta() *common.Meta { // We have to return the meta with the highest txid which doesn't fail // validation. Otherwise, we can cause errors when in fact the database is // in a consistent state. metaA is the one with the higher txid. metaA := db.meta0 metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { + if db.meta1.Txid() > db.meta0.Txid() { metaA = db.meta1 metaB = db.meta0 } // Use higher meta page if valid. Otherwise, fallback to previous, if valid. - if err := metaA.validate(); err == nil { + if err := metaA.Validate(); err == nil { return metaA - } else if err := metaB.validate(); err == nil { + } else if err := metaB.Validate(); err == nil { return metaB } @@ -1109,7 +1144,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { +func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -1117,17 +1152,18 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } else { buf = make([]byte, count*db.pageSize) } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) + p := (*common.Page)(unsafe.Pointer(&buf[0])) + p.SetOverflow(uint32(count - 1)) // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(txid, count); p.id != 0 { + p.SetId(db.freelist.Allocate(txid, count)) + if p.Id() != 0 { return p, nil } // Resize mmap() if we're at the end. 
- p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize + p.SetId(db.rwtx.meta.Pgid()) + var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { return nil, fmt.Errorf("mmap allocate error: %s", err) @@ -1135,7 +1171,8 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) + curPgid := db.rwtx.meta.Pgid() + db.rwtx.meta.SetPgid(curPgid + common.Pgid(count)) return p, nil } @@ -1143,7 +1180,13 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { // grow grows the size of the database to the given sz. func (db *DB) grow(sz int) error { // Ignore if the new size is less than available file size. - if sz <= db.filesz { + lg := db.Logger() + fileSize, err := db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } + if sz <= fileSize { return nil } @@ -1162,21 +1205,22 @@ func (db *DB) grow(sz int) error { // gofail: var resizeFileError string // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] truncating file failed, size: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, sz, db.datasz, err) return fmt.Errorf("file resize error: %s", err) } } if err := db.file.Sync(); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing file failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("file sync error: %s", err) } if db.Mlock { // unlock old file and lock new one - if err := db.mrelock(db.filesz, sz); err != nil { + if err := db.mrelock(fileSize, sz); err != nil { return fmt.Errorf("mlock/munlock error: %s", err) } } } - db.filesz = sz return nil } @@ -1184,7 +1228,7 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } -func (db *DB) freepages() []pgid { +func (db *DB) freepages() []common.Pgid { tx, err := db.beginTx() defer func() { err = tx.Rollback() @@ -1196,21 +1240,21 @@ func (db *DB) freepages() []pgid { panic("freepages: failed to open read only tx") } - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) + reachable := make(map[common.Pgid]*common.Page) + nofreed := make(map[common.Pgid]bool) ech := make(chan error) go func() { for e := range ech { panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) } }() - tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) + tx.recursivelyCheckBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) close(ech) // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { + var fids []common.Pgid + for i := common.Pgid(2); i < db.meta().Pgid(); i++ { if _, ok := reachable[i]; !ok { fids = append(fids, i) } @@ -1218,11 +1262,17 @@ func (db *DB) freepages() []pgid { return fids } +func newFreelist(freelistType FreelistType) fl.Interface { + if freelistType == FreelistMapType { + return fl.NewHashMapFreelist() + } + return fl.NewArrayFreelist() +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. + // When set to zero it will wait indefinitely. Timeout time.Duration // Sets the DB.NoGrowSync flag before memory mapping the file. 
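newFreelist above is now the single selection point between the two backends: FreelistMapType gets the hashmap implementation from internal/freelist, anything else falls back to the array implementation. Callers choose through Options; a minimal usage sketch (the file name app.db is illustrative):

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Select the hashmap-backed freelist; omitting FreelistType (or passing
	// FreelistArrayType) keeps the array backend chosen by newFreelist above.
	db, err := bolt.Open("app.db", 0600, &bolt.Options{
		FreelistType: bolt.FreelistMapType,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}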
@@ -1259,6 +1309,12 @@ type Options struct { // If <=0, the initial map size is 0. // If initialMmapSize is smaller than the previous database size, // it takes no effect. + // + // Note: On Windows, due to platform limitations, the database file size + // will be immediately resized to match `InitialMmapSize` (aligned to page size) + // when the DB is opened. On non-Windows platforms, the file size will grow + // dynamically based on the actual amount of written data, regardless of `InitialMmapSize`. + // Refer to https://github.com/etcd-io/bbolt/issues/378#issuecomment-1378121966. InitialMmapSize int // PageSize overrides the default OS page size. @@ -1277,6 +1333,19 @@ type Options struct { // It prevents potential page faults, however // used memory can't be reclaimed. (UNIX only) Mlock bool + + // Logger is the logger used for bbolt. + Logger Logger +} + +func (o *Options) String() string { + if o == nil { + return "{}" + } + + return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", + o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) + } // DefaultOptions represent the options used if nil options are passed into Open(). @@ -1327,65 +1396,3 @@ type Info struct { Data uintptr PageSize int } - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/tools/vendor/go.etcd.io/bbolt/errors.go b/tools/vendor/go.etcd.io/bbolt/errors.go index f2c3b20ed..02958c86f 100644 --- a/tools/vendor/go.etcd.io/bbolt/errors.go +++ b/tools/vendor/go.etcd.io/bbolt/errors.go @@ -1,78 +1,108 @@ package bbolt -import "errors" +import "go.etcd.io/bbolt/errors" // These errors can be returned when opening or calling methods on a DB. 
var ( // ErrDatabaseNotOpen is returned when a DB instance is accessed before it // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen // ErrInvalid is returned when both meta pages on a database are invalid. // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalid = errors.ErrInvalid // ErrInvalidMapping is returned when the database file fails to get mapped. - ErrInvalidMapping = errors.New("database isn't correctly mapped") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalidMapping = errors.ErrInvalidMapping // ErrVersionMismatch is returned when the data file was created with a // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrVersionMismatch = errors.ErrVersionMismatch - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrChecksum = errors.ErrChecksum // ErrTimeout is returned when a database cannot obtain an exclusive lock // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTimeout = errors.ErrTimeout ) // These errors can occur when beginning or committing a Tx. var ( // ErrTxNotWritable is returned when performing a write operation on a // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxNotWritable = errors.ErrTxNotWritable // ErrTxClosed is returned when committing or rolling back a transaction // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxClosed = errors.ErrTxClosed // ErrDatabaseReadOnly is returned when a mutating transaction is started on a // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly // ErrFreePagesNotLoaded is returned when a readonly transaction without // preloading the free pages is trying to access the free pages. - ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded ) // These errors can occur when putting or deleting a value or a bucket. var ( // ErrBucketNotFound is returned when trying to access a bucket that has // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. 
+ ErrBucketNotFound = errors.ErrBucketNotFound // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketExists = errors.ErrBucketExists // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNameRequired = errors.ErrBucketNameRequired // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyRequired = errors.ErrKeyRequired // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyTooLarge = errors.ErrKeyTooLarge // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrValueTooLarge = errors.ErrValueTooLarge // ErrIncompatibleValue is returned when trying create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrIncompatibleValue = errors.ErrIncompatibleValue ) diff --git a/tools/vendor/go.etcd.io/bbolt/errors/errors.go b/tools/vendor/go.etcd.io/bbolt/errors/errors.go new file mode 100644 index 000000000..c115289e5 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/errors/errors.go @@ -0,0 +1,84 @@ +// Package errors defines the error variables that may be returned +// during bbolt operations. +package errors + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrInvalidMapping is returned when the database file fails to get mapped. + ErrInvalidMapping = errors.New("database isn't correctly mapped") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. 
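Since every sentinel in this file becomes a plain alias for its counterpart in the new bbolt/errors package, old and new comparisons match the same value. A short sketch of what the Deprecated notes imply for callers:

package main

import (
	"errors"
	"fmt"

	bolt "go.etcd.io/bbolt"
	berrors "go.etcd.io/bbolt/errors"
)

func main() {
	err := fmt.Errorf("commit failed: %w", bolt.ErrTxClosed)
	// Both checks hold because bolt.ErrTxClosed and berrors.ErrTxClosed are
	// the same variable after the aliasing in this file.
	fmt.Println(errors.Is(err, bolt.ErrTxClosed))    // true
	fmt.Println(errors.Is(err, berrors.ErrTxClosed)) // true
}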
+ ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying to create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") + + // ErrSameBuckets is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are the same. + ErrSameBuckets = errors.New("the source and target are the same bucket") + + // ErrDifferentDB is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are in different database files. + ErrDifferentDB = errors.New("the source and target buckets are in different database files") +) diff --git a/tools/vendor/go.etcd.io/bbolt/freelist.go b/tools/vendor/go.etcd.io/bbolt/freelist.go deleted file mode 100644 index dffc7bc74..000000000 --- a/tools/vendor/go.etcd.io/bbolt/freelist.go +++ /dev/null @@ -1,410 +0,0 @@ -package bbolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]struct{} // fast lookup of all free and pending page ids. 
- freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]struct{}), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) -} - -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. 
- for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if _, ok := f.cache[id]; ok { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = struct{}{} - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. - if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - - // Remove pgids which are allocated by this txid - for pgid, tid := range f.allocs { - if tid == txid { - delete(f.allocs, pgid) - } - } - - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgId pgid) bool { - _, ok := f.cache[pgId] - return ok -} - -// read initializes the freelist from a freelist page. 
-func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(pgid(0)), idx) - ids := unsafe.Slice((*pgid)(data), count) - - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []pgid { - return f.ids -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.count = uint16(l) - } else if l < 0xFFFF { - p.count = uint16(l) - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l) - f.copyall(ids) - } else { - p.count = 0xFFFF - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l+1) - ids[0] = pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. 
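The removed read/write pair above round-trips the free page IDs with one quirk worth spelling out: page.count is a uint16, so a list of 0xFFFF or more IDs stores its true length as the first pgid-sized element instead. A sketch of just that encoding decision, using plain uint64 in place of pgid (encodeFreelistIDs is an illustrative name):

package main

import "fmt"

// encodeFreelistIDs mirrors the count convention from freelist.write above:
// small lists put their length in the page header's count field, while lists
// of 0xFFFF or more prepend the true length as the first element.
func encodeFreelistIDs(ids []uint64) (headerCount uint16, payload []uint64) {
	if n := len(ids); n < 0xFFFF {
		return uint16(n), ids
	}
	payload = append([]uint64{uint64(len(ids))}, ids...)
	return 0xFFFF, payload
}

func main() {
	count, payload := encodeFreelistIDs([]uint64{4, 5, 6})
	fmt.Println(count, payload) // 3 [4 5 6]
}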
-func (f *freelist) reindex() { - ids := f.getFreePageIDs() - f.cache = make(map[pgid]struct{}, len(ids)) - for _, id := range ids { - f.cache[id] = struct{}{} - } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - f.cache[pendingID] = struct{}{} - } - } -} - -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids pgids) { - sort.Sort(ids) - f.ids = pgids(f.ids).merge(ids) -} diff --git a/tools/vendor/go.etcd.io/bbolt/freelist_hmap.go b/tools/vendor/go.etcd.io/bbolt/freelist_hmap.go deleted file mode 100644 index dbd67a1e7..000000000 --- a/tools/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ /dev/null @@ -1,178 +0,0 @@ -package bbolt - -import "sort" - -// hashmapFreeCount returns count of free pages(hashmap version) -func (f *freelist) hashmapFreeCount() int { - // use the forwardMap to get the total count - count := 0 - for _, size := range f.forwardMap { - count += int(size) - } - return count -} - -// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) hashmapAllocate(txid txid, n int) pgid { - if n == 0 { - return 0 - } - - // if we have a exact size match just return short path - if bm, ok := f.freemaps[uint64(n)]; ok { - for pid := range bm { - // remove the span - f.delSpan(pid, uint64(n)) - - f.allocs[pid] = txid - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - // lookup the map to find larger span - for size, bm := range f.freemaps { - if size < uint64(n) { - continue - } - - for pid := range bm { - // remove the initial - f.delSpan(pid, size) - - f.allocs[pid] = txid - - remain := size - uint64(n) - - // add remain span - f.addSpan(pid+pgid(n), remain) - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - return 0 -} - -// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) hashmapReadIDs(pgids []pgid) { - f.init(pgids) - - // Rebuild the page cache. 
- f.reindex() -} - -// hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []pgid { - count := f.free_count() - if count == 0 { - return nil - } - - m := make([]pgid, 0, count) - for start, size := range f.forwardMap { - for i := 0; i < int(size); i++ { - m = append(m, start+pgid(i)) - } - } - sort.Sort(pgids(m)) - - return m -} - -// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids pgids) { - for _, id := range ids { - // try to see if we can merge and update - f.mergeWithExistingSpan(id) - } -} - -// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid pgid) { - prev := pid - 1 - next := pid + 1 - - preSize, mergeWithPrev := f.backwardMap[prev] - nextSize, mergeWithNext := f.forwardMap[next] - newStart := pid - newSize := uint64(1) - - if mergeWithPrev { - //merge with previous span - start := prev + 1 - pgid(preSize) - f.delSpan(start, preSize) - - newStart -= pgid(preSize) - newSize += preSize - } - - if mergeWithNext { - // merge with next span - f.delSpan(next, nextSize) - newSize += nextSize - } - - f.addSpan(newStart, newSize) -} - -func (f *freelist) addSpan(start pgid, size uint64) { - f.backwardMap[start-1+pgid(size)] = size - f.forwardMap[start] = size - if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = make(map[pgid]struct{}) - } - - f.freemaps[size][start] = struct{}{} -} - -func (f *freelist) delSpan(start pgid, size uint64) { - delete(f.forwardMap, start) - delete(f.backwardMap, start+pgid(size-1)) - delete(f.freemaps[size], start) - if len(f.freemaps[size]) == 0 { - delete(f.freemaps, size) - } -} - -// initial from pgids using when use hashmap version -// pgids must be sorted -func (f *freelist) init(pgids []pgid) { - if len(pgids) == 0 { - return - } - - size := uint64(1) - start := pgids[0] - - if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { - panic("pgids not sorted") - } - - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[pgid]uint64) - f.backwardMap = make(map[pgid]uint64) - - for i := 1; i < len(pgids); i++ { - // continuous page - if pgids[i] == pgids[i-1]+1 { - size++ - } else { - f.addSpan(start, size) - - size = 1 - start = pgids[i] - } - } - - // init the tail - if size != 0 && start != 0 { - f.addSpan(start, size) - } -} diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go new file mode 100644 index 000000000..773175de3 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0xFFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go new file mode 100644 index 000000000..9f27d9199 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. 
+const MaxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go new file mode 100644 index 000000000..773175de3 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0xFFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go new file mode 100644 index 000000000..9022f6bca --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go @@ -0,0 +1,9 @@ +//go:build arm64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go new file mode 100644 index 000000000..31277523c --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go @@ -0,0 +1,9 @@ +//go:build loong64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go new file mode 100644 index 000000000..d930f4edd --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go @@ -0,0 +1,9 @@ +//go:build mips64 || mips64le + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x8000000000 // 512GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go new file mode 100644 index 000000000..8b1934368 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go @@ -0,0 +1,9 @@ +//go:build mips || mipsle + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x40000000 // 1GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0xFFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go new file mode 100644 index 000000000..a374e1406 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go @@ -0,0 +1,9 @@ +//go:build ppc + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. 
+const MaxAllocSize = 0xFFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go new file mode 100644 index 000000000..80288a83a --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go @@ -0,0 +1,9 @@ +//go:build ppc64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go new file mode 100644 index 000000000..77561d687 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go @@ -0,0 +1,9 @@ +//go:build ppc64le + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go new file mode 100644 index 000000000..2a876e5f7 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go @@ -0,0 +1,9 @@ +//go:build riscv64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go new file mode 100644 index 000000000..982cb7558 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go @@ -0,0 +1,9 @@ +//go:build s390x + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/bucket.go b/tools/vendor/go.etcd.io/bbolt/internal/common/bucket.go new file mode 100644 index 000000000..2b4ab1453 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/bucket.go @@ -0,0 +1,54 @@ +package common + +import ( + "fmt" + "unsafe" +) + +const BucketHeaderSize = int(unsafe.Sizeof(InBucket{})) + +// InBucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type InBucket struct { + root Pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +func NewInBucket(root Pgid, seq uint64) InBucket { + return InBucket{ + root: root, + sequence: seq, + } +} + +func (b *InBucket) RootPage() Pgid { + return b.root +} + +func (b *InBucket) SetRootPage(id Pgid) { + b.root = id +} + +// InSequence returns the sequence. 
+// It is deliberately not named `Sequence` to avoid
+// a name collision with `(*Bucket) Sequence()`.
+func (b *InBucket) InSequence() uint64 {
+	return b.sequence
+}
+
+func (b *InBucket) SetInSequence(v uint64) {
+	b.sequence = v
+}
+
+func (b *InBucket) IncSequence() {
+	b.sequence++
+}
+
+func (b *InBucket) InlinePage(v []byte) *Page {
+	return (*Page)(unsafe.Pointer(&v[BucketHeaderSize]))
+}
+
+func (b *InBucket) String() string {
+	return fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+}
diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/inode.go b/tools/vendor/go.etcd.io/bbolt/internal/common/inode.go
new file mode 100644
index 000000000..080b9af78
--- /dev/null
+++ b/tools/vendor/go.etcd.io/bbolt/internal/common/inode.go
@@ -0,0 +1,115 @@
+package common
+
+import "unsafe"
+
+// Inode represents an internal node inside of a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type Inode struct {
+	flags uint32
+	pgid  Pgid
+	key   []byte
+	value []byte
+}
+
+type Inodes []Inode
+
+func (in *Inode) Flags() uint32 {
+	return in.flags
+}
+
+func (in *Inode) SetFlags(flags uint32) {
+	in.flags = flags
+}
+
+func (in *Inode) Pgid() Pgid {
+	return in.pgid
+}
+
+func (in *Inode) SetPgid(id Pgid) {
+	in.pgid = id
+}
+
+func (in *Inode) Key() []byte {
+	return in.key
+}
+
+func (in *Inode) SetKey(key []byte) {
+	in.key = key
+}
+
+func (in *Inode) Value() []byte {
+	return in.value
+}
+
+func (in *Inode) SetValue(value []byte) {
+	in.value = value
+}
+
+func ReadInodeFromPage(p *Page) Inodes {
+	inodes := make(Inodes, int(p.Count()))
+	isLeaf := p.IsLeafPage()
+	for i := 0; i < int(p.Count()); i++ {
+		inode := &inodes[i]
+		if isLeaf {
+			elem := p.LeafPageElement(uint16(i))
+			inode.SetFlags(elem.Flags())
+			inode.SetKey(elem.Key())
+			inode.SetValue(elem.Value())
+		} else {
+			elem := p.BranchPageElement(uint16(i))
+			inode.SetPgid(elem.Pgid())
+			inode.SetKey(elem.Key())
+		}
+		Assert(len(inode.Key()) > 0, "read: zero-length inode key")
+	}
+
+	return inodes
+}
+
+func WriteInodeToPage(inodes Inodes, p *Page) uint32 {
+	// Loop over each item and write it to the page.
+	// off tracks the offset into p of the start of the next data.
+	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
+	isLeaf := p.IsLeafPage()
+	for i, item := range inodes {
+		Assert(len(item.Key()) > 0, "write: zero-length inode key")
+
+		// Create a slice to write into of needed size and advance
+		// byte pointer for next iteration.
+		sz := len(item.Key()) + len(item.Value())
+		b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
+		off += uintptr(sz)
+
+		// Write the page element.
+		if isLeaf {
+			elem := p.LeafPageElement(uint16(i))
+			elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
+			elem.SetFlags(item.Flags())
+			elem.SetKsize(uint32(len(item.Key())))
+			elem.SetVsize(uint32(len(item.Value())))
+		} else {
+			elem := p.BranchPageElement(uint16(i))
+			elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
+			elem.SetKsize(uint32(len(item.Key())))
+			elem.SetPgid(item.Pgid())
+			Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred")
+		}
+
+		// Write data for the element to the end of the page.
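+		// Editor's sketch of the resulting layout (illustrative only):
+		//
+		//	| page header | element headers ... | key0 value0 key1 value1 ... |
+		//
+		// pos is stored relative to each element header, so Key()/Value()
+		// can later be recovered from the element alone.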
+ l := copy(b, item.Key()) + copy(b[l:], item.Value()) + } + + return uint32(off) +} + +func UsedSpaceInPage(inodes Inodes, p *Page) uint32 { + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + for _, item := range inodes { + sz := len(item.Key()) + len(item.Value()) + off += uintptr(sz) + } + + return uint32(off) +} diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/meta.go b/tools/vendor/go.etcd.io/bbolt/internal/common/meta.go new file mode 100644 index 000000000..055388604 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/meta.go @@ -0,0 +1,161 @@ +package common + +import ( + "fmt" + "hash/fnv" + "io" + "unsafe" + + "go.etcd.io/bbolt/errors" +) + +type Meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root InBucket + freelist Pgid + pgid Pgid + txid Txid + checksum uint64 +} + +// Validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *Meta) Validate() error { + if m.magic != Magic { + return errors.ErrInvalid + } else if m.version != Version { + return errors.ErrVersionMismatch + } else if m.checksum != m.Sum64() { + return errors.ErrChecksum + } + return nil +} + +// Copy copies one meta object to another. +func (m *Meta) Copy(dest *Meta) { + *dest = *m +} + +// Write writes the meta onto a page. +func (m *Meta) Write(p *Page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = Pgid(m.txid % 2) + p.SetFlags(MetaPageFlag) + + // Calculate the checksum. + m.checksum = m.Sum64() + + m.Copy(p.Meta()) +} + +// Sum64 generates the checksum for the meta. 
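+// It hashes the meta's bytes up to, but not including, the checksum field
+// itself, using FNV-64a; roughly (editor's sketch, with metaBytes as a
+// hypothetical []byte view of the struct):
+//
+//	h := fnv.New64a()
+//	h.Write(metaBytes[:unsafe.Offsetof(Meta{}.checksum)])
+//	sum := h.Sum64()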
+func (m *Meta) Sum64() uint64 {
+	var h = fnv.New64a()
+	_, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+	return h.Sum64()
+}
+
+func (m *Meta) Magic() uint32 {
+	return m.magic
+}
+
+func (m *Meta) SetMagic(v uint32) {
+	m.magic = v
+}
+
+func (m *Meta) Version() uint32 {
+	return m.version
+}
+
+func (m *Meta) SetVersion(v uint32) {
+	m.version = v
+}
+
+func (m *Meta) PageSize() uint32 {
+	return m.pageSize
+}
+
+func (m *Meta) SetPageSize(v uint32) {
+	m.pageSize = v
+}
+
+func (m *Meta) Flags() uint32 {
+	return m.flags
+}
+
+func (m *Meta) SetFlags(v uint32) {
+	m.flags = v
+}
+
+func (m *Meta) SetRootBucket(b InBucket) {
+	m.root = b
+}
+
+func (m *Meta) RootBucket() *InBucket {
+	return &m.root
+}
+
+func (m *Meta) Freelist() Pgid {
+	return m.freelist
+}
+
+func (m *Meta) SetFreelist(v Pgid) {
+	m.freelist = v
+}
+
+func (m *Meta) IsFreelistPersisted() bool {
+	return m.freelist != PgidNoFreelist
+}
+
+func (m *Meta) Pgid() Pgid {
+	return m.pgid
+}
+
+func (m *Meta) SetPgid(id Pgid) {
+	m.pgid = id
+}
+
+func (m *Meta) Txid() Txid {
+	return m.txid
+}
+
+func (m *Meta) SetTxid(id Txid) {
+	m.txid = id
+}
+
+func (m *Meta) IncTxid() {
+	m.txid += 1
+}
+
+func (m *Meta) DecTxid() {
+	m.txid -= 1
+}
+
+func (m *Meta) Checksum() uint64 {
+	return m.checksum
+}
+
+func (m *Meta) SetChecksum(v uint64) {
+	m.checksum = v
+}
+
+func (m *Meta) Print(w io.Writer) {
+	fmt.Fprintf(w, "Version: %d\n", m.version)
+	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
+	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+}
diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/page.go b/tools/vendor/go.etcd.io/bbolt/internal/common/page.go
new file mode 100644
index 000000000..ee808967c
--- /dev/null
+++ b/tools/vendor/go.etcd.io/bbolt/internal/common/page.go
@@ -0,0 +1,391 @@
+package common
+
+import (
+	"fmt"
+	"os"
+	"sort"
+	"unsafe"
+)
+
+const PageHeaderSize = unsafe.Sizeof(Page{})
+
+const MinKeysPerPage = 2
+
+const BranchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const LeafPageElementSize = unsafe.Sizeof(leafPageElement{})
+const pgidSize = unsafe.Sizeof(Pgid(0))
+
+const (
+	BranchPageFlag   = 0x01
+	LeafPageFlag     = 0x02
+	MetaPageFlag     = 0x04
+	FreelistPageFlag = 0x10
+)
+
+const (
+	BucketLeafFlag = 0x01
+)
+
+type Pgid uint64
+
+type Page struct {
+	id       Pgid
+	flags    uint16
+	count    uint16
+	overflow uint32
+}
+
+func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page {
+	return &Page{
+		id:       id,
+		flags:    flags,
+		count:    count,
+		overflow: overflow,
+	}
+}
+
+// Typ returns a human-readable page type string used for debugging.
+func (p *Page) Typ() string {
+	if p.IsBranchPage() {
+		return "branch"
+	} else if p.IsLeafPage() {
+		return "leaf"
+	} else if p.IsMetaPage() {
+		return "meta"
+	} else if p.IsFreelistPage() {
+		return "freelist"
+	}
+	return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+func (p *Page) IsBranchPage() bool {
+	return p.flags == BranchPageFlag
+}
+
+func (p *Page) IsLeafPage() bool {
+	return p.flags == LeafPageFlag
+}
+
+func (p *Page) IsMetaPage() bool {
+	return p.flags == MetaPageFlag
+}
+
+func (p *Page) IsFreelistPage() bool {
+	return p.flags == FreelistPageFlag
+}
+
+// Meta returns a pointer to the metadata section of the page.
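+// Editor's note (illustrative): the meta struct sits immediately after the
+// fixed page header, which is also where LoadPageMeta in utils.go (added by
+// this patch) points into a raw buffer:
+//
+//	m1 := common.LoadPage(buf).Meta()
+//	m2 := common.LoadPageMeta(buf) // same address: &buf[PageHeaderSize]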
+func (p *Page) Meta() *Meta {
+	return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+}
+
+func (p *Page) FastCheck(id Pgid) {
+	Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id)
+	// Only one flag of page-type can be set.
+	Assert(p.IsBranchPage() ||
+		p.IsLeafPage() ||
+		p.IsMetaPage() ||
+		p.IsFreelistPage(),
+		"page %v: has unexpected type/flags: %x", p.id, p.flags)
+}
+
+// LeafPageElement retrieves the leaf node by index
+func (p *Page) LeafPageElement(index uint16) *leafPageElement {
+	return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+		LeafPageElementSize, int(index)))
+}
+
+// LeafPageElements retrieves a list of leaf nodes.
+func (p *Page) LeafPageElements() []leafPageElement {
+	if p.count == 0 {
+		return nil
+	}
+	data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+	elems := unsafe.Slice((*leafPageElement)(data), int(p.count))
+	return elems
+}
+
+// BranchPageElement retrieves the branch node by index
+func (p *Page) BranchPageElement(index uint16) *branchPageElement {
+	return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+		unsafe.Sizeof(branchPageElement{}), int(index)))
+}
+
+// BranchPageElements retrieves a list of branch nodes.
+func (p *Page) BranchPageElements() []branchPageElement {
+	if p.count == 0 {
+		return nil
+	}
+	data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+	elems := unsafe.Slice((*branchPageElement)(data), int(p.count))
+	return elems
+}
+
+func (p *Page) FreelistPageCount() (int, int) {
+	Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags))
+
+	// If the page.count is at the max uint16 value (64k) then it's considered
+	// an overflow and the size of the freelist is stored as the first element.
+	var idx, count = 0, int(p.count)
+	if count == 0xFFFF {
+		idx = 1
+		c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+		count = int(c)
+		if count < 0 {
+			panic(fmt.Sprintf("leading element count %d overflows int", c))
+		}
+	}
+
+	return idx, count
+}
+
+func (p *Page) FreelistPageIds() []Pgid {
+	Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags))
+
+	idx, count := p.FreelistPageCount()
+
+	if count == 0 {
+		return nil
+	}
+
+	data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx)
+	ids := unsafe.Slice((*Pgid)(data), count)
+
+	return ids
+}
+
+// hexdump writes n bytes of the page to STDERR as hex output.
+func (p *Page) hexdump(n int) { + buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +func (p *Page) PageElementSize() uintptr { + if p.IsLeafPage() { + return LeafPageElementSize + } + return BranchPageElementSize +} + +func (p *Page) Id() Pgid { + return p.id +} + +func (p *Page) SetId(target Pgid) { + p.id = target +} + +func (p *Page) Flags() uint16 { + return p.flags +} + +func (p *Page) SetFlags(v uint16) { + p.flags = v +} + +func (p *Page) Count() uint16 { + return p.count +} + +func (p *Page) SetCount(target uint16) { + p.count = target +} + +func (p *Page) Overflow() uint32 { + return p.overflow +} + +func (p *Page) SetOverflow(target uint32) { + p.overflow = target +} + +func (p *Page) String() string { + return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow) +} + +type Pages []*Page + +func (s Pages) Len() int { return len(s) } +func (s Pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid Pgid +} + +func (n *branchPageElement) Pos() uint32 { + return n.pos +} + +func (n *branchPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *branchPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *branchPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *branchPageElement) Pgid() Pgid { + return n.pgid +} + +func (n *branchPageElement) SetPgid(v Pgid) { + n.pgid = v +} + +// Key returns a byte slice of the node key. +func (n *branchPageElement) Key() []byte { + return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement { + return &leafPageElement{ + flags: flags, + pos: pos, + ksize: ksize, + vsize: vsize, + } +} + +func (n *leafPageElement) Flags() uint32 { + return n.flags +} + +func (n *leafPageElement) SetFlags(v uint32) { + n.flags = v +} + +func (n *leafPageElement) Pos() uint32 { + return n.pos +} + +func (n *leafPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *leafPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *leafPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *leafPageElement) Vsize() uint32 { + return n.vsize +} + +func (n *leafPageElement) SetVsize(v uint32) { + n.vsize = v +} + +// Key returns a byte slice of the node key. +func (n *leafPageElement) Key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// Value returns a byte slice of the node value. +func (n *leafPageElement) Value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +func (n *leafPageElement) IsBucketEntry() bool { + return n.flags&uint32(BucketLeafFlag) != 0 +} + +func (n *leafPageElement) Bucket() *InBucket { + if n.IsBucketEntry() { + return LoadBucket(n.Value()) + } else { + return nil + } +} + +// PageInfo represents human readable information about a page. 
+type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type Pgids []Pgid + +func (s Pgids) Len() int { return len(s) } +func (s Pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pgids) Less(i, j int) bool { return s[i] < s[j] } + +// Merge returns the sorted union of a and b. +func (a Pgids) Merge(b Pgids) Pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(Pgids, len(a)+len(b)) + Mergepgids(merged, a, b) + return merged +} + +// Mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func Mergepgids(dst, a, b Pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/types.go b/tools/vendor/go.etcd.io/bbolt/internal/common/types.go new file mode 100644 index 000000000..18d6d69c2 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/common/types.go @@ -0,0 +1,37 @@ +package common + +import ( + "os" + "runtime" + "time" +) + +// MaxMmapStep is the largest step that can be taken when remapping the mmap. +const MaxMmapStep = 1 << 30 // 1GB + +// Version represents the data file format version. +const Version uint32 = 2 + +// Magic represents a marker value to indicate that a file is a Bolt DB. +const Magic uint32 = 0xED0CDAED + +const PgidNoFreelist Pgid = 0xffffffffffffffff + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DefaultPageSize is the default page size for db which is set to the OS page size. +var DefaultPageSize = os.Getpagesize() + +// Txid represents the internal transaction identifier. 
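+// Editor's note (illustrative): txids increase monotonically with each
+// writable transaction; Meta.Write (earlier in this patch) even derives
+// which of the two meta pages to overwrite from it:
+//
+//	p.id = Pgid(m.txid % 2) // alternate between meta pages 0 and 1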
+type Txid uint64
diff --git a/tools/vendor/go.etcd.io/bbolt/unsafe.go b/tools/vendor/go.etcd.io/bbolt/internal/common/unsafe.go
similarity index 75%
rename from tools/vendor/go.etcd.io/bbolt/unsafe.go
rename to tools/vendor/go.etcd.io/bbolt/internal/common/unsafe.go
index 7745d32ce..740ffc707 100644
--- a/tools/vendor/go.etcd.io/bbolt/unsafe.go
+++ b/tools/vendor/go.etcd.io/bbolt/internal/common/unsafe.go
@@ -1,18 +1,18 @@
-package bbolt
+package common
 
 import (
 	"unsafe"
 )
 
-func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
+func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
 	return unsafe.Pointer(uintptr(base) + offset)
 }
 
-func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
+func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
 	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
 }
 
-func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
+func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
 	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
 	//
 	// This memory is not allocated from C, but it is unmanaged by Go's
@@ -23,5 +23,5 @@ func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
 	// index 0. However, the wiki never says that the address must be to
 	// the beginning of a C allocation (or even that malloc was used at
 	// all), so this is believed to be correct.
-	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
+	return (*[MaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j]
 }
diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/utils.go b/tools/vendor/go.etcd.io/bbolt/internal/common/utils.go
new file mode 100644
index 000000000..bdf82a7b0
--- /dev/null
+++ b/tools/vendor/go.etcd.io/bbolt/internal/common/utils.go
@@ -0,0 +1,64 @@
+package common
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"unsafe"
+)
+
+func LoadBucket(buf []byte) *InBucket {
+	return (*InBucket)(unsafe.Pointer(&buf[0]))
+}
+
+func LoadPage(buf []byte) *Page {
+	return (*Page)(unsafe.Pointer(&buf[0]))
+}
+
+func LoadPageMeta(buf []byte) *Meta {
+	return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize]))
+}
+
+func CopyFile(srcPath, dstPath string) error {
+	// Ensure the source file exists.
+	_, err := os.Stat(srcPath)
+	if os.IsNotExist(err) {
+		return fmt.Errorf("source file %q not found", srcPath)
+	} else if err != nil {
+		return err
+	}
+
+	// Ensure the output file does not exist.
+	_, err = os.Stat(dstPath)
+	if err == nil {
+		return fmt.Errorf("output file %q already exists", dstPath)
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+
+	srcDB, err := os.Open(srcPath)
+	if err != nil {
+		return fmt.Errorf("failed to open source file %q: %w", srcPath, err)
+	}
+	defer srcDB.Close()
+	dstDB, err := os.Create(dstPath)
+	if err != nil {
+		return fmt.Errorf("failed to create output file %q: %w", dstPath, err)
+	}
+	defer dstDB.Close()
+	written, err := io.Copy(dstDB, srcDB)
+	if err != nil {
+		return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err)
+	}
+
+	srcFi, err := srcDB.Stat()
+	if err != nil {
+		return fmt.Errorf("failed to get source file info %q: %w", srcPath, err)
+	}
+	initialSize := srcFi.Size()
+	if initialSize != written {
+		return fmt.Errorf("the number of bytes copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize)
+	}
+
+	return nil
+}
diff --git a/tools/vendor/go.etcd.io/bbolt/internal/common/verify.go b/tools/vendor/go.etcd.io/bbolt/internal/common/verify.go
new file mode 100644
index 000000000..eac95e263
--- /dev/null
+++ b/tools/vendor/go.etcd.io/bbolt/internal/common/verify.go
@@ -0,0 +1,67 @@
+// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go
+package common
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+const ENV_VERIFY = "BBOLT_VERIFY"
+
+type VerificationType string
+
+const (
+	ENV_VERIFY_VALUE_ALL    VerificationType = "all"
+	ENV_VERIFY_VALUE_ASSERT VerificationType = "assert"
+)
+
+func getEnvVerify() string {
+	return strings.ToLower(os.Getenv(ENV_VERIFY))
+}
+
+func IsVerificationEnabled(verification VerificationType) bool {
+	env := getEnvVerify()
+	return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification))
+}
+
+// EnableVerifications sets `ENV_VERIFY` and returns a function that
+// can be used to restore the original settings.
+func EnableVerifications(verification VerificationType) func() {
+	previousEnv := getEnvVerify()
+	os.Setenv(ENV_VERIFY, string(verification))
+	return func() {
+		os.Setenv(ENV_VERIFY, previousEnv)
+	}
+}
+
+// EnableAllVerifications enables verification and returns a function
+// that can be used to restore the original settings.
+func EnableAllVerifications() func() {
+	return EnableVerifications(ENV_VERIFY_VALUE_ALL)
+}
+
+// DisableVerifications unsets `ENV_VERIFY` and returns a function that
+// can be used to restore the original settings.
+func DisableVerifications() func() {
+	previousEnv := getEnvVerify()
+	os.Unsetenv(ENV_VERIFY)
+	return func() {
+		os.Setenv(ENV_VERIFY, previousEnv)
+	}
+}
+
+// Verify performs verification if the assertions are enabled.
+// In the default setup it runs in tests and is skipped in production code.
+func Verify(f func()) {
+	if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) {
+		f()
+	}
+}
+
+// Assert will panic with a given formatted message if the given condition is false.
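+// Editor's usage sketch (illustrative): with BBOLT_VERIFY=all or
+// BBOLT_VERIFY=assert set, the expensive invariant checks run; otherwise
+// Verify(f) is a no-op:
+//
+//	common.Verify(func() {
+//		common.Assert(len(ids) > 0, "expected non-empty ids, got %v", ids)
+//	})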
+func Assert(condition bool, msg string, v ...any) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/tools/vendor/go.etcd.io/bbolt/internal/freelist/array.go b/tools/vendor/go.etcd.io/bbolt/internal/freelist/array.go new file mode 100644 index 000000000..0cc1ba715 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/freelist/array.go @@ -0,0 +1,108 @@ +package freelist + +import ( + "fmt" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +type array struct { + *shared + + ids []common.Pgid // all free and available free page ids. +} + +func (f *array) Init(ids common.Pgids) { + f.ids = ids + f.reindex() +} + +func (f *array) Allocate(txid common.Txid, n int) common.Pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd common.Pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == common.Pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +func (f *array) FreeCount() int { + return len(f.ids) +} + +func (f *array) freePageIds() common.Pgids { + return f.ids +} + +func (f *array) mergeSpans(ids common.Pgids) { + sort.Sort(ids) + common.Verify(func() { + idsIdx := make(map[common.Pgid]struct{}) + for _, id := range f.ids { + // The existing f.ids shouldn't have duplicated free ID. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids)) + } + idsIdx[id] = struct{}{} + } + + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. Note page 0 and 1 + // are reserved for meta pages, so they can never be free page IDs. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.ids. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids)) + } + } + }) + f.ids = common.Pgids(f.ids).Merge(ids) +} + +func NewArrayFreelist() Interface { + a := &array{ + shared: newShared(), + ids: []common.Pgid{}, + } + a.Interface = a + return a +} diff --git a/tools/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go b/tools/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go new file mode 100644 index 000000000..2b819506b --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go @@ -0,0 +1,82 @@ +package freelist + +import ( + "go.etcd.io/bbolt/internal/common" +) + +type ReadWriter interface { + // Read calls Init with the page ids stored in the given page. + Read(page *common.Page) + + // Write writes the freelist into the given page. + Write(page *common.Page) + + // EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write. 
+ // This should never underestimate the size. + EstimatedWritePageSize() int +} + +type Interface interface { + ReadWriter + + // Init initializes this freelist with the given list of pages. + Init(ids common.Pgids) + + // Allocate tries to allocate the given number of contiguous pages + // from the free list pages. It returns the starting page ID if + // available; otherwise, it returns 0. + Allocate(txid common.Txid, numPages int) common.Pgid + + // Count returns the number of free and pending pages. + Count() int + + // FreeCount returns the number of free pages. + FreeCount() int + + // PendingCount returns the number of pending pages. + PendingCount() int + + // AddReadonlyTXID adds a given read-only transaction id for pending page tracking. + AddReadonlyTXID(txid common.Txid) + + // RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking. + RemoveReadonlyTXID(txid common.Txid) + + // ReleasePendingPages releases any pages associated with closed read-only transactions. + ReleasePendingPages() + + // Free releases a page and its overflow for a given transaction id. + // If the page is already free or is one of the meta pages, then a panic will occur. + Free(txId common.Txid, p *common.Page) + + // Freed returns whether a given page is in the free list. + Freed(pgId common.Pgid) bool + + // Rollback removes the pages from a given pending tx. + Rollback(txId common.Txid) + + // Copyall copies a list of all free ids and all pending ids in one sorted list. + // f.count returns the minimum length required for dst. + Copyall(dst []common.Pgid) + + // Reload reads the freelist from a page and filters out pending items. + Reload(p *common.Page) + + // NoSyncReload reads the freelist from Pgids and filters out pending items. + NoSyncReload(pgIds common.Pgids) + + // freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available. + freePageIds() common.Pgids + + // pendingPageIds returns all pending pages by transaction id. + pendingPageIds() map[common.Txid]*txPending + + // release moves all page ids for a transaction id (or older) to the freelist. + release(txId common.Txid) + + // releaseRange moves pending pages allocated within an extent [begin,end] to the free list. 
+	releaseRange(begin, end common.Txid)
+
+	// mergeSpans merges the given pages into the freelist.
+	mergeSpans(ids common.Pgids)
+}
diff --git a/tools/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go b/tools/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go
new file mode 100644
index 000000000..8d471f4b5
--- /dev/null
+++ b/tools/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go
@@ -0,0 +1,292 @@
+package freelist
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+
+	"go.etcd.io/bbolt/internal/common"
+)
+
+// pidSet holds the set of starting pgids which have the same span size
+type pidSet map[common.Pgid]struct{}
+
+type hashMap struct {
+	*shared
+
+	freePagesCount uint64                 // count of free pages (hashmap version)
+	freemaps       map[uint64]pidSet      // key is the size of continuous pages (span); value is the set of starting pgids with that span size
+	forwardMap     map[common.Pgid]uint64 // key is start pgid, value is its span size
+	backwardMap    map[common.Pgid]uint64 // key is end pgid, value is its span size
+}
+
+func (f *hashMap) Init(pgids common.Pgids) {
+	// reset the counter when the freelist is initialized
+	f.freePagesCount = 0
+	f.freemaps = make(map[uint64]pidSet)
+	f.forwardMap = make(map[common.Pgid]uint64)
+	f.backwardMap = make(map[common.Pgid]uint64)
+
+	if len(pgids) == 0 {
+		return
+	}
+
+	if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
+		panic("pgids not sorted")
+	}
+
+	size := uint64(1)
+	start := pgids[0]
+
+	for i := 1; i < len(pgids); i++ {
+		// continuous page
+		if pgids[i] == pgids[i-1]+1 {
+			size++
+		} else {
+			f.addSpan(start, size)
+
+			size = 1
+			start = pgids[i]
+		}
+	}
+
+	// init the tail
+	if size != 0 && start != 0 {
+		f.addSpan(start, size)
+	}
+
+	f.reindex()
+}
+
+func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid {
+	if n == 0 {
+		return 0
+	}
+
+	// if we have an exact size match, take the short path
+	if bm, ok := f.freemaps[uint64(n)]; ok {
+		for pid := range bm {
+			// remove the span
+			f.delSpan(pid, uint64(n))
+
+			f.allocs[pid] = txid
+
+			for i := common.Pgid(0); i < common.Pgid(n); i++ {
+				delete(f.cache, pid+i)
+			}
+			return pid
+		}
+	}
+
+	// look up the map to find a larger span
+	for size, bm := range f.freemaps {
+		if size < uint64(n) {
+			continue
+		}
+
+		for pid := range bm {
+			// remove the initial span
+			f.delSpan(pid, size)
+
+			f.allocs[pid] = txid
+
+			remain := size - uint64(n)
+
+			// add the remaining span
+			f.addSpan(pid+common.Pgid(n), remain)
+
+			for i := common.Pgid(0); i < common.Pgid(n); i++ {
+				delete(f.cache, pid+i)
+			}
+			return pid
+		}
+	}
+
+	return 0
+}
+
+func (f *hashMap) FreeCount() int {
+	common.Verify(func() {
+		expectedFreePageCount := f.hashmapFreeCountSlow()
+		common.Assert(int(f.freePagesCount) == expectedFreePageCount,
+			"freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount)
+	})
+	return int(f.freePagesCount)
+}
+
+func (f *hashMap) freePageIds() common.Pgids {
+	count := f.FreeCount()
+	if count == 0 {
+		return common.Pgids{}
+	}
+
+	m := make([]common.Pgid, 0, count)
+
+	startPageIds := make([]common.Pgid, 0, len(f.forwardMap))
+	for k := range f.forwardMap {
+		startPageIds = append(startPageIds, k)
+	}
+	sort.Sort(common.Pgids(startPageIds))
+
+	for _, start := range startPageIds {
+		if size, ok := f.forwardMap[start]; ok {
+			for i := 0; i < int(size); i++ {
+				m = append(m, start+common.Pgid(i))
+			}
+		}
+	}
+
+	return m
+}
+
+func (f *hashMap) hashmapFreeCountSlow() int {
+	count := 0
+	for _, size := range f.forwardMap {
+		count += int(size)
+	}
+	return count
+}
+
+func (f *hashMap) addSpan(start common.Pgid, size uint64) {
+	f.backwardMap[start-1+common.Pgid(size)] = size
+	f.forwardMap[start] = size
+	if _, ok := f.freemaps[size]; !ok {
+		f.freemaps[size] = make(map[common.Pgid]struct{})
+	}
+
+	f.freemaps[size][start] = struct{}{}
+	f.freePagesCount += size
+}
+
+func (f *hashMap) delSpan(start common.Pgid, size uint64) {
+	delete(f.forwardMap, start)
+	delete(f.backwardMap, start+common.Pgid(size-1))
+	delete(f.freemaps[size], start)
+	if len(f.freemaps[size]) == 0 {
+		delete(f.freemaps, size)
+	}
+	f.freePagesCount -= size
+}
+
+func (f *hashMap) mergeSpans(ids common.Pgids) {
+	common.Verify(func() {
+		ids1Freemap := f.idsFromFreemaps()
+		ids2Forward := f.idsFromForwardMap()
+		ids3Backward := f.idsFromBackwardMap()
+
+		if !reflect.DeepEqual(ids1Freemap, ids2Forward) {
+			panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap))
+		}
+		if !reflect.DeepEqual(ids1Freemap, ids3Backward) {
+			panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap))
+		}
+
+		sort.Sort(ids)
+		prev := common.Pgid(0)
+		for _, id := range ids {
+			// The ids shouldn't have duplicated free ID.
+			if prev == id {
+				panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
+			}
+			prev = id
+
+			// The ids shouldn't have any overlap with the existing f.freemaps.
+			if _, ok := ids1Freemap[id]; ok {
+				panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps))
+			}
+		}
+	})
+	for _, id := range ids {
+		// try to see if we can merge and update
+		f.mergeWithExistingSpan(id)
+	}
+}
+
+// mergeWithExistingSpan merges pid into the existing free spans, trying to
+// merge it both backward and forward.
+func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) {
+	prev := pid - 1
+	next := pid + 1
+
+	preSize, mergeWithPrev := f.backwardMap[prev]
+	nextSize, mergeWithNext := f.forwardMap[next]
+	newStart := pid
+	newSize := uint64(1)
+
+	if mergeWithPrev {
+		// merge with previous span
+		start := prev + 1 - common.Pgid(preSize)
+		f.delSpan(start, preSize)
+
+		newStart -= common.Pgid(preSize)
+		newSize += preSize
+	}
+
+	if mergeWithNext {
+		// merge with next span
+		f.delSpan(next, nextSize)
+		newSize += nextSize
+	}
+
+	f.addSpan(newStart, newSize)
+}
+
+// idsFromFreemaps gets all free page IDs from f.freemaps.
+// Used by tests only.
+func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} {
+	ids := make(map[common.Pgid]struct{})
+	for size, idSet := range f.freemaps {
+		for start := range idSet {
+			for i := 0; i < int(size); i++ {
+				id := start + common.Pgid(i)
+				if _, ok := ids[id]; ok {
+					panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps))
+				}
+				ids[id] = struct{}{}
+			}
+		}
+	}
+	return ids
+}
+
+// idsFromForwardMap gets all free page IDs from f.forwardMap.
+// Used by tests only.
+func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} {
+	ids := make(map[common.Pgid]struct{})
+	for start, size := range f.forwardMap {
+		for i := 0; i < int(size); i++ {
+			id := start + common.Pgid(i)
+			if _, ok := ids[id]; ok {
+				panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap))
+			}
+			ids[id] = struct{}{}
+		}
+	}
+	return ids
+}
+
+// idsFromBackwardMap gets all free page IDs from f.backwardMap.
+// Used by tests only.
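+// Editor's note (illustrative): together with idsFromFreemaps and
+// idsFromForwardMap, this lets mergeSpans verify that all three indexes
+// describe the same set of pages; a span with forwardMap[10] == 3 and
+// backwardMap[12] == 3 expands to {10, 11, 12} from either direction.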
+func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for end, size := range f.backwardMap { + for i := 0; i < int(size); i++ { + id := end - common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +func NewHashMapFreelist() Interface { + hm := &hashMap{ + shared: newShared(), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[common.Pgid]uint64), + backwardMap: make(map[common.Pgid]uint64), + } + hm.Interface = hm + return hm +} diff --git a/tools/vendor/go.etcd.io/bbolt/internal/freelist/shared.go b/tools/vendor/go.etcd.io/bbolt/internal/freelist/shared.go new file mode 100644 index 000000000..f2d113008 --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/internal/freelist/shared.go @@ -0,0 +1,310 @@ +package freelist + +import ( + "fmt" + "math" + "sort" + "unsafe" + + "go.etcd.io/bbolt/internal/common" +) + +type txPending struct { + ids []common.Pgid + alloctx []common.Txid // txids allocating the ids + lastReleaseBegin common.Txid // beginning txid of last matching releaseRange +} + +type shared struct { + Interface + + readonlyTXIDs []common.Txid // all readonly transaction IDs. + allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. + cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. + pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. +} + +func newShared() *shared { + return &shared{ + pending: make(map[common.Txid]*txPending), + allocs: make(map[common.Pgid]common.Txid), + cache: make(map[common.Pgid]struct{}), + } +} + +func (t *shared) pendingPageIds() map[common.Txid]*txPending { + return t.pending +} + +func (t *shared) PendingCount() int { + var count int + for _, txp := range t.pending { + count += len(txp.ids) + } + return count +} + +func (t *shared) Count() int { + return t.FreeCount() + t.PendingCount() +} + +func (t *shared) Freed(pgId common.Pgid) bool { + _, ok := t.cache[pgId] + return ok +} + +func (t *shared) Free(txid common.Txid, p *common.Page) { + if p.Id() <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id())) + } + + // Free page and all its overflow pages. + txp := t.pending[txid] + if txp == nil { + txp = &txPending{} + t.pending[txid] = txp + } + allocTxid, ok := t.allocs[p.Id()] + common.Verify(func() { + if allocTxid == txid { + panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid)) + } + }) + if ok { + delete(t.allocs, p.Id()) + } + + for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { + // Verify that page is not already free. + if _, ok := t.cache[id]; ok { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + t.cache[id] = struct{}{} + } +} + +func (t *shared) Rollback(txid common.Txid) { + // Remove page ids from cache. + txp := t.pending[txid] + if txp == nil { + return + } + for i, pgid := range txp.ids { + delete(t.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + t.allocs[pgid] = tx + } else { + // A writing TXN should never free a page which was allocated by itself. 
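+			// (Editor's note: Free already rejects a page freed by the
+			// transaction that allocated it, so reaching this branch
+			// signals internal state corruption rather than a normal
+			// rollback path.)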
+ panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid)) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(t.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range t.allocs { + if tid == txid { + delete(t.allocs, pgid) + } + } +} + +func (t *shared) AddReadonlyTXID(tid common.Txid) { + t.readonlyTXIDs = append(t.readonlyTXIDs, tid) +} + +func (t *shared) RemoveReadonlyTXID(tid common.Txid) { + for i := range t.readonlyTXIDs { + if t.readonlyTXIDs[i] == tid { + last := len(t.readonlyTXIDs) - 1 + t.readonlyTXIDs[i] = t.readonlyTXIDs[last] + t.readonlyTXIDs = t.readonlyTXIDs[:last] + break + } + } +} + +type txIDx []common.Txid + +func (t txIDx) Len() int { return len(t) } +func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txIDx) Less(i, j int) bool { return t[i] < t[j] } + +func (t *shared) ReleasePendingPages() { + // Free all pending pages prior to the earliest open transaction. + sort.Sort(txIDx(t.readonlyTXIDs)) + minid := common.Txid(math.MaxUint64) + if len(t.readonlyTXIDs) > 0 { + minid = t.readonlyTXIDs[0] + } + if minid > 0 { + t.release(minid - 1) + } + // Release unused txid extents. + for _, tid := range t.readonlyTXIDs { + t.releaseRange(minid, tid-1) + minid = tid + 1 + } + t.releaseRange(minid, common.Txid(math.MaxUint64)) + // Any page both allocated and freed in an extent is safe to release. +} + +func (t *shared) release(txid common.Txid) { + m := make(common.Pgids, 0) + for tid, txp := range t.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +func (t *shared) releaseRange(begin, end common.Txid) { + if begin > end { + return + } + m := common.Pgids{} + for tid, txp := range t.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +// Copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (t *shared) Copyall(dst []common.Pgid) { + m := make(common.Pgids, 0, t.PendingCount()) + for _, txp := range t.pendingPageIds() { + m = append(m, txp.ids...) + } + sort.Sort(m) + common.Mergepgids(dst, t.freePageIds(), m) +} + +func (t *shared) Reload(p *common.Page) { + t.Read(p) + t.NoSyncReload(t.freePageIds()) +} + +func (t *shared) NoSyncReload(pgIds common.Pgids) { + // Build a cache of only pending pages. + pcache := make(map[common.Pgid]bool) + for _, txp := range t.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. 
+ a := []common.Pgid{} + for _, id := range pgIds { + if !pcache[id] { + a = append(a, id) + } + } + + t.Init(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (t *shared) reindex() { + free := t.freePageIds() + pending := t.pendingPageIds() + t.cache = make(map[common.Pgid]struct{}, len(free)) + for _, id := range free { + t.cache[id] = struct{}{} + } + for _, txp := range pending { + for _, pendingID := range txp.ids { + t.cache[pendingID] = struct{}{} + } + } +} + +func (t *shared) Read(p *common.Page) { + if !p.IsFreelistPage() { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) + } + + ids := p.FreelistPageIds() + + // Copy the list of page ids from the freelist. + if len(ids) == 0 { + t.Init([]common.Pgid{}) + } else { + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]common.Pgid, len(ids)) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(common.Pgids(idsCopy)) + + t.Init(idsCopy) + } +} + +func (t *shared) EstimatedWritePageSize() int { + n := t.Count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n) +} + +func (t *shared) Write(p *common.Page) { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.SetFlags(common.FreelistPageFlag) + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + l := t.Count() + if l == 0 { + p.SetCount(uint16(l)) + } else if l < 0xFFFF { + p.SetCount(uint16(l)) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l) + t.Copyall(ids) + } else { + p.SetCount(0xFFFF) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l+1) + ids[0] = common.Pgid(l) + t.Copyall(ids[1:]) + } +} diff --git a/tools/vendor/go.etcd.io/bbolt/logger.go b/tools/vendor/go.etcd.io/bbolt/logger.go new file mode 100644 index 000000000..fb250894a --- /dev/null +++ b/tools/vendor/go.etcd.io/bbolt/logger.go @@ -0,0 +1,113 @@ +package bbolt + +// See https://github.com/etcd-io/raft/blob/main/logger.go +import ( + "fmt" + "io" + "log" + "os" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func getDiscardLogger() Logger { + return discardLogger +} + +var ( + discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)} +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. 
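+// Editor's usage sketch (illustrative, based on the API shown below): it
+// wraps a standard *log.Logger and prefixes each line with a level header:
+//
+//	l := &bbolt.DefaultLogger{Logger: log.New(os.Stderr, "", 0)}
+//	l.EnableDebug()
+//	l.Infof("opened db %q", path) // -> "INFO: opened db ..."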
+type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) +} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/tools/vendor/go.etcd.io/bbolt/mlock_unix.go b/tools/vendor/go.etcd.io/bbolt/mlock_unix.go index 744a972f5..9a0fd332c 100644 --- a/tools/vendor/go.etcd.io/bbolt/mlock_unix.go +++ b/tools/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package bbolt diff --git a/tools/vendor/go.etcd.io/bbolt/node.go b/tools/vendor/go.etcd.io/bbolt/node.go index 9c56150d8..022b1001e 100644 --- a/tools/vendor/go.etcd.io/bbolt/node.go +++ b/tools/vendor/go.etcd.io/bbolt/node.go @@ -4,7 +4,8 @@ import ( "bytes" "fmt" "sort" - "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // node represents an in-memory, deserialized page. @@ -14,10 +15,10 @@ type node struct { unbalanced bool spilled bool key []byte - pgid pgid + pgid common.Pgid parent *node children nodes - inodes inodes + inodes common.Inodes } // root returns the top-level node this node is attached to. @@ -38,10 +39,10 @@ func (n *node) minKeys() int { // size returns the size of the node after serialization. func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) } return int(sz) } @@ -50,10 +51,10 @@ func (n *node) size() int { // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. 
func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) if sz >= v { return false } @@ -64,9 +65,9 @@ func (n *node) sizeLessThan(v uintptr) bool { // pageElementSize returns the size of each page element based on the type of node. func (n *node) pageElementSize() uintptr { if n.isLeaf { - return leafPageElementSize + return common.LeafPageElementSize } - return branchPageElementSize + return common.BranchPageElementSize } // childAt returns the child node at a given index. @@ -74,12 +75,12 @@ func (n *node) childAt(index int) *node { if n.isLeaf { panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) } - return n.bucket.node(n.inodes[index].pgid, n) + return n.bucket.node(n.inodes[index].Pgid(), n) } // childIndex returns the index of a given child node. func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 }) return index } @@ -113,9 +114,9 @@ func (n *node) prevSibling() *node { } // put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { - if pgId >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) +func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) { + if pgId >= n.bucket.tx.meta.Pgid() { + panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid())) } else if len(oldKey) <= 0 { panic("put: zero-length old key") } else if len(newKey) <= 0 { @@ -123,30 +124,30 @@ func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { } // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 }) // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey) if !exact { - n.inodes = append(n.inodes, inode{}) + n.inodes = append(n.inodes, common.Inode{}) copy(n.inodes[index+1:], n.inodes[index:]) } inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgId - _assert(len(inode.key) > 0, "put: zero-length inode key") + inode.SetFlags(flags) + inode.SetKey(newKey) + inode.SetValue(value) + inode.SetPgid(pgId) + common.Assert(len(inode.Key()) > 0, "put: zero-length inode key") } // del removes a key from the node. func (n *node) del(key []byte) { // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), key) != -1 }) // Exit if the key isn't found. 
- if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].Key(), key) { return } @@ -158,30 +159,15 @@ func (n *node) del(key []byte) { } // read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } +func (n *node) read(p *common.Page) { + n.pgid = p.Id() + n.isLeaf = p.IsLeafPage() + n.inodes = common.ReadInodeFromPage(p) - // Save first key so we can find the node in the parent when we spill. + // Save first key, so we can find the node in the parent when we spill. if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") + n.key = n.inodes[0].Key() + common.Assert(len(n.key) > 0, "read: zero-length node key") } else { n.key = nil } @@ -190,57 +176,27 @@ func (n *node) read(p *page) { // write writes the items onto one or more pages. // The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set // and the rest should be zeroed. -func (n *node) write(p *page) { - _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") +func (n *node) write(p *common.Page) { + common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a not empty page") // Initialize page. if n.isLeaf { - p.flags = leafPageFlag + p.SetFlags(common.LeafPageFlag) } else { - p.flags = branchPageFlag + p.SetFlags(common.BranchPageFlag) } if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id())) } - p.count = uint16(len(n.inodes)) + p.SetCount(uint16(len(n.inodes))) // Stop here if there are no items to write. - if p.count == 0 { + if p.Count() == 0 { return } - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. 
- l := copy(b, item.key)
- copy(b[l:], item.value)
- }
+ common.WriteInodeToPage(n.inodes, p)
 
 // DEBUG ONLY: n.dump()
 }
@@ -273,7 +229,7 @@ func (n *node) split(pageSize uintptr) []*node {
 func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
 // Ignore the split if the page doesn't have at least enough nodes for
 // two pages or if the nodes can fit in a single page.
- if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+ if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) {
 return n, nil
 }
 
@@ -313,17 +269,17 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
 // It returns the index as well as the size of the first page.
 // This is only called from split().
 func (n *node) splitIndex(threshold int) (index, sz uintptr) {
- sz = pageHeaderSize
+ sz = common.PageHeaderSize
 
 // Loop until we only have the minimum number of keys required for the second page.
- for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+ for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ {
 index = uintptr(i)
 inode := n.inodes[i]
- elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))
+ elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value()))
 
 // If we have at least the minimum number of keys and adding another
 // node would put us over the threshold then exit and return.
- if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
+ if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) {
 break
 }
 
@@ -360,7 +316,7 @@ func (n *node) spill() error {
 for _, node := range nodes {
 // Add node's page to the freelist if it's not new.
 if node.pgid > 0 {
- tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
+ tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid))
 node.pgid = 0
 }
 
@@ -371,10 +327,10 @@ func (n *node) spill() error {
 }
 
 // Write the node.
- if p.id >= tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
+ if p.Id() >= tx.meta.Pgid() {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid()))
 }
- node.pgid = p.id
+ node.pgid = p.Id()
 node.write(p)
 node.spilled = true
 
@@ -382,12 +338,12 @@ func (n *node) spill() error {
 if node.parent != nil {
 var key = node.key
 if key == nil {
- key = node.inodes[0].key
+ key = node.inodes[0].Key()
 }
- node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
- node.key = node.inodes[0].key
- _assert(len(node.key) > 0, "spill: zero-length node key")
+ node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0)
+ node.key = node.inodes[0].Key()
+ common.Assert(len(node.key) > 0, "spill: zero-length node key")
 }
 
 // Update the statistics.
@@ -415,8 +371,8 @@ func (n *node) rebalance() {
 // Update statistics.
 n.bucket.tx.stats.IncRebalance(1)
 
- // Ignore if node is above threshold (25%) and has enough keys.
- var threshold = n.bucket.tx.db.pageSize / 4
+ // Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys.
+ var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2
 if n.size() > threshold && len(n.inodes) > n.minKeys() {
 return
 }
@@ -426,14 +382,14 @@ func (n *node) rebalance() {
 // If root node is a branch and only has one node then collapse it.
 // Move root's child up.
- child := n.bucket.node(n.inodes[0].pgid, n) + child := n.bucket.node(n.inodes[0].Pgid(), n) n.isLeaf = child.isLeaf n.inodes = child.inodes[:] n.children = child.children // Reparent all child nodes being moved. for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { child.parent = n } } @@ -457,53 +413,37 @@ func (n *node) rebalance() { return } - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) + // Merge with right sibling if idx == 0, otherwise left sibling. + var leftNode, rightNode *node + var useNextSibling = n.parent.childIndex(n) == 0 if useNextSibling { - target = n.nextSibling() + leftNode = n + rightNode = n.nextSibling() } else { - target = n.prevSibling() + leftNode = n.prevSibling() + rightNode = n } - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } + // If both nodes are too small then merge them. + // Reparent all child nodes being moved. + for _, inode := range rightNode.inodes { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { + child.parent.removeChild(child) + child.parent = leftNode + child.parent.children = append(child.parent.children, child) } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() } - // Either this node or the target node was deleted from the parent so rebalance it. + // Copy over inodes from right node to left node and remove right node. + leftNode.inodes = append(leftNode.inodes, rightNode.inodes...) + n.parent.del(rightNode.key) + n.parent.removeChild(rightNode) + delete(n.bucket.nodes, rightNode.pgid) + rightNode.free() + + // Either this node or the sibling node was deleted from the parent so rebalance it. 
n.parent.rebalance() } @@ -525,20 +465,20 @@ func (n *node) dereference() { key := make([]byte, len(n.key)) copy(key, n.key) n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") } for i := range n.inodes { inode := &n.inodes[i] - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") + key := make([]byte, len(inode.Key())) + copy(key, inode.Key()) + inode.SetKey(key) + common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key") - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value + value := make([]byte, len(inode.Value())) + copy(value, inode.Value()) + inode.SetValue(value) } // Recursively dereference children. @@ -553,7 +493,7 @@ func (n *node) dereference() { // free adds the node's underlying page to the freelist. func (n *node) free() { if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid)) n.pgid = 0 } } @@ -594,17 +534,5 @@ type nodes []*node func (s nodes) Len() int { return len(s) } func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 + return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/tools/vendor/go.etcd.io/bbolt/page.go b/tools/vendor/go.etcd.io/bbolt/page.go deleted file mode 100644 index bb081b031..000000000 --- a/tools/vendor/go.etcd.io/bbolt/page.go +++ /dev/null @@ -1,212 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -func (p *page) fastCheck(id pgid) { - _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) - // Only one flag of page-type can be set. 
- _assert(p.flags == branchPageFlag || - p.flags == leafPageFlag || - p.flags == metaPageFlag || - p.flags == freelistPageFlag, - "page %v: has unexpected type/flags: %x", p.id, p.flags) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. 
- if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/tools/vendor/go.etcd.io/bbolt/tx.go b/tools/vendor/go.etcd.io/bbolt/tx.go index 766395de3..1669fb16a 100644 --- a/tools/vendor/go.etcd.io/bbolt/tx.go +++ b/tools/vendor/go.etcd.io/bbolt/tx.go @@ -5,15 +5,16 @@ import ( "fmt" "io" "os" + "runtime" "sort" "strings" "sync/atomic" "time" "unsafe" -) -// txid represents the internal transaction identifier. -type txid uint64 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" +) // Tx represents a read-only or read/write transaction on the database. // Read-only transactions can be used for retrieving values for keys and creating cursors. @@ -27,9 +28,9 @@ type Tx struct { writable bool managed bool db *DB - meta *meta + meta *common.Meta root Bucket - pages map[pgid]*page + pages map[common.Pgid]*common.Page stats TxStats commitHandlers []func() @@ -48,24 +49,27 @@ func (tx *Tx) init(db *DB) { tx.pages = nil // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) + tx.meta = &common.Meta{} + db.meta().Copy(tx.meta) // Copy over the root bucket. tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root + tx.root.InBucket = &common.InBucket{} + *tx.root.InBucket = *(tx.meta.RootBucket()) // Increment the transaction id and add a page cache for writable transactions. if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) + tx.pages = make(map[common.Pgid]*common.Page) + tx.meta.IncTxid() } } // ID returns the transaction id. func (tx *Tx) ID() int { - return int(tx.meta.txid) + if tx == nil || tx.meta == nil { + return -1 + } + return int(tx.meta.Txid()) } // DB returns a reference to the database that created the transaction. @@ -75,7 +79,7 @@ func (tx *Tx) DB() *DB { // Size returns current database size in bytes as seen by this transaction. func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) + return int64(tx.meta.Pgid()) * int64(tx.db.pageSize) } // Writable returns whether the transaction can perform write operations. @@ -96,6 +100,11 @@ func (tx *Tx) Stats() TxStats { return tx.stats } +// Inspect returns the structure of the database. +func (tx *Tx) Inspect() BucketStructure { + return tx.root.Inspect() +} + // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. // The bucket instance is only valid for the lifetime of the transaction. @@ -123,6 +132,24 @@ func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. 
+// +// If src is nil, it means moving a top level bucket into the target bucket. +// If dst is nil, it means converting the child bucket into a top level bucket. +func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error { + if src == nil { + src = &tx.root + } + if dst == nil { + dst = &tx.root + } + return src.MoveBucket(child, dst) +} + // ForEach executes a function for each bucket in the root. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. @@ -137,15 +164,28 @@ func (tx *Tx) OnCommit(fn func()) { tx.commitHandlers = append(tx.commitHandlers, fn) } -// Commit writes all changes to disk and updates the meta page. +// Commit writes all changes to disk, updates the meta page and closes the transaction. // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") +func (tx *Tx) Commit() (err error) { + txId := tx.ID() + lg := tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Committing transaction %d", txId) + defer func() { + if err != nil { + lg.Errorf("Committing transaction failed: %v", err) + } else { + lg.Debugf("Committing transaction %d successfully", txId) + } + }() + } + + common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } else if !tx.writable { - return ErrTxNotWritable + return berrors.ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. @@ -157,40 +197,43 @@ func (tx *Tx) Commit() error { tx.stats.IncRebalanceTime(time.Since(startTime)) } - opgid := tx.meta.pgid + opgid := tx.meta.Pgid() // spill data onto dirty pages. startTime = time.Now() - if err := tx.root.spill(); err != nil { + if err = tx.root.spill(); err != nil { + lg.Errorf("spilling data onto dirty pages failed: %v", err) tx.rollback() return err } tx.stats.IncSpillTime(time.Since(startTime)) // Free the old root bucket. - tx.meta.root.root = tx.root.root + tx.meta.RootBucket().SetRootPage(tx.root.RootPage()) // Free the old freelist because commit writes out a fresh freelist. - if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + if tx.meta.Freelist() != common.PgidNoFreelist { + tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist())) } if !tx.db.NoFreelistSync { - err := tx.commitFreelist() + err = tx.commitFreelist() if err != nil { + lg.Errorf("committing freelist failed: %v", err) return err } } else { - tx.meta.freelist = pgidNoFreelist + tx.meta.SetFreelist(common.PgidNoFreelist) } // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { + if tx.meta.Pgid() > opgid { _ = errors.New("") // gofail: var lackOfDiskSpace string // tx.rollback() // return errors.New(lackOfDiskSpace) - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { + lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err) tx.rollback() return err } @@ -198,7 +241,8 @@ func (tx *Tx) Commit() error { // Write dirty pages to disk. 
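For the Tx.MoveBucket API introduced above, a sketch of a typical call using the documented nil-src convention; the import alias is the conventional one for go.etcd.io/bbolt, and the path and bucket names are made up:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, nil) // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		dst, err := tx.CreateBucketIfNotExists([]byte("archive"))
		if err != nil {
			return err
		}
		// src == nil: "old" is a top-level bucket being moved under "archive".
		return tx.MoveBucket([]byte("old"), nil, dst)
	})
	if err != nil {
		log.Fatal(err)
	}
}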
startTime = time.Now() - if err := tx.write(); err != nil { + if err = tx.write(); err != nil { + lg.Errorf("writing data failed: %v", err) tx.rollback() return err } @@ -208,11 +252,11 @@ func (tx *Tx) Commit() error { ch := tx.Check() var errs []string for { - err, ok := <-ch + chkErr, ok := <-ch if !ok { break } - errs = append(errs, err.Error()) + errs = append(errs, chkErr.Error()) } if len(errs) > 0 { panic("check fail: " + strings.Join(errs, "\n")) @@ -220,7 +264,8 @@ func (tx *Tx) Commit() error { } // Write meta to disk. - if err := tx.writeMeta(); err != nil { + if err = tx.writeMeta(); err != nil { + lg.Errorf("writeMeta failed: %v", err) tx.rollback() return err } @@ -240,16 +285,14 @@ func (tx *Tx) Commit() error { func (tx *Tx) commitFreelist() error { // Allocate new pages for the new free list. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1) if err != nil { tx.rollback() return err } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id + + tx.db.freelist.Write(p) + tx.meta.SetFreelist(p.Id()) return nil } @@ -257,9 +300,9 @@ func (tx *Tx) commitFreelist() error { // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") + common.Assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } tx.nonPhysicalRollback() return nil @@ -271,7 +314,7 @@ func (tx *Tx) nonPhysicalRollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) } tx.close() } @@ -282,17 +325,17 @@ func (tx *Tx) rollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) // When mmap fails, the `data`, `dataref` and `datasz` may be reset to // zero values, and there is no way to reload free page IDs in this case. if tx.db.data != nil { if !tx.db.hasSyncedFreelist() { // Reconstruct free page list by scanning the DB to get the whole free page list. - // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) + // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.NoSyncReload(tx.db.freepages()) } else { // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist())) } } } @@ -305,9 +348,9 @@ func (tx *Tx) close() { } if tx.writable { // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() + var freelistFreeN = tx.db.freelist.FreeCount() + var freelistPendingN = tx.db.freelist.PendingCount() + var freelistAlloc = tx.db.freelist.EstimatedWritePageSize() // Remove transaction ref & writer lock. tx.db.rwtx = nil @@ -335,7 +378,7 @@ func (tx *Tx) close() { // Copy writes the entire database to a writer. // This function exists for backwards compatibility. // -// Deprecated; Use WriteTo() instead. +// Deprecated: Use WriteTo() instead. 
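With Copy deprecated in favor of WriteTo, the usual backup pattern streams a consistent snapshot from inside a read transaction. A sketch under assumed file names:

package main

import (
	"log"
	"os"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, nil) // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	f, err := os.Create("app.db.bak")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The read transaction pins a snapshot; WriteTo streams exactly tx.Size() bytes.
	if err := db.View(func(tx *bolt.Tx) error {
		_, err := tx.WriteTo(f)
		return err
	}); err != nil {
		log.Fatal(err)
	}
}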
func (tx *Tx) Copy(w io.Writer) error {
 _, err := tx.WriteTo(w)
 return err
@@ -344,26 +387,53 @@ func (tx *Tx) Copy(w io.Writer) error {
 // WriteTo writes the entire database to a writer.
 // If err == nil then exactly tx.Size() bytes will be written into the writer.
 func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
- // Attempt to open reader with WriteFlag
- f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
- if err != nil {
- return 0, err
- }
- defer func() {
- if cerr := f.Close(); err == nil {
- err = cerr
+ var f *os.File
+ // There is a risk that between the time a read-only transaction
+ // is created and the time the file is actually opened, the
+ // underlying db file at tx.db.path may have been replaced
+ // (e.g. via rename). In that case, opening the file again would
+ // unexpectedly point to a different file, rather than the one
+ // the transaction was based on.
+ //
+ // To overcome this, we reuse the already opened file handle when
+ // the WriteFlag is not set. When the WriteFlag is set, we reopen the file
+ // but verify that it still refers to the same underlying file
+ // (by device and inode). If it does not, we fall back to
+ // reusing the existing already opened file handle.
+ if tx.WriteFlag != 0 {
+ // Attempt to open reader with WriteFlag
+ f, err = tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+ if err != nil {
+ return 0, err
 }
- }()
+
+ if ok, err := sameFile(tx.db.file, f); !ok {
+ lg := tx.db.Logger()
+ if cerr := f.Close(); cerr != nil {
+ lg.Errorf("failed to close the file (%s): %v", tx.db.path, cerr)
+ }
+ lg.Warningf("The underlying file has changed, so reuse the already opened file (%s): %v", tx.db.path, err)
+ f = tx.db.file
+ } else {
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
+ }
+ } else {
+ f = tx.db.file
+ }
 
 // Generate a meta page. We use the same page data for both meta pages.
 buf := make([]byte, tx.db.pageSize)
- page := (*page)(unsafe.Pointer(&buf[0]))
- page.flags = metaPageFlag
- *page.meta() = *tx.meta
+ page := (*common.Page)(unsafe.Pointer(&buf[0]))
+ page.SetFlags(common.MetaPageFlag)
+ *page.Meta() = *tx.meta
 
 // Write meta 0.
- page.id = 0
- page.meta().checksum = page.meta().sum64()
+ page.SetId(0)
+ page.Meta().SetChecksum(page.Meta().Sum64())
 nn, err := w.Write(buf)
 n += int64(nn)
 if err != nil {
@@ -371,22 +441,22 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
 }
 
 // Write meta 1 with a lower transaction id.
- page.id = 1
- page.meta().txid -= 1
- page.meta().checksum = page.meta().sum64()
+ page.SetId(1)
+ page.Meta().DecTxid()
+ page.Meta().SetChecksum(page.Meta().Sum64())
 nn, err = w.Write(buf)
 n += int64(nn)
 if err != nil {
 return n, fmt.Errorf("meta 1 copy: %s", err)
 }
 
- // Move past the meta pages in the file.
- if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
- return n, fmt.Errorf("seek: %s", err)
- }
+ // Copy data pages using a SectionReader to avoid affecting f's offset.
+ dataOffset := int64(tx.db.pageSize * 2)
+ dataSize := tx.Size() - dataOffset
+ sr := io.NewSectionReader(f, dataOffset, dataSize)
 
 // Copy data pages.
- wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + wn, err := io.CopyN(w, sr, dataSize) n += wn if err != nil { return n, err @@ -395,6 +465,19 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, nil } +func sameFile(f1, f2 *os.File) (bool, error) { + fi1, err := f1.Stat() + if err != nil { + return false, fmt.Errorf("failed to get fileInfo of the first file (%s): %w", f1.Name(), err) + } + fi2, err := f2.Stat() + if err != nil { + return false, fmt.Errorf("failed to get fileInfo of the second file (%s): %w", f2.Name(), err) + } + + return os.SameFile(fi1, fi2), nil +} + // CopyFile copies the entire database to file at the given path. // A reader transaction is maintained during the copy so it is safe to continue // using the database while a copy is in progress. @@ -413,14 +496,16 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { } // allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) +func (tx *Tx) allocate(count int) (*common.Page, error) { + lg := tx.db.Logger() + p, err := tx.db.allocate(tx.meta.Txid(), count) if err != nil { + lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err) return nil, err } // Save to our page cache. - tx.pages[p.id] = p + tx.pages[p.Id()] = p // Update statistics. tx.stats.IncPageCount(int64(count)) @@ -432,29 +517,31 @@ func (tx *Tx) allocate(count int) (*page, error) { // write writes any dirty pages to disk. func (tx *Tx) write() error { // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) + lg := tx.db.Logger() + pages := make(common.Pages, 0, len(tx.pages)) for _, p := range tx.pages { pages = append(pages, p) } // Clear out page cache early. - tx.pages = make(map[pgid]*page) + tx.pages = make(map[common.Pgid]*common.Page) sort.Sort(pages) // Write pages to disk in order. for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) + rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize) + offset := int64(p.Id()) * int64(tx.db.pageSize) var written uintptr // Write out page in "max allocation" sized chunks. for { sz := rem - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 + if sz > common.MaxAllocSize-1 { + sz = common.MaxAllocSize - 1 } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + lg.Errorf("writeAt failed, offset: %d: %w", offset, err) return err } @@ -474,9 +561,10 @@ func (tx *Tx) write() error { } // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -485,11 +573,11 @@ func (tx *Tx) write() error { for _, p := range pages { // Ignore page sizes over 1 page. // These are allocated using make() instead of the page pool. 
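tx.write above caps each writeAt call at common.MaxAllocSize-1 bytes and advances through the page buffer. A standalone sketch of that bounded-chunk pattern (maxChunk is an illustrative stand-in for the real limit):

package main

import (
	"log"
	"os"
)

// writeChunked writes buf at off, never issuing a single WriteAt larger than maxChunk.
func writeChunked(f *os.File, buf []byte, off int64, maxChunk int) error {
	for len(buf) > 0 {
		sz := len(buf)
		if sz > maxChunk {
			sz = maxChunk
		}
		if _, err := f.WriteAt(buf[:sz], off); err != nil {
			return err
		}
		off += int64(sz)
		buf = buf[sz:]
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "chunked-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := writeChunked(f, make([]byte, 1<<20), 0, 64*1024); err != nil {
		log.Fatal(err)
	}
}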
- if int(p.overflow) != 0 { + if int(p.Overflow()) != 0 { continue } - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { @@ -503,18 +591,27 @@ func (tx *Tx) write() error { // writeMeta writes the meta to the disk. func (tx *Tx) writeMeta() error { + // gofail: var beforeWriteMetaError string + // return errors.New(beforeWriteMetaError) + // Create a temporary buffer for the meta page. + lg := tx.db.Logger() buf := make([]byte, tx.db.pageSize) p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) + tx.meta.Write(p) // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + tx.db.metalock.Lock() + if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { + tx.db.metalock.Unlock() + lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err) return err } - if !tx.db.NoSync || IgnoreNoSync { + tx.db.metalock.Unlock() + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -527,69 +624,69 @@ func (tx *Tx) writeMeta() error { // page returns a reference to the page with a given id. // If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { +func (tx *Tx) page(id common.Pgid) *common.Page { // Check the dirty pages first. if tx.pages != nil { if p, ok := tx.pages[id]; ok { - p.fastCheck(id) + p.FastCheck(id) return p } } // Otherwise return directly from the mmap. p := tx.db.page(id) - p.fastCheck(id) + p.FastCheck(id) return p } // forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { - stack := make([]pgid, 10) +func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) { + stack := make([]common.Pgid, 10) stack[0] = pgidnum tx.forEachPageInternal(stack[:1], fn) } -func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { +func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) { p := tx.page(pgidstack[len(pgidstack)-1]) // Execute function. fn(p, len(pgidstack)-1, pgidstack) // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn) } } } // Page returns page information for a given page number. // This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { +func (tx *Tx) Page(id int) (*common.PageInfo, error) { if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { + return nil, berrors.ErrTxClosed + } else if common.Pgid(id) >= tx.meta.Pgid() { return nil, nil } if tx.db.freelist == nil { - return nil, ErrFreePagesNotLoaded + return nil, berrors.ErrFreePagesNotLoaded } // Build the page info. 
- p := tx.db.page(pgid(id)) - info := &PageInfo{ + p := tx.db.page(common.Pgid(id)) + info := &common.PageInfo{ ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), + Count: int(p.Count()), + OverflowCount: int(p.Overflow()), } // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { + if tx.db.freelist.Freed(common.Pgid(id)) { info.Type = "free" } else { - info.Type = p.typ() + info.Type = p.Typ() } return info, nil diff --git a/tools/vendor/go.etcd.io/bbolt/tx_check.go b/tools/vendor/go.etcd.io/bbolt/tx_check.go index 75c7c0843..c3ecbb975 100644 --- a/tools/vendor/go.etcd.io/bbolt/tx_check.go +++ b/tools/vendor/go.etcd.io/bbolt/tx_check.go @@ -3,6 +3,8 @@ package bbolt import ( "encoding/hex" "fmt" + + "go.etcd.io/bbolt/internal/common" ) // Check performs several consistency checks on the database for this transaction. @@ -13,13 +15,10 @@ import ( // because of caching. This overhead can be removed if running on a read-only // transaction, however, it is not safe to execute other writer transactions at // the same time. -func (tx *Tx) Check() <-chan error { - return tx.CheckWithOptions() -} - -// CheckWithOptions allows users to provide a customized `KVStringer` implementation, +// +// It also allows users to provide a customized `KVStringer` implementation, // so that bolt can generate human-readable diagnostic messages. -func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { +func (tx *Tx) Check(options ...CheckOption) <-chan error { chkConfig := checkConfig{ kvStringer: HexKVStringer(), } @@ -28,18 +27,22 @@ func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { } ch := make(chan error) - go tx.check(chkConfig.kvStringer, ch) + go func() { + // Close the channel to signal completion. + defer close(ch) + tx.check(chkConfig, ch) + }() return ch } -func (tx *Tx) check(kvStringer KVStringer, ch chan error) { +func (tx *Tx) check(cfg checkConfig, ch chan error) { // Force loading free list if opened in ReadOnly mode. tx.db.loadFreelist() // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) + freed := make(map[common.Pgid]bool) + all := make([]common.Pgid, tx.db.freelist.Count()) + tx.db.freelist.Copyall(all) for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) @@ -48,118 +51,171 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { } // Track every reachable page. - reachable := make(map[pgid]*page) + reachable := make(map[common.Pgid]*common.Page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.Freelist() != common.PgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ { + reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist()) } } - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) + if cfg.pageId == 0 { + // Check the whole db file, starting from the root bucket and + // recursively check all child buckets. + tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch) - // Ensure all pages below high water mark are either reachable or freed. 
- for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + // Ensure all pages below high water mark are either reachable or freed. + for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } } + } else { + // Check the db file starting from a specified pageId. + if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) { + ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid()) + return + } + + tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch) } +} - // Close the channel to signal completion. - close(ch) +func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch) } -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, +func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, kvStringer KVStringer, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } + p := tx.page(pageId) - // Check every page used by this bucket. - b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) + switch { + case p.IsBranchPage(): + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch) } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + case p.IsLeafPage(): + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + if elem.IsBucketEntry() { + inBkt := common.NewInBucket(pageId, 0) + tmpBucket := Bucket{ + InBucket: &inBkt, + rootNode: &node{isLeaf: p.IsLeafPage()}, + FillPercent: DefaultFillPercent, + tx: tx, + } + if child := tmpBucket.Bucket(elem.Key()); child != nil { + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) + } } - reachable[id] = p } + default: + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId) + } +} - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) - } - }) +func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + // Ignore inline buckets. + if b.RootPage() == 0 { + return + } - tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) + tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch) // Check each bucket within this bucket. 
_ = b.ForEachBucket(func(k []byte) error { if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) } return nil }) } -// recursivelyCheckPages confirms database consistency with respect to b-tree +func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) { + verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch) + }) + + tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch) +} + +func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) { + if p.Id() > hwm { + ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack) + } + + // Ensure each page is only referenced once. + for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ { + var id = p.Id() + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.Id()] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.Id())) + } else if !p.IsBranchPage() && !p.IsLeafPage() { + ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack) + } +} + +// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree // key order constraints: // - keys on pages must be sorted // - keys on children pages are between 2 consecutive keys on the parent's branch page). -func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) { - tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) +func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) { + tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch) } -// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are: +// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are: // - >=`minKeyClosed` (can be nil) // - <`maxKeyOpen` (can be nil) // - Are in right ordering relationship to their parents. // `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. -func (tx *Tx) recursivelyCheckPagesInternal( - pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid, +func (tx *Tx) recursivelyCheckPageKeyOrderInternal( + pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid, keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { p := tx.page(pgId) pagesStack = append(pagesStack, pgId) switch { - case p.flags&branchPageFlag != 0: + case p.IsBranchPage(): // For branch page we navigate ranges of all subpages. 
 runningMin := minKeyClosed
- for i := range p.branchPageElements() {
- elem := p.branchPageElement(uint16(i))
- verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
+ for i := range p.BranchPageElements() {
+ elem := p.BranchPageElement(uint16(i))
+ verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
 
 maxKey := maxKeyOpen
- if i < len(p.branchPageElements())-1 {
- maxKey = p.branchPageElement(uint16(i + 1)).key()
+ if i < len(p.BranchPageElements())-1 {
+ maxKey = p.BranchPageElement(uint16(i + 1)).Key()
 }
- maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch)
+ maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch)
 runningMin = maxKeyInSubtree
 }
 return maxKeyInSubtree
- case p.flags&leafPageFlag != 0:
+ case p.IsLeafPage():
 runningMin := minKeyClosed
- for i := range p.leafPageElements() {
- elem := p.leafPageElement(uint16(i))
- verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
- runningMin = elem.key()
+ for i := range p.LeafPageElements() {
+ elem := p.LeafPageElement(uint16(i))
+ verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
+ runningMin = elem.Key()
 }
- if p.count > 0 {
- return p.leafPageElement(p.count - 1).key()
+ if p.Count() > 0 {
+ return p.LeafPageElement(p.Count() - 1).Key()
 }
 default:
- ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId)
+ ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId)
 }
 return maxKeyInSubtree
 }
@@ -168,7 +224,7 @@ func (tx *Tx) recursivelyCheckPagesInternal(
 * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key",
 * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch).
 */
-func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) {
+func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) {
 if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 {
 ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v",
 index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
@@ -194,6 +250,7 @@ func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousK
 
 type checkConfig struct {
 kvStringer KVStringer
+ pageId uint64
 }
 
 type CheckOption func(options *checkConfig)
@@ -204,6 +261,13 @@ func WithKVStringer(kvStringer KVStringer) CheckOption {
 }
 }
 
+// WithPageId sets a page ID from which the check command starts checking
+func WithPageId(pageId uint64) CheckOption {
+ return func(c *checkConfig) {
+ c.pageId = pageId
+ }
+}
+
 // KVStringer allows callers to prepare human-readable diagnostic messages.
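With CheckWithOptions folded into the variadic Tx.Check and the new WithPageId option above, a caller can scope a consistency check to the subtree rooted at one page. A sketch; the path and page ID are made up, and IDs below 2 are rejected by the range check above:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.View(func(tx *bolt.Tx) error {
		// Drain the error channel; it is closed when the check completes.
		for cerr := range tx.Check(bolt.WithPageId(3)) {
			log.Printf("consistency violation: %v", cerr)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}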
type KVStringer interface {
 KeyToString([]byte) string
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/tools/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
new file mode 100644
index 000000000..773c9b643
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+# Contributing to go.opentelemetry.io/auto/sdk
+
+The `go.opentelemetry.io/auto/sdk` module is a purpose-built OpenTelemetry SDK.
+It is designed to be:
+
+0. An OpenTelemetry-compliant SDK
+1. Instrumented by auto-instrumentation (serializable into OTLP JSON)
+2. Lightweight
+3. User-friendly
+
+These design choices are listed in the order of their importance.
+
+The primary design goal of this module is to be an OpenTelemetry SDK.
+This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`.
+
+Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument.
+The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP.
+This then ensures that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent.
+
+Outside of these first two goals, the intended use becomes relevant.
+This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
+Because of this, this package must not add unnecessary dependencies to that API.
+Ideally, it adds none.
+It also needs to operate efficiently.
+
+Finally, this module is designed to be user-friendly to Go development.
+It hides complexity in order to provide simpler APIs when the previous goals can all still be met.
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/tools/vendor/go.opentelemetry.io/auto/sdk/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/tools/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 000000000..088d19a6c --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. 
+
+* GitHub releases will be made for all releases.
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/doc.go b/tools/vendor/go.opentelemetry.io/auto/sdk/doc.go
new file mode 100644
index 000000000..ad73d8cb9
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/doc.go
@@ -0,0 +1,14 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package sdk provides an auto-instrumentable OpenTelemetry SDK.
+
+An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the
+process running this SDK. In that case, all telemetry the SDK produces will be
+processed and handled by that [go.opentelemetry.io/auto.Instrumentation].
+
+By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to
+auto-instrument the SDK, the SDK will not generate any telemetry.
+*/
+package sdk
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
new file mode 100644
index 000000000..af6ef171f
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// Attr is a key-value pair.
+type Attr struct {
+	Key   string `json:"key,omitempty"`
+	Value Value  `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+	return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+	return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+	return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+	return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+	return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+	return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+	return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+	return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+	return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
new file mode 100644
index 000000000..949e2165c
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry
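The Attr constructors above are thin wrappers that pair a key with the matching Value kind. A minimal sketch of how they compose; illustrative only, since internal/telemetry is an internal package and is importable only from within the auto/sdk module itself:

```go
package main

import (
	"fmt"

	// Internal package: only compiles from inside go.opentelemetry.io/auto/sdk.
	"go.opentelemetry.io/auto/sdk/internal/telemetry"
)

func main() {
	// Each helper wraps its argument in the corresponding Value kind.
	attrs := []telemetry.Attr{
		telemetry.String("service.name", "checkout"),
		telemetry.Int("retry.count", 3),
		telemetry.Map("http", telemetry.String("method", "GET")),
	}

	// Equal compares key and value, so structurally equal Attrs match.
	fmt.Println(attrs[0].Equal(telemetry.String("service.name", "checkout"))) // true
}
```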
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
new file mode 100644
index 000000000..e854d7e84
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+const (
+	traceIDSize = 16
+	spanIDSize  = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+	return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty reports whether tid contains only zero bytes.
+func (tid TraceID) IsEmpty() bool {
+	return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+	if tid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from a hex string, possibly enclosed
+// in quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+	*tid = [traceIDSize]byte{}
+	return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+	return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty reports whether sid contains only zero bytes.
+func (sid SpanID) IsEmpty() bool {
+	return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+	if sid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from a hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+	*sid = [spanIDSize]byte{}
+	return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+	// Plus 2 quote chars at the start and end.
+	hexLen := hex.EncodedLen(len(id)) + 2
+
+	b := make([]byte, hexLen)
+	hex.Encode(b[1:hexLen-1], id)
+	b[0], b[hexLen-1] = '"', '"'
+
+	return b, nil
+}
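The quoted-hex encoding implemented by marshalJSON above is what OTLP/JSON expects for IDs, with all-zero IDs short-circuiting to an empty string. A small sketch of the observable behavior (again assuming code inside the module, since the package is internal):

```go
package main

import (
	"encoding/hex"
	"fmt"

	"go.opentelemetry.io/auto/sdk/internal/telemetry"
)

func main() {
	var tid telemetry.TraceID
	// 32 hex characters decode into the 16-byte array.
	_, _ = hex.Decode(tid[:], []byte("4bf92f3577b34da6a3ce929d0e0e4736"))

	b, _ := tid.MarshalJSON()
	fmt.Println(string(b)) // "4bf92f3577b34da6a3ce929d0e0e4736"

	// An all-zero ID is "empty" and marshals to an empty JSON string.
	var sid telemetry.SpanID
	b, _ = sid.MarshalJSON()
	fmt.Println(string(b)) // ""
}
```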
+
+// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+	if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+		src = src[1 : l-1]
+	}
+	nLen := len(src)
+	if nLen == 0 {
+		return nil
+	}
+
+	if len(dst) != hex.DecodedLen(nLen) {
+		return errors.New("invalid length for ID")
+	}
+
+	_, err := hex.Decode(dst, src)
+	if err != nil {
+		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
new file mode 100644
index 000000000..29e629d66
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedInt, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	} else {
+		var parsedInt int64
+		if err := json.Unmarshal(data, &parsedInt); err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	}
+	return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedUint, err := strconv.ParseUint(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	} else {
+		var parsedUint uint64
+		if err := json.Unmarshal(data, &parsedUint); err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
new file mode 100644
index 000000000..cecad8bae
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Resource information.
+type Resource struct {
+	// Attrs are the set of attributes that describe the resource. Attribute
+	// keys MUST be unique (it is not allowed to have more than one attribute
+	// with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// DroppedAttrs is the number of dropped attributes. If the value
+	// is 0, then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
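protoInt64 and protoUint64 exist because OTLP/JSON serializes 64-bit integers as JSON strings, while some producers emit bare numbers; both forms must decode. The types are unexported, so the following is a re-sketch of the same dual decoding under a hypothetical name, flexInt64:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flexInt64 is a hypothetical stand-in for the vendored protoInt64: it
// accepts both the string and the numeric JSON encodings of an int64.
type flexInt64 int64

func (i *flexInt64) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && data[0] == '"' {
		var s string
		if err := json.Unmarshal(data, &s); err != nil {
			return err
		}
		// Re-parse the quoted payload as a bare number.
		return json.Unmarshal([]byte(s), (*int64)(i))
	}
	return json.Unmarshal(data, (*int64)(i))
}

func main() {
	var a, b flexInt64
	_ = json.Unmarshal([]byte(`"1731857548000000000"`), &a) // string form
	_ = json.Unmarshal([]byte(`42`), &b)                    // numeric form
	fmt.Println(a, b) // 1731857548000000000 42
}
```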
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Resource type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Resource field: %#v", keyIface)
+		}
+
+		switch key {
+		case "attributes":
+			err = decoder.Decode(&r.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&r.DroppedAttrs)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
new file mode 100644
index 000000000..b6f2e28d4
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+	Name         string `json:"name,omitempty"`
+	Version      string `json:"version,omitempty"`
+	Attrs        []Attr `json:"attributes,omitempty"`
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Scope type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Scope field: %#v", keyIface)
+		}
+
+		switch key {
+		case "name":
+			err = decoder.Decode(&s.Name)
+		case "version":
+			err = decoder.Decode(&s.Version)
+		case "attributes":
+			err = decoder.Decode(&s.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&s.DroppedAttrs)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
new file mode 100644
index 000000000..a13a6b733
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
@@ -0,0 +1,456 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+	// A unique identifier for a trace. All spans from the same trace share
+	// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+	// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is required.
+	TraceID TraceID `json:"traceId,omitempty"`
+	// A unique identifier for a span within a trace, assigned when the span
+	// is created.
The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. 
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Span field: %#v", keyIface)
+		}
+
+		switch key {
+		case "traceId", "trace_id":
+			err = decoder.Decode(&s.TraceID)
+		case "spanId", "span_id":
+			err = decoder.Decode(&s.SpanID)
+		case "traceState", "trace_state":
+			err = decoder.Decode(&s.TraceState)
+		case "parentSpanId", "parent_span_id":
+			err = decoder.Decode(&s.ParentSpanID)
+		case "flags":
+			err = decoder.Decode(&s.Flags)
+		case "name":
+			err = decoder.Decode(&s.Name)
+		case "kind":
+			err = decoder.Decode(&s.Kind)
+		case "startTimeUnixNano", "start_time_unix_nano":
+			var val protoUint64
+			err = decoder.Decode(&val)
+			s.StartTime = time.Unix(0, int64(val.Uint64()))
+		case "endTimeUnixNano", "end_time_unix_nano":
+			var val protoUint64
+			err = decoder.Decode(&val)
+			s.EndTime = time.Unix(0, int64(val.Uint64()))
+		case "attributes":
+			err = decoder.Decode(&s.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&s.DroppedAttrs)
+		case "events":
+			err = decoder.Decode(&s.Events)
+		case "droppedEventsCount", "dropped_events_count":
+			err = decoder.Decode(&s.DroppedEvents)
+		case "links":
+			err = decoder.Decode(&s.Links)
+		case "droppedLinksCount", "dropped_links_count":
+			err = decoder.Decode(&s.DroppedLinks)
+		case "status":
+			err = decoder.Decode(&s.Status)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+//	(span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+	// Bits 0-7 are used for trace flags.
+	SpanFlagsTraceFlagsMask SpanFlags = 255
+	// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+	// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+	// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+	SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+	// SpanFlagsContextIsRemoteMask indicates the Span is remote.
+	SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	SpanKindInternal SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
+	SpanKindServer SpanKind = 2
+	// Indicates that the span describes a request to some remote service.
+	SpanKindClient SpanKind = 3
+	// Indicates that the span describes a producer sending a message to a broker.
+	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+	// between producer and consumer spans.
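Span.MarshalJSON earlier in this file relies on the `type Alias` idiom: the locally declared alias keeps Span's fields but sheds its methods, so marshaling the embedded alias cannot recurse into the custom MarshalJSON, while sibling fields in the wrapper shadow the ones being overridden. A self-contained sketch of the same device on a hypothetical Event type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Event stands in for any type whose MarshalJSON must override one field.
type Event struct {
	Name string    `json:"name"`
	Time time.Time `json:"timeUnixNano"`
}

func (e Event) MarshalJSON() ([]byte, error) {
	type Alias Event // Alias has Event's fields but none of its methods.
	return json.Marshal(struct {
		Alias
		Time uint64 `json:"timeUnixNano,omitempty"` // shadows the embedded field
	}{Alias: Alias(e), Time: uint64(e.Time.UnixNano())})
}

func main() {
	b, _ := json.Marshal(Event{Name: "ok", Time: time.Unix(0, 12345)})
	fmt.Println(string(b)) // {"name":"ok","timeUnixNano":12345}
}
```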
A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
+ Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 000000000..1217776ea --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. 
+ Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 000000000..69a348f0f --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
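Traces, ResourceSpans, and ScopeSpans above mirror the OTLP resource → scope → span hierarchy, and every UnmarshalJSON in this package accepts both the camelCase and snake_case spellings of each key. A sketch of decoding a minimal payload with mixed key styles (inside the module, as the package is internal):

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/auto/sdk/internal/telemetry"
)

func main() {
	// "resourceSpans" is camelCase, "scope_spans" snake_case; both decode.
	payload := []byte(`{"resourceSpans":[{"scope_spans":[{
		"scope":{"name":"manual"},
		"spans":[{"name":"GET /users","kind":2}]}]}]}`)

	var td telemetry.Traces
	if err := json.Unmarshal(payload, &td); err != nil {
		panic(err)
	}

	span := td.ResourceSpans[0].ScopeSpans[0].Spans[0]
	fmt.Println(span.Name, span.Kind) // GET /users 2
}
```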
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "resource":
+			err = decoder.Decode(&rs.Resource)
+		case "scopeSpans", "scope_spans":
+			err = decoder.Decode(&rs.ScopeSpans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&rs.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+	// The instrumentation scope information for the spans in this message.
+	// Semantically when InstrumentationScope isn't set, it is equivalent with
+	// an empty instrumentation scope name (unknown).
+	Scope *Scope `json:"scope"`
+	// A list of Spans that originate from an instrumentation scope.
+	Spans []*Span `json:"spans,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the span data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to all spans and span events in the "spans" field.
+	SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid ScopeSpans type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "scope":
+			err = decoder.Decode(&ss.Scope)
+		case "spans":
+			err = decoder.Decode(&ss.Spans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&ss.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
new file mode 100644
index 000000000..0dd01b063
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=ValueKind -trimprefix=ValueKind
+
+package telemetry
+
+import (
+	"bytes"
+	"cmp"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"strconv"
+	"unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused // This is indeed used.
+
+	// num holds the value for Int64, Float64, and Bool. It holds the length
+	// for String, Bytes, Slice, Map.
+	num uint64
+	// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+	// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+	// then the value of Value is in num as described above. Otherwise, it
+	// contains the value wrapped in the appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for KindString Values.
+ stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. + bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{num: uint64(v), any: ValueKindInt64} +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. 
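Value packs small scalars directly into num and stores strings, bytes, slices, and maps as a raw pointer plus a length, the same trick slog.Value uses to stay allocation-free. A deliberately simplified sketch of the string case; it assumes Go 1.20+ for unsafe.String/unsafe.StringData, and unlike the real type it does not dispatch on the pointer kind stored in any:

```go
package main

import (
	"fmt"
	"unsafe"
)

// compactString mimics how Value stores strings without boxing the whole
// string header: the pointer to the bytes is kept separately, the length
// lives in num.
type compactString struct {
	num uint64
	ptr *byte
}

func pack(s string) compactString {
	return compactString{num: uint64(len(s)), ptr: unsafe.StringData(s)}
}

func (c compactString) unpack() string {
	// Rebuild the string header from pointer and length.
	return unsafe.String(c.ptr, c.num)
}

func main() {
	c := pack("hello")
	fmt.Println(c.unpack()) // hello
}
```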
+func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). 
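Equal treats map-kind Values as unordered: sortMap copies and key-sorts both sides before the element-wise comparison, so logically equal maps match regardless of insertion order. The same approach, sketched on a stand-in pair type:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type pair struct{ k, v string }

// equalUnordered follows the sortMap approach: clone, sort by key, then
// compare element-wise.
func equalUnordered(a, b []pair) bool {
	sa, sb := slices.Clone(a), slices.Clone(b)
	byKey := func(x, y pair) int { return cmp.Compare(x.k, y.k) }
	slices.SortFunc(sa, byKey)
	slices.SortFunc(sb, byKey)
	return slices.Equal(sa, sb)
}

func main() {
	a := []pair{{"b", "2"}, {"a", "1"}}
	b := []pair{{"a", "1"}, {"b", "2"}}
	fmt.Println(equalUnordered(a, b)) // true
}
```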
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a ValueKind is not handled. It is preferable to have
+		// users open an issue asking why their attributes have an
+		// "<unhandled: " prefix than to say that their code is panicking.
+		return fmt.Sprintf("<unhandled: %v>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	switch v.Kind() {
+	case ValueKindString:
+		return json.Marshal(struct {
+			Value string `json:"stringValue"`
+		}{v.asString()})
+	case ValueKindInt64:
+		return json.Marshal(struct {
+			Value string `json:"intValue"`
+		}{strconv.FormatInt(int64(v.num), 10)})
+	case ValueKindFloat64:
+		return json.Marshal(struct {
+			Value float64 `json:"doubleValue"`
+		}{v.asFloat64()})
+	case ValueKindBool:
+		return json.Marshal(struct {
+			Value bool `json:"boolValue"`
+		}{v.asBool()})
+	case ValueKindBytes:
+		return json.Marshal(struct {
+			Value []byte `json:"bytesValue"`
+		}{v.asBytes()})
+	case ValueKindMap:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Attr `json:"values"`
+			} `json:"kvlistValue"`
+		}{struct {
+			Values []Attr `json:"values"`
+		}{v.asMap()}})
+	case ValueKindSlice:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Value `json:"values"`
+			} `json:"arrayValue"`
+		}{struct {
+			Values []Value `json:"values"`
+		}{v.asSlice()}})
+	case ValueKindEmpty:
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+	}
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Value type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Value key: %#v", keyIface)
+		}
+
+		switch key {
+		case "stringValue", "string_value":
+			var val string
+			err = decoder.Decode(&val)
+			*v = StringValue(val)
+		case "boolValue", "bool_value":
+			var val bool
+			err = decoder.Decode(&val)
+			*v = BoolValue(val)
+		case "intValue", "int_value":
+			var val protoInt64
+			err = decoder.Decode(&val)
+			*v = Int64Value(val.Int64())
+		case "doubleValue", "double_value":
+			var val float64
+			err = decoder.Decode(&val)
+			*v = Float64Value(val)
+		case "bytesValue", "bytes_value":
+			var val64 string
+			if err := decoder.Decode(&val64); err != nil {
+				return err
+			}
+			var val []byte
+			val, err = base64.StdEncoding.DecodeString(val64)
+			*v = BytesValue(val)
+		case "arrayValue", "array_value":
+			var val struct{ Values []Value }
+			err = decoder.Decode(&val)
+			*v = SliceValue(val.Values...)
+		case "kvlistValue", "kvlist_value":
+			var val struct{ Values []Attr }
+			err = decoder.Decode(&val)
+			*v = MapValue(val.Values...)
+		default:
+			// Skip unknown.
+			continue
+		}
+		// Use first valid. Ignore the rest.
+		return err
+	}
+
+	// Only unknown fields. Return nil without unmarshaling any value.
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/limit.go b/tools/vendor/go.opentelemetry.io/auto/sdk/limit.go
new file mode 100644
index 000000000..86babf1a8
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/auto/sdk/limit.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+	"log/slog"
+	"os"
+	"strconv"
+)
+
+// maxSpan are the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+	// Attrs is the number of allowed attributes for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+	// that is not set, is used.
+	Attrs int
+	// AttrValueLen is the maximum attribute value length allowed for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+	// if that is not set, is used.
+	AttrValueLen int
+	// Events is the number of allowed events for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+	Events int
+	// EventAttrs is the number of allowed attributes for a span event.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+	EventAttrs int
+	// Links is the number of allowed Links for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+	Links int
+	// LinkAttrs is the number of allowed attributes for a span link.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+	LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+	return spanLimits{
+		Attrs: firstEnv(
+			128,
+			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+			"OTEL_ATTRIBUTE_COUNT_LIMIT",
+		),
+		AttrValueLen: firstEnv(
+			-1, // Unlimited.
+			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+		),
+		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+	}
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/span.go b/tools/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 000000000..6ebea12a9 --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
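firstEnv gives the span-specific OTEL_* keys precedence over the general ones and falls back to the default when no key is set or parses; an unparsable value is logged and the search continues with the next key. A stand-alone sketch of the lookup order under the hypothetical name firstIntEnv:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// firstIntEnv is a hypothetical re-sketch of the vendored firstEnv helper:
// the first set, parsable key wins; otherwise the default applies.
func firstIntEnv(def int, keys ...string) int {
	for _, key := range keys {
		s := os.Getenv(key)
		if s == "" {
			continue
		}
		if v, err := strconv.Atoi(s); err == nil {
			return v
		}
		// An unparsable value falls through to the next key.
	}
	return def
}

func main() {
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	// The span-specific key is unset, so the general key applies.
	limit := firstIntEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")
	fmt.Println(limit) // 64
}
```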
+ return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
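The heart of truncate is counting runes instead of bytes so a multi-byte UTF-8 sequence is never cut in half. A simplified sketch of that core idea; unlike the vendored version, it does not also strip invalid encodings first:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// truncateRunes keeps at most limit runes, cutting only at rune boundaries.
func truncateRunes(limit int, s string) string {
	if limit < 0 || len(s) <= limit {
		return s
	}
	count := 0
	for i := range s { // i advances by whole runes, not bytes
		if count == limit {
			return s[:i]
		}
		count++
	}
	return s
}

func main() {
	fmt.Println(truncateRunes(3, "héllo"))                    // hél, not a split é byte
	fmt.Println(utf8.ValidString(truncateRunes(3, "héllo"))) // true
}
```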
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/tools/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 000000000..cbcfabde3 --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/tools/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/tools/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 000000000..dbc477a59 --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
+ return tracer{
+ name: name,
+ version: cfg.InstrumentationVersion(),
+ schemaURL: cfg.SchemaURL(),
+ }
+}
diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
index 6aae83bfd..b25641c55 100644
--- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
+++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
 
 // Get is a convenient replacement for http.Get that adds a span around the request.
 func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
 if err != nil {
 return nil, err
 }
@@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error)
 
 // Head is a convenient replacement for http.Head that adds a span around the request.
 func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil)
 if err != nil {
 return nil, err
 }
@@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error)
 
 // Post is a convenient replacement for http.Post that adds a span around the request.
 func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body)
 if err != nil {
 return nil, err
 }
diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
index a01bfafbe..6bd50d4c9 100644
--- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -176,6 +176,10 @@ func WithMessageEvents(events ...event) Option {
 // WithSpanNameFormatter takes a function that will be called on every
 // request and the returned string will become the Span Name.
+//
+// When using [http.ServeMux] (or any middleware that sets the Pattern of [http.Request]),
+// the span name formatter will run twice: once when the span is created, and
+// a second time after the middleware has run, so the route pattern can be used.
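A minimal usage sketch of the option documented above (an editorial illustration, not part of the vendored patch; the package name and route are hypothetical). With Go 1.23+, http.ServeMux sets Request.Pattern, so the second formatter invocation can name the span after the route pattern:

    package demo

    import (
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func newInstrumentedMux(userHandler http.Handler) http.Handler {
        mux := http.NewServeMux()
        mux.Handle("GET /users/{id}", userHandler)
        return otelhttp.NewHandler(mux, "http.server",
            otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
                if r.Pattern != "" {
                    return r.Pattern // Second run: ServeMux has set the pattern.
                }
                return operation // First run: span creation, before routing.
            }),
        )
    }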
func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { return optionFunc(func(c *config) { c.SpanNameFormatter = f diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index e555a475f..937f9b4e7 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -21,15 +22,16 @@ type middleware struct { operation string server string - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + metricAttributesFn func(*http.Request) []attribute.KeyValue semconv semconv.HTTPServer } @@ -79,6 +81,7 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) + h.metricAttributesFn = c.MetricAttributesFn } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -95,7 +98,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r, semconv.RequestTraceAttrsOpts{})...), } opts = append(opts, h.spanStartOptions...) @@ -173,7 +176,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx = ContextWithLabeler(ctx, labeler) } - next.ServeHTTP(w, r.WithContext(ctx)) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + + if r.Pattern != "" { + span.SetName(h.spanNameFormatter(h.operation, r)) + } statusCode := rww.StatusCode() bytesWritten := rww.BytesWritten() @@ -189,14 +197,16 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + metricAttributes := semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), + } + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ - ServerName: h.server, - ResponseSize: bytesWritten, - MetricAttributes: semconv.MetricAttributes{ - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - }, + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: metricAttributes, MetricData: semconv.MetricData{ RequestSize: bw.BytesRead(), ElapsedTime: elapsedTime, @@ -204,6 +214,14 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http }) } +func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if h.metricAttributesFn != nil { + attributeForRequest = h.metricAttributesFn(r) + } + return attributeForRequest +} + // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go index a945f5566..d032aa841 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -1,6 +1,11 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/request/body_wrapper.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package request provides types and functionality to handle HTTP request +// handling. package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" import ( @@ -53,7 +58,7 @@ func (w *BodyWrapper) updateReadData(n int64, err error) { } } -// Closes closes the io.ReadCloser. +// Close closes the io.ReadCloser. 
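The metricAttributesFromRequest hook added to the handler above lets callers attach per-request attributes to the recorded metrics. A minimal sketch, assuming the corresponding public option is otelhttp.WithMetricAttributesFn (the option definition sits outside this hunk, so treat the name as an assumption; the header name is illustrative):

    package demo

    import (
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
        "go.opentelemetry.io/otel/attribute"
    )

    func withTenantMetrics(next http.Handler) http.Handler {
        return otelhttp.NewHandler(next, "http.server",
            otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
                // Added to the size and duration metrics recorded for this request.
                return []attribute.KeyValue{attribute.String("app.tenant", r.Header.Get("X-Tenant"))}
            }),
        )
    }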
func (w *BodyWrapper) Close() error {
 return w.ReadCloser.Close()
 }
diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
new file mode 100644
index 000000000..9e00dd2fc
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+// Generate request package:
+//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper.go.tmpl "--data={}" --out=body_wrapper.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper_test.go.tmpl "--data={}" --out=body_wrapper_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper.go.tmpl "--data={}" --out=resp_writer_wrapper.go
+//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper_test.go.tmpl "--data={}" --out=resp_writer_wrapper_test.go
diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
index fbc344cbd..ca2e4c14c 100644
--- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
+++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
@@ -1,3 +1,6 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/request/resp_writer_wrapper.go.tmpl
+
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
@@ -102,7 +105,7 @@ func (w *RespWriterWrapper) BytesWritten() int64 {
 return w.written
 }
-// BytesWritten returns the HTTP status code that was sent.
+// StatusCode returns the HTTP status code that was sent.
 func (w *RespWriterWrapper) StatusCode() int {
 w.mu.RLock()
 defer w.mu.RUnlock()
diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index fb893b250..7cb9693d9 100644
--- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -1,3 +1,6 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/env.go.tmpl
+
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
@@ -9,12 +12,17 @@ import (
 "net/http"
 "os"
 "strings"
+ "sync"
 
 "go.opentelemetry.io/otel/attribute"
 "go.opentelemetry.io/otel/codes"
 "go.opentelemetry.io/otel/metric"
 )
 
+// OTelSemConvStabilityOptIn is an environment variable that can be set to
+// "http/dup" to keep producing the old HTTP semantic conventions.
+const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" + type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -30,6 +38,11 @@ type HTTPServer struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram + + // New metrics + requestBodySizeHistogram metric.Int64Histogram + responseBodySizeHistogram metric.Int64Histogram + requestDurationHistogram metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -48,26 +61,40 @@ type HTTPServer struct { // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { + attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) + if s.duplicate { + return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs) + } + return attrs +} + +func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return []attribute.KeyValue{ + OldHTTPServer{}.NetworkTransportAttr(network), + CurrentHTTPServer{}.NetworkTransportAttr(network), + } + } + return []attribute.KeyValue{ + CurrentHTTPServer{}.NetworkTransportAttr(network), } - return oldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp) if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return attrs } // Route returns the attribute for the route. func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return CurrentHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -99,32 +126,72 @@ type MetricAttributes struct { type MetricData struct { RequestSize int64 + + // The request duration, in milliseconds ElapsedTime float64 } +var ( + metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.RecordOption{} + }, + } +) + func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. 
- return + if s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) + s.requestDurationHistogram.Record(ctx, md.ElapsedTime/1000.0, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) } - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) - s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + } +} - // TODO: Duplicate Metrics +// hasOptIn returns true if the comma-separated version string contains the +// exact optIn value. 
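The matching described above is exact per comma-separated token, never substring-based. A self-contained sketch (the helper is duplicated here only so the example runs on its own):

    package main

    import (
        "fmt"
        "strings"
    )

    // hasOptIn mirrors the vendored helper: exact match per comma-separated token.
    func hasOptIn(version, optIn string) bool {
        for _, v := range strings.Split(version, ",") {
            if strings.TrimSpace(v) == optIn {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(hasOptIn("database/dup, http/dup", "http/dup")) // true: second token matches after trimming.
        fmt.Println(hasOptIn("http/duplicate", "http/dup"))         // false: substrings do not match.
    }

So OTEL_SEMCONV_STABILITY_OPT_IN=http/dup,database/dup enables the duplicate HTTP conventions, while a value that merely contains "http/dup" as a substring does not.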
+func hasOptIn(version, optIn string) bool { + for _, v := range strings.Split(version, ",") { + if strings.TrimSpace(v) == optIn { + return true + } + } + return false } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - duplicate := env == "http/dup" + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := hasOptIn(env, "http/dup") server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + if duplicate { + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + } return server } @@ -135,32 +202,42 @@ type HTTPClient struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram + + // new metrics + requestBodySize metric.Int64Histogram + requestDuration metric.Float64Histogram } func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := hasOptIn(env, "http/dup") client := HTTPClient{ - duplicate: env == "http/dup", + duplicate: duplicate, + } + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + if duplicate { + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) } - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter) + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.RequestTraceAttrs(req) if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return OldHTTPClient{}.RequestTraceAttrs(req, attrs) } - return oldHTTPClient{}.RequestTraceAttrs(req) + return attrs } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp) if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) 
+ return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs) } - - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return attrs } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -174,11 +251,7 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { } func (c HTTPClient) ErrorType(err error) attribute.KeyValue { - if c.duplicate { - return newHTTPClient{}.ErrorType(err) - } - - return attribute.KeyValue{} + return CurrentHTTPClient{}.ErrorType(err) } type MetricOpts struct { @@ -194,34 +267,57 @@ func (o MetricOpts) AddOptions() metric.AddOption { return o.addOptions } -func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { - attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - // TODO: Duplicate Metrics +func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - return MetricOpts{ + opts["new"] = MetricOpts{ measurement: set, addOptions: set, } + + if c.duplicate { + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["old"] = MetricOpts{ + measurement: set, + addOptions: set, + } + } + + return opts } -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { - if s.requestBytesCounter == nil || s.latencyMeasure == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { + if s.requestBodySize == nil || s.requestDuration == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } - s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) - // TODO: Duplicate Metrics + if s.duplicate { + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) + } } -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { if s.responseBytesCounter == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
return } - s.responseBytesCounter.Add(ctx, responseData, opts) - // TODO: Duplicate Metrics + s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) +} + +func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.TraceAttributes(host) + if s.duplicate { + return OldHTTPClient{}.TraceAttributes(host, attrs) + } + + return attrs } diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go new file mode 100644 index 000000000..f2cf8a152 --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +// Generate semconv package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67b..53976b0d5 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,22 +1,35 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/httpconv.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package semconv provides OpenTelemetry semantic convention types and +// functionality. 
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( "fmt" "net/http" "reflect" + "slices" "strconv" "strings" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type RequestTraceAttrsOpts struct { + // If set, this is used as value for the "http.client_ip" attribute. + HTTPClientIP string +} + +type CurrentHTTPServer struct{} -// TraceRequest returns trace attributes for an HTTP request received by a +// RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example this @@ -32,18 +45,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. - host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +72,8 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + peer, peerPort := SplitHostPort(req.RemoteAddr) + if peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -73,7 +87,17 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att count++ } - clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } if clientIP != "" { count++ } @@ -90,6 +114,11 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att count++ } + route := httpRoute(req.Pattern) + if route != "" { + count++ + } + attrs := make([]attribute.KeyValue, 0, count) attrs = append(attrs, semconvNew.ServerAddress(host), @@ -104,7 +133,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. 
attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -113,7 +142,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att } } - if useragent := req.UserAgent(); useragent != "" { + if useragent != "" { attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) } @@ -132,10 +161,27 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) } + if route != "" { + attrs = append(attrs, n.Route(route)) + } + return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return semconvNew.NetworkTransportTCP + case "udp", "udp4", "udp6": + return semconvNew.NetworkTransportUDP + case "unix", "unixgram", "unixpacket": + return semconvNew.NetworkTransportUnix + default: + return semconvNew.NetworkTransportPipe + } +} + +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,17 +196,19 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } return semconvNew.URLScheme("http") } -// TraceResponse returns trace attributes for telemetry from an HTTP response. +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP +// response. // -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +// If any of the fields in the ResponseTelemetry are not set the attribute will +// be omitted. +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +243,95 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. 
-func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), + ) + handleErr(err) + + responseBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerResponseBodySizeName, + metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), + ) + handleErr(err) + requestDurationHistogram, err := meter.Float64Histogram( + semconvNew.HTTPServerRequestDurationName, + metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), + ) + handleErr(err) + + return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram +} + +func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconvNew.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconvNew.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
-func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
 /*
 below attributes are returned:
 - http.request.method
@@ -222,7 +351,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue
 var requestHost string
 var requestPort int
 for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
- requestHost, requestPort = splitHostPort(hostport)
+ requestHost, requestPort = SplitHostPort(hostport)
 if requestHost != "" || requestPort > 0 {
 break
 }
@@ -284,7 +413,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue
 }
 
 // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
-func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
 /*
 below attributes are returned:
 - http.response.status_code
@@ -311,7 +440,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa
 return attrs
 }
 
-func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
+func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue {
 t := reflect.TypeOf(err)
 var value string
 if t.PkgPath() == "" && t.Name() == "" {
@@ -328,7 +457,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
 return semconvNew.ErrorTypeKey.String(value)
 }
 
-func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
 if method == "" {
 return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
 }
@@ -343,6 +472,102 @@ func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyV
 return semconvNew.HTTPRequestMethodGet, orig
 }
 
+func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Histogram{}, noop.Float64Histogram{}
+ }
+
+ var err error
+ requestBodySize, err := meter.Int64Histogram(
+ semconvNew.HTTPClientRequestBodySizeName,
+ metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit),
+ metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription),
+ )
+ handleErr(err)
+
+ requestDuration, err := meter.Float64Histogram(
+ semconvNew.HTTPClientRequestDurationName,
+ metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit),
+ metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription),
+ metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
+ )
+ handleErr(err)
+
+ return requestBodySize, requestDuration
+}
+
+func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ num := len(additionalAttributes) + 2
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ var requestHost string
+ var requestPort int
+ for _, hostport := range []string{h, req.Header.Get("Host")} {
+ requestHost, requestPort = SplitHostPort(hostport)
+ if requestHost != "" || requestPort > 0 {
+ break
+ }
+ }
+
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+ if port > 0 {
+ num++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ num++
+ }
+ if protoVersion != "" {
+ num++
+ }
+
+ if statusCode > 0 {
+ num++
+ }
+
+ attributes := slices.Grow(additionalAttributes, num)
+ attributes = append(attributes,
+ semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
+ semconvNew.ServerAddress(requestHost),
+ n.scheme(req),
+ )
+
+ if port > 0 {
+ attributes = append(attributes, semconvNew.ServerPort(port))
+ }
+ if protoName != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
+ }
+ return attributes
+}
+
+// TraceAttributes returns attributes for httptrace.
+func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+ semconvNew.ServerAddress(host),
+ }
+}
+
+func (n CurrentHTTPClient) scheme(req *http.Request) attribute.KeyValue {
+ if req.URL != nil && req.URL.Scheme != "" {
+ return semconvNew.URLScheme(req.URL.Scheme)
+ }
+ if req.TLS != nil {
+ return semconvNew.URLScheme("https")
+ }
+ return semconvNew.URLScheme("http")
+}
+
 func isErrorStatusCode(code int) bool {
 return code >= 400 || code < 100
 }
diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
index e6e14924f..bc1f7751d 100644
--- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
+++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -1,3 +1,6 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/util.go.tmpl
+
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
@@ -14,28 +17,28 @@ import (
 semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 
-// splitHostPort splits a network address hostport of the form "host",
+// SplitHostPort splits a network address hostport of the form "host",
 // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
 // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
 // port.
 //
 // An empty host is returned if it is not provided or unparsable. A negative
 // port is returned if it is not provided or unparsable.
-func splitHostPort(hostport string) (host string, port int) {
+func SplitHostPort(hostport string) (host string, port int) {
 port = -1
 if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndex(hostport, "]")
+ addrEnd := strings.LastIndexByte(hostport, ']')
 if addrEnd < 0 {
 // Invalid hostport.
return } - if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + if i := strings.LastIndexByte(hostport[addrEnd:], ':'); i < 0 { host = hostport[1:addrEnd] return } } else { - if i := strings.LastIndex(hostport, ":"); i < 0 { + if i := strings.LastIndexByte(hostport, ':'); i < 0 { host = hostport return } @@ -67,15 +70,31 @@ func requiredHTTPPort(https bool, port int) int { // nolint:revive } func serverClientIP(xForwardedFor string) string { - if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + if idx := strings.IndexByte(xForwardedFor, ','); idx >= 0 { xForwardedFor = xForwardedFor[:idx] } return xForwardedFor } +func httpRoute(pattern string) string { + if idx := strings.IndexByte(pattern, '/'); idx >= 0 { + return pattern[idx:] + } + return "" +} + func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") - name = strings.ToLower(name) + switch name { + case "HTTP": + name = "http" + case "QUIC": + name = "quic" + case "SPDY": + name = "spdy" + default: + name = strings.ToLower(name) + } return name, version } @@ -96,3 +115,13 @@ func handleErr(err error) { otel.Handle(err) } } + +func standardizeHTTPMethod(method string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return method +} diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index 5367732ec..ba7fccf1e 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,3 +1,6 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/v120.0.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -8,7 +11,6 @@ import ( "io" "net/http" "slices" - "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -17,7 +19,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,16 +37,18 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { - return semconvutil.HTTPServerRequest(server, req) +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs) +} + +func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { + return semconvutil.NetTransport(network) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
-func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - attributes := []attribute.KeyValue{} - +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue { if resp.ReadBytes > 0 { attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) } @@ -67,7 +71,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +88,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +117,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -144,7 +148,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,24 +168,24 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - return semconvutil.HTTPClientRequest(req) +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req, attrs) } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - return semconvutil.HTTPClientResponse(resp) +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp, attrs) } -func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string http.status_code int @@ -197,7 +201,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit var requestHost string var requestPort int for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -214,7 +218,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), semconv.NetPeerName(requestHost), ) @@ -235,7 +239,7 @@ const ( clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds ) -func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -263,12 +267,7 @@ func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } -func standardizeHTTPMethodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - 
default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) +// TraceAttributes returns attributes for httptrace. +func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue { + return append(attrs, semconv.NetHostName(host)) } diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index a73bb06e9..b99735479 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -1,14 +1,16 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package semconvutil provides OpenTelemetry semantic convention utilities. package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" import ( "fmt" "net/http" + "slices" "strings" "go.opentelemetry.io/otel/attribute" @@ -16,6 +18,11 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) +type HTTPServerRequestOptions struct { + // If set, this is used as value for the "http.client_ip" attribute. + HTTPClientIP string +} + // HTTPClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", @@ -26,9 +33,9 @@ import ( // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // -// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) -func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { - return hc.ClientResponse(resp) +// HTTPClientResponse(resp, ClientRequest(resp.Request))) +func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ClientResponse(resp, attrs) } // HTTPClientRequest returns trace attributes for an HTTP request made by a client. @@ -36,8 +43,8 @@ func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { // "net.peer.name". The following attributes are returned if the related values // are defined in req: "net.peer.port", "user_agent.original", // "http.request_content_length". -func HTTPClientRequest(req *http.Request) []attribute.KeyValue { - return hc.ClientRequest(req) +func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ClientRequest(req, attrs) } // HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. @@ -75,8 +82,8 @@ func HTTPClientStatus(code int) (codes.Code, string) { // "http.target", "net.host.name". The following attributes are returned if // they related values are defined in req: "net.host.port", "net.sock.peer.addr", // "net.sock.peer.port", "user_agent.original", "http.client_ip". 
-func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { - return hc.ServerRequest(server, req) +func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ServerRequest(server, req, opts, attrs) } // HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a @@ -153,8 +160,8 @@ var hc = &httpConv{ // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // -// append(ClientResponse(resp), ClientRequest(resp.Request)...) -func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { +// ClientResponse(resp, ClientRequest(resp.Request)) +func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.status_code int http.response_content_length int @@ -166,8 +173,11 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { if resp.ContentLength > 0 { n++ } + if n == 0 { + return attrs + } - attrs := make([]attribute.KeyValue, 0, n) + attrs = slices.Grow(attrs, n) if resp.StatusCode > 0 { attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) } @@ -182,7 +192,7 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { // "net.peer.name". The following attributes are returned if the related values // are defined in req: "net.peer.port", "user_agent.original", // "http.request_content_length", "user_agent.original". -func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { +func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string user_agent.original string @@ -221,8 +231,7 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { n++ } - attrs := make([]attribute.KeyValue, 0, n) - + attrs = slices.Grow(attrs, n) attrs = append(attrs, c.method(req.Method)) var u string @@ -305,7 +314,7 @@ func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue // related values are defined in req: "net.host.port", "net.sock.peer.addr", // "net.sock.peer.port", "user_agent.original", "http.client_ip", // "net.protocol.name", "net.protocol.version". -func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { +func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string http.scheme string @@ -358,7 +367,17 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K n++ } - clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. 
The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } if clientIP != "" { n++ } @@ -378,7 +397,7 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K n++ } - attrs := make([]attribute.KeyValue, 0, n) + attrs = slices.Grow(attrs, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index b80a1db61..df97255e4 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors @@ -200,6 +200,15 @@ func splitHostPort(hostport string) (host string, port int) { func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") - name = strings.ToLower(name) + switch name { + case "HTTP": + name = "http" + case "QUIC": + name = "quic" + case "SPDY": + name = "spdy" + default: + name = strings.ToLower(name) + } return name, version } diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index ea504e396..d62ce44b0 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -35,14 +35,14 @@ func (l *Labeler) Get() []attribute.KeyValue { type labelerContextKeyType int -const lablelerContextKey labelerContextKeyType = 0 +const labelerContextKey labelerContextKeyType = 0 // ContextWithLabeler returns a new context with the provided Labeler instance. // Attributes added to the specified labeler will be injected into metrics // emitted by the instrumentation. Only one labeller can be injected into the // context. Injecting it multiple times will override the previous calls. func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { - return context.WithValue(parent, lablelerContextKey, l) + return context.WithValue(parent, labelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if @@ -50,7 +50,7 @@ func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { // Labeler is returned and the second return value is false. In this case it is // safe to use the Labeler but any attributes added to it will not be used. 
func LabelerFromContext(ctx context.Context) (*Labeler, bool) { - l, ok := ctx.Value(lablelerContextKey).(*Labeler) + l, ok := ctx.Value(labelerContextKey).(*Labeler) if !ok { l = &Labeler{} } diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 39681ad4b..44b86ad86 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -153,7 +153,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) + t.semconv.RecordResponseSize(ctx, n, metricOpts) } // traces diff --git a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 16ef3cb9b..6be4c1fde 100644 --- a/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,13 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.57.0" + return "0.61.0" // This string is updated by the pre_release.sh script during release } - -// SemVersion is the semantic version to be supplied to tracer/meter creation. -// -// Deprecated: Use [Version] instead. -func SemVersion() string { - return Version() -} diff --git a/tools/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/tools/vendor/go.opentelemetry.io/otel/.clomonitor.yml new file mode 100644 index 000000000..128d61a22 --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/otel/.clomonitor.yml @@ -0,0 +1,3 @@ +exemptions: + - check: artifacthub_badge + reason: "Artifact Hub doesn't support Go packages" diff --git a/tools/vendor/go.opentelemetry.io/otel/.gitignore b/tools/vendor/go.opentelemetry.io/otel/.gitignore index ae8577ef3..749e8e881 100644 --- a/tools/vendor/go.opentelemetry.io/otel/.gitignore +++ b/tools/vendor/go.opentelemetry.io/otel/.gitignore @@ -1,6 +1,7 @@ .DS_Store Thumbs.db +.cache/ .tools/ venv/ .idea/ diff --git a/tools/vendor/go.opentelemetry.io/otel/.golangci.yml b/tools/vendor/go.opentelemetry.io/otel/.golangci.yml index dbfb2a165..5f69cc027 100644 --- a/tools/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/tools/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -1,13 +1,9 @@ -# See https://github.com/golangci/golangci-lint#config-file +version: "2" run: - issues-exit-code: 1 #Default - tests: true #Default - + issues-exit-code: 1 + tests: true linters: - # Disable everything by default so upgrades to not include new "default - # enabled" linters. - disable-all: true - # Specifically enable linters we want to use. 
+ default: none enable: - asasalint - bodyclose @@ -15,295 +11,240 @@ linters: - errcheck - errorlint - godot - - gofumpt - - goimports - gosec - - gosimple - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - - tenv - testifylint - - typecheck - unconvert - - unused - unparam - + - unused + - usestdlibvars + - usetesting + settings: + depguard: + rules: + auto/sdk: + files: + - '!internal/global/trace.go' + - ~internal/global/trace_test.go + deny: + - pkg: go.opentelemetry.io/auto/sdk + desc: Do not use SDK from automatic instrumentation. + non-tests: + files: + - '!$test' + - '!**/*test/*.go' + - '!**/internal/matchers/*.go' + deny: + - pkg: testing + - pkg: github.com/stretchr/testify + - pkg: crypto/md5 + - pkg: crypto/sha1 + - pkg: crypto/**/pkix + otel-internal: + files: + - '**/sdk/*.go' + - '**/sdk/**/*.go' + - '**/exporters/*.go' + - '**/exporters/**/*.go' + - '**/schema/*.go' + - '**/schema/**/*.go' + - '**/metric/*.go' + - '**/metric/**/*.go' + - '**/bridge/*.go' + - '**/bridge/**/*.go' + - '**/trace/*.go' + - '**/trace/**/*.go' + - '**/log/*.go' + - '**/log/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/internal$ + desc: Do not use cross-module internal packages. + - pkg: go.opentelemetry.io/otel/internal/internaltest + desc: Do not use cross-module internal packages. + otlp-internal: + files: + - '!**/exporters/otlp/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/internal + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - '!**/exporters/otlp/otlpmetric/internal/*.go' + - '!**/exporters/otlp/otlpmetric/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal + desc: Do not use cross-module internal packages. + otlptrace-internal: + files: + - '!**/exporters/otlp/otlptrace/*.go' + - '!**/exporters/otlp/otlptrace/internal/**.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - ^[ ]*[-•] + # Exclude sentences prefixing a list. 
+ - :$ + misspell: + locale: US + ignore-rules: + - cancelled + perfsprint: + int-conversion: true + err-error: true + errorf: true + sprintf1: true + strconcat: true + revive: + confidence: 0.01 + rules: + - name: blank-imports + - name: bool-literal-in-expr + - name: constant-logical-expr + - name: context-as-argument + arguments: + - allowTypesBefore: '*testing.T' + disabled: true + - name: context-keys-type + - name: deep-exit + - name: defer + arguments: + - - call-chain + - loop + - name: dot-imports + - name: duplicated-imports + - name: early-return + arguments: + - preserveScope + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + arguments: + - sayRepetitiveInsteadOfStutters + - name: flag-parameter + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + arguments: + - preserveScope + - name: package-comments + - name: range + - name: range-val-in-closure + - name: range-val-address + - name: redefines-builtin-id + - name: string-format + arguments: + - - panic + - /^[^\n]*$/ + - must not contain line breaks + - name: struct-tag + - name: superfluous-else + arguments: + - preserveScope + - name: time-equal + - name: unconditional-recursion + - name: unexported-return + - name: unhandled-error + arguments: + - fmt.Fprint + - fmt.Fprintf + - fmt.Fprintln + - fmt.Print + - fmt.Printf + - fmt.Println + - name: unnecessary-stmt + - name: useless-break + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + - name: waitgroup-by-value + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error + exclusions: + generated: lax + presets: + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - revive + path: schema/v.*/types/.* + text: avoid meaningless package names + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - linters: + - revive + path: .*internal/.* + text: exported (method|function|type|const) (.+) should have comment or be unexported + # Yes, they are, but it's okay in a test. + - linters: + - revive + path: _test\.go + text: exported func.*returns unexported type.*which can be annoying to use + # Example test functions should be treated like main. + - linters: + - revive + path: example.*_test\.go + text: calls to (.+) only in main[(][)] or init[(][)] functions + # It's okay to not run gosec and perfsprint in a test. + - linters: + - gosec + - perfsprint + path: _test\.go + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - linters: + - gosec + text: 'G404:' + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - linters: + - gosec + text: 'G402: TLS MinVersion too low.' + paths: + - third_party$ + - builtin$ + - examples$ issues: - # Maximum issues count per one linter. - # Set to 0 to disable. - # Default: 50 - # Setting to unlimited so the linter only is run once to debug all issues. max-issues-per-linter: 0 - # Maximum count of issues with the same text. - # Set to 0 to disable. 
- # Default: 3 - # Setting to unlimited so the linter only is run once to debug all issues. max-same-issues: 0 - # Excluding configuration per-path, per-linter, per-text and per-source. - exclude-rules: - # TODO: Having appropriate comments for exported objects helps development, - # even for objects in internal packages. Appropriate comments for all - # exported objects should be added and this exclusion removed. - - path: '.*internal/.*' - text: "exported (method|function|type|const) (.+) should have comment or be unexported" - linters: - - revive - # Yes, they are, but it's okay in a test. - - path: _test\.go - text: "exported func.*returns unexported type.*which can be annoying to use" - linters: - - revive - # Example test functions should be treated like main. - - path: example.*_test\.go - text: "calls to (.+) only in main[(][)] or init[(][)] functions" - linters: - - revive - # It's okay to not run gosec in a test. - - path: _test\.go - linters: - - gosec - # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) - # as we commonly use it in tests and examples. - - text: "G404:" - linters: - - gosec - # Ignoring gosec G402: TLS MinVersion too low - # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - - text: "G402: TLS MinVersion too low." - linters: - - gosec - include: - # revive exported should have comment or be unexported. - - EXC0012 - # revive package comment should be of the form ... - - EXC0013 - -linters-settings: - depguard: - rules: - non-tests: - files: - - "!$test" - - "!**/*test/*.go" - - "!**/internal/matchers/*.go" - deny: - - pkg: "testing" - - pkg: "github.com/stretchr/testify" - - pkg: "crypto/md5" - - pkg: "crypto/sha1" - - pkg: "crypto/**/pkix" - otlp-internal: - files: - - "!**/exporters/otlp/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" - desc: Do not use cross-module internal packages. - otlptrace-internal: - files: - - "!**/exporters/otlp/otlptrace/*.go" - - "!**/exporters/otlp/otlptrace/internal/**.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" - desc: Do not use cross-module internal packages. - otlpmetric-internal: - files: - - "!**/exporters/otlp/otlpmetric/internal/*.go" - - "!**/exporters/otlp/otlpmetric/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" - desc: Do not use cross-module internal packages. - otel-internal: - files: - - "**/sdk/*.go" - - "**/sdk/**/*.go" - - "**/exporters/*.go" - - "**/exporters/**/*.go" - - "**/schema/*.go" - - "**/schema/**/*.go" - - "**/metric/*.go" - - "**/metric/**/*.go" - - "**/bridge/*.go" - - "**/bridge/**/*.go" - - "**/trace/*.go" - - "**/trace/**/*.go" - - "**/log/*.go" - - "**/log/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/internal$" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/attribute" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/internaltest" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/matchers" - desc: Do not use cross-module internal packages. - godot: - exclude: - # Exclude links. - - '^ *\[[^]]+\]:' - # Exclude sentence fragments for lists. - - '^[ ]*[-•]' - # Exclude sentences prefixing a list. - - ':$' - goimports: - local-prefixes: go.opentelemetry.io - misspell: - locale: US - ignore-words: - - cancelled - revive: - # Sets the default failure confidence. 
- # This means that linting errors with less than 0.8 confidence will be ignored. - # Default: 0.8 - confidence: 0.01 - rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - - name: blank-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - - name: bool-literal-in-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - - name: constant-logical-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument - # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 - - name: context-as-argument - disabled: true - arguments: - allowTypesBefore: "*testing.T" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type - - name: context-keys-type - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - - name: deep-exit - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - - name: defer - disabled: false - arguments: - - ["call-chain", "loop"] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - - name: dot-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - - name: duplicated-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - - name: early-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block - - name: empty-block - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - - name: empty-lines - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - - name: error-naming - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - - name: error-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - - name: error-strings - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - - name: errorf - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported - - name: exported - disabled: false - arguments: - - "sayRepetitiveInsteadOfStutters" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - - name: flag-parameter - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - - name: identical-branches - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - - name: if-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement - - name: increment-decrement - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - - name: indent-error-flow - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - - name: import-shadowing - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments - - name: package-comments - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - 
- name: range - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - - name: range-val-in-closure - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - - name: range-val-address - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - - name: redefines-builtin-id - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - - name: string-format - disabled: false - arguments: - - - panic - - '/^[^\n]*$/' - - must not contain line breaks - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - - name: struct-tag - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - - name: superfluous-else - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - - name: time-equal - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - - name: var-naming - disabled: false - arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - - name: var-declaration - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion - - name: unconditional-recursion - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - - name: unexported-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - - name: unhandled-error - disabled: false - arguments: - - "fmt.Fprint" - - "fmt.Fprintf" - - "fmt.Fprintln" - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - - name: unnecessary-stmt - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - - name: useless-break - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - - name: waitgroup-by-value - disabled: false - testifylint: - enable-all: true - disable: - - float-compare - - go-require - - require-error +formatters: + enable: + - gofumpt + - goimports + - golines + settings: + goimports: + local-prefixes: + - go.opentelemetry.io + golines: + max-len: 120 + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/tools/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/tools/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 8f68dbd04..4acc75701 100644 --- a/tools/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/tools/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,193 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.37.0/0.59.0/0.13.0] 2025-06-25 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.33.0` package. + The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0.`(#6799) +- The `go.opentelemetry.io/otel/semconv/v1.34.0` package. + The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. 
(#6812) +- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825) +- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839) + +### Changed + +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836) +- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864) +- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898) + +### Fixed + +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710) +- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822) +- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914) + +### Removed + +- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770) + +## [0.12.2] 2025-05-22 + +### Fixed + +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. 
(#6804) + +## [0.12.1] 2025-05-21 + +### Fixes + +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800) + +## [1.36.0/0.58.0/0.12.0] 2025-05-20 + +### Added + +- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421) +- The `go.opentelemetry.io/otel/semconv/v1.31.0` package. + The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479) +- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688) +- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973) +- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973) +- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973) +- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662) +- The `go.opentelemetry.io/otel/semconv/v1.32.0` package. + The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`(#6782) +- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794) +- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796) + +### Removed + +- Drop support for [Go 1.22]. (#6381, #6418) +- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494) +- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492) +- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662) + +### Changed + +- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. 
(#6433) +- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455) +- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. (#6465) +- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466) +- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. (#6507) +- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641) + +### Deprecated + +- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449) + +### Fixes + +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392) +- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456) +- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472) + +## [1.35.0/0.57.0/0.11.0] 2025-03-05 + +This release is the last to support [Go 1.22]. +The next release will require at least [Go 1.23]. + +### Added + +- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187) +- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210) +- The `go.opentelemetry.io/otel/semconv/v1.28.0` package. + The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`(#6236) +- The `go.opentelemetry.io/otel/semconv/v1.30.0` package. + The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`(#6240) +- Document the pitfalls of using `Resource` as a comparable type. + `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272) +- Support [Go 1.24]. (#6304) +- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. + It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`. + Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. 
(#6317) + +### Changed + +- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198) + +### Fixes + +- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368) +- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. (#6369) + +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. (#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. 
(#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + ## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 ### Added @@ -3156,7 +3343,14 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD +[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 +[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 +[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 +[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 +[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 @@ -3245,6 +3439,7 @@ It contains api and sdk for trace and meter. +[Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 diff --git a/tools/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/tools/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 22a2e9dbd..f9ddc281f 100644 --- a/tools/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/tools/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -109,10 +109,9 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * The qualified approvals need to be from [Approver]s/[Maintainer]s - affiliated with different companies. Two qualified approvals from - [Approver]s or [Maintainer]s affiliated with the same company counts as a - single qualified approval. + * At least one of the qualified approvals need to be from an + [Approver]/[Maintainer] affiliated with a different company than the author + of the PR. * PRs introducing changes that have already been discussed and consensus reached only need one qualified approval. The discussion and resolution needs to be linked to the PR. @@ -181,6 +180,18 @@ patterns in the spec. For a deeper discussion, see [this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). +## Tests + +Each functionality should be covered by tests. + +Performance-critical functionality should also be covered by benchmarks. + +- Pull requests adding a performance-critical functionality +should have `go test -bench` output in their description. +- Pull requests changing a performance-critical functionality +should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) +output in their description. + ## Documentation Each (non-internal, non-test) package must be documented using @@ -631,17 +642,18 @@ should be canceled. 
### Triagers +- [Alex Kats](https://github.com/akats7), Capital One - [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent ### Approvers ### Maintainers -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [David Ashpole](https://github.com/dashpole), Google -- [Robert Pająk](https://github.com/pellared), Splunk -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [Tyler Yahn](https://github.com/MrAlias), Splunk +- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) +- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70)) +- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2)) +- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) +- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) ### Emeritus diff --git a/tools/vendor/go.opentelemetry.io/otel/Makefile b/tools/vendor/go.opentelemetry.io/otel/Makefile index b8292a4fb..4fa423ca0 100644 --- a/tools/vendor/go.opentelemetry.io/otel/Makefile +++ b/tools/vendor/go.opentelemetry.io/otel/Makefile @@ -11,11 +11,15 @@ ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} GO = go TIMEOUT = 60 +# User to run as in docker images. +DOCKER_USER=$(shell id -u):$(shell id -g) +DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile + .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -39,8 +43,11 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +VERIFYREADMES = $(TOOLS)/verifyreadmes +$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes + GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint MISSPELL = $(TOOLS)/misspell $(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell @@ -64,7 +71,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -81,20 +88,20 @@ PIP := $(PYTOOLS)/pip WORKDIR := /workdir # The python image to use for the virtual environment. 
-PYTHONIMAGE := python:3.11.3-slim-bullseye +PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) # Run the python image with the current directory mounted. -DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) +DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) # Create a virtual environment for Python tools. $(PYTOOLS): # The `--upgrade` flag is needed to ensure that the virtual environment is # created with the latest pip version. - @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip" # Install python packages into the virtual environment. $(PYTOOLS)/%: $(PYTOOLS) - @$(DOCKERPY) $(PIP) install -r requirements.txt + @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt CODESPELL = $(PYTOOLS)/codespell $(CODESPELL): PACKAGE=codespell @@ -119,7 +126,7 @@ vanity-import-fix: $(PORTO) # Generate go.work file for local development. .PHONY: go-work go-work: $(CROSSLINK) - $(CROSSLINK) work --root=$(shell pwd) + $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7 # Build @@ -209,11 +216,8 @@ go-mod-tidy/%: crosslink && cd $(DIR) \ && $(GO) mod tidy -compat=1.21 -.PHONY: lint-modules -lint-modules: go-mod-tidy - .PHONY: lint -lint: misspell lint-modules golangci-lint govulncheck +lint: misspell go-mod-tidy golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: $(PORTO) @@ -235,6 +239,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -255,14 +269,31 @@ check-clean-work-tree: exit 1; \ fi +# The weaver docker image to use for semconv-generate. +WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) + SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) - [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" - $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + # Ensure the target directory for source code is available. + mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG} + # Note: We mount a home directory for downloading/storing the semconv repository. + # Weaver will automatically clean the cache when finished, but the directories will remain. 
+ mkdir -p ~/.weaver + docker run --rm \ + -u $(DOCKER_USER) \ + --env HOME=/tmp/weaver \ + --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ + --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ + $(WEAVER_IMAGE) registry generate \ + --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \ + --templates=/home/weaver/templates \ + --param tag=$(TAG) \ + go \ + /home/weaver/target + $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)" .PHONY: gorelease gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) @@ -288,10 +319,11 @@ add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} +MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) .PHONY: lint-markdown lint-markdown: - docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md .PHONY: verify-readmes -verify-readmes: - ./verify_readmes.sh +verify-readmes: $(VERIFYREADMES) + $(VERIFYREADMES) diff --git a/tools/vendor/go.opentelemetry.io/otel/README.md b/tools/vendor/go.opentelemetry.io/otel/README.md index efec27890..5fa1b75c6 100644 --- a/tools/vendor/go.opentelemetry.io/otel/README.md +++ b/tools/vendor/go.opentelemetry.io/otel/README.md @@ -1,9 +1,13 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) +[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). @@ -49,20 +53,20 @@ Currently, this project supports the following environments. 
| OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | -| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | -| Ubuntu | 1.22 | 386 | -| Linux | 1.23 | arm64 | -| Linux | 1.22 | arm64 | +| Ubuntu | 1.24 | arm64 | +| Ubuntu | 1.23 | arm64 | +| macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | -| macOS 13 | 1.22 | amd64 | +| macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | -| macOS | 1.22 | arm64 | +| Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | -| Windows | 1.22 | amd64 | +| Windows | 1.24 | 386 | | Windows | 1.23 | 386 | -| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/tools/vendor/go.opentelemetry.io/otel/RELEASING.md b/tools/vendor/go.opentelemetry.io/otel/RELEASING.md index ffa9b6125..1ddcdef03 100644 --- a/tools/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/tools/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -1,21 +1,22 @@ # Release Process +## Create a `Version Release` issue + +Create a `Version Release` issue to track the release process. + ## Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. The `semconv-generate` make target is used for this. -1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. -2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` -3. Run the `make semconv-generate ...` target from this repository. +1. Set the `TAG` environment variable to the semantic convention tag you want to generate. +2. Run the `make semconv-generate ...` target from this repository. For example, ```sh -export TAG="v1.21.0" # Change to the release version you are generating. -export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" -docker pull otel/semconvgen:latest -make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. +export TAG="v1.30.0" # Change to the release version you are generating. +make semconv-generate # Uses the exported TAG. ``` This should create a new sub-package of [`semconv`](./semconv). @@ -111,6 +112,29 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `<new tag>` on GitHub. The release body should include all the release notes from the Changelog for this release. +### Sign the Release Artifact + +To ensure we comply with CNCF best practices, we need to sign the release artifact. +The tarball attached to the GitHub release needs to be signed with your GPG key. + +Follow [these steps] to sign the release artifact and upload it to GitHub. +You can use [this script] to verify the contents of the tarball before signing it. + +Be sure to use the correct GPG key when signing the release artifact.
+ +```terminal +gpg --local-user <key-id> --armor --detach-sign opentelemetry-go-<version>.tar.gz +``` + +You can verify the signature with: + +```terminal +gpg --verify opentelemetry-go-<version>.tar.gz.asc opentelemetry-go-<version>.tar.gz +``` + +[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases +[this script]: https://github.com/MrAlias/attest-sh + ## Post-Release ### Contrib Repository @@ -126,10 +150,24 @@ Importantly, bump any package versions referenced to be the latest one you just [Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ [content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go +### Close the milestone + +Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone. +This helps track what changes were included in each release. + +- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr) +- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged). + +Once all related issues and PRs have been added to the milestone, close the milestone. + ### Demo Repository Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) + +### Close the `Version Release` issue + +Once the todo list in the `Version Release` issue is complete, close the issue. diff --git a/tools/vendor/go.opentelemetry.io/otel/VERSIONING.md b/tools/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362..b8cb605c1 100644 --- a/tools/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/tools/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name).
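The VERSIONING.md hunk above restates the Go rule that a major version beyond v1 must appear in both the `go get` version and the module import path. A minimal sketch of checking that rule mechanically, assuming the `golang.org/x/mod/module` package is available (this snippet is illustrative and not part of the patch; the `/v2` path is hypothetical, as in the document):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// module.Check validates a (path, version) pair, including the rule
	// that v2+ releases require a matching /vN suffix in the module path.
	fmt.Println(module.Check("go.opentelemetry.io/otel", "v1.37.0"))   // <nil>
	fmt.Println(module.Check("go.opentelemetry.io/otel", "v2.0.1"))    // non-nil: path lacks the /v2 suffix
	fmt.Println(module.Check("go.opentelemetry.io/otel/v2", "v2.0.1")) // <nil> (hypothetical /v2 path)
}
```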
diff --git a/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go b/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go index be9cd922d..3eeaa5d44 100644 --- a/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -19,7 +19,7 @@ func NewAllowKeysFilter(keys ...Key) Filter { return func(kv KeyValue) bool { return false } } - allowed := make(map[Key]struct{}) + allowed := make(map[Key]struct{}, len(keys)) for _, k := range keys { allowed[k] = struct{}{} } @@ -38,7 +38,7 @@ func NewDenyKeysFilter(keys ...Key) Filter { return func(kv KeyValue) bool { return true } } - forbid := make(map[Key]struct{}) + forbid := make(map[Key]struct{}, len(keys)) for _, k := range keys { forbid[k] = struct{}{} } diff --git a/tools/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/tools/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go similarity index 68% rename from tools/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go rename to tools/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 822d84794..b76d2bbfd 100644 --- a/tools/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/tools/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -5,7 +5,7 @@ Package attribute provide several helper functions for some commonly used logic of processing attributes. */ -package attribute // import "go.opentelemetry.io/otel/internal/attribute" +package attribute // import "go.opentelemetry.io/otel/attribute/internal" import ( "reflect" @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. @@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. 
@@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/tools/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/tools/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go new file mode 100644 index 000000000..5791c6e7a --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "math" +) + +func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func rawToBool(r uint64) bool { + return r != 0 +} + +func int64ToRaw(i int64) uint64 { + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec +} + +func rawToInt64(r uint64) int64 { + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec +} + +func float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func rawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} diff --git a/tools/vendor/go.opentelemetry.io/otel/attribute/value.go b/tools/vendor/go.opentelemetry.io/otel/attribute/value.go index 9ea0ecbbd..817eecacf 100644 --- a/tools/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/tools/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -9,8 +9,7 @@ import ( "reflect" "strconv" - "go.opentelemetry.io/otel/internal" - "go.opentelemetry.io/otel/internal/attribute" + attribute "go.opentelemetry.io/otel/attribute/internal" ) //go:generate stringer -type=Type @@ -51,7 +50,7 @@ const ( func BoolValue(v bool) Value { return Value{ vtype: BOOL, - numeric: internal.BoolToRaw(v), + numeric: boolToRaw(v), } } @@ -82,7 +81,7 @@ func IntSliceValue(v []int) Value { func Int64Value(v int64) Value { return Value{ vtype: INT64, - numeric: internal.Int64ToRaw(v), + numeric: int64ToRaw(v), } } @@ -95,7 +94,7 @@ func Int64SliceValue(v []int64) Value { func Float64Value(v float64) Value { return Value{ vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), + numeric: float64ToRaw(v), } } @@ -125,7 +124,7 @@ func (v Value) Type() Type { // AsBool returns the bool value. Make sure that the Value's type is // BOOL. func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) + return rawToBool(v.numeric) } // AsBoolSlice returns the []bool value. Make sure that the Value's type is @@ -144,7 +143,7 @@ func (v Value) asBoolSlice() []bool { // AsInt64 returns the int64 value. Make sure that the Value's type is // INT64. func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) + return rawToInt64(v.numeric) } // AsInt64Slice returns the []int64 value. Make sure that the Value's type is @@ -163,7 +162,7 @@ func (v Value) asInt64Slice() []int64 { // AsFloat64 returns the float64 value. Make sure that the Value's // type is FLOAT64. func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) + return rawToFloat64(v.numeric) } // AsFloat64Slice returns the []float64 value. 
Make sure that the Value's type is diff --git a/tools/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/tools/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 36f536703..0e1fe2422 100644 --- a/tools/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/tools/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. -func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder - b.Grow(cap) + b.Grow(c) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/tools/vendor/go.opentelemetry.io/otel/codes/codes.go b/tools/vendor/go.opentelemetry.io/otel/codes/codes.go index 2acbac354..49a35b122 100644 --- a/tools/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/tools/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} diff --git a/tools/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/tools/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile new file mode 100644 index 000000000..935bd4876 --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -0,0 +1,4 @@ +# This is a renovate-friendly source of Docker images. +FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python +FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver +FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index 4571a5ca3..ca4544f0d 100644 --- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package tracetransform provides conversion functionality for the otlptrace +// exporters. 
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 2171bee3c..8409b5f8f 100644 --- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -294,7 +294,7 @@ func (c *client) MarshalLog() interface{} { Type string Endpoint string }{ - Type: "otlphttpgrpc", + Type: "otlptracegrpc", Endpoint: c.endpoint, } } diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 4abf48d1f..6eacdf311 100644 --- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package envconfig provides functionality to parse configuration from +// environment variables. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" import ( diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go index 97cd6c54f..b6e6b10fb 100644 --- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionally for the otlptracegrpc package. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go index 7bb189a94..1d840be20 100644 --- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl

// Copyright The OpenTelemetry Authors

@@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption {
		}),
		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
-		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
-		envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithClientCert(
+			"CLIENT_CERTIFICATE",
+			"CLIENT_KEY",
+			func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+		),
+		envconfig.WithClientCert(
+			"TRACES_CLIENT_CERTIFICATE",
+			"TRACES_CLIENT_KEY",
+			func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+		),
		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
index 3ee452ef7..506ca00b6 100644
--- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
+++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -1,9 +1,10 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

+// Package otlpconfig provides configuration for the otlptrace exporters.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"

import (
@@ -52,7 +53,9 @@ type (
		// gRPC configurations
		GRPCCredentials credentials.TransportCredentials

-		Proxy HTTPTransportProxyFunc
+		// HTTP configurations
+		Proxy      HTTPTransportProxyFunc
+		HTTPClient *http.Client
	}

	Config struct {
@@ -98,7 +101,7 @@ func cleanPath(urlPath string, defaultPath string) string {
		return defaultPath
	}
	if !path.IsAbs(tmp) {
-		tmp = fmt.Sprintf("/%s", tmp)
+		tmp = "/" + tmp
	}
	return tmp
}
@@ -349,3 +352,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
		return cfg
	})
}
+
+func WithHTTPClient(c *http.Client) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.HTTPClient = c
+		return cfg
+	})
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
index 3d4f699d4..918490387 100644
--- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
+++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl

// Copyright The OpenTelemetry Authors
diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
index 38b97a013..ba6e41183 100644
--- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
+++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl

// Copyright The OpenTelemetry Authors
diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
index a12ea4c48..1c4659423 100644
--- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
+++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go

// Copyright The OpenTelemetry Authors
diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
index 1c5450ab6..777e68a7b 100644
--- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
+++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl

// Copyright The OpenTelemetry Authors
@@ -14,7 +14,7 @@ import (
	"fmt"
	"time"

-	"github.com/cenkalti/backoff/v4"
+	"github.com/cenkalti/backoff/v5"
)

// DefaultConfig are the recommended defaults to use.
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
		RandomizationFactor: backoff.DefaultRandomizationFactor,
		Multiplier:          backoff.DefaultMultiplier,
		MaxInterval:         c.MaxInterval,
-		MaxElapsedTime:      c.MaxElapsedTime,
-		Stop:                backoff.Stop,
-		Clock:               backoff.SystemClock,
	}
	b.Reset()

+	maxElapsedTime := c.MaxElapsedTime
+	startTime := time.Now()
+
	for {
		err := fn(ctx)
		if err == nil {
@@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
			return err
		}

-		bOff := b.NextBackOff()
-		if bOff == backoff.Stop {
+		if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
			return fmt.Errorf("max retry time elapsed: %w", err)
		}

		// Wait for the greater of the backoff or throttle delay.
- var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index 00ab1f20c..2da229870 100644 --- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -199,8 +199,9 @@ func WithTimeout(duration time.Duration) Option { // explicitly returns a backoff time in the response. That time will take // precedence over these settings. // -// These settings do not define any network retry strategy. That is entirely -// handled by the gRPC ClientConn. +// These settings define the retry strategy implemented by the exporter. +// These settings do not define any network retry strategy. +// That is handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially diff --git a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index c76bedfb1..5f78bfdfb 100644 --- a/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.32.0" + return "1.36.0" } diff --git a/tools/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/tools/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh deleted file mode 100644 index 93e80ea30..000000000 --- a/tools/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -top_dir='.' -if [[ $# -gt 0 ]]; then - top_dir="${1}" -fi - -p=$(pwd) -mod_dirs=() - -# Note `mapfile` does not exist in older bash versions: -# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash - -while IFS= read -r line; do - mod_dirs+=("$line") -done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) - -for mod_dir in "${mod_dirs[@]}"; do - cd "${mod_dir}" - - while IFS= read -r line; do - echo ".${line#${p}}" - done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... 
| grep '^main|' | cut -f 2- -d '|') - cd "${p}" -done diff --git a/tools/vendor/go.opentelemetry.io/otel/internal/gen.go b/tools/vendor/go.opentelemetry.io/otel/internal/gen.go deleted file mode 100644 index 4259f0320..000000000 --- a/tools/vendor/go.opentelemetry.io/otel/internal/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/tools/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/tools/vendor/go.opentelemetry.io/otel/internal/global/handler.go index c657ff8e7..2e47b2964 100644 --- a/tools/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/tools/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package global provides the OpenTelemetry global API. 
package global // import "go.opentelemetry.io/otel/internal/global" import ( diff --git a/tools/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/tools/vendor/go.opentelemetry.io/otel/internal/global/meter.go index a6acd8dca..adb37b5b0 100644 --- a/tools/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/tools/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -169,7 +169,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return i, nil } -func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (m *meter) Int64UpDownCounter( + name string, + options ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -238,7 +241,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return i, nil } -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (m *meter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -261,7 +267,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return i, nil } -func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (m *meter) Int64ObservableUpDownCounter( + name string, + options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -284,7 +293,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return i, nil } -func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (m *meter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -330,7 +342,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return i, nil } -func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (m *meter) Float64UpDownCounter( + name string, + options ...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -353,7 +368,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return i, nil } -func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (m *meter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -399,7 +417,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) return i, nil } -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (m *meter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -422,7 +443,10 @@ func (m *meter) Float64ObservableCounter(name string, options 
...metric.Float64O return i, nil } -func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (m *meter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -445,7 +469,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return i, nil } -func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (m *meter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/tools/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/tools/vendor/go.opentelemetry.io/otel/internal/global/trace.go index ac65262c6..49e4ac4fa 100644 --- a/tools/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/tools/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -145,6 +146,41 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF +// uprobe to this code. +// +// "noinline" pragma prevents the method from ever being inlined. +// +//go:noinline +func (t *tracer) newSpan( + ctx context.Context, + autoSpan *bool, + name string, + opts []trace.SpanStartOption, +) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) + } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/tools/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/tools/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go deleted file mode 100644 index b2fe3e41d..000000000 --- a/tools/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -import ( - "math" - "unsafe" -) - -func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. 
- if b { - return 1 - } - return 0 -} - -func RawToBool(r uint64) bool { - return r != 0 -} - -func Int64ToRaw(i int64) uint64 { - // Assumes original was a valid int64 (overflow not checked). - return uint64(i) // nolint: gosec -} - -func RawToInt64(r uint64) int64 { - // Assumes original was a valid int64 (overflow not checked). - return int64(r) // nolint: gosec -} - -func Float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func RawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} - -func RawPtrToFloat64Ptr(r *uint64) *float64 { - // Assumes original was a valid *float64 (overflow not checked). - return (*float64)(unsafe.Pointer(r)) // nolint: gosec -} - -func RawPtrToInt64Ptr(r *uint64) *int64 { - // Assumes original was a valid *int64 (overflow not checked). - return (*int64)(unsafe.Pointer(r)) // nolint: gosec -} diff --git a/tools/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/tools/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index f8435d8f2..b7fc973a6 100644 --- a/tools/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/tools/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -106,7 +106,9 @@ type Float64ObservableUpDownCounterConfig struct { // NewFloat64ObservableUpDownCounterConfig returns a new // [Float64ObservableUpDownCounterConfig] with all opts applied. -func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { +func NewFloat64ObservableUpDownCounterConfig( + opts ...Float64ObservableUpDownCounterOption, +) Float64ObservableUpDownCounterConfig { var config Float64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyFloat64ObservableUpDownCounter(config) @@ -239,12 +241,16 @@ type float64CallbackOpt struct { cback Float64Callback } -func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableCounter( + cfg Float64ObservableCounterConfig, +) Float64ObservableCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } -func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter( + cfg Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/tools/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/tools/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index e079aaef1..4404b71a2 100644 --- a/tools/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/tools/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -105,7 +105,9 @@ type Int64ObservableUpDownCounterConfig struct { // NewInt64ObservableUpDownCounterConfig returns a new // [Int64ObservableUpDownCounterConfig] with all opts applied. 
-func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { +func NewInt64ObservableUpDownCounterConfig( + opts ...Int64ObservableUpDownCounterOption, +) Int64ObservableUpDownCounterConfig { var config Int64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyInt64ObservableUpDownCounter(config) @@ -242,7 +244,9 @@ func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounter return cfg } -func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter( + cfg Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/tools/vendor/go.opentelemetry.io/otel/metric/instrument.go b/tools/vendor/go.opentelemetry.io/otel/metric/instrument.go index a535782e1..9f48d5f11 100644 --- a/tools/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/tools/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -63,7 +63,9 @@ func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o descOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -98,7 +100,9 @@ func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o descOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -138,7 +142,9 @@ func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o unitOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.unit = string(o) return c } @@ -173,7 +179,9 @@ func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o unitOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.unit = string(o) return c } diff --git a/tools/vendor/go.opentelemetry.io/otel/metric/meter.go b/tools/vendor/go.opentelemetry.io/otel/metric/meter.go index 14e08c24a..fdd2a7011 100644 --- a/tools/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/tools/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -110,7 +110,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. 
- Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + Int64ObservableUpDownCounter( + name string, + options ...Int64ObservableUpDownCounterOption, + ) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -194,7 +197,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. - Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + Float64ObservableUpDownCounter( + name string, + options ...Float64ObservableUpDownCounterOption, + ) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used diff --git a/tools/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/tools/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index ca6fcbdc0..9afb69e58 100644 --- a/tools/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/tools/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -86,13 +86,19 @@ func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (Meter) Int64ObservableCounter( + string, + ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { return Int64ObservableCounter{}, nil } // Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (Meter) Int64ObservableUpDownCounter( + string, + ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { return Int64ObservableUpDownCounter{}, nil } @@ -128,19 +134,28 @@ func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64G // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (Meter) Float64ObservableCounter( + string, + ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { return Float64ObservableCounter{}, nil } // Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (Meter) Float64ObservableUpDownCounter( + string, + ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { return Float64ObservableUpDownCounter{}, nil } // Float64ObservableGauge returns an ObservableGauge used to record int64 // measurements that produces no telemetry. 
-func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (Meter) Float64ObservableGauge( + string, + ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { return Float64ObservableGauge{}, nil } diff --git a/tools/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/tools/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 552263ba7..ebda5026d 100644 --- a/tools/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/tools/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -28,7 +28,21 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { } // Extract returns a copy of parent with the baggage from the carrier added. +// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked +// for multiple values extraction. Otherwise, Get is called. func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { + if multiCarrier, ok := carrier.(ValuesGetter); ok { + return extractMultiBaggage(parent, multiCarrier) + } + return extractSingleBaggage(parent, carrier) +} + +// Fields returns the keys who's values are set with Inject. +func (b Baggage) Fields() []string { + return []string{baggageHeader} +} + +func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context { bStr := carrier.Get(baggageHeader) if bStr == "" { return parent @@ -41,7 +55,23 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context return baggage.ContextWithBaggage(parent, bag) } -// Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { - return []string{baggageHeader} +func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context { + bVals := carrier.Values(baggageHeader) + if len(bVals) == 0 { + return parent + } + var members []baggage.Member + for _, bStr := range bVals { + currBag, err := baggage.Parse(bStr) + if err != nil { + continue + } + members = append(members, currBag.Members()...) + } + + b, err := baggage.New(members...) + if err != nil || b.Len() == 0 { + return parent + } + return baggage.ContextWithBaggage(parent, b) } diff --git a/tools/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/tools/vendor/go.opentelemetry.io/otel/propagation/propagation.go index 8c8286aab..5c8c26ea2 100644 --- a/tools/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/tools/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -9,6 +9,7 @@ import ( ) // TextMapCarrier is the storage medium used by a TextMapPropagator. +// See ValuesGetter for how a TextMapCarrier can get multiple values for a key. type TextMapCarrier interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -29,6 +30,18 @@ type TextMapCarrier interface { // must never be done outside of a new major release. } +// ValuesGetter can return multiple values for a single key, +// with contrast to TextMapCarrier.Get which returns a single value. +type ValuesGetter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Values returns all values associated with the passed key. + Values(key string) []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+} + // MapCarrier is a TextMapCarrier that uses a map held in memory as a storage // medium for propagated key-value pairs. type MapCarrier map[string]string @@ -55,14 +68,25 @@ func (c MapCarrier) Keys() []string { return keys } -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. +// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces. type HeaderCarrier http.Header -// Get returns the value associated with the passed key. +// Compile time check that HeaderCarrier implements ValuesGetter. +var _ TextMapCarrier = HeaderCarrier{} + +// Compile time check that HeaderCarrier implements TextMapCarrier. +var _ ValuesGetter = HeaderCarrier{} + +// Get returns the first value associated with the passed key. func (hc HeaderCarrier) Get(key string) string { return http.Header(hc).Get(key) } +// Values returns all values associated with the passed key. +func (hc HeaderCarrier) Values(key string) []string { + return http.Header(hc).Values(key) +} + // Set stores the key-value pair. func (hc HeaderCarrier) Set(key string, value string) { http.Header(hc).Set(key, value) @@ -89,6 +113,8 @@ type TextMapPropagator interface { // must never be done outside of a new major release. // Extract reads cross-cutting concerns from the carrier into a Context. + // Implementations may check if the carrier implements ValuesGetter, + // to support extraction of multiple values per key. Extract(ctx context.Context, carrier TextMapCarrier) context.Context // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. diff --git a/tools/vendor/go.opentelemetry.io/otel/renovate.json b/tools/vendor/go.opentelemetry.io/otel/renovate.json index 0a29a2f13..fa5acf2d3 100644 --- a/tools/vendor/go.opentelemetry.io/otel/renovate.json +++ b/tools/vendor/go.opentelemetry.io/otel/renovate.json @@ -1,7 +1,8 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "config:recommended" + "config:best-practices", + "helpers:pinGitHubActionDigestsToSemver" ], "ignorePaths": [], "labels": ["Skip Changelog", "dependencies"], @@ -15,10 +16,8 @@ "enabled": true }, { - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false + "matchPackageNames": ["go.opentelemetry.io/build-tools/**"], + "groupName": "build-tools" }, { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], @@ -27,6 +26,10 @@ { "matchPackageNames": ["golang.org/x/**"], "groupName": "golang.org/x" + }, + { + "matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"], + "enabled": false } ] } diff --git a/tools/vendor/go.opentelemetry.io/otel/requirements.txt b/tools/vendor/go.opentelemetry.io/otel/requirements.txt index ab09daf9d..1bb55fb1c 100644 --- a/tools/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/tools/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.3.0 +codespell==2.4.1 diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/tools/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go index 07923ed8d..e3309231d 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package env provides types and functionality for environment variable support +// in the OpenTelemetry SDK. 
package env // import "go.opentelemetry.io/otel/sdk/internal/env"

import (
diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index cf3c88e15..cefe4ab91 100644
--- a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -13,7 +13,7 @@ import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk"
-	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

type (
diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 5ecd859a5..0d8619715 100644
--- a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -11,7 +11,7 @@ import (
	"os"
	"regexp"

-	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

type containerIDProvider func() (string, error)
diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index 813f05624..16a062ad8 100644
--- a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -12,7 +12,7 @@ import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
-	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

const (
diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index 2d0f65498..781903923 100644
--- a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -8,7 +8,7 @@ import (
	"errors"
	"strings"

-	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

type hostIDProvider func() (string, error)
diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 8a48ab4fa..01b4d27a0 100644
--- a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -8,7 +8,7 @@ import (
	"strings"

	"go.opentelemetry.io/otel/attribute"
-	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

type osDescriptionProvider func() (string, error)
diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
index ce455dc54..3d703c5d9 100644
--- a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
+++ b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -5,6 +5,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource"

import (
	"encoding/xml"
+	"errors"
	"fmt"
	"io"
	"os"
@@ -63,7 +64,7 @@ func parsePlistFile(file io.Reader) (map[string]string, error) {
	}

	if len(v.Dict.Key) != len(v.Dict.String) {
-		return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+		return nil, errors.New("the number of <key> and <string> elements doesn't match")
	}

	properties := make(map[string]string, len(v.Dict.Key))
diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 085fe68fd..6712ce80d 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type ( diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index ad4b50df4..09b91e1e1 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -21,11 +21,22 @@ import ( // Resources should be passed and stored as pointers // (`*resource.Resource`). The `nil` value is equivalent to an empty // Resource. +// +// Note that the Go == operator compares not just the resource attributes but +// also all other internals of the Resource type. Therefore, Resource values +// should not be used as map or database keys. In general, the [Resource.Equal] +// method should be used instead of direct comparison with ==, since that +// method ensures the correct comparison of resource attributes, and the +// [attribute.Distinct] returned from [Resource.Equivalent] should be used for +// map and database keys instead. type Resource struct { attrs attribute.Set schemaURL string } +// Compile-time check that the Resource remains comparable. +var _ map[Resource]struct{} = nil + var ( defaultResource *Resource defaultResourceOnce sync.Once @@ -137,15 +148,19 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns true when a Resource is equivalent to this Resource. -func (r *Resource) Equal(eq *Resource) bool { +// Equal returns whether r and o represent the same resource. Two resources can +// be equal even if they have different schema URLs. +// +// See the documentation on the [Resource] type for the pitfalls of using == +// with Resource values; most code should use Equal instead. +func (r *Resource) Equal(o *Resource) bool { if r == nil { r = Empty() } - if eq == nil { - eq = Empty() + if o == nil { + o = Empty() } - return r.Equivalent() == eq.Equivalent() + return r.Equivalent() == o.Equivalent() } // Merge creates a new [Resource] by merging a and b. 
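The expanded `Resource` docs above warn against `==` and point to `Equal` and `Equivalent`; here is a minimal sketch of both, using the real `resource` and `attribute` packages (the attribute values are illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	a := resource.NewSchemaless(attribute.String("service.name", "app"))
	b := resource.NewSchemaless(attribute.String("service.name", "app"))

	// Compare resources with Equal, not ==.
	fmt.Println(a.Equal(b)) // true

	// Use the attribute.Distinct returned by Equivalent as a map key,
	// not the Resource value itself.
	seen := map[attribute.Distinct]bool{a.Equivalent(): true}
	fmt.Println(seen[b.Equivalent()]) // true
}
```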
diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index ccc97e1b6..6966ed861 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "sync" "sync/atomic" "time" @@ -201,10 +202,9 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { } } - wait := make(chan error) + wait := make(chan error, 1) go func() { wait <- bsp.exportSpans(ctx) - close(wait) }() // Wait until the export is finished or the context is cancelled/timed out select { @@ -268,7 +268,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if bsp.o.ExportTimeout > 0 { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout")) defer cancel() } diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index 925bcf993..c8d3fb7e3 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -5,10 +5,8 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" - crand "crypto/rand" "encoding/binary" - "math/rand" - "sync" + "math/rand/v2" "go.opentelemetry.io/otel/trace" ) @@ -29,20 +27,15 @@ type IDGenerator interface { // must never be done outside of a new major release. } -type randomIDGenerator struct { - sync.Mutex - randSource *rand.Rand -} +type randomIDGenerator struct{} var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { - gen.Lock() - defer gen.Unlock() sid := trace.SpanID{} for { - _, _ = gen.randSource.Read(sid[:]) + binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) if sid.IsValid() { break } @@ -53,18 +46,17 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { - gen.Lock() - defer gen.Unlock() tid := trace.TraceID{} sid := trace.SpanID{} for { - _, _ = gen.randSource.Read(tid[:]) + binary.NativeEndian.PutUint64(tid[:8], rand.Uint64()) + binary.NativeEndian.PutUint64(tid[8:], rand.Uint64()) if tid.IsValid() { break } } for { - _, _ = gen.randSource.Read(sid[:]) + binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) if sid.IsValid() { break } @@ -73,9 +65,5 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. 
} func defaultIDGenerator() IDGenerator { - gen := &randomIDGenerator{} - var rngSeed int64 - _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) - gen.randSource = rand.New(rand.NewSource(rngSeed)) - return gen + return &randomIDGenerator{} } diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 185aa7c08..0e2a2e7c6 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -169,7 +169,17 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) + global.Info( + "Tracer created", + "name", + name, + "version", + is.Version, + "schemaURL", + is.SchemaURL, + "attributes", + is.Attributes, + ) } return t } diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index d2d1f7246..9b672a1d7 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" - "fmt" "os" "strconv" "strings" @@ -26,7 +25,7 @@ const ( type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) + return "unsupported sampler: " + string(e) } var ( @@ -39,7 +38,7 @@ type samplerArgParseError struct { } func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) + return "parsing sampler argument: " + e.parseErr.Error() } func (e samplerArgParseError) Unwrap() error { diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index ebb6df6c9..aa7b262d0 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -47,12 +47,12 @@ const ( // Drop will not record the span and all attributes/events will be dropped. Drop SamplingDecision = iota - // Record indicates the span's `IsRecording() == true`, but `Sampled` flag - // *must not* be set. + // RecordOnly indicates the span's IsRecording method returns true, but trace.FlagsSampled flag + // must not be set. RecordOnly - // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag - // *must* be set. + // RecordAndSample indicates the span's IsRecording method returns true and trace.FlagsSampled flag + // must be set. 
RecordAndSample ) diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 554111bb4..664e13e03 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -58,7 +58,7 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { var err error ssp.stopOnce.Do(func() { stopFunc := func(exp SpanExporter) (<-chan error, func()) { - done := make(chan error) + done := make(chan error, 1) return done, func() { done <- exp.Shutdown(ctx) } } diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 17f883c2c..1785a4bbb 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -347,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { } switch attr.Value.Type() { case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } + v := attr.Value.AsString() + return attr.Key.String(truncate(limit, v)) case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } + v[i] = truncate(limit, v[i]) } return attr.Key.StringSlice(v) } return attr } -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. -func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc +// truncate returns a truncated version of s such that it contains no more than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains no more than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritizes performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input.
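+ // A zero-capacity builder means Grow was never called above: the scan + // found no invalid encoding and the rune count never exceeded the limit, + // so s (over the limit in bytes, not in runes) is returned whole.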
+ if b.Cap() == 0 { + return s } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. -func safeTruncateValidUTF8(input string, limit int) (string, bool) { - for cnt := 0; cnt <= limit; { - r, size := utf8.DecodeRuneInString(input[cnt:]) - if r == utf8.RuneError { - return input, false + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue } - if cnt+size > limit { - return input[:cnt], true + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue } - cnt += size + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ } - return input, true + + return b.String() } // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // -// The only SpanOption currently supported is WithTimestamp which will set the -// end time for a Span's life-cycle. +// The only SpanEndOptions currently supported are [trace.WithTimestamp] and +// [trace.WithStackTrace]. // // If this method is called while panicking, an error event is added to the // Span before ending it and the panic is continued. diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 43419d3b5..0b65ae9ab 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -26,7 +26,11 @@ var _ trace.Tracer = &tracer{} // The Span is created with the provided name and as a child of any existing // span context found in the passed context. The created Span will be // configured appropriately by any SpanOption passed. -func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { +func (tr *tracer) Start( + ctx context.Context, + name string, + options ...trace.SpanStartOption, +) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(options...) if ctx == nil { @@ -112,7 +116,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo } // newRecordingSpan returns a new configured recordingSpan. -func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { +func (tr *tracer) newRecordingSpan( + psc, sc trace.SpanContext, + name string, + sr SamplingResult, + config *trace.SpanConfig, +) *recordingSpan { startTime := config.Timestamp() if startTime.IsZero() { startTime = time.Now() diff --git a/tools/vendor/go.opentelemetry.io/otel/sdk/version.go b/tools/vendor/go.opentelemetry.io/otel/sdk/version.go index 0b214d3fe..c0217af6b 100644 --- a/tools/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/tools/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -1,9 +1,10 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package sdk provides the OpenTelemetry default SDK for Go.
package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.32.0" + return "1.37.0" } diff --git a/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md new file mode 100644 index 000000000..02b56115e --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md @@ -0,0 +1,4 @@ + +# Migration from v1.33.0 to v1.34.0 + +The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`. diff --git a/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md new file mode 100644 index 000000000..fab06c975 --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.34.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0) diff --git a/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go new file mode 100644 index 000000000..5b5666257 --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go @@ -0,0 +1,13851 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +import "go.opentelemetry.io/otel/attribute" + +// Namespace: android +const ( + // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" + // semantic conventions. It represents the state of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "created" + // Note: The Android lifecycle states are defined in + // [Activity lifecycle callbacks], from which the `OS identifiers` are + // derived. + // + // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc + AndroidAppStateKey = attribute.Key("android.app.state") + + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely identifies the + // framework API revision offered by a version (`os.version`) of the Android + // operating system. More information can be found [here]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "33", "32" + // + // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely identifies the +// framework API revision offered by a version (`os.version`) of the Android +// operating system. More information can be found [here].
+// +// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time. + // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. + // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found + // [here]. + // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [here]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels.
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. +func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. 
The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact], which provides a standard way to identify and locate the + // packaged artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity.
+// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact], which provides a standard way to identify and locate the +// packaged artifact. +// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the value + // of the `ConsistentRead` request parameter.
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. 
It represents the value of + // the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" + AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") + + // AWSKinesisStreamNameKey is the attribute Key conforming to the + // "aws.kinesis.stream_name" semantic conventions. It represents the name of the + // AWS Kinesis [stream] the request refers to. Corresponds to the + // `--stream-name` parameter of the Kinesis [describe-stream] operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-stream-name" + // + // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html + // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html + AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") + + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked + // ARN as provided on the `Context` passed to the function (the + // `Lambda-Runtime-Invoked-Function-Arn` header on the + // `/runtime/invocation/next` request, where applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" + // Note: This may be different from `cloud.resource_id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") + + // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the + // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID + // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a + // lambda function. Its contents are read by Lambda and used to trigger a + // function. This isn't available in the lambda execution context or the lambda + // runtime environment. It is populated by the AWS SDK for each language when + // that UUID is present. Some of these operations are + // Create/Delete/Get/List/Update EventSourceMapping. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" + // + // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html + AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource + // Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" + // Note: See the [log group ARN format documentation]. + // + // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of the + // AWS log group(s) an application is writing to.
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API]. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+ // This applies in particular to the following operations: + // + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the + // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN + // of the Secret stored in AWS Secrets Manager. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters" + AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn") + + // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn" + // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon + // SNS [topic] is a logical access point that acts as a communication channel. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" + // + // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html + AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") + + // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" + // semantic conventions. It represents the URL of the AWS SQS Queue. It's a + // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is + // used to access the queue and perform actions on it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" + AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") + + // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the + // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN + // of the AWS Step Functions Activity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" + AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") + + // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the + // "aws.step_functions.state_machine.arn" semantic conventions. It represents + // the ARN of the AWS Step Functions State Machine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" + AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") +) + +// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the +// "aws.bedrock.guardrail.id" semantic conventions.
It represents the unique +// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
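+//
+// A hedged usage sketch, not part of the generated conventions: it assumes
+// "encoding/json" is imported, "updates" is a slice of SDK update structs, and
+// "span" is an active trace.Span.
+//
+//	vals := make([]string, 0, len(updates))
+//	for _, u := range updates {
+//		b, err := json.Marshal(u) // each item is serialized individually
+//		if err == nil {
+//			vals = append(vals, string(b))
+//		}
+//	}
+//	span.SetAttributes(AWSDynamoDBGlobalSecondaryIndexUpdates(vals...))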
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. 
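+//
+// A minimal sketch of annotating a Scan span (illustrative only; assumes "out"
+// is a DynamoDB Scan output with Count and ScannedCount fields and "span" is
+// an active trace.Span):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBCount(int(out.Count)),
+//		AWSDynamoDBScannedCount(int(out.ScannedCount)),
+//	)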
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. +// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. 
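+//
+// A hedged sketch of extracting the ID from the task ARN (the ARN literal is a
+// made-up example; assumes "strings" is imported):
+//
+//	arn := "arn:aws:ecs:us-west-2:123456789012:task/my-cluster/1234567890123456789"
+//	id := arn[strings.LastIndex(arn, "/")+1:] // final path segment of the ARN
+//	kv := AWSECSTaskID(id)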
+func AWSECSTaskID(val string) attribute.KeyValue {
+	return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+	return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+	return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+// function. Its contents are read by Lambda and used to trigger a function.
+// This isn't available in the lambda execution context or the lambda runtime
+// environment. This is going to be populated by the AWS SDK for each language
+// when that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+	return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+// `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+	return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+	return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+	return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+	return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+	return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+	return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+	return AWSS3UploadIDKey.String(val)
+}
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the Secret stored in the Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+	return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions.
It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel. +// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // ec2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: az +const ( + // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic + // conventions. It represents the [Azure Resource Provider Namespace] as + // recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzNamespaceKey = attribute.Key("az.namespace") + + // AzServiceRequestIDKey is the attribute Key conforming to the + // "az.service_request_id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzServiceRequestIDKey = attribute.Key("az.service_request_id") +) + +// AzNamespace returns an attribute KeyValue conforming to the "az.namespace" +// semantic conventions. It represents the [Azure Resource Provider Namespace] as +// recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzNamespace(val string) attribute.KeyValue { + return AzNamespaceKey.String(val) +} + +// AzServiceRequestID returns an attribute KeyValue conforming to the +// "az.service_request_id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. 
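+//
+// A hedged sketch of capturing the identifier from an HTTP response (assumes
+// "resp" is an *http.Response and "span" an active trace.Span; the
+// x-ms-request-id header name is an assumption about the service, not part of
+// this convention):
+//
+//	if id := resp.Header.Get("x-ms-request-id"); id != "" {
+//		span.SetAttributes(AzServiceRequestID(id))
+//	}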
+func AzServiceRequestID(val string) attribute.KeyValue { + return AzServiceRequestIDKey.String(val) +} + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. +func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. + // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // bounded_staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // consistent_prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. 
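+//
+// A hedged server-side sketch using the Sec-CH-UA-Mobile client hint (assumes
+// "r" is an *http.Request and "span" an active trace.Span; structured-header
+// booleans are "?1"/"?0"). If the hint is absent, the attribute is left unset,
+// per the note above.
+//
+//	if v := r.Header.Get("Sec-CH-UA-Mobile"); v != "" {
+//		span.SetAttributes(BrowserMobile(v == "?1"))
+//	}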
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
+
+// Namespace: cassandra
+const (
+	// CassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "cassandra.consistency.level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from [CQL].
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+	CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+	// CassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "cassandra.coordinator.dc" semantic conventions. It represents the data
+	// center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: us-west-2
+	CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+	// CassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+	// coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+	CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+	// CassandraPageSizeKey is the attribute Key conforming to the
+	// "cassandra.page.size" semantic conventions. It represents the fetch size used
+	// for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 5000
+	CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+	// CassandraQueryIdempotentKey is the attribute Key conforming to the
+	// "cassandra.query.idempotent" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+	// CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+	// "cassandra.speculative_execution.count" semantic conventions. It represents
+	// the number of times a query was speculatively executed. Not set or `0` if the
+	// query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 2
+	CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+	return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
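+//
+// A minimal sketch of annotating a query span with coordinator metadata
+// (values are taken from the documented examples; assumes "span" is an active
+// trace.Span):
+//
+//	span.SetAttributes(
+//		CassandraCoordinatorDC("us-west-2"),
+//		CassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
+//	)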
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+	return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+	return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+	return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+	// all
+	// Stability: development
+	CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	// Stability: development
+	CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	// Stability: development
+	CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	// Stability: development
+	CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	// Stability: development
+	CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+	// two
+	// Stability: development
+	CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+	// three
+	// Stability: development
+	CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+	// local_one
+	// Stability: development
+	CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+	// any
+	// Stability: development
+	CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+	// serial
+	// Stability: development
+	CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	// Stability: development
+	CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Namespace: cicd
+const (
+	// CICDPipelineActionNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+	// action a pipeline run is performing.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "BUILD", "RUN", "SYNC"
+	CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+	// CICDPipelineNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.name" semantic conventions. It represents the human readable
+	// name of the pipeline within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Build and Test", "Lint", "Deploy Go Project",
+	// "deploy_to_environment"
+	CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+	// CICDPipelineResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.result" semantic conventions. It represents the result of a
+	// pipeline run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+	// CICDPipelineRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+	// identifier of a pipeline run within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "120912"
+	CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+	// CICDPipelineRunStateKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.state" semantic conventions. It represents the state of
+	// the pipeline run; a run goes through these states during its lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pending", "executing", "finalizing"
+	CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+	// CICDPipelineRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+	// the pipeline run, providing the complete address in order to locate and
+	// identify the pipeline run.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+	//
+	// [URL]: https://wikipedia.org/wiki/URL
+	CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+	// CICDPipelineTaskNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.name" semantic conventions. It represents the human
+	// readable name of a task within a pipeline. Task here most closely aligns with
+	// a [computing process] in a pipeline. Other terms for tasks include commands,
+	// steps, and procedures.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+	//
+	// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+	CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+	// CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+	// identifier of a task run within a pipeline.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "12097"
+	CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+	// CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.result" semantic conventions. It represents the
+	// result of a task run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+	// CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+	// [URL] of the pipeline task run, providing the complete address in order to
+	// locate and identify the pipeline task run.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. +func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, eg. 
due to a compile error or a
+	// failing test. Such failures are usually detected by non-zero exit codes of
+	// the tools executed in the pipeline run.
+	// Stability: development
+	CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
+	// The pipeline run failed due to an error in the CICD system, eg. due to the
+	// worker being killed.
+	// Stability: development
+	CICDPipelineResultError = CICDPipelineResultKey.String("error")
+	// A timeout caused the pipeline run to be interrupted.
+	// Stability: development
+	CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
+	// The pipeline run was cancelled, eg. by a user manually cancelling the
+	// pipeline run.
+	// Stability: development
+	CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
+	// The pipeline run was skipped, eg. due to a precondition not being met.
+	// Stability: development
+	CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.run.state
+var (
+	// The run pending state spans from the event triggering the pipeline run until
+	// the execution of the run starts (eg. time spent in a queue, provisioning
+	// agents, creating run resources).
+	//
+	// Stability: development
+	CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
+	// The executing state spans the execution of any run tasks (eg. build, test).
+	// Stability: development
+	CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
+	// The finalizing state spans from when the run has finished executing until
+	// the run is complete (eg. cleanup of run resources).
+	// Stability: development
+	CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
+)
+
+// Enum values for cicd.pipeline.task.run.result
+var (
+	// The task run finished successfully.
+	// Stability: development
+	CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success")
+	// The task run did not finish successfully, eg. due to a compile error or a
+	// failing test. Such failures are usually detected by non-zero exit codes of
+	// the tools executed in the task run.
+	// Stability: development
+	CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure")
+	// The task run failed due to an error in the CICD system, eg. due to the worker
+	// being killed.
+	// Stability: development
+	CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error")
+	// A timeout caused the task run to be interrupted.
+	// Stability: development
+	CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout")
+	// The task run was cancelled, eg. by a user manually cancelling the task run.
+	// Stability: development
+	CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation")
+	// The task run was skipped, eg. due to a precondition not being met.
+	// Stability: development
+	CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.task.type
+var (
+	// build
+	// Stability: development
+	CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
+	// test
+	// Stability: development
+	CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
+	// deploy
+	// Stability: development
+	CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
+)
+
+// Enum values for cicd.worker.state
+var (
+	// The worker is not performing work for the CICD system. It is available to the
+	// CICD system to perform work on (online / idle).
+	// Stability: development
+	CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+	// The worker is performing work for the CICD system.
+	// Stability: development
+	CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+	// The worker is not available to the CICD system (disconnected / down).
+	// Stability: development
+	CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
+
+// Namespace: client
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix domain
+	// socket name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+	// Note: When observed from the server side, and when communicating through an
+	// intermediary, `client.address` SHOULD represent the client address behind any
+	// intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port" semantic
+	// conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through an
+	// intermediary, `client.port` SHOULD represent the client port behind any
+	// intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
+// Namespace: cloud
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+	// semantic conventions. It represents the cloud account ID the resource is
+	// assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "111111111111", "opentelemetry"
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often have
+	// multiple, isolated locations known as zones to increase availability.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-east-1c"
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+	// conventions. It represents the geographical region within a cloud provider.
+	// When associated with a resource, this attribute specifies the region where
+	// the resource operates. When calling services or APIs deployed on a cloud,
+	// this attribute identifies the region where the called destination is
+	// deployed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-central1", "us-east-1"
+	// Note: Refer to your provider's docs to see the available regions, for example
+	// [Alibaba Cloud regions], [AWS regions], [Azure regions],
+	// [Google Cloud regions], or [Tencent Cloud regions].
+	//
+	// [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+	// [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+	// [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+	// [Google Cloud regions]: https://cloud.google.com/about/locations
+	// [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+	// semantic conventions. It represents the cloud provider-specific native
+	// identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+	// [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+	// "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+	// "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/
+	// <RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>"
+	// Note: On some cloud providers, it may not be possible to determine the full
+	// ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud provider.
+	// The following well-known definitions MUST be used if you set this attribute
+	// and they apply:
+	//
+	// - **AWS Lambda:** The function [ARN].
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias suffix]
+	// with the resolved function version, as the same runtime instance may be
+	// invocable with
+	// multiple different aliases.
+	// - **GCP:** The [URI of the resource]
+	// - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	// This means that a span attribute MUST be used, as an Azure function app
+	// can host multiple functions that would usually share
+	// a TracerProvider.
+ //
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ // [full resource name]: https://google.aip.dev/122#full-resource-names
+ // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+ // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+ // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
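+//
+// Editorial usage sketch (not part of the upstream generated file): the
+// cloud.* helpers and enum values in this namespace are typically attached
+// to a resource rather than to individual spans. This assumes imports of
+// this package as `semconv` and of go.opentelemetry.io/otel/sdk/resource:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.CloudProviderAWS,
+//		semconv.CloudPlatformAWSLambda,
+//		semconv.CloudRegion("us-east-1"),
+//		semconv.CloudAccountID("111111111111"),
+//	)
+//
+// For AWS Lambda, cloud.resource_id may instead be set as a span attribute
+// via CloudResourceID, per the note above.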
+// +// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id +// [full resource name]: https://google.aip.dev/122#full-resource-names +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Enum values for cloud.platform +var ( + // Alibaba Cloud Elastic Compute Service + // Stability: development + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + // Stability: development + CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + // Stability: development + CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + // Stability: development + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + // Stability: development + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + // Stability: development + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + // Stability: development + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + // Stability: development + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + // Stability: development + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + // Stability: development + CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + // Stability: development + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + // Stability: development + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + // Stability: development + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + // Stability: development + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + // Stability: development + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + // Stability: development + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + // Stability: development + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + // Stability: development + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + // Stability: development + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + // Stability: development + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + // Stability: development + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + // Stability: development + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + // Stability: development + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + // Stability: 
development
+ CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ // Stability: development
+ CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Compute on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+ // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ // Stability: development
+ CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ // Stability: development
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ // Stability: development
+ CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Enum values for cloud.provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ // Stability: development
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ // Stability: development
+ CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+ // Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+ // Tencent Cloud
+ // Stability: development
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// Namespace: cloudevents
+const (
+ // CloudEventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the [event_id],
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+ //
+ // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+ CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudEventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the [source],
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+ // "my-service"
+ //
+ // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+ CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents specification] which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ //
+ // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+ CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudEventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the [subject]
+ // of the event in the context of the event producer (identified by source).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: mynewfile.jpg
+ //
+ // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+ CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudEventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type], which contains a value describing the type of event related to
+ // the originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+ //
+ // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+ CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id],
+// which uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+ return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source],
+// which identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+ return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+ return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type], which contains a value describing the type of event related to
+// the originating occurrence.
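+//
+// Editorial usage sketch (not part of the upstream generated file):
+// recording CloudEvents attributes on the span that processes an event,
+// assuming this package is imported as `semconv` and `span` is an active
+// trace.Span; the values echo the documented examples:
+//
+//	span.SetAttributes(
+//		semconv.CloudEventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudEventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudEventsEventType("com.github.pull_request.opened"),
+//	)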
+// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. It represents the UID + // identifying the process. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
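+//
+// Editorial usage sketch (not part of the upstream generated file): the
+// cloudfoundry.* helpers in this namespace typically describe a resource,
+// with values drawn from the VCAP_APPLICATION and CF_INSTANCE_INDEX
+// environment variables as the notes above describe. This assumes imports
+// of this package as `semconv`, of os, and of
+// go.opentelemetry.io/otel/sdk/resource:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.CloudFoundryAppName("my-app-name"),
+//		semconv.CloudFoundryAppInstanceID(os.Getenv("CF_INSTANCE_INDEX")),
+//		semconv.CloudFoundrySpaceName("my-space-name"),
+//	)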
+func CloudFoundrySpaceName(val string) attribute.KeyValue {
+ return CloudFoundrySpaceNameKey.String(val)
+}
+
+// CloudFoundrySystemID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.id" semantic conventions. It represents a guid or another
+// name describing the event source.
+func CloudFoundrySystemID(val string) attribute.KeyValue {
+ return CloudFoundrySystemIDKey.String(val)
+}
+
+// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+// describing the concrete instance of the event source.
+func CloudFoundrySystemInstanceID(val string) attribute.KeyValue {
+ return CloudFoundrySystemInstanceIDKey.String(val)
+}
+
+// Namespace: code
+const (
+ // CodeColumnNumberKey is the attribute Key conforming to the
+ // "code.column.number" semantic conventions. It represents the column number in
+ // `code.file.path` best representing the operation. It SHOULD point within the
+ // code unit named in `code.function.name`. This attribute MUST NOT be used on
+ // the Profile signal since the data is already captured in 'message Line'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeColumnNumberKey = attribute.Key("code.column.number")
+
+ // CodeFilePathKey is the attribute Key conforming to the "code.file.path"
+ // semantic conventions. It represents the source code file name that identifies
+ // the code unit as uniquely as possible (preferably an absolute file path).
+ // This attribute MUST NOT be used on the Profile signal since the data is
+ // already captured in 'message Function'. This constraint is imposed to prevent
+ // redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: /usr/local/MyApplication/content_root/app/index.php
+ CodeFilePathKey = attribute.Key("code.file.path")
+
+ // CodeFunctionNameKey is the attribute Key conforming to the
+ // "code.function.name" semantic conventions. It represents the method or
+ // function fully-qualified name without arguments. The value should fit the
+ // natural representation of the language runtime, which is also likely the same
+ // used within `code.stacktrace` attribute value. This attribute MUST NOT be
+ // used on the Profile signal since the data is already captured in 'message
+ // Function'. This constraint is imposed to prevent redundancy and maintain data
+ // integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "com.example.MyHttpService.serveRequest",
+ // "GuzzleHttp\Client::transfer", "fopen"
+ // Note: Values and format depend on each language runtime; thus it is
+ // impossible to provide an exhaustive list of examples.
+ // The values are usually the same (or prefixes of) the ones found in native
+ // stack trace representation stored in
+ // `code.stacktrace` without information on arguments.
+ // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. The value should fit the +// natural representation of the language runtime, which is also likely the same +// used within `code.stacktrace` attribute value. 
This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+ return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
+// Namespace: container
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used to
+ // run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol"
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol", "--config", "config.yaml"
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full command
+ // run by the container as a single string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol --config config.yaml"
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCSIPluginNameKey is the attribute Key conforming to the
+ // "container.csi.plugin.name" semantic conventions. It represents the name of
+ // the CSI ([Container Storage Interface]) plugin used by the volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pd.csi.storage.gke.io"
+ // Note: This can sometimes be referred to as a "driver" in CSI implementations.
+ // This should represent the `name` field of the GetPluginInfo RPC.
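+ //
+ // Editorial usage sketch for the code.* helpers defined earlier in this
+ // file (not part of the upstream generated file): annotating a span with
+ // its source location, assuming this package is imported as `semconv`,
+ // `span` is an active trace.Span, and the location values are
+ // illustrative:
+ //
+ //	span.SetAttributes(
+ //		semconv.CodeFilePath("/usr/local/MyApplication/content_root/app/index.php"),
+ //		semconv.CodeFunctionName("com.example.MyHttpService.serveRequest"),
+ //		semconv.CodeLineNumber(42),
+ //	)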
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+ // ContainerCSIVolumeIDKey is the attribute Key conforming to the
+ // "container.csi.volume.id" semantic conventions. It represents the unique
+ // volume ID returned by the CSI ([Container Storage Interface]) plugin.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+ // Note: This can sometimes be referred to as a "volume handle" in CSI
+ // implementations. This should represent the `Volume.volume_id` field in CSI
+ // spec.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+ // conventions. It represents the container ID. Usually a UUID, as for example
+ // used to [identify Docker containers]. The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a3bf90e006b2"
+ //
+ // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime specific
+ // image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect [API]
+ // endpoint.
+ // K8s defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`
+ // .
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ //
+ // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of the
+ // image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gcr.io/opentelemetry/operator"
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the repo
+ // digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+ // the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container runtime
+ // managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container runtime
+// managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
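+ //
+ // Editorial usage sketch for the container.* helpers above (not part of
+ // the upstream generated file): these usually populate a resource
+ // describing the container a process runs in. This assumes imports of
+ // this package as `semconv` and of go.opentelemetry.io/otel/sdk/resource:
+ //
+ //	res := resource.NewWithAttributes(
+ //		semconv.SchemaURL,
+ //		semconv.ContainerID("a3bf90e006b2"),
+ //		semconv.ContainerName("opentelemetry-autoconf"),
+ //		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
+ //		semconv.ContainerImageTags("v1.27.1"),
+ //		semconv.ContainerRuntime("containerd"),
+ //	)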
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. +func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // user + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // system + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // iowait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application. In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. 
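+ //
+ // Editorial usage sketch for the cpu.* attributes defined above (not part
+ // of the upstream generated file): these are typically recorded on
+ // metrics rather than spans. This assumes this package is imported as
+ // `semconv`, `counter` is a metric.Float64Counter from
+ // go.opentelemetry.io/otel/metric, and `ctx` and `seconds` are in scope:
+ //
+ //	counter.Add(ctx, seconds, metric.WithAttributes(
+ //		semconv.CPULogicalNumber(1),
+ //		semconv.CPUModeUser,
+ //	))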
+ DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. 
+ // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. + // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. + DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. 
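+//
+// Editorial usage sketch (not part of the upstream generated file):
+// combining the db.* helpers and enum values in this namespace on a
+// database client span, assuming this package is imported as `semconv` and
+// `span` is an active trace.Span; the values echo the documented examples:
+//
+//	span.SetAttributes(
+//		semconv.DBSystemNamePostgreSQL,
+//		semconv.DBNamespace("customers"),
+//		semconv.DBOperationName("SELECT"),
+//		semconv.DBCollectionName("public.users"),
+//		semconv.DBQueryText("SELECT * FROM wuser_table where username = ?"),
+//	)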
+func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only. + // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // + // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = 
DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // 
[SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. 
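+//
+// A minimal usage sketch (illustrative only; assumes a trace.Span named
+// `span` is in scope, and reuses the "deploy-frontend" example value):
+//
+//	span.SetAttributes(DeploymentName("deploy-frontend"))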
+func DeploymentName(val string) attribute.KeyValue {
+	return DeploymentNameKey.String(val)
+}
+
+// Enum values for deployment.status
+var (
+	// failed
+	// Stability: development
+	DeploymentStatusFailed = DeploymentStatusKey.String("failed")
+	// succeeded
+	// Stability: development
+	DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded")
+)
+
+// Namespace: destination
+const (
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the destination
+	// address - domain name if available without reverse DNS lookup; otherwise, IP
+	// address or Unix domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+	// Note: When observed from the source side, and when communicating through an
+	// intermediary, `destination.address` SHOULD represent the destination address
+	// behind any intermediaries, for example proxies, if it's available.
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the "destination.port"
+	// semantic conventions. It represents the destination port number.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// Namespace: device
+const (
+	// DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+	// conventions. It represents a unique identifier representing the device.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "123456789012345", "01:23:45:67:89:AB"
+	// Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+	// NOT change if an app is uninstalled and re-installed.
+	// However, it might be resettable by the user for all apps on a device.
+	// Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+	// used as values.
+	//
+	// More information about Android identifier best practices can be found [here].
+	//
+	// > [!WARNING]
+	// > This attribute may contain sensitive (PII) information. Caution
+	// > should be taken when storing personal data or anything which can identify a
+	// > user. GDPR and data protection laws may apply;
+	// > ensure you do your own due diligence.
+	// > Due to these reasons, this
+	// > identifier is not recommended for consumer applications and will likely
+	// > result in rejection from both Google Play and App Store.
+	// > However, it may be appropriate for specific enterprise scenarios, such as
+	// > kiosk devices or enterprise-managed devices, with appropriate compliance
+	// > clearance.
+	// > Any instrumentation providing this identifier MUST implement it as an
+	// > opt-in feature.
+	// > See [`app.installation.id`] for a more privacy-preserving alternative.
+	//
+	// [here]: https://developer.android.com/training/articles/user-data-ids
+	// [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+	DeviceIDKey = attribute.Key("device.id")
+
+	// DeviceManufacturerKey is the attribute Key conforming to the
+	// "device.manufacturer" semantic conventions. It represents the name of the
+	// device manufacturer.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Apple", "Samsung"
+	// Note: The Android OS provides this field via [Build]. iOS apps SHOULD
+	// hardcode the value `Apple`.
+	//
+	// [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+	// DeviceModelIdentifierKey is the attribute Key conforming to the
+	// "device.model.identifier" semantic conventions. It represents the model
+	// identifier for the device.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iPhone3,4", "SM-G920F"
+	// Note: It's recommended this value represents a machine-readable version of
+	// the model identifier rather than the market or consumer-friendly name of the
+	// device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+	// DeviceModelNameKey is the attribute Key conforming to the "device.model.name"
+	// semantic conventions. It represents the marketing name for the device model.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iPhone 6s Plus", "Samsung Galaxy S6"
+	// Note: It's recommended this value represents a human-readable version of the
+	// device model rather than a machine-readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic
+// conventions. It represents a unique identifier representing the device.
+func DeviceID(val string) attribute.KeyValue {
+	return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+	return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name for
+// the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+	return DeviceModelNameKey.String(val)
+}
+
+// Namespace: disk
+const (
+	// DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction"
+	// semantic conventions. It represents the disk IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "read"
+	DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+	// read
+	// Stability: development
+	DiskIODirectionRead = DiskIODirectionKey.String("read")
+	// write
+	// Stability: development
+	DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
+
+// Namespace: dns
+const (
+	// DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+	// semantic conventions. It represents the name being queried.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "www.example.com", "opentelemetry.io"
+	// Note: If the name field contains non-printable characters (below 32 or above
+	// 126), those characters should be represented as escaped base 10 integers
+	// (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns,
+	// and line feeds should be converted to \t, \r, and \n respectively.
+	DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+	return DNSQuestionNameKey.String(val)
+}
+
+// Namespace: elasticsearch
+const (
+	// ElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a request was
+	// routed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "instance-0000000001"
+	ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+	return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+	// EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+	// conventions. It represents the unique identifier of an end user in the
+	// system. It may be a username, email address, or other identifier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "username"
+	// Note: Unique identifier of an end user in the system.
+	//
+	// > [!Warning]
+	// > This field contains sensitive (PII) information.
+	EnduserIDKey = attribute.Key("enduser.id")
+
+	// EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+	// semantic conventions. It represents the pseudonymous identifier of an end
+	// user. This identifier should be a random value that is not directly linked or
+	// associated with the end user's actual identity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "QdH5CAWJgqVT4rOr0qtumf"
+	// Note: Pseudonymous identifier of an end user.
+	//
+	// > [!Warning]
+	// > This field contains sensitive (linkable PII) information.
+	EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+	return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+	// ErrorMessageKey is the attribute Key conforming to the "error.message"
+	// semantic conventions. It represents a message providing more detail about an
+	// error in human-readable form.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Unexpected input type: string", "The user has exceeded their
+	// storage quota"
+	// Note: `error.message` should provide additional context and detail about an
+	// error.
+	// It is NOT RECOMMENDED to duplicate the value of `error.type` in
+	// `error.message`.
+	// It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+	// `error.message`.
+	//
+	// `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+	// cardinality and overlap with span status.
+	ErrorMessageKey = attribute.Key("error.message")
+
+	// ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+	// conventions. It describes a class of error the operation ended
+	// with.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "timeout", "java.net.UnknownHostException",
+	// "server_certificate_invalid", "500"
+	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
+	// cardinality.
+	//
+	// When `error.type` is set to a type (e.g., an exception type), its
+	// canonical class name identifying the type within the artifact SHOULD be used.
+	//
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library SHOULD be
+	// low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query time
+	// when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT set
+	// `error.type`.
+	//
+	// If a specific domain defines its own set of error identifiers (such as HTTP
+	// or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// - Use a domain-specific attribute
+	// - Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+	return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a
+	// custom value.
+ // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? 
*
+	//
+	// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name of
+	// the source on which the triggering operation was performed. For example, in
+	// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+	// database name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "myBucketName", "myDbName"
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or S3 is
+	// the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "myFile.txt", "myTableName"
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It describes
+	// the type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string containing
+	// the time when the data was accessed in the [ISO 8601] format expressed in
+	// [UTC].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 2020-01-23T13:47:06Z
+	//
+	// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+	// [UTC]: https://www.w3.org/TR/NOTE-datetime
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a string,
+	// that will be potentially reused for other invocations to the same
+	// function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+	// Note: - **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+	// the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+	// FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+	// semantic conventions. It represents the name of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: my-function
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+	// function.
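+	//
+	// A minimal usage sketch (illustrative only; assumes a trace.Span named
+	// `span` is in scope, and reuses the "my-function" example together with
+	// the FaaSInvokedProviderAWS enum value defined later in this file):
+	//
+	//	span.SetAttributes(FaaSInvokedName("my-function"), FaaSInvokedProviderAWS)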
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud region of
+	// the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: eu-central-1
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+	// function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+	// semantic conventions. It represents the amount of memory available to the
+	// serverless function converted to Bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: It's recommended to set this attribute since e.g. too little memory can
+	// easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+	// the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+	// information (which must be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this runtime
+	// instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-function", "myazurefunctionapp/some-function-name"
+	// Note: This is the name of the function as configured/deployed on the FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function.name`]
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud providers/products:
+	//
+	// - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `cloud.resource_id` attribute).
+	//
+	//
+	// [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation time
+	// in the [ISO 8601] format expressed in [UTC].
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. 
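+//
+// A minimal usage sketch (illustrative only; assumes a trace.Span named
+// `span` is in scope and the standard library `time` package is imported;
+// time.RFC3339 produces an ISO 8601-compatible timestamp):
+//
+//	span.SetAttributes(FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339)))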
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. 
+	// Stability: development
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+	// Alibaba Cloud
+	// Stability: development
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	// Stability: development
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	// Stability: development
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	// Stability: development
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	// Stability: development
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+	// A response to some data source operation such as a database or filesystem
+	// read/write
+	// Stability: development
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	// Stability: development
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	// Stability: development
+	FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	// Stability: development
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	// Stability: development
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Namespace: feature_flag
+const (
+	// FeatureFlagContextIDKey is the attribute Key conforming to the
+	// "feature_flag.context.id" semantic conventions. It represents the unique
+	// identifier for the flag evaluation context. For example, the targeting key.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+	FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+	// FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+	// semantic conventions. It represents the lookup key of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "logo-color"
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider.name" semantic conventions. It identifies the
+	// feature flag provider.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Flag Manager"
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+	// FeatureFlagResultReasonKey is the attribute Key conforming to the
+	// "feature_flag.result.reason" semantic conventions. It represents the reason
+	// code which shows how a feature flag value was determined.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "static", "targeting_match", "error", "default"
+	FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+	// FeatureFlagResultValueKey is the attribute Key conforming to the
+	// "feature_flag.result.value" semantic conventions. It represents the evaluated
+	// value of the feature flag.
+	//
+	// Type: any
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "#ff0000", true, 3
+	// Note: With some feature flag providers, feature flag results can be quite
+	// large or contain private or sensitive details.
+	// Because of this, `feature_flag.result.variant` is often the preferred
+	// attribute if it is available.
+	//
+	// It may be desirable to redact or otherwise limit the size and scope of
+	// `feature_flag.result.value` if possible.
+	// Because the evaluated flag value is unstructured and may be any type, it is
+	// left to the instrumentation author to determine how best to achieve this.
+	FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+	// FeatureFlagResultVariantKey is the attribute Key conforming to the
+	// "feature_flag.result.variant" semantic conventions. It represents a semantic
+	// identifier for an evaluated flag value.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "red", "true", "on"
+	// Note: A semantic identifier, commonly referred to as a variant, provides a
+	// means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+	// FeatureFlagSetIDKey is the attribute Key conforming to the
+	// "feature_flag.set.id" semantic conventions. It represents the identifier of
+	// the [flag set] to which the feature flag belongs.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "proj-1", "ab98sgs", "service1/dev"
+	//
+	// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+	FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+	// FeatureFlagVersionKey is the attribute Key conforming to the
+	// "feature_flag.version" semantic conventions. It represents the version of the
+	// ruleset used during the evaluation. This may be any stable value which
+	// uniquely identifies the ruleset.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "1", "01ABCDEF"
+	FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+	return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It identifies the
+// feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
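+//
+// A minimal usage sketch (illustrative only; assumes a trace.Span named
+// `span` is in scope, and reuses the "logo-color" and "red" example values):
+//
+//	span.SetAttributes(FeatureFlagKey("logo-color"), FeatureFlagResultVariant("red"))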
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+	return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+	return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+	return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+	// The resolved value is static (no dynamic evaluation).
+	// Stability: development
+	FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+	// The resolved value fell back to a pre-configured value (no dynamic evaluation
+	// occurred or dynamic evaluation yielded no result).
+	// Stability: development
+	FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+	// The resolved value was the result of a dynamic evaluation, such as a rule or
+	// specific user-targeting.
+	// Stability: development
+	FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+	// The resolved value was the result of pseudorandom assignment.
+	// Stability: development
+	FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+	// The resolved value was retrieved from cache.
+	// Stability: development
+	FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+	// The resolved value was the result of the flag being disabled in the
+	// management system.
+	// Stability: development
+	FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+	// The reason for the resolved value could not be determined.
+	// Stability: development
+	FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+	// The resolved value is non-authoritative or possibly out of date.
+	// Stability: development
+	FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+	// The resolved value was the result of an error.
+	// Stability: development
+	FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
+
+// Namespace: file
+const (
+	// FileAccessedKey is the attribute Key conforming to the "file.accessed"
+	// semantic conventions. It represents the time when the file was last accessed,
+	// in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: This attribute might not be supported by some file systems — NFS,
+	// FAT32, in embedded OS, etc.
+	FileAccessedKey = attribute.Key("file.accessed")
+
+	// FileAttributesKey is the attribute Key conforming to the "file.attributes"
+	// semantic conventions. It represents the array of file attributes.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "readonly", "hidden"
+	// Note: Attribute names depend on the OS or file system. Here's a
+	// non-exhaustive list of values expected for this attribute: `archive`,
+	// `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+	// `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+	// `write`.
+	FileAttributesKey = attribute.Key("file.attributes")
+
+	// FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+	// conventions. It represents the time when the file attributes or metadata was
+	// last changed, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: `file.changed` captures the time when any of the file's properties or
+	// attributes (including the content) are changed, while `file.modified`
+	// captures the timestamp when the file content is modified.
+	FileChangedKey = attribute.Key("file.changed")
+
+	// FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+	// conventions. It represents the time when the file was created, in ISO 8601
+	// format.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "2021-01-01T12:00:00Z"
+	// Note: This attribute might not be supported by some file systems — NFS,
+	// FAT32, in embedded OS, etc.
+	FileCreatedKey = attribute.Key("file.created")
+
+	// FileDirectoryKey is the attribute Key conforming to the "file.directory"
+	// semantic conventions. It represents the directory where the file is located.
+	// It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/home/user", "C:\Program Files\MyApp"
+	FileDirectoryKey = attribute.Key("file.directory")
+
+	// FileExtensionKey is the attribute Key conforming to the "file.extension"
+	// semantic conventions. It represents the file extension, excluding the leading
+	// dot.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "png", "gz"
+	// Note: When the file name has multiple extensions (example.tar.gz), only the
+	// last one should be captured ("gz", not "tar.gz").
+	FileExtensionKey = attribute.Key("file.extension")
+
+	// FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+	// semantic conventions. It represents the name of the fork. A fork is
+	// additional data associated with a filesystem object.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Zone.Identifier"
+	// Note: On Linux, a resource fork is used to store additional data with a
+	// filesystem object. A file always has at least one fork for the data portion,
+	// and additional forks may exist.
+	// On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+	// data stream for a file is just called $DATA. Zone.Identifier is commonly used
+	// by Windows to track contents downloaded from the Internet. An ADS is
+	// typically of the form: C:\path\to\filename.extension:some_fork_name, and
+	// some_fork_name is the value that should populate `fork_name`.
+	// `filename.extension` should populate `file.name`, and `extension` should
+	// populate `file.extension`. The full path, `file.path`, will include the fork
+	// name.
+	FileForkNameKey = attribute.Key("file.fork_name")
+
+	// FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+	// semantic conventions.
It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. +func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. +func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. 
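+//
+// A minimal usage sketch (illustrative only; assumes a trace.Span named
+// `span` is in scope, `info` is an os.FileInfo, and the standard library
+// `time` package is imported):
+//
+//	span.SetAttributes(FileModified(info.ModTime().UTC().Format(time.RFC3339)))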
+func FileModified(val string) attribute.KeyValue {
+ return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+ return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+ return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+ return FileSymbolicLinkTargetPathKey.String(val)
+}
+
+// Namespace: gcp
+const (
+ // GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+ // "gcp.apphub.application.container" semantic conventions. It represents the
+ // container within GCP where the AppHub application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+ // GCPAppHubApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub.application.id" semantic conventions. It represents the name of
+ // the application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+ // GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+ // "gcp.apphub.application.location" semantic conventions. It represents the GCP
+ // zone or region where the application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+ // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+ // criticality of a service, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+ // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.environment_type" semantic conventions. It represents the
+ // environment of a service, i.e., the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+ // GCPAppHubServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub.service.id" semantic conventions. It represents the name of the
+ // service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+ // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+ // the criticality of a workload, which indicates its importance to the
+ // business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+ // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.environment_type" semantic conventions. It represents
+ // the environment of a workload, i.e., the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+ // GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+ // workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+ // GCPClientServiceKey is the attribute Key conforming to the
+ // "gcp.client.service" semantic conventions. It identifies the Google Cloud
+ // service for which the official client library is intended.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+ // Note: Intended to be a stable identifier for Google Cloud client libraries
+ // that is uniform across implementation languages. The value should be derived
+ // from the canonical service domain for the service; for example,
+ // 'foo.googleapis.com' should result in a value of 'foo'.
+ GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+ // the Cloud Run [execution] being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`] environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "job-name-xxxx", "sample-job-mdw84"
+ //
+ // [execution]: https://cloud.google.com/run/docs/managing/job-executions
+ // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+ // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1
+ //
+ // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+ // GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+ // of a GCE instance. This is the full value of the default or
+ // [custom hostname].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-host1234.example.com",
+ // "sample-vm.us-west1-b.c.my-project.internal"
+ //
+ // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+ GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGCEInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance name
+ // of a GCE instance. This is the value provided by `host.name`, the visible
+ // name of the instance in the Cloud Console UI, and the prefix for the default
+ // hostname of the instance as defined by the [default internal DNS name].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-1", "my-vm-name"
+ //
+ // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+ GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
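+//
+// A hedged usage sketch: AppHub attributes are typically recorded together as
+// resource attributes. The resource.NewWithAttributes call (from
+// go.opentelemetry.io/otel/sdk/resource) and the SchemaURL constant are
+// assumptions about the caller's setup, not part of this file.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.GCPAppHubApplicationContainer("projects/my-container-project"),
+//		semconv.GCPAppHubApplicationID("my-application"),
+//		semconv.GCPAppHubApplicationLocation("us-central1"),
+//	)
+//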
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+ return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It identifies the Google Cloud
+// service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+ return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or
+// [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+ return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+ return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Math Tutor", "Fiction Writer"
+ GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+ // GenAIConversationIDKey is the attribute Key conforming to the
+ // "gen_ai.conversation.id" semantic conventions. It represents the unique
+ // identifier for a conversation (session, thread), used to store and correlate
+ // messages within this conversation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+ // GenAIDataSourceIDKey is the attribute Key conforming to the
+ // "gen_ai.data_source.id" semantic conventions. It represents the data source
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "H7STPQYOND"
+ // Note: Data sources are used by AI agents and RAG applications to store
+ // grounding data. A data source may be an external database, object store,
+ // document collection, website, or any other storage system used by the GenAI
+ // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+ // used by the GenAI system rather than a name specific to the external storage,
+ // such as a database or object store. Semantic conventions referencing
+ // `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+ // `db.*`, to further identify and describe the data source.
+ GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+ // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the
+ // "gen_ai.openai.request.service_tier" semantic conventions. It represents the
+ // service tier requested. May be a specific tier, default, or auto.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "auto", "default"
+ GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier")
+
+ // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the
+ // "gen_ai.openai.response.service_tier" semantic conventions. It represents the
+ // service tier used for the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "scale", "default"
+ GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier")
+
+ // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to
+ // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
+ // represents a fingerprint to track any eventual change in the Generative AI
+ // environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fp_44709d6fcb"
+ GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint")
+
+ // GenAIOperationNameKey is the attribute Key conforming to the
+ // "gen_ai.operation.name" semantic conventions. It represents the name of the
+ // operation being performed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If one of the predefined values applies, but the specific system uses
+ // a different name, it's RECOMMENDED to document it in the semantic conventions
+ // for that GenAI system and to use the system-specific name in the
+ // instrumentation.
+ // If a different name is not documented, instrumentation libraries SHOULD use
+ // the applicable predefined value.
+ GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+ // GenAIOutputTypeKey is the attribute Key conforming to the
+ // "gen_ai.output.type" semantic conventions. It represents the content type
+ // requested by the client.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute SHOULD be used when the client requests output of a
+ // specific type. The model may return zero or more outputs of this type.
+ // This attribute specifies the output modality and not the actual output
+ // format. For example, if an image is requested, the actual output could be a
+ // URL pointing to an image file.
+ // Additional output format details may be recorded in the future in the
+ // `gen_ai.output.{type}.*` attributes.
+ GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+ // GenAIRequestChoiceCountKey is the attribute Key conforming to the
+ // "gen_ai.request.choice.count" semantic conventions. It represents the target
+ // number of candidate completions to return.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3
+ GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+ // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+ // "gen_ai.request.encoding_formats" semantic conventions. It represents the
+ // encoding formats requested in an embeddings operation, if specified.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ["base64"], ["float", "binary"]
+ // Note: In some GenAI systems the encoding formats are called embedding types.
+ // Also, some GenAI systems only accept a single format per request.
+ GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+ // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+ // frequency penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+ // GenAIRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+ // number of tokens the model generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAIRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of the
+ // GenAI model a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4"
+ GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the seed of the
+ // request; requests with the same seed value are more likely to return the
+ // same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ["stop"], ["stop", "length"]
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as identified
+ // by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "openai"
+ // Note: The `gen_ai.system` describes a family of GenAI models, with the
+ // specific model identified by the `gen_ai.request.model` and
+ // `gen_ai.response.model` attributes.
+ //
+ // The actual GenAI product may differ from the one identified by the client.
+ // Multiple systems, including Azure OpenAI and Gemini, are accessible by
+ // OpenAI client libraries. In such cases, the `gen_ai.system` is set to
+ // `openai` based on the instrumentation's best knowledge, instead of the
+ // actual system. The `server.address` attribute may help identify the actual
+ // system in use for `openai`.
+ //
+ // For custom models, a custom friendly name SHOULD be used.
+ // If none of these options apply, the `gen_ai.system` SHOULD be set to
+ // `_OTHER`.
+ GenAISystemKey = attribute.Key("gen_ai.system")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates
+ // parameters for a predefined function, and the client executes the logic.
+ // Client-side operations are actions taken on the user's end or within the
+ // client application.
+ // Datastore: A tool used by the agent to access and query structured or
+ // unstructured external data for retrieval-augmented tasks or knowledge
+ // updates.
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+ // GenAIUsageInputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.input_tokens" semantic conventions.
It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "gen_ai.openai.response.service_tier" semantic conventions. It represents the +// service tier used for the response. +func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { + return GenAIOpenAIResponseServiceTierKey.String(val) +} + +// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming +// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It +// represents a fingerprint to track any eventual change in the Generative AI +// environment. +func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return GenAIOpenAIResponseSystemFingerprintKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. 
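+//
+// Illustrative only: variadic helpers such as the one below produce a single
+// string-slice attribute, e.g.
+//
+//	kv := GenAIRequestEncodingFormats("float", "base64")
+//	// kv carries the key "gen_ai.request.encoding_formats" with the
+//	// STRINGSLICE value ["float", "base64"].
+//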
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the seed of the
+// request; requests with the same seed value are more likely to return the
+// same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
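+//
+// A minimal sketch, assuming an instrumentation that records one span per chat
+// completion (the span variable and the literal values are illustrative, not
+// prescribed by these conventions):
+//
+//	span.SetAttributes(
+//		GenAIRequestModel("gpt-4"),
+//		GenAIRequestTemperature(0.0),
+//		GenAIResponseID("chatcmpl-123"),
+//		GenAIResponseModel("gpt-4-0613"),
+//		GenAIUsageInputTokens(100),
+//		GenAIUsageOutputTokens(180),
+//	)
+//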
+func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). +func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. 
+ // Stability: development + GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") +) + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.system +var ( + // OpenAI + // Stability: development + GenAISystemOpenAI = GenAISystemKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") + // Vertex AI + // Stability: development + GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") + // Gemini + // Stability: development + GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") + // Deprecated: Use 'gcp.vertex_ai' instead. + GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") + // Deprecated: Use 'gcp.gemini' instead. 
+ GenAISystemGemini = GenAISystemKey.String("gemini") + // Anthropic + // Stability: development + GenAISystemAnthropic = GenAISystemKey.String("anthropic") + // Cohere + // Stability: development + GenAISystemCohere = GenAISystemKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference") + // Azure OpenAI + // Stability: development + GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai") + // IBM Watsonx AI + // Stability: development + GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") + // AWS Bedrock + // Stability: development + GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") + // Perplexity + // Stability: development + GenAISystemPerplexity = GenAISystemKey.String("perplexity") + // xAI + // Stability: development + GenAISystemXai = GenAISystemKey.String("xai") + // DeepSeek + // Stability: development + GenAISystemDeepseek = GenAISystemKey.String("deepseek") + // Groq + // Stability: development + GenAISystemGroq = GenAISystemKey.String("groq") + // Mistral AI + // Stability: development + GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Deprecated: Replaced by `output`. + GenAITokenTypeCompletion = GenAITokenTypeKey.String("output") + // Output tokens (completion, response, etc.) + // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). 
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
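+//
+// Usage sketch (illustrative; the span is assumed to wrap a GraphQL
+// execution):
+//
+//	span.SetAttributes(
+//		GraphQLOperationName("findBookById"),
+//		GraphQLOperationTypeQuery,
+//		GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
+//	)
+//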
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
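+//
+// A minimal usage sketch (the model name shown is illustrative):
+//
+//     attr := HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz")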
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. 
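+//
+// A minimal usage sketch (the machine type shown is illustrative):
+//
+//     attr := HostType("n1-standard-1")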
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Enum values for host.arch
+var (
+ // AMD64
+ // Stability: development
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ // Stability: development
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ // Stability: development
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ // Stability: development
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ // Stability: development
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ // Stability: development
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ // Stability: development
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ // Stability: development
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// Namespace: http
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of the
+ // HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "idle"
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of the
+ // request payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP request
+ // method.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GET", "POST", "HEAD"
+ // Note: HTTP request method value SHOULD be "known" to the instrumentation.
+ // By default, this convention defines "known" methods as the ones listed in
+ // [RFC9110]
+ // and the PATCH method defined in [RFC5789].
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set the
+ // `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of
+ // case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is not a
+ // list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and the `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods to be
+ // case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
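+ //
+ // A sketch of the fallback this note requires (`isKnown` and `req` are
+ // hypothetical; the enum values are defined later in this file):
+ //
+ //     attr := HTTPRequestMethodOther
+ //     if isKnown(req.Method) {
+ //         attr = HTTPRequestMethodKey.String(req.Method)
+ //     }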
+ // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. 
It represents the matched route, that is, the path template in + // the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/users/:userID?", "{controller}/{action}/{id?}" + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the [application root] if there is one. + // + // [application root]: /docs/http/http-spans.md#http-server-definitions + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of the +// request in bytes. This should be the total number of bytes sent over the wire, +// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, +// and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of the +// response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. It represents the total size of the +// response in bytes. This should be the total number of bytes sent over the +// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. 
It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. + // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. 
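+ //
+ // A usage sketch pairing the component type with its state, mirroring the
+ // note below (both enum values are defined later in this file):
+ //
+ //     attrs := []attribute.KeyValue{HwTypeTemperature, HwStateDegraded}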
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: Describes the category of the hardware component for which `hw.state`
+ // is being reported. For example, `hw.type=temperature` along with
+ // `hw.state=degraded` would indicate that the temperature of the hardware
+ // component has been reported as `degraded`.
+ HwTypeKey = attribute.Key("hw.type")
+)
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+ return HwIDKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+ return HwNameKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func HwParent(val string) attribute.KeyValue {
+ return HwParentKey.String(val)
+}
+
+// Enum values for hw.state
+var (
+ // Ok
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Degraded
+ // Stability: development
+ HwStateDegraded = HwStateKey.String("degraded")
+ // Failed
+ // Stability: development
+ HwStateFailed = HwStateKey.String("failed")
+)
+
+// Enum values for hw.type
+var (
+ // Battery
+ // Stability: development
+ HwTypeBattery = HwTypeKey.String("battery")
+ // CPU
+ // Stability: development
+ HwTypeCPU = HwTypeKey.String("cpu")
+ // Disk controller
+ // Stability: development
+ HwTypeDiskController = HwTypeKey.String("disk_controller")
+ // Enclosure
+ // Stability: development
+ HwTypeEnclosure = HwTypeKey.String("enclosure")
+ // Fan
+ // Stability: development
+ HwTypeFan = HwTypeKey.String("fan")
+ // GPU
+ // Stability: development
+ HwTypeGpu = HwTypeKey.String("gpu")
+ // Logical disk
+ // Stability: development
+ HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+ // Memory
+ // Stability: development
+ HwTypeMemory = HwTypeKey.String("memory")
+ // Network
+ // Stability: development
+ HwTypeNetwork = HwTypeKey.String("network")
+ // Physical disk
+ // Stability: development
+ HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+ // Power supply
+ // Stability: development
+ HwTypePowerSupply = HwTypeKey.String("power_supply")
+ // Tape drive
+ // Stability: development
+ HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+ // Temperature
+ // Stability: development
+ HwTypeTemperature = HwTypeKey.String("temperature")
+ // Voltage
+ // Stability: development
+ HwTypeVoltage = HwTypeKey.String("voltage")
+)
+
+// Namespace: ios
+const (
+ // IOSAppStateKey is the attribute Key conforming to the "ios.app.state"
+ // semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The iOS lifecycle states are defined in the
+ // [UIApplicationDelegate documentation], from which the `OS terminology`
+ // column values are derived.
+ //
+ // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate
+ IOSAppStateKey = attribute.Key("ios.app.state")
+)
+
+// Enum values for ios.app.state
+var (
+ // The app has become `active`. Associated with UIKit notification
+ // `applicationDidBecomeActive`.
+ //
+ // Stability: development
+ IOSAppStateActive = IOSAppStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification
+ // `applicationWillResignActive`.
+ //
+ // Stability: development
+ IOSAppStateInactive = IOSAppStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit
+ // notification `applicationDidEnterBackground`.
+ //
+ // Stability: development
+ IOSAppStateBackground = IOSAppStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit
+ // notification `applicationWillEnterForeground`.
+ //
+ // Stability: development
+ IOSAppStateForeground = IOSAppStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification
+ // `applicationWillTerminate`.
+ //
+ // Stability: development
+ IOSAppStateTerminate = IOSAppStateKey.String("terminate")
+)
+
+// Namespace: k8s
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name"
+ // semantic conventions. It represents the name of the cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-cluster"
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid"
+ // semantic conventions. It represents a pseudo-ID for the cluster, set to the
+ // UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8s cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8s ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T X.667],
+ // which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // > different from all other UUIDs generated before 3603 A.D., or is
+ // > extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ //
+ // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "redis"
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the number
+ // of times the container was restarted. This attribute can be used to identify
+ // a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to
+ // the "k8s.container.status.last_terminated_reason" semantic conventions. It
+ // represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Evicted", "Error"
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name"
+ // semantic conventions. It represents the name of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid"
+ // semantic conventions. It represents the UID of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid"
+ // semantic conventions. It represents the UID of the DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic
+ // conventions. It represents the name of the horizontal pod autoscaler.
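+ //
+ // A minimal usage sketch (the name is illustrative):
+ //
+ //     attr := K8SHPANameKey.String("opentelemetry")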
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. 
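+ //
+ // A usage sketch via the constructor defined below (name illustrative):
+ //
+ //     attr := K8SReplicaSetName("opentelemetry")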
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. 
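+ //
+ // A usage sketch using one of the enum values defined later in this file:
+ //
+ //     attr := K8SVolumeTypePersistentVolumeClaim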
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "emptyDir", "persistentVolumeClaim"
+ K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
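+//
+// A minimal usage sketch (the UID shown is illustrative):
+//
+//     attr := K8SDeploymentUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff")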
+func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler. +func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler. +func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" +// semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicationControllerName returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.name" semantic conventions. It represents the name +// of the replication controller. +func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. 
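+//
+// A sketch combining the name and UID attributes (values illustrative):
+//
+//     attrs := []attribute.KeyValue{
+//         K8SReplicationControllerName("opentelemetry"),
+//         K8SReplicationControllerUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//     }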
+func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. 
It represents the Linux Slab + // memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. 
This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+ // identifiers (e.g. UUID) may be used as needed.
+ //
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+ return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+ // Logs from stdout stream
+ // Stability: development
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Logs from stderr stream
+ // Stability: development
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Namespace: messaging
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the batching
+ // operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client library
+ // supports both batch and single-message API for the same operation,
+ // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+ // and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique identifier
+ // for the client that consumes or produces a message.
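+ //
+ // A minimal usage sketch (the client id shown is illustrative):
+ //
+ //     attr := MessagingClientIDKey.String("client-5")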
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. 
An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+ // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+ // represents the ack deadline in seconds set for the modify ack deadline
+ // request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+ // ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ack_id
+ MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+ // It represents the delivery attempt for a given message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+ // represents the ordering key for a given message. If the attribute is not
+ // present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ordering_key
+ MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the message
+ // keys in Kafka, which are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from `messaging.message.id` in
+ // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents a
+ // boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingKafkaOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.offset" semantic conventions. It represents the offset of a
+ // record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the size of
+ // the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or the uncompressed body size.
+ // If both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents the
+ // conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: MyConversationId
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents the
+ // size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or the uncompressed size. If
+ // both sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used by
+ // the messaging system as an identifier for the message, represented as a
+ // string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 452a7c7c7c7048c2f887f61572b18fc2
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ack", "nack", "send"
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+ // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+ // represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+ // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+ // the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+ // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+ // "messaging.rocketmq.consumption_model" semantic conventions. It represents
+ // the model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+ // represents the delay time level for a delay message, which determines the
+ // message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+ // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+ // It represents the timestamp in milliseconds that the delay message is
+ // expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents the
+ // message group; it is essential for FIFO messages. Messages that belong to the
+ // same message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myMessageGroup
+ MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions.
It represents the
+ // key(s) of the message, another way to mark a message besides the message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "keyA", "keyB"
+ MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of a message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: tagA
+ MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents the
+ // type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myNamespace
+ MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+ // the "messaging.servicebus.disposition_status" semantic conventions. It
+ // represents the [settlement type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+ MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.delivery_count" semantic conventions. It
+ // represents the number of deliveries that have been attempted for this
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+ // MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+ // semantic conventions. It represents the messaging system as identified by the
+ // client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate with
+ // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+ // instrumentation's best knowledge.
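+ //
+ // A minimal usage sketch (editor's illustration, not part of the generated
+ // conventions; the destination name and count are hypothetical), combining
+ // this key with other messaging attributes defined in this file:
+ //
+ //	attrs := []attribute.KeyValue{
+ //		MessagingSystemKafka,
+ //		MessagingDestinationName("MyTopic"),
+ //		MessagingBatchMessageCount(3),
+ //	}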
+ MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. +func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. 
It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka, which are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+ return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
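+//
+// Editor's usage sketch (the value is taken from the documented example):
+//
+//	kv := MessagingMessageConversationID("MyConversationId")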
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the RabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the
+// message group; it is essential for FIFO messages. Messages that belong to the
+// same message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message id.
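+//
+// Editor's sketch of the variadic form (keys are the documented example
+// values):
+//
+//	kv := MessagingRocketMQMessageKeys("keyA", "keyB")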
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of a message besides the topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+ return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch sending
+ // scenarios.
+ //
+ // Stability: development
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are provided for sending to an intermediary. If a single
+ // message is sent, the context of the "Send" span can be used as the creation
+ // context and no "Create" span needs to be created.
+ //
+ // Stability: development
+ MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages.
+ //
+ // Stability: development
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are processed by a consumer.
+ //
+ // Stability: development
+ MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled.
+ //
+ // Stability: development
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+ // Deprecated: Replaced by `process`.
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver")
+ // Deprecated: Replaced by `send`.
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: DE
+ NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMCCKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+ // country code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 310
+ NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMNCKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+ // network code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 001
+ NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of the
+ // mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: sprint
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionStateKey is the attribute Key conforming to the
+ // "network.connection.state" semantic conventions. It represents the state of
+ // the network connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "close_wait"
+ // Note: Connection states are defined as part of [rfc9293]
+ //
+ // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+ NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents more details
+ // regarding the connection.type. It may be the type of cell technology
+ // connection, but it could be used for describing details about a wifi
+ // connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: LTE
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the internet
+ // connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: wifi
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkInterfaceNameKey is the attribute Key conforming to the
+ // "network.interface.name" semantic conventions. It represents the network
+ // interface name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lo", "eth0"
+ NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+ // NetworkIODirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "transmit"
+ NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port"
+ // semantic conventions. It represents the peer port number of the network
+ // connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the
+ // [OSI application layer] or non-OSI equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "amqp", "http", "mqtt"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the actual
+ // version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.1", "2"
+ // Note: If protocol version is subject to negotiation (for example using
+ // [ALPN]), this attribute SHOULD be set to the negotiated version. If the
+ // actual protocol version is not known, this attribute SHOULD NOT be set.
+ //
+ // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the
+ // [OSI transport layer] or [inter-process communication method].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "tcp", "udp"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example,
+ // different processes could be listening on TCP port 12345 and UDP port 12345.
+ //
+ // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer
+ // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic
+ // conventions. It represents the [OSI network layer] or non-OSI equivalent.
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. 
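+//
+// Editor's sketch (illustrative values) pairing the protocol name with the
+// negotiated version, as the note on this attribute recommends:
+//
+//	attrs := []attribute.KeyValue{
+//		NetworkProtocolName("http"),
+//		NetworkProtocolVersion("1.1"),
+//	}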
+func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B
+ // Stability: development
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ // Stability: development
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ // Stability: development
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ // Stability: development
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ // Stability: development
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ // Stability: development
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ // Stability: development
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ // Stability: development
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ // Stability: development
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ // Stability: development
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// Enum values for network.connection.type
+var (
+ // wifi
+ // Stability: development
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ // Stability: development
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ // Stability: development
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ // Stability: development
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ // Stability: development
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// Enum values for network.io.direction
+var (
+ // transmit
+ // Stability: development
+ NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit")
+ // receive
+ // Stability: development
+ NetworkIODirectionReceive = NetworkIODirectionKey.String("receive")
+)
+
+// Enum values for network.transport
+var (
+ // TCP
+ // Stability: stable
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ // Stability: stable
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe.
+ // Stability: stable
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ // Stability: stable
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+ // QUIC
+ // Stability: stable
+ NetworkTransportQUIC = NetworkTransportKey.String("quic")
+)
+
+// Enum values for network.type
+var (
+ // IPv4
+ // Stability: stable
+ NetworkTypeIPv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ // Stability: stable
+ NetworkTypeIPv6 = NetworkTypeKey.String("ipv6")
+)
+
+// Namespace: oci
+const (
+ // OCIManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of the
+ // OCI image manifest. For container images specifically, this is the digest by
+ // which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"
+ // Note: Follows [OCI Image Manifest Specification], and specifically the
+ // [Digest property].
+ // An example can be found in [Example Image Manifest].
+ //
+ // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md
+ // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
+ // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest
+ OCIManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OCIManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OCIManifestDigest(val string) attribute.KeyValue {
+ return OCIManifestDigestKey.String(val)
+}
+
+// Namespace: opentracing
+const (
+ // OpenTracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the parent-child
+ // Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+// Enum values for opentracing.ref_type
+var (
+ // The parent Span depends on the child Span in some capacity
+ // Stability: development
+ OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ // Stability: development
+ OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from")
+)
+
+// Namespace: os
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TQ3C.230805.001.B2", "20E247", "22621"
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to be
+ // parsed) OS version information, as reported by, for example, the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS"
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version" semantic
+ // conventions. It represents the version string of the operating system as
+ // defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ OSVersionKey = attribute.Key("os.version")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the "os.description"
+// semantic conventions. It represents the human readable (not intended to be
+// parsed) OS version information, as reported by, for example, the `ver` or
+// `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating system
+// as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// Enum values for os.type
+var (
+ // Microsoft Windows
+ // Stability: development
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ // Stability: development
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ // Stability: development
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ // Stability: development
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ // Stability: development
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ // Stability: development
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ // Stability: development
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ // Stability: development
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ // Stability: development
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ // Stability: development
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ // Stability: development
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// Namespace: otel
+const (
+ // OTelComponentNameKey is the attribute Key conforming to the
+ // "otel.component.name" semantic conventions. It represents a name uniquely
+ // identifying the instance of the OpenTelemetry component within its containing
+ // SDK instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otlp_grpc_span_exporter/0", "custom-name"
+ // Note: Implementations SHOULD ensure a low cardinality for this attribute,
+ // even across application or SDK restarts.
+ // E.g. implementations MUST NOT use UUIDs as values for this attribute.
+ //
+ // Implementations MAY achieve these goals by following a
+ // `<otel.component.type>/<instance-counter>` pattern, e.g.
+ // `batching_span_processor/0`.
+ // Hereby `otel.component.type` refers to the corresponding attribute value of
+ // the component.
+ //
+ // The value of `instance-counter` MAY be automatically assigned by the
+ // component and uniqueness within the enclosing SDK instance MUST be
+ // guaranteed.
+ // For example, `<instance-counter>` MAY be implemented by using a monotonically
+ // increasing counter (starting with `0`), which is incremented every time an
+ // instance of the given component type is started.
+ //
+ // With this implementation, for example the first Batching Span Processor would
+ // have `batching_span_processor/0`
+ // as `otel.component.name`, the second one `batching_span_processor/1` and so
+ // on.
+ // These values will therefore be reused in the case of an application restart.
+ OTelComponentNameKey = attribute.Key("otel.component.name")
+
+ // OTelComponentTypeKey is the attribute Key conforming to the
+ // "otel.component.type" semantic conventions. It represents a name identifying
+ // the type of the OpenTelemetry component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "batching_span_processor", "com.example.MySpanExporter"
+ // Note: If none of the standardized values apply, implementations SHOULD use
+ // the language-defined name of the type.
+ // E.g. for Java the fully qualified classname SHOULD be used in this case.
+ OTelComponentTypeKey = attribute.Key("otel.component.type")
+
+ // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name"
+ // semantic conventions. It represents the name of the instrumentation scope
+ // (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "io.opentelemetry.contrib.mongodb"
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of the
+ // instrumentation scope (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.0.0"
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+
+ // OTelSpanSamplingResultKey is the attribute Key conforming to the
+ // "otel.span.sampling_result" semantic conventions. It represents the result
+ // value of the sampler for this span.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result")
+
+ // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code"
+ // semantic conventions. It represents the name of the code, either "OK" or
+ // "ERROR". MUST NOT be set if the status code is UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the description
+ // of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "resource not found"
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+// OTelComponentName returns an attribute KeyValue conforming to the
+// "otel.component.name" semantic conventions. It represents a name uniquely
+// identifying the instance of the OpenTelemetry component within its containing
+// SDK instance.
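+//
+// Editor's sketch of the `<otel.component.type>/<instance-counter>` naming
+// pattern described for this attribute (the counter value is illustrative):
+//
+//	kv := OTelComponentName("batching_span_processor/0")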
+func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = 
OTelComponentTypeKey.String("otlp_http_metric_exporter")
+ // OTLP metric exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+)
+
+// Enum values for otel.span.sampling_result
+var (
+ // The span is not sampled and not recording
+ // Stability: development
+ OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+ // The span is not sampled, but recording
+ // Stability: development
+ OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+ // The span is sampled and recording
+ // Stability: development
+ OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+ // The operation has been validated by an Application developer or Operator to
+ // have completed successfully.
+ // Stability: stable
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error.
+ // Stability: stable
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// Namespace: peer
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic
+ // conventions. It represents the [`service.name`] of the remote service. SHOULD
+ // be equal to the actual `service.name` resource attribute of the remote
+ // service, if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: AuthTokenCache
+ //
+ // [`service.name`]: /docs/resource/README.md#service
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the "peer.service"
+// semantic conventions. It represents the [`service.name`] of the remote
+// service. SHOULD be equal to the actual `service.name` resource attribute of
+// the remote service, if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// Namespace: process
+const (
+ // ProcessArgsCountKey is the attribute Key conforming to the
+ // "process.args_count" semantic conventions. It represents the length of the
+ // process.command_args array.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4
+ // Note: This field can be useful for querying or performing bucket analysis on
+ // how many arguments were provided to start a process. More arguments may be an
+ // indication of suspicious activity.
+ ProcessArgsCountKey = attribute.Key("process.args_count")
+
+ // ProcessCommandKey is the attribute Key conforming to the "process.command"
+ // semantic conventions. It represents the command used to launch the process
+ // (i.e. the command name). On Linux-based systems, can be set to the zeroth
+ // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+ // extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol"
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received by
+ // the process.
On Linux-based systems (and some other Unix-like systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+ // would be the full argv vector passed to `main`. SHOULD NOT be collected by
+ // default unless there is sanitization that excludes sensitive data.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol", "--config=config.yaml"
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full command
+ // used to launch the process as a single string.
+ // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+ // you have to assemble it just for monitoring; use `process.command_args`
+ // instead. SHOULD NOT be collected by default unless there is sanitization that
+ // excludes sensitive data.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "C:\cmd\otelcol --config="my directory\config.yaml""
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It specifies whether the
+ // context switches for this data point were voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and time
+ // the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:25:34.853Z"
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+ // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+ // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+ ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+ // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+ // "process.executable.build_id.go" semantic conventions. It represents the Go
+ // build ID as retrieved by `go tool buildid <go executable>`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+ ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
+
+ // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+ // "process.executable.build_id.htlhash" semantic conventions. It represents the
+ // profiling-specific build ID for executables. See the OTel specification for
+ // Profiles for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+ ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name of the
+ // process executable. On Linux-based systems, this SHOULD be set to the base
+ // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+ // the base name of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcol"
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full path
+ // to the process executable. On Linux-based systems, can be set to the target
+ // of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/cmd/otelcol"
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+ // semantic conventions. It represents the exit code of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+ // semantic conventions. It represents the date and time the process exited, in
+ // ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:26:12.315Z"
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID of the
+ // process's group leader. This is also the process group ID (PGID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents whether the
+ // process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessLinuxCgroupKey is the attribute Key conforming to the
+ // "process.linux.cgroup" semantic conventions. It represents the control group
+ // associated with the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+ // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+ // Note: Control groups (cgroups) are a kernel feature used to organize and
+ // manage process resources. This attribute provides the path(s) to the
+ // cgroup(s) associated with the process, which should match the contents of the
+ // [/proc/[PID]/cgroup] file.
+ // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14.0.2
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved user ID
+ // (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the username of
+ // the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID of
+ // the process's session leader. This is also the session ID (SID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessTitleKey is the attribute Key conforming to the "process.title"
+ // semantic conventions. It represents the process title (proctitle).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cat /etc/hostname", "xfce4-session", "bash"
+ // Note: In many Unix-like systems, the process title (proctitle) is the string
+ // that represents the name or command line of a running process, displayed by
+ // system monitoring tools like ps, top, and htop.
+ ProcessTitleKey = attribute.Key("process.title")
+
+ // ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+ // semantic conventions. It represents the effective user ID (EUID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+ // semantic conventions. It represents the username of the effective user of the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+ // conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host, but it is unique within the PID
+ // namespace in which the process exists.
+ ProcessVpidKey = attribute.Key("process.vpid")
+
+ // ProcessWorkingDirectoryKey is the attribute Key conforming to the
+ // "process.working_directory" semantic conventions. It represents the working
+ // directory of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/root"
+ ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+ return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux-based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unix-like systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go executable>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling-specific build ID for executables. See the OTel specification
+// for Profiles for more information.
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux-based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux-based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. 
It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch_type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.paging.fault_type
+var (
+ // major
+ // Stability: development
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It describes the
+ // interpreter or compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+ // Can be one of, but not limited to, [C], [C++], [Go], or [Rust]. If possible,
+ // a more precise value MUST be used.
+ //
+ // Stability: development
+ //
+ // [C]: https://wikipedia.org/wiki/C_(programming_language)
+ // [C++]: https://wikipedia.org/wiki/C%2B%2B
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeNative = ProfileFrameTypeKey.String("native")
+ // [Perl]
+ //
+ // Stability: development
+ //
+ // [Perl]: https://wikipedia.org/wiki/Perl
+ ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl")
+ // [PHP]
+ //
+ // Stability: development
+ //
+ // [PHP]: https://wikipedia.org/wiki/PHP
+ ProfileFrameTypePHP = ProfileFrameTypeKey.String("php")
+ // [Python]
+ //
+ // Stability: development
+ //
+ // [Python]: https://wikipedia.org/wiki/Python_(programming_language)
+ ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython")
+ // [Ruby]
+ //
+ // Stability: development
+ //
+ // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language)
+ ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby")
+ // [V8JS]
+ //
+ // Stability: development
+ //
+ // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine)
+ ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js")
+ // [Erlang]
+ //
+ // Stability: development
+ //
+ // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)
+ ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam")
+ // [Go]
+ //
+ // Stability: development
+ //
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ ProfileFrameTypeGo = ProfileFrameTypeKey.String("go")
+ // [Rust]
+ //
+ // Stability: development
+ //
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust")
+)
+
+// Namespace: rpc
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes] of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [error codes]: https://connectrpc.com/docs/protocol/#error-codes
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the
+ // [numeric status code] of the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+ // property of the response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -32700, 100
+ RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of the response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Parse error", "User already exists"
+ RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of the request or response. Since the protocol allows the id to be
+ // an int, a string, `null`, or missing (for notifications), the value is
+ // expected to be cast to a string for simplicity. Use an empty string for a
+ // `null` value. Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7", ""
+ RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJSONRPCVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in the `jsonrpc` property of the request/response. Since JSON-RPC
+ // 1.0 doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the name of the (logical) method being called,
+ // must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: exampleMethod
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function.
The `code.function.name` attribute may be used to store the
+ // latter (e.g., the method actually executing the call on the server side, or
+ // the RPC client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+ // conventions. It represents the full (logical) name of the service being
+ // called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myservice.EchoService
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., the class with the method
+ // actually executing the call on the server side, or the RPC client stub class
+ // on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+ // conventions. It represents a string identifying the remoting system. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of the response if it is an error response.
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+ return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of the response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+ return RPCJSONRPCErrorMessageKey.String(val)
+}
+
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of the request or response. Since the protocol allows the id to be an int, a
+// string, `null`, or missing (for notifications), the value is expected to be
+// cast to a string for simplicity. Use an empty string for a `null` value. Omit
+// entirely if this is a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+ return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+ return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`, one for sent messages and one for received messages.
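+//
+// A minimal illustrative sketch of keeping the two per-direction counters
+// (the counter variable and span are hypothetical; assumes the
+// go.opentelemetry.io/otel/trace API):
+//
+//	sentID++ // first sent message gets rpc.message.id = 1
+//	span.AddEvent("message", trace.WithAttributes(
+//		RPCMessageTypeSent,
+//		RPCMessageID(int(sentID)),
+//	))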
+func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = 
RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + // Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. 
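+//
+// An illustrative sketch (the attribute values are hypothetical) of
+// describing the rule behind a detection event with attributes from this
+// namespace:
+//
+//	attrs := []attribute.KeyValue{
+//		SecurityRuleName("BLOCK_DNS_over_TLS"),
+//		SecurityRuleUUID("550e8400-e29b-41d4-a716-446655440000"),
+//		SecurityRuleLicense("Apache 2.0"),
+//	}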
+func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. 
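+//
+// An illustrative sketch (assumes a span from the go.opentelemetry.io/otel/trace
+// API) of attaching both server attributes:
+//
+//	span.SetAttributes(
+//		ServerAddress("example.com"),
+//		ServerPort(443),
+//	)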
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
+// Namespace: service
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID of
+ // the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "627cc493-f310-47de-96bd-71410b7dec09"
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to distinguish instances of the same service that
+ // exist at the same time (e.g. instances of a horizontally scaled service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 [RFC 4122] UUID, but are free to use an inherent unique ID as
+ // the source of this value if stability is desirable. In that case, the ID
+ // SHOULD be used as the source of a UUID Version 5 and SHOULD use the
+ // following UUID as the namespace: `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the purposes of
+ // identifying a service instance is needed. Similar to what can be seen in the
+ // man page for the [`/etc/machine-id`] file, the underlying data, such as pod
+ // name and namespace, should be treated as confidential, being the user's
+ // choice to expose it or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we do
+ // not recommend using one identifier for all processes participating in the
+ // application. Instead, it's recommended that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it can't
+ // unambiguously determine the service instance that is generating that
+ // telemetry. For instance, creating a UUID based on `pod.name` will likely be
+ // wrong, as the Collector might not know from which container within that pod
+ // the telemetry originated. However, Collectors can set the
+ // `service.instance.id` if they can unambiguously determine the service
+ // instance for that telemetry. This is typically the case for scraping
+ // receivers, as they know the target address and port.
+ //
+ // [RFC 4122]: https://www.ietf.org/rfc/rfc4122.txt
+ // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+ // conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "shoppingcart"
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fall back to `unknown_service:`
+ // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+ // If `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ //
+ // [`process.executable.name`]: process.md
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions.
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. 
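+//
+// A minimal sketch of recording a session rotation, assuming this package is
+// imported as semconv and that newID/oldID are session identifiers produced
+// by the application:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.SessionID(newID),
+//		semconv.SessionPreviousID(oldID),
+//	}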
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// Namespace: signalr
+const (
+ // SignalRConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the SignalR
+ // HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "app_shutdown", "timeout"
+ SignalRConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalRTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the
+ // [SignalR transport type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "web_sockets", "long_polling"
+ //
+ // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+ SignalRTransportKey = attribute.Key("signalr.transport")
+)
+
+// Enum values for signalr.connection.status
+var (
+ // The connection was closed normally.
+ // Stability: stable
+ SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout.
+ // Stability: stable
+ SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down.
+ // Stability: stable
+ SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown")
+)
+
+// Enum values for signalr.transport
+var (
+ // ServerSentEvents protocol
+ // Stability: stable
+ SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ // Stability: stable
+ SignalRTransportLongPolling = SignalRTransportKey.String("long_polling")
+ // WebSockets protocol
+ // Stability: stable
+ SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets")
+)
+
+// Namespace: source
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, `source.address` SHOULD represent the source address behind
+ // any intermediaries, for example proxies, if it's available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port" semantic
+ // conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the "source.address"
+// semantic conventions. It represents the source address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
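+//
+// A minimal usage sketch, assuming this package is imported as semconv and
+// span is a trace.Span recorded on the destination side:
+//
+//	span.SetAttributes(
+//		semconv.SourceAddress("source.example.com"),
+//		semconv.SourcePort(3389),
+//	)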
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Namespace: system
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It is deprecated; use
+ // `cpu.logical_number` instead.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "(identifier)"
+ SystemDeviceKey = attribute.Key("system.device")
+
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the filesystem
+ // mode.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "rw, ro"
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/mnt/data"
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the filesystem
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "used"
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the filesystem
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ext4"
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free", "cached"
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "in"
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory paging
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free"
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory paging
+ // type.
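+ //
+ // A minimal sketch of attaching one of the enum values defined below to a
+ // metric measurement (illustrative; assumes an instrument pageFaults, a
+ // context ctx, and the go.opentelemetry.io/otel/metric package):
+ //
+ //	pageFaults.Add(ctx, 1, metric.WithAttributes(SystemPagingTypeMajor))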
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "minor"
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State Codes].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "running"
+ //
+ // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It is deprecated; use
+// `cpu.logical_number` instead.
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+ // used
+ // Stability: development
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ // Stability: development
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ // Stability: development
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+ // fat32
+ // Stability: development
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ // Stability: development
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ // Stability: development
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ // Stability: development
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ // Stability: development
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ // Stability: development
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+ // used
+ // Stability: development
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ // Stability: development
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // Deprecated: Removed, report shared memory usage with
+ // `metric.system.memory.shared` metric.
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. 
It represents the fully qualified human readable name + // of the [test case]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a [test suite] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. +// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID). 
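+ //
+ // A minimal usage sketch (illustrative values; Go itself exposes no managed
+ // thread ID, so instrumentations typically record a runtime- or
+ // library-specific identifier), assuming span is a trace.Span:
+ //
+ //	span.SetAttributes(ThreadID(42), ThreadName("main"))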
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+ // conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: main
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Namespace: tls
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+ // conventions. It represents the string indicating the [cipher] used during the
+ // current connection.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+ // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+ // of the [registered TLS Cipher Suites].
+ //
+ // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+ // [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the client. This is usually
+ // mutually-exclusive of `client.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // client. This is usually mutually-exclusive of `client.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions.
It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/Time + // indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. 
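+ //
+ // A minimal sketch using the example values below, assuming span is a
+ // trace.Span:
+ //
+ //	span.SetAttributes(TLSClientSupportedCiphers(
+ //		"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ //		"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ //	))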
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." 
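+ //
+ // A minimal sketch of producing this value from a live connection
+ // (illustrative; assumes cs is a crypto/tls ConnectionState with at least one
+ // peer certificate, and that encoding/pem is imported):
+ //
+ //	leaf := cs.PeerCertificates[0]
+ //	pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: leaf.Raw})
+ //	attr := TLSServerCertificate(string(pemCert))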
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // server. This is usually mutually-exclusive of `server.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+ // semantic conventions. It represents a hash that identifies servers based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. 
It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually-exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions.
It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. 
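+ //
+ // A minimal sketch of the query-value redaction described in the note below
+ // (illustrative; assumes net/url is imported, full is a raw URL string, and
+ // span is a trace.Span):
+ //
+ //	if u, err := url.Parse(full); err == nil {
+ //		q := u.Query()
+ //		for _, k := range []string{"AWSAccessKeyId", "Signature", "sig", "X-Goog-Signature"} {
+ //			if q.Has(k) {
+ //				q.Set(k, "REDACTED")
+ //			}
+ //		}
+ //		u.RawQuery = q.Encode()
+ //		span.SetAttributes(URLFull(u.String()))
+ //	}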
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost"
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the fragment
+ // is not transmitted over HTTP, but if it is known, it SHOULD be included
+ // nevertheless.
+ //
+ // `url.full` MUST NOT contain credentials passed via URL in the form of
+ // `https://username:password@www.example.com/`.
+ // In such a case, username and password SHOULD be redacted and the attribute's
+ // value SHOULD be `https://REDACTED:REDACTED@www.example.com/`.
+ //
+ // `url.full` SHOULD capture the absolute URL when it is available (or can be
+ // reconstructed).
+ //
+ // Sensitive content provided in `url.full` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `https://www.example.com/path?color=blue&sig=REDACTED`.
+ //
+ // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original" semantic
+ // conventions. It represents the unmodified original URL as seen in the event
+ // source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv",
+ // "search?q=OpenTelemetry"
+ // Note: In network monitoring, the observed URL may be a full URL, whereas in
+ // access logs, the URL is often just represented as a path. This field is meant
+ // to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in the form of
+ // `https://username:password@www.example.com/`. In such a case, password and
+ // username SHOULD NOT be redacted and the attribute's value SHOULD remain the
+ // same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI path] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/search"
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`.
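+ //
+ // A minimal sketch, assuming u is a parsed *net/url.URL, strconv is imported,
+ // and attrs is a []attribute.KeyValue being assembled:
+ //
+ //	if p := u.Port(); p != "" {
+ //		if n, err := strconv.Atoi(p); err == nil {
+ //			attrs = append(attrs, URLPort(n))
+ //		}
+ //	}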
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 443
+	URLPortKey = attribute.Key("url.port")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI query] component.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "q=OpenTelemetry"
+	// Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	//
+	//
+	// Query string values for the following keys SHOULD be redacted by default and
+	// replaced by the value `REDACTED`:
+	//
+	// - [`AWSAccessKeyId`]
+	// - [`Signature`]
+	// - [`sig`]
+	// - [`X-Goog-Signature`]
+	//
+	// This list is subject to change over time.
+	//
+	// When a query string value is redacted, the query string key SHOULD still be
+	// preserved, e.g.
+	// `q=OpenTelemetry&sig=REDACTED`.
+	//
+	// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+	// [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+	// [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+	// [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+	// [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLRegisteredDomainKey is the attribute Key conforming to the
+	// "url.registered_domain" semantic conventions. It represents the highest
+	// registered URL domain, stripped of the subdomain.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "example.com", "foo.co.uk"
+	// Note: This value can be determined precisely with the [public suffix list].
+	// For example, the registered domain for `foo.example.com` is `example.com`.
+	// Trying to approximate this by simply taking the last two labels will not work
+	// well for TLDs such as `co.uk`.
+	//
+	// [public suffix list]: https://publicsuffix.org/
+	URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+	// conventions. It represents the [URI scheme] component identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "https", "ftp", "telnet"
+	//
+	// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+	// semantic conventions. It represents the subdomain portion of a fully
+	// qualified domain name, which includes all of the names except the host name
+	// under the registered_domain. In a partially qualified domain, or if the
+	// qualification level of the full name cannot be determined, subdomain contains
+	// all of the names below the registered domain.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "east", "sub2.sub1"
+	// Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+	// domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+	// subdomain field should contain `sub2.sub1`, with no trailing period.
+	URLSubdomainKey = attribute.Key("url.subdomain")
+
+	// URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+	// conventions. It represents the low-cardinality template of an
+	// [absolute path reference].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+	//
+	// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+	URLTemplateKey = attribute.Key("url.template")
+
+	// URLTopLevelDomainKey is the attribute Key conforming to the
+	// "url.top_level_domain" semantic conventions. It represents the effective top
+	// level domain (eTLD), also known as the domain suffix, which is the last part
+	// of the domain name. For example, the top level domain for example.com is
+	// `com`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "com", "co.uk"
+	// Note: This value can be determined precisely with the [public suffix list].
+	//
+	// [public suffix list]: https://publicsuffix.org/
+	URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+	return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+	return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+	return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+	return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
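+//
+// A usage sketch (illustrative; assumes a trace.Span value named span is in
+// scope):
+//
+//	span.SetAttributes(URLQuery("q=OpenTelemetry"))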
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+	return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name, which includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+	return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+	return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part
+// of the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+	return URLTopLevelDomainKey.String(val)
+}
+
+// Namespace: user
+const (
+	// UserEmailKey is the attribute Key conforming to the "user.email" semantic
+	// conventions. It represents the user email address.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "a.einstein@example.com"
+	UserEmailKey = attribute.Key("user.email")
+
+	// UserFullNameKey is the attribute Key conforming to the "user.full_name"
+	// semantic conventions. It represents the user's full name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Albert Einstein"
+	UserFullNameKey = attribute.Key("user.full_name")
+
+	// UserHashKey is the attribute Key conforming to the "user.hash" semantic
+	// conventions. It represents the unique user hash to correlate information for
+	// a user in anonymized form.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+	// Note: Useful if `user.id` or `user.name` contains confidential information
+	// and cannot be used.
+	UserHashKey = attribute.Key("user.hash")
+
+	// UserIDKey is the attribute Key conforming to the "user.id" semantic
+	// conventions. It represents the unique identifier of the user.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000"
+	UserIDKey = attribute.Key("user.id")
+
+	// UserNameKey is the attribute Key conforming to the "user.name" semantic
+	// conventions. It represents the short name or login/username of the user.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "a.einstein"
+	UserNameKey = attribute.Key("user.name")
+
+	// UserRolesKey is the attribute Key conforming to the "user.roles" semantic
+	// conventions. It represents the array of user roles at the time of the event.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "admin", "reporting_user"
+	UserRolesKey = attribute.Key("user.roles")
+)
+
+// UserEmail returns an attribute KeyValue conforming to the "user.email"
+// semantic conventions. It represents the user email address.
+func UserEmail(val string) attribute.KeyValue {
+	return UserEmailKey.String(val)
+}
+
+// UserFullName returns an attribute KeyValue conforming to the "user.full_name"
+// semantic conventions. It represents the user's full name.
+func UserFullName(val string) attribute.KeyValue {
+	return UserFullNameKey.String(val)
+}
+
+// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic
+// conventions. It represents the unique user hash to correlate information for a
+// user in anonymized form.
+func UserHash(val string) attribute.KeyValue {
+	return UserHashKey.String(val)
+}
+
+// UserID returns an attribute KeyValue conforming to the "user.id" semantic
+// conventions. It represents the unique identifier of the user.
+func UserID(val string) attribute.KeyValue {
+	return UserIDKey.String(val)
+}
+
+// UserName returns an attribute KeyValue conforming to the "user.name" semantic
+// conventions. It represents the short name or login/username of the user.
+func UserName(val string) attribute.KeyValue {
+	return UserNameKey.String(val)
+}
+
+// UserRoles returns an attribute KeyValue conforming to the "user.roles"
+// semantic conventions. It represents the array of user roles at the time of the
+// event.
+func UserRoles(val ...string) attribute.KeyValue {
+	return UserRolesKey.StringSlice(val)
+}
+
+// Namespace: user_agent
+const (
+	// UserAgentNameKey is the attribute Key conforming to the "user_agent.name"
+	// semantic conventions. It represents the name of the user-agent extracted from
+	// original. Usually refers to the browser's name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Safari", "YourApp"
+	// Note: [Example] of extracting browser's name from original string. In the
+	// case of using a user-agent for non-browser products, such as microservices
+	// with multiple names/versions inside the `user_agent.original`, the most
+	// significant name SHOULD be selected. In such a scenario it should align with
+	// `user_agent.version`.
+	//
+	// [Example]: https://www.whatsmyua.info
+	UserAgentNameKey = attribute.Key("user_agent.name")
+
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of the
+	// [HTTP User-Agent] header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+	// grpc-java-okhttp/1.27.2"
+	//
+	// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+	// UserAgentOSNameKey is the attribute Key conforming to the
+	// "user_agent.os.name" semantic conventions. It represents the human readable
+	// operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iOS", "Android", "Ubuntu"
+	// Note: For mapping user agent strings to OS names, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+	// UserAgentOSVersionKey is the attribute Key conforming to the
+	// "user_agent.os.version" semantic conventions. It represents the version
+	// string of the operating system as defined in [Version Attributes].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.2.1", "18.04.1"
+	// Note: For mapping user agent strings to OS versions, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [Version Attributes]: /docs/resource/README.md#version-attributes
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+	// UserAgentSyntheticTypeKey is the attribute Key conforming to the
+	// "user_agent.synthetic.type" semantic conventions. It represents the category
+	// of synthetic traffic, such as tests or bots.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute MAY be derived from the contents of the
+	// `user_agent.original` attribute. Components that populate the attribute are
+	// responsible for determining what they consider to be synthetic bot or test
+	// traffic. This attribute can either be set for self-identification purposes,
+	// or on telemetry detected to be generated as a result of a synthetic request.
+	// This attribute is useful for distinguishing between genuine client traffic
+	// and synthetic traffic generated by bots or tests.
+	UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+	// UserAgentVersionKey is the attribute Key conforming to the
+	// "user_agent.version" semantic conventions. It represents the version of the
+	// user-agent extracted from original. Usually refers to the browser's version.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.1.2", "1.0.0"
+	// Note: [Example] of extracting browser's version from original string. In the
+	// case of using a user-agent for non-browser products, such as microservices
+	// with multiple names/versions inside the `user_agent.original`, the most
+	// significant version SHOULD be selected. In such a scenario it should align
+	// with `user_agent.name`.
+	//
+	// [Example]: https://www.whatsmyua.info
+	UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+	return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP User-Agent] header sent by the client.
+//
+// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentOSName returns an attribute KeyValue conforming to the
+// "user_agent.os.name" semantic conventions. It represents the human readable
+// operating system name.
+func UserAgentOSName(val string) attribute.KeyValue {
+	return UserAgentOSNameKey.String(val)
+}
+
+// UserAgentOSVersion returns an attribute KeyValue conforming to the
+// "user_agent.os.version" semantic conventions. It represents the version string
+// of the operating system as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func UserAgentOSVersion(val string) attribute.KeyValue {
+	return UserAgentOSVersionKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+	return UserAgentVersionKey.String(val)
+}
+
+// Enum values for user_agent.synthetic.type
+var (
+	// Bot source.
+	// Stability: development
+	UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot")
+	// Synthetic test source.
+	// Stability: development
+	UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test")
+)
+
+// Namespace: vcs
+const (
+	// VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id"
+	// semantic conventions. It represents the ID of the change (pull request/merge
+	// request/changelist) if applicable. This is usually a unique (within
+	// repository) identifier generated by the VCS system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "123"
+	VCSChangeIDKey = attribute.Key("vcs.change.id")
+
+	// VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state"
+	// semantic conventions. It represents the state of the change (pull
+	// request/merge request/changelist).
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "open", "closed", "merged"
+	VCSChangeStateKey = attribute.Key("vcs.change.state")
+
+	// VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title"
+	// semantic conventions. It represents the human readable title of the change
+	// (pull request/merge request/changelist). This title is often a brief summary
+	// of the change and may get merged into a ref as the commit summary.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update
+	// dependency"
+	VCSChangeTitleKey = attribute.Key("vcs.change.title")
+
+	// VCSLineChangeTypeKey is the attribute Key conforming to the
+	// "vcs.line_change.type" semantic conventions. It represents the type of line
+	// change being measured on a branch or change.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "added", "removed"
+	VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type")
+
+	// VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name"
+	// semantic conventions. It represents the group owner within the version
+	// control system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-org", "myteam", "business-unit"
+	VCSOwnerNameKey = attribute.Key("vcs.owner.name")
+
+	// VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name"
+	// semantic conventions. It represents the name of the version control system
+	// provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "github", "gitlab", "gitea", "bitbucket"
+	VCSProviderNameKey = attribute.Key("vcs.provider.name")
+
+	// VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name"
+	// semantic conventions. It represents the name of the [reference] such as
+	// **branch** or **tag** in the repository.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-feature-branch", "tag-1-test"
+	// Note: `base` refers to the starting point of a change. For example, `main`
+	// would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name")
+
+	// VCSRefBaseRevisionKey is the attribute Key conforming to the
+	// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+	// object in Git, or a revision number in SVN.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+	// "main", "123", "HEAD"
+	// Note: `base` refers to the starting point of a change. For example, `main`
+	// would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits. The
+	// revision can be a full [hash value (see glossary)] of the recorded change
+	// to a ref within a repository pointing to a [commit] object. It does
+	// not necessarily have to be a hash; it can simply define a
+	// [revision number], which is an integer that is monotonically increasing.
+	// In cases where it is identical to the `ref.base.name`, it SHOULD still be
+	// included. It is up to the implementer to decide which value to set as the
+	// revision based on the VCS system and situational context.
+	//
+	// [revised version]: https://www.merriam-webster.com/dictionary/revision
+	// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	// [commit]: https://git-scm.com/docs/git-commit
+	// [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+	VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision")
+
+	// VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type"
+	// semantic conventions. It represents the type of the [reference] in the
+	// repository.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "branch", "tag"
+	// Note: `base` refers to the starting point of a change. For example, `main`
+	// would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type")
+
+	// VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name"
+	// semantic conventions. It represents the name of the [reference] such as
+	// **branch** or **tag** in the repository.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-feature-branch", "tag-1-test"
+	// Note: `head` refers to where you are right now; the current reference at a
+	// given time.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name")
+
+	// VCSRefHeadRevisionKey is the attribute Key conforming to the
+	// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+	// object in Git, or a revision number in SVN.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+	// "main", "123", "HEAD"
+	// Note: `head` refers to where you are right now; the current reference at a
+	// given time. The revision can be a full [hash value (see glossary)] of the
+	// recorded change to a ref within a repository pointing to a [commit] object.
+	// It does not necessarily have to be a hash; it can simply define a
+	// [revision number], which is an integer that is monotonically increasing.
+	// In cases where it is identical to the `ref.head.name`, it SHOULD still be
+	// included. It is up to the implementer to decide which value to set as the
+	// revision based on the VCS system and situational context.
+	//
+	// [revised version]: https://www.merriam-webster.com/dictionary/revision
+	// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	// [commit]: https://git-scm.com/docs/git-commit
+	// [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+	VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+	// VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+	// semantic conventions. It represents the type of the [reference] in the
+	// repository.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "branch", "tag"
+	// Note: `head` refers to where you are right now; the current reference at a
+	// given time.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+	// VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+	// conventions. It represents the type of the [reference] in the repository.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "branch", "tag"
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefTypeKey = attribute.Key("vcs.ref.type")
+
+	// VCSRepositoryNameKey is the attribute Key conforming to the
+	// "vcs.repository.name" semantic conventions. It represents the human readable
+	// name of the repository. It SHOULD NOT include any additional identifier like
+	// Group/SubGroup in GitLab or organization in GitHub.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "semantic-conventions", "my-cool-repo"
+	// Note: Because this is only the name, it can clash with forks of the same
+	// repository when collecting telemetry across multiple orgs or groups in the
+	// same backends.
+	VCSRepositoryNameKey = attribute.Key("vcs.repository.name")
+
+	// VCSRepositoryURLFullKey is the attribute Key conforming to the
+	// "vcs.repository.url.full" semantic conventions. It represents the
+	// [canonical URL] of the repository providing the complete HTTP(S) address in
+	// order to locate and identify the repository through a browser.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "https://github.com/opentelemetry/open-telemetry-collector-contrib",
+	// "https://gitlab.com/my-org/my-project/my-projects-project/repo"
+	// Note: In Git Version Control Systems, the canonical URL SHOULD NOT include
+	// the `.git` extension.
+	//
+	// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+	VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full")
+
+	// VCSRevisionDeltaDirectionKey is the attribute Key conforming to the
+	// "vcs.revision_delta.direction" semantic conventions. It represents the type
+	// of revision comparison.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "ahead", "behind"
+	VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction")
+)
+
+// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id"
+// semantic conventions. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func VCSChangeID(val string) attribute.KeyValue {
+	return VCSChangeIDKey.String(val)
+}
+
+// VCSChangeTitle returns an attribute KeyValue conforming to the
+// "vcs.change.title" semantic conventions. It represents the human readable
+// title of the change (pull request/merge request/changelist). This title is
+// often a brief summary of the change and may get merged into a ref as the
+// commit summary.
+func VCSChangeTitle(val string) attribute.KeyValue {
+	return VCSChangeTitleKey.String(val)
+}
+
+// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name"
+// semantic conventions. It represents the group owner within the version control
+// system.
+func VCSOwnerName(val string) attribute.KeyValue {
+	return VCSOwnerNameKey.String(val)
+}
+
+// VCSRefBaseName returns an attribute KeyValue conforming to the
+// "vcs.ref.base.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
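+//
+// For example (illustrative), recording the base branch on a trace.Span value
+// named span:
+//
+//	span.SetAttributes(VCSRefBaseName("main"), VCSRefBaseTypeBranch)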
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefBaseName(val string) attribute.KeyValue {
+	return VCSRefBaseNameKey.String(val)
+}
+
+// VCSRefBaseRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefBaseRevision(val string) attribute.KeyValue {
+	return VCSRefBaseRevisionKey.String(val)
+}
+
+// VCSRefHeadName returns an attribute KeyValue conforming to the
+// "vcs.ref.head.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefHeadName(val string) attribute.KeyValue {
+	return VCSRefHeadNameKey.String(val)
+}
+
+// VCSRefHeadRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefHeadRevision(val string) attribute.KeyValue {
+	return VCSRefHeadRevisionKey.String(val)
+}
+
+// VCSRepositoryName returns an attribute KeyValue conforming to the
+// "vcs.repository.name" semantic conventions. It represents the human readable
+// name of the repository. It SHOULD NOT include any additional identifier like
+// Group/SubGroup in GitLab or organization in GitHub.
+func VCSRepositoryName(val string) attribute.KeyValue {
+	return VCSRepositoryNameKey.String(val)
+}
+
+// VCSRepositoryURLFull returns an attribute KeyValue conforming to the
+// "vcs.repository.url.full" semantic conventions. It represents the
+// [canonical URL] of the repository providing the complete HTTP(S) address in
+// order to locate and identify the repository through a browser.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func VCSRepositoryURLFull(val string) attribute.KeyValue {
+	return VCSRepositoryURLFullKey.String(val)
+}
+
+// Enum values for vcs.change.state
+var (
+	// Open means the change is currently active and under review. It hasn't been
+	// merged into the target branch yet, and it's still possible to make changes or
+	// add comments.
+	// Stability: development
+	VCSChangeStateOpen = VCSChangeStateKey.String("open")
+	// WIP (work-in-progress, draft) means the change is still in progress and not
+	// yet ready for a full review. It might still undergo significant changes.
+	// Stability: development
+	VCSChangeStateWip = VCSChangeStateKey.String("wip")
+	// Closed means the merge request has been closed without merging. This can
+	// happen for various reasons, such as the changes being deemed unnecessary, the
+	// issue being resolved in another way, or the author deciding to withdraw the
+	// request.
+	// Stability: development
+	VCSChangeStateClosed = VCSChangeStateKey.String("closed")
+	// Merged indicates that the change has been successfully integrated into the
+	// target codebase.
+	// Stability: development
+	VCSChangeStateMerged = VCSChangeStateKey.String("merged")
+)
+
+// Enum values for vcs.line_change.type
+var (
+	// How many lines were added.
+	// Stability: development
+	VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added")
+	// How many lines were removed.
+	// Stability: development
+	VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed")
+)
+
+// Enum values for vcs.provider.name
+var (
+	// [GitHub]
+	// Stability: development
+	//
+	// [GitHub]: https://github.com
+	VCSProviderNameGithub = VCSProviderNameKey.String("github")
+	// [GitLab]
+	// Stability: development
+	//
+	// [GitLab]: https://gitlab.com
+	VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab")
+	// Deprecated: Replaced by `gitea`.
+	VCSProviderNameGittea = VCSProviderNameKey.String("gittea")
+	// [Gitea]
+	// Stability: development
+	//
+	// [Gitea]: https://gitea.io
+	VCSProviderNameGitea = VCSProviderNameKey.String("gitea")
+	// [Bitbucket]
+	// Stability: development
+	//
+	// [Bitbucket]: https://bitbucket.org
+	VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket")
+)
+
+// Enum values for vcs.ref.base.type
+var (
+	// [branch]
+	// Stability: development
+	//
+	// [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+	VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch")
+	// [tag]
+	// Stability: development
+	//
+	// [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+	VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.head.type
+var (
+	// [branch]
+	// Stability: development
+	//
+	// [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+	VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch")
+	// [tag]
+	// Stability: development
+	//
+	// [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+	VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.type
+var (
+	// [branch]
+	// Stability: development
+	//
+	// [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+	VCSRefTypeBranch = VCSRefTypeKey.String("branch")
+	// [tag]
+	// Stability: development
+	//
+	// [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+	VCSRefTypeTag = VCSRefTypeKey.String("tag")
+)
+
+// Enum values for vcs.revision_delta.direction
+var (
+	// How many revisions the change is behind the target ref.
+	// Stability: development
+	VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind")
+	// How many revisions the change is ahead of the target ref.
+	// Stability: development
+	VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead")
+)
+
+// Namespace: webengine
+const (
+	// WebEngineDescriptionKey is the attribute Key conforming to the
+	// "webengine.description" semantic conventions. It represents the additional
+	// description of the web engine (e.g. detailed version and edition
+	// information).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+	// 2.2.2.Final"
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+	// WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+	// semantic conventions. It represents the name of the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "WildFly"
+	WebEngineNameKey = attribute.Key("webengine.name")
+
+	// WebEngineVersionKey is the attribute Key conforming to the
+	// "webengine.version" semantic conventions. It represents the version of the
+	// web engine.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "21.0.0"
+	WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
+// semantic conventions. It represents the name of the web engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the web
+// engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
\ No newline at end of file
diff --git a/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go
new file mode 100644
index 000000000..2c5c7ebd0
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.34.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
diff --git a/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go
new file mode 100644
index 000000000..88a998f1e
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go
new file mode 100644
index 000000000..3c23d4592
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare a
+// non-empty schema URL in the form https://opentelemetry.io/schemas/
+const SchemaURL = "https://opentelemetry.io/schemas/1.34.0"
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/auto.go b/tools/vendor/go.opentelemetry.io/otel/trace/auto.go
new file mode 100644
index 000000000..f3aa39813
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -0,0 +1,662 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/internal/telemetry"
+)
+
+// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider].
+// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider it will not generate
+// any trace telemetry.
+func newAutoTracerProvider() TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(autoTracerProvider)
+
+type autoTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = autoTracerProvider{}
+
+func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+	cfg := NewTracerConfig(opts...)
+	return autoTracer{
+		name:      name,
+		version:   cfg.InstrumentationVersion(),
+		schemaURL: cfg.SchemaURL(),
+	}
+}
+
+type autoTracer struct {
+	embedded.Tracer
+
+	name, schemaURL, version string
+}
+
+var _ Tracer = autoTracer{}
+
+func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) {
+	var psc, sc SpanContext
+	sampled := true
+	span := new(autoSpan)
+
+	// Ask eBPF for sampling decision and span context info.
+	t.start(ctx, span, &psc, &sampled, &sc)
+
+	span.sampled.Store(sampled)
+	span.spanContext = sc
+
+	ctx = ContextWithSpan(ctx, span)
+
+	if sampled {
+		// Only build traces if sampled.
+		cfg := NewSpanStartConfig(opts...)
+		span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
+	}
+
+	return ctx, span
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (t *autoTracer) start(
+	ctx context.Context,
+	spanPtr *autoSpan,
+	psc *SpanContext,
+	sampled *bool,
+	sc *SpanContext,
+) {
+	start(ctx, spanPtr, psc, sampled, sc)
+}
+
+// start is used for testing.
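+// Tests can swap it to observe or stub the eBPF hook; an illustrative
+// replacement that forces the sampled decision:
+//
+//	start = func(_ context.Context, _ *autoSpan, _ *SpanContext, sampled *bool, _ *SpanContext) {
+//		*sampled = true
+//	}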
+var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {}
+
+func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) {
+	span := &telemetry.Span{
+		TraceID:      telemetry.TraceID(sc.TraceID()),
+		SpanID:       telemetry.SpanID(sc.SpanID()),
+		Flags:        uint32(sc.TraceFlags()),
+		TraceState:   sc.TraceState().String(),
+		ParentSpanID: telemetry.SpanID(psc.SpanID()),
+		Name:         name,
+		Kind:         spanKind(cfg.SpanKind()),
+	}
+
+	span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
+
+	links := cfg.Links()
+	if limit := maxSpan.Links; limit == 0 {
+		n := int64(len(links))
+		if n > 0 {
+			span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+		}
+	} else {
+		if limit > 0 {
+			n := int64(max(len(links)-limit, 0))
+			span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+			links = links[n:]
+		}
+		span.Links = convLinks(links)
+	}
+
+	if t := cfg.Timestamp(); !t.IsZero() {
+		span.StartTime = cfg.Timestamp()
+	} else {
+		span.StartTime = time.Now()
+	}
+
+	return &telemetry.Traces{
+		ResourceSpans: []*telemetry.ResourceSpans{
+			{
+				ScopeSpans: []*telemetry.ScopeSpans{
+					{
+						Scope: &telemetry.Scope{
+							Name:    t.name,
+							Version: t.version,
+						},
+						Spans:     []*telemetry.Span{span},
+						SchemaURL: t.schemaURL,
+					},
+				},
+			},
+		},
+	}, span
+}
+
+func spanKind(kind SpanKind) telemetry.SpanKind {
+	switch kind {
+	case SpanKindInternal:
+		return telemetry.SpanKindInternal
+	case SpanKindServer:
+		return telemetry.SpanKindServer
+	case SpanKindClient:
+		return telemetry.SpanKindClient
+	case SpanKindProducer:
+		return telemetry.SpanKindProducer
+	case SpanKindConsumer:
+		return telemetry.SpanKindConsumer
+	}
+	return telemetry.SpanKind(0) // undefined.
+}
+
+type autoSpan struct {
+	embedded.Span
+
+	spanContext SpanContext
+	sampled     atomic.Bool
+
+	mu     sync.Mutex
+	traces *telemetry.Traces
+	span   *telemetry.Span
+}
+
+func (s *autoSpan) SpanContext() SpanContext {
+	if s == nil {
+		return SpanContext{}
+	}
+	// s.spanContext is immutable, do not acquire lock s.mu.
+	return s.spanContext
+}
+
+func (s *autoSpan) IsRecording() bool {
+	if s == nil {
+		return false
+	}
+
+	return s.sampled.Load()
+}
+
+func (s *autoSpan) SetStatus(c codes.Code, msg string) {
+	if s == nil || !s.sampled.Load() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if s.span.Status == nil {
+		s.span.Status = new(telemetry.Status)
+	}
+
+	s.span.Status.Message = msg
+
+	switch c {
+	case codes.Unset:
+		s.span.Status.Code = telemetry.StatusCodeUnset
+	case codes.Error:
+		s.span.Status.Code = telemetry.StatusCodeError
+	case codes.Ok:
+		s.span.Status.Code = telemetry.StatusCodeOK
+	}
+}
+
+func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) {
+	if s == nil || !s.sampled.Load() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	limit := maxSpan.Attrs
+	if limit == 0 {
+		// No attributes allowed.
+		n := int64(len(attrs))
+		if n > 0 {
+			s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
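+			// The drop count saturates at MaxUint32 because the OTLP
+			// dropped-attributes field is a uint32.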
+		}
+		return
+	}
+
+	m := make(map[string]int)
+	for i, a := range s.span.Attrs {
+		m[a.Key] = i
+	}
+
+	for _, a := range attrs {
+		val := convAttrValue(a.Value)
+		if val.Empty() {
+			s.span.DroppedAttrs++
+			continue
+		}
+
+		if idx, ok := m[string(a.Key)]; ok {
+			s.span.Attrs[idx] = telemetry.Attr{
+				Key:   string(a.Key),
+				Value: val,
+			}
+		} else if limit < 0 || len(s.span.Attrs) < limit {
+			s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
+				Key:   string(a.Key),
+				Value: val,
+			})
+			m[string(a.Key)] = len(s.span.Attrs) - 1
+		} else {
+			s.span.DroppedAttrs++
+		}
+	}
+}
+
+// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
+// number of dropped attributes is also returned.
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+	n := len(attrs)
+	if limit == 0 {
+		var out uint32
+		if n > 0 {
+			out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked.
+		}
+		return nil, out
+	}
+
+	if limit < 0 {
+		// Unlimited.
+		return convAttrs(attrs), 0
+	}
+
+	if n < 0 {
+		n = 0
+	}
+
+	limit = min(n, limit)
+	return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked.
+}
+
+func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
+	if len(attrs) == 0 {
+		// Avoid allocations if not necessary.
+		return nil
+	}
+
+	out := make([]telemetry.Attr, 0, len(attrs))
+	for _, attr := range attrs {
+		key := string(attr.Key)
+		val := convAttrValue(attr.Value)
+		if val.Empty() {
+			continue
+		}
+		out = append(out, telemetry.Attr{Key: key, Value: val})
+	}
+	return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+	switch value.Type() {
+	case attribute.BOOL:
+		return telemetry.BoolValue(value.AsBool())
+	case attribute.INT64:
+		return telemetry.Int64Value(value.AsInt64())
+	case attribute.FLOAT64:
+		return telemetry.Float64Value(value.AsFloat64())
+	case attribute.STRING:
+		v := truncate(maxSpan.AttrValueLen, value.AsString())
+		return telemetry.StringValue(v)
+	case attribute.BOOLSLICE:
+		slice := value.AsBoolSlice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			out = append(out, telemetry.BoolValue(v))
+		}
+		return telemetry.SliceValue(out...)
+	case attribute.INT64SLICE:
+		slice := value.AsInt64Slice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			out = append(out, telemetry.Int64Value(v))
+		}
+		return telemetry.SliceValue(out...)
+	case attribute.FLOAT64SLICE:
+		slice := value.AsFloat64Slice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			out = append(out, telemetry.Float64Value(v))
+		}
+		return telemetry.SliceValue(out...)
+	case attribute.STRINGSLICE:
+		slice := value.AsStringSlice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			v = truncate(maxSpan.AttrValueLen, v)
+			out = append(out, telemetry.StringValue(v))
+		}
+		return telemetry.SliceValue(out...)
+	}
+	return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most the
+// limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
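+//
+// For example (illustrative), truncate(3, "a€cd") returns "a€c": three
+// characters are kept even though they span five bytes.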
+func truncate(limit int, s string) string {
+	// This prioritizes performance in the following order based on the most
+	// common expected use-cases.
+	//
+	// - Short values less than the default limit (128).
+	// - Strings with valid encodings that exceed the limit.
+	// - No limit.
+	// - Strings with invalid encodings that exceed the limit.
+	if limit < 0 || len(s) <= limit {
+		return s
+	}
+
+	// Optimistically, assume all valid UTF-8.
+	var b strings.Builder
+	count := 0
+	for i, c := range s {
+		if c != utf8.RuneError {
+			count++
+			if count > limit {
+				return s[:i]
+			}
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// Invalid encoding.
+			b.Grow(len(s) - 1)
+			_, _ = b.WriteString(s[:i])
+			s = s[i:]
+			break
+		}
+	}
+
+	// Fast-path, no invalid input.
+	if b.Cap() == 0 {
+		return s
+	}
+
+	// Truncate while validating UTF-8.
+	for i := 0; i < len(s) && count < limit; {
+		c := s[i]
+		if c < utf8.RuneSelf {
+			// Optimization for single byte runes (common case).
+			_ = b.WriteByte(c)
+			i++
+			count++
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// We checked for all 1-byte runes above; this is a RuneError.
+			i++
+			continue
+		}
+
+		_, _ = b.WriteString(s[i : i+size])
+		i += size
+		count++
+	}
+
+	return b.String()
+}
+
+func (s *autoSpan) End(opts ...SpanEndOption) {
+	if s == nil || !s.sampled.Swap(false) {
+		return
+	}
+
+	// s.end exists so the lock (s.mu) is not held while s.ended is called.
+	s.ended(s.end(opts))
+}
+
+func (s *autoSpan) end(opts []SpanEndOption) []byte {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	cfg := NewSpanEndConfig(opts...)
+	if t := cfg.Timestamp(); !t.IsZero() {
+		s.span.EndTime = cfg.Timestamp()
+	} else {
+		s.span.EndTime = time.Now()
+	}
+
+	b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+	return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*autoSpan) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *autoSpan) RecordError(err error, opts ...EventOption) {
+	if s == nil || err == nil || !s.sampled.Load() {
+		return
+	}
+
+	cfg := NewEventConfig(opts...)
+
+	attrs := cfg.Attributes()
+	attrs = append(attrs,
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	)
+	if cfg.StackTrace() {
+		buf := make([]byte, 2048)
+		n := runtime.Stack(buf, false)
+		attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		return t.String()
+	}
+	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func (s *autoSpan) AddEvent(name string, opts ...EventOption) {
+	if s == nil || !s.sampled.Load() {
+		return
+	}
+
+	cfg := NewEventConfig(opts...)
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
+}
+
+// addEvent adds an event with name and attrs at tStamp to the span. The span
+// lock (s.mu) needs to be held by the caller.
+func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
+	limit := maxSpan.Events
+
+	if limit == 0 {
+		s.span.DroppedEvents++
+		return
+	}
+
+	if limit > 0 && len(s.span.Events) == limit {
+		// Drop head while avoiding allocation of more capacity.
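+		// The oldest event (index 0) is shifted out so the newest event can be
+		// appended below.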
+		copy(s.span.Events[:limit-1], s.span.Events[1:])
+		s.span.Events = s.span.Events[:limit-1]
+		s.span.DroppedEvents++
+	}
+
+	e := &telemetry.SpanEvent{Time: tStamp, Name: name}
+	e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
+
+	s.span.Events = append(s.span.Events, e)
+}
+
+func (s *autoSpan) AddLink(link Link) {
+	if s == nil || !s.sampled.Load() {
+		return
+	}
+
+	l := maxSpan.Links
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if l == 0 {
+		s.span.DroppedLinks++
+		return
+	}
+
+	if l > 0 && len(s.span.Links) == l {
+		// Drop head while avoiding allocation of more capacity.
+		copy(s.span.Links[:l-1], s.span.Links[1:])
+		s.span.Links = s.span.Links[:l-1]
+		s.span.DroppedLinks++
+	}
+
+	s.span.Links = append(s.span.Links, convLink(link))
+}
+
+func convLinks(links []Link) []*telemetry.SpanLink {
+	out := make([]*telemetry.SpanLink, 0, len(links))
+	for _, link := range links {
+		out = append(out, convLink(link))
+	}
+	return out
+}
+
+func convLink(link Link) *telemetry.SpanLink {
+	l := &telemetry.SpanLink{
+		TraceID:    telemetry.TraceID(link.SpanContext.TraceID()),
+		SpanID:     telemetry.SpanID(link.SpanContext.SpanID()),
+		TraceState: link.SpanContext.TraceState().String(),
+		Flags:      uint32(link.SpanContext.TraceFlags()),
+	}
+	l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
+
+	return l
+}
+
+func (s *autoSpan) SetName(name string) {
+	if s == nil || !s.sampled.Load() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.span.Name = name
+}
+
+func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() }
+
+// maxSpan are the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+	// Attrs is the number of allowed attributes for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+	// that is not set, is used.
+	Attrs int
+	// AttrValueLen is the maximum attribute value length allowed for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+	// if that is not set, is used.
+	AttrValueLen int
+	// Events is the number of allowed events for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+	Events int
+	// EventAttrs is the number of allowed attributes for a span event.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+	EventAttrs int
+	// Links is the number of allowed Links for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+	Links int
+	// LinkAttrs is the number of allowed attributes for a span link.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+	LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+	return spanLimits{
+		Attrs: firstEnv(
+			128,
+			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+			"OTEL_ATTRIBUTE_COUNT_LIMIT",
+		),
+		AttrValueLen: firstEnv(
+			-1, // Unlimited.
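+			// firstEnv checks keys in order, so the span-specific variable
+			// takes precedence over the general one when both are set.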
+			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+		),
+		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+	}
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int {
+	for _, key := range keys {
+		strV := os.Getenv(key)
+		if strV == "" {
+			continue
+		}
+
+		v, err := strconv.Atoi(strV)
+		if err == nil {
+			return v
+		}
+		// Ignore invalid environment variable.
+	}
+
+	return defaultVal
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/config.go b/tools/vendor/go.opentelemetry.io/otel/trace/config.go
index 273d58e00..9c0b720a4 100644
--- a/tools/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{}
 
 // WithAttributes adds the attributes related to a span life-cycle event.
 // These attributes are used to describe the work a Span represents when this
-// option is provided to a Span's start or end events. Otherwise, these
+// option is provided to a Span's start event. Otherwise, these
 // attributes provide additional information about the event being recorded
 // (e.g. error, state change, processing progress, system event).
 //
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
new file mode 100644
index 000000000..f663547b4
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// Attr is a key-value pair.
+type Attr struct {
+	Key   string `json:"key,omitempty"`
+	Value Value  `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+	return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+	return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+	return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+	return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+	return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+	return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+	return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+	return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
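+//
+// For example (hypothetical attributes):
+//
+//	String("k", "v").Equal(String("k", "v")) // true
+//	String("k", "v").Equal(Int("k", 1))      // false: values differ in kind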
+func (a Attr) Equal(b Attr) bool {
+	return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
new file mode 100644
index 000000000..5debe90bb
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides a lightweight representation of OpenTelemetry
+telemetry that is compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
new file mode 100644
index 000000000..7b1ae3c4e
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+const (
+	traceIDSize = 16
+	spanIDSize  = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+	return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if tid contains only zero bytes.
+func (tid TraceID) IsEmpty() bool {
+	return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+	if tid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+	*tid = [traceIDSize]byte{}
+	return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+	return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if sid contains only zero bytes.
+func (sid SpanID) IsEmpty() bool {
+	return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+	if sid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+	*sid = [spanIDSize]byte{}
+	return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+	// Plus 2 quote chars at the start and end.
+	hexLen := hex.EncodedLen(len(id)) + 2
+
+	b := make([]byte, hexLen)
+	hex.Encode(b[1:hexLen-1], id)
+	b[0], b[hexLen-1] = '"', '"'
+
+	return b, nil
+}
+
+// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
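+// A zero-length input (after any quotes are stripped) leaves dst untouched,
+// which is why callers zero their receiver first. For example, both
+// `"0102030405060708"` and `0102030405060708` decode into the same 8-byte
+// SpanID.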
+func unmarshalJSON(dst []byte, src []byte) error {
+	if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+		src = src[1 : l-1]
+	}
+	nLen := len(src)
+	if nLen == 0 {
+		return nil
+	}
+
+	if len(dst) != hex.DecodedLen(nLen) {
+		return errors.New("invalid length for ID")
+	}
+
+	_, err := hex.Decode(dst, src)
+	if err != nil {
+		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
new file mode 100644
index 000000000..f5e3a8cec
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedInt, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	} else {
+		var parsedInt int64
+		if err := json.Unmarshal(data, &parsedInt); err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	}
+	return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedUint, err := strconv.ParseUint(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	} else {
+		var parsedUint uint64
+		if err := json.Unmarshal(data, &parsedUint); err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
new file mode 100644
index 000000000..1798a702d
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Resource information.
+type Resource struct {
+	// Attrs are the set of attributes that describe the resource. Attribute
+	// keys MUST be unique (it is not allowed to have more than one attribute
+	// with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// DroppedAttrs is the number of dropped attributes. If the value
+	// is 0, then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
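+// Both the lowerCamelCase keys used by OTLP/JSON and their snake_case
+// equivalents are accepted; for example, "droppedAttributesCount" and
+// "dropped_attributes_count" decode into the same field.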
+func (r *Resource) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Resource type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Resource field: %#v", keyIface)
+		}
+
+		switch key {
+		case "attributes":
+			err = decoder.Decode(&r.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&r.DroppedAttrs)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
new file mode 100644
index 000000000..c2b4c635b
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+	Name         string `json:"name,omitempty"`
+	Version      string `json:"version,omitempty"`
+	Attrs        []Attr `json:"attributes,omitempty"`
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Scope type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Scope field: %#v", keyIface)
+		}
+
+		switch key {
+		case "name":
+			err = decoder.Decode(&s.Name)
+		case "version":
+			err = decoder.Decode(&s.Version)
+		case "attributes":
+			err = decoder.Decode(&s.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&s.DroppedAttrs)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
new file mode 100644
index 000000000..e7ca62c66
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
@@ -0,0 +1,472 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+	// A unique identifier for a trace. All spans from the same trace share
+	// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+	// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
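+	// In this Go representation the length is fixed by the array type, so
+	// only the all-zeroes case (see TraceID.IsEmpty) can make it invalid.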
+	//
+	// This field is required.
+	TraceID TraceID `json:"traceId,omitempty"`
+	// A unique identifier for a span within a trace, assigned when the span
+	// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+	// other than 8 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is required.
+	SpanID SpanID `json:"spanId,omitempty"`
+	// trace_state conveys information about request position in multiple distributed tracing graphs.
+	// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+	// See also https://github.com/w3c/distributed-tracing for more details about this field.
+	TraceState string `json:"traceState,omitempty"`
+	// The `span_id` of this span's parent span. If this is a root span, then this
+	// field must be empty. The ID is an 8-byte array.
+	ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+	// Flags, a bit field.
+	//
+	// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+	// Context specification. To read the 8-bit W3C trace flag, use
+	// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+	//
+	// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+	//
+	// Bits 8 and 9 represent the 3 states of whether a span's parent
+	// is remote. The states are (unknown, is not remote, is remote).
+	// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+	// To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+	//
+	// When creating span messages, if the message is logically forwarded from another source
+	// with an equivalent flags field (i.e., usually another OTLP span message), the field SHOULD
+	// be copied as-is. If creating from a source that does not have an equivalent flags field
+	// (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+	// be set to zero.
+	// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+	//
+	// [Optional].
+	Flags uint32 `json:"flags,omitempty"`
+	// A description of the span's operation.
+	//
+	// For example, the name can be a qualified method name or a file name
+	// and a line number where the operation is called. A best practice is to use
+	// the same display name at the same call point in an application.
+	// This makes it easier to correlate spans in different traces.
+	//
+	// This field is semantically required to be set to a non-empty string.
+	// An empty value is equivalent to an unknown span name.
+	//
+	// This field is required.
+	Name string `json:"name"`
+	// Distinguishes between spans generated in a particular context. For example,
+	// two spans with the same name may be distinguished using `CLIENT` (caller)
+	// and `SERVER` (callee) to identify queueing latency associated with the span.
+	Kind SpanKind `json:"kind,omitempty"`
+	// start_time_unix_nano is the start time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution starts. On the server side, this
+	// is the time when the server's application handler starts running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+	// end_time_unix_nano is the end time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution ends. On the server side, this
+	// is the time when the server application handler stops running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+	// attributes is a collection of key/value pairs. Note, global attributes
+	// like server name can be set using the resource API. Examples of attributes:
+	//
+	//     "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+	//     "/http/server_latency": 300
+	//     "example.com/myattribute": true
+	//     "example.com/score": 10.239
+	//
+	// The OpenTelemetry API specification further restricts the allowed value types:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of attributes that were discarded. Attributes
+	// can be discarded because their keys are too long or because there are too many
+	// attributes. If this value is 0, then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+	// events is a collection of Event items.
+	Events []*SpanEvent `json:"events,omitempty"`
+	// dropped_events_count is the number of dropped events. If the value is 0, then no
+	// events were dropped.
+	DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+	// links is a collection of Links, which are references from this span to a span
+	// in the same or different trace.
+	Links []*SpanLink `json:"links,omitempty"`
+	// dropped_links_count is the number of dropped links after the maximum size was
+	// enforced. If this value is 0, then no links were dropped.
+	DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+	// An optional final status for this span. Semantically when Status isn't set, it means
+	// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+	Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+	startT := s.StartTime.UnixNano()
+	if s.StartTime.IsZero() || startT < 0 {
+		startT = 0
+	}
+
+	endT := s.EndTime.UnixNano()
+	if s.EndTime.IsZero() || endT < 0 {
+		endT = 0
+	}
+
+	// Override non-empty default SpanID marshal and omitempty.
+	var parentSpanId string
+	if !s.ParentSpanID.IsEmpty() {
+		b := make([]byte, hex.EncodedLen(spanIDSize))
+		hex.Encode(b, s.ParentSpanID[:])
+		parentSpanId = string(b)
+	}
+
+	type Alias Span
+	return json.Marshal(struct {
+		Alias
+		ParentSpanID string `json:"parentSpanId,omitempty"`
+		StartTime    uint64 `json:"startTimeUnixNano,omitempty"`
+		EndTime      uint64 `json:"endTimeUnixNano,omitempty"`
+	}{
+		Alias:        Alias(s),
+		ParentSpanID: parentSpanId,
+		StartTime:    uint64(startT), // nolint:gosec // >0 checked above.
+		EndTime:      uint64(endT),   // nolint:gosec // >0 checked above.
+	})
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
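+// Unix-nanosecond timestamp fields are decoded through protoUint64 and
+// clamped to math.MaxInt64 before being converted to time.Time values.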
+func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.StartTime = time.Unix(0, v) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.EndTime = time.Unix(0, v) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. 
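+//
+// The zero value is an unspecified kind; per the OTLP convention, receivers
+// MAY assume SpanKindInternal when they receive an unspecified kind.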
+type SpanKind int32 + +const ( + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. + SpanKindInternal SpanKind = 1 + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. + SpanKindServer SpanKind = 2 + // SpanKindClient indicates that the span describes a request to some + // remote service. + SpanKindClient SpanKind = 3 + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// SpanEvent is a time-stamped annotation of the span, consisting of +// user-supplied text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), // nolint: gosec // >0 checked above + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + se.Time = time.Unix(0, v) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. 
+ } + + if err != nil { + return err + } + } + return nil +} + +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. 
+ } + + if err != nil { + return err + } + } + return nil +} diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go new file mode 100644 index 000000000..1039bf40c --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// StatusCode is the status of a Span. +// +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // StatusCodeUnset is the default status. + StatusCodeUnset StatusCode = 0 + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. + StatusCodeOK StatusCode = 1 + // StatusCodeError is used when the Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// Status defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go new file mode 100644 index 000000000..e5f10767c --- /dev/null +++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
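+				// (An io.EOF at this point means the input ended right after
+				// the opening brace; treat it as an object with no fields.)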
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+		}
+
+		switch key {
+		case "resourceSpans", "resource_spans":
+			err = decoder.Decode(&td.ResourceSpans)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ResourceSpans is a collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+	// The resource for the spans in this message.
+	// If this field is not set then no resource info is known.
+	Resource Resource `json:"resource"`
+	// A list of ScopeSpans that originate from a resource.
+	ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+	// This schema_url applies to the data in the "resource" field. It does not apply
+	// to the data in the "scope_spans" field which have their own schema_url field.
+	SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid ResourceSpans type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "resource":
+			err = decoder.Decode(&rs.Resource)
+		case "scopeSpans", "scope_spans":
+			err = decoder.Decode(&rs.ScopeSpans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&rs.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+	// The instrumentation scope information for the spans in this message.
+	// Semantically when InstrumentationScope isn't set, it is equivalent to
+	// an empty instrumentation scope name (unknown).
+	Scope *Scope `json:"scope"`
+	// A list of Spans that originate from an instrumentation scope.
+	Spans []*Span `json:"spans,omitempty"`
+	// The Schema URL, if known. This is the identifier of the Schema that the span data
+	// is recorded in. To learn more about Schema URL see
+	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+	// This schema_url applies to all spans and span events in the "spans" field.
+	SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid ScopeSpans type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "scope":
+			err = decoder.Decode(&ss.Scope)
+		case "spans":
+			err = decoder.Decode(&ss.Spans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&ss.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
new file mode 100644
index 000000000..ae9ce102a
--- /dev/null
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -0,0 +1,453 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+	"bytes"
+	"cmp"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"strconv"
+	"unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused  // This is indeed used.
+
+	// num holds the value for Int64, Float64, and Bool. It holds the length
+	// for String, Bytes, Slice, Map.
+	num uint64
+	// any holds either the ValueKindBool, ValueKindInt64, ValueKindFloat64,
+	// stringptr, bytesptr, sliceptr, or mapptr. If ValueKindBool,
+	// ValueKindInt64, or ValueKindFloat64 then the value of Value is in num
+	// as described above. Otherwise, it contains the value wrapped in the
+	// appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for ValueKindString Values.
+	stringptr *byte
+	// bytesptr represents a value in Value.any for ValueKindBytes Values.
+	bytesptr *byte
+	// sliceptr represents a value in Value.any for ValueKindSlice Values.
+	sliceptr *Value
+	// mapptr represents a value in Value.any for ValueKindMap Values.
+	mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+	ValueKindEmpty ValueKind = iota
+	ValueKindBool
+	ValueKindFloat64
+	ValueKindInt64
+	ValueKindString
+	ValueKindBytes
+	ValueKindSlice
+	ValueKindMap
+)
+
+var valueKindStrings = []string{
+	"Empty",
+	"Bool",
+	"Float64",
+	"Int64",
+	"String",
+	"Bytes",
+	"Slice",
+	"Map",
+}
+
+func (k ValueKind) String() string {
+	if k >= 0 && int(k) < len(valueKindStrings) {
+		return valueKindStrings[k]
+	}
+	return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: stringptr(unsafe.StringData(v)),
+	}
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+	return Value{
+		num: uint64(v), // nolint: gosec // Store raw bytes.
+		any: ValueKindInt64,
+	}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+	var n uint64
+	if v {
+		n = 1
+	}
+	return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: bytesptr(unsafe.SliceData(v)),
+	}
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
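+// The returned Value references the slice's backing array directly rather
+// than copying it, which is why mutating the slice after the call is unsafe.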
+func SliceValue(vs ...Value) Value {
+	return Value{
+		num: uint64(len(vs)),
+		any: sliceptr(unsafe.SliceData(vs)),
+	}
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+	return Value{
+		num: uint64(len(kvs)),
+		any: mapptr(unsafe.SliceData(kvs)),
+	}
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+	if sp, ok := v.any.(stringptr); ok {
+		return unsafe.String(sp, v.num)
+	}
+	// TODO: error handle
+	return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the
+// Value is not ValueKindString.
+func (v Value) asString() string {
+	return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+	if v.Kind() != ValueKindInt64 {
+		// TODO: error handle
+		return 0
+	}
+	return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of
+// ValueKindInt64, this will return garbage.
+func (v Value) asInt64() int64 {
+	// Assumes v.num was a valid int64 (overflow not checked).
+	return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+	if v.Kind() != ValueKindBool {
+		// TODO: error handle
+		return false
+	}
+	return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of
+// ValueKindBool, this will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+	if v.Kind() != ValueKindFloat64 {
+		// TODO: error handle
+		return 0
+	}
+	return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// ValueKindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+	if sp, ok := v.any.(bytesptr); ok {
+		return unsafe.Slice((*byte)(sp), v.num)
+	}
+	// TODO: error handle
+	return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the
+// Value is not ValueKindBytes.
+func (v Value) asBytes() []byte {
+	return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+	if sp, ok := v.any.(sliceptr); ok {
+		return unsafe.Slice((*Value)(sp), v.num)
+	}
+	// TODO: error handle
+	return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the
+// Value is not ValueKindSlice.
+func (v Value) asSlice() []Value {
+	return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
+func (v Value) AsMap() []Attr {
+	if sp, ok := v.any.(mapptr); ok {
+		return unsafe.Slice((*Attr)(sp), v.num)
+	}
+	// TODO: error handle
+	return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not ValueKindMap.
+func (v Value) asMap() []Attr {
+	return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+	switch x := v.any.(type) {
+	case ValueKind:
+		return x
+	case stringptr:
+		return ValueKindString
+	case bytesptr:
+		return ValueKindBytes
+	case sliceptr:
+		return ValueKindSlice
+	case mapptr:
+		return ValueKindMap
+	default:
+		return ValueKindEmpty
+	}
+}
+
+// Empty reports whether v does not hold any value.
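+//
+// For example, the zero Value is empty, while StringValue("") is a
+// zero-length ValueKindString value and therefore not empty.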
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal reports whether v is equal to w.
+func (v Value) Equal(w Value) bool {
+	k1 := v.Kind()
+	k2 := w.Kind()
+	if k1 != k2 {
+		return false
+	}
+	switch k1 {
+	case ValueKindInt64, ValueKindBool:
+		return v.num == w.num
+	case ValueKindString:
+		return v.asString() == w.asString()
+	case ValueKindFloat64:
+		return v.asFloat64() == w.asFloat64()
+	case ValueKindSlice:
+		return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+	case ValueKindMap:
+		sv := sortMap(v.asMap())
+		sw := sortMap(w.asMap())
+		return slices.EqualFunc(sv, sw, Attr.Equal)
+	case ValueKindBytes:
+		return bytes.Equal(v.asBytes(), w.asBytes())
+	case ValueKindEmpty:
+		return true
+	default:
+		// TODO: error handle
+		return false
+	}
+}
+
+func sortMap(m []Attr) []Attr {
+	sm := make([]Attr, len(m))
+	copy(sm, m)
+	slices.SortFunc(sm, func(a, b Attr) int {
+		return cmp.Compare(a.Key, b.Key)
+	})
+
+	return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+	switch v.Kind() {
+	case ValueKindString:
+		return v.asString()
+	case ValueKindInt64:
+		// Assumes v.num was a valid int64 (overflow not checked).
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return string(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a Kind is not handled. It is preferable to have users
+		// open an issue asking why their attributes have an "unhandled: "
+		// prefix than say that their code is panicking.
+		return fmt.Sprintf("unhandled: %v", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	switch v.Kind() {
+	case ValueKindString:
+		return json.Marshal(struct {
+			Value string `json:"stringValue"`
+		}{v.asString()})
+	case ValueKindInt64:
+		return json.Marshal(struct {
+			Value string `json:"intValue"`
+		}{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes.
+	case ValueKindFloat64:
+		return json.Marshal(struct {
+			Value float64 `json:"doubleValue"`
+		}{v.asFloat64()})
+	case ValueKindBool:
+		return json.Marshal(struct {
+			Value bool `json:"boolValue"`
+		}{v.asBool()})
+	case ValueKindBytes:
+		return json.Marshal(struct {
+			Value []byte `json:"bytesValue"`
+		}{v.asBytes()})
+	case ValueKindMap:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Attr `json:"values"`
+			} `json:"kvlistValue"`
+		}{struct {
+			Values []Attr `json:"values"`
+		}{v.asMap()}})
+	case ValueKindSlice:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Value `json:"values"`
+			} `json:"arrayValue"`
+		}{struct {
+			Values []Value `json:"values"`
+		}{v.asSlice()}})
+	case ValueKindEmpty:
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+	}
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
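+// Only the first recognized field is used; for example, `{"intValue":"42"}`
+// decodes to Int64Value(42) (OTLP/JSON encodes 64-bit integers as strings),
+// and any fields that follow it are ignored.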
+func (v *Value) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Value type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Value key: %#v", keyIface)
+		}
+
+		switch key {
+		case "stringValue", "string_value":
+			var val string
+			err = decoder.Decode(&val)
+			*v = StringValue(val)
+		case "boolValue", "bool_value":
+			var val bool
+			err = decoder.Decode(&val)
+			*v = BoolValue(val)
+		case "intValue", "int_value":
+			var val protoInt64
+			err = decoder.Decode(&val)
+			*v = Int64Value(val.Int64())
+		case "doubleValue", "double_value":
+			var val float64
+			err = decoder.Decode(&val)
+			*v = Float64Value(val)
+		case "bytesValue", "bytes_value":
+			var val64 string
+			if err := decoder.Decode(&val64); err != nil {
+				return err
+			}
+			var val []byte
+			val, err = base64.StdEncoding.DecodeString(val64)
+			*v = BytesValue(val)
+		case "arrayValue", "array_value":
+			var val struct{ Values []Value }
+			err = decoder.Decode(&val)
+			*v = SliceValue(val.Values...)
+		case "kvlistValue", "kvlist_value":
+			var val struct{ Values []Attr }
+			err = decoder.Decode(&val)
+			*v = MapValue(val.Values...)
+		default:
+			// Skip unknown.
+			continue
+		}
+		// Use first valid. Ignore the rest.
+		return err
+	}
+
+	// Only unknown fields. Return nil without unmarshaling any value.
+	return nil
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/trace/noop.go b/tools/vendor/go.opentelemetry.io/otel/trace/noop.go
index ca20e9997..0f56e4dbb 100644
--- a/tools/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/tools/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -82,4 +82,24 @@ func (noopSpan) AddLink(Link) {}
 func (noopSpan) SetName(string) {}
 
 // TracerProvider returns a no-op TracerProvider.
-func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
+func (s noopSpan) TracerProvider() TracerProvider {
+	return s.tracerProvider(autoInstEnabled)
+}
+
+// autoInstEnabled defines if the auto-instrumentation SDK is enabled.
+//
+// The auto-instrumentation is expected to set this value to true when it
+// attaches to the process.
+var autoInstEnabled = new(bool)
+
+// tracerProvider returns a noopTracerProvider if autoEnabled is false,
+// otherwise it will return a TracerProvider from the sdk package used in
+// auto-instrumentation.
+//
+//go:noinline
+func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider {
+	if *autoEnabled {
+		return newAutoTracerProvider()
+	}
+	return noopTracerProvider{}
+}
diff --git a/tools/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/tools/vendor/go.opentelemetry.io/otel/verify_readmes.sh
deleted file mode 100644
index 1e87855ee..000000000
--- a/tools/vendor/go.opentelemetry.io/otel/verify_readmes.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
-
-missingReadme=false
-for dir in $dirs; do
-    if [ ! -f "$dir/README.md" ]; then
-        echo "couldn't find README.md for $dir"
-        missingReadme=true
-    fi
-done
-
-if [ "$missingReadme" = true ] ; then
-    echo "Error: some READMEs couldn't be found."
- exit 1 -fi diff --git a/tools/vendor/go.opentelemetry.io/otel/version.go b/tools/vendor/go.opentelemetry.io/otel/version.go index 59e248161..7afe92b59 100644 --- a/tools/vendor/go.opentelemetry.io/otel/version.go +++ b/tools/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.32.0" + return "1.37.0" } diff --git a/tools/vendor/go.opentelemetry.io/otel/versions.yaml b/tools/vendor/go.opentelemetry.io/otel/versions.yaml index c04b12f6b..9d4742a17 100644 --- a/tools/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/tools/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,13 +3,12 @@ module-sets: stable-v1: - version: v1.32.0 + version: v1.37.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -23,20 +22,23 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.54.0 + version: v0.59.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.8.0 + version: v0.13.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/log/logtest - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/sdk/log/logtest - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.11 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/trace/internal/telemetry/test diff --git a/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go index dd1b73f1e..892864ea6 100644 --- a/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +++ b/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type TraceServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) } @@ -48,8 +46,6 @@ func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceR // All implementations must embed UnimplementedTraceServiceServer // for forward compatibility type TraceServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) mustEmbedUnimplementedTraceServiceServer() } diff --git a/tools/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/tools/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index 852209b09..a7c5d19bf 100644 --- a/tools/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/tools/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -430,6 +430,101 @@ func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { return 0 } +// A reference to an Entity. +// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs. +// +// Status: [Development] +type EntityRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The Schema URL, if known. This is the identifier of the Schema that the entity data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // + // This schema_url applies to the data in this message and to the Resource attributes + // referenced by id_keys and description_keys. + // TODO: discuss if we are happy with this somewhat complicated definition of what + // the schema_url applies to. + // + // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. + SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` + // Defines the type of the entity. MUST not change during the lifetime of the entity. + // For example: "service" or "host". This field is required and MUST not be empty + // for valid entities. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // Attribute Keys that identify the entity. + // MUST not change during the lifetime of the entity. The Id must contain at least one attribute. + // These keys MUST exist in the containing {message}.attributes. + IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"` + // Descriptive (non-identifying) attribute keys of the entity. + // MAY change over the lifetime of the entity. MAY be empty. + // These attribute keys are not part of entity's identity. + // These keys MUST exist in the containing {message}.attributes. + DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"` +} + +func (x *EntityRef) Reset() { + *x = EntityRef{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntityRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntityRef) ProtoMessage() {} + +func (x *EntityRef) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntityRef.ProtoReflect.Descriptor instead. 
+func (*EntityRef) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} +} + +func (x *EntityRef) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +func (x *EntityRef) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EntityRef) GetIdKeys() []string { + if x != nil { + return x.IdKeys + } + return nil +} + +func (x *EntityRef) GetDescriptionKeys() []string { + if x != nil { + return x.DescriptionKeys + } + return nil +} + var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ @@ -488,15 +583,23 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, - 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, + 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, + 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -511,13 +614,14 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { return file_opentelemetry_proto_common_v1_common_proto_rawDescData } -var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope + (*EntityRef)(nil), // 5: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue @@ -599,6 +703,18 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { return nil } } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntityRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ (*AnyValue_StringValue)(nil), @@ -615,7 +731,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/tools/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/tools/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index b7545b03b..eb7745d66 100644 --- a/tools/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/tools/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -48,6 +48,12 @@ type Resource struct { // dropped_attributes_count is the number of dropped attributes. If the value is 0, then // no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Set of entities that participate in this Resource. + // + // Note: keys in the references MUST exist in attributes of this message. 
+ // + // Status: [Development] + EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"` } func (x *Resource) Reset() { @@ -96,6 +102,13 @@ func (x *Resource) GetDroppedAttributesCount() uint32 { return 0 } +func (x *Resource) GetEntityRefs() []*v1.EntityRef { + if x != nil { + return x.EntityRefs + } + return nil +} + var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ @@ -106,7 +119,7 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, @@ -115,16 +128,21 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, - 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, + 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x52, 0x0a, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x73, 0x42, 0x83, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, + 0x0d, 0x52, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1f, 0x4f, + 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -141,16 +159,18 @@ func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ - (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource - (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*v1.EntityRef)(nil), // 2: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: opentelemetry.proto.resource.v1.Resource.entity_refs:type_name -> opentelemetry.proto.common.v1.EntityRef + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } diff --git a/tools/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/tools/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index d7099c35b..b342a0a94 100644 --- a/tools/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/tools/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -311,7 +311,8 @@ type ResourceSpans struct { // A list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. @@ -384,7 +385,8 @@ type ScopeSpans struct { // A list of Spans that originate from an instrumentation scope. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. 
To learn more about Schema URL see
+	// is recorded in. Notably, the last part of the URL path is the version number of the
+	// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
 	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
 	// This schema_url applies to all spans and span events in the "spans" field.
 	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
diff --git a/tools/vendor/github.com/containers/common/LICENSE b/tools/vendor/go.podman.io/common/LICENSE
similarity index 100%
rename from tools/vendor/github.com/containers/common/LICENSE
rename to tools/vendor/go.podman.io/common/LICENSE
diff --git a/tools/vendor/github.com/containers/common/pkg/auth/auth.go b/tools/vendor/go.podman.io/common/pkg/auth/auth.go
similarity index 95%
rename from tools/vendor/github.com/containers/common/pkg/auth/auth.go
rename to tools/vendor/go.podman.io/common/pkg/auth/auth.go
index a3d333a99..8cb9f3a63 100644
--- a/tools/vendor/github.com/containers/common/pkg/auth/auth.go
+++ b/tools/vendor/go.podman.io/common/pkg/auth/auth.go
@@ -10,15 +10,15 @@ import (
 	"path/filepath"
 	"strings"
 
-	passwd "github.com/containers/common/pkg/password"
-	"github.com/containers/image/v5/docker"
-	"github.com/containers/image/v5/docker/reference"
-	"github.com/containers/image/v5/pkg/docker/config"
-	"github.com/containers/image/v5/pkg/sysregistriesv2"
-	"github.com/containers/image/v5/types"
-	"github.com/containers/storage/pkg/fileutils"
-	"github.com/containers/storage/pkg/homedir"
 	"github.com/sirupsen/logrus"
+	passwd "go.podman.io/common/pkg/password"
+	"go.podman.io/image/v5/docker"
+	"go.podman.io/image/v5/docker/reference"
+	"go.podman.io/image/v5/pkg/docker/config"
+	"go.podman.io/image/v5/pkg/sysregistriesv2"
+	"go.podman.io/image/v5/types"
+	"go.podman.io/storage/pkg/fileutils"
+	"go.podman.io/storage/pkg/homedir"
 )
 
 // ErrNewCredentialsInvalid means that the new user-provided credentials are
@@ -173,10 +173,10 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
 	if opts.StdinPassword {
 		var stdinPasswordStrBuilder strings.Builder
 		if opts.Password != "" {
-			return errors.New("Can't specify both --password-stdin and --password")
+			return errors.New("can't specify both --password-stdin and --password")
 		}
 		if opts.Username == "" {
-			return errors.New("Must provide --username with --password-stdin")
+			return errors.New("must provide --username with --password-stdin")
 		}
 		scanner := bufio.NewScanner(opts.Stdin)
 		for scanner.Scan() {
@@ -320,7 +320,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
 	return strings.TrimSpace(username), password, err
 }
 
-// Logout implements a “log out” command with the provided opts and args
+// Logout implements a “log out” command with the provided opts and args.
 func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []string) error {
 	if err := CheckAuthFile(opts.AuthFile); err != nil {
 		return err
@@ -390,7 +390,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
 	}
 
 // defaultRegistryWhenUnspecified returns first registry from search list of registry.conf
-// used by login/logout when registry argument is not specified
+// used by login/logout when registry argument is not specified.
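As a usage sketch of the login flow above (the Stdout field and the trailing args parameter are assumptions based on the wider package API; the registry name is made up):

opts := &auth.LoginOptions{
	Username:      "alice", // hypothetical user
	StdinPassword: true,    // read the password from Stdin, as handled above
	Stdin:         os.Stdin,
	Stdout:        os.Stdout, // assumed field
}
// Login itself enforces the flag rules seen above: --password-stdin
// requires --username and conflicts with --password.
if err := auth.Login(context.Background(), &types.SystemContext{}, opts, []string{"registry.example.com"}); err != nil {
	logrus.Error(err)
}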
 func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) {
 	registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext)
 	if err != nil {
diff --git a/tools/vendor/github.com/containers/common/pkg/auth/cli.go b/tools/vendor/go.podman.io/common/pkg/auth/cli.go
similarity index 96%
rename from tools/vendor/github.com/containers/common/pkg/auth/cli.go
rename to tools/vendor/go.podman.io/common/pkg/auth/cli.go
index 60e02e51e..3546ccbeb 100644
--- a/tools/vendor/github.com/containers/common/pkg/auth/cli.go
+++ b/tools/vendor/go.podman.io/common/pkg/auth/cli.go
@@ -3,13 +3,13 @@ package auth
 import (
 	"io"
 
-	"github.com/containers/common/pkg/completion"
 	"github.com/spf13/pflag"
+	"go.podman.io/common/pkg/completion"
 )
 
 // LoginOptions represents common flags in login
 // In addition, the caller should probably provide a --tls-verify flag (that affects the provided
-// *types.SystemContest)
+// *types.SystemContext).
 type LoginOptions struct {
 	// CLI flags managed by the FlagSet returned by GetLoginFlags
 	// Callers that use GetLoginFlags should not need to touch these values at all; callers that use
@@ -30,7 +30,7 @@ type LoginOptions struct {
 	NoWriteBack bool // set to true to not write the credentials to the authfile/cred helpers
 }
 
-// LogoutOptions represents the results for flags in logout
+// LogoutOptions represents the results for flags in logout.
 type LogoutOptions struct {
 	// CLI flags managed by the FlagSet returned by GetLogoutFlags
 	// Callers that use GetLogoutFlags should not need to touch these values at all; callers that use
@@ -44,7 +44,7 @@ type LogoutOptions struct {
 	AcceptUnspecifiedRegistry bool // set to true if allows logout with unspecified registry
 }
 
-// GetLoginFlags defines and returns login flags for containers tools
+// GetLoginFlags defines and returns login flags for containers tools.
 func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
 	fs := pflag.FlagSet{}
 	fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
@@ -58,7 +58,7 @@ func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
 	return &fs
 }
 
-// GetLoginFlagsCompletions returns the FlagCompletions for the login flags
+// GetLoginFlagsCompletions returns the FlagCompletions for the login flags.
 func GetLoginFlagsCompletions() completion.FlagCompletions {
 	flagCompletion := completion.FlagCompletions{}
 	flagCompletion["authfile"] = completion.AutocompleteDefault
@@ -69,7 +69,7 @@ func GetLoginFlagsCompletions() completion.FlagCompletions {
 	return flagCompletion
 }
 
-// GetLogoutFlags defines and returns logout flags for containers tools
+// GetLogoutFlags defines and returns logout flags for containers tools.
 func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet {
 	fs := pflag.FlagSet{}
 	fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
@@ -78,7 +78,7 @@ func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet {
 	return &fs
 }
 
-// GetLogoutFlagsCompletions returns the FlagCompletions for the logout flags
+// GetLogoutFlagsCompletions returns the FlagCompletions for the logout flags.
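These helpers are designed to be attached to a cobra command; a minimal wiring sketch (the command definition is hypothetical, the helper calls are the ones defined in this file):

var loginOpts auth.LoginOptions

loginCmd := &cobra.Command{
	Use:   "login [REGISTRY]",
	Short: "Log in to a container registry",
}
// Attach the shared login flags, then register shell completions for them.
loginCmd.Flags().AddFlagSet(auth.GetLoginFlags(&loginOpts))
completion.CompleteCommandFlags(loginCmd, auth.GetLoginFlagsCompletions())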
func GetLogoutFlagsCompletions() completion.FlagCompletions { flagCompletion := completion.FlagCompletions{} flagCompletion["authfile"] = completion.AutocompleteDefault diff --git a/tools/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/tools/vendor/go.podman.io/common/pkg/capabilities/capabilities.go similarity index 70% rename from tools/vendor/github.com/containers/common/pkg/capabilities/capabilities.go rename to tools/vendor/go.podman.io/common/pkg/capabilities/capabilities.go index 43fd2c1b5..d88d747ff 100644 --- a/tools/vendor/github.com/containers/common/pkg/capabilities/capabilities.go +++ b/tools/vendor/go.podman.io/common/pkg/capabilities/capabilities.go @@ -9,20 +9,13 @@ import ( "errors" "fmt" "slices" - "sort" "strings" "sync" - "github.com/syndtr/gocapability/capability" + "github.com/moby/sys/capability" ) var ( - // Used internally and populated during init(). - capabilityList []string - - // Used internally and populated during init(). - capsList []capability.Cap - // ErrUnknownCapability is thrown when an unknown capability is processed. ErrUnknownCapability = errors.New("unknown capability") @@ -35,67 +28,67 @@ var ( // Useful on the CLI for `--cap-add=all` etc. const All = "ALL" -func getCapName(c capability.Cap) string { +func capName(c capability.Cap) string { return "CAP_" + strings.ToUpper(c.String()) } -func init() { - last := capability.CAP_LAST_CAP - // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap - if last == capability.Cap(63) { - last = capability.CAP_BLOCK_SUSPEND +// capStrList returns all capabilities supported by the currently running kernel, +// or an error if the list can not be obtained. +var capStrList = sync.OnceValues(func() ([]string, error) { + list, err := capability.ListSupported() + if err != nil { + return nil, err } - for _, cap := range capability.List() { - if cap > last { - continue - } - capsList = append(capsList, cap) - capabilityList = append(capabilityList, getCapName(cap)) - sort.Strings(capabilityList) + caps := make([]string, len(list)) + for i, c := range list { + caps[i] = capName(c) } -} - -var ( - boundingSetOnce sync.Once - boundingSetRet []string - boundingSetErr error -) + slices.Sort(caps) + return caps, nil +}) -// BoundingSet returns the capabilities in the current bounding set +// BoundingSet returns the capabilities in the current bounding set. func BoundingSet() ([]string, error) { - boundingSetOnce.Do(func() { - currentCaps, err := capability.NewPid2(0) - if err != nil { - boundingSetErr = err - return - } - err = currentCaps.Load() - if err != nil { - boundingSetErr = err - return - } - var r []string - for _, c := range capsList { - if !currentCaps.Get(capability.BOUNDING, c) { - continue - } - r = append(r, getCapName(c)) - } - boundingSetRet = r - sort.Strings(boundingSetRet) - boundingSetErr = err - }) - return boundingSetRet, boundingSetErr + return boundingSet() } -// AllCapabilities returns all known capabilities. +var boundingSet = sync.OnceValues(func() ([]string, error) { + currentCaps, err := capability.NewPid2(0) + if err != nil { + return nil, err + } + err = currentCaps.Load() + if err != nil { + return nil, err + } + list, err := capability.ListSupported() + if err != nil { + return nil, err + } + var r []string + for _, c := range list { + if !currentCaps.Get(capability.BOUNDING, c) { + continue + } + r = append(r, capName(c)) + } + slices.Sort(r) + return r, nil +}) + +// AllCapabilities returns all capabilities supported by the running kernel. 
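A short usage sketch of the helpers in this file (values are illustrative; note the implicit "CAP_" prefixing done by NormalizeCapabilities and the sorted result; AllCapabilities continues below):

func exampleMerge() ([]string, error) {
	// Hypothetical base set plus --cap-add/--cap-drop style edits.
	// Result (sorted): ["CAP_CHOWN", "CAP_NET_ADMIN"]; unknown names
	// fail with ErrUnknownCapability.
	return capabilities.MergeCapabilities(
		[]string{"CAP_CHOWN", "CAP_KILL"}, // base set
		[]string{"net_admin"},             // added, normalized to CAP_NET_ADMIN
		[]string{"CAP_KILL"},              // dropped
	)
}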
func AllCapabilities() []string { - return capabilityList + list, _ := capStrList() + return list } // NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet // present). func NormalizeCapabilities(caps []string) ([]string, error) { + all, err := capStrList() + if err != nil { + return nil, err + } normalized := make([]string, 0, len(caps)) for _, c := range caps { c = strings.ToUpper(c) @@ -106,19 +99,23 @@ func NormalizeCapabilities(caps []string) ([]string, error) { if !strings.HasPrefix(c, "CAP_") { c = "CAP_" + c } - if !slices.Contains(capabilityList, c) { + if !slices.Contains(all, c) { return nil, fmt.Errorf("%q: %w", c, ErrUnknownCapability) } normalized = append(normalized, c) } - sort.Strings(normalized) + slices.Sort(normalized) return normalized, nil } // ValidateCapabilities validates if caps only contains valid capabilities. func ValidateCapabilities(caps []string) error { + all, err := capStrList() + if err != nil { + return err + } for _, c := range caps { - if !slices.Contains(capabilityList, c) { + if !slices.Contains(all, c) { return fmt.Errorf("%q: %w", c, ErrUnknownCapability) } } @@ -130,7 +127,7 @@ func ValidateCapabilities(caps []string) error { // // Note that: // "ALL" in capAdd adds returns known capabilities -// "All" in capDrop returns only the capabilities specified in capAdd +// "All" in capDrop returns only the capabilities specified in capAdd. func MergeCapabilities(base, adds, drops []string) ([]string, error) { // Normalize the base capabilities base, err := NormalizeCapabilities(base) @@ -155,7 +152,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { return nil, errors.New("adding all caps and removing all caps not allowed") } // "Drop" all capabilities; return what's in capAdd instead - sort.Strings(capAdd) + slices.Sort(capAdd) return capAdd, nil } @@ -195,6 +192,6 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { } caps = append(caps, cap) } - sort.Strings(caps) + slices.Sort(caps) return caps, nil } diff --git a/tools/vendor/github.com/containers/common/pkg/completion/completion.go b/tools/vendor/go.podman.io/common/pkg/completion/completion.go similarity index 94% rename from tools/vendor/github.com/containers/common/pkg/completion/completion.go rename to tools/vendor/go.podman.io/common/pkg/completion/completion.go index 908d568ff..fef95b7f3 100644 --- a/tools/vendor/github.com/containers/common/pkg/completion/completion.go +++ b/tools/vendor/go.podman.io/common/pkg/completion/completion.go @@ -6,11 +6,11 @@ import ( "strings" "unicode" - "github.com/containers/common/pkg/capabilities" "github.com/spf13/cobra" + "go.podman.io/common/pkg/capabilities" ) -// FlagCompletions - hold flag completion functions to be applied later with CompleteCommandFlags() +// FlagCompletions - hold flag completion functions to be applied later with CompleteCommandFlags(). type FlagCompletions map[string]func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) // CompleteCommandFlags - Add completion functions for each flagname in FlagCompletions. @@ -22,7 +22,7 @@ func CompleteCommandFlags(cmd *cobra.Command, flags FlagCompletions) { /* Autocomplete Functions for cobra ValidArgsFunction */ -// AutocompleteNone - Block the default shell completion (no paths) +// AutocompleteNone - Block the default shell completion (no paths). 
func AutocompleteNone(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { return nil, cobra.ShellCompDirectiveNoFileComp } @@ -61,7 +61,7 @@ func AutocompleteCapabilities(_ *cobra.Command, _ []string, toComplete string) ( return completions, cobra.ShellCompDirectiveNoFileComp } -// autocompleteSubIDName - autocomplete the names in /etc/subuid or /etc/subgid +// autocompleteSubIDName - autocomplete the names in /etc/subuid or /etc/subgid. func autocompleteSubIDName(filename string) ([]string, cobra.ShellCompDirective) { file, err := os.Open(filename) if err != nil { @@ -92,7 +92,7 @@ func AutocompleteSubuidName(_ *cobra.Command, _ []string, _ string) ([]string, c return autocompleteSubIDName("/etc/subuid") } -// AutocompleteArch - Autocomplete platform supported by container engines +// AutocompletePlatform - Autocomplete platform supported by container engines. func AutocompletePlatform(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { completions := []string{ "linux/386", @@ -114,7 +114,7 @@ func AutocompletePlatform(_ *cobra.Command, _ []string, _ string) ([]string, cob return completions, cobra.ShellCompDirectiveNoFileComp } -// AutocompleteArch - Autocomplete architectures supported by container engines +// AutocompleteArch - Autocomplete architectures supported by container engines. func AutocompleteArch(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { completions := []string{ "386", @@ -134,19 +134,19 @@ func AutocompleteArch(_ *cobra.Command, _ []string, _ string) ([]string, cobra.S return completions, cobra.ShellCompDirectiveNoFileComp } -// AutocompleteOS - Autocomplete OS supported by container engines +// AutocompleteOS - Autocomplete OS supported by container engines. func AutocompleteOS(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { completions := []string{"linux", "windows"} return completions, cobra.ShellCompDirectiveNoFileComp } // AutocompleteJSONFormat - Autocomplete format flag option. -// -> "json" +// -> "json". func AutocompleteJSONFormat(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { return []string{"json"}, cobra.ShellCompDirectiveNoFileComp } -// AutocompleteOneArg - Autocomplete one random arg +// AutocompleteOneArg - Autocomplete one random arg. 
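These autocomplete functions plug directly into cobra; a brief sketch (the command and flag names are hypothetical):

capCmd := &cobra.Command{
	Use:               "inspect-caps [CAPABILITY]",
	ValidArgsFunction: completion.AutocompleteCapabilities,
}
flagCompletions := completion.FlagCompletions{
	"format": completion.AutocompleteJSONFormat,
	"os":     completion.AutocompleteOS,
}
completion.CompleteCommandFlags(capCmd, flagCompletions)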
func AutocompleteOneArg(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) { if len(args) == 1 { return nil, cobra.ShellCompDirectiveDefault diff --git a/tools/vendor/github.com/containers/common/pkg/password/password_supported.go b/tools/vendor/go.podman.io/common/pkg/password/password_supported.go similarity index 100% rename from tools/vendor/github.com/containers/common/pkg/password/password_supported.go rename to tools/vendor/go.podman.io/common/pkg/password/password_supported.go diff --git a/tools/vendor/github.com/containers/common/pkg/password/password_windows.go b/tools/vendor/go.podman.io/common/pkg/password/password_windows.go similarity index 100% rename from tools/vendor/github.com/containers/common/pkg/password/password_windows.go rename to tools/vendor/go.podman.io/common/pkg/password/password_windows.go diff --git a/tools/vendor/github.com/containers/image/v5/LICENSE b/tools/vendor/go.podman.io/image/v5/LICENSE similarity index 100% rename from tools/vendor/github.com/containers/image/v5/LICENSE rename to tools/vendor/go.podman.io/image/v5/LICENSE diff --git a/tools/vendor/go.podman.io/image/v5/copy/blob.go b/tools/vendor/go.podman.io/image/v5/copy/blob.go new file mode 100644 index 000000000..a881e0623 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/copy/blob.go @@ -0,0 +1,187 @@ +package copy + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/sirupsen/logrus" + "go.podman.io/image/v5/internal/private" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" +) + +// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest, +// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, +// perhaps (de/re/)compressing it if canModifyBlob, +// and returns a complete blobInfo of the copied blob. +func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, + isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + // The copying happens through a pipeline of connected io.Readers; + // that pipeline is built by updating stream. + // === Input: srcReader + stream := sourceStream{ + reader: srcReader, + info: srcInfo, + } + + // === Process input through digestingReader to validate against the expected digest. + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. + // Note that for this check we don't use the stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err) + } + stream.reader = digestingReader + + // === Update progress bars + stream.reader = bar.ProxyReader(stream.reader) + + // === Decrypt the stream, if required. + decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Detect compression of the input stream. 
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor)) + originalLayerReader = stream.reader + } + + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + canModifyBlob := !isConfig && ic.cannotModifyManifestReason == "" + // === Deal with layer compression/decompression if necessary + compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression) + if err != nil { + return types.BlobInfo{}, err + } + defer compressionStep.close() + + // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided + if decryptionStep.decrypting && toEncrypt { + // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value. + // Before relaxing this, see the original pull request’s review if there are other reasons to reject this. + return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") + } + encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) + if err != nil { + return types.BlobInfo{}, err + } + + // === Report progress using the ic.c.options.Progress channel, if required. + if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { + progressReader := newProgressReader( + stream.reader, + ic.c.options.Progress, + ic.c.options.ProgressInterval, + srcInfo, + ) + defer progressReader.reportDone() + stream.reader = progressReader + } + + // === Finally, send the layer stream to dest. + options := private.PutBlobOptions{ + Cache: ic.c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, + } + if !isConfig { + options.LayerIndex = &layerIndex + } + destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err) + } + uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob) + + compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations) + decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation) + if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil { + return types.BlobInfo{}, err + } + + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume + // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. + // So, read everything from originalLayerReader, which will cause the rest to be + // sent there if we are not already at EOF. 
+	if getOriginalLayerCopyWriter != nil {
+		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+		_, err := io.Copy(io.Discard, originalLayerReader)
+		if err != nil {
+			return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err)
+		}
+	}
+
+	if digestingReader.validationFailed { // Coverage: This should never happen.
+		return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+	}
+	if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
+		return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
+	}
+	if digestingReader.validationSucceeded {
+		if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+
+	return uploadedInfo, nil
+}
+
+// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built.
+// This allows handlers of individual aspects to build the copy pipeline without _too much_
+// specific cooperation by the caller.
+//
+// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline
+// without specific knowledge of various aspects in copyBlobFromStream; that may come one day.
+type sourceStream struct {
+	reader io.Reader
+	info   types.BlobInfo // corresponding to the data available in reader.
+}
+
+// errorAnnotationReader wraps the io.Reader passed to PutBlob, annotating any error that happens during a read.
+// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
+type errorAnnotationReader struct {
+	reader io.Reader
+}
+
+// Read annotates any error that happens during the read.
+func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
+	n, err = r.reader.Read(b)
+	if err != nil && err != io.EOF {
+		return n, fmt.Errorf("happened during read: %w", err)
+	}
+	return n, err
+}
+
+// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo.
+func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo {
+	// The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size.
+	// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
+	// of the generic code in this package.
+	return types.BlobInfo{
+		Digest:      uploadedBlob.Digest,
+		Size:        uploadedBlob.Size,
+		URLs:        nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+		Annotations: inputInfo.Annotations,
+		MediaType:   inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto.
+		CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream.
+		CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream.
+		CryptoOperation:      inputInfo.CryptoOperation,      // Expected to be unset, and only updated by copyBlobFromStream.
+ } +} diff --git a/tools/vendor/go.podman.io/image/v5/copy/compression.go b/tools/vendor/go.podman.io/image/v5/copy/compression.go new file mode 100644 index 000000000..0ecc85186 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/copy/compression.go @@ -0,0 +1,434 @@ +package copy + +import ( + "errors" + "fmt" + "io" + "maps" + + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + internalblobinfocache "go.podman.io/image/v5/internal/blobinfocache" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/pkg/compression" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" + chunkedToc "go.podman.io/storage/pkg/chunked/toc" +) + +var ( + // defaultCompressionFormat is used if the destination transport requests + // compression, and the user does not explicitly instruct us to use an algorithm. + defaultCompressionFormat = &compression.Gzip + + // compressionBufferSize is the buffer size used to compress a blob + compressionBufferSize = 1048576 + + // expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed + // using the algorithm that the media type says it should be compressed with + expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{ + imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, + imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, + manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, + } +) + +// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. +type bpDetectCompressionStepData struct { + isCompressed bool + format compressiontypes.Algorithm // Valid if isCompressed + decompressor compressiontypes.DecompressorFunc // Valid if isCompressed + srcCompressorBaseVariantName string // Compressor name to possibly record in the blob info cache for the source blob. +} + +// blobPipelineDetectCompressionStep updates *stream to detect its current compression format. +// srcInfo is only used for error messages. +// Returns data for other steps. +func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) { + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. 
+ format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + stream.reader = reader + + if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName { + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return bpDetectCompressionStepData{}, err + } + if tocDigest != nil { + format = compression.ZstdChunked + } + + } + res := bpDetectCompressionStepData{ + isCompressed: decompressor != nil, + format: format, + decompressor: decompressor, + } + if res.isCompressed { + res.srcCompressorBaseVariantName = format.BaseVariantName() + } else { + res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed + } + + if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() { + logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name()) + } + return res, nil +} + +// bpCompressionStepData contains data that the copy pipeline needs about the compression step. +type bpCompressionStepData struct { + operation bpcOperation // What we are actually doing + uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) + uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. + uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. + srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob. + uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob. + uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob. + closers []io.Closer // Objects to close after the upload is done, if any. +} + +type bpcOperation int + +const ( + bpcOpInvalid bpcOperation = iota + bpcOpPreserveOpaque // We are preserving something where compression is not applicable + bpcOpPreserveCompressed // We are preserving a compressed, and decompressible, layer + bpcOpPreserveUncompressed // We are preserving an uncompressed, and compressible, layer + bpcOpCompressUncompressed // We are compressing uncompressed data + bpcOpRecompressCompressed // We are recompressing compressed data + bpcOpDecompressCompressed // We are decompressing compressed data +) + +// blobPipelineCompressionStep updates *stream to compress and/or decompress it. +// srcInfo is primarily used for error messages. +// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedBlobData, +// and must eventually call close. 
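In caller terms, the contract just described looks roughly like this sketch, mirroring copyBlobFromStream in blob.go above (the function itself follows):

step, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detected)
if err != nil {
	return types.BlobInfo{}, err
}
defer step.close() // release any wrapped readers once the upload is done
// ... upload stream.reader, e.g. via PutBlobWithOptions ...
step.updateCompressionEdits(&uploadedInfo.CompressionOperation,
	&uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)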
+func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo, + detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType) + if !layerCompressionChangeSupported { + logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType) + } + if canModifyBlob && layerCompressionChangeSupported { + for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){ + ic.bpcPreserveEncrypted, + ic.bpcCompressUncompressed, + ic.bpcRecompressCompressed, + ic.bpcDecompressCompressed, + } { + res, err := fn(stream, detected) + if err != nil { + return nil, err + } + if res != nil { + return res, nil + } + } + } + return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil +} + +// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if isOciEncrypted(stream.info.MediaType) { + // We can’t do anything with an encrypted blob unless decrypted. + logrus.Debugf("Using original blob without modification for encrypted blob") + return &bpCompressionStepData{ + operation: bpcOpPreserveOpaque, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + }, nil + } + return nil, nil +} + +// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed { + logrus.Debugf("Compressing blob on the fly") + var uploadedAlgorithm *compressiontypes.Algorithm + if ic.compressionFormat != nil { + uploadedAlgorithm = ic.compressionFormat + } else { + uploadedAlgorithm = defaultCompressionFormat + } + + reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm) + // Note: reader must be closed on all return paths. + stream.reader = reader + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? 
+ Digest: "", + Size: -1, + } + specificVariantName := uploadedAlgorithm.Name() + if specificVariantName == uploadedAlgorithm.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } + return &bpCompressionStepData{ + operation: bpcOpCompressUncompressed, + uploadedOperation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{reader}, + }, nil + } + return nil, nil +} + +// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && + ic.compressionFormat != nil && + (ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + decompressed, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + decompressed.Close() + } + }() + + recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat) + // Note: recompressed must be closed on all return paths. + stream.reader = recompressed + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. + Digest: "", + Size: -1, + } + specificVariantName := ic.compressionFormat.Name() + if specificVariantName == ic.compressionFormat.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } + succeeded = true + return &bpCompressionStepData{ + operation: bpcOpRecompressCompressed, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: ic.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{decompressed, recompressed}, + }, nil + } + return nil, nil +} + +// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed { + logrus.Debugf("Blob will be decompressed") + s, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + // Note: s must be closed on all return paths. + stream.reader = s + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. 
+ Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: bpcOpDecompressCompressed, + uploadedOperation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + closers: []io.Closer{s}, + }, nil + } + return nil, nil +} + +// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob. +// This does not change the sourceStream parameter; we include it for symmetry with other +// pipeline steps. +func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData, + layerCompressionChangeSupported bool) *bpCompressionStepData { + logrus.Debugf("Using original blob without modification") + // Remember if the original blob was compressed, and if so how, so that if + // LayerInfosForCopy() returned something that differs from what was in the + // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), + // it will be able to correctly derive the MediaType for the copied blob. + // + // But don’t touch blobs in objects where we can’t change compression, + // so that src.UpdatedImage() doesn’t fail; assume that for such blobs + // LayerInfosForCopy() should not be making any changes in the first place. + var bpcOp bpcOperation + var uploadedOp types.LayerCompression + var algorithm *compressiontypes.Algorithm + switch { + case !layerCompressionChangeSupported: + bpcOp = bpcOpPreserveOpaque + uploadedOp = types.PreserveOriginal + algorithm = nil + case detected.isCompressed: + bpcOp = bpcOpPreserveCompressed + uploadedOp = types.PreserveOriginal + algorithm = &detected.format + default: + bpcOp = bpcOpPreserveUncompressed + uploadedOp = types.Decompress + algorithm = nil + } + return &bpCompressionStepData{ + operation: bpcOp, + uploadedOperation: uploadedOp, + uploadedAlgorithm: algorithm, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + // We only record the base variant of the format on upload; we didn’t do anything with + // the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger + // reuse of any kind between the blob digest and the TOC digest. + uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + } +} + +// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary. +func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) { + *operation = d.uploadedOperation + // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. + *algorithm = d.uploadedAlgorithm + if *annotations == nil { + *annotations = map[string]string{} + } + maps.Copy(*annotations, d.uploadedAnnotations) +} + +// recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob) +// and the original srcInfo (which the caller guarantees has been validated). +// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties. 
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo, + encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error { + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptionStep.encrypting && !decryptionStep.decrypting { + // If d.operation != bpcOpPreserve*, we now have two reliable digest values: + // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader + // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob + // (because we set stream.info.Digest == "", this must have been computed afresh). + switch d.operation { + case bpcOpPreserveOpaque: + // No useful information + case bpcOpCompressUncompressed: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + if d.uploadedAnnotations != nil { + tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations) + if err != nil { + return fmt.Errorf("parsing just-created compression annotations: %w", err) + } + if tocDigest != nil { + c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest) + } + } + case bpcOpDecompressCompressed: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + case bpcOpRecompressCompressed, bpcOpPreserveCompressed: + // We know one or two compressed digests. BlobInfoCache associates compression variants via the uncompressed digest, + // and we don’t know that one. + // That also means that repeated copies with the same recompression don’t identify reuse opportunities (unless + // RecordDigestUncompressedPair was called for both compressed variants for some other reason). + case bpcOpPreserveUncompressed: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, srcInfo.Digest) + case bpcOpInvalid: + fallthrough + default: + return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) + } + } + if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" { + return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)", + d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName) + } + if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.uploadedCompressorBaseVariantName, + SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName, + SpecificVariantAnnotations: d.uploadedAnnotations, + }) + } + if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && + d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + // If the source is already using some TOC-dependent variant, we either copied the + // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest, + // so record neither the variant name, nor the TOC digest. 
+ c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
+ BaseVariantCompressor: d.srcCompressorBaseVariantName,
+ SpecificVariantCompressor: internalblobinfocache.UnknownCompression,
+ SpecificVariantAnnotations: nil,
+ })
+ }
+ return nil
+}
+
+// close closes objects that carry state throughout the compression/decompression operation.
+func (d *bpCompressionStepData) close() {
+ for _, c := range d.closers {
+ c.Close()
+ }
+}
+
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
+ compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, compressionBufferSize)
+
+ _, err = io.CopyBuffer(compressor, src, buf) // On success this sets err to nil, so the caller’s deferred CloseWithError(nil) behaves like dest.Close()
+ if err != nil {
+ compressor.Close()
+ return err
+ }
+
+ return compressor.Close()
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
+ err := errors.New("Internal error: unexpected panic in compressGoroutine")
+ defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel)
+}
+
+// compressedStream returns a stream with the contents of the input reader compressed using format, and a metadata map.
+// The caller must close the returned reader.
+// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
+func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
+ pipeReader, pipeWriter := io.Pipe()
+ annotations := map[string]string{}
+ // If the goroutine fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+ // e.g. because the caller has already closed pipeReader and further writing to the pipe has failed,
+ // we don’t care.
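The three functions above combine into a streaming pipeline via io.Pipe; the `go` call that completes compressedStream follows below. A minimal, self-contained sketch of the same pattern, using compress/gzip in place of the internal compression package; note how err must be captured by the deferred closure so that the value assigned later is the one passed to CloseWithError:

```go
// A sketch of the compress-through-a-pipe pattern, assuming compress/gzip
// as the compressor (the vendored code uses its own compression package).
package main

import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"strings"
)

func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
	// Start with a non-nil error so an unexpected panic still fails the reader side.
	err := errors.New("internal error: unexpected panic in compressGoroutine")
	defer func() {
		_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()

	gz := gzip.NewWriter(dest)
	if _, err = io.Copy(gz, src); err != nil {
		gz.Close()
		return
	}
	err = gz.Close() // on success, err becomes nil and the reader sees EOF
}

func main() {
	pr, pw := io.Pipe()
	go compressGoroutine(pw, strings.NewReader("hello, pipe"))

	compressed, err := io.ReadAll(pr) // reads until the goroutine closes pw
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d compressed bytes\n", len(compressed))
}
```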
+ go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
+ return pipeReader, annotations
+}
diff --git a/tools/vendor/go.podman.io/image/v5/copy/copy.go b/tools/vendor/go.podman.io/image/v5/copy/copy.go
new file mode 100644
index 000000000..eed5f8d96
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/copy/copy.go
@@ -0,0 +1,417 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "slices"
+ "time"
+
+ encconfig "github.com/containers/ocicrypt/config"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "go.podman.io/image/v5/docker/reference"
+ internalblobinfocache "go.podman.io/image/v5/internal/blobinfocache"
+ "go.podman.io/image/v5/internal/image"
+ "go.podman.io/image/v5/internal/imagedestination"
+ "go.podman.io/image/v5/internal/imagesource"
+ internalManifest "go.podman.io/image/v5/internal/manifest"
+ "go.podman.io/image/v5/internal/private"
+ "go.podman.io/image/v5/manifest"
+ "go.podman.io/image/v5/pkg/blobinfocache"
+ compression "go.podman.io/image/v5/pkg/compression/types"
+ "go.podman.io/image/v5/signature"
+ "go.podman.io/image/v5/signature/signer"
+ "go.podman.io/image/v5/transports"
+ "go.podman.io/image/v5/types"
+ "golang.org/x/sync/semaphore"
+ "golang.org/x/term"
+)
+
+var (
+ // ErrDecryptParamsMissing is returned if necessary decryption parameters are missing
+ ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
+
+ // maxParallelDownloads is used to limit the maximum number of parallel
+ // downloads. Let's follow Firefox by limiting it to 6.
+ maxParallelDownloads = uint(6)
+)
+
+const (
+ // CopySystemImage is the default value which, when set in
+ // Options.ImageListSelection, indicates that the caller expects only one
+ // image to be copied, so if the source reference refers to a list of
+ // images, one that matches the current system will be selected.
+ CopySystemImage ImageListSelection = iota
+ // CopyAllImages is a value which, when set in Options.ImageListSelection,
+ // indicates that the caller expects to copy multiple images, and if
+ // the source reference refers to a list, that the list and every image
+ // to which it refers will be copied. If the source reference refers
+ // to a list and the target reference cannot accept lists, an error
+ // should be returned.
+ CopyAllImages
+ // CopySpecificImages is a value which, when set in
+ // Options.ImageListSelection, indicates that the caller expects the
+ // source reference to be either a single image or a list of images,
+ // and if the source reference is a list, wants only specific instances
+ // from it copied (or none of them, if the list of instances to copy is
+ // empty), along with the list itself. If the target reference can
+ // only accept one image (i.e., it cannot accept lists), an error
+ // should be returned.
+ CopySpecificImages
+)
+
+// ImageListSelection is one of CopySystemImage, CopyAllImages, or
+// CopySpecificImages, to control whether, when the source reference is a list,
+// copy.Image() copies only an image which matches the current runtime
+// environment, or all images which match the supplied reference, or only
+// specific images from the source reference.
+type ImageListSelection int
+
+// Options allows supplying non-default configuration modifying the behavior of CopyImage.
+type Options struct {
+ RemoveSignatures bool // Remove any pre-existing signatures. Signers and SignBy… will still add a new signature.
+ // Signers to use to add signatures during the copy.
+ // Callers are still responsible for closing these Signer objects; they can be reused for multiple copy.Image operations in a row.
+ Signers []*signer.Signer
+ SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
+ SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path.
+ SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`.
+ SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination
+
+ ReportWriter io.Writer
+ SourceCtx *types.SystemContext
+ DestinationCtx *types.SystemContext
+ ProgressInterval time.Duration // time to wait between reports to signal the progress channel
+ Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+
+ // Preserve digests, and fail if we cannot.
+ PreserveDigests bool
+ // Manifest MIME type to use for the image, as set by the user. "" is the default, and means the manifest MIME type is detected automatically.
+ ForceManifestMIMEType string
+ ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
+ Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
+ // Give priority to pulling gzip images if multiple images are present when configured to OptionalBoolTrue;
+ // prefer the best compression if this is configured as OptionalBoolFalse. Choose automatically (and the choice may change over time)
+ // if this is set to OptionalBoolUndefined (which is the default behavior, and recommended for most callers).
+ // This only affects CopySystemImage.
+ PreferGzipInstances types.OptionalBool
+
+ // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted.
+ // The encryption options are derived from the construction of the EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, encrypt all layers.
+ // Integers in the slice represent 0-indexed layer indices, with support for negative
+ // indexing; i.e. 0 is the first layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
+ // OciDecryptConfig, if non-nil, contains the config that can be used to decrypt an image if it is
+ // encrypted. If nil, we do not attempt to decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
+
+ // A weighted semaphore to limit the amount of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored.
+ ConcurrentBlobCopiesSemaphore *semaphore.Weighted
+
+ // MaxParallelDownloads indicates the maximum layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0. Ignored if ConcurrentBlobCopiesSemaphore is set.
+ MaxParallelDownloads uint
+
+ // When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
+ // exists (and is equivalent), making the eventual (no-op) copy more performant for this case. Enabling the option
+ // is slightly pessimistic if the destination image doesn't exist, or is not equivalent.
+ OptimizeDestinationImageAlreadyExists bool
+
+ // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
+ // to not indicate "nondistributable".
+ DownloadForeignLayers bool
+
+ // Contains a slice of OptionCompressionVariant; copy will ensure that for each platform
+ // in the manifest list, a variant with the requested compression will exist.
+ // Invalid when copying a non-multi-architecture image. That will probably
+ // change in the future.
+ EnsureCompressionVariantsExist []OptionCompressionVariant
+ // ForceCompressionFormat ensures that the compression algorithm set in
+ // DestinationCtx.CompressionFormat is used exclusively, and blobs of other
+ // compression algorithms are not reused.
+ ForceCompressionFormat bool
+
+ // ReportResolvedReference, if set, asks the destination transport to store
+ // a “resolved” (more detailed) reference to the created image
+ // into the value this option points to.
+ // What “resolved” means is transport-specific.
+ // Most transports don’t support this, and cause the value to be set to nil.
+ //
+ // For the containers-storage: transport, the reference contains an image ID,
+ // so that storage.ResolveReference returns exactly the created image.
+ // WARNING: It is unspecified whether the reference also contains a reference.Named element.
+ ReportResolvedReference *types.ImageReference
+
+ // DestinationTimestamp, if set, will force timestamps of content created in the destination to this value.
+ // Most transports don't support this.
+ //
+ // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry
+ // (but not a timestamp of the created archive file).
+ DestinationTimestamp *time.Time
+}
+
+// OptionCompressionVariant allows supplying information about
+// the compression algorithm and compression level selected by the
+// end-user. Refer to EnsureCompressionVariantsExist to know
+// more about its usage.
+type OptionCompressionVariant struct {
+ Algorithm compression.Algorithm
+ Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance
+}
+
+// copier allows us to keep track of diffID values for blobs, and other
+// data shared across one or more images in a possible manifest list.
+// The owner must call close() when done.
+type copier struct {
+ policyContext *signature.PolicyContext
+ dest private.ImageDestination
+ rawSource private.ImageSource
+ options *Options // never nil
+
+ reportWriter io.Writer
+ progressOutput io.Writer
+
+ unparsedToplevel *image.UnparsedImage // for rawSource
+ blobInfoCache internalblobinfocache.BlobInfoCache2
+ concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
+ signers []*signer.Signer // Signers to use to create new signatures for the image
+ signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
+} + +// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions +func shouldRequireCompressionFormatMatch(options *Options) (bool, error) { + if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) { + return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format") + } + return options.ForceCompressionFormat, nil +} + +// Image copies image from srcRef to destRef, using policyContext to validate +// source image admissibility. It returns the manifest which was written to +// the new copy of the image. +func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) { + if options == nil { + options = &Options{} + } + + if err := validateImageListSelection(options.ImageListSelection); err != nil { + return nil, err + } + + reportWriter := io.Discard + + if options.ReportWriter != nil { + reportWriter = options.ReportWriter + } + + // safeClose amends retErr with an error from c.Close(), if any. + safeClose := func(name string, c io.Closer) { + err := c.Close() + if err == nil { + return + } + // Do not use %w for err as we don't want it to be unwrapped by callers. + if retErr != nil { + retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr) + } else { + retErr = fmt.Errorf(" (%s: %s)", name, err.Error()) + } + } + + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + if err != nil { + return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err) + } + dest := imagedestination.FromPublic(publicDest) + defer safeClose("dest", dest) + + publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + if err != nil { + return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err) + } + rawSource := imagesource.FromPublic(publicRawSource) + defer safeClose("src", rawSource) + + // If reportWriter is not a TTY (e.g., when piping to a file), do not + // print the progress bars to avoid long and hard to parse output. + // Instead use printCopyInfo() to print single line "Copying ..." messages. + progressOutput := reportWriter + if !isTTY(reportWriter) { + progressOutput = io.Discard + } + + c := &copier{ + policyContext: policyContext, + dest: dest, + rawSource: rawSource, + options: options, + + reportWriter: reportWriter, + progressOutput: progressOutput, + + unparsedToplevel: image.UnparsedInstance(rawSource, nil), + // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more). + // Conceptually the cache settings should be in copy.Options instead. + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + } + defer c.close() + c.blobInfoCache.Open() + defer c.blobInfoCache.Close() + + // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. 
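The block that follows caps concurrent blob copies with a weighted semaphore. A minimal sketch of that throttling pattern, with a hypothetical copyBlob worker standing in for the real transfer (requires Go 1.22 for the integer range loop):

```go
// A sketch of weighted-semaphore throttling, as used for parallel blob copies.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"
)

// copyBlob is a hypothetical stand-in for the actual blob transfer.
func copyBlob(id int) {
	time.Sleep(10 * time.Millisecond)
	fmt.Printf("copied blob %d\n", id)
}

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(6) // mirrors maxParallelDownloads

	var wg sync.WaitGroup
	for i := range 20 {
		// Acquire before launching, so at most 6 copies run at once.
		if err := sem.Acquire(ctx, 1); err != nil {
			panic(err) // ctx was cancelled
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer sem.Release(1)
			copyBlob(i)
		}()
	}
	wg.Wait()
}
```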
+ if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { + c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore + if c.concurrentBlobCopiesSemaphore == nil { + max := c.options.MaxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max)) + } + } else { + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1)) + if c.options.ConcurrentBlobCopiesSemaphore != nil { + if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err) + } + defer c.options.ConcurrentBlobCopiesSemaphore.Release(1) + } + } + + if err := c.setupSigners(); err != nil { + return nil, err + } + + multiImage, err := isMultiImage(ctx, c.unparsedToplevel) + if err != nil { + return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err) + } + + if !multiImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + // The simple case: just copy a single image. + single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) + if err != nil { + return nil, err + } + copiedManifest = single.manifest + } else if c.options.ImageListSelection == CopySystemImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that + // matches the current system to copy, and copy it. + mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err) + } + manifestList, err := internalManifest.ListFromBlob(mfest, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err) + } + instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx + if err != nil { + return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err) + } + logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) + unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) + single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) + if err != nil { + return nil, fmt.Errorf("copying system image from manifest list: %w", err) + } + copiedManifest = single.manifest + } else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */ + // If we were asked to copy multiple images and can't, that's an error. 
+ if !supportsMultipleImages(c.dest) { + return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) + } + // Copy some or all of the images. + switch c.options.ImageListSelection { + case CopyAllImages: + logrus.Debugf("Source is a manifest list; copying all instances") + case CopySpecificImages: + logrus.Debugf("Source is a manifest list; copying some instances") + } + if copiedManifest, err = c.copyMultipleImages(ctx); err != nil { + return nil, err + } + } + + if options.ReportResolvedReference != nil { + *options.ReportResolvedReference = nil // The default outcome, if not specifically supported by the transport. + } + if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{ + UnparsedToplevel: c.unparsedToplevel, + ReportResolvedReference: options.ReportResolvedReference, + Timestamp: options.DestinationTimestamp, + }); err != nil { + return nil, fmt.Errorf("committing the finished image: %w", err) + } + + return copiedManifest, nil +} + +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...any) { + fmt.Fprintf(c.reportWriter, format, a...) +} + +// close tears down state owned by copier. +func (c *copier) close() { + for i, s := range c.signersToClose { + if err := s.Close(); err != nil { + logrus.Warnf("Error closing per-copy signer %d: %v", i+1, err) + } + } +} + +// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value +func validateImageListSelection(selection ImageListSelection) error { + switch selection { + case CopySystemImage, CopyAllImages, CopySpecificImages: + return nil + default: + return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection) + } +} + +// Checks if the destination supports accepting multiple images by checking if it can support +// manifest types that are lists of other manifests. +func supportsMultipleImages(dest types.ImageDestination) bool { + mtypes := dest.SupportedManifestMIMETypes() + if len(mtypes) == 0 { + // Anything goes! + return true + } + return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage) +} + +// isTTY returns true if the io.Writer is a file and a tty. +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + return term.IsTerminal(int(f.Fd())) + } + return false +} diff --git a/tools/vendor/go.podman.io/image/v5/copy/digesting_reader.go b/tools/vendor/go.podman.io/image/v5/copy/digesting_reader.go new file mode 100644 index 000000000..4c6ba82ee --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/copy/digesting_reader.go @@ -0,0 +1,62 @@ +package copy + +import ( + "fmt" + "hash" + "io" + + digest "github.com/opencontainers/go-digest" +) + +type digestingReader struct { + source io.Reader + digester digest.Digester + hash hash.Hash + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool +} + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. 
+// (neither is set if EOF is never reached).
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+ var digester digest.Digester
+ if err := expectedDigest.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err)
+ }
+ digestAlgorithm := expectedDigest.Algorithm()
+ if !digestAlgorithm.Available() {
+ return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm)
+ }
+ digester = digestAlgorithm.Digester()
+
+ return &digestingReader{
+ source: source,
+ digester: digester,
+ hash: digester.Hash(),
+ expectedDigest: expectedDigest,
+ validationFailed: false,
+ }, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+ n, err := d.source.Read(p)
+ if n > 0 {
+ if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil {
+ // Coverage: This should not happen, the hash.Hash interface requires
+ // d.digest.Write to never return an error, and the io.Writer interface
+ // requires n2 == len(input) if no error is returned.
+ return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err)
+ }
+ }
+ if err == io.EOF {
+ actualDigest := d.digester.Digest()
+ if actualDigest != d.expectedDigest {
+ d.validationFailed = true
+ return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+ }
+ d.validationSucceeded = true
+ }
+ return n, err
+}
diff --git a/tools/vendor/go.podman.io/image/v5/copy/encryption.go b/tools/vendor/go.podman.io/image/v5/copy/encryption.go
new file mode 100644
index 000000000..59aecf1a1
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/copy/encryption.go
@@ -0,0 +1,138 @@
+package copy
+
+import (
+ "fmt"
+ "maps"
+ "slices"
+ "strings"
+
+ "github.com/containers/ocicrypt"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "go.podman.io/image/v5/types"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+ return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+ layers := i.LayerInfos()
+ return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
+ return isOciEncrypted(l.MediaType)
+ })
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+ decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+ if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil {
+ return &bpDecryptionStepData{
+ decrypting: false,
+ }, nil
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ desc := imgspecv1.Descriptor{
+ Annotations: stream.info.Annotations,
+ }
+ // DecryptLayer supposedly returns a digest of the decrypted stream.
+ // In practice, that value is never set in the current implementation.
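That distrust of transform-reported digests is why the pipeline relies on digestingReader above, computing the digest itself while reading. A minimal sketch of the same verify-while-reading idea, using go-digest directly on an in-memory blob:

```go
// A sketch of digest verification during reading, using go-digest.
package main

import (
	"bytes"
	"fmt"
	"io"

	digest "github.com/opencontainers/go-digest"
)

func verifyAgainst(src io.Reader, expected digest.Digest) error {
	digester := expected.Algorithm().Digester()
	// TeeReader feeds every byte we consume into the digester's hash.
	if _, err := io.Copy(io.Discard, io.TeeReader(src, digester.Hash())); err != nil {
		return err
	}
	if actual := digester.Digest(); actual != expected {
		return fmt.Errorf("digest did not match, expected %s, got %s", expected, actual)
	}
	return nil
}

func main() {
	blob := []byte("some layer data")
	expected := digest.FromBytes(blob)
	if err := verifyAgainst(bytes.NewReader(blob), expected); err != nil {
		panic(err)
	}
	fmt.Println("digest verified:", expected)
}
```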
+ // And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
+ // i.e. it doesn’t authenticate the origin of the metadata in any way.
+ reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
+ if err != nil {
+ return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
+ return strings.HasPrefix(k, "org.opencontainers.image.enc")
+ })
+ return &bpDecryptionStepData{
+ decrypting: true,
+ }, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+ if d.decrypting {
+ *operation = types.Decrypt
+ }
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+ encrypting bool // We are actually encrypting the stream
+ finalizer ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+ decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
+ if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil {
+ return &bpEncryptionStepData{
+ encrypting: false,
+ }, nil
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ var annotations map[string]string
+ if !decryptionStep.decrypting {
+ annotations = srcInfo.Annotations
+ }
+ desc := imgspecv1.Descriptor{
+ MediaType: srcInfo.MediaType,
+ Digest: srcInfo.Digest,
+ Size: srcInfo.Size,
+ Annotations: annotations,
+ }
+ reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc)
+ if err != nil {
+ return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ return &bpEncryptionStepData{
+ encrypting: true,
+ finalizer: finalizer,
+ }, nil
+}
+
+// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
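EncryptLayer hands back a finalizer because the encryption annotations only exist once the whole stream has been consumed; updateCryptoOperationAndAnnotations (defined next) invokes it. A generic sketch of that finalizer pattern, with a hypothetical countingTransform standing in for a real encrypting reader:

```go
// A sketch of the consume-then-finalize pattern behind ocicrypt.EncryptLayerFinalizer.
package main

import (
	"fmt"
	"io"
	"strings"
)

type finalizer func() (map[string]string, error)

// countingTransform is a hypothetical stand-in: it wraps src and hands back a
// finalizer whose result is only meaningful after the reader has been drained.
func countingTransform(src io.Reader) (io.Reader, finalizer) {
	counter := &countingReader{src: src}
	fin := func() (map[string]string, error) {
		return map[string]string{"example.bytes.seen": fmt.Sprint(counter.n)}, nil
	}
	return counter, fin
}

type countingReader struct {
	src io.Reader
	n   int64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.src.Read(p)
	c.n += int64(n)
	return n, err
}

func main() {
	reader, fin := countingTransform(strings.NewReader("payload"))
	if _, err := io.Copy(io.Discard, reader); err != nil { // consume the stream first
		panic(err)
	}
	annotations, err := fin()
	if err != nil {
		panic(err)
	}
	fmt.Println(annotations) // map[example.bytes.seen:7]
}
```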
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
+ if !d.encrypting {
+ return nil
+ }
+
+ encryptAnnotations, err := d.finalizer()
+ if err != nil {
+ return fmt.Errorf("Unable to finalize encryption: %w", err)
+ }
+ *operation = types.Encrypt
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ maps.Copy(*annotations, encryptAnnotations)
+ return nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/copy/manifest.go b/tools/vendor/go.podman.io/image/v5/copy/manifest.go
new file mode 100644
index 000000000..2fce87a48
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/copy/manifest.go
@@ -0,0 +1,253 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ internalManifest "go.podman.io/image/v5/internal/manifest"
+ "go.podman.io/image/v5/internal/set"
+ "go.podman.io/image/v5/manifest"
+ compressiontypes "go.podman.io/image/v5/pkg/compression/types"
+ "go.podman.io/image/v5/types"
+)
+
+// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
+// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location.
+// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
+var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
+
+// allManifestMIMETypes lists all possible manifest MIME types.
+var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType}
+
+// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
+type orderedSet struct {
+ list []string
+ included *set.Set[string]
+}
+
+// newOrderedSet creates a correctly initialized orderedSet.
+// [Sometimes it would be really nice if Golang had constructors…]
+func newOrderedSet() *orderedSet {
+ return &orderedSet{
+ list: []string{},
+ included: set.New[string](),
+ }
+}
+
+// append adds s to the end of os, only if it is not included already.
+func (os *orderedSet) append(s string) {
+ if !os.included.Contains(s) {
+ os.list = append(os.list, s)
+ os.included.Add(s)
+ }
+}
+
+// determineManifestConversionInputs contains the inputs for determineManifestConversion.
+type determineManifestConversionInputs struct {
+ srcMIMEType string // MIME type of the input manifest
+
+ destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
+
+ forceManifestMIMEType string // User’s choice of forced manifest MIME type
+ requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explicitly_ requested one.
+ requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+}
+
+// manifestConversionPlan contains the decisions made by determineManifestConversion.
+type manifestConversionPlan struct {
+ // The preferred manifest MIME type (whether we are converting to it or using it unmodified).
+ // We compute this only to show it in error messages; without having to add this context
+ // in an error message, we would be happy enough to know only that no conversion is needed.
+ preferredMIMEType string
+ preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step.
+ otherMIMETypeCandidates []string // Other possible alternatives, in order
+}
+
+// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in.
+func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) {
+ srcType := in.srcMIMEType
+ normalizedSrcType := manifest.NormalizedMIMEType(srcType)
+ if srcType != normalizedSrcType {
+ logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType)
+ srcType = normalizedSrcType
+ }
+
+ destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes
+ if in.forceManifestMIMEType != "" {
+ destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
+ }
+ if len(destSupportedManifestMIMETypes) == 0 {
+ destSupportedManifestMIMETypes = allManifestMIMETypes
+ }
+
+ restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
+ supportedByDest := set.New[string]()
+ for _, t := range destSupportedManifestMIMETypes {
+ if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) {
+ continue
+ }
+ if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) {
+ continue
+ }
+ supportedByDest.Add(t)
+ }
+ if supportedByDest.Empty() {
+ if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes
+ return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty")
+ }
+ // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved.
+
+ // destSupportedManifestMIMETypes has three possible origins:
+ if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
+ switch {
+ case in.requiresOCIEncryption && restrictiveCompressionRequired:
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both",
+ in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
+ case in.requiresOCIEncryption:
+ return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
+ in.forceManifestMIMEType)
+ case restrictiveCompressionRequired:
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it",
+ in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
+ default:
+ return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason")
+ }
+ }
+ if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestTypes
+ if !restrictiveCompressionRequired {
+ // Coverage: This should never happen.
+ // If we have not rejected for compression reasons, we must have rejected due to encryption, but
+ // allManifestTypes includes OCI, which supports encryption.
+ return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") + } + // This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz. + return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name()) + } + // 3. destination accepts a restricted list of mime types + destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ") + switch { + case in.requiresOCIEncryption && restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both", + in.requestedCompressionFormat.Name(), destMIMEList) + case in.requiresOCIEncryption: + return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", + destMIMEList) + case restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it", + in.requestedCompressionFormat.Name(), destMIMEList) + default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired + return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm") + } + } + + // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. + // So, build a list of types to try in order of decreasing preference. + // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, + // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. + // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types + // and never attempt the other one. + prioritizedTypes := newOrderedSet() + + // First of all, prefer to keep the original manifest unmodified. + if supportedByDest.Contains(srcType) { + prioritizedTypes.append(srcType) + } + if in.cannotModifyManifestReason != "" { + // We could also drop this check and have the caller + // make the choice; it is already doing that to an extent, to improve error + // messages. But it is nice to hide the “if we can't modify, do no conversion” + // special case in here; the caller can then worry (or not) only about a good UI. + logrus.Debugf("We can't modify the manifest, hoping for the best...") + return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying? + preferredMIMEType: srcType, + otherMIMETypeCandidates: []string{}, + }, nil + } + + // Then use our list of preferred types. + for _, t := range preferredManifestMIMETypes { + if supportedByDest.Contains(t) { + prioritizedTypes.append(t) + } + } + + // Finally, try anything else the destination supports. 
+ for _, t := range destSupportedManifestMIMETypes {
+ if supportedByDest.Contains(t) {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
+ if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes, and supportedByDest which is a subset of it, are not empty (or we would have exited above), so this should never happen.
+ return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
+ }
+ res := manifestConversionPlan{
+ preferredMIMEType: prioritizedTypes.list[0],
+ otherMIMETypeCandidates: prioritizedTypes.list[1:],
+ }
+ res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
+ if !res.preferredMIMETypeNeedsConversion {
+ logrus.Debugf("... will first try using the original manifest unmodified")
+ }
+ return res, nil
+}
+
+// isMultiImage returns true if img is a list of images
+func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
+ _, mt, err := img.Manifest(ctx)
+ if err != nil {
+ return false, err
+ }
+ return manifest.MIMETypeIsMultiImage(mt), nil
+}
+
+// determineListConversion takes the current MIME type of a list of manifests,
+// the list of MIME types supported for a given destination, and a possible
+// forced value, and returns the MIME type to which we should convert the list
+// of manifests (regardless of whether we are converting to it or using it
+// unmodified) and a slice of other list types which might be supported by the
+// destination.
+func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) {
+ // If there's no list of supported types, then anything we support is expected to be supported.
+ if len(destSupportedMIMETypes) == 0 {
+ destSupportedMIMETypes = manifest.SupportedListMIMETypes
+ }
+ // If we're forcing it, replace the list of supported types with the forced value.
+ if forcedListMIMEType != "" {
+ destSupportedMIMETypes = []string{forcedListMIMEType}
+ }
+
+ prioritizedTypes := newOrderedSet()
+ // The first priority is the current type, if it's in the list, since that lets us avoid a
+ // conversion that isn't strictly necessary.
+ if slices.Contains(destSupportedMIMETypes, currentListMIMEType) {
+ prioritizedTypes.append(currentListMIMEType)
+ }
+ // Pick out the other list types that we support.
+ for _, t := range destSupportedMIMETypes {
+ if manifest.MIMETypeIsMultiImage(t) {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(prioritizedTypes.list, ", "))
+ if len(prioritizedTypes.list) == 0 {
+ return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+ }
+ selectedType := prioritizedTypes.list[0]
+ otherSupportedTypes := prioritizedTypes.list[1:]
+ if selectedType != currentListMIMEType {
+ logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
+ } else {
+ logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes)
+ }
+ // Done.
+ return selectedType, otherSupportedTypes, nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/copy/multiple.go b/tools/vendor/go.podman.io/image/v5/copy/multiple.go
new file mode 100644
index 000000000..0594aa20f
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/copy/multiple.go
@@ -0,0 +1,354 @@
+package copy
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+ "strings"
+
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "go.podman.io/image/v5/docker/reference"
+ "go.podman.io/image/v5/internal/image"
+ internalManifest "go.podman.io/image/v5/internal/manifest"
+ "go.podman.io/image/v5/internal/set"
+ "go.podman.io/image/v5/manifest"
+ "go.podman.io/image/v5/pkg/compression"
+)
+
+type instanceCopyKind int
+
+const (
+ instanceCopyCopy instanceCopyKind = iota
+ instanceCopyClone
+)
+
+type instanceCopy struct {
+ op instanceCopyKind
+ sourceDigest digest.Digest
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyCopy`
+ copyForceCompressionFormat bool
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyClone`
+ cloneArtifactType string
+ cloneCompressionVariant OptionCompressionVariant
+ clonePlatform *imgspecv1.Platform
+ cloneAnnotations map[string]string
+}
+
+// internal type only to make imgspecv1.Platform comparable
+type platformComparable struct {
+ architecture string
+ os string
+ osVersion string
+ osFeatures string
+ variant string
+}
+
+// Converts imgspecv1.Platform to a comparable format.
+func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable {
+ if platform == nil {
+ return platformComparable{}
+ }
+ osFeatures := slices.Clone(platform.OSFeatures)
+ sort.Strings(osFeatures)
+ return platformComparable{architecture: platform.Architecture,
+ os: platform.OS,
+ // This is strictly speaking ambiguous, fields of OSFeatures can contain a ','. Probably good enough for now.
+ osFeatures: strings.Join(osFeatures, ","),
+ osVersion: platform.OSVersion,
+ variant: platform.Variant,
+ }
+}
+
+// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests
+func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) {
+ res := make(map[platformComparable]*set.Set[string])
+ for _, instanceDigest := range instanceDigests {
+ instanceDetails, err := list.Instance(instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+ }
+ platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+ platformSet, ok := res[platform]
+ if !ok {
+ platformSet = set.New[string]()
+ res[platform] = platformSet
+ }
+ platformSet.AddSeq(slices.Values(instanceDetails.ReadOnly.CompressionAlgorithmNames))
+ }
+ return res, nil
+}
+
+func validateCompressionVariantExists(input []OptionCompressionVariant) error {
+ for _, option := range input {
+ _, err := compression.AlgorithmByName(option.Algorithm.Name())
+ if err != nil {
+ return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err)
+ }
+ }
+ return nil
+}
+
+// prepareInstanceCopies prepares a list of instances which need to be copied to the manifest list.
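Slices are not comparable in Go, which is why platformV1ToPlatformComparable above flattens OSFeatures into a sorted, comma-joined string. A minimal sketch of that map-key normalization, with Platform as a local stand-in for imgspecv1.Platform:

```go
// A sketch of flattening a struct containing a slice into a comparable map key.
package main

import (
	"fmt"
	"slices"
	"strings"
)

// Platform is a local stand-in for imgspecv1.Platform.
type Platform struct {
	OS, Architecture, Variant string
	OSFeatures                []string
}

type platformKey struct {
	os, arch, variant, osFeatures string
}

func keyOf(p Platform) platformKey {
	features := slices.Clone(p.OSFeatures)
	slices.Sort(features) // normalize ordering so equivalent platforms compare equal
	return platformKey{
		os:         p.OS,
		arch:       p.Architecture,
		variant:    p.Variant,
		osFeatures: strings.Join(features, ","), // ambiguous if a feature contains ','
	}
}

func main() {
	a := Platform{OS: "linux", Architecture: "amd64", OSFeatures: []string{"b", "a"}}
	b := Platform{OS: "linux", Architecture: "amd64", OSFeatures: []string{"a", "b"}}
	seen := map[platformKey]int{}
	seen[keyOf(a)]++
	seen[keyOf(b)]++
	fmt.Println(seen[keyOf(a)]) // 2: same platform after normalization
}
```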
+func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) {
+ res := []instanceCopy{}
+ if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 {
+ // The list can already contain a compressed instance for a compression selected in `EnsureCompressionVariantsExist`.
+ // It’s unclear what it means when `CopySpecificImages` includes an instance in options.Instances,
+ // EnsureCompressionVariantsExist asks for an instance with some compression,
+ // and an instance with that compression already exists but is not included in options.Instances.
+ // We might define the semantics and implement this in the future.
+ return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages")
+ }
+ err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist)
+ if err != nil {
+ return res, err
+ }
+ compressionsByPlatform, err := platformCompressionMap(list, instanceDigests)
+ if err != nil {
+ return nil, err
+ }
+ for i, instanceDigest := range instanceDigests {
+ if options.ImageListSelection == CopySpecificImages &&
+ !slices.Contains(options.Instances, instanceDigest) {
+ logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
+ continue
+ }
+ instanceDetails, err := list.Instance(instanceDigest)
+ if err != nil {
+ return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+ }
+ forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, instanceCopy{
+ op: instanceCopyCopy,
+ sourceDigest: instanceDigest,
+ copyForceCompressionFormat: forceCompressionFormat,
+ })
+ platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+ compressionList := compressionsByPlatform[platform]
+ for _, compressionVariant := range options.EnsureCompressionVariantsExist {
+ if !compressionList.Contains(compressionVariant.Algorithm.Name()) {
+ res = append(res, instanceCopy{
+ op: instanceCopyClone,
+ sourceDigest: instanceDigest,
+ cloneArtifactType: instanceDetails.ReadOnly.ArtifactType,
+ cloneCompressionVariant: compressionVariant,
+ clonePlatform: instanceDetails.ReadOnly.Platform,
+ cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations),
+ })
+ // add current compression to the list so that we don’t create duplicate clones
+ compressionList.Add(compressionVariant.Algorithm.Name())
+ }
+ }
+ }
+ return res, nil
+}
+
+// copyMultipleImages copies some or all of an image list's instances, using
+// c.policyContext to validate source image admissibility.
+func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) {
+ // Parse the list and get a copy of the original value after it's re-encoded.
+ manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest list: %w", err) + } + originalList, err := internalManifest.ListFromBlob(manifestList, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err) + } + updatedList := originalList.CloneInternal() + + sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel, + "Getting image list signatures", + "Checking if image list destination supports signatures") + if err != nil { + return nil, err + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copySingleImage. + cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if c.options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } + + // Determine if we'll need to convert the manifest list to a different format. + forceListMIMEType := c.options.ForceManifestMIMEType + switch forceListMIMEType { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + forceListMIMEType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest: + forceListMIMEType = imgspecv1.MediaTypeImageIndex + } + selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + if err != nil { + return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err) + } + if selectedListType != originalList.MIMEType() { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) + } + } + + // Copy each image, or just the ones we want to copy, in turn. + instanceDigests := updatedList.Instances() + instanceEdits := []internalManifest.ListEdit{} + instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options) + if err != nil { + return nil, fmt.Errorf("preparing instances for copy: %w", err) + } + c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests)) + for i, instance := range instanceCopyList { + // Update instances to be edited by their `ListOperation` and + // populate necessary fields. 
+ switch instance.op { + case instanceCopyCopy: + logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat}) + if err != nil { + return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpUpdate, + UpdateOldDigest: instance.sourceDigest, + UpdateDigest: updated.manifestDigest, + UpdateSize: int64(len(updated.manifest)), + UpdateCompressionAlgorithms: updated.compressionAlgorithms, + UpdateMediaType: updated.manifestMIMEType}) + case instanceCopyClone: + logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{ + requireCompressionFormatMatch: true, + compressionFormat: &instance.cloneCompressionVariant.Algorithm, + compressionLevel: instance.cloneCompressionVariant.Level}) + if err != nil { + return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpAdd, + AddDigest: updated.manifestDigest, + AddSize: int64(len(updated.manifest)), + AddMediaType: updated.manifestMIMEType, + AddArtifactType: instance.cloneArtifactType, + AddPlatform: instance.clonePlatform, + AddAnnotations: instance.cloneAnnotations, + AddCompressionAlgorithms: updated.compressionAlgorithms, + }) + default: + return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op) + } + } + + // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. + if err = updatedList.EditInstances(instanceEdits); err != nil { + return nil, fmt.Errorf("updating manifest list: %w", err) + } + + // Iterate through supported list types, preferred format first. + c.Printf("Writing manifest list to image destination\n") + var errs []string + for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { + var attemptedList internalManifest.ListPublic = updatedList + + logrus.Debugf("Trying to use manifest list type %s…", thisListType) + + // Perform the list conversion, if we need one. + if thisListType != updatedList.MIMEType() { + attemptedList, err = updatedList.ConvertToMIMEType(thisListType) + if err != nil { + return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err) + } + } + + // Check if the updates or a type conversion meaningfully changed the list of images + // by serializing them both so that we can compare them. 
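The serialize-and-compare step below exists because a manifest's digest is computed over its exact byte serialization: re-encoding even a semantically identical document can change the digest, which is why the code falls back to the original bytes when nothing meaningful changed. A small demonstration with go-digest:

```go
// Demonstrates that a digest is over bytes, not over document semantics.
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	original := []byte(`{"schemaVersion":2,"manifests":[]}`)
	reencoded := []byte(`{"manifests": [], "schemaVersion": 2}`) // same content, different bytes

	fmt.Println(digest.FromBytes(original))
	fmt.Println(digest.FromBytes(reencoded))
	fmt.Println(digest.FromBytes(original) == digest.FromBytes(reencoded)) // false
}
```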
+ attemptedManifestList, err := attemptedList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err) + } + originalManifestList, err := originalList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err) + } + + // If we can't just use the original value, but we have to change it, flag an error. + if !bytes.Equal(attemptedManifestList, originalManifestList) { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) + } + logrus.Debugf("Manifest list has been updated") + } else { + // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest. + attemptedManifestList = manifestList + } + + // Save the manifest list. + err = c.dest.PutManifest(ctx, attemptedManifestList, nil) + if err != nil { + logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err)) + continue + } + errs = nil + manifestList = attemptedManifestList + break + } + if errs != nil { + return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + + // Sign the manifest list. + newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity) + if err != nil { + return nil, err + } + sigs = append(slices.Clone(sigs), newSigs...) + + c.Printf("Storing list signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { + return nil, fmt.Errorf("writing signatures: %w", err) + } + + return manifestList, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/copy/progress_bars.go b/tools/vendor/go.podman.io/image/v5/copy/progress_bars.go new file mode 100644 index 000000000..533634682 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/copy/progress_bars.go @@ -0,0 +1,177 @@ +package copy + +import ( + "context" + "fmt" + "io" + "math" + "time" + + "github.com/vbauerster/mpb/v8" + "github.com/vbauerster/mpb/v8/decor" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/types" +) + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. 
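A minimal sketch of the pool/bar lifecycle contract described above, using mpb/v8 directly (the same AddBar/Abort/Wait calls this file relies on); every bar must complete or be aborted before pool.Wait() returns. Requires Go 1.22 for the integer range loop:

```go
// A sketch of the mpb pool/bar lifecycle: complete or abort every bar, then Wait.
package main

import (
	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	pool := mpb.New(mpb.WithWidth(40))

	bar := pool.AddBar(100,
		mpb.PrependDecorators(decor.Name("Copying blob ")),
	)
	defer pool.Wait()      // runs last: only after the bar has completed or aborted
	defer bar.Abort(false) // no-op if the bar already completed; prevents a Wait() hang otherwise

	for range 10 {
		bar.IncrBy(10) // reaches 100/100, triggering completion
	}
}
```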
+func (c *copier) newProgressPool() *mpb.Progress {
+ return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+ current := decor.SizeB1024(s.Current)
+ total := decor.SizeB1024(s.Total)
+ refill := decor.SizeB1024(s.Refill)
+ if s.Total == 0 {
+ return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill)
+ }
+ // If we didn't do a partial fetch, don't output a distracting "skipped: 0.0b = 0.00%".
+ if s.Refill == 0 {
+ return fmt.Sprintf("%.1f / %.1f", current, total)
+ }
+ percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+ return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+ *mpb.Bar
+ originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo()
+// to print a single line instead.
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// we don't leave progress bars in a partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
+ // shortDigestLen is the length of the digest used for blobs.
+ const shortDigestLen = 12
+
+ if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return nil, err
+ }
+ prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+ // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+ maxPrefixLen := len("Copying blob ") + shortDigestLen
+ if len(prefix) > maxPrefixLen {
+ prefix = prefix[:maxPrefixLen]
+ }
+
+ // onComplete will replace prefix once the bar/spinner has completed
+ onComplete = prefix + " " + onComplete
+
+ // Use a normal progress bar when we know the size (i.e., size > 0).
+ // Otherwise, use a spinner to indicate that something's happening.
+ var bar *mpb.Bar
+ if info.Size > 0 {
+ if partial {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.Any(customPartialBlobDecorFunc),
+ ),
+ )
+ } else {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+ decor.Name(" | "),
+ decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+ ),
+ )
+ }
+ } else {
+ bar = pool.New(0,
+ mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+ ),
+ )
+ }
+ return &progressBar{
+ Bar: bar,
+ originalSize: info.Size,
+ }, nil
+}
+
+// printCopyInfo prints a "Copying ..." message on the copier if the output is
+// set to `io.Discard`. In that case, the progress bars won't be rendered but
+// we still want to indicate when blobs and configs are copied.
+func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
+ if c.progressOutput == io.Discard {
+ c.Printf("Copying %s %s\n", kind, info.Digest)
+ }
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete;
+// it may do so by possibly advancing the current state if it is below the known total.
+func (bar *progressBar) mark100PercentComplete() {
+ if bar.originalSize > 0 {
+ // We can't call bar.SetTotal even if we wanted to; the total cannot be changed
+ // after a progress bar is created with a definite total.
+ bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+ } else {
+ // -1 = unknown size
+ // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+ // size (possible at least in theory), in mpb, zero-sized progress bars are treated
+ // as unknown size, in particular they are not configured to be marked as
+ // complete on bar.Current() reaching bar.total (because that would happen already
+ // when creating the progress bar).
+ // That means that we are both _allowed_ to call SetTotal, and we _have to_.
+ bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
+ }
+}
+
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
+// with the number of received bytes.
+type blobChunkAccessorProxy struct {
+ wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+ bar *progressBar // A progress bar updated with the number of bytes read so far
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
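Illustrative only: under the contract documented above, a caller could request the first MiB plus everything from offset 4 MiB to the end of the blob like this (ImageSourceChunk is the vendored internal/private type; the offsets are made up):

    chunks := []private.ImageSourceChunk{
    	{Offset: 0, Length: 1 << 20},
    	{Offset: 4 << 20, Length: math.MaxUint64}, // last chunk: fetch to the end of the blob
    }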
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + start := time.Now() + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + // do not update the progress bar if there is a chunk with unknown length. + if c.Length == math.MaxUint64 { + return rc, errs, err + } + total += int64(c.Length) + } + s.bar.EwmaIncrInt64(total, time.Since(start)) + } + return rc, errs, err +} diff --git a/tools/vendor/go.podman.io/image/v5/copy/progress_channel.go b/tools/vendor/go.podman.io/image/v5/copy/progress_channel.go new file mode 100644 index 000000000..f57646156 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/copy/progress_channel.go @@ -0,0 +1,79 @@ +package copy + +import ( + "io" + "time" + + "go.podman.io/image/v5/types" +) + +// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval. +type progressReader struct { + source io.Reader + channel chan<- types.ProgressProperties + interval time.Duration + artifact types.BlobInfo + lastUpdate time.Time + offset uint64 + offsetUpdate uint64 +} + +// newProgressReader creates a new progress reader for: +// `source`: The source when internally reading bytes +// `channel`: The reporter channel to which the progress will be sent +// `interval`: The update interval to indicate how often the progress should update +// `artifact`: The blob metadata which is currently being progressed +func newProgressReader( + source io.Reader, + channel chan<- types.ProgressProperties, + interval time.Duration, + artifact types.BlobInfo, +) *progressReader { + // The progress reader constructor informs the progress channel + // that a new artifact will be read + channel <- types.ProgressProperties{ + Event: types.ProgressEventNewArtifact, + Artifact: artifact, + } + return &progressReader{ + source: source, + channel: channel, + interval: interval, + artifact: artifact, + lastUpdate: time.Now(), + offset: 0, + offsetUpdate: 0, + } +} + +// reportDone indicates to the internal channel that the progress has been +// finished +func (r *progressReader) reportDone() { + r.channel <- types.ProgressProperties{ + Event: types.ProgressEventDone, + Artifact: r.artifact, + Offset: r.offset, + OffsetUpdate: r.offsetUpdate, + } +} + +// Read continuously reads bytes into the progress reader and reports the +// status via the internal channel +func (r *progressReader) Read(p []byte) (int, error) { + n, err := r.source.Read(p) + r.offset += uint64(n) + r.offsetUpdate += uint64(n) + + // Fire the progress reader in the provided interval + if time.Since(r.lastUpdate) > r.interval { + r.channel <- types.ProgressProperties{ + Event: types.ProgressEventRead, + Artifact: r.artifact, + Offset: r.offset, + OffsetUpdate: r.offsetUpdate, + } + r.lastUpdate = time.Now() + r.offsetUpdate = 0 + } + return n, err +} diff --git a/tools/vendor/go.podman.io/image/v5/copy/sign.go b/tools/vendor/go.podman.io/image/v5/copy/sign.go new file mode 100644 index 000000000..96038f3f8 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/copy/sign.go @@ -0,0 +1,115 @@ +package copy + +import ( + "context" + "fmt" + + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/private" + internalsig "go.podman.io/image/v5/internal/signature" + internalSigner "go.podman.io/image/v5/internal/signer" + "go.podman.io/image/v5/signature/sigstore" + 
"go.podman.io/image/v5/signature/simplesigning" + "go.podman.io/image/v5/transports" +) + +// setupSigners initializes c.signers. +func (c *copier) setupSigners() error { + c.signers = append(c.signers, c.options.Signers...) + // c.signersToClose is intentionally not updated with c.options.Signers. + + // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need + // to clean up any created signers on failure. + + if c.options.SignBy != "" { + opts := []simplesigning.Option{ + simplesigning.WithKeyFingerprint(c.options.SignBy), + } + if c.options.SignPassphrase != "" { + opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase)) + } + signer, err := simplesigning.NewSigner(opts...) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + if c.options.SignBySigstorePrivateKeyFile != "" { + signer, err := sigstore.NewSigner( + sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase), + ) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + return nil +} + +// sourceSignatures returns signatures from unparsedSource, +// and verifies that they can be used (to avoid copying a large image when we +// can tell in advance that it would ultimately fail) +func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, + gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) { + var sigs []internalsig.Signature + if c.options.RemoveSignatures { + sigs = []internalsig.Signature{} + } else { + c.Printf("%s\n", gettingSignaturesMessage) + s, err := unparsed.UntrustedSignatures(ctx) + if err != nil { + return nil, fmt.Errorf("reading signatures: %w", err) + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("%s\n", checkingDestMessage) + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err) + } + } + return sigs, nil +} + +// createSignatures creates signatures for manifest and an optional identity. +func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) { + if len(c.signers) == 0 { + // We must exit early here, otherwise copies with no Docker reference wouldn’t be possible. 
+ return nil, nil + } + + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String()) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + } + + res := make([]internalsig.Signature, 0, len(c.signers)) + for signerIndex, signer := range c.signers { + msg := internalSigner.ProgressMessage(signer) + if len(c.signers) == 1 { + c.Printf("Creating signature: %s\n", msg) + } else { + c.Printf("Creating signature %d: %s\n", signerIndex+1, msg) + } + newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity) + if err != nil { + if len(c.signers) == 1 { + return nil, fmt.Errorf("creating signature: %w", err) + } else { + return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err) + } + } + res = append(res, newSig) + } + return res, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/copy/single.go b/tools/vendor/go.podman.io/image/v5/copy/single.go new file mode 100644 index 000000000..5c81fd2d5 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/copy/single.go @@ -0,0 +1,1004 @@ +package copy + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "iter" + "maps" + "reflect" + "slices" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "github.com/vbauerster/mpb/v8" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/image" + "go.podman.io/image/v5/internal/pkg/platform" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/set" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/pkg/compression" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/transports" + "go.podman.io/image/v5/types" + chunkedToc "go.podman.io/storage/pkg/chunked/toc" +) + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src *image.SourcedImage + manifestConversionPlan manifestConversionPlan + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int + requireCompressionFormatMatch bool +} + +type copySingleImageOptions struct { + requireCompressionFormatMatch bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int +} + +// copySingleImageResult carries data produced by copySingleImage +type copySingleImageResult struct { + manifest []byte + manifestMIMEType string + manifestDigest digest.Digest + compressionAlgorithms []compressiontypes.Algorithm +} + +// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate +// source image admissibility. 
+func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(ctx, unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) + } + if multiImage { + return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + + // Please keep this policy check BEFORE reading any other information about the image. + // (The multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) + if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err) + } + src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's + // one item from a manifest list that matches it, accept that as a match. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + manifestList, _, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err) + } + matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + } + + if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil { + return copySingleImageResult{}, err + } + + sigs, err := c.sourceSignatures(ctx, src, + "Getting image source signatures", + "Checking if image destination supports signatures") + if err != nil { + return copySingleImageResult{}, err + } + + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. 
+ cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if c.options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // manifestConversionPlan and diffIDsAreNeeded are computed later + cannotModifyManifestReason: cannotModifyManifestReason, + requireCompressionFormatMatch: opts.requireCompressionFormatMatch, + } + if opts.compressionFormat != nil { + ic.compressionFormat = opts.compressionFormat + ic.compressionLevel = opts.compressionLevel + } else if c.options.DestinationCtx != nil { + // Note that compressionFormat and compressionLevel can be nil. + ic.compressionFormat = c.options.DestinationCtx.CompressionFormat + ic.compressionLevel = c.options.DestinationCtx.CompressionLevel + } + // HACK: Don’t combine zstd:chunked and encryption. + // zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption + // to support decrypting arbitrary subsets of the stream. That’s plausible but not supported using the encryption API we have. + // Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without + // encryption. (That can be determined from the unencrypted config anyway, but, still...) + // + // Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of + // hard-coding zstd:chunked / zstd. + if ic.c.options.OciEncryptLayers != nil { + format := ic.compressionFormat + if format == nil { + format = defaultCompressionFormat + } + if format.Name() == compressiontypes.ZstdChunkedAlgorithmName { + if ic.requireCompressionFormatMatch { + return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead") + } + logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead") + ic.compressionFormat = &compression.Zstd + } + } + + // Decide whether we can substitute blobs with semantic equivalents: + // - Don’t do that if we can’t modify the manifest at all + // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. 
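Before the substitution decision that follows, the zstd:chunked fallback from the HACK block above can be restated as a condensed sketch; requestedFormat and encrypting are hypothetical stand-ins for the surrounding fields, while defaultCompressionFormat, compressiontypes, and compression come from this file:

    format := requestedFormat // may be nil
    if format == nil {
    	format = defaultCompressionFormat
    }
    if encrypting && format.Name() == compressiontypes.ZstdChunkedAlgorithmName {
    	// Chunked layers are only useful for range-based reuse, which encryption defeats,
    	// and the TOC annotations would leak layer identity; plain zstd loses nothing here.
    	format = &compression.Zstd
    }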
+ ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0 + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return copySingleImageResult{}, err + } + + destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil + + ic.manifestConversionPlan, err = determineManifestConversion(determineManifestConversionInputs{ + srcMIMEType: ic.src.ManifestMIMEType, + destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), + forceManifestMIMEType: c.options.ForceManifestMIMEType, + requestedCompressionFormat: ic.compressionFormat, + requiresOCIEncryption: destRequiresOciEncryption, + cannotModifyManifestReason: ic.cannotModifyManifestReason, + }) + if err != nil { + return copySingleImageResult{}, err + } + // We set up this part of ic.manifestUpdates quite early, not just around the + // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code + // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based + // on the expected destination format. + if ic.manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = ic.manifestConversionPlan.preferredMIMEType + } + + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. + ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) + + // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal + if c.options.OptimizeDestinationImageAlreadyExists { + shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible + noPendingManifestUpdates := ic.noPendingManifestUpdates() + + logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch) + if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch { + matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance) + if err != nil { + logrus.Warnf("Failed to compare destination image manifest: %v", err) + return copySingleImageResult{}, err + } + + if matchedResult != nil { + c.Printf("Skipping: image already present at destination\n") + return *matchedResult, nil + } + } + } + + compressionAlgos, err := ic.copyLayers(ctx) + if err != nil { + return copySingleImageResult{}, err + } + + // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; + // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support + // without actually trying to upload something and getting a types.ManifestTypeRejectedError. + // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if + // we're altering how they're compressed. 
If the process succeeds, fine… + manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + wipResult := copySingleImageResult{ + manifest: manifestBytes, + manifestMIMEType: ic.manifestConversionPlan.preferredMIMEType, + manifestDigest: manifestDigest, + } + if err != nil { + logrus.Debugf("Writing manifest using preferred type %s failed: %v", ic.manifestConversionPlan.preferredMIMEType, err) + // … if it fails, and the failure is either because the manifest is rejected by the registry, or + // because we failed to create a manifest of the specified type because the specific manifest type + // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may + // have other options available that could still succeed. + var manifestTypeRejectedError types.ManifestTypeRejectedError + var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError + isManifestRejected := errors.As(err, &manifestTypeRejectedError) + isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError) + if (!isManifestRejected && !isCompressionIncompatible) || len(ic.manifestConversionPlan.otherMIMETypeCandidates) == 0 { + // We don’t have other options. + // In principle the code below would handle this as well, but the resulting error message is fairly ugly. + // Don’t bother the user with MIME types if we have no choice. + return copySingleImageResult{}, err + } + // If the original MIME type is acceptable, determineManifestConversion always uses it as ic.manifestConversionPlan.preferredMIMEType. + // So if we are here, we will definitely be trying to convert the manifest. + // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, + // so let’s bail out early and with a better error message. + if ic.cannotModifyManifestReason != "" { + return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err) + } + + // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. + errs := []string{fmt.Sprintf("%s(%v)", ic.manifestConversionPlan.preferredMIMEType, err)} + for _, manifestMIMEType := range ic.manifestConversionPlan.otherMIMETypeCandidates { + logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType + attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) + continue + } + + // We have successfully uploaded a manifest. + wipResult = copySingleImageResult{ + manifest: attemptedManifest, + manifestMIMEType: manifestMIMEType, + manifestDigest: attemptedManifestDigest, + } + errs = nil // Mark this as a success so that we don't abort below. + break + } + if errs != nil { + return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + } + if targetInstance != nil { + targetInstance = &wipResult.manifestDigest + } + + newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity) + if err != nil { + return copySingleImageResult{}, err + } + sigs = append(slices.Clone(sigs), newSigs...) 
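Both the manifest-list path earlier in this patch and the single-image path above use the same small pattern: an errs slice accumulates per-format failures and doubles as an "upload succeeded" flag once reset to nil. In isolation (tryUpload and candidates are hypothetical):

    errs := []string{}
    for _, mimeType := range candidates {
    	if err := tryUpload(mimeType); err != nil {
    		errs = append(errs, fmt.Sprintf("%s(%v)", mimeType, err))
    		continue
    	}
    	errs = nil // success: clear the accumulated failures
    	break
    }
    if errs != nil {
    	return fmt.Errorf("uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
    }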
+ + if len(sigs) > 0 { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { + return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err) + } + } + wipResult.compressionAlgorithms = compressionAlgos + res := wipResult // We are done + return res, nil +} + +// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary. +func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error { + ociConfig, configErr := src.OCIConfig(ctx) + // Do not fail on configErr here, this might be an artifact + // and maybe nothing needs this to be a container image and to process the config. + + if dest.MustMatchRuntimeOS() { + if configErr != nil { + return fmt.Errorf("parsing image configuration: %w", configErr) + } + wantedPlatforms := platform.WantedPlatforms(sys) + + if !slices.ContainsFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { + // For a transitional period, this might trigger warnings because the Variant + // field was added to OCI config only recently. If this turns out to be too noisy, + // revert this check to only look for (OS, Architecture). + return platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) + }) { + options := newOrderedSet() + for _, p := range wantedPlatforms { + options.append(fmt.Sprintf("%s+%s+%q", p.OS, p.Architecture, p.Variant)) + } + logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", + ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", ")) + } + } + + if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil { + return err + } + + return nil +} + +// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. +func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. + } + destRef := ic.c.dest.Reference().DockerReference() + if destRef == nil { + return nil // Destination does not care about Docker references + } + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { + return nil // No reference embedded in the manifest, or it matches destRef already. + } + + if ic.cannotModifyManifestReason != "" { + return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) + } + ic.manifestUpdates.EmbeddedDockerReference = destRef + return nil +} + +func (ic *imageCopier) noPendingManifestUpdates() bool { + return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) +} + +// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the +// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise. 
+func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) {
+ srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob)
+ if err != nil {
+ return nil, fmt.Errorf("calculating manifest digest: %w", err)
+ }
+
+ destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
+ if err != nil {
+ logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
+ return nil, nil
+ }
+ defer destImageSource.Close()
+
+ destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
+ if err != nil {
+ logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
+ return nil, nil
+ }
+
+ destManifestDigest, err := manifest.Digest(destManifest)
+ if err != nil {
+ return nil, fmt.Errorf("calculating manifest digest: %w", err)
+ }
+
+ logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
+ if srcManifestDigest != destManifestDigest {
+ return nil, nil
+ }
+
+ compressionAlgos := set.New[string]()
+ for _, srcInfo := range ic.src.LayerInfos() {
+ _, c, err := compressionEditsFromBlobInfo(srcInfo)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ compressionAlgos.Add(c.Name())
+ }
+ }
+
+ algos, err := algorithmsByNames(compressionAlgos.All())
+ if err != nil {
+ return nil, err
+ }
+
+ // Destination and source manifests, types and digests should all be equivalent
+ return &copySingleImageResult{
+ manifest: destManifest,
+ manifestMIMEType: destManifestType,
+ manifestDigest: srcManifestDigest,
+ compressionAlgorithms: algos,
+ }, nil
+}
+
+// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
+func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
+ srcInfos := ic.src.LayerInfos()
+ updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
+ if err != nil {
+ return nil, err
+ }
+ srcInfosUpdated := false
+ if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
+ }
+ srcInfos = updatedSrcInfos
+ srcInfosUpdated = true
+ }
+
+ type copyLayerData struct {
+ destInfo types.BlobInfo
+ diffID digest.Digest
+ err error
+ }
+
+ // The manifest is used to extract the information whether a given
+ // layer is empty.
+ man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+ manifestLayerInfos := man.LayerInfos()
+
+ // copyGroup is used to determine if all layers are copied
+ copyGroup := sync.WaitGroup{}
+
+ data := make([]copyLayerData, len(srcInfos))
+ copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
+ defer copyGroup.Done()
+ cld := copyLayerData{}
+ if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+ // DiffIDs are, currently, needed only when converting from schema1.
+ // In which case src.LayerInfos will not have URLs because schema1
+ // does not support them.
+ if ic.diffIDsAreNeeded { + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + } + } else { + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer) + } + data[index] = cld + } + + // Decide which layers to encrypt + layersToEncrypt := set.New[int]() + if ic.c.options.OciEncryptLayers != nil { + totalLayers := len(srcInfos) + for _, l := range *ic.c.options.OciEncryptLayers { + switch { + case l >= 0 && l < totalLayers: + layersToEncrypt.Add(l) + case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers + layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed. + default: + return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers) + } + } + + if len(*ic.c.options.OciEncryptLayers) == 0 { // “encrypt all layers” + for i := 0; i < len(srcInfos); i++ { + layersToEncrypt.Add(i) + } + } + } + + if err := func() error { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + + // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool. + defer copyGroup.Wait() + + for i, srcLayer := range srcInfos { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying layer: %w", err) + } + copyGroup.Add(1) + go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference()) + } + + // A call to copyGroup.Wait() is done at this point by the defer above. + return nil + }(); err != nil { + return nil, err + } + + compressionAlgos := set.New[string]() + destInfos := make([]types.BlobInfo, len(srcInfos)) + diffIDs := make([]digest.Digest, len(srcInfos)) + for i, cld := range data { + if cld.err != nil { + return nil, cld.err + } + if cld.destInfo.CompressionAlgorithm != nil { + compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name()) + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + + // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the + // OptimizeDestinationImageAlreadyExists short-circuit conditions + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + } + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { + ic.manifestUpdates.LayerInfos = destInfos + } + algos, err := algorithmsByNames(compressionAlgos.All()) + if err != nil { + return nil, err + } + return algos, nil +} + +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) +func layerDigestsDiffer(a, b []types.BlobInfo) bool { + return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool { + return a.Digest == b.Digest + }) +} + +// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, +// stores the resulting config and manifest to the destination, and returns the stored manifest +// and its digest. 
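A numeric illustration of the layersToEncrypt selection above (the reverse-indexing branch): for a five-layer image, an index of -1 selects the last layer and -5 the first, while -6 would fall through to the range error. A self-contained sketch:

    totalLayers := 5
    for _, l := range []int{0, 2, -1, -5} {
    	switch {
    	case l >= 0 && l < totalLayers:
    		fmt.Println("encrypt layer", l)
    	case l < 0 && l+totalLayers >= 0:
    		fmt.Println("encrypt layer", l+totalLayers) // -1 -> 4, -5 -> 0
    	}
    }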
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { + var pendingImage types.Image = ic.src + if !ic.noPendingManifestUpdates() { + if ic.cannotModifyManifestReason != "" { + return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) + } + if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { + // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. + // So, this can only happen if we are trying to upload using one of the other MIME type candidates. + // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. + // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. + return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) + } + pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) + if err != nil { + return nil, "", fmt.Errorf("creating an updated image manifest: %w", err) + } + pendingImage = pi + } + man, _, err := pendingImage.Manifest(ctx) + if err != nil { + return nil, "", fmt.Errorf("reading manifest: %w", err) + } + + if err := ic.copyConfig(ctx, pendingImage); err != nil { + return nil, "", err + } + + ic.c.Printf("Writing manifest to image destination\n") + manifestDigest, err := manifest.Digest(man) + if err != nil { + return nil, "", err + } + if instanceDigest != nil { + instanceDigest = &manifestDigest + } + if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", fmt.Errorf("writing manifest: %w", err) + } + return man, manifestDigest, nil +} + +// copyConfig copies config.json, if any, from src to dest. +func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error { + srcInfo := src.ConfigInfo() + if srcInfo.Digest != "" { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. 
+ return fmt.Errorf("copying config: %w", err) + } + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) + + destInfo, err := func() (types.BlobInfo, error) { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done") + if err != nil { + return types.BlobInfo{}, err + } + defer bar.Abort(false) + ic.c.printCopyInfo("config", srcInfo) + + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err) + } + + destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false) + if err != nil { + return types.BlobInfo{}, err + } + + bar.mark100PercentComplete() + return destInfo, nil + }() + if err != nil { + return err + } + if destInfo.Digest != srcInfo.Digest { + return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) + } + } + return nil +} + +// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. +// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. +type diffIDResult struct { + digest digest.Digest + err error +} + +// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable +// for types.BlobInfo. +func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) { + // This MIME type → compression mapping belongs in manifest-specific code in our manifest + // package (but we should preferably replace/change UpdatedImage instead of productizing + // this workaround). + switch srcInfo.MediaType { + case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip: + return types.PreserveOriginal, &compression.Gzip, nil + case imgspecv1.MediaTypeImageLayerZstd: + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.PreserveOriginal, nil, err + } + if tocDigest != nil { + return types.PreserveOriginal, &compression.ZstdChunked, nil + } + return types.PreserveOriginal, &compression.Zstd, nil + case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer: + return types.Decompress, nil, nil + default: + return types.PreserveOriginal, nil, nil + } +} + +// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, +// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded +// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil. +func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) { + // If the srcInfo doesn't contain compression information, try to compute it from the + // MediaType, which was either read from a manifest by way of LayerInfos() or constructed + // by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob, + // the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(), + // which uses the compression information to compute the updated MediaType values. 
+ // (Sadly UpdatedImage() is documented to not update MediaTypes from
+ // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
+ if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil {
+ op, algo, err := compressionEditsFromBlobInfo(srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, "", err
+ }
+ srcInfo.CompressionOperation = op
+ srcInfo.CompressionAlgorithm = algo
+ }
+
+ ic.c.printCopyInfo("blob", srcInfo)
+
+ diffIDIsNeeded := false
+ var cachedDiffID digest.Digest = ""
+ if ic.diffIDsAreNeeded {
+ cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded = cachedDiffID == ""
+ }
+ // When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+ // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+ // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+ encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil)
+ canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+ // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+ if canAvoidProcessingCompleteLayer {
+ canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+ logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+ srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+ canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
+
+ var requiredCompression *compressiontypes.Algorithm
+ if ic.requireCompressionFormatMatch {
+ requiredCompression = ic.compressionFormat
+ }
+
+ var tocDigest digest.Digest
+
+ // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
+ d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.BlobInfo{}, "", err + } + if d != nil { + tocDigest = *d + } + + reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ + Cache: ic.c.blobInfoCache, + CanSubstitute: canSubstitute, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...), + RequiredCompression: requiredCompression, + OriginalCompression: srcInfo.CompressionAlgorithm, + TOCDigest: tocDigest, + }) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err) + } + if reused { + logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) + if err := func() error { // A scope for defer + label := "skipped: already exists" + if reusedBlob.MatchedByTOCDigest { + label = "skipped: already exists (found by TOC)" + } + bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label) + if err != nil { + return err + } + defer bar.Abort(false) + bar.mark100PercentComplete() + return nil + }(); err != nil { + return types.BlobInfo{}, "", err + } + + // Throw an event that the layer has been skipped + if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { + ic.c.options.Progress <- types.ProgressProperties{ + Event: types.ProgressEventSkipped, + Artifact: srcInfo, + } + } + + return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil + } + } + + // A partial pull is managed by the destination storage, that decides what portions + // of the source file are not known yet and must be fetched. + // Attempt a partial only when the source allows to retrieve a blob partially and + // the destination has support for it. + if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() { + reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer + bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") + if err != nil { + return false, types.BlobInfo{}, err + } + hideProgressBar := true + defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily. + bar.Abort(hideProgressBar) + }() + + proxy := blobChunkAccessorProxy{ + wrapped: ic.c.rawSource, + bar: bar, + } + uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{ + Cache: ic.c.blobInfoCache, + EmptyLayer: emptyLayer, + LayerIndex: layerIndex, + }) + if err == nil { + if srcInfo.Size != -1 { + refill := srcInfo.Size - bar.Current() + bar.SetCurrent(srcInfo.Size) + bar.SetRefill(refill) + } + bar.mark100PercentComplete() + hideProgressBar = false + logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) + return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil + } + // On a "partial content not available" error, ignore it and retrieve the whole layer. 
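The refill arithmetic above is what feeds the "skipped: …" figure rendered by customPartialBlobDecorFunc earlier in this patch: whatever the destination did not need to fetch is reported as refill. With made-up numbers:

    size := int64(100 << 20)       // blob size, as in srcInfo.Size
    transferred := int64(30 << 20) // what bar.Current() would report after the partial pull
    refill := size - transferred   // 70 MiB skipped thanks to local reuse
    _ = refill                     // passed to bar.SetRefill() in the vendored code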
+ var perr private.ErrFallbackToOrdinaryLayerDownload + if errors.As(err, &perr) { + logrus.Debugf("Failed to retrieve partial blob: %v", err) + return false, types.BlobInfo{}, nil + } + return false, types.BlobInfo{}, err + }() + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("partial pull of blob %s: %w", srcInfo.Digest, err) + } + if reused { + return blobInfo, cachedDiffID, nil + } + } + + // Fallback: copy the layer, computing the diffID if we need to do so + return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer + bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") + if err != nil { + return types.BlobInfo{}, "", err + } + defer bar.Abort(false) + + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + defer srcStream.Close() + + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) + if err != nil { + return types.BlobInfo{}, "", err + } + + diffID := cachedDiffID + if diffIDIsNeeded { + select { + case <-ctx.Done(): + return types.BlobInfo{}, "", ctx.Err() + case diffIDResult := <-diffIDChan: + if diffIDResult.err != nil { + return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err) + } + logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } + diffID = diffIDResult.digest + } + } + + bar.mark100PercentComplete() + return blobInfo, diffID, nil + }() +} + +// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo. +func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo { + // The transport is only tasked with finding the blob, determining its size if necessary, and returning the right + // compression format if the blob was substituted. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + res := types.BlobInfo{ + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + // FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t + // (but those annotations being left with incorrect values should not break pulls). 
+ Annotations: maps.Clone(inputInfo.Annotations),
+ MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+ CompressionOperation: reusedBlob.CompressionOperation,
+ CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
+ CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
+ }
+ // The transport is only expected to fill CompressionOperation and CompressionAlgorithm
+ // if the blob was substituted; otherwise, it is optional, and if not set, fill it in based
+ // on what we know from the srcInfos we were given.
+ if reusedBlob.Digest == inputInfo.Digest {
+ if res.CompressionOperation == types.PreserveOriginal {
+ res.CompressionOperation = inputInfo.CompressionOperation
+ }
+ if res.CompressionAlgorithm == nil {
+ res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+ }
+ }
+ if len(reusedBlob.CompressionAnnotations) != 0 {
+ if res.Annotations == nil {
+ res.Annotations = map[string]string{}
+ }
+ maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations)
+ }
+ return res
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps (de/re/)compressing the stream,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+ diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+ var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
+ var diffIDChan chan diffIDResult
+
+ err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+ if diffIDIsNeeded {
+ diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+ pipeReader, pipeWriter := io.Pipe()
+ defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
+ // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+ // reading from the pipe has failed, we don’t really care.
+ // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+ // the return value includes an error indication, which we do check.
+ //
+ // If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
+ // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
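The lazily evaluated CloseWithError pattern noted in the defer above, in isolation; consume and doWork are hypothetical:

    err := errors.New("placeholder: overwritten before return")
    pipeReader, pipeWriter := io.Pipe()
    defer func() {
    	// Reads err when the deferred function runs, unlike `defer pipeWriter.CloseWithError(err)`,
    	// which would capture the placeholder value immediately.
    	_ = pipeWriter.CloseWithError(err)
    }()
    go consume(pipeReader)
    err = doWork(pipeWriter) // the reader observes doWork's final error; nil means a clean EOF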
+ go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader + return pipeWriter + } + } + + blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success + return blobInfo, diffIDChan, err + // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan +} + +// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. +func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) { + result := diffIDResult{ + digest: "", + err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), + } + defer func() { dest <- result }() + defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. + + result.digest, result.err = computeDiffID(layerStream, decompressor) +} + +// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. +func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) { + if decompressor != nil { + s, err := decompressor(stream) + if err != nil { + return "", err + } + defer s.Close() + stream = s + } + + return digest.Canonical.FromReader(stream) +} + +// algorithmsByNames returns slice of Algorithms from a sequence of Algorithm Names +func algorithmsByNames(names iter.Seq[string]) ([]compressiontypes.Algorithm, error) { + result := []compressiontypes.Algorithm{} + for name := range names { + algo, err := compression.AlgorithmByName(name) + if err != nil { + return nil, err + } + result = append(result, algo) + } + return result, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/directory/explicitfilepath/path.go b/tools/vendor/go.podman.io/image/v5/directory/explicitfilepath/path.go new file mode 100644 index 000000000..0f92d4a5e --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/directory/explicitfilepath/path.go @@ -0,0 +1,57 @@ +package explicitfilepath + +import ( + "fmt" + "os" + "path/filepath" + + "go.podman.io/storage/pkg/fileutils" +) + +// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. +// To do so, all elements of the input path must exist; as a special case, the final component may be +// a non-existent name (but not a symlink pointing to a non-existent name) +// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. +func ResolvePathToFullyExplicit(path string) (string, error) { + switch err := fileutils.Lexists(path); { + case err == nil: + return resolveExistingPathToFullyExplicit(path) + case os.IsNotExist(err): + parent, file := filepath.Split(path) + resolvedParent, err := resolveExistingPathToFullyExplicit(parent) + if err != nil { + return "", err + } + if file == "." || file == ".." { + // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well. + // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. 
+ // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components + // in the resulting path, and especially not at the end. + return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path) + } + resolvedPath := filepath.Join(resolvedParent, file) + // As a sanity check, ensure that there are no "." or ".." components. + cleanedResolvedPath := filepath.Clean(resolvedPath) + if cleanedResolvedPath != resolvedPath { + // Coverage: This should never happen. + return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) + } + return resolvedPath, nil + default: // err != nil, unrecognized + return "", err + } +} + +// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, +// but without the special case for missing final component. +func resolveExistingPathToFullyExplicit(path string) (string, error) { + resolved, err := filepath.Abs(path) + if err != nil { + return "", err // Coverage: This can fail only if os.Getwd() fails. + } + resolved, err = filepath.EvalSymlinks(resolved) + if err != nil { + return "", err + } + return filepath.Clean(resolved), nil +} diff --git a/tools/vendor/github.com/containers/image/v5/docker/body_reader.go b/tools/vendor/go.podman.io/image/v5/docker/body_reader.go similarity index 95% rename from tools/vendor/github.com/containers/image/v5/docker/body_reader.go rename to tools/vendor/go.podman.io/image/v5/docker/body_reader.go index 7d66ef6bc..3c612f268 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/body_reader.go +++ b/tools/vendor/go.podman.io/image/v5/docker/body_reader.go @@ -6,7 +6,7 @@ import ( "fmt" "io" "math" - "math/rand" + "math/rand/v2" "net/http" "net/url" "strconv" @@ -35,9 +35,9 @@ type bodyReader struct { body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close. lastRetryOffset int64 // -1 if N/A - lastRetryTime time.Time // time.Time{} if N/A + lastRetryTime time.Time // IsZero() if N/A offset int64 // Current offset within the blob - lastSuccessTime time.Time // time.Time{} if N/A + lastSuccessTime time.Time // IsZero() if N/A } // newBodyReader creates a bodyReader for request path in c. @@ -158,7 +158,7 @@ func (br *bodyReader) Read(p []byte) (int, error) { logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise } br.body = nil - time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede + time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede headers := map[string][]string{ "Range": {fmt.Sprintf("bytes=%d-", br.offset)}, @@ -197,7 +197,7 @@ func (br *bodyReader) Read(p []byte) (int, error) { consumedBody = true br.body = res.Body br.lastRetryOffset = br.offset - br.lastRetryTime = time.Time{} + br.lastRetryTime = time.Now() return n, nil default: @@ -207,9 +207,9 @@ func (br *bodyReader) Read(p []byte) (int, error) { } // millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value. 
-// If tm is time.Time{}, it returns math.NaN() +// If tm.IsZero(), it returns math.NaN() func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 { - if tm == (time.Time{}) { + if tm.IsZero() { return math.NaN() } return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0 @@ -229,7 +229,7 @@ func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL stri logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress) return nil } - if br.lastRetryTime == (time.Time{}) { + if br.lastRetryTime.IsZero() { logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr) return nil } diff --git a/tools/vendor/github.com/containers/image/v5/docker/cache.go b/tools/vendor/go.podman.io/image/v5/docker/cache.go similarity index 89% rename from tools/vendor/github.com/containers/image/v5/docker/cache.go rename to tools/vendor/go.podman.io/image/v5/docker/cache.go index 728d32d17..35fe37b38 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/cache.go +++ b/tools/vendor/go.podman.io/image/v5/docker/cache.go @@ -1,8 +1,8 @@ package docker import ( - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/types" ) // bicTransportScope returns a BICTransportScope appropriate for ref. diff --git a/tools/vendor/github.com/containers/image/v5/docker/distribution_error.go b/tools/vendor/go.podman.io/image/v5/docker/distribution_error.go similarity index 82% rename from tools/vendor/github.com/containers/image/v5/docker/distribution_error.go rename to tools/vendor/go.podman.io/image/v5/docker/distribution_error.go index 0a0064576..06a9593dc 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/tools/vendor/go.podman.io/image/v5/docker/distribution_error.go @@ -24,21 +24,31 @@ import ( "slices" "github.com/docker/distribution/registry/api/errcode" - dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" ) // errNoErrorsInBody is returned when an HTTP response body parses to an empty // errcode.Errors slice. var errNoErrorsInBody = errors.New("no error details found in HTTP response body") -// unexpectedHTTPStatusError is returned when an unexpected HTTP status is +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. -type unexpectedHTTPStatusError struct { - Status string +type UnexpectedHTTPStatusError struct { + // StatusCode code as returned from the server, so callers can + // match the exact code to make certain decisions if needed. + StatusCode int + // status text as displayed in the error message, not exposed as callers should match the number. + status string +} + +func (e UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.status) } -func (e *unexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +func newUnexpectedHTTPStatusError(resp *http.Response) UnexpectedHTTPStatusError { + return UnexpectedHTTPStatusError{ + StatusCode: resp.StatusCode, + status: resp.Status, + } } // unexpectedHTTPResponseError is returned when an expected HTTP status code @@ -114,10 +124,11 @@ func mergeErrors(err1, err2 error) error { // UnexpectedHTTPStatusError returned for response code outside of expected // range. 
func handleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { + switch { + case resp.StatusCode == http.StatusUnauthorized: // Check for OAuth errors within the `WWW-Authenticate` header first // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range dockerChallenge.ResponseChallenges(resp) { + for c := range iterateAuthHeader(resp.Header) { if c.Scheme == "bearer" { var err errcode.Error // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 @@ -138,11 +149,13 @@ func handleErrorResponse(resp *http.Response) error { return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) } } + fallthrough + case resp.StatusCode >= 400 && resp.StatusCode < 500: err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } - return &unexpectedHTTPStatusError{Status: resp.Status} + return newUnexpectedHTTPStatusError(resp) } diff --git a/tools/vendor/github.com/containers/image/v5/docker/docker_client.go b/tools/vendor/go.podman.io/image/v5/docker/docker_client.go similarity index 93% rename from tools/vendor/github.com/containers/image/v5/docker/docker_client.go rename to tools/vendor/go.podman.io/image/v5/docker/docker_client.go index 97d97fed5..a83e19a36 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/tools/vendor/go.podman.io/image/v5/docker/docker_client.go @@ -11,29 +11,30 @@ import ( "net/url" "os" "path/filepath" + "slices" "strconv" "strings" "sync" "time" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/internal/multierr" - "github.com/containers/image/v5/internal/set" - "github.com/containers/image/v5/internal/useragent" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/docker/config" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/pkg/tlsclientconfig" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/homedir" "github.com/docker/distribution/registry/api/errcode" v2 "github.com/docker/distribution/registry/api/v2" "github.com/docker/go-connections/tlsconfig" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/iolimits" + "go.podman.io/image/v5/internal/multierr" + "go.podman.io/image/v5/internal/set" + "go.podman.io/image/v5/internal/useragent" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/pkg/docker/config" + "go.podman.io/image/v5/pkg/sysregistriesv2" + "go.podman.io/image/v5/pkg/tlsclientconfig" + "go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/homedir" ) const ( @@ -42,7 +43,6 @@ const ( dockerRegistry = "registry-1.docker.io" resolvedPingV2URL = "%s://%s/v2/" - resolvedPingV1URL = "%s://%s/v1/_ping" tagsPath = "/v2/%s/tags/list" manifestPath = "/v2/%s/manifests/%s" blobsPath = "/v2/%s/blobs/%s" @@ -228,7 +228,12 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc registry = dockerRegistry } tlsClientConfig := &tls.Config{ - CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + // As of 2025-08, tlsconfig.ClientDefault() 
differs from Go 1.23 defaults only in CipherSuites; + // so, limit us to only using that value. If go-connections/tlsconfig changes its policy, we + // will want to consider that and make a decision whether to follow suit. + // There is some chance that eventually the Go default will be to require TLS 1.3, and at that point + // we might want to drop the dependency on go-connections entirely. + CipherSuites: tlsconfig.ClientDefault().CipherSuites, + } // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, @@ -476,12 +481,11 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) { } // Checks if the auth headers in the response contain an indication of a failed -// authorizdation because of an "insufficient_scope" error. If that's the case, +// authorization because of an "insufficient_scope" error. If that's the case, // returns the required scope to be used for fetching a new token. func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) { if res.StatusCode == http.StatusUnauthorized { - challenges := parseAuthHeader(res.Header) - for _, challenge := range challenges { + for challenge := range iterateAuthHeader(res.Header) { if challenge.Scheme == "bearer" { if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" { if scope, ok := challenge.Parameters["scope"]; ok && scope != "" { @@ -908,6 +912,10 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { } tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = c.tlsClientConfig + // if DockerProxyURL is set explicitly, use it instead of the system proxy + if c.sys != nil && c.sys.DockerProxyURL != nil { + tr.Proxy = http.ProxyURL(c.sys.DockerProxyURL) + } c.client = &http.Client{Transport: tr} ping := func(scheme string) error { @@ -925,7 +933,7 @@ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return registryHTTPResponseToError(resp) } - c.challenges = parseAuthHeader(resp.Header) + c.challenges = slices.Collect(iterateAuthHeader(resp.Header)) c.scheme = scheme c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" return nil @@ -936,34 +944,6 @@ } if err != nil { err = fmt.Errorf("pinging container registry %s: %w", c.registry, err) - if c.sys != nil && c.sys.DockerDisableV1Ping { - return err - } - // best effort to understand if we're talking to a V1 registry - pingV1 := func(scheme string) bool { - pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) - if err != nil { - return false - } - resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil) - if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err) - return false - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return false - } - return true - } - isV1 := pingV1("https") - if !isV1 && c.tlsClientConfig.InsecureSkipVerify { - isV1 = pingV1("http") - } - if isV1 { - err = ErrV1NotSupported - } } return err } @@ -1021,13 +1001,18 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R continue } if resp.StatusCode != http.StatusOK { - err := fmt.Errorf("error
fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + err := fmt.Errorf("error fetching external blob from %q: %w", u, newUnexpectedHTTPStatusError(resp)) remoteErrors = append(remoteErrors, err) logrus.Debug(err) resp.Body.Close() continue } - return resp.Body, getBlobSize(resp), nil + + size, err := getBlobSize(resp) + if err != nil { + size = -1 + } + return resp.Body, size, nil } if remoteErrors == nil { return nil, 0, nil // fallback to non-external blob @@ -1035,12 +1020,20 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors)) } -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 +func getBlobSize(resp *http.Response) (int64, error) { + hdrs := resp.Header.Values("Content-Length") + if len(hdrs) == 0 { + return -1, errors.New(`Missing "Content-Length" header in response`) + } + hdr := hdrs[0] // Equivalent to resp.Header.Get(…) + size, err := strconv.ParseInt(hdr, 10, 64) + if err != nil { // Go’s response reader should already reject such values. + return -1, err } - return size + if size < 0 { // '-' is not a valid character in Content-Length, so negative values are invalid. Go’s response reader should already reject such values. + return -1, fmt.Errorf(`Invalid negative "Content-Length" %q`, hdr) + } + return size, nil } // getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown). @@ -1071,7 +1064,10 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty return nil, 0, fmt.Errorf("fetching blob: %w", err) } cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) - blobSize := getBlobSize(res) + blobSize, err := getBlobSize(res) + if err != nil { + blobSize = -1 + } reconnectingReader, err := newBodyReader(ctx, c, path, res.Body) if err != nil { @@ -1085,6 +1081,15 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) { // Note that this copies all kinds of attachments: attestations, and whatever else is there, // not just signatures. We leave the signature consumers to decide based on the MIME type. 
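The lines just below add end-to-end digest verification to getOCIDescriptorContents: validate the digest string, confirm the algorithm is usable, and re-hash the downloaded payload. A condensed, runnable sketch of the same three checks against the real github.com/opencontainers/go-digest API (the helper name verifyPayload is ours, not part of the patch):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// verifyPayload re-checks a blob against its expected digest. Validate() must
// come first, because Algorithm() may panic on a malformed digest string, and
// Available() guards against algorithms the binary cannot compute.
func verifyPayload(expected digest.Digest, payload []byte) error {
	if err := expected.Validate(); err != nil {
		return fmt.Errorf("invalid digest %q: %w", expected, err)
	}
	alg := expected.Algorithm()
	if !alg.Available() {
		return fmt.Errorf("unsupported digest algorithm %q", alg)
	}
	if actual := alg.FromBytes(payload); actual != expected {
		return fmt.Errorf("digest mismatch, expected %q, got %q", expected, actual)
	}
	return nil
}

func main() {
	payload := []byte("hello")
	fmt.Println(verifyPayload(digest.FromBytes(payload), payload)) // <nil>
	fmt.Println(verifyPayload(digest.FromBytes([]byte("other")), payload))
}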
+ + if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check + return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err) + } + digestAlgorithm := desc.Digest.Algorithm() + if !digestAlgorithm.Available() { + return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String()) + } + reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache) if err != nil { return nil, err @@ -1094,6 +1099,10 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR if err != nil { return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err) } + actualDigest := digestAlgorithm.FromBytes(payload) + if actualDigest != desc.Digest { + return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String()) + } return payload, nil } diff --git a/tools/vendor/github.com/containers/image/v5/docker/docker_image.go b/tools/vendor/go.podman.io/image/v5/docker/docker_image.go similarity index 92% rename from tools/vendor/github.com/containers/image/v5/docker/docker_image.go rename to tools/vendor/go.podman.io/image/v5/docker/docker_image.go index 9741afc3f..1e5de65a7 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/tools/vendor/go.podman.io/image/v5/docker/docker_image.go @@ -9,12 +9,12 @@ import ( "net/url" "strings" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/image" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/image" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/types" ) // Image is a Docker-specific implementation of types.ImageCloser with a few extra methods @@ -91,6 +91,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. } for _, tag := range tagsHolder.Tags { if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values + // Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary + // to the spec, may include JSON null values in the list; and Go silently parses them as "". 
+ if tag == "" { + logrus.Debugf("Ignoring invalid empty tag") + continue + } // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory, // contrary to the tag format specified in // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 , diff --git a/tools/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/tools/vendor/go.podman.io/image/v5/docker/docker_image_dest.go similarity index 95% rename from tools/vendor/github.com/containers/image/v5/docker/docker_image_dest.go rename to tools/vendor/go.podman.io/image/v5/docker/docker_image_dest.go index ed3d4a2c0..86077fe93 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/tools/vendor/go.podman.io/image/v5/docker/docker_image_dest.go @@ -16,31 +16,32 @@ import ( "slices" "strings" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" - "github.com/containers/image/v5/internal/imagedestination/impl" - "github.com/containers/image/v5/internal/imagedestination/stubs" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/set" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/internal/streamdigest" - "github.com/containers/image/v5/internal/uploadreader" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" "github.com/docker/distribution/registry/api/errcode" v2 "github.com/docker/distribution/registry/api/v2" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/blobinfocache" + "go.podman.io/image/v5/internal/imagedestination/impl" + "go.podman.io/image/v5/internal/imagedestination/stubs" + "go.podman.io/image/v5/internal/iolimits" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/putblobdigest" + "go.podman.io/image/v5/internal/set" + "go.podman.io/image/v5/internal/signature" + "go.podman.io/image/v5/internal/streamdigest" + "go.podman.io/image/v5/internal/uploadreader" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/pkg/blobinfocache/none" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" ) type dockerImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize ref dockerReference @@ -242,8 +243,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference. defer res.Body.Close() switch res.StatusCode { case http.StatusOK: + size, err := getBlobSize(res) + if err != nil { + return false, -1, fmt.Errorf("determining size of blob %s in %s: %w", digest, repo.Name(), err) + } logrus.Debugf("... already exists") - return true, getBlobSize(res), nil + return true, size, nil case http.StatusUnauthorized: logrus.Debugf("... 
not authorized") return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) @@ -610,11 +615,11 @@ func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, si } switch { case d.c.supportsSignatures: - if err := d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest); err != nil { + if err := d.putSignaturesToAPIExtension(ctx, otherSignatures, *instanceDigest); err != nil { return err } case d.c.signatureBase != nil: - if err := d.putSignaturesToLookaside(signatures, *instanceDigest); err != nil { + if err := d.putSignaturesToLookaside(otherSignatures, *instanceDigest); err != nil { return err } default: @@ -923,13 +928,10 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context return nil } -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error { +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. 
rollback is allowed but not guaranteed) +func (d *dockerImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { return nil } diff --git a/tools/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/tools/vendor/go.podman.io/image/v5/docker/docker_image_src.go similarity index 90% rename from tools/vendor/github.com/containers/image/v5/docker/docker_image_src.go rename to tools/vendor/go.podman.io/image/v5/docker/docker_image_src.go index c8f6ba305..553dddeef 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/tools/vendor/go.podman.io/image/v5/docker/docker_image_src.go @@ -17,19 +17,19 @@ import ( "strings" "sync" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/imagesource/impl" - "github.com/containers/image/v5/internal/imagesource/stubs" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/regexp" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/imagesource/impl" + "go.podman.io/image/v5/internal/imagesource/stubs" + "go.podman.io/image/v5/internal/iolimits" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/signature" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/pkg/blobinfocache/none" + "go.podman.io/image/v5/pkg/sysregistriesv2" + "go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/regexp" ) // maxLookasideSignatures is an arbitrary limit for the total number of signatures we would try to read from a lookaside server, @@ -116,10 +116,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef // Don’t just build a string, try to preserve the typed error. primary := &attempts[len(attempts)-1] extras := []string{} - for i := 0; i < len(attempts)-1; i++ { + for _, attempt := range attempts[:len(attempts)-1] { // This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use. // The paired [] at least have some chance of being unambiguous. - extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err)) + extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err)) } return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err) } @@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } return } + if parts >= len(chunks) { + errs <- errors.New("too many parts returned by the server") + break + } s := signalCloseReader{ closed: make(chan struct{}), stream: p, @@ -464,26 +468,20 @@ func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanc var res []signature.Signature switch { case s.c.supportsSignatures: - sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigs...) 
case s.c.signatureBase != nil: - sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigs...) default: return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") } - sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest) - if err != nil { + if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil { return nil, err } - res = append(res, sigstoreSigs...) return res, nil } @@ -505,35 +503,35 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest * return manifest.Digest(s.cachedManifest) } -// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, -// which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, +// which is not nil, storing the signatures to *dest. +// On error, the contents of *dest are undefined. +func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } // NOTE: Keep this in sync with docs/signature-protocols.md! - signatures := []signature.Signature{} for i := 0; ; i++ { if i >= maxLookasideSignatures { - return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) + return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) } sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) if err != nil { - return nil, err + return err } signature, missing, err := s.getOneSignature(ctx, sigURL) if err != nil { - return nil, err + return err } if missing { break } - signatures = append(signatures, signature) + *dest = append(*dest, signature) } - return signatures, nil + return nil } // getOneSignature downloads one signature from sigURL, and returns (signature, false, nil) @@ -571,7 +569,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL logrus.Debugf("... got status 404, as expected = end of signatures") return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, fmt.Errorf("reading signature from %s: %w", sigURL.Redacted(), newUnexpectedHTTPStatusError(res)) } contentType := res.Header.Get("Content-Type") @@ -596,48 +594,51 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL } } -// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension. 
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, +// storing the signatures to *dest. +// On error, the contents of *dest are undefined. +func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest) if err != nil { - return nil, err + return err } - var sigs []signature.Signature for _, sig := range parsedBody.Signatures { if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content)) + *dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content)) } } - return sigs, nil + return nil } -func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { +// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention, +// storing the signatures to *dest. +// On error, the contents of *dest are undefined. +func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { if !s.c.useSigstoreAttachments { logrus.Debugf("Not looking for sigstore attachments: disabled by configuration") - return nil, nil + return nil } manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { - return nil, err + return err } ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest) if err != nil { - return nil, err + return err } if ociManifest == nil { - return nil, nil + return nil } logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers)) - res := []signature.Signature{} for layerIndex, layer := range ociManifest.Layers { // Note that this copies all kinds of attachments: attestations, and whatever else is there, // not just signatures. We leave the signature consumers to decide based on the MIME type. @@ -648,11 +649,11 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize, none.NoCache) if err != nil { - return nil, err + return err } - res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) + *dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) } - return res, nil + return nil } // deleteImage deletes the named image from the registry, if supported. 
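The refactor above converts the three signature helpers from “return a slice, let the caller concatenate” to “append into a shared *dest”, with the contract that *dest is undefined after an error. A toy illustration of that calling convention (the types here are ours; the vendored helpers append signature.Signature values instead):

package main

import (
	"errors"
	"fmt"
)

// appendEvens appends the even values of src to *dest. On error, *dest may
// hold partial results, mirroring the “contents of *dest are undefined”
// wording in the patch; that contract is what lets helpers skip cleanup.
func appendEvens(dest *[]int, src []int) error {
	for _, v := range src {
		if v < 0 {
			return errors.New("negative value")
		}
		if v%2 == 0 {
			*dest = append(*dest, v)
		}
	}
	return nil
}

func main() {
	var res []int
	for _, batch := range [][]int{{1, 2, 3, 4}, {6, 8}} {
		if err := appendEvens(&res, batch); err != nil {
			panic(err)
		}
	}
	fmt.Println(res) // [2 4 6 8]
}

One slice is built across several sources without intermediate copies, which is what the docker image source now does for lookaside, API-extension, and sigstore signatures.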
@@ -830,7 +831,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) handleBufferedNetworkReader(&br) }() - for i := uint(0); i < nBuffers; i++ { + for range nBuffers { b := bufferedNetworkReaderBuffer{ data: make([]byte, bufferSize), } diff --git a/tools/vendor/github.com/containers/image/v5/docker/docker_transport.go b/tools/vendor/go.podman.io/image/v5/docker/docker_transport.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/docker/docker_transport.go rename to tools/vendor/go.podman.io/image/v5/docker/docker_transport.go index c10463a43..5831dc3ce 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/docker_transport.go +++ b/tools/vendor/go.podman.io/image/v5/docker/docker_transport.go @@ -6,10 +6,10 @@ import ( "fmt" "strings" - "github.com/containers/image/v5/docker/policyconfiguration" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/transports" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/docker/policyconfiguration" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/transports" + "go.podman.io/image/v5/types" ) // UnknownDigestSuffix can be appended to a reference when the caller diff --git a/tools/vendor/github.com/containers/image/v5/docker/errors.go b/tools/vendor/go.podman.io/image/v5/docker/errors.go similarity index 94% rename from tools/vendor/github.com/containers/image/v5/docker/errors.go rename to tools/vendor/go.podman.io/image/v5/docker/errors.go index 4392f9d18..1ed40b87f 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/errors.go +++ b/tools/vendor/go.podman.io/image/v5/docker/errors.go @@ -12,6 +12,7 @@ import ( var ( // ErrV1NotSupported is returned when we're trying to talk to a // docker V1 registry. + // Deprecated: The V1 container registry detection is no longer performed, so this error is never returned. 
ErrV1NotSupported = errors.New("can't talk to a V1 container registry") // ErrTooManyRequests is returned when the status code returned is 429 ErrTooManyRequests = errors.New("too many requests to registry") @@ -39,10 +40,10 @@ func httpResponseToError(res *http.Response, context string) error { err := registryHTTPResponseToError(res) return ErrUnauthorizedForCredentials{Err: err} default: - if context != "" { - context += ": " + if context == "" { + return newUnexpectedHTTPStatusError(res) } - return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) + return fmt.Errorf("%s: %w", context, newUnexpectedHTTPStatusError(res)) } } diff --git a/tools/vendor/github.com/containers/image/v5/docker/paths_common.go b/tools/vendor/go.podman.io/image/v5/docker/paths_common.go similarity index 75% rename from tools/vendor/github.com/containers/image/v5/docker/paths_common.go rename to tools/vendor/go.podman.io/image/v5/docker/paths_common.go index 862e88039..d9993630b 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/paths_common.go +++ b/tools/vendor/go.podman.io/image/v5/docker/paths_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package docker diff --git a/tools/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/tools/vendor/go.podman.io/image/v5/docker/paths_freebsd.go similarity index 79% rename from tools/vendor/github.com/containers/image/v5/docker/paths_freebsd.go rename to tools/vendor/go.podman.io/image/v5/docker/paths_freebsd.go index 2bf27ac06..8f0f2eee8 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/paths_freebsd.go +++ b/tools/vendor/go.podman.io/image/v5/docker/paths_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package docker diff --git a/tools/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/tools/vendor/go.podman.io/image/v5/docker/policyconfiguration/naming.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go rename to tools/vendor/go.podman.io/image/v5/docker/policyconfiguration/naming.go index e1f1f1f2b..ddb0bce12 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go +++ b/tools/vendor/go.podman.io/image/v5/docker/policyconfiguration/naming.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/containers/image/v5/docker/reference" + "go.podman.io/image/v5/docker/reference" ) // DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup, diff --git a/tools/vendor/github.com/containers/image/v5/docker/reference/README.md b/tools/vendor/go.podman.io/image/v5/docker/reference/README.md similarity index 100% rename from tools/vendor/github.com/containers/image/v5/docker/reference/README.md rename to tools/vendor/go.podman.io/image/v5/docker/reference/README.md diff --git a/tools/vendor/github.com/containers/image/v5/docker/reference/helpers.go b/tools/vendor/go.podman.io/image/v5/docker/reference/helpers.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/docker/reference/helpers.go rename to tools/vendor/go.podman.io/image/v5/docker/reference/helpers.go diff --git a/tools/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/tools/vendor/go.podman.io/image/v5/docker/reference/normalize.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/docker/reference/normalize.go rename to 
tools/vendor/go.podman.io/image/v5/docker/reference/normalize.go diff --git a/tools/vendor/github.com/containers/image/v5/docker/reference/reference.go b/tools/vendor/go.podman.io/image/v5/docker/reference/reference.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/docker/reference/reference.go rename to tools/vendor/go.podman.io/image/v5/docker/reference/reference.go diff --git a/tools/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/tools/vendor/go.podman.io/image/v5/docker/reference/regexp-additions.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go rename to tools/vendor/go.podman.io/image/v5/docker/reference/regexp-additions.go diff --git a/tools/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/tools/vendor/go.podman.io/image/v5/docker/reference/regexp.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/docker/reference/regexp.go rename to tools/vendor/go.podman.io/image/v5/docker/reference/regexp.go index 76ba5c2d5..db656fe6a 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/reference/regexp.go +++ b/tools/vendor/go.podman.io/image/v5/docker/reference/regexp.go @@ -4,7 +4,7 @@ import ( "regexp" "strings" - storageRegexp "github.com/containers/storage/pkg/regexp" + storageRegexp "go.podman.io/storage/pkg/regexp" ) const ( diff --git a/tools/vendor/github.com/containers/image/v5/docker/registries_d.go b/tools/vendor/go.podman.io/image/v5/docker/registries_d.go similarity index 96% rename from tools/vendor/github.com/containers/image/v5/docker/registries_d.go rename to tools/vendor/go.podman.io/image/v5/docker/registries_d.go index 3619c3bae..53bbb53cb 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/registries_d.go +++ b/tools/vendor/go.podman.io/image/v5/docker/registries_d.go @@ -3,25 +3,26 @@ package docker import ( "errors" "fmt" + "io/fs" "net/url" "os" "path" "path/filepath" "strings" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/rootless" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/homedir" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/rootless" + "go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/homedir" "gopkg.in/yaml.v3" ) // systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. // You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path' +// -ldflags '-X go.podman.io/image/v5/docker.systemRegistriesDirPath=$your_path' var systemRegistriesDirPath = builtinRegistriesDirPath // builtinRegistriesDirPath is the path to registries.d. 
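The comment above documents overriding systemRegistriesDirPath at build time; only the package path in the -ldflags example changes with the module rename. In miniature, the mechanism looks like this (the package and variable below are hypothetical; for the vendored code the flag target is go.podman.io/image/v5/docker.systemRegistriesDirPath):

package main

import "fmt"

// -X can only rewrite a package-level string variable (not a constant,
// and not one initialized by a function call).
var registriesDirPath = "/etc/containers/registries.d"

func main() {
	// Build with:
	//   go build -ldflags '-X main.registriesDirPath=/custom/registries.d' .
	fmt.Println("registries.d path:", registriesDirPath)
}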
@@ -129,6 +130,11 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { configPath := filepath.Join(dirPath, configName) configBytes, err := os.ReadFile(configPath) if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // file must have been removed between the directory listing + // and the open call, ignore that as it is an expected race + continue + } return nil, err } diff --git a/tools/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/tools/vendor/go.podman.io/image/v5/docker/wwwauthenticate.go similarity index 92% rename from tools/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go rename to tools/vendor/go.podman.io/image/v5/docker/wwwauthenticate.go index 6bcb835b9..f5fed07b8 100644 --- a/tools/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go +++ b/tools/vendor/go.podman.io/image/v5/docker/wwwauthenticate.go @@ -4,6 +4,7 @@ package docker import ( "fmt" + "iter" "net/http" "strings" ) @@ -60,15 +61,17 @@ func init() { } } -func parseAuthHeader(header http.Header) []challenge { - challenges := []challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, challenge{Scheme: v, Parameters: p}) +func iterateAuthHeader(header http.Header) iter.Seq[challenge] { + return func(yield func(challenge) bool) { + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + if !yield(challenge{Scheme: v, Parameters: p}) { + return + } + } } } - return challenges } // parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions` diff --git a/tools/vendor/go.podman.io/image/v5/image/docker_schema2.go b/tools/vendor/go.podman.io/image/v5/image/docker_schema2.go new file mode 100644 index 000000000..617d499e4 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/image/docker_schema2.go @@ -0,0 +1,14 @@ +package image + +import ( + "go.podman.io/image/v5/internal/image" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var GzippedEmptyLayer = image.GzippedEmptyLayer + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest diff --git a/tools/vendor/go.podman.io/image/v5/image/sourced.go b/tools/vendor/go.podman.io/image/v5/image/sourced.go new file mode 100644 index 000000000..76375c5f5 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/image/sourced.go @@ -0,0 +1,37 @@ +// Package image consolidates knowledge about various container image formats +// (as opposed to image storage mechanisms, which are handled by types.ImageSource) +// and exposes all of them using a unified interface. +package image + +import ( + "context" + + "go.podman.io/image/v5/internal/image" + "go.podman.io/image/v5/types" +) + +// FromSource returns a types.ImageCloser implementation for the default instance of source. +// If source is a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate image instance. +// +// The caller must call .Close() on the returned ImageCloser.
+// +// FromSource “takes ownership” of the input ImageSource and will call src.Close() +// when the image is closed. (This does not prevent callers from using both the +// Image and ImageSource objects simultaneously, but it means that they only need to close +// the Image.) +// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + return image.FromSource(ctx, sys, src) +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. +// +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { + return image.FromUnparsedImage(ctx, sys, unparsed) +} diff --git a/tools/vendor/go.podman.io/image/v5/image/unparsed.go b/tools/vendor/go.podman.io/image/v5/image/unparsed.go new file mode 100644 index 000000000..79d446d70 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/image/unparsed.go @@ -0,0 +1,47 @@ +package image + +import ( + "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/image" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/unparsedimage" + "go.podman.io/image/v5/types" +) + +// UnparsedImage implements types.UnparsedImage. +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +type UnparsedImage = image.UnparsedImage + +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). +// +// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image +// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { + return image.UnparsedInstance(src, instanceDigest) +} + +// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef +type unparsedWithRef struct { + private.UnparsedImage + ref types.ImageReference +} + +func (uwr *unparsedWithRef) Reference() types.ImageReference { + return uwr.ref +} + +// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef. +// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image +// based on a remote-registry policy. +// +// For the purposes of digest validation in [types.UnparsedImage.Manifest], what matters is the +// reference originally used to create wrappedInstance, not replacementRef.
+func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage { + return &unparsedWithRef{ + UnparsedImage: unparsedimage.FromPublic(wrappedInstance), + ref: replacementRef, + } +} diff --git a/tools/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/tools/vendor/go.podman.io/image/v5/internal/blobinfocache/blobinfocache.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go rename to tools/vendor/go.podman.io/image/v5/internal/blobinfocache/blobinfocache.go index f31ee3124..5399c2961 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go +++ b/tools/vendor/go.podman.io/image/v5/internal/blobinfocache/blobinfocache.go @@ -1,8 +1,8 @@ package blobinfocache import ( - "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/types" ) // FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original diff --git a/tools/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/tools/vendor/go.podman.io/image/v5/internal/blobinfocache/types.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go rename to tools/vendor/go.podman.io/image/v5/internal/blobinfocache/types.go index acf82ee63..d9d27ec95 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go +++ b/tools/vendor/go.podman.io/image/v5/internal/blobinfocache/types.go @@ -1,9 +1,9 @@ package blobinfocache import ( - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" ) const ( diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/docker_list.go b/tools/vendor/go.podman.io/image/v5/internal/image/docker_list.go similarity index 91% rename from tools/vendor/github.com/containers/image/v5/internal/image/docker_list.go rename to tools/vendor/go.podman.io/image/v5/internal/image/docker_list.go index 617a451aa..2b49964a1 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/docker_list.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/docker_list.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/internal/manifest" + "go.podman.io/image/v5/types" ) func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go b/tools/vendor/go.podman.io/image/v5/internal/image/docker_schema1.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go rename to tools/vendor/go.podman.io/image/v5/internal/image/docker_schema1.go index 3ef8e144d..da7a943b3 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/docker_schema1.go @@ -4,11 +4,11 @@ import ( "context" "fmt" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" 
"github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/types" ) type manifestSchema1 struct { @@ -202,7 +202,7 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t d = layerDiffIDs[v2Index] } layers = append(layers, manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + MediaType: manifest.DockerV2Schema2LayerMediaType, Size: size, Digest: m.m.FSLayers[v1Index].BlobSum, }) @@ -217,7 +217,7 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t return nil, err } configDescriptor := manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.container.image.v1+json", + MediaType: manifest.DockerV2Schema2ConfigMediaType, Size: int64(len(configJSON)), Digest: digest.FromBytes(configJSON), } diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/tools/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go rename to tools/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go index 01219e391..9305524a0 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go @@ -10,14 +10,14 @@ import ( "fmt" "strings" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/iolimits" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/pkg/blobinfocache/none" + "go.podman.io/image/v5/types" ) // GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/manifest.go b/tools/vendor/go.podman.io/image/v5/internal/image/manifest.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/internal/image/manifest.go rename to tools/vendor/go.podman.io/image/v5/internal/image/manifest.go index ed57e08dd..d6ae8b6fa 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/manifest.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/manifest.go @@ -4,10 +4,10 @@ import ( "context" "fmt" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/types" ) // genericManifest is an interface for parsing, modifying image manifests and related data. 
diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/memory.go b/tools/vendor/go.podman.io/image/v5/internal/image/memory.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/internal/image/memory.go rename to tools/vendor/go.podman.io/image/v5/internal/image/memory.go index e22c7aafd..9dff39197 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/memory.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/memory.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/types" ) // memoryImage is a mostly-implementation of types.Image assembled from data diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/oci.go b/tools/vendor/go.podman.io/image/v5/internal/image/oci.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/internal/image/oci.go rename to tools/vendor/go.podman.io/image/v5/internal/image/oci.go index aaef95ff3..8b73b91ff 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/oci.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/oci.go @@ -7,15 +7,15 @@ import ( "fmt" "slices" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - internalManifest "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/types" ociencspec "github.com/containers/ocicrypt/spec" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/iolimits" + internalManifest "go.podman.io/image/v5/internal/manifest" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/pkg/blobinfocache/none" + "go.podman.io/image/v5/types" ) type manifestOCI1 struct { diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/oci_index.go b/tools/vendor/go.podman.io/image/v5/internal/image/oci_index.go similarity index 91% rename from tools/vendor/github.com/containers/image/v5/internal/image/oci_index.go rename to tools/vendor/go.podman.io/image/v5/internal/image/oci_index.go index 0e945c851..b9071b55d 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/oci_index.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/oci_index.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/internal/manifest" + "go.podman.io/image/v5/types" ) func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/sourced.go b/tools/vendor/go.podman.io/image/v5/internal/image/sourced.go similarity index 99% rename from tools/vendor/github.com/containers/image/v5/internal/image/sourced.go rename to tools/vendor/go.podman.io/image/v5/internal/image/sourced.go index 661891aa5..ba2eaa0c9 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/sourced.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/sourced.go @@ -6,7 +6,7 @@ package image import ( "context" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/types" ) // FromReference returns a types.ImageCloser implementation for the default instance reading from 
reference. diff --git a/tools/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/tools/vendor/go.podman.io/image/v5/internal/image/unparsed.go similarity index 87% rename from tools/vendor/github.com/containers/image/v5/internal/image/unparsed.go rename to tools/vendor/go.podman.io/image/v5/internal/image/unparsed.go index 0f026501c..45c372383 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/image/unparsed.go +++ b/tools/vendor/go.podman.io/image/v5/internal/image/unparsed.go @@ -4,13 +4,13 @@ import ( "context" "fmt" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/imagesource" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/imagesource" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/signature" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/types" ) // UnparsedImage implements types.UnparsedImage . @@ -30,6 +30,9 @@ type UnparsedImage struct { // UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // +// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image +// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. +// // The UnparsedImage must not be used after the underlying ImageSource is Close()d. // // This is publicly visible as c/image/image.UnparsedInstance. @@ -48,6 +51,9 @@ func (i *UnparsedImage) Reference() types.ImageReference { } // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +// +// Users of UnparsedImage are promised that this validates the image +// against either i.instanceDigest if set, or against a digest included in i.src.Reference. 
func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { if i.cachedManifest == nil { m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/compat.go similarity index 81% rename from tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go rename to tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/compat.go index 47c169a1f..9a8d18713 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/compat.go @@ -4,11 +4,11 @@ import ( "context" "io" - "github.com/containers/image/v5/internal/blobinfocache" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/blobinfocache" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/signature" + "go.podman.io/image/v5/types" ) // Compat implements the obsolete parts of types.ImageDestination @@ -99,3 +99,16 @@ func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanc } return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest) } + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) +func (c *Compat) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return c.dest.CommitWithOptions(ctx, private.CommitOptions{ + UnparsedToplevel: unparsedToplevel, + }) +} diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/helpers.go similarity index 82% rename from tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go rename to tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/helpers.go index 9b42cfbec..b12beff07 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/helpers.go @@ -1,8 +1,8 @@ package impl import ( - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/internal/private" + "go.podman.io/image/v5/internal/manifest" + "go.podman.io/image/v5/internal/private" ) // OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/properties.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go rename to tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/properties.go index 704812e9a..1aab4b061 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/impl/properties.go @@ -1,6 +1,6 @@ package impl -import "github.com/containers/image/v5/types" +import "go.podman.io/image/v5/types" // Properties collects properties of an ImageDestination that are constant throughout its lifetime // (but might differ across instances). diff --git a/tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/original_oci_config.go b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/original_oci_config.go new file mode 100644 index 000000000..c4536e933 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/original_oci_config.go @@ -0,0 +1,16 @@ +package stubs + +import ( + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing. +type IgnoresOriginalOCIConfig struct{} + +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). 
+func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return nil +} diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/put_blob_partial.go similarity index 88% rename from tools/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go rename to tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/put_blob_partial.go index bbb53c198..573ad832e 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/put_blob_partial.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/types" ) // NoPutBlobPartialInitialize implements parts of private.ImageDestination @@ -36,8 +36,9 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). -// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller -// should fall back to PutBlobWithOptions. +// Even if SupportsPutBlobPartial() returns true, the call can fail. +// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. +// The fallback _must not_ be done otherwise. func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) } diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/signatures.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go rename to tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/signatures.go index 7015fd068..c046449b1 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/signatures.go @@ -4,8 +4,8 @@ import ( "context" "errors" - "github.com/containers/image/v5/internal/signature" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/signature" ) // NoSignaturesInitialize implements parts of private.ImageDestination diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/stubs.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go rename to tools/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/stubs.go diff --git a/tools/vendor/go.podman.io/image/v5/internal/imagedestination/wrapper.go b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/wrapper.go new file mode 100644 index 
000000000..cbbb6b42a --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/internal/imagedestination/wrapper.go @@ -0,0 +1,108 @@ +package imagedestination + +import ( + "context" + "io" + + "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/imagedestination/stubs" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/signature" + "go.podman.io/image/v5/types" +) + +// wrapped provides the private.ImageDestination operations +// for a destination that only implements types.ImageDestination +type wrapped struct { + stubs.IgnoresOriginalOCIConfig + stubs.NoPutBlobPartialInitialize + + types.ImageDestination +} + +// FromPublic(dest) returns an object that provides the private.ImageDestination API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageDestination, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageDestination. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementers of a public interface. +func FromPublic(dest types.ImageDestination) private.ImageDestination { + if dest2, ok := dest.(private.ImageDestination); ok { + return dest2 + } + return &wrapped{ + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()), + + ImageDestination: dest, + } +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) + if err != nil { + return private.UploadedBlob{}, err + } + return private.UploadedBlob{ + Digest: res.Digest, + Size: res.Size, + }, nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
+func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if options.RequiredCompression != nil { + return false, private.ReusedBlob{}, nil + } + reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) + if !reused || err != nil { + return reused, private.ReusedBlob{}, err + } + return true, private.ReusedBlob{ + Digest: blob.Digest, + Size: blob.Size, + CompressionOperation: blob.CompressionOperation, + CompressionAlgorithm: blob.CompressionAlgorithm, + // CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated + // annotations, and we didn’t use the blob.Annotations field previously, so we’ll + // continue not using it. + }, nil +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). +func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + simpleSigs := [][]byte{} + for _, sig := range signatures { + simpleSig, ok := sig.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(sig) + } + simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature()) + } + return w.PutSignatures(ctx, simpleSigs, instanceDigest) +} + +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. 
rollback is allowed but not guaranteed) +func (w *wrapped) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + return w.Commit(ctx, options.UnparsedToplevel) +} diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go b/tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/compat.go similarity index 94% rename from tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go rename to tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/compat.go index 7d859c312..4d56f4359 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/compat.go @@ -3,9 +3,9 @@ package impl import ( "context" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/signature" ) // Compat implements the obsolete parts of types.ImageSource diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go b/tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/layer_infos.go similarity index 96% rename from tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go rename to tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/layer_infos.go index d5eae6351..63016f333 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/layer_infos.go @@ -3,8 +3,8 @@ package impl import ( "context" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/types" ) // DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing. diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go b/tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/properties.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go rename to tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/properties.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go b/tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/signatures.go similarity index 93% rename from tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go rename to tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/signatures.go index b3a8c7e88..749e42a0c 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagesource/impl/signatures.go @@ -3,8 +3,8 @@ package impl import ( "context" - "github.com/containers/image/v5/internal/signature" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/signature" ) // NoSignatures implements GetSignatures() that returns nothing. 
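The new wrapper.go above follows a common Go upgrade pattern: type-assert the public interface to the richer private one, and only wrap (embedding the public value alongside stub defaults) when the assertion fails. A self-contained sketch of the pattern, assuming hypothetical Public/Private interfaces rather than the vendored types:

    package main

    import "fmt"

    // Public stands in for a public API surface such as types.ImageDestination.
    type Public interface {
    	Name() string
    }

    // Private extends Public with internal-only methods, in the same spirit
    // as private.ImageDestination.
    type Private interface {
    	Public
    	InternalOnly() string
    }

    // wrapped upgrades a Public value by embedding it next to default behavior,
    // mirroring how the vendored wrapper embeds stubs plus types.ImageDestination.
    type wrapped struct {
    	Public
    }

    func (w *wrapped) InternalOnly() string { return "stub default for " + w.Name() }

    // FromPublic returns p unchanged when it already implements Private;
    // otherwise it wraps it — the same type-assertion shape used above.
    func FromPublic(p Public) Private {
    	if p2, ok := p.(Private); ok {
    		return p2
    	}
    	return &wrapped{Public: p}
    }

    type plain struct{}

    func (plain) Name() string { return "plain" }

    func main() {
    	fmt.Println(FromPublic(plain{}).InternalOnly()) // stub default for plain
    }

Because the returned value is a struct rather than a public interface, new private methods can be added later without breaking outside implementers, which is the rationale spelled out in the FromPublic comment above.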
diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go b/tools/vendor/go.podman.io/image/v5/internal/imagesource/stubs/get_blob_at.go similarity index 95% rename from tools/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go rename to tools/vendor/go.podman.io/image/v5/internal/imagesource/stubs/get_blob_at.go index 286ae524b..b1fe9b9a6 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagesource/stubs/get_blob_at.go @@ -5,8 +5,8 @@ import ( "fmt" "io" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/types" ) // NoGetBlobAtInitialize implements parts of private.ImageSource diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go b/tools/vendor/go.podman.io/image/v5/internal/imagesource/stubs/stubs.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go rename to tools/vendor/go.podman.io/image/v5/internal/imagesource/stubs/stubs.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/tools/vendor/go.podman.io/image/v5/internal/imagesource/wrapper.go similarity index 90% rename from tools/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go rename to tools/vendor/go.podman.io/image/v5/internal/imagesource/wrapper.go index f0d1d042b..00bf8893f 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go +++ b/tools/vendor/go.podman.io/image/v5/internal/imagesource/wrapper.go @@ -3,11 +3,11 @@ package imagesource import ( "context" - "github.com/containers/image/v5/internal/imagesource/stubs" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/imagesource/stubs" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/signature" + "go.podman.io/image/v5/types" ) // wrapped provides the private.ImageSource operations diff --git a/tools/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/tools/vendor/go.podman.io/image/v5/internal/iolimits/iolimits.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go rename to tools/vendor/go.podman.io/image/v5/internal/iolimits/iolimits.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/manifest/common.go b/tools/vendor/go.podman.io/image/v5/internal/manifest/common.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/manifest/common.go rename to tools/vendor/go.podman.io/image/v5/internal/manifest/common.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go b/tools/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go rename to tools/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/tools/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2_list.go similarity index 89% rename from 
tools/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go rename to tools/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2_list.go index f847fa9cc..fdcc42083 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/tools/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2_list.go @@ -5,11 +5,11 @@ import ( "fmt" "slices" - platform "github.com/containers/image/v5/internal/pkg/platform" - compression "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + platform "go.podman.io/image/v5/internal/pkg/platform" + compression "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" ) // Schema2PlatformSpec describes the platform which a particular manifest is @@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. -func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { +func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { editInstances := []ListEdit{} for i, instance := range updates { editInstances = append(editInstances, ListEdit{ - UpdateOldDigest: index.Manifests[i].Digest, + UpdateOldDigest: list.Manifests[i].Digest, UpdateDigest: instance.Digest, UpdateSize: instance.Size, UpdateMediaType: instance.MediaType, ListOperation: ListOpUpdate}) } - return index.editInstances(editInstances) + return list.editInstances(editInstances) } -func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { +func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error { addedEntries := []Schema2ManifestDescriptor{} for i, editInstance := range editInstances { switch editInstance.ListOperation { @@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { if err := editInstance.UpdateDigest.Validate(); err != nil { return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) } - targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool { + targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool { return m.Digest == editInstance.UpdateOldDigest }) if targetIndex == -1 { return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) } - index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + list.Manifests[targetIndex].Digest = editInstance.UpdateDigest if editInstance.UpdateSize < 0 { return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) } - index.Manifests[targetIndex].Size = editInstance.UpdateSize + list.Manifests[targetIndex].Size = editInstance.UpdateSize if editInstance.UpdateMediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType) } - index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + list.Manifests[targetIndex].MediaType = 
editInstance.UpdateMediaType case ListOpAdd: if editInstance.AddPlatform == nil { // Should we create a struct with empty fields instead? @@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { if len(addedEntries) != 0 { // slices.Clone() here to ensure a private backing array; // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. - index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) + list.Manifests = append(slices.Clone(list.Manifests), addedEntries...) } return nil } -func (index *Schema2List) EditInstances(editInstances []ListEdit) error { - return index.editInstances(editInstances) +func (list *Schema2List) EditInstances(editInstances []ListEdit) error { + return list.editInstances(editInstances) } func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { @@ -152,10 +152,7 @@ func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemCont // ChooseInstance parses blob as a schema2 manifest list, and returns the digest // of the image which is appropriate for the current environment. func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } + wantedPlatforms := platform.WantedPlatforms(ctx) for _, wantedPlatform := range wantedPlatforms { for _, d := range list.Manifests { imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform) @@ -283,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List { return &Schema2List{*public} } -func (index *Schema2List) CloneInternal() List { - return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic)) +func (list *Schema2List) CloneInternal() List { + return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic)) } -func (index *Schema2List) Clone() ListPublic { - return index.CloneInternal() +func (list *Schema2List) Clone() ListPublic { + return list.CloneInternal() } // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled diff --git a/tools/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/tools/vendor/go.podman.io/image/v5/internal/manifest/errors.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/manifest/errors.go rename to tools/vendor/go.podman.io/image/v5/internal/manifest/errors.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/manifest/list.go b/tools/vendor/go.podman.io/image/v5/internal/manifest/list.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/internal/manifest/list.go rename to tools/vendor/go.podman.io/image/v5/internal/manifest/list.go index 1c614d124..100d1c86b 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/manifest/list.go +++ b/tools/vendor/go.podman.io/image/v5/internal/manifest/list.go @@ -3,10 +3,10 @@ package manifest import ( "fmt" - compression "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + compression "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" ) // ListPublic is a subset of List which is a part of the public API; diff 
--git a/tools/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/tools/vendor/go.podman.io/image/v5/internal/manifest/manifest.go similarity index 99% rename from tools/vendor/github.com/containers/image/v5/internal/manifest/manifest.go rename to tools/vendor/go.podman.io/image/v5/internal/manifest/manifest.go index 3fb52104a..7dfe77844 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/manifest/manifest.go +++ b/tools/vendor/go.podman.io/image/v5/internal/manifest/manifest.go @@ -4,10 +4,10 @@ import ( "encoding/json" "slices" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" ) // FIXME: Should we just use docker/distribution and docker/docker implementations directly? diff --git a/tools/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/tools/vendor/go.podman.io/image/v5/internal/manifest/oci_index.go similarity index 96% rename from tools/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go rename to tools/vendor/go.podman.io/image/v5/internal/manifest/oci_index.go index fe78efaeb..046d8e607 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/tools/vendor/go.podman.io/image/v5/internal/manifest/oci_index.go @@ -9,12 +9,12 @@ import ( "runtime" "slices" - platform "github.com/containers/image/v5/internal/pkg/platform" - compression "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + platform "go.podman.io/image/v5/internal/pkg/platform" + compression "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" ) const ( @@ -213,12 +213,12 @@ type instanceCandidate struct { digest digest.Digest // Instance digest } -func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool { +func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip types.OptionalBool) bool { switch { case ic.platformIndex != other.platformIndex: return ic.platformIndex < other.platformIndex case ic.isZstd != other.isZstd: - if !preferGzip { + if preferGzip != types.OptionalBoolTrue { return ic.isZstd } else { return !ic.isZstd @@ -232,14 +232,7 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip // chooseInstance is a private equivalent to ChooseInstanceByCompression, // shared by ChooseInstance and ChooseInstanceByCompression. 
func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { - didPreferGzip := false - if preferGzip == types.OptionalBoolTrue { - didPreferGzip = true - } - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } + wantedPlatforms := platform.WantedPlatforms(ctx) var bestMatch *instanceCandidate bestMatch = nil for manifestIndex, d := range index.Manifests { @@ -254,7 +247,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi } candidate.platformIndex = platformIndex } - if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) { + if bestMatch == nil || candidate.isPreferredOver(bestMatch, preferGzip) { bestMatch = &candidate } } diff --git a/tools/vendor/github.com/containers/image/v5/internal/multierr/multierr.go b/tools/vendor/go.podman.io/image/v5/internal/multierr/multierr.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/multierr/multierr.go rename to tools/vendor/go.podman.io/image/v5/internal/multierr/multierr.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/tools/vendor/go.podman.io/image/v5/internal/pkg/platform/platform_matcher.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go rename to tools/vendor/go.podman.io/image/v5/internal/pkg/platform/platform_matcher.go index afdce1d3d..171438891 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/tools/vendor/go.podman.io/image/v5/internal/pkg/platform/platform_matcher.go @@ -24,9 +24,9 @@ import ( "slices" "strings" - "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/types" ) // For Linux, the kernel has already detected the ABI, ISA and Features. @@ -153,7 +153,7 @@ var compatibility = map[string][]string{ // WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user, // the most compatible platform is first. // If some option (arch, os, variant) is not present, a value from current platform is detected. -func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { +func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform { // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all. // The fields are not specified by the OCI specification, as of version 1.1, usefully enough // to be interoperable, anyway. 
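The isPreferredOver change above threads the tri-state types.OptionalBool through instead of collapsing it to a plain bool first, so both "undefined" and "explicitly false" keep the zstd preference and only an explicit true flips it. A toy sketch of that tri-state comparison, using a stand-in OptionalBool rather than the vendored type:

    package main

    import "fmt"

    // OptionalBool is a stand-in for the three-valued types.OptionalBool
    // (undefined / true / false) consumed by the patched code.
    type OptionalBool int

    const (
    	OptionalBoolUndefined OptionalBool = iota
    	OptionalBoolTrue
    	OptionalBoolFalse
    )

    // preferZstd mirrors the patched comparison: zstd wins unless the caller
    // explicitly asked for gzip, so "undefined" behaves like "false".
    func preferZstd(preferGzip OptionalBool) bool {
    	return preferGzip != OptionalBoolTrue
    }

    func main() {
    	for _, v := range []OptionalBool{OptionalBoolUndefined, OptionalBoolTrue, OptionalBoolFalse} {
    		fmt.Printf("preferGzip=%d -> pick zstd: %v\n", v, preferZstd(v))
    	}
    }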
@@ -211,7 +211,7 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { Variant: v, }) } - return res, nil + return res } // MatchesPlatform returns true if a platform descriptor from a multi-arch image matches diff --git a/tools/vendor/github.com/containers/image/v5/internal/private/private.go b/tools/vendor/go.podman.io/image/v5/internal/private/private.go similarity index 75% rename from tools/vendor/github.com/containers/image/v5/internal/private/private.go rename to tools/vendor/go.podman.io/image/v5/internal/private/private.go index d81ea6703..a5d2057ae 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/tools/vendor/go.podman.io/image/v5/internal/private/private.go @@ -3,13 +3,15 @@ package private import ( "context" "io" + "time" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" - "github.com/containers/image/v5/internal/signature" - compression "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/blobinfocache" + "go.podman.io/image/v5/internal/signature" + compression "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" ) // ImageSourceInternalOnly is the part of private.ImageSource that is not @@ -41,6 +43,12 @@ type ImageDestinationInternalOnly interface { // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures // on unsupported formats. + // NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, + // or an error obtaining that value (e.g. if the image is an artifact and not a container image). + // The destination can use it in its TryReusingBlob/PutBlob implementations + // (otherwise it only obtains the final config after all layers are written). + NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. @@ -53,8 +61,9 @@ type ImageDestinationInternalOnly interface { // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). - // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller - // should fall back to PutBlobWithOptions. + // Even if SupportsPutBlobPartial() returns true, the call can fail. + // If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. + // The fallback _must not_ be done otherwise. 
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error) // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -69,6 +78,12 @@ type ImageDestinationInternalOnly interface { // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. // MUST be called after PutManifest (signatures may reference manifest contents). PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error + + // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. + // WARNING: This does not have any transactional semantics: + // - Uploaded data MAY be visible to others before CommitWithOptions() is called + // - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) + CommitWithOptions(ctx context.Context, options CommitOptions) error } // ImageDestination is an internal extension to the types.ImageDestination @@ -103,6 +118,7 @@ type PutBlobOptions struct { // PutBlobPartialOptions are used in PutBlobPartial. type PutBlobPartialOptions struct { Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs) } @@ -145,6 +161,25 @@ type ReusedBlob struct { MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. } +// CommitOptions are used in CommitWithOptions +type CommitOptions struct { + // UnparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list + // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the + // original manifest list digest, if desired. + UnparsedToplevel types.UnparsedImage + // ReportResolvedReference, if set, asks the transport to store a “resolved” (more detailed) reference to the created image + // into the value this option points to. + // What “resolved” means is transport-specific. + // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value. + ReportResolvedReference *types.ImageReference + // Timestamp, if set, will force timestamps of content created in the destination to this value. + // Most transports don't support this. + // + // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry + // (but not a timestamp of the created archive file). + Timestamp *time.Time +} + // ImageSourceChunk is a portion of a blob. // This API is experimental and can be changed without bumping the major version number. type ImageSourceChunk struct { @@ -183,3 +218,22 @@ type UnparsedImage interface { // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) } + +// ErrFallbackToOrdinaryLayerDownload is a custom error type returned by PutBlobPartial. 
+// It suggests to the caller that a fallback mechanism can be used instead of a hard failure; +// otherwise the caller of PutBlobPartial _must not_ fall back to PutBlob. +type ErrFallbackToOrdinaryLayerDownload struct { + err error +} + +func (c ErrFallbackToOrdinaryLayerDownload) Error() string { + return c.err.Error() +} + +func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error { + return c.err +} + +func NewErrFallbackToOrdinaryLayerDownload(err error) error { + return ErrFallbackToOrdinaryLayerDownload{err: err} +} diff --git a/tools/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go b/tools/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go rename to tools/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go index b8d3a7e56..ce5054275 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go +++ b/tools/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go @@ -3,8 +3,8 @@ package putblobdigest import ( "io" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/types" ) // Digester computes a digest of the provided stream, if not known yet. diff --git a/tools/vendor/github.com/containers/image/v5/internal/rootless/rootless.go b/tools/vendor/go.podman.io/image/v5/internal/rootless/rootless.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/rootless/rootless.go rename to tools/vendor/go.podman.io/image/v5/internal/rootless/rootless.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/set/set.go b/tools/vendor/go.podman.io/image/v5/internal/set/set.go similarity index 84% rename from tools/vendor/github.com/containers/image/v5/internal/set/set.go rename to tools/vendor/go.podman.io/image/v5/internal/set/set.go index acf30343e..7716b12d5 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/set/set.go +++ b/tools/vendor/go.podman.io/image/v5/internal/set/set.go @@ -1,6 +1,9 @@ package set -import "golang.org/x/exp/maps" +import ( + "iter" + "maps" +) // FIXME: // - Docstrings @@ -28,8 +31,8 @@ func (s *Set[E]) Add(v E) { s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. 
} -func (s *Set[E]) AddSlice(slice []E) { - for _, v := range slice { +func (s *Set[E]) AddSeq(seq iter.Seq[E]) { + for v := range seq { s.Add(v) } } @@ -47,6 +50,6 @@ func (s *Set[E]) Empty() bool { return len(s.m) == 0 } -func (s *Set[E]) Values() []E { +func (s *Set[E]) All() iter.Seq[E] { return maps.Keys(s.m) } diff --git a/tools/vendor/github.com/containers/image/v5/internal/signature/signature.go b/tools/vendor/go.podman.io/image/v5/internal/signature/signature.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/signature/signature.go rename to tools/vendor/go.podman.io/image/v5/internal/signature/signature.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/tools/vendor/go.podman.io/image/v5/internal/signature/sigstore.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/signature/sigstore.go rename to tools/vendor/go.podman.io/image/v5/internal/signature/sigstore.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/signature/simple.go b/tools/vendor/go.podman.io/image/v5/internal/signature/simple.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/signature/simple.go rename to tools/vendor/go.podman.io/image/v5/internal/signature/simple.go diff --git a/tools/vendor/go.podman.io/image/v5/internal/signer/signer.go b/tools/vendor/go.podman.io/image/v5/internal/signer/signer.go new file mode 100644 index 000000000..526565d93 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/internal/signer/signer.go @@ -0,0 +1,47 @@ +package signer + +import ( + "context" + + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/signature" +) + +// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This type is visible to external callers, so it has no public fields or methods apart from Close(). +// +// The owner of a Signer must call Close() when done. +type Signer struct { + implementation SignerImplementation +} + +// NewSigner creates a public Signer from a SignerImplementation +func NewSigner(impl SignerImplementation) *Signer { + return &Signer{implementation: impl} +} + +func (s *Signer) Close() error { + return s.implementation.Close() +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +// Alternatively, should SignImageManifest be provided a logging writer of some kind? +func ProgressMessage(signer *Signer) string { + return signer.implementation.ProgressMessage() +} + +// SignImageManifest invokes a SignerImplementation. +// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage. +func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) { + return signer.implementation.SignImageManifest(ctx, manifest, dockerReference) +} + +// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This interface is distinct from Signer so that implementations can be created outside of this package. +type SignerImplementation interface { + // ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. 
+ ProgressMessage() string + // SignImageManifest creates a new signature for manifest m as dockerReference. + SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) + Close() error +} diff --git a/tools/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/tools/vendor/go.podman.io/image/v5/internal/streamdigest/stream_digest.go similarity index 89% rename from tools/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go rename to tools/vendor/go.podman.io/image/v5/internal/streamdigest/stream_digest.go index d5a5436a4..83608e04a 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go +++ b/tools/vendor/go.podman.io/image/v5/internal/streamdigest/stream_digest.go @@ -5,9 +5,9 @@ import ( "io" "os" - "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/tmpdir" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/internal/putblobdigest" + "go.podman.io/image/v5/internal/tmpdir" + "go.podman.io/image/v5/types" ) // ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo. diff --git a/tools/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/tools/vendor/go.podman.io/image/v5/internal/tmpdir/tmpdir.go similarity index 91% rename from tools/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go rename to tools/vendor/go.podman.io/image/v5/internal/tmpdir/tmpdir.go index bab73ee33..634b2d062 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go +++ b/tools/vendor/go.podman.io/image/v5/internal/tmpdir/tmpdir.go @@ -4,12 +4,12 @@ import ( "os" "runtime" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/types" ) // unixTempDirForBigFiles is the directory path to store big files on non Windows systems. // You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path' +// -ldflags '-X go.podman.io/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path' var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles // builtinUnixTempDirForBigFiles is the directory path to store big files. diff --git a/tools/vendor/go.podman.io/image/v5/internal/unparsedimage/wrapper.go b/tools/vendor/go.podman.io/image/v5/internal/unparsedimage/wrapper.go new file mode 100644 index 000000000..ef314d4b3 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/internal/unparsedimage/wrapper.go @@ -0,0 +1,38 @@ +package unparsedimage + +import ( + "context" + + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/signature" + "go.podman.io/image/v5/types" +) + +// wrapped provides the private.UnparsedImage operations +// for an object that only implements types.UnparsedImage +type wrapped struct { + types.UnparsedImage +} + +// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API +func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage { + if unparsed2, ok := unparsed.(private.UnparsedImage); ok { + return unparsed2 + } + return &wrapped{ + UnparsedImage: unparsed, + } +} + +// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. 
+func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) { + sigs, err := w.Signatures(ctx) + if err != nil { + return nil, err + } + res := []signature.Signature{} + for _, sig := range sigs { + res = append(res, signature.SimpleSigningFromBlob(sig)) + } + return res, nil +} diff --git a/tools/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/tools/vendor/go.podman.io/image/v5/internal/uploadreader/upload_reader.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go rename to tools/vendor/go.podman.io/image/v5/internal/uploadreader/upload_reader.go diff --git a/tools/vendor/github.com/containers/image/v5/internal/useragent/useragent.go b/tools/vendor/go.podman.io/image/v5/internal/useragent/useragent.go similarity index 83% rename from tools/vendor/github.com/containers/image/v5/internal/useragent/useragent.go rename to tools/vendor/go.podman.io/image/v5/internal/useragent/useragent.go index 7ac49693e..54d8fcb70 100644 --- a/tools/vendor/github.com/containers/image/v5/internal/useragent/useragent.go +++ b/tools/vendor/go.podman.io/image/v5/internal/useragent/useragent.go @@ -1,6 +1,6 @@ package useragent -import "github.com/containers/image/v5/version" +import "go.podman.io/image/v5/version" // DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise. var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" diff --git a/tools/vendor/github.com/containers/image/v5/manifest/common.go b/tools/vendor/go.podman.io/image/v5/manifest/common.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/manifest/common.go rename to tools/vendor/go.podman.io/image/v5/manifest/common.go index 8d9d5795f..dde1bf3c8 100644 --- a/tools/vendor/github.com/containers/image/v5/manifest/common.go +++ b/tools/vendor/go.podman.io/image/v5/manifest/common.go @@ -3,9 +3,9 @@ package manifest import ( "fmt" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" ) // layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() diff --git a/tools/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/tools/vendor/go.podman.io/image/v5/manifest/docker_schema1.go similarity index 94% rename from tools/vendor/github.com/containers/image/v5/manifest/docker_schema1.go rename to tools/vendor/go.podman.io/image/v5/manifest/docker_schema1.go index 222aa896e..28c9fea30 100644 --- a/tools/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/tools/vendor/go.podman.io/image/v5/manifest/docker_schema1.go @@ -8,14 +8,14 @@ import ( "strings" "time" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/internal/set" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/manifest" + "go.podman.io/image/v5/internal/set" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + 
"go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/regexp" ) // Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. @@ -133,12 +133,12 @@ func (m *Schema1) ConfigInfo() types.BlobInfo { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *Schema1) LayerInfos() []LayerInfo { - layers := make([]LayerInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = LayerInfo{ + layers := make([]LayerInfo, 0, len(m.FSLayers)) + for i, layer := range slices.Backward(m.FSLayers) { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers = append(layers, LayerInfo{ BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, - } + }) } return layers } @@ -284,7 +284,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { } // Build the history. convertedHistory := []Schema2History{} - for _, compat := range m.ExtractedV1Compatibility { + for _, compat := range slices.Backward(m.ExtractedV1Compatibility) { hitem := Schema2History{ Created: compat.Created, CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), @@ -292,7 +292,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { Comment: compat.Comment, EmptyLayer: compat.ThrowAway, } - convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + convertedHistory = append(convertedHistory, hitem) } // Build the rootfs information. We need the decompressed sums that we've been // calculating to fill in the DiffIDs. It's expected (but not enforced by us) @@ -318,20 +318,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { // Add the history and rootfs information. rootfs, err := json.Marshal(rootFS) if err != nil { - return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err) } rawRootfs := json.RawMessage(rootfs) raw["rootfs"] = &rawRootfs history, err := json.Marshal(convertedHistory) if err != nil { - return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err) + return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err) } rawHistory := json.RawMessage(history) raw["history"] = &rawHistory // Encode the result. 
config, err = json.Marshal(raw) if err != nil { - return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err) + return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err) } return config, nil } diff --git a/tools/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/tools/vendor/go.podman.io/image/v5/manifest/docker_schema2.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/manifest/docker_schema2.go rename to tools/vendor/go.podman.io/image/v5/manifest/docker_schema2.go index 7e53f4f54..b4255d886 100644 --- a/tools/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/tools/vendor/go.podman.io/image/v5/manifest/docker_schema2.go @@ -5,11 +5,11 @@ import ( "fmt" "time" - "github.com/containers/image/v5/internal/manifest" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/pkg/strslice" - "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/manifest" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/pkg/strslice" + "go.podman.io/image/v5/types" ) // Schema2Descriptor is a “descriptor” in docker/distribution schema 2. diff --git a/tools/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/tools/vendor/go.podman.io/image/v5/manifest/docker_schema2_list.go similarity index 95% rename from tools/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go rename to tools/vendor/go.podman.io/image/v5/manifest/docker_schema2_list.go index c958a3fa3..158f7949e 100644 --- a/tools/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go +++ b/tools/vendor/go.podman.io/image/v5/manifest/docker_schema2_list.go @@ -1,7 +1,7 @@ package manifest import ( - "github.com/containers/image/v5/internal/manifest" + "go.podman.io/image/v5/internal/manifest" ) // Schema2PlatformSpec describes the platform which a particular manifest is diff --git a/tools/vendor/github.com/containers/image/v5/manifest/list.go b/tools/vendor/go.podman.io/image/v5/manifest/list.go similarity index 95% rename from tools/vendor/github.com/containers/image/v5/manifest/list.go rename to tools/vendor/go.podman.io/image/v5/manifest/list.go index 1d6fdc9f5..846ea7d43 100644 --- a/tools/vendor/github.com/containers/image/v5/manifest/list.go +++ b/tools/vendor/go.podman.io/image/v5/manifest/list.go @@ -1,8 +1,8 @@ package manifest import ( - "github.com/containers/image/v5/internal/manifest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/internal/manifest" ) var ( diff --git a/tools/vendor/github.com/containers/image/v5/manifest/manifest.go b/tools/vendor/go.podman.io/image/v5/manifest/manifest.go similarity index 98% rename from tools/vendor/github.com/containers/image/v5/manifest/manifest.go rename to tools/vendor/go.podman.io/image/v5/manifest/manifest.go index d8f37eb45..45118fa4e 100644 --- a/tools/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/tools/vendor/go.podman.io/image/v5/manifest/manifest.go @@ -3,11 +3,11 @@ package manifest import ( "fmt" - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/internal/manifest" + "go.podman.io/image/v5/types" ) // FIXME: Should we just use 
docker/distribution and docker/docker implementations directly? diff --git a/tools/vendor/github.com/containers/image/v5/manifest/oci.go b/tools/vendor/go.podman.io/image/v5/manifest/oci.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/manifest/oci.go rename to tools/vendor/go.podman.io/image/v5/manifest/oci.go index f714574ee..286d58c42 100644 --- a/tools/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/tools/vendor/go.podman.io/image/v5/manifest/oci.go @@ -6,13 +6,13 @@ import ( "slices" "strings" - "github.com/containers/image/v5/internal/manifest" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" ociencspec "github.com/containers/ocicrypt/spec" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/internal/manifest" + compressiontypes "go.podman.io/image/v5/pkg/compression/types" + "go.podman.io/image/v5/types" ) // BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. @@ -60,7 +60,7 @@ func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) { if err := json.Unmarshal(manifestBlob, &oci1); err != nil { return nil, err } - if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex, + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest, manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { return nil, err } @@ -166,10 +166,11 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // getEncryptedMediaType will return the mediatype to its encrypted counterpart and return // an error if the mediatype does not support encryption func getEncryptedMediaType(mediatype string) (string, error) { - if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") { + parts := strings.Split(mediatype, "+") + if slices.Contains(parts[1:], "encrypted") { return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype) } - unsuffixedMediatype := strings.Split(mediatype, "+")[0] + unsuffixedMediatype := parts[0] switch unsuffixedMediatype { case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. 
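Editor's note: a minimal standalone sketch (not part of the patch) of the media-type suffix handling that the getEncryptedMediaType change above relies on; the function name encryptMediaType is an assumption for illustration, and the allow-list of base media types is omitted:

	package main

	import (
		"fmt"
		"slices"
		"strings"
	)

	// encryptMediaType mirrors the suffix check above: split the media type on "+",
	// reject it if an "encrypted" suffix is already present, and otherwise
	// append "+encrypted".
	func encryptMediaType(mediatype string) (string, error) {
		parts := strings.Split(mediatype, "+")
		if slices.Contains(parts[1:], "encrypted") {
			return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
		}
		return mediatype + "+encrypted", nil
	}

	func main() {
		// Prints "application/vnd.oci.image.layer.v1.tar+gzip+encrypted <nil>".
		fmt.Println(encryptMediaType("application/vnd.oci.image.layer.v1.tar+gzip"))
	}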
diff --git a/tools/vendor/github.com/containers/image/v5/manifest/oci_index.go b/tools/vendor/go.podman.io/image/v5/manifest/oci_index.go
similarity index 94%
rename from tools/vendor/github.com/containers/image/v5/manifest/oci_index.go
rename to tools/vendor/go.podman.io/image/v5/manifest/oci_index.go
index 193b08935..84dae6070 100644
--- a/tools/vendor/github.com/containers/image/v5/manifest/oci_index.go
+++ b/tools/vendor/go.podman.io/image/v5/manifest/oci_index.go
@@ -1,8 +1,8 @@
 package manifest
 
 import (
-	"github.com/containers/image/v5/internal/manifest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"go.podman.io/image/v5/internal/manifest"
 )
 
 // OCI1Index is just an alias for the OCI index type, but one which we can
diff --git a/tools/vendor/go.podman.io/image/v5/oci/internal/oci_util.go b/tools/vendor/go.podman.io/image/v5/oci/internal/oci_util.go
new file mode 100644
index 000000000..c4eaed0ee
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/oci/internal/oci_util.go
@@ -0,0 +1,150 @@
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// annotation spec from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = fmt.Errorf("Invalid image %s", image)
+	}
+	return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither the path nor the image part is validated at this stage.
+func SplitPathAndImage(reference string) (string, string) {
+	if runtime.GOOS == "windows" {
+		return splitPathAndImageWindows(reference)
+	}
+	return splitPathAndImageNonWindows(reference)
+}
+
+func splitPathAndImageWindows(reference string) (string, string) {
+	groups := windowsRefRegexp.FindStringSubmatch(reference)
+	// a nil group means no match
+	if groups == nil {
+		return reference, ""
+	}
+
+	// we expect three elements: the full match, the capture group for the path,
+	// and the capture group for the image
+	if len(groups) != 3 {
+		return reference, ""
+	}
+	return groups[1], groups[2]
+}
+
+func splitPathAndImageNonWindows(reference string) (string, string) {
+	path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
+	return path, image
+}
+
+// ValidateOCIPath takes the OCI path and validates it.
+func ValidateOCIPath(path string) error {
+	if runtime.GOOS == "windows" {
+		// On Windows we must allow for a ':' as part of the path
+		if strings.Count(path, ":") > 1 {
+			return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+		}
+	} else {
+		if strings.Contains(path, ":") {
+			return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path)
+		}
+	}
+	return nil
+}
+
+// ValidateScope validates a policy configuration scope for an OCI transport.
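+// For example (illustrative, not exhaustive): on non-Windows systems "/var/lib/oci"
+// is accepted, while a relative path, a non-canonical path such as "/a/../b"
+// (filepath.Clean would change it), and the bare "/" are all rejected.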
+func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope) + if !matched { + return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} + +// parseOCIReferenceName parses the image from the oci reference. +func parseOCIReferenceName(image string) (img string, index int, err error) { + index = -1 + if strings.HasPrefix(image, "@") { + idx, err := strconv.Atoi(image[1:]) + if err != nil { + return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err) + } + if idx < 0 { + return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx) + } + index = idx + } else { + img = image + } + return img, index, nil +} + +// ParseReferenceIntoElements splits the oci reference into location, image name and source index if exists +func ParseReferenceIntoElements(reference string) (string, string, int, error) { + dir, image := SplitPathAndImage(reference) + image, index, err := parseOCIReferenceName(image) + if err != nil { + return "", "", -1, err + } + return dir, image, index, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/oci/layout/oci_delete.go b/tools/vendor/go.podman.io/image/v5/oci/layout/oci_delete.go new file mode 100644 index 000000000..7eaf6f088 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/oci/layout/oci_delete.go @@ -0,0 +1,189 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "os" + "slices" + + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "go.podman.io/image/v5/internal/set" + "go.podman.io/image/v5/types" +) + +// DeleteImage deletes the named image from the directory, if supported. +func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + sharedBlobsDir := "" + if sys != nil && sys.OCISharedBlobDirPath != "" { + sharedBlobsDir = sys.OCISharedBlobDirPath + } + + descriptor, descriptorIndex, err := ref.getManifestDescriptor() + if err != nil { + return err + } + + blobsUsedByImage := make(map[digest.Digest]int) + if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil { + return err + } + + blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir) + if err != nil { + return err + } + + err = ref.deleteBlobs(blobsToDelete) + if err != nil { + return err + } + + return ref.deleteReferenceFromIndex(descriptorIndex) +} + +// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself. 
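+// For example (illustrative only): counting an image-manifest descriptor whose
+// manifest lists one config and two layers adds four entries to dest: the
+// manifest blob itself, the config blob, and one per layer.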
+func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error { + blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return err + } + + dest[descriptor.Digest]++ + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + manifest, err := parseJSON[imgspecv1.Manifest](blobPath) + if err != nil { + return err + } + dest[manifest.Config.Digest]++ + for _, layer := range manifest.Layers { + dest[layer.Digest]++ + } + case imgspecv1.MediaTypeImageIndex: + index, err := parseIndex(blobPath) + if err != nil { + return err + } + if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil { + return err + } + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + } + return nil +} + +// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself. +func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error { + for _, descriptor := range index.Manifests { + if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil { + return err + } + } + return nil +} + +// This takes in a map of the digest and their usage count in the manifest to be deleted +// It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted +func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) { + rootIndex, err := ref.getIndex() + if err != nil { + return nil, err + } + blobsUsedInRootIndex := make(map[digest.Digest]int) + err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir) + if err != nil { + return nil, err + } + + blobsToDelete := set.New[digest.Digest]() + + for digest, count := range blobsUsedInRootIndex { + if count-blobsUsedByDescriptorToDelete[digest] == 0 { + blobsToDelete.Add(digest) + } + } + + return blobsToDelete, nil +} + +// This transport never generates layouts where blobs for an image are both in the local blobs directory +// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set. +// +// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what +// the other layouts sharing that directory are, and we might not even have permission to read them), +// so we can’t really delete any blobs in that case. +// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt, +// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently +// check for local blobs (but we should make no noise if the blobs are actually in the shared directory). 
+// +// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set +func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error { + for digest := range blobsToDelete.All() { + blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above + if err != nil { + return err + } + err = deleteBlob(blobPath) + if err != nil { + return err + } + } + + return nil +} + +func deleteBlob(blobPath string) error { + logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath)) + + err := os.Remove(blobPath) + if err != nil && !os.IsNotExist(err) { + return err + } else { + return nil + } +} + +func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error { + index, err := ref.getIndex() + if err != nil { + return err + } + + index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1) + + return saveJSON(ref.indexPath(), index) +} + +func saveJSON(path string, content any) (retErr error) { + // If the file already exists, get its mode to preserve it + var mode fs.FileMode + existingfi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return err + } else { // File does not exist, use default mode + mode = 0644 + } + } else { + mode = existingfi.Mode() + } + + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + // since we are writing to this file, make sure we handle errors + defer func() { + closeErr := file.Close() + if retErr == nil { + retErr = closeErr + } + }() + + return json.NewEncoder(file).Encode(content) +} diff --git a/tools/vendor/go.podman.io/image/v5/oci/layout/oci_dest.go b/tools/vendor/go.podman.io/image/v5/oci/layout/oci_dest.go new file mode 100644 index 000000000..48fe812df --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/oci/layout/oci_dest.go @@ -0,0 +1,414 @@ +package layout + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "slices" + + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/internal/imagedestination/impl" + "go.podman.io/image/v5/internal/imagedestination/stubs" + "go.podman.io/image/v5/internal/manifest" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/internal/putblobdigest" + "go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/fileutils" +) + +type ociImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig + stubs.NoPutBlobPartialInitialize + stubs.NoSignaturesInitialize + + ref ociReference + index imgspecv1.Index + sharedBlobDir string +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. 
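+// If the directory already contains an index.json, it is loaded and later extended;
+// otherwise a fresh, empty OCI index (schemaVersion 2) is started in memory and
+// only written out by CommitWithOptions.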
+func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) { + if ref.sourceIndex != -1 { + return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) + } + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + Annotations: make(map[string]string), + } + } + + desiredLayerCompression := types.Compress + if sys != nil && sys.OCIAcceptUncompressedLayers { + desiredLayerCompression = types.PreserveOriginal + } + + d := &ociImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + }, + DesiredLayerCompression: desiredLayerCompression, + AcceptsForeignLayerURLs: true, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. + HasThreadSafePutBlob: true, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"), + + ref: ref, + index: *index, + } + d.Compat = impl.AddCompat(d) + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *ociImageDestination) Close() error { + return nil +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. 
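+//
+// Implementation note: the stream is written to a temporary file in the layout
+// directory while its digest is computed; only after the size and digest check out
+// is it renamed into blobs/<algorithm>/<encoded digest>, and on any failure the
+// temporary file is removed.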
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (_ private.UploadedBlob, retErr error) { + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") + if err != nil { + return private.UploadedBlob{}, err + } + succeeded := false + explicitClosed := false + defer func() { + if !explicitClosed { + closeErr := blobFile.Close() + if retErr == nil { + retErr = closeErr + } + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return private.UploadedBlob{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + + if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil { + return private.UploadedBlob{}, err + } + succeeded = true + return private.UploadedBlob{Digest: blobDigest, Size: size}, nil +} + +// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the +// specific blob path determined by the blobDigest. The closed pointer indicates to the caller +// whether blobFile has been closed or not. +func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error { + if err := blobFile.Sync(); err != nil { + return err + } + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. + if runtime.GOOS != "windows" { + if err := blobFile.Chmod(0644); err != nil { + return err + } + } + + blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir) + if err != nil { + return err + } + if err := ensureParentDirectoryExists(blobPath); err != nil { + return err + } + + // need to explicitly close the file, since a rename won't otherwise work on Windows + err = blobFile.Close() + if err != nil { + return err + } + *closed = true + + if err := os.Rename(blobFile.Name(), blobPath); err != nil { + return err + } + + return nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
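+//
+// In this transport, reuse amounts to an os.Stat of the expected blob path
+// (honoring the shared blob directory when one is configured); no data is copied.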
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { + return false, private.ReusedBlob{}, nil + } + if info.Digest == "" { + return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") + } + blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) + if err != nil { + return false, private.ReusedBlob{}, err + } + finfo, err := os.Stat(blobPath) + if err != nil && os.IsNotExist(err) { + return false, private.ReusedBlob{}, nil + } + if err != nil { + return false, private.ReusedBlob{}, err + } + + return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil +} + +// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types, +// this should be either an OCI manifest (possibly converted to this format by the caller) or index, +// neither of which we'll need to modify further. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when +// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + var digest digest.Digest + var err error + if instanceDigest != nil { + digest = *instanceDigest + } else { + digest, err = manifest.Digest(m) + if err != nil { + return err + } + } + + blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) + if err != nil { + return err + } + if err := ensureParentDirectoryExists(blobPath); err != nil { + return err + } + if err := os.WriteFile(blobPath, m, 0644); err != nil { + return err + } + + if instanceDigest != nil { + return nil + } + + // If we had platform information, we'd build an imgspecv1.Platform structure here. + + // Start filling out the descriptor for this entry + desc := imgspecv1.Descriptor{} + desc.Digest = digest + desc.Size = int64(len(m)) + if d.ref.image != "" { + desc.Annotations = make(map[string]string) + desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image + } + + // If we knew the MIME type, we wouldn't have to guess here. + desc.MediaType = manifest.GuessMIMEType(m) + + d.addManifest(&desc) + + return nil +} + +func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + // If the new entry has a name, remove any conflicting names which we already have. + if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" { + // The name is being set on a new entry, so remove any older ones that had the same name. + // We might be storing an index and all of its component images, and we'll want to attach + // the name to the last one, which is the index. 
+ for i, manifest := range d.index.Manifests { + if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] { + delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName) + break + } + } + } + // If it has the same digest as another entry in the index, we already overwrote the file, + // so just pick up the other information. + for i, manifest := range d.index.Manifests { + if manifest.Digest == desc.Digest && manifest.Annotations[imgspecv1.AnnotationRefName] == "" { + // Replace it completely. + d.index.Manifests[i] = *desc + return + } + } + // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created. + d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc) +} + +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{ + Version: imgspecv1.ImageLayoutVersion, + }) + if err != nil { + return err + } + if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil { + return err + } + indexJSON, err := json.Marshal(d.index) + if err != nil { + return err + } + return os.WriteFile(d.ref.indexPath(), indexJSON, 0644) +} + +// PutBlobFromLocalFileOption is unused but may receive functionality in the future. +type PutBlobFromLocalFileOption struct{} + +// PutBlobFromLocalFile arranges the data from path to be used as blob with digest. +// It computes, and returns, the digest and size of the used file. +// +// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called. 
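+//
+// A hypothetical usage sketch (ctx, dest and the file name are assumptions made
+// for illustration):
+//
+//	dgst, size, err := layout.PutBlobFromLocalFile(ctx, dest, "/tmp/layer.tar.gz")
+//	if err != nil {
+//		return err
+//	}
+//	// dgst and size can now be used, e.g., in a manifest layer descriptor.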
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (_ digest.Digest, _ int64, retErr error) { + d, ok := dest.(*ociImageDestination) + if !ok { + return "", -1, errors.New("caller error: PutBlobFromLocalFile called with a non-oci: destination") + } + + succeeded := false + blobFileClosed := false + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") + if err != nil { + return "", -1, err + } + defer func() { + if !blobFileClosed { + closeErr := blobFile.Close() + if retErr == nil { + retErr = closeErr + } + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + srcFile, err := os.Open(file) + if err != nil { + return "", -1, err + } + defer srcFile.Close() + + err = fileutils.ReflinkOrCopy(srcFile, blobFile) + if err != nil { + return "", -1, err + } + + _, err = blobFile.Seek(0, io.SeekStart) + if err != nil { + return "", -1, err + } + blobDigest, err := digest.FromReader(blobFile) + if err != nil { + return "", -1, err + } + + fileInfo, err := blobFile.Stat() + if err != nil { + return "", -1, err + } + + if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil { + return "", -1, err + } + + succeeded = true + return blobDigest, fileInfo.Size(), nil +} + +func ensureDirectoryExists(path string) error { + if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } + return nil +} + +// ensureParentDirectoryExists ensures the parent of the supplied path exists. +func ensureParentDirectoryExists(path string) error { + return ensureDirectoryExists(filepath.Dir(path)) +} + +// indexExists checks whether the index location specified in the OCI reference exists. +// The implementation is opinionated, since in case of unexpected errors false is returned +func indexExists(ref ociReference) bool { + err := fileutils.Exists(ref.indexPath()) + if err == nil { + return true + } + if os.IsNotExist(err) { + return false + } + return true +} diff --git a/tools/vendor/go.podman.io/image/v5/oci/layout/oci_src.go b/tools/vendor/go.podman.io/image/v5/oci/layout/oci_src.go new file mode 100644 index 000000000..f265a21d7 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/oci/layout/oci_src.go @@ -0,0 +1,248 @@ +package layout + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + + "github.com/docker/go-connections/tlsconfig" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/internal/imagesource/impl" + "go.podman.io/image/v5/internal/imagesource/stubs" + "go.podman.io/image/v5/internal/manifest" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/pkg/tlsclientconfig" + "go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/fileutils" +) + +// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough, +// but nothing matches the “image” part of the provided reference. +type ImageNotFoundError struct { + ref ociReference + // We may make members public, or add methods, in the future. 
+} + +func (e ImageNotFoundError) Error() string { + return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) +} + +type ociImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + + ref ociReference + index *imgspecv1.Index + descriptor imgspecv1.Descriptor + client *http.Client + sharedBlobDir string +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) { + tr := tlsclientconfig.NewTransport() + tr.TLSClientConfig = &tls.Config{ + // As of 2025-08, tlsconfig.ClientDefault() differs from Go 1.23 defaults only in CipherSuites; + // so, limit us to only using that value. If go-connections/tlsconfig changes its policy, we + // will want to consider that and make a decision whether to follow suit. + // There is some chance that eventually the Go default will be to require TLS 1.3, and that point + // we might want to drop the dependency on go-connections entirely. + CipherSuites: tlsconfig.ClientDefault().CipherSuites, + } + + if sys != nil && sys.OCICertPath != "" { + if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil { + return nil, err + } + tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify + } + + client := &http.Client{} + client.Transport = tr + descriptor, _, err := ref.getManifestDescriptor() + if err != nil { + return nil, err + } + index, err := ref.getIndex() + if err != nil { + return nil, err + } + s := &ociImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + index: index, + descriptor: descriptor, + client: client, + } + if sys != nil { + // TODO(jonboulle): check dir existence? + s.sharedBlobDir = sys.OCISharedBlobDirPath + } + s.Compat = impl.AddCompat(s) + return s, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ociImageSource) Close() error { + s.client.CloseIdleConnections() + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
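+//
+// Note: when instanceDigest names an entry that is not present in the index,
+// the manifest blob is still read from the blob store, and its MIME type is
+// guessed from the content since it cannot be looked up.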
+func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + var err error + + if instanceDigest == nil { + dig = s.descriptor.Digest + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + for _, md := range s.index.Manifests { + if md.Digest == dig { + mimeType = md.MediaType + break + } + } + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) + if err != nil { + return nil, "", err + } + + m, err := os.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + if mimeType == "" { + mimeType = manifest.GuessMIMEType(m) + } + + return m, mimeType, nil +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + r, s, err := s.getExternalBlob(ctx, info.URLs) + if err != nil { + return nil, 0, err + } else if r != nil { + return r, s, nil + } + } + + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) + if err != nil { + return nil, 0, err + } + + r, err := os.Open(path) + if err != nil { + return nil, 0, err + } + fi, err := r.Stat() + if err != nil { + return nil, 0, err + } + return r, fi.Size(), nil +} + +// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. +// This function can return nil reader when no url is supported by this function. In this case, the caller +// should fallback to fetch the non-external blob (i.e. pull from the registry). +func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + if len(urls) == 0 { + return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") + } + + errWrap := errors.New("failed fetching external blob from all urls") + hasSupportedURL := false + for _, u := range urls { + if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + continue // unsupported url. skip this url. + } + hasSupportedURL = true + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap) + continue + } + + resp, err := s.client.Do(req) + if err != nil { + errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + if !hasSupportedURL { + return nil, 0, nil // fallback to non-external blob + } + + return nil, 0, errWrap +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + +// GetLocalBlobPath returns the local path to the blob file with the given digest. +// The returned path is checked for existence so when a non existing digest is +// given an error will be returned. 
+//
+// Important: The returned path must be treated as read-only; writing to the file
+// would corrupt the OCI layout, as the digest would no longer match.
+func GetLocalBlobPath(ctx context.Context, src types.ImageSource, digest digest.Digest) (string, error) {
+	s, ok := src.(*ociImageSource)
+	if !ok {
+		return "", errors.New("caller error: GetLocalBlobPath called with a non-oci: source")
+	}
+
+	path, err := s.ref.blobPath(digest, s.sharedBlobDir)
+	if err != nil {
+		return "", err
+	}
+	if err := fileutils.Exists(path); err != nil {
+		return "", err
+	}
+
+	return path, nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/oci/layout/oci_transport.go b/tools/vendor/go.podman.io/image/v5/oci/layout/oci_transport.go
new file mode 100644
index 000000000..7b5086cd8
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/oci/layout/oci_transport.go
@@ -0,0 +1,304 @@
+package layout
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"go.podman.io/image/v5/directory/explicitfilepath"
+	"go.podman.io/image/v5/docker/reference"
+	"go.podman.io/image/v5/internal/image"
+	"go.podman.io/image/v5/internal/manifest"
+	"go.podman.io/image/v5/oci/internal"
+	"go.podman.io/image/v5/transports"
+	"go.podman.io/image/v5/types"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+var (
+	// Transport is an ImageTransport for OCI directories.
+	Transport = ociTransport{}
+
+	// ErrMoreThanOneImage is an error returned when the manifest includes
+	// more than one image and the user should choose which one to use.
+	ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image")
+)
+
+type ociTransport struct{}
+
+func (t ociTransport) Name() string {
+	return "oci"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be ""; that value is always allowed.
+func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
+	return internal.ValidateScope(scope)
+}
+
+// ociReference is an ImageReference for OCI directory paths.
+type ociReference struct {
+	// Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+	// Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on.
+
+	// Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid
+	// being exposed to symlinks and renames in the parent directories to the working directory).
+	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+	dir         string // As specified by the user. May be relative, contain symlinks, etc.
+	resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+ // If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source + // for destinations, the image name annotation "image.ref.name" is not added to the index.json. + // + // Must not be set if sourceIndex is set (the value is not -1). + image string + // If not -1, a zero-based index of an image in the manifest index. Valid only for sources. + // Must not be set if image is set. + sourceIndex int +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. +func ParseReference(reference string) (types.ImageReference, error) { + dir, image, index, err := internal.ParseReferenceIntoElements(reference) + if err != nil { + return nil, err + } + return newReference(dir, image, index) +} + +// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex. +// +// If sourceIndex==-1, the index will not be valid to point out the source image, only image will be used. +// We do not expose an API supplying the resolvedDir; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedDir. +func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir) + if err != nil { + return nil, err + } + + if err := internal.ValidateOCIPath(dir); err != nil { + return nil, err + } + + if err = internal.ValidateImageName(image); err != nil { + return nil, err + } + + if sourceIndex != -1 && sourceIndex < 0 { + return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex) + } + if sourceIndex != -1 && image != "" { + return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex) + } + return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil +} + +// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index. +func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) { + if sourceIndex < 0 { + return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex) + } + return newReference(dir, "", sourceIndex) +} + +// NewReference returns an OCI reference for a directory and an optional image name annotation (if not ""). +func NewReference(dir, image string) (types.ImageReference, error) { + return newReference(dir, image, -1) +} + +func (ref ociReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref ociReference) StringWithinTransport() string { + if ref.sourceIndex == -1 { + return fmt.Sprintf("%s:%s", ref.dir, ref.image) + } + return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. 
!reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return ref.resolvedDir
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedDir
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the "" global default,
+		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return image.FromReference(ctx, sys, ref)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If the index cannot be
+// opened or parsed, a nil pointer and an error are returned.
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + return parseIndex(ref.indexPath()) +} + +func parseIndex(path string) (*imgspecv1.Index, error) { + return parseJSON[imgspecv1.Index](path) +} + +func parseJSON[T any](path string) (*T, error) { + content, err := os.Open(path) + if err != nil { + return nil, err + } + defer content.Close() + + obj := new(T) + if err := json.NewDecoder(content).Decode(obj); err != nil { + return nil, err + } + return obj, nil +} + +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) { + index, err := ref.getIndex() + if err != nil { + return imgspecv1.Descriptor{}, -1, err + } + + switch { + case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references. + return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d", + ref.image, ref.sourceIndex) + + case ref.sourceIndex != -1: + if ref.sourceIndex >= len(index.Manifests) { + return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests)) + } + return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil + + case ref.image != "": + // if image specified, look through all manifests for a match + var unsupportedMIMETypes []string + for i, md := range index.Manifests { + if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { + if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex || md.MediaType == manifest.DockerV2Schema2MediaType || md.MediaType == manifest.DockerV2ListMediaType { + return md, i, nil + } + unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) + } + } + if len(unsupportedMIMETypes) != 0 { + return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + } + return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref} + + default: + // return manifest if only one image is in the oci directory + if len(index.Manifests) != 1 { + // ask user to choose image when more than one image in the oci directory + return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage + } + return index.Manifests[0], 0, nil + } +} + +// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name +// when pulling an image +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociRef, ok := imgRef.(ociReference) + if !ok { + return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef") + } + md, _, err := ociRef.getManifestDescriptor() + return md, err +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. 
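+// For example, for a layout rooted at "/tmp/img" this is "/tmp/img/oci-layout";
+// indexPath below resolves to "/tmp/img/index.json".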
+func (ref ociReference) ociLayoutPath() string {
+	return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile)
+}
+
+// indexPath returns a path for the index.json within a directory using OCI conventions.
+func (ref ociReference) indexPath() string {
+	return filepath.Join(ref.dir, imgspecv1.ImageIndexFile)
+}
+
+// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
+func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
+	if err := digest.Validate(); err != nil {
+		return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
+	}
+	var blobDir string
+	if sharedBlobDir != "" {
+		blobDir = sharedBlobDir
+	} else {
+		blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
+	}
+	return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/oci/layout/reader.go b/tools/vendor/go.podman.io/image/v5/oci/layout/reader.go
new file mode 100644
index 000000000..0a88e945e
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/oci/layout/reader.go
@@ -0,0 +1,52 @@
+package layout
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"go.podman.io/image/v5/types"
+)
+
+// This file is named reader.go for consistency with other transports’
+// handling of “image containers”, but we don’t actually need a stateful reader object.
+
+// ListResult wraps the image reference and the manifest descriptor for loading.
+type ListResult struct {
+	Reference          types.ImageReference
+	ManifestDescriptor imgspecv1.Descriptor
+}
+
+// List returns a ListResult for each manifest listed in the index of the OCI layout at dir.
+func List(dir string) ([]ListResult, error) {
+	var res []ListResult
+
+	indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile))
+	if err != nil {
+		return nil, err
+	}
+	var index imgspecv1.Index
+	if err := json.Unmarshal(indexJSON, &index); err != nil {
+		return nil, err
+	}
+
+	for manifestIndex, md := range index.Manifests {
+		refName := md.Annotations[imgspecv1.AnnotationRefName]
+		index := -1
+		if refName == "" {
+			index = manifestIndex
+		}
+		ref, err := newReference(dir, refName, index)
+		if err != nil {
+			return nil, fmt.Errorf("error creating image reference: %w", err)
+		}
+		reference := ListResult{
+			Reference:          ref,
+			ManifestDescriptor: md,
+		}
+		res = append(res, reference)
+	}
+	return res, nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/default.go b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/default.go
new file mode 100644
index 000000000..7fe615e34
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/default.go
@@ -0,0 +1,88 @@
+package blobinfocache
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/sirupsen/logrus"
+	"go.podman.io/image/v5/internal/rootless"
+	"go.podman.io/image/v5/pkg/blobinfocache/memory"
+	"go.podman.io/image/v5/pkg/blobinfocache/sqlite"
+	"go.podman.io/image/v5/types"
+)
+
+const (
+	// blobInfoCacheFilename is the file name used for blob info caches.
+	// If the format changes in an incompatible way, increase the version number.
+	blobInfoCacheFilename = "blob-info-cache-v1.sqlite"
+	// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+	systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged user’s home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+	if sys != nil && sys.BlobInfoCacheDir != "" {
+		return sys.BlobInfoCacheDir, nil
+	}
+
+	// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+	// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+	if euid == 0 {
+		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+		}
+		return systemBlobInfoCacheDir, nil
+	}
+
+	// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+	dataDir := os.Getenv("XDG_DATA_HOME")
+	if dataDir == "" {
+		home := os.Getenv("HOME")
+		if home == "" {
+			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+		}
+		dataDir = filepath.Join(home, ".local", "share")
+	}
+	return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+	dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+	if err != nil {
+		logrus.Debugf("Error determining a location for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
+		return memory.New()
+	}
+	path := filepath.Join(dir, blobInfoCacheFilename)
+	if err := os.MkdirAll(dir, 0700); err != nil {
+		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
+		return memory.New()
+	}
+
+	// It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
+	// as a global singleton, for the vast majority of callers who don’t override the cache location.
+	// OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely,
+	// and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%.
+
+	cache, err := sqlite.New(path)
+	if err != nil {
+		logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err)
+		return memory.New()
+	}
+	logrus.Debugf("Using SQLite blob info cache at %s", path)
+	return cache
+}
+
+// CleanupDefaultCache removes the blob info cache directory.
+// It deletes the cache directory, but does not affect any file or memory buffer currently
+// in use.
+func CleanupDefaultCache(sys *types.SystemContext) error {
+	dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+	if err != nil {
+		// Mirror the DefaultCache behavior that does not fail in this case.
+		return nil
+	}
+	return os.RemoveAll(dir)
+}
diff --git a/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
new file mode 100644
index 000000000..993c78030
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -0,0 +1,244 @@
+// Package prioritize provides utilities for filtering and prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
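+//
+// Roughly: candidates for the exact digest the caller asked for sort first, candidates
+// for its uncompressed counterpart sort last, and ties within a digest are broken by
+// how recently the blob was last seen (see candidateSortState.compare).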
+package prioritize
+
+import (
+	"cmp"
+	"slices"
+	"time"
+
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+	"go.podman.io/image/v5/internal/blobinfocache"
+	"go.podman.io/image/v5/internal/manifest"
+	"go.podman.io/image/v5/pkg/compression"
+	"go.podman.io/image/v5/types"
+)
+
+// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2.
+// This is a heuristic/guess, and could well use a different value.
+const replacementUnknownLocationAttempts = 2
+
+// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest,
+// which can be later combined with information about a location.
+type CandidateTemplate struct {
+	digest                 digest.Digest
+	compressionOperation   types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
+	compressionAlgorithm   *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+	compressionAnnotations map[string]string      // Annotations necessary to use compressionAlgorithm, if any
+}
+
+// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable
+// for a CandidateLocations* call with v2Options.
+//
+// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
+// if not nil, the call is assumed to be CandidateLocations2.
+func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate {
+	if v2Options == nil {
+		return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used.
+			digest: digest,
+		}
+	}
+
+	requiredCompression := "nil"
+	if v2Options.RequiredCompression != nil {
+		requiredCompression = v2Options.RequiredCompression.Name()
+	}
+	switch data.BaseVariantCompressor {
+	case blobinfocache.Uncompressed:
+		if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+			PossibleManifestFormats: v2Options.PossibleManifestFormats,
+			RequiredCompression:     v2Options.RequiredCompression,
+		}, nil) {
+			logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v",
+				digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
+			return nil
+		}
+		return &CandidateTemplate{
+			digest:                 digest,
+			compressionOperation:   types.Decompress,
+			compressionAlgorithm:   nil,
+			compressionAnnotations: nil,
+		}
+	case blobinfocache.UnknownCompression:
+		logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
+		return nil // Not allowed with CandidateLocations2
+	default:
+		// See if we can use the specific variant, first.
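+		// (A record can describe up to two variants of the same blob: a specific variant such as
+		// zstd:chunked, whose annotations enable partial reuse, and the plain base variant such as
+		// zstd. The specific variant is preferred whenever it satisfies the reuse conditions.)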
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor) + if err != nil { + logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v", + data.SpecificVariantCompressor, digest.String(), err) + } else { + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v", + data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + } else { + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: data.SpecificVariantAnnotations, + } + } + } + } + + // Try the base variant. + algo, err := compression.AlgorithmByName(data.BaseVariantCompressor) + if err != nil { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v", + digest.String(), data.BaseVariantCompressor, err) + return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required + } + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v", + digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: nil, + } + } +} + +// CandidateWithTime is the input to types.BICReplacementCandidate prioritization. +type CandidateWithTime struct { + candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + lastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) +} + +// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen) +func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: false, + Location: location, + }, + lastSeen: lastSeen, + } +} + +// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location. 
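+// lastSeen is left as the zero time.Time: with no location, there is no existence observation to timestamp.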
+func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + lastSeen: time.Time{}, + } +} + +// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize, +// along with the specially-treated digest values relevant to the ordering. +type candidateSortState struct { + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest +} + +func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { + // primaryDigest entries come first, more recent first. + // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. + // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) + + // First, deal with the primaryDigest/uncompressedDigest cases: + if xi.candidate.Digest != xj.candidate.Digest { + // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter + if xi.candidate.Digest == css.primaryDigest { + return -1 + } + if xj.candidate.Digest == css.primaryDigest { + return 1 + } + if css.uncompressedDigest != "" { + if xi.candidate.Digest == css.uncompressedDigest { + return 1 + } + if xj.candidate.Digest == css.uncompressedDigest { + return -1 + } + } + } else { // xi.Candidate.Digest == xj.Candidate.Digest + // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time + if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) { + return -xi.lastSeen.Compare(xj.lastSeen) + } + } + + // Neither of the digests are primaryDigest/uncompressedDigest: + if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order primarily by time + return -cmp + } + // Fall back to digest, if timestamps end up _exactly_ the same (how?!) + return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest) +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the +// number of entries to limit for known and unknown location separately, only to make testing simpler. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { + // split unknown candidates and known candidates + // and limit them separately. + var knownLocationCandidates []CandidateWithTime + var unknownLocationCandidates []CandidateWithTime + // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should + // compare equal. 
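+	// (Even if two timestamps did collide, candidateSortState.compare falls back to comparing
+	// digests, so the order is deterministic either way.)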
+	slices.SortFunc(cs, (&candidateSortState{
+		primaryDigest:      primaryDigest,
+		uncompressedDigest: uncompressedDigest,
+	}).compare)
+	for _, candidate := range cs {
+		if candidate.candidate.UnknownLocation {
+			unknownLocationCandidates = append(unknownLocationCandidates, candidate)
+		} else {
+			knownLocationCandidates = append(knownLocationCandidates, candidate)
+		}
+	}
+
+	knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
+	remainingCapacity := totalLimit - knownLocationCandidatesUsed
+	unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
+	res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
+	for i := 0; i < knownLocationCandidatesUsed; i++ {
+		res[i] = knownLocationCandidates[i].candidate
+	}
+	// If any candidates with an unknown location were found, add them to the final list.
+	for i := 0; i < unknownLocationCandidatesUsed; i++ {
+		res = append(res, unknownLocationCandidates[i].candidate)
+	}
+	return res
+}
+
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), and returns an
+// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
+	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
+}
diff --git a/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/memory/memory.go b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/memory/memory.go
new file mode 100644
index 000000000..84e9b7d6d
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/memory/memory.go
@@ -0,0 +1,255 @@
+// Package memory implements an in-memory BlobInfoCache.
+package memory
+
+import (
+	"sync"
+	"time"
+
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+	"go.podman.io/image/v5/internal/blobinfocache"
+	"go.podman.io/image/v5/internal/set"
+	"go.podman.io/image/v5/pkg/blobinfocache/internal/prioritize"
+	"go.podman.io/image/v5/types"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+	transport  string
+	scope      types.BICTransportScope
+	blobDigest digest.Digest
+}
+
+// cache implements an in-memory-only BlobInfoCache.
+type cache struct {
+	mutex sync.Mutex
+	// The following fields can only be accessed with mutex held.
+	uncompressedDigests      map[digest.Digest]digest.Digest
+	uncompressedDigestsByTOC map[digest.Digest]digest.Digest
+	digestsByUncompressed    map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
+	knownLocations           map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+	compressors              map[digest.Digest]blobinfocache.DigestCompressorData     // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
+}
+
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+	return new2()
+}
+
+func new2() *cache {
+	return &cache{
+		uncompressedDigests:      map[digest.Digest]digest.Digest{},
+		uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
+		digestsByUncompressed:    map[digest.Digest]*set.Set[digest.Digest]{},
+		knownLocations:           map[locationKey]map[types.BICLocationReference]time.Time{},
+		compressors:              map[digest.Digest]blobinfocache.DigestCompressorData{},
+	}
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (mem *cache) Open() {
+}
+
+// Close destroys state created by Open().
+func (mem *cache) Close() {
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	return mem.uncompressedDigestLocked(anyDigest)
+}
+
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
+	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+		return d
+	}
+	// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
+		return anyDigest
+	}
+	return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
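+//
+// For example, a (hypothetical) caller might decompress the blob itself and digest the result
+// before recording the pair:
+//
+//	uncompressed := digest.FromBytes(decompressedPayload) // computed locally, not taken from a manifest
+//	cache.RecordDigestUncompressedPair(compressedDigest, uncompressed)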
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+		logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+	}
+	mem.uncompressedDigests[anyDigest] = uncompressed
+
+	anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+	if !ok {
+		anyDigestSet = set.New[digest.Digest]()
+		mem.digestsByUncompressed[uncompressed] = anyDigestSet
+	}
+	anyDigestSet.Add(anyDigest)
+}
+
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok {
+		return d
+	}
+	return ""
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed {
+		logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
+	}
+	mem.uncompressedDigestsByTOC[tocDigest] = uncompressed
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+	locationScope, ok := mem.knownLocations[key]
+	if !ok {
+		locationScope = map[types.BICLocationReference]time.Time{}
+		mem.knownLocations[key] = locationScope
+	}
+	locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// RecordDigestCompressorData records data for the blob with the specified digest.
+// WARNING: Only call this with LOCALLY VERIFIED data:
+//   - don’t record a compressor for a digest just because some remote author claims so
+//     (e.g. because a manifest says so);
+//   - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+//     and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+//     in a manifest)
+//
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.compressors[anyDigest]; ok {
+		if previous.BaseVariantCompressor != data.BaseVariantCompressor {
+			logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor)
+		} else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression &&
+			previous.SpecificVariantCompressor != data.SpecificVariantCompressor {
+			logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor)
+		}
+		// We don’t check SpecificVariantAnnotations for equality; it’s possible that their generation is not deterministic.
+
+		// Preserve specific variant information if the incoming data does not have it.
+		if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression &&
+			previous.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			data.SpecificVariantCompressor = previous.SpecificVariantCompressor
+			data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations
+		}
+	}
+	if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
+		delete(mem.compressors, anyDigest)
+		return
+	}
+	mem.compressors[anyDigest] = data
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
+// with corresponding compression info from mem.compressors, and returns the result of appending
+// them to candidates.
+// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
+// with unknown compression.
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
+	v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
+	compressionData := blobinfocache.DigestCompressorData{
+		BaseVariantCompressor:      blobinfocache.UnknownCompression,
+		SpecificVariantCompressor:  blobinfocache.UnknownCompression,
+		SpecificVariantAnnotations: nil,
+	}
+	if v, ok := mem.compressors[digest]; ok {
+		compressionData = v
+	}
+	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
+	if template == nil {
+		return candidates
+	}
+	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+	if len(locations) > 0 {
+		for l, t := range locations {
+			candidates = append(candidates, template.CandidateWithLocation(l, t))
+		}
+	} else if v2Options != nil {
+		candidates = append(candidates, template.CandidateWithUnknownLocation())
+	}
+	return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
+}
+
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport scope) (if they still
+// exist, which is not guaranteed).
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
+}
+
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+	v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	res := []prioritize.CandidateWithTime{}
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options)
+	var uncompressedDigest digest.Digest // = ""
+	if canSubstitute {
+		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
+			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+			if otherDigests != nil {
+				for d := range otherDigests.All() {
+					if d != primaryDigest && d != uncompressedDigest {
+						res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
+					}
+				}
+			}
+			if uncompressedDigest != primaryDigest {
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options)
+			}
+		}
+	}
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/tools/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/none/none.go
similarity index 97%
rename from tools/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
rename to tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/none/none.go
index 9a2219e79..88c9024fd 100644
--- a/tools/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
+++ b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/none/none.go
@@ -2,9 +2,9 @@
 package none
 
 import (
-	"github.com/containers/image/v5/internal/blobinfocache"
-	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	"go.podman.io/image/v5/internal/blobinfocache"
+	"go.podman.io/image/v5/types"
 )
 
 // noCache implements a dummy BlobInfoCache which records no data.
diff --git a/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/sqlite/sqlite.go
new file mode 100644
index 000000000..1d0127573
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/pkg/blobinfocache/sqlite/sqlite.go
@@ -0,0 +1,682 @@
+// Package sqlite implements a BlobInfoCache backed by SQLite.
+package sqlite
+
+import (
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	_ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend for database/sql
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+	"go.podman.io/image/v5/internal/blobinfocache"
+	"go.podman.io/image/v5/pkg/blobinfocache/internal/prioritize"
+	"go.podman.io/image/v5/types"
+)
+
+const (
+	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+	// we can simply start over with a different filename; update blobInfoCacheFilename.
+	// That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema
+	// (which would require compatibility in both directions).
+
+	// Assembled sqlite options used when opening the database.
+	sqliteOptions = "?" +
+		// Deal with timezone automatically.
+		// go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset.
+		// _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or)
+		// if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location;
+		// (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used).
+		"_loc=auto" +
+		// Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous).
+		"&_sync=FULL" +
+		// Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys).
+		// We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons).
+		"&_foreign_keys=1" +
+		// Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html);
+		// i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock,
+		// never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously).
+		//
+		// This, together with go-sqlite3’s default for _busy_timeout=5000, means that we should never see a “database is locked” error,
+		// the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous
+		// holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented.
+		// Compare https://github.com/mattn/go-sqlite3/issues/274 .
+		//
+		// Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167
+		// or https://github.com/mattn/go-sqlite3/issues/400 .
+		// The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings,
+		// which seems rather wasteful.
+		"&_txlock=exclusive"
+)
+
+// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path.
+type cache struct {
+	path string
+
+	// The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
+	// That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
+	// the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
+	// incidental even to the image operations.
It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
+	// a Close method, so creating a lot of single-use caches could leak data.
+	//
+	// Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image.
+	// This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open.
+	// Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single
+	// *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every
+	// single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation,
+	// somewhere around "700").
+
+	lock sync.Mutex
+	// The following fields can only be accessed with lock held.
+	refCount int     // number of outstanding Open() calls
+	db       *sql.DB // nil if not set (may happen even if refCount > 0 on errors)
+}
+
+// New returns a BlobInfoCache implementation which uses a SQLite file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) (types.BlobInfoCache, error) {
+	return new2(path)
+}
+
+func new2(path string) (*cache, error) {
+	db, err := rawOpen(path)
+	if err != nil {
+		return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
+	}
+	err = func() (retErr error) { // A scope for defer
+		defer func() {
+			closeErr := db.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
+		}()
+		// We don’t check the schema before every operation, because that would be costly
+		// and because we assume schema changes will be handled by using a different path.
+		return ensureDBHasCurrentSchema(db)
+	}()
+	if err != nil {
+		return nil, err
+	}
+	return &cache{
+		path:     path,
+		refCount: 0,
+		db:       nil,
+	}, nil
+}
+
+// rawOpen returns a new *sql.DB for path.
+// The caller should arrange for it to be .Close()d.
+func rawOpen(path string) (*sql.DB, error) {
+	// This exists to centralize the use of sqliteOptions.
+	return sql.Open("sqlite3", path+sqliteOptions)
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (sqc *cache) Open() {
+	sqc.lock.Lock()
+	defer sqc.lock.Unlock()
+
+	if sqc.refCount == 0 {
+		db, err := rawOpen(sqc.path)
+		if err != nil {
+			logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
+			db = nil // But still increase sqc.refCount, because a .Close() will happen
+		}
+		sqc.db = db
+	}
+	sqc.refCount++
+}
+
+// Close destroys state created by Open().
+func (sqc *cache) Close() {
+	sqc.lock.Lock()
+	defer sqc.lock.Unlock()
+
+	switch sqc.refCount {
+	case 0:
+		logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()")
+		return
+	case 1:
+		if sqc.db != nil {
+			sqc.db.Close()
+			sqc.db = nil
+		}
+	}
+	sqc.refCount--
+}
+
+type void struct{} // So that we don’t have to write struct{}{} all over the place
+
+// transaction calls fn within a read-write transaction in sqc.
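+// If the cache does not currently hold an Open()ed connection, a connection is opened
+// and closed around this single transaction.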
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (_ T, retErr error) {
+	db, closeDB, err := func() (*sql.DB, func() error, error) { // A scope for defer
+		sqc.lock.Lock()
+		defer sqc.lock.Unlock()
+
+		if sqc.db != nil {
+			return sqc.db, func() error { return nil }, nil
+		}
+		db, err := rawOpen(sqc.path)
+		if err != nil {
+			return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
+		}
+		return db, db.Close, nil
+	}()
+	if err != nil {
+		var zeroRes T // A zero value of T
+		return zeroRes, err
+	}
+	defer func() {
+		closeErr := closeDB()
+		if retErr == nil {
+			retErr = closeErr
+		}
+	}()
+
+	return dbTransaction(db, fn)
+}
+
+// dbTransaction calls fn within a read-write transaction in db.
+func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
+	// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
+
+	var zeroRes T // A zero value of T
+
+	tx, err := db.Begin()
+	if err != nil {
+		return zeroRes, fmt.Errorf("beginning transaction: %w", err)
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			if err := tx.Rollback(); err != nil {
+				logrus.Errorf("Rolling back transaction: %v", err)
+			}
+		}
+	}()
+
+	res, err := fn(tx)
+	if err != nil {
+		return zeroRes, err
+	}
+	if err := tx.Commit(); err != nil {
+		return zeroRes, fmt.Errorf("committing transaction: %w", err)
+	}
+
+	succeeded = true
+	return res, nil
+}
+
+// querySingleValue executes a SELECT which is expected to return at most one row with a single column.
+// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned.
+func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) {
+	var value T
+	if err := tx.QueryRow(query, params...).Scan(&value); err != nil {
+		var zeroValue T // A zero value of T
+		if errors.Is(err, sql.ErrNoRows) {
+			return zeroValue, false, nil
+		}
+		return zeroValue, false, err
+	}
+	return value, true, nil
+}
+
+// ensureDBHasCurrentSchema adds the necessary tables and indices to a database.
+// This is typically used when creating a previously-nonexistent database.
+// We don’t really anticipate schema migrations; with c/image usually vendored, not using
+// shared libraries, migrating a schema on an existing database would affect old-version users.
+// Instead, schema changes are likely to be implemented by using a different cache file name,
+// and leaving existing caches around for old users.
+func ensureDBHasCurrentSchema(db *sql.DB) error {
+	// Considered schema design alternatives:
+	//
+	// (Overall, considering the network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring
+	// to the blob info cache, it seems reasonable to prioritize readability over microoptimization of this database.)
+	//
+	// * This schema uses the text representation of digests.
+	//
+	// We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation;
+	// and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm
+	// is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that
+	// into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm
+	// — and that would require us to implement two code paths, one of them basically never exercised / never tested.
+ // + // * There are two separate items for recording the uncompressed digest and digest compressors. + // Alternatively, we could have a single "digest facts" table with NULLable columns. + // + // The way the BlobInfoCache API works, we are only going to write one value at a time, so + // sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples). + // Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs + // do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then + // we search in DigestUncompressedPairs by uncompressed digest, not by the primary key). + // + // Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to + // do a more verbose ON CONFLICT(…) DO UPDATE SET … = …. + // + // * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests. + // + // Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra + // join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups) + // is probably costlier than comparing a few more bytes of data. + // + // Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without + // having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally). + // + items := []struct{ itemName, command string }{ + { + "DigestUncompressedPairs", + `CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` + + // index implied by PRIMARY KEY + `anyDigest TEXT PRIMARY KEY NOT NULL,` + + // DigestUncompressedPairs_index_uncompressedDigest + `uncompressedDigest TEXT NOT NULL + )`, + }, + { + "DigestUncompressedPairs_index_uncompressedDigest", + `CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`, + }, + { + "DigestCompressors", + `CREATE TABLE IF NOT EXISTS DigestCompressors(` + + // index implied by PRIMARY KEY + `digest TEXT PRIMARY KEY NOT NULL,` + + // May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression). + `compressor TEXT NOT NULL + )`, + }, + { + "KnownLocations", + `CREATE TABLE IF NOT EXISTS KnownLocations( + transport TEXT NOT NULL, + scope TEXT NOT NULL, + digest TEXT NOT NULL, + location TEXT NOT NULL,` + + // TIMESTAMP is parsed by SQLITE as a NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics) + // format "2006-01-02 15:04:05.999999999-07:00". + // See also the _loc option in the sql.Open data source name. + `time TIMESTAMP NOT NULL,` + + // Implies an index. + // We also search by (transport, scope, digest), that doesn’t need an extra index + // because it is a prefix of the implied primary-key index. + `PRIMARY KEY (transport, scope, digest, location) + )`, + }, + { + "DigestTOCUncompressedPairs", + `CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` + + // index implied by PRIMARY KEY + `tocDigest TEXT PRIMARY KEY NOT NULL,` + + `uncompressedDigest TEXT NOT NULL + )`, + }, + { + "DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors. 
+			`CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest TEXT PRIMARY KEY NOT NULL,` +
+				// The compressor is not `UnknownCompression`.
+				`specificVariantCompressor TEXT NOT NULL,
+				specificVariantAnnotations BLOB NOT NULL
+			)`,
+		},
+	}
+
+	_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
+		// If the last-created item exists, assume nothing needs to be done.
+		lastItemName := items[len(items)-1].itemName
+		_, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
+		if err != nil {
+			return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
+		}
+		if !found {
+			// Item does not exist, assuming a fresh database.
+			for _, i := range items {
+				if _, err := tx.Exec(i.command); err != nil {
+					return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
+				}
+			}
+		}
+		return void{}, nil
+	})
+	return err
+}
+
+// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
+func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
+	uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		d, err := digest.Parse(uncompressedString)
+		if err != nil {
+			return "", err
+		}
+		return d, nil
+	}
+	// A record as uncompressedDigest implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	_, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		return anyDigest, nil
+	}
+	return "", nil
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+		return sqc.uncompressedDigest(tx, anyDigest)
+	})
+	if err != nil {
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for uncompressed digest for %q: %w", anyDigest, err)
+		}
+		if gotPrevious {
+			previous, err := digest.Parse(previousString)
+			if err != nil {
+				return void{}, err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+			}
+		}
+		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
+			anyDigest.String(), uncompressed.String()); err != nil {
+			return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+		uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
+		if err != nil {
+			return "", err
+		}
+		if found {
+			d, err := digest.Parse(uncompressedString)
+			if err != nil {
+				return "", err
+			}
+			return d, nil
+		}
+		return "", nil
+	})
+	if err != nil {
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q: %w", tocDigest, err)
+		}
+		if gotPrevious {
+			previous, err := digest.Parse(previousString)
+			if err != nil {
+				return void{}, err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
+			}
+		}
+		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)",
+			tocDigest.String(), uncompressed.String()); err != nil {
+			return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)",
+			transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry.
+			return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w",
+				location.Opaque, transport.Name(), scope.Opaque, digest.String(), err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordDigestCompressorData records data for the blob with the specified digest.
+// WARNING: Only call this with LOCALLY VERIFIED data:
+//   - don’t record a compressor for a digest just because some remote author claims so
+//     (e.g. because a manifest says so);
+//   - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+//     and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+//     in a manifest)
+//
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for compressor of %q: %w", anyDigest, err)
+		}
+		warned := false
+		if gotPrevious && previous != data.BaseVariantCompressor {
+			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
+			warned = true
+		}
+		if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
+			if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+				return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
+			}
+			if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+				return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err)
+			}
+		} else {
+			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
+				anyDigest.String(), data.BaseVariantCompressor); err != nil {
+				return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err)
+			}
+		}
+
+		if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			if !warned { // Don’t warn twice about the same digest
+				prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String())
+				if err != nil {
+					return void{}, fmt.Errorf("looking for specific variant compressor of %q: %w", anyDigest, err)
+				}
+				if found && data.SpecificVariantCompressor != prevSVC {
+					logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC,
data.SpecificVariantCompressor) + } + } + annotations, err := json.Marshal(data.SpecificVariantAnnotations) + if err != nil { + return void{}, err + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)", + anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil { + return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err) + } + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), +// and returns the result of appending them to candidates. +// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates +// with unknown compression. +func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, + v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) { + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } + if v2Options != nil { + var baseVariantCompressor string + var specificVariantCompressor sql.NullString + var annotationBytes []byte + switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+ + "FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()). + Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); { + case errors.Is(err, sql.ErrNoRows): // Do nothing + case err != nil: + return nil, fmt.Errorf("scanning compressor data: %w", err) + default: + compressionData.BaseVariantCompressor = baseVariantCompressor + if specificVariantCompressor.Valid && annotationBytes != nil { + compressionData.SpecificVariantCompressor = specificVariantCompressor.String + if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil { + return nil, err + } + } + } + } + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { + return candidates, nil + } + + rows, err := tx.Query("SELECT location, time FROM KnownLocations "+ + "WHERE transport = ? AND scope = ? 
AND KnownLocations.digest = ?",
+		transport.Name(), scope.Opaque, digest.String())
+	if err != nil {
+		return nil, fmt.Errorf("looking up candidate locations: %w", err)
+	}
+	defer rows.Close()
+
+	rowAdded := false
+	for rows.Next() {
+		var location string
+		var t time.Time
+		if err := rows.Scan(&location, &t); err != nil {
+			return nil, fmt.Errorf("scanning candidate: %w", err)
+		}
+		candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, t))
+		rowAdded = true
+	}
+	if err := rows.Err(); err != nil {
+		return nil, fmt.Errorf("iterating through locations: %w", err)
+	}
+
+	if !rowAdded && v2Options != nil {
+		candidates = append(candidates, template.CandidateWithUnknownLocation())
+	}
+	return candidates, nil
+}
+
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport scope) (if they still
+// exist, which is not guaranteed).
+func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options)
+}
+
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+	v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	var uncompressedDigest digest.Digest // = ""
+	res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
+		res := []prioritize.CandidateWithTime{}
+		res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options)
+		if err != nil {
+			return nil, err
+		}
+		if canSubstitute {
+			uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest)
+			if err != nil {
+				return nil, err
+			}
+			if uncompressedDigest != "" {
+				// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
+				// (In the extreme, we could turn _everything_ this function does into a single query.
+				// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
+				// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
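+				// A single-query variant might look roughly like the following (an untested sketch,
+				// not something this implementation runs):
+				//
+				//	SELECT anyDigest FROM DigestUncompressedPairs
+				//	JOIN KnownLocations ON KnownLocations.digest = DigestUncompressedPairs.anyDigest
+				//	WHERE uncompressedDigest = ? AND transport = ? AND scope = ?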
+				rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
+				if err != nil {
+					return nil, fmt.Errorf("querying for other digests: %w", err)
+				}
+				defer rows.Close()
+				for rows.Next() {
+					var otherDigestString string
+					if err := rows.Scan(&otherDigestString); err != nil {
+						return nil, fmt.Errorf("scanning other digest: %w", err)
+					}
+					otherDigest, err := digest.Parse(otherDigestString)
+					if err != nil {
+						return nil, err
+					}
+					if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
+						res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
+						if err != nil {
+							return nil, err
+						}
+					}
+				}
+				if err := rows.Err(); err != nil {
+					return nil, fmt.Errorf("iterating through other digests: %w", err)
+				}
+
+				if uncompressedDigest != primaryDigest {
+					res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+		return res, nil
+	})
+	if err != nil {
+		return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil))
+}
diff --git a/tools/vendor/go.podman.io/image/v5/pkg/compression/compression.go b/tools/vendor/go.podman.io/image/v5/pkg/compression/compression.go
new file mode 100644
index 000000000..6a6c4d4a3
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/pkg/compression/compression.go
@@ -0,0 +1,175 @@
+package compression
+
+import (
+	"bytes"
+	"compress/bzip2"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/pgzip"
+	"github.com/sirupsen/logrus"
+	"github.com/ulikunitz/xz"
+	"go.podman.io/image/v5/pkg/compression/internal"
+	"go.podman.io/image/v5/pkg/compression/types"
+	"go.podman.io/storage/pkg/chunked/compressor"
+)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm = types.Algorithm
+
+var (
+	// Gzip compression.
+	Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "",
+		[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
+	// Bzip2 compression.
+	Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "",
+		[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
+	// Xz compression.
+	Xz = internal.NewAlgorithm(types.XzAlgorithmName, "",
+		[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
+	// Zstd compression.
+	Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "",
+		[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+	// ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
+	ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName,
+		nil, ZstdDecompressor, compressor.ZstdCompressor)
+
+	compressionAlgorithms = map[string]Algorithm{
+		Gzip.Name():        Gzip,
+		Bzip2.Name():       Bzip2,
+		Xz.Name():          Xz,
+		Zstd.Name():        Zstd,
+		ZstdChunked.Name(): ZstdChunked,
+	}
+)
+
+// AlgorithmByName returns the Algorithm corresponding to name.
+func AlgorithmByName(name string) (Algorithm, error) {
+	algorithm, ok := compressionAlgorithms[name]
+	if ok {
+		return algorithm, nil
+	}
+	return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
+}
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
+func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return pgzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
+	return io.NopCloser(bzip2.NewReader(r)), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
+func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
+	r, err := xz.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return io.NopCloser(r), nil
+}
+
+// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
+func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	if level != nil {
+		return pgzip.NewWriterLevel(r, *level)
+	}
+	return pgzip.NewWriter(r), nil
+}
+
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return xz.NewWriter(r)
+}
+
+// CompressStream returns a writer that compresses data written to it into dest, using the specified algorithm at the specified level.
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+	m := map[string]string{}
+	return internal.AlgorithmCompressor(algo)(dest, m, level)
+}
+
+// CompressStreamWithMetadata is like CompressStream, but can also collect metadata generated by the compressor.
+//
+// Compressing a stream may create integrity data that allows consuming the compressed byte stream
+// while only using subsets of the compressed data (if the compressed data is seekable and most
+// of the uncompressed data is already present via other means), while still protecting integrity
+// of the compressed stream against unwanted modification. (In OCI container images, this metadata
+// is usually carried in manifest annotations.)
+//
+// Such a partial decompression is not implemented by this package; it is consumed e.g. by
+// github.com/containers/storage/pkg/chunked .
+//
+// If the compression generates such metadata, it is written to the provided metadata map.
+func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return internal.AlgorithmCompressor(algo)(dest, metadata, level)
+}
+
+// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid
+// value and nil otherwise.
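+// (For example, input starting with the bytes 0x1F 0x8B 0x08, the gzip prefix registered above, would be detected as Gzip.)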
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. +func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) { + buffer := [8]byte{} + + n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. + // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. + return Algorithm{}, nil, nil, err + } + + var retAlgo Algorithm + var decompressor DecompressorFunc + for _, algo := range compressionAlgorithms { + prefix := internal.AlgorithmPrefix(algo) + if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) { + logrus.Debugf("Detected compression format %s", algo.Name()) + retAlgo = algo + decompressor = internal.AlgorithmDecompressor(algo) + break + } + } + if decompressor == nil { + logrus.Debugf("No compression detected") + } + + return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil +} + +// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. +// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. +func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { + _, d, r, e := DetectCompressionFormat(input) + return d, r, e +} + +// AutoDecompress takes a stream and returns an uncompressed version of the +// same stream. +// The caller must call Close() on the returned stream (even if the input does not need, +// or does not even support, closing!). +func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { + decompressor, stream, err := DetectCompression(stream) + if err != nil { + return nil, false, fmt.Errorf("detecting compression: %w", err) + } + var res io.ReadCloser + if decompressor != nil { + res, err = decompressor(stream) + if err != nil { + return nil, false, fmt.Errorf("initializing decompression: %w", err) + } + } else { + res = io.NopCloser(stream) + } + return res, decompressor != nil, nil +} diff --git a/tools/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/tools/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go similarity index 84% rename from tools/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go rename to tools/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go index d6f85274d..e715705b4 100644 --- a/tools/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go +++ b/tools/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go @@ -3,6 +3,15 @@ package internal import "io" // CompressorFunc writes the compressed stream to the given writer using the specified compression level. +// +// Compressing a stream may create integrity data that allows consuming the compressed byte stream +// while only using subsets of the compressed data (if the compressed data is seekable and most +// of the uncompressed data is already present via other means), while still protecting integrity +// of the compressed stream against unwanted modification. (In OCI container images, this metadata +// is usually carried in manifest annotations.) 
+// +// If the compression generates such metadata, it is written to the provided metadata map. +// // The caller must call Close() on the stream (even if the input stream does not need closing!). type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error) diff --git a/tools/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/tools/vendor/go.podman.io/image/v5/pkg/compression/types/types.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/pkg/compression/types/types.go rename to tools/vendor/go.podman.io/image/v5/pkg/compression/types/types.go index 43d03b601..197122c7b 100644 --- a/tools/vendor/github.com/containers/image/v5/pkg/compression/types/types.go +++ b/tools/vendor/go.podman.io/image/v5/pkg/compression/types/types.go @@ -1,7 +1,7 @@ package types import ( - "github.com/containers/image/v5/pkg/compression/internal" + "go.podman.io/image/v5/pkg/compression/internal" ) // DecompressorFunc returns the decompressed stream, given a compressed stream. diff --git a/tools/vendor/go.podman.io/image/v5/pkg/compression/zstd.go b/tools/vendor/go.podman.io/image/v5/pkg/compression/zstd.go new file mode 100644 index 000000000..39ae014d2 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/pkg/compression/zstd.go @@ -0,0 +1,59 @@ +package compression + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} + +func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// zstdCompressor is a CompressorFunc for the zstd compression algorithm. +func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + return zstdWriter(r) + } + return zstdWriterWithLevel(r, *level) +} + +// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. 
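+//
+// A minimal usage sketch (r and dst are placeholder variables, not part of this package):
+//
+//	rc, err := ZstdDecompressor(r)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	_, err = io.Copy(dst, rc)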
+func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { + return zstdReader(r) +} diff --git a/tools/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/tools/vendor/go.podman.io/image/v5/pkg/docker/config/config.go similarity index 96% rename from tools/vendor/github.com/containers/image/v5/pkg/docker/config/config.go rename to tools/vendor/go.podman.io/image/v5/pkg/docker/config/config.go index da2238a0b..56d4eb916 100644 --- a/tools/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/tools/vendor/go.podman.io/image/v5/pkg/docker/config/config.go @@ -6,23 +6,25 @@ import ( "errors" "fmt" "io/fs" + "iter" + "maps" "os" "os/exec" "path/filepath" "runtime" "strings" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/multierr" - "github.com/containers/image/v5/internal/set" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/ioutils" helperclient "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/multierr" + "go.podman.io/image/v5/internal/set" + "go.podman.io/image/v5/pkg/sysregistriesv2" + "go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/homedir" + "go.podman.io/storage/pkg/ioutils" ) type dockerAuthConfig struct { @@ -93,9 +95,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Credential helpers in the auth file have a // direct mapping to a registry, so we can just // walk the map. - for registry := range fileContents.CredHelpers { - allKeys.Add(registry) - } + allKeys.AddSeq(maps.Keys(fileContents.CredHelpers)) for key := range fileContents.AuthConfigs { key := normalizeAuthFileKey(key, path.legacyFormat) if key == normalizedDockerIORegistry { @@ -115,16 +115,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon return nil, err } } - for registry := range creds { - allKeys.Add(registry) - } + allKeys.AddSeq(maps.Keys(creds)) } } // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. allCreds := make(map[string]types.DockerAuthConfig) - for _, key := range allKeys.Values() { + for key := range allKeys.All() { creds, err := GetCredentials(sys, key) if err != nil { // Note: we rely on the logging in `GetCredentials`. @@ -818,16 +816,10 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // Support sub-registry namespaces in auth. // (This is not a feature of ~/.docker/config.json; we support it even for // those files as an extension.) - var keys []string - if !path.legacyFormat { - keys = authKeysForKey(key) - } else { - keys = []string{registry} - } - + // // Repo or namespace keys are only supported as exact matches. For registry // keys we prefer exact matches as well. 
- for _, key := range keys { + for key := range authKeyLookupOrder(key, registry, path.legacyFormat) { if val, exists := fileContents.AuthConfigs[key]; exists { return decodeDockerAuth(path.path, key, val) } @@ -854,25 +846,33 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut return types.DockerAuthConfig{}, nil } -// authKeysForKey returns the keys matching a provided auth file key, in order -// from the best match to worst. For example, +// authKeyLookupOrder returns a sequence for lookup keys matching (key or registry) +// in file with legacyFormat, in order from the best match to worst. +// For example, in a non-legacy file, // when given a repository key "quay.io/repo/ns/image", it returns // - quay.io/repo/ns/image // - quay.io/repo/ns // - quay.io/repo // - quay.io -func authKeysForKey(key string) (res []string) { - for { - res = append(res, key) +func authKeyLookupOrder(key, registry string, legacyFormat bool) iter.Seq[string] { + return func(yield func(string) bool) { + if legacyFormat { + _ = yield(registry) // We stop in any case + return + } + + for { + if !yield(key) { + return + } - lastSlash := strings.LastIndex(key, "/") - if lastSlash == -1 { - break + lastSlash := strings.LastIndex(key, "/") + if lastSlash == -1 { + break + } + key = key[:lastSlash] } - key = key[:lastSlash] } - - return res } // decodeDockerAuth decodes the username and password from conf, diff --git a/tools/vendor/github.com/containers/image/v5/pkg/strslice/README.md b/tools/vendor/go.podman.io/image/v5/pkg/strslice/README.md similarity index 100% rename from tools/vendor/github.com/containers/image/v5/pkg/strslice/README.md rename to tools/vendor/go.podman.io/image/v5/pkg/strslice/README.md diff --git a/tools/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go b/tools/vendor/go.podman.io/image/v5/pkg/strslice/strslice.go similarity index 100% rename from tools/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go rename to tools/vendor/go.podman.io/image/v5/pkg/strslice/strslice.go diff --git a/tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_common.go similarity index 96% rename from tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go rename to tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_common.go index 07fe50294..c9e8ac5cb 100644 --- a/tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go +++ b/tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package sysregistriesv2 diff --git a/tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_freebsd.go similarity index 96% rename from tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go rename to tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_freebsd.go index 741b99f8f..7dada4b77 100644 --- a/tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go +++ b/tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package sysregistriesv2 diff --git a/tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/shortnames.go similarity index 95% rename from 
tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go rename to tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/shortnames.go index 71f5bc837..8c72ce7ff 100644 --- a/tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/shortnames.go @@ -9,13 +9,13 @@ import ( "strings" "github.com/BurntSushi/toml" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/multierr" - "github.com/containers/image/v5/internal/rootless" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/multierr" + "go.podman.io/image/v5/internal/rootless" + "go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/homedir" + "go.podman.io/storage/pkg/lockfile" ) // defaultShortNameMode is the default mode of registries.conf files if the @@ -134,7 +134,7 @@ func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Nam // editShortNameAlias loads the aliases.conf file and changes it. If value is // set, it adds the name-value pair as a new alias. Otherwise, it will remove // name from the config. -func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error { +func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) { if err := validateShortName(name); err != nil { return err } @@ -178,7 +178,13 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er if err != nil { return err } - defer f.Close() + // since we are writing to this file, make sure we handle err on Close() + defer func() { + closeErr := f.Close() + if retErr == nil { + retErr = closeErr + } + }() encoder := toml.NewEncoder(f) return encoder.Encode(conf) @@ -229,7 +235,7 @@ func parseShortNameValue(alias string) (reference.Named, error) { } registry := reference.Domain(named) - if !(strings.ContainsAny(registry, ".:") || registry == "localhost") { + if !strings.ContainsAny(registry, ".:") && registry != "localhost" { return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias) } diff --git a/tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/system_registries_v2.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go rename to tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 1b161474d..1a1fcccf8 100644 --- a/tools/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/tools/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -1,35 +1,37 @@ package sysregistriesv2 import ( + "errors" "fmt" "io/fs" + "maps" "os" "path/filepath" "reflect" + "slices" "sort" "strings" "sync" "github.com/BurntSushi/toml" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/regexp" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/types" + "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/homedir" + 
"go.podman.io/storage/pkg/regexp" ) // systemRegistriesConfPath is the path to the system-wide registry // configuration file and is used to add/subtract potential registries for // obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path' +// -ldflags '-X go.podman.io/image/v5/sysregistries.systemRegistriesConfPath=$your_path' var systemRegistriesConfPath = builtinRegistriesConfPath // systemRegistriesConfDirPath is the path to the system-wide registry // configuration directory and is used to add/subtract potential registries for // obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path' +// -ldflags '-X go.podman.io/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path' var systemRegistriesConfDirPath = builtinRegistriesConfDirPath // AuthenticationFileHelper is a special key for credential helpers indicating @@ -429,7 +431,8 @@ func (config *V2RegistriesConf) postProcessRegistries() error { return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix) } // make sure mirrors are valid - for _, mir := range reg.Mirrors { + for j := range reg.Mirrors { + mir := ®.Mirrors[j] mir.Location, err = parseLocation(mir.Location) if err != nil { return err @@ -744,6 +747,11 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC // Enforce v2 format for drop-in-configs. dropIn, err := loadConfigFile(path, true) if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // file must have been removed between the directory listing + // and the open call, ignore that as it is a expected race + continue + } return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err) } config.updateWithConfigurationFrom(dropIn) @@ -1034,12 +1042,10 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { } // Go maps have a non-deterministic order when iterating the keys, so - // we dump them in a slice and sort it to enforce some order in - // Registries slice. Some consumers of c/image (e.g., CRI-O) log the - // configuration where a non-deterministic order could easily cause - // confusion. - prefixes := maps.Keys(registryMap) - sort.Strings(prefixes) + // we sort the keys to enforce some order in Registries slice. + // Some consumers of c/image (e.g., CRI-O) log the configuration + // and a non-deterministic order could easily cause confusion. 
+	prefixes := slices.Sorted(maps.Keys(registryMap))
 	c.partialV2.Registries = []Registry{}
 	for _, prefix := range prefixes {
diff --git a/tools/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/tools/vendor/go.podman.io/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
similarity index 91%
rename from tools/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
rename to tools/vendor/go.podman.io/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index f6c0576e0..4e0ee57e9 100644
--- a/tools/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/tools/vendor/go.podman.io/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -3,6 +3,7 @@ package tlsclientconfig
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"errors"
 	"fmt"
 	"net"
 	"net/http"
@@ -36,12 +37,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
 			logrus.Debugf(" crt: %s", fullPath)
 			data, err := os.ReadFile(fullPath)
 			if err != nil {
-				if os.IsNotExist(err) {
-					// Dangling symbolic link?
-					// Race with someone who deleted the
-					// file after we read the directory's
-					// list of contents?
-					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
+				if errors.Is(err, os.ErrNotExist) {
+					// file must have been removed between the directory listing
+					// and the open call, ignore that as it is an expected race
 					continue
 				}
 				return err
diff --git a/tools/vendor/go.podman.io/image/v5/signature/docker.go b/tools/vendor/go.podman.io/image/v5/signature/docker.go
new file mode 100644
index 000000000..954eda4a5
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/docker.go
@@ -0,0 +1,94 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+	"go.podman.io/image/v5/docker/reference"
+	"go.podman.io/image/v5/manifest"
+	"go.podman.io/image/v5/signature/internal"
+)
+
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+	// Passphrase to use when signing with the key identity.
+	Passphrase string
+}
+
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
+	manifestDigest, err := manifest.Digest(m)
+	if err != nil {
+		return nil, err
+	}
+	sig := newUntrustedSignature(manifestDigest, dockerReference)
+
+	var passphrase string
+	if options != nil {
+		passphrase = options.Passphrase
+		// The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
+		if strings.Contains(passphrase, "\n") {
+			return nil, errors.New("invalid passphrase: must not contain a line break")
+		}
+	}
+
+	return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
+}
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech.
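+//
+// A hedged usage sketch (sigBytes, manifestBytes, mech, and the key fingerprint are illustrative placeholders):
+//
+//	sig, err := VerifyDockerManifestSignature(sigBytes, manifestBytes,
+//		"docker.io/library/busybox", mech, "0123456789ABCDEF0123456789ABCDEF01234567")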
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+	sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity})
+	return sig, err
+}
+
+// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities
+// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that
+// was used to verify it.
+func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) {
+	expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
+	if err != nil {
+		return nil, "", err
+	}
+	sig, matchedKeyIdentity, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+		acceptedKeyIdentities: expectedKeyIdentities,
+		validateSignedDockerReference: func(signedDockerReference string) error {
+			signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
+			if err != nil {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference))
+			}
+			if signedRef.String() != expectedRef.String() {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q",
+					signedDockerReference, expectedDockerReference))
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+			matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
+			if err != nil {
+				return err
+			}
+			if !matches {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest))
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return nil, "", err
+	}
+	return sig, matchedKeyIdentity, err
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/fulcio_cert.go b/tools/vendor/go.podman.io/image/v5/signature/fulcio_cert.go
new file mode 100644
index 000000000..e9d98368f
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/fulcio_cert.go
@@ -0,0 +1,210 @@
+package signature
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"slices"
+	"time"
+
+	"github.com/sigstore/fulcio/pkg/certificate"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"go.podman.io/image/v5/signature/internal"
+)
+
+// fulcioTrustRoot contains a policy for validating Fulcio-issued certificates.
+// Users should call validate() on the policy before using it.
+type fulcioTrustRoot struct {
+	caCertificates *x509.CertPool
+	oidcIssuer     string
+	subjectEmail   string
+}
+
+func (f *fulcioTrustRoot) validate() error {
+	if f.oidcIssuer == "" {
+		return errors.New("Internal inconsistency: Fulcio use set up without OIDC issuer")
+	}
+	if f.subjectEmail == "" {
+		return errors.New("Internal inconsistency: Fulcio use set up without subject email")
+	}
+	return nil
+}
+
+// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in untrustedCertificate;
+// it fails if the extension is not present in the certificate, or on any inconsistency.
+func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) { + // == Validate the recorded OIDC issuer + gotOIDCIssuer1 := false + gotOIDCIssuer2 := false + var oidcIssuer1, oidcIssuer2 string + // certificate.ParseExtensions doesn’t reject duplicate extensions, and doesn’t detect inconsistencies + // between certificate.OIDIssuer and certificate.OIDIssuerV2. + // Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19, + // reject duplicates manually. + for _, untrustedExt := range untrustedCertificate.Extensions { + if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it. + if gotOIDCIssuer1 { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate. + return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension") + } + oidcIssuer1 = string(untrustedExt.Value) + gotOIDCIssuer1 = true + } else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) { + if gotOIDCIssuer2 { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate. + return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension") + } + rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2) + if err != nil { + return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err)) + } + if len(rest) != 0 { + return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data") + } + gotOIDCIssuer2 = true + } + } + switch { + case gotOIDCIssuer1 && gotOIDCIssuer2: + if oidcIssuer1 != oidcIssuer2 { + return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v", + oidcIssuer1, oidcIssuer2)) + } + return oidcIssuer1, nil + case gotOIDCIssuer1: + return oidcIssuer1, nil + case gotOIDCIssuer2: + return oidcIssuer2, nil + default: + return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension") + } +} + +func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) { + // == Verify the certificate is correctly signed + var untrustedIntermediatePool *x509.CertPool // = nil + // untrustedCertificateChainPool.AppendCertsFromPEM does something broadly similar, + // but it seems to optimize for memory usage at the cost of larger CPU usage (i.e. to load + // the hundreds of trusted CAs). Golang’s TLS code similarly calls individual AddCert + // for intermediate certificates. 
+	if len(untrustedIntermediateChainBytes) > 0 {
+		untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes)
+		if err != nil {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err))
+		}
+		untrustedIntermediatePool = x509.NewCertPool()
+		if len(untrustedIntermediateChain) > 1 {
+			for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] {
+				untrustedIntermediatePool.AddCert(untrustedIntermediateCert)
+			}
+		}
+	}
+
+	untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Go rejects Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses and URIs;
+	// we match SAN ourselves, so override that.
+	if len(untrustedCertificate.UnhandledCriticalExtensions) > 0 {
+		var remaining []asn1.ObjectIdentifier
+		for _, oid := range untrustedCertificate.UnhandledCriticalExtensions {
+			if !oid.Equal(cryptoutils.SANOID) {
+				remaining = append(remaining, oid)
+			}
+		}
+		untrustedCertificate.UnhandledCriticalExtensions = remaining
+	}
+
+	if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
+		Intermediates: untrustedIntermediatePool,
+		Roots:         f.caCertificates,
+		// NOTE: Cosign uses untrustedCertificate.NotBefore here (i.e. uses _that_ time for intermediate certificate validation),
+		// and validates the leaf certificate against relevantTime manually.
+		// We verify the full certificate chain against relevantTime instead.
+		// Assuming the certificate is fulcio-generated and very short-lived, that should make little difference.
+		CurrentTime: relevantTime,
+		KeyUsages:   []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+	}); err != nil {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err))
+	}
+
+	// Cosign verifies a SCT of the certificate (either embedded, or even, probably irrelevant, externally-supplied).
+	//
+	// We don’t currently do that.
+	//
+	// At the very least, with Fulcio we require Rekor SETs to prove Rekor contains a log of the signature, and that
+	// already contains the full certificate; so a SCT of the certificate is superfluous (assuming Rekor allowed searching by
+	// certificate subject, which, well…). That argument might go away if we add support for RFC 3161 timestamps instead of Rekor.
+	//
+	// Secondarily, assuming a trusted Fulcio server (which, to be fair, might not be the case for the public one) SCT is not clearly
+	// better than the Fulcio server maintaining an audit log; a SCT can only reveal a misissuance if there is some other authoritative
+	// log of approved Fulcio invocations, and it’s not clear where that would come from, especially human users manually
+	// logging in using OpenID are not going to maintain a record of those actions.
+	//
+	// Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
+	// by correctly-issued certificates.
+	//
+	// So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition,
+	// the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
+	// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
+ + // == Validate the recorded OIDC issuer + oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate) + if err != nil { + return nil, err + } + if oidcIssuer != f.oidcIssuer { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer)) + } + + // == Validate the OIDC subject + if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)", + f.subjectEmail, + untrustedCertificate.EmailAddresses)) + } + // FIXME: Match more subject types? Cosign does: + // - .DNSNames (can’t be issued by Fulcio) + // - .IPAddresses (can’t be issued by Fulcio) + // - .URIs (CAN be issued by Fulcio) + // - OtherName values in SAN (CAN be issued by Fulcio) + // - Various values about GitHub workflows (CAN be issued by Fulcio) + // What does it… mean to get an OAuth2 identity for an IP address? + // FIXME: How far into Turing-completeness for the issuer/subject do we need to get? Simultaneously accepted alternatives, for + // issuers and/or subjects and/or combinations? Regexps? More? + + return untrustedCertificate.PublicKey, nil +} + +func parseLeafCertFromPEM(untrustedCertificateBytes []byte) (*x509.Certificate, error) { + untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err)) + } + switch len(untrustedLeafCerts) { + case 0: + return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data") + case 1: // OK + return untrustedLeafCerts[0], nil + default: + return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data") + } +} + +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes, + untrustedBase64Signature, untrustedPayloadBytes) + if err != nil { + return nil, err + } + return fulcioTrustRoot.verifyFulcioCertificateAtTime(rekorSETTime, untrustedCertificateBytes, untrustedIntermediateChainBytes) +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/errors.go b/tools/vendor/go.podman.io/image/v5/signature/internal/errors.go new file mode 100644 index 000000000..d21e8544d --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/internal/errors.go @@ -0,0 +1,24 @@ +package internal + +// InvalidSignatureError is returned when parsing an invalid signature. +// This is publicly visible as signature.InvalidSignatureError +type InvalidSignatureError struct { + msg string +} + +func (err InvalidSignatureError) Error() string { + return err.msg +} + +func NewInvalidSignatureError(msg string) InvalidSignatureError { + return InvalidSignatureError{msg: msg} +} + +// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError. +// All other errors are returned as is. 
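+//
+// For illustration, a typical caller wraps a strict parser like this (a sketch of the UnmarshalJSON pattern used elsewhere in this package; someType is hypothetical):
+//
+//	func (s *someType) UnmarshalJSON(data []byte) error {
+//		return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+//	}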
+func JSONFormatToInvalidSignatureError(err error) error {
+	if formatErr, ok := err.(JSONFormatError); ok {
+		err = NewInvalidSignatureError(formatErr.Error())
+	}
+	return err
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/json.go b/tools/vendor/go.podman.io/image/v5/signature/internal/json.go
new file mode 100644
index 000000000..246905d78
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/internal/json.go
@@ -0,0 +1,90 @@
+package internal
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"go.podman.io/image/v5/internal/set"
+)
+
+// JSONFormatError is returned when JSON does not match expected format.
+type JSONFormatError string
+
+func (err JSONFormatError) Error() string {
+	return string(err)
+}
+
+// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
+// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
+// we could use reflection to automate this. Later?
+func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
+	seenKeys := set.New[string]()
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return JSONFormatError(err.Error())
+	}
+	if t != json.Delim('{') {
+		return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t))
+	}
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			return JSONFormatError(err.Error())
+		}
+		if t == json.Delim('}') {
+			break
+		}
+
+		key, ok := t.(string)
+		if !ok {
+			// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+			return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t))
+		}
+		if seenKeys.Contains(key) {
+			return JSONFormatError(fmt.Sprintf("Duplicate key %q", key))
+		}
+		seenKeys.Add(key)
+
+		valuePtr := fieldResolver(key)
+		if valuePtr == nil {
+			return JSONFormatError(fmt.Sprintf("Unknown key %q", key))
+		}
+		// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
+		if err := dec.Decode(valuePtr); err != nil {
+			return JSONFormatError(err.Error())
+		}
+	}
+	if _, err := dec.Token(); err != io.EOF {
+		return JSONFormatError("Unexpected data after JSON object")
+	}
+	return nil
+}
+
+// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
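+//
+// A minimal usage sketch (the field names and destinations are illustrative placeholders):
+//
+//	var critical json.RawMessage
+//	var signature string
+//	err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+//		"critical":  &critical,
+//		"signature": &signature,
+//	})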
+func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error { + seenKeys := set.New[string]() + if err := ParanoidUnmarshalJSONObject(data, func(key string) any { + if valuePtr, ok := exactFields[key]; ok { + seenKeys.Add(key) + return valuePtr + } + return nil + }); err != nil { + return err + } + for key := range exactFields { + if !seenKeys.Contains(key) { + return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key)) + } + } + return nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/rekor_api_types.go b/tools/vendor/go.podman.io/image/v5/signature/internal/rekor_api_types.go new file mode 100644 index 000000000..7b941f536 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/internal/rekor_api_types.go @@ -0,0 +1,95 @@ +package internal + +import ( + "bytes" + "encoding/json" + "fmt" +) + +const rekorHashedrekordKind = "hashedrekord" + +type RekorHashedrekord struct { + APIVersion *string `json:"apiVersion"` + Spec json.RawMessage `json:"spec"` +} + +func (m *RekorHashedrekord) Kind() string { + return rekorHashedrekordKind +} + +func (m *RekorHashedrekord) SetKind(val string) { +} + +func (m *RekorHashedrekord) UnmarshalJSON(raw []byte) error { + var base struct { + Kind string `json:"kind"` + } + dec := json.NewDecoder(bytes.NewReader(raw)) + dec.UseNumber() + if err := dec.Decode(&base); err != nil { + return err + } + + switch base.Kind { + case rekorHashedrekordKind: + var data struct { // We can’t use RekorHashedRekord directly, because that would be an infinite recursion. + APIVersion *string `json:"apiVersion"` + Spec json.RawMessage `json:"spec"` + } + dec = json.NewDecoder(bytes.NewReader(raw)) + dec.UseNumber() + if err := dec.Decode(&data); err != nil { + return err + } + res := RekorHashedrekord{ + APIVersion: data.APIVersion, + Spec: data.Spec, + } + *m = res + return nil + + default: + return fmt.Errorf("invalid kind value: %q", base.Kind) + } +} + +func (m RekorHashedrekord) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Kind string `json:"kind"` + APIVersion *string `json:"apiVersion"` + Spec json.RawMessage `json:"spec"` + }{ + Kind: m.Kind(), + APIVersion: m.APIVersion, + Spec: m.Spec, + }) +} + +type RekorHashedrekordV001Schema struct { + Data *RekorHashedrekordV001SchemaData `json:"data"` + Signature *RekorHashedrekordV001SchemaSignature `json:"signature"` +} + +type RekorHashedrekordV001SchemaData struct { + Hash *RekorHashedrekordV001SchemaDataHash `json:"hash,omitempty"` +} + +type RekorHashedrekordV001SchemaDataHash struct { + Algorithm *string `json:"algorithm"` + Value *string `json:"value"` +} + +const ( + RekorHashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" + RekorHashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384" + RekorHashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512" +) + +type RekorHashedrekordV001SchemaSignature struct { + Content []byte `json:"content,omitempty"` + PublicKey *RekorHashedrekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"` +} + +type RekorHashedrekordV001SchemaSignaturePublicKey struct { + Content []byte `json:"content,omitempty"` +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/rekor_set.go b/tools/vendor/go.podman.io/image/v5/signature/internal/rekor_set.go new file mode 100644 index 000000000..1c20e496a --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/internal/rekor_set.go @@ -0,0 +1,224 @@ +package internal + +import ( + "bytes" + 
"crypto/ecdsa" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "fmt" + "time" + + "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" +) + +// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema. +// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies. +const RekorHashedRekordV001APIVersion = "0.0.1" + +// UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET +// (note that this a signature-specific format, not a format directly used by the Rekor API). +// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder. +type UntrustedRekorSET struct { + UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload + UntrustedPayload json.RawMessage +} + +type UntrustedRekorPayload struct { + Body []byte // In cosign, this is an any, but only a string works + IntegratedTime int64 + LogIndex int64 + LogID string +} + +// A compile-time check that UntrustedRekorSET implements json.Unmarshaler +var _ json.Unmarshaler = (*UntrustedRekorSET)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { + return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. +// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. +func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error { + return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp, + "Payload": &s.UntrustedPayload, + }) +} + +// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implements json.Marshaler +var _ json.Marshaler = UntrustedRekorSET{} +var _ json.Marshaler = (*UntrustedRekorSET)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]any{ + "SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp, + "Payload": s.UntrustedPayload, + }) +} + +// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler +var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error { + return JSONFormatToInvalidSignatureError(p.strictUnmarshalJSON(data)) +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. +// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. 
+func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
+	return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"body":           &p.Body,
+		"integratedTime": &p.IntegratedTime,
+		"logIndex":       &p.LogIndex,
+		"logID":          &p.LogID,
+	})
+}
+
+// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implement json.Marshaler
+var _ json.Marshaler = UntrustedRekorPayload{}
+var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"body":           p.Body,
+		"integratedTime": p.IntegratedTime,
+		"logIndex":       p.LogIndex,
+		"logID":          p.LogID,
+	})
+}
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
+// Returns bundle upload time on success.
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+	// FIXME: Should the publicKey parameter hard-code ecdsa?
+
+	// == Parse SET bytes
+	var untrustedSET UntrustedRekorSET
+	// Sadly, we need to parse and transform untrusted data before verifying a cryptographic signature...
+	if err := json.Unmarshal(unverifiedRekorSET, &untrustedSET); err != nil {
+		return time.Time{}, NewInvalidSignatureError(err.Error())
+	}
+	// == Verify SET signature
+	// Cosign unmarshals and re-marshals UntrustedPayload; that seems unnecessary,
+	// assuming jsoncanonicalizer is designed to operate on untrusted data.
+	untrustedSETPayloadCanonicalBytes, err := jsoncanonicalizer.Transform(untrustedSET.UntrustedPayload)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
+	}
+	untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
+	publicKeymatched := false
+	for _, pk := range publicKeys {
+		if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+			publicKeymatched = true
+			break
+		}
+	}
+	if !publicKeymatched {
+		return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
+	}
+
+	// == Parse SET payload
+	// Parse the cryptographically-verified canonicalized variant, NOT the originally-delivered representation,
+	// to decrease risk of exploiting the JSON parser. Note that if there were an arbitrary execution vulnerability, the attacker
+	// could have exploited the parsing of unverifiedRekorSET above already; so this, at best, ensures more consistent processing
+	// of the SET payload.
+	var rekorPayload UntrustedRekorPayload
+	if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error()))
+	}
+	// FIXME: Consider being much more strict about decoding JSON.
+	var hashedRekord RekorHashedrekord
+	if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err))
+	}
+	// The decode of HashedRekord validates the "kind": "hashedrekord" field, which is otherwise invisible to us.
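+	// For illustration only, such a decoded body has roughly this shape (all values are placeholders):
+	//   {"kind":"hashedrekord","apiVersion":"0.0.1",
+	//    "spec":{"data":{"hash":{"algorithm":"sha256","value":"..."}},
+	//            "signature":{"content":"...","publicKey":{"content":"..."}}}}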
+	if hashedRekord.APIVersion == nil {
+		return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version")
+	}
+	if *hashedRekord.APIVersion != RekorHashedRekordV001APIVersion {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion))
+	}
+	var hashedRekordV001 RekorHashedrekordV001Schema
+	if err := json.Unmarshal(hashedRekord.Spec, &hashedRekordV001); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekord spec: %v", err))
+	}
+
+	// == Match unverifiedKeyOrCertBytes
+	if hashedRekordV001.Signature == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature" field in hashedrekord`)
+	}
+	if hashedRekordV001.Signature.PublicKey == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature.publicKey" field in hashedrekord`)
+	}
+	rekorKeyOrCertPEM, rest := pem.Decode(hashedRekordV001.Signature.PublicKey.Content)
+	if rekorKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET has trailing data")
+	}
+	// FIXME: For public keys, let the caller provide the DER-formatted blob instead
+	// of round-tripping through PEM.
+	unverifiedKeyOrCertPEM, rest := pem.Decode(unverifiedKeyOrCertBytes)
+	if unverifiedKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET has trailing data")
+	}
+	// NOTE: This compares the PEM payload, but not the object type or headers.
+	if !bytes.Equal(rekorKeyOrCertPEM.Bytes, unverifiedKeyOrCertPEM.Bytes) {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET does not match")
+	}
+	// == Match unverifiedSignatureBytes
+	unverifiedSignatureBytes, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding signature base64: %v", err))
+	}
+	if !bytes.Equal(hashedRekordV001.Signature.Content, unverifiedSignatureBytes) {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("signature in Rekor SET does not match: %#v vs. %#v",
+			string(hashedRekordV001.Signature.Content), string(unverifiedSignatureBytes)))
+	}
+
+	// == Match unverifiedPayloadBytes
+	if hashedRekordV001.Data == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data" field in hashedrekord`)
+	}
+	if hashedRekordV001.Data.Hash == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash" field in hashedrekord`)
+	}
+	if hashedRekordV001.Data.Hash.Algorithm == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
+	}
+	// FIXME: Rekor 1.3.5 has added SHA-384 and SHA-512 as recognized values.
+	// Eventually we should support them as well.
+	// Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility
+	// issue.
+	if *hashedRekordV001.Data.Hash.Algorithm != RekorHashedrekordV001SchemaDataHashAlgorithmSha256 {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
+	}
+	if hashedRekordV001.Data.Hash.Value == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.value" field in hashedrekord`)
+	}
+	rekorPayloadHash, err := hex.DecodeString(*hashedRekordV001.Data.Hash.Value)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Invalid "data.hash.value" field in hashedrekord: %v`, err))
+	}
+	unverifiedPayloadHash := sha256.Sum256(unverifiedPayloadBytes)
+	if !bytes.Equal(rekorPayloadHash, unverifiedPayloadHash[:]) {
+		return time.Time{}, NewInvalidSignatureError("payload in Rekor SET does not match")
+	}
+
+	// == All OK; return the relevant time.
+	return time.Unix(rekorPayload.IntegratedTime, 0), nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.c b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.c
new file mode 100644
index 000000000..d5314016a
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.c
@@ -0,0 +1,200 @@
+/*
+ * Copying and distribution of this file, with or without modification,
+ * are permitted in any medium without royalty provided the copyright
+ * notice and this notice are preserved. This file is offered as-is,
+ * without any warranty.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "gosequoia.h"
+
+#if defined(GO_SEQUOIA_ENABLE_DLOPEN) && GO_SEQUOIA_ENABLE_DLOPEN
+
+#include <assert.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <stddef.h>
+
+/* If SEQUOIA_SONAME is defined, dlopen handle can be automatically
+ * set; otherwise, the caller needs to call
+ * go_sequoia_ensure_library with soname determined at run time.
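+ * (In this tree, sequoia.go's initOnce performs that call, passing a
+ * platform-specific soname such as libpodman_sequoia.so.0.)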
+ */
+#ifdef SEQUOIA_SONAME
+
+static void
+ensure_library (void)
+{
+  if (go_sequoia_ensure_library (SEQUOIA_SONAME, RTLD_LAZY | RTLD_LOCAL) < 0)
+    abort ();
+}
+
+#if defined(GO_SEQUOIA_ENABLE_PTHREAD) && GO_SEQUOIA_ENABLE_PTHREAD
+#include <pthread.h>
+
+static pthread_once_t dlopen_once = PTHREAD_ONCE_INIT;
+
+#define ENSURE_LIBRARY pthread_once(&dlopen_once, ensure_library)
+
+#else /* GO_SEQUOIA_ENABLE_PTHREAD */
+
+#define ENSURE_LIBRARY do {             \
+    if (!go_sequoia_dlhandle)           \
+      ensure_library();                 \
+  } while (0)
+
+#endif /* !GO_SEQUOIA_ENABLE_PTHREAD */
+
+#else /* SEQUOIA_SONAME */
+
+#define ENSURE_LIBRARY do {} while (0)
+
+#endif /* !SEQUOIA_SONAME */
+
+static void *go_sequoia_dlhandle;
+
+/* Define redirection symbols */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-macros"
+
+#if (2 <= __GNUC__ || (4 <= __clang_major__))
+#define FUNC(ret, name, args, cargs)    \
+  static __typeof__(name)(*go_sequoia_sym_##name);
+#else
+#define FUNC(ret, name, args, cargs)    \
+  static ret(*go_sequoia_sym_##name)args;
+#endif
+#define VOID_FUNC FUNC
+#include "gosequoiafuncs.h"
+#undef VOID_FUNC
+#undef FUNC
+
+#pragma GCC diagnostic pop
+
+/* Define redirection wrapper functions */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-macros"
+
+#define FUNC(ret, name, args, cargs)    \
+ret go_##name args                      \
+{                                       \
+  ENSURE_LIBRARY;                       \
+  assert (go_sequoia_sym_##name);       \
+  return go_sequoia_sym_##name cargs;   \
+}
+#define VOID_FUNC(ret, name, args, cargs) \
+ret go_##name args                      \
+{                                       \
+  ENSURE_LIBRARY;                       \
+  assert (go_sequoia_sym_##name);       \
+  go_sequoia_sym_##name cargs;          \
+}
+#include "gosequoiafuncs.h"
+#undef VOID_FUNC
+#undef FUNC
+
+#pragma GCC diagnostic pop
+
+static int
+ensure_symbol (const char *name, void **symp)
+{
+  if (!*symp)
+    {
+      void *sym = dlsym (go_sequoia_dlhandle, name);
+      if (!sym)
+        return -EINVAL;
+      *symp = sym;
+    }
+  return 0;
+}
+
+int
+go_sequoia_ensure_library (const char *soname, int flags)
+{
+  int err;
+
+  if (!go_sequoia_dlhandle)
+    {
+      go_sequoia_dlhandle = dlopen (soname, flags);
+      if (!go_sequoia_dlhandle)
+        return -EINVAL;
+    }
+
+#define ENSURE_SYMBOL(name)             \
+  ensure_symbol(#name, (void **)&go_sequoia_sym_##name)
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-macros"
+
+#define FUNC(ret, name, args, cargs)    \
+  err = ENSURE_SYMBOL(name);            \
+  if (err < 0)                          \
+    {                                   \
+      dlclose (go_sequoia_dlhandle);    \
+      go_sequoia_dlhandle = NULL;       \
+      return err;                       \
+    }
+#define VOID_FUNC FUNC
+#include "gosequoiafuncs.h"
+#undef VOID_FUNC
+#undef FUNC
+
+#pragma GCC diagnostic pop
+
+#undef ENSURE_SYMBOL
+  return 0;
+}
+
+void
+go_sequoia_unload_library (void)
+{
+  if (go_sequoia_dlhandle)
+    {
+      dlclose (go_sequoia_dlhandle);
+      go_sequoia_dlhandle = NULL;
+    }
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-macros"
+
+#define FUNC(ret, name, args, cargs)    \
+  go_sequoia_sym_##name = NULL;
+#define VOID_FUNC FUNC
+#include "gosequoiafuncs.h"
+#undef VOID_FUNC
+#undef FUNC
+
+#pragma GCC diagnostic pop
+}
+
+unsigned
+go_sequoia_is_usable (void)
+{
+  return go_sequoia_dlhandle != NULL;
+}
+
+#else /* GO_SEQUOIA_ENABLE_DLOPEN */
+
+int
+go_sequoia_ensure_library (const char *soname, int flags)
+{
+  (void) soname;
+  (void) flags;
+  return 0;
+}
+
+void
+go_sequoia_unload_library (void)
+{
+}
+
+unsigned
+go_sequoia_is_usable (void)
+{
+  /* The library is linked at build time, thus always usable */
+  return 1;
+}
+
+#endif /* !GO_SEQUOIA_ENABLE_DLOPEN */
diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.h b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.h
new file mode 100644
index 000000000..477b985ba
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.h
@@ -0,0 +1,54 @@
+/*
+ * Copying and distribution of this file, with or without modification,
+ * are permitted in any medium without royalty provided the copyright
+ * notice and this notice are preserved. This file is offered as-is,
+ * without any warranty.
+ */
+
+#ifndef GO_SEQUOIA_H_
+#define GO_SEQUOIA_H_
+
+#include <sequoia.h>
+
+#if defined(GO_SEQUOIA_ENABLE_DLOPEN) && GO_SEQUOIA_ENABLE_DLOPEN
+
+#define FUNC(ret, name, args, cargs)    \
+  ret go_##name args;
+#define VOID_FUNC FUNC
+#include "gosequoiafuncs.h"
+#undef VOID_FUNC
+#undef FUNC
+
+#define GO_SEQUOIA_FUNC(name) go_##name
+
+#else
+
+#define GO_SEQUOIA_FUNC(name) name
+
+#endif /* GO_SEQUOIA_ENABLE_DLOPEN */
+
+/* Ensure that SONAME is loaded with dlopen FLAGS and that all the necessary
+ * symbols are resolved.
+ *
+ * Returns 0 on success; negative error code otherwise.
+ *
+ * Note that this function is NOT thread-safe; when calling it from
+ * multi-threaded programs, protect it with a locking mechanism.
+ */
+int go_sequoia_ensure_library (const char *soname, int flags);
+
+/* Unload library and reset symbols.
+ *
+ * Note that this function is NOT thread-safe; when calling it from
+ * multi-threaded programs, protect it with a locking mechanism.
+ */
+void go_sequoia_unload_library (void);
+
+/* Return 1 if the library is loaded and usable.
+ *
+ * Note that this function is NOT thread-safe; when calling it from
+ * multi-threaded programs, protect it with a locking mechanism.
+ */
+unsigned go_sequoia_is_usable (void);
+
+#endif /* GO_SEQUOIA_H_ */
diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoiafuncs.h b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoiafuncs.h
new file mode 100644
index 000000000..3d7ae5fac
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoiafuncs.h
@@ -0,0 +1,21 @@
+/*
+ * This file was automatically generated from sequoia.h,
+ * which is covered by the following license:
+ *   SPDX-License-Identifier: Apache-2.0
+ */
+VOID_FUNC(void, sequoia_error_free, (struct SequoiaError *err_ptr), (err_ptr))
+FUNC(struct SequoiaMechanism *, sequoia_mechanism_new_from_directory, (const char *dir_ptr, struct SequoiaError **err_ptr), (dir_ptr, err_ptr))
+FUNC(struct SequoiaMechanism *, sequoia_mechanism_new_ephemeral, (struct SequoiaError **err_ptr), (err_ptr))
+VOID_FUNC(void, sequoia_mechanism_free, (struct SequoiaMechanism *mechanism_ptr), (mechanism_ptr))
+VOID_FUNC(void, sequoia_signature_free, (struct SequoiaSignature *signature_ptr), (signature_ptr))
+FUNC(const uint8_t *, sequoia_signature_get_data, (const struct SequoiaSignature *signature_ptr, size_t *data_len), (signature_ptr, data_len))
+VOID_FUNC(void, sequoia_verification_result_free, (struct SequoiaVerificationResult *result_ptr), (result_ptr))
+FUNC(const uint8_t *, sequoia_verification_result_get_content, (const struct SequoiaVerificationResult *result_ptr, size_t *data_len), (result_ptr, data_len))
+FUNC(const char *, sequoia_verification_result_get_signer, (const struct SequoiaVerificationResult *result_ptr), (result_ptr))
+FUNC(struct SequoiaSignature *, sequoia_sign, (struct SequoiaMechanism *mechanism_ptr, const char *key_handle_ptr, const char *password_ptr, const uint8_t
*data_ptr, size_t data_len, struct SequoiaError **err_ptr), (mechanism_ptr, key_handle_ptr, password_ptr, data_ptr, data_len, err_ptr))
+FUNC(struct SequoiaVerificationResult *, sequoia_verify, (struct SequoiaMechanism *mechanism_ptr, const uint8_t *signature_ptr, size_t signature_len, struct SequoiaError **err_ptr), (mechanism_ptr, signature_ptr, signature_len, err_ptr))
+VOID_FUNC(void, sequoia_import_result_free, (struct SequoiaImportResult *result_ptr), (result_ptr))
+FUNC(size_t, sequoia_import_result_get_count, (const struct SequoiaImportResult *result_ptr), (result_ptr))
+FUNC(const char *, sequoia_import_result_get_content, (const struct SequoiaImportResult *result_ptr, size_t index, struct SequoiaError **err_ptr), (result_ptr, index, err_ptr))
+FUNC(struct SequoiaImportResult *, sequoia_import_keys, (struct SequoiaMechanism *mechanism_ptr, const uint8_t *blob_ptr, size_t blob_len, struct SequoiaError **err_ptr), (mechanism_ptr, blob_ptr, blob_len, err_ptr))
+FUNC(int, sequoia_set_logger_consumer, (void (*consumer)(enum SequoiaLogLevel, const char *), struct SequoiaError **err_ptr), (consumer, err_ptr))
diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.go b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.go
new file mode 100644
index 000000000..46d9084b0
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.go
@@ -0,0 +1,223 @@
+//go:build containers_image_sequoia
+
+package sequoia
+
+// #cgo CFLAGS: -I. -DGO_SEQUOIA_ENABLE_DLOPEN=1
+// #include "gosequoia.h"
+// #include <limits.h>
+// #include <stdlib.h>
+// typedef void (*sequoia_logger_consumer_t) (enum SequoiaLogLevel level, char *message);
+// extern void sequoia_logrus_logger (enum SequoiaLogLevel level, char *message);
+import "C"
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"sync"
+	"unsafe"
+
+	"github.com/sirupsen/logrus"
+)
+
+// sequoiaLibraryDir is the path to the directory where libpodman_sequoia is installed,
+// if it is not in the platform’s default library path.
+// You can override this at build time with
+// -ldflags '-X go.podman.io/image/v5/signature/internal/sequoia.sequoiaLibraryDir=$your_path'
+var sequoiaLibraryDir = ""
+
+type SigningMechanism struct {
+	mechanism *C.SequoiaMechanism
+}
+
+// NewMechanismFromDirectory initializes a mechanism using (user-managed) Sequoia state
+// in dir, which can be "" to indicate the default (using $SEQUOIA_HOME or the default home directory location).
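+//
+// Illustrative use only (a sketch, not upstream documentation; error handling elided):
+//
+//	if err := sequoia.Init(); err != nil { /* libpodman_sequoia is unavailable */ }
+//	mech, err := sequoia.NewMechanismFromDirectory("") // default Sequoia state
+//	defer mech.Close()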
+func NewMechanismFromDirectory( + dir string, +) (*SigningMechanism, error) { + var cerr *C.SequoiaError + var cDir *C.char + if dir != "" { + cDir = C.CString(dir) + defer C.free(unsafe.Pointer(cDir)) + } + cMechanism := C.go_sequoia_mechanism_new_from_directory(cDir, &cerr) + if cMechanism == nil { + defer C.go_sequoia_error_free(cerr) + return nil, errors.New(C.GoString(cerr.message)) + } + return &SigningMechanism{ + mechanism: cMechanism, + }, nil +} + +func NewEphemeralMechanism() (*SigningMechanism, error) { + var cerr *C.SequoiaError + cMechanism := C.go_sequoia_mechanism_new_ephemeral(&cerr) + if cMechanism == nil { + defer C.go_sequoia_error_free(cerr) + return nil, errors.New(C.GoString(cerr.message)) + } + return &SigningMechanism{ + mechanism: cMechanism, + }, nil +} + +func (m *SigningMechanism) SignWithPassphrase( + input []byte, + keyIdentity string, + passphrase string, +) ([]byte, error) { + var cerr *C.SequoiaError + var cPassphrase *C.char + if passphrase == "" { + cPassphrase = nil + } else { + cPassphrase = C.CString(passphrase) + defer C.free(unsafe.Pointer(cPassphrase)) + } + cKeyIdentity := C.CString(keyIdentity) + defer C.free(unsafe.Pointer(cKeyIdentity)) + sig := C.go_sequoia_sign( + m.mechanism, + cKeyIdentity, + cPassphrase, + (*C.uchar)(unsafe.Pointer(unsafe.SliceData(input))), + C.size_t(len(input)), + &cerr, + ) + if sig == nil { + defer C.go_sequoia_error_free(cerr) + return nil, errors.New(C.GoString(cerr.message)) + } + defer C.go_sequoia_signature_free(sig) + var size C.size_t + cData := C.go_sequoia_signature_get_data(sig, &size) + if size > C.size_t(C.INT_MAX) { + return nil, errors.New("overflow") // Coverage: This should not reasonably happen, and we don’t want to generate gigabytes of input to test this. + } + return C.GoBytes(unsafe.Pointer(cData), C.int(size)), nil +} + +func (m *SigningMechanism) Sign( + input []byte, + keyIdentity string, +) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +func (m *SigningMechanism) Verify( + unverifiedSignature []byte, +) (contents []byte, keyIdentity string, err error) { + var cerr *C.SequoiaError + result := C.go_sequoia_verify( + m.mechanism, + (*C.uchar)(unsafe.Pointer(unsafe.SliceData(unverifiedSignature))), + C.size_t(len(unverifiedSignature)), + &cerr, + ) + if result == nil { + defer C.go_sequoia_error_free(cerr) + return nil, "", errors.New(C.GoString(cerr.message)) + } + defer C.go_sequoia_verification_result_free(result) + var size C.size_t + cContent := C.go_sequoia_verification_result_get_content(result, &size) + if size > C.size_t(C.INT_MAX) { + return nil, "", errors.New("overflow") // Coverage: This should not reasonably happen, and we don’t want to generate gigabytes of input to test this. 
+ } + contents = C.GoBytes(unsafe.Pointer(cContent), C.int(size)) + cSigner := C.go_sequoia_verification_result_get_signer(result) + keyIdentity = C.GoString(cSigner) + return contents, keyIdentity, nil +} + +func (m *SigningMechanism) ImportKeys(blob []byte) ([]string, error) { + var cerr *C.SequoiaError + result := C.go_sequoia_import_keys( + m.mechanism, + (*C.uchar)(unsafe.Pointer(unsafe.SliceData(blob))), + C.size_t(len(blob)), + &cerr, + ) + if result == nil { + defer C.go_sequoia_error_free(cerr) + return nil, errors.New(C.GoString(cerr.message)) + } + defer C.go_sequoia_import_result_free(result) + + keyIdentities := []string{} + count := C.go_sequoia_import_result_get_count(result) + for i := C.size_t(0); i < count; i++ { + var cerr *C.SequoiaError + cKeyIdentity := C.go_sequoia_import_result_get_content(result, i, &cerr) + if cerr != nil { + defer C.go_sequoia_error_free(cerr) // Coverage: this can fail only if i is out of range. + return nil, errors.New(C.GoString(cerr.message)) + } + keyIdentities = append(keyIdentities, C.GoString(cKeyIdentity)) + } + + return keyIdentities, nil +} + +func (m *SigningMechanism) Close() error { + C.go_sequoia_mechanism_free(m.mechanism) + return nil +} + +//export sequoia_logrus_logger +func sequoia_logrus_logger(level C.enum_SequoiaLogLevel, message *C.char) { + var logrusLevel logrus.Level + switch level { // Coverage: We are not in control of whether / how the Rust code chooses to log things. + case C.SEQUOIA_LOG_LEVEL_ERROR: + logrusLevel = logrus.ErrorLevel + case C.SEQUOIA_LOG_LEVEL_WARN: + logrusLevel = logrus.WarnLevel + case C.SEQUOIA_LOG_LEVEL_INFO: + logrusLevel = logrus.InfoLevel + case C.SEQUOIA_LOG_LEVEL_DEBUG: + logrusLevel = logrus.DebugLevel + case C.SEQUOIA_LOG_LEVEL_TRACE: + logrusLevel = logrus.TraceLevel + case C.SEQUOIA_LOG_LEVEL_UNKNOWN: + fallthrough + default: + logrusLevel = logrus.ErrorLevel // Should never happen + } + logrus.StandardLogger().Log(logrusLevel, C.GoString(message)) +} + +// initOnce should only be called by Init. +func initOnce() error { + var soName string + switch runtime.GOOS { + case "linux": + soName = "libpodman_sequoia.so.0" + case "darwin": + soName = "libpodman_sequoia.dylib" + default: + return fmt.Errorf("Unhandled OS %q in sequoia initialization", runtime.GOOS) // Coverage: This is ~by definition not reached in tests. + } + if sequoiaLibraryDir != "" { + soName = filepath.Join(sequoiaLibraryDir, soName) + } + cSOName := C.CString(soName) + defer C.free(unsafe.Pointer(cSOName)) + if C.go_sequoia_ensure_library(cSOName, + C.RTLD_NOW|C.RTLD_GLOBAL) < 0 { + return fmt.Errorf("unable to load %q", soName) // Coverage: This is impractical to test in-process, with the static go_sequoia_dlhandle. + } + + var cerr *C.SequoiaError + if C.go_sequoia_set_logger_consumer(C.sequoia_logger_consumer_t(C.sequoia_logrus_logger), &cerr) != 0 { + defer C.go_sequoia_error_free(cerr) // Coverage: This is impractical to test in-process, with the static go_sequoia_dlhandle. + return fmt.Errorf("initializing logging: %s", C.GoString(cerr.message)) + } + return nil +} + +// Init ensures the libpodman_sequoia library is available. +// It is safe to call from arbitrary goroutines. 
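+// sync.OnceValue runs initOnce at most once and caches the returned error,
+// so concurrent and repeated callers all observe the same result.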
+var Init = sync.OnceValue(initOnce)
diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.h b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.h
new file mode 100644
index 000000000..e0e218926
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.h
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+typedef enum SequoiaErrorKind {
+  SEQUOIA_ERROR_KIND_UNKNOWN,
+  SEQUOIA_ERROR_KIND_INVALID_ARGUMENT,
+  SEQUOIA_ERROR_KIND_IO_ERROR,
+} SequoiaErrorKind;
+
+typedef enum SequoiaLogLevel {
+  SEQUOIA_LOG_LEVEL_UNKNOWN,
+  SEQUOIA_LOG_LEVEL_ERROR,
+  SEQUOIA_LOG_LEVEL_WARN,
+  SEQUOIA_LOG_LEVEL_INFO,
+  SEQUOIA_LOG_LEVEL_DEBUG,
+  SEQUOIA_LOG_LEVEL_TRACE,
+} SequoiaLogLevel;
+
+typedef struct SequoiaImportResult SequoiaImportResult;
+
+typedef struct SequoiaMechanism SequoiaMechanism;
+
+typedef struct SequoiaSignature SequoiaSignature;
+
+typedef struct SequoiaVerificationResult SequoiaVerificationResult;
+
+typedef struct SequoiaError {
+  enum SequoiaErrorKind kind;
+  char *message;
+} SequoiaError;
+
+void sequoia_error_free(struct SequoiaError *err_ptr);
+
+struct SequoiaMechanism *sequoia_mechanism_new_from_directory(const char *dir_ptr,
+                                                              struct SequoiaError **err_ptr);
+
+struct SequoiaMechanism *sequoia_mechanism_new_ephemeral(struct SequoiaError **err_ptr);
+
+void sequoia_mechanism_free(struct SequoiaMechanism *mechanism_ptr);
+
+void sequoia_signature_free(struct SequoiaSignature *signature_ptr);
+
+const uint8_t *sequoia_signature_get_data(const struct SequoiaSignature *signature_ptr,
+                                          size_t *data_len);
+
+void sequoia_verification_result_free(struct SequoiaVerificationResult *result_ptr);
+
+const uint8_t *sequoia_verification_result_get_content(const struct SequoiaVerificationResult *result_ptr,
+                                                       size_t *data_len);
+
+const char *sequoia_verification_result_get_signer(const struct SequoiaVerificationResult *result_ptr);
+
+struct SequoiaSignature *sequoia_sign(struct SequoiaMechanism *mechanism_ptr,
+                                      const char *key_handle_ptr,
+                                      const char *password_ptr,
+                                      const uint8_t *data_ptr,
+                                      size_t data_len,
+                                      struct SequoiaError **err_ptr);
+
+struct SequoiaVerificationResult *sequoia_verify(struct SequoiaMechanism *mechanism_ptr,
+                                                 const uint8_t *signature_ptr,
+                                                 size_t signature_len,
+                                                 struct SequoiaError **err_ptr);
+
+void sequoia_import_result_free(struct SequoiaImportResult *result_ptr);
+
+size_t sequoia_import_result_get_count(const struct SequoiaImportResult *result_ptr);
+
+const char *sequoia_import_result_get_content(const struct SequoiaImportResult *result_ptr,
+                                              size_t index,
+                                              struct SequoiaError **err_ptr);
+
+struct SequoiaImportResult *sequoia_import_keys(struct SequoiaMechanism *mechanism_ptr,
+                                                const uint8_t *blob_ptr,
+                                                size_t blob_len,
+                                                struct SequoiaError **err_ptr);
+
+int sequoia_set_logger_consumer(void (*consumer)(enum SequoiaLogLevel level, const char *message),
+                                struct SequoiaError **err_ptr);
diff --git a/tools/vendor/go.podman.io/image/v5/signature/internal/sigstore_payload.go b/tools/vendor/go.podman.io/image/v5/signature/internal/sigstore_payload.go
new file mode 100644
index 000000000..682360bae
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/internal/sigstore_payload.go
@@ -0,0 +1,239 @@
+package internal
+
+import (
+	"bytes"
+	"crypto"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	digest "github.com/opencontainers/go-digest"
+	sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
"github.com/sigstore/sigstore/pkg/signature" + "go.podman.io/image/v5/version" +) + +const ( + sigstoreSignatureType = "cosign container image signature" + sigstoreHarcodedHashAlgorithm = crypto.SHA256 +) + +// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature) +type UntrustedSigstorePayload struct { + untrustedDockerManifestDigest digest.Digest + untrustedDockerReference string // FIXME: more precise type? + untrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + untrustedTimestamp *int64 +} + +// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with +// the specified primary contents and appropriate metadata. +func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. + creatorID := "containers/image " + version.Version + timestamp := time.Now().Unix() + return UntrustedSigstorePayload{ + untrustedDockerManifestDigest: dockerManifestDigest, + untrustedDockerReference: dockerReference, + untrustedCreatorID: &creatorID, + untrustedTimestamp: ×tamp, + } +} + +// A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implements json.Marshaler +var _ json.Marshaler = UntrustedSigstorePayload{} +var _ json.Marshaler = (*UntrustedSigstorePayload)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) { + if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" { + return nil, errors.New("Unexpected empty signature content") + } + critical := map[string]any{ + "type": sigstoreSignatureType, + "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.untrustedDockerReference}, + } + optional := map[string]any{} + if s.untrustedCreatorID != nil { + optional["creator"] = *s.untrustedCreatorID + } + if s.untrustedTimestamp != nil { + optional["timestamp"] = *s.untrustedTimestamp + } + signature := map[string]any{ + "critical": critical, + "optional": optional, + } + return json.Marshal(signature) +} + +// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler +var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error { + return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. +// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. 
+func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	// /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
+	if !bytes.Equal(optional, []byte("null")) {
+		if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
+			switch key {
+			case "creator":
+				gotCreatorID = true
+				return &creatorID
+			case "timestamp":
+				gotTimestamp = true
+				return &timestamp
+			default:
+				var ignore any
+				return &ignore
+			}
+		}); err != nil {
+			return err
+		}
+	}
+	if gotCreatorID {
+		s.untrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return NewInvalidSignatureError("Field optional.timestamp is not an integer")
+		}
+		s.untrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != sigstoreSignatureType {
+		return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+	}
+
+	var digestString string
+	if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	digestValue, err := digest.Parse(digestString)
+	if err != nil {
+		return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+	}
+	s.untrustedDockerManifestDigest = digestValue
+
+	return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+		"docker-reference": &s.untrustedDockerReference,
+	})
+}
+
+// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable.
+// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type SigstorePayloadAcceptanceRules struct {
+	ValidateSignedDockerReference      func(string) error
+	ValidateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created
+// by any of the public keys in publicKeys.
+//
+// This is an internal implementation detail of VerifySigstorePayload and should have no other callers.
+// It is INSUFFICIENT alone to consider the signature acceptable.
+func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error {
+	if len(publicKeys) == 0 {
+		return errors.New("Need at least one public key to verify the sigstore payload, but got 0")
+	}
+
+	verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys))
+	for _, key := range publicKeys {
+		// Failing to load a verifier indicates that something is really, really
+		// invalid about the public key; prefer to fail even if the signature might be
+		// valid with other keys, so that users fix their fallback keys before they need them.
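+		// (In other words, a malformed fallback key is reported immediately,
+		// not only once the primary key stops matching.)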
+ // For that reason, we even initialize all verifiers before trying to validate the signature + // with any key. + verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHarcodedHashAlgorithm) + if err != nil { + return err + } + verifiers = append(verifiers, verifier) + } + + var failures []string + for _, verifier := range verifiers { + // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. + err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)) + if err == nil { + return nil + } + + failures = append(failures, err.Error()) + } + + if len(failures) == 0 { + // Coverage: We have checked there is at least one public key, any success causes an early return, + // and any failure adds an entry to failures => there must be at least one error. + return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded") + } + return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", ")) +} + +// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal components +// match expected values, both as specified by rules, and returns it. +// We return an *UntrustedSigstorePayload, although nothing actually uses it, +// just to double-check against stupid typos. +func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) { + unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature) + if err != nil { + return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err)) + } + + if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil { + return nil, err + } + + var unmatchedPayload UntrustedSigstorePayload + if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil { + return nil, NewInvalidSignatureError(err.Error()) + } + if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil { + return nil, err + } + // SigstorePayloadAcceptanceRules have accepted this value. + return &unmatchedPayload, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/mechanism.go b/tools/vendor/go.podman.io/image/v5/signature/mechanism.go new file mode 100644 index 000000000..897fc4997 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/mechanism.go @@ -0,0 +1,110 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + + // This code is used only to parse the data in an explicitly-untrusted + // code path, where cryptography is not relevant. For now, continue to + // use this frozen deprecated implementation. When mechanism_openpgp.go + // migrates to another implementation, this should migrate as well. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck +) + +// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. 
+// Each mechanism should eventually be closed by calling Close(). +type SigningMechanism interface { + // Close removes resources associated with the mechanism, if any. + Close() error + // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. + SupportsSigning() error + // Sign creates a (non-detached) signature of input using keyIdentity. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + Sign(input []byte, keyIdentity string) ([]byte, error) + // Verify parses unverifiedSignature and returns the content and the signer's identity. + // For mechanisms created using NewEphemeralGPGSigningMechanism, the returned key identity + // is expected to be one of the values returned by NewEphemeralGPGSigningMechanism, + // or the mechanism should implement signingMechanismWithVerificationIdentityLookup. + Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) + // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, + // along with a short identifier of the key used for signing. + // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) + // is NOT the same as a "key identity" used in other calls to this interface, and + // the values may have no recognizable relationship if the public key is not available. + UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) +} + +// signingMechanismWithPassphrase is an internal extension of SigningMechanism. +type signingMechanismWithPassphrase interface { + SigningMechanism + + // Sign creates a (non-detached) signature of input using keyIdentity and passphrase. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) +} + +// signingMechanismWithVerificationIdentityLookup is an internal extension of SigningMechanism. +type signingMechanismWithVerificationIdentityLookup interface { + SigningMechanism + // keyIdentityForVerificationKeyIdentity re-checks the key identity returned by Verify + // if it doesn't match an identity returned by NewEphemeralGPGSigningMechanism, trying to match it. + // (To be more specific, for mechanisms which return a subkey fingerprint from Verify, + // this converts the subkey fingerprint into the corresponding primary key fingerprint.) + keyIdentityForVerificationKeyIdentity(keyIdentity string) (string, error) +} + +// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. +type SigningNotSupportedError string + +func (err SigningNotSupportedError) Error() string { + return string(err) +} + +// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg) +// The caller must call .Close() on the returned SigningMechanism. +func NewGPGSigningMechanism() (SigningMechanism, error) { + return newGPGSigningMechanismInDirectory("") +} + +// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. 
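+//
+// Illustrative verification flow (a sketch; pubKeyBlob and sig are hypothetical
+// inputs, error handling elided):
+//
+//	mech, keyIdentities, _ := signature.NewEphemeralGPGSigningMechanism(pubKeyBlob)
+//	defer mech.Close()
+//	contents, keyIdentity, err := mech.Verify(sig)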
+func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + return newEphemeralGPGSigningMechanism([][]byte{blob}) +} + +// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography. + md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("The input is not a signature") + } + content, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: An error during reading the body can happen only if + // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key + // to decrypt the contents anyway), or + // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t. + return nil, "", err + } + + // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints + // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)! + return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/mechanism_gpgme.go b/tools/vendor/go.podman.io/image/v5/signature/mechanism_gpgme.go new file mode 100644 index 000000000..38b458911 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/mechanism_gpgme.go @@ -0,0 +1,179 @@ +//go:build !containers_image_openpgp + +package signature + +// This is shared by mechanism_gpgme_only.go and mechanism_sequoia.go; in both situations +// newGPGSigningMechanismInDirectory is implemented using GPGME. + +import ( + "bytes" + "errors" + "fmt" + "os" + + "github.com/proglottis/gpgme" + "go.podman.io/image/v5/signature/internal" +) + +// A GPG/OpenPGP signing mechanism, implemented using gpgme. +type gpgmeSigningMechanism struct { + ctx *gpgme.Context + ephemeralDir string // If not "", a directory to be removed on Close() +} + +// newGPGMESigningMechanism returns a new GPG/OpenPGP signing mechanism for ctx. +// The caller must call .Close() on the returned SigningMechanism; if ephemeralDir is set, +// the .Close() call will remove its contents. +func newGPGMESigningMechanism(ctx *gpgme.Context, ephemeralDir string) signingMechanismWithPassphrase { + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: ephemeralDir, + } +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return newGPGMESigningMechanism(ctx, ""), nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. 
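+// The returned context is configured for the OpenPGP protocol with binary
+// (non-armored, non-text-mode) output, which is what the signing and
+// verification code in this file expects.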
+func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity and passphrase. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + key, err := m.ctx.GetKey(keyIdentity, true) + if err != nil { + return nil, err + } + inputData, err := gpgme.NewDataBytes(input) + if err != nil { + return nil, err + } + var sigBuffer bytes.Buffer + sigData, err := gpgme.NewDataWriter(&sigBuffer) + if err != nil { + return nil, err + } + + if passphrase != "" { + // Callback to write the passphrase to the specified file descriptor. + callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error { + if prevWasBad { + return errors.New("bad passphrase") + } + _, err := gpgmeFD.WriteString(passphrase + "\n") + return err + } + if err := m.ctx.SetCallback(callback); err != nil { + return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err) + } + + // Loopback mode will use the callback instead of prompting the user. + if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil { + return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err) + } + } + + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { + return nil, err + } + return sigBuffer.Bytes(), nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity. +// For mechanisms created using NewEphemeralGPGSigningMechanism, the returned key identity +// is expected to be one of the values returned by NewEphemeralGPGSigningMechanism, +// or the mechanism should implement signingMechanismWithVerificationIdentityLookup. 
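+// (For GPGME, the identity returned below is the fingerprint of the key that
+// produced the signature, possibly a subkey; keyIdentityForVerificationKeyIdentity
+// maps a subkey fingerprint back to the primary key fingerprint.)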
+func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + signedBuffer := bytes.Buffer{} + signedData, err := gpgme.NewDataWriter(&signedBuffer) + if err != nil { + return nil, "", err + } + unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) + if err != nil { + return nil, "", err + } + _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) + if err != nil { + return nil, "", err + } + if len(sigs) != 1 { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))) + } + sig := sigs[0] + // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves + if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { + // FIXME: Better error reporting eventually + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig)) + } + return signedBuffer.Bytes(), sig.Fingerprint, nil +} + +// keyIdentityForVerificationKeyIdentity re-checks the key identity returned by Verify +// if it doesn't match an identity returned by NewEphemeralGPGSigningMechanism, trying to match it. +// (To be more specific, for mechanisms which return a subkey fingerprint from Verify, +// this converts the subkey fingerprint into the corresponding primary key fingerprint.) +func (m *gpgmeSigningMechanism) keyIdentityForVerificationKeyIdentity(keyIdentity string) (string, error) { + // In theory, if keyIdentity refers to a subkey, the same subkey could be attached to different primary keys; + // in that case, GetKey fails with “ambiguous name”. + // We _could_ handle that, by using KeyList* (GetKey is internally just a helper for KeyList*), but sharing + // a subkey that way is very unexpected, so, for now, prefer the much simpler implementation. + key, err := m.ctx.GetKey(keyIdentity, false) + if err != nil { + return "", err + } + // In theory this value could be nil if (gpg --list-keys --with-colons) misses a "pub:" line + // or a "fpr:" line, but gpg (in recent enough versions) prints that unconditionally. // codespell:ignore fpr + return key.Fingerprint(), nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/mechanism_gpgme_only.go b/tools/vendor/go.podman.io/image/v5/signature/mechanism_gpgme_only.go new file mode 100644 index 000000000..0f971ac6a --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/mechanism_gpgme_only.go @@ -0,0 +1,64 @@ +//go:build !containers_image_openpgp && !containers_image_sequoia + +package signature + +import ( + "os" + + "github.com/proglottis/gpgme" +) + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blobs, and returns the identities +// of these keys. 
+// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { + dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := importKeysFromBytes(ctx, blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) + } + + mech := newGPGMESigningMechanism(ctx, dir) + removeDir = false + return mech, keyIdentities, nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. +func importKeysFromBytes(ctx *gpgme.Context, blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/mechanism_openpgp.go b/tools/vendor/go.podman.io/image/v5/signature/mechanism_openpgp.go new file mode 100644 index 000000000..2f1b99d18 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/mechanism_openpgp.go @@ -0,0 +1,182 @@ +//go:build containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "strings" + "time" + + "go.podman.io/image/v5/signature/internal" + "go.podman.io/storage/pkg/homedir" + + // This is a fallback code; the primary recommendation is to use the gpgme mechanism + // implementation, which is out-of-process and more appropriate for handling long-term private key material + // than any Go implementation. + // For this verify-only fallback, we haven't reviewed any of the + // existing alternatives to choose; so, for now, continue to + // use this frozen deprecated implementation. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck +) + +// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. +type openpgpSigningMechanism struct { + keyring openpgp.EntityList +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. 
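+// In this pure-Go fallback, “using optionalDir” means reading pubring.gpg from
+// that directory (or from $GNUPGHOME / ~/.gnupg); no gpg-agent or external
+// process is involved.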
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + + gpgHome := optionalDir + if gpgHome == "" { + gpgHome = os.Getenv("GNUPGHOME") + if gpgHome == "" { + gpgHome = path.Join(homedir.Get(), ".gnupg") + } + } + + pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg")) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } else { + _, err := m.importKeysFromBytes(pubring) + if err != nil { + return nil, err + } + } + return m, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := m.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) + } + + return m, keyIdentities, nil +} + +func (m *openpgpSigningMechanism) Close() error { + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) + if err != nil { + k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) + if e2 != nil { + return nil, err // The original error -- FIXME: is this better? + } + keyring = k + } + + keyIdentities := []string{} + for _, entity := range keyring { + if entity.PrimaryKey == nil { + // Coverage: This should never happen, openpgp.ReadEntity fails with a + // openpgp.errors.StructuralError instead of returning an entity with this + // field set to nil. + continue + } + // Uppercase the fingerprint to be compatible with gpgme + keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) + m.keyring = append(m.keyring, entity) + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *openpgpSigningMechanism) SupportsSigning() error { + return SigningNotSupportedError("signing is not supported in go.podman.io/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + return nil, SigningNotSupportedError("signing is not supported in go.podman.io/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity. 
+// For mechanisms created using NewEphemeralGPGSigningMechanism, the returned key identity +// is expected to be one of the values returned by NewEphemeralGPGSigningMechanism, +// or the mechanism should implement signingMechanismWithVerificationIdentityLookup. +func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("not signed") + } + content, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted + // (and possibly also signed, but it _must_ be encrypted) and the signing + // “modification detection code” detects a mismatch. But in that case, + // we would expect the signature verification to fail as well, and that is checked + // first. Besides, we are not supplying any decryption keys, so we really + // can never reach this “encrypted data MDC mismatch” path. + return nil, "", err + } + if md.SignatureError != nil { + return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) + } + if md.SignedBy == nil { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Key not found for key ID %x in signature", md.SignedByKeyId)) + } + if md.Signature != nil { + if md.Signature.SigLifetimeSecs != nil { + expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) + if time.Now().After(expiry) { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry)) + } + } + } else if md.SignatureV3 == nil { + // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, + // or sets md.SignatureError. + return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set") + } + + // Uppercase the fingerprint to be compatible with gpgme + return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.Entity.PrimaryKey.Fingerprint)), nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/mechanism_sequoia.go b/tools/vendor/go.podman.io/image/v5/signature/mechanism_sequoia.go new file mode 100644 index 000000000..0a6f002f2 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/mechanism_sequoia.go @@ -0,0 +1,84 @@ +//go:build containers_image_sequoia + +package signature + +import ( + "go.podman.io/image/v5/signature/internal/sequoia" +) + +// A GPG/OpenPGP signing mechanism, implemented using Sequoia and only supporting verification. +// Legacy users who reach newGPGSigningMechanismInDirectory will use GPGME. 
+// Signing using Sequoia is preferable, but should happen via signature/simplesequoia.NewSigner, not using +// the legacy mechanism API. +type sequoiaEphemeralSigningMechanism struct { + inner *sequoia.SigningMechanism +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blobs, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { + if err := sequoia.Init(); err != nil { + return nil, nil, err // Coverage: This is impractical to test in-process, with the static go_sequoia_dlhandle. + } + + mech, err := sequoia.NewEphemeralMechanism() + if err != nil { + return nil, nil, err + } + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := mech.ImportKeys(blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) + } + + return &sequoiaEphemeralSigningMechanism{ + inner: mech, + }, keyIdentities, nil +} + +func (m *sequoiaEphemeralSigningMechanism) Close() error { + return m.inner.Close() +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *sequoiaEphemeralSigningMechanism) SupportsSigning() error { + // This code is externally reachable via NewEphemeralGPGSigningMechanism(), but that API provides no way to + // import or generate a key. + return SigningNotSupportedError("caller error: Attempt to sign using a mechanism created via NewEphemeralGPGSigningMechanism().") +} + +// Sign creates a (non-detached) signature of input using keyIdentity and passphrase. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *sequoiaEphemeralSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + // This code is externally reachable via NewEphemeralGPGSigningMechanism(), but that API provides no way to + // import or generate a key. + return nil, SigningNotSupportedError("caller error: Attempt to sign using a mechanism created via NewEphemeralGPGSigningMechanism().") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *sequoiaEphemeralSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity. +// For mechanisms created using NewEphemeralGPGSigningMechanism, the returned key identity +// is expected to be one of the values returned by NewEphemeralGPGSigningMechanism, +// or the mechanism should implement signingMechanismWithVerificationIdentityLookup. +func (m *sequoiaEphemeralSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + return m.inner.Verify(unverifiedSignature) +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. 
+func (m *sequoiaEphemeralSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/pki_cert.go b/tools/vendor/go.podman.io/image/v5/signature/pki_cert.go
new file mode 100644
index 000000000..6e3a40b79
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/pki_cert.go
@@ -0,0 +1,74 @@
+package signature
+
+import (
+	"crypto"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"slices"
+
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"go.podman.io/image/v5/signature/internal"
+)
+
+type pkiTrustRoot struct {
+	caRootsCertificates        *x509.CertPool
+	caIntermediateCertificates *x509.CertPool
+	subjectEmail               string
+	subjectHostname            string
+}
+
+func (p *pkiTrustRoot) validate() error {
+	if p.subjectEmail == "" && p.subjectHostname == "" {
+		return errors.New("Internal inconsistency: PKI use set up without subject email or subject hostname")
+	}
+	return nil
+}
+
+func verifyPKI(pkiTrustRoot *pkiTrustRoot, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) {
+	var untrustedIntermediatePool *x509.CertPool
+	if pkiTrustRoot.caIntermediateCertificates != nil {
+		untrustedIntermediatePool = pkiTrustRoot.caIntermediateCertificates.Clone()
+	} else {
+		untrustedIntermediatePool = x509.NewCertPool()
+	}
+	if len(untrustedIntermediateChainBytes) > 0 {
+		untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes)
+		if err != nil {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err))
+		}
+		if len(untrustedIntermediateChain) > 1 {
+			for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] {
+				untrustedIntermediatePool.AddCert(untrustedIntermediateCert)
+			}
+		}
+	}
+
+	untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
+		Intermediates: untrustedIntermediatePool,
+		Roots:         pkiTrustRoot.caRootsCertificates,
+		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+	}); err != nil {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err))
+	}
+
+	if pkiTrustRoot.subjectEmail != "" {
+		if !slices.Contains(untrustedCertificate.EmailAddresses, pkiTrustRoot.subjectEmail) {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
+				pkiTrustRoot.subjectEmail,
+				untrustedCertificate.EmailAddresses))
+		}
+	}
+	if pkiTrustRoot.subjectHostname != "" {
+		if err = untrustedCertificate.VerifyHostname(pkiTrustRoot.subjectHostname); err != nil {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected subject hostname: %v", err))
+		}
+	}
+
+	return untrustedCertificate.PublicKey, nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_config.go b/tools/vendor/go.podman.io/image/v5/signature/policy_config.go
new file mode 100644
index 000000000..50f445148
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/policy_config.go
@@ -0,0 +1,811 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by programs building them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.
+
+// FIXME? This is by no means a user-friendly parser: No location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"go.podman.io/image/v5/docker/reference"
+	"go.podman.io/image/v5/signature/internal"
+	"go.podman.io/image/v5/transports"
+	"go.podman.io/image/v5/types"
+	"go.podman.io/storage/pkg/fileutils"
+	"go.podman.io/storage/pkg/homedir"
+	"go.podman.io/storage/pkg/regexp"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X go.podman.io/image/v5/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// userPolicyFile is the path to the per-user policy file.
+var userPolicyFile = filepath.FromSlash(".config/containers/policy.json")
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+	return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// sys should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
+	policyPath, err := defaultPolicyPath(sys)
+	if err != nil {
+		return nil, err
+	}
+	return NewPolicyFromFile(policyPath)
+}
+
+// defaultPolicyPath returns a path to the relevant policy of the system, or an error if the policy is missing.
+func defaultPolicyPath(sys *types.SystemContext) (string, error) {
+	policyFilePath, err := defaultPolicyPathWithHomeDir(sys, homedir.Get(), systemDefaultPolicyPath)
+	if err != nil {
+		return "", err
+	}
+	return policyFilePath, nil
+}
+
+// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath,
+// it exists only to allow testing it with artificial paths.
+func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string, systemPolicyPath string) (string, error) {
+	if sys != nil && sys.SignaturePolicyPath != "" {
+		return sys.SignaturePolicyPath, nil
+	}
+	userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
+	if err := fileutils.Exists(userPolicyFilePath); err == nil {
+		return userPolicyFilePath, nil
+	}
+	if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+		return filepath.Join(sys.RootForImplicitAbsolutePaths, systemPolicyPath), nil
+	}
+	if err := fileutils.Exists(systemPolicyPath); err == nil {
+		return systemPolicyPath, nil
+	}
+	return "", fmt.Errorf("no policy.json file found at any of the following: %q, %q", userPolicyFilePath, systemPolicyPath)
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
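+//
+// For illustration only (an editor-added sketch, not part of the upstream file;
+// the path below is just an example value): typical callers load a policy and
+// wrap it in a PolicyContext, e.g.
+//
+//	policy, err := signature.NewPolicyFromFile("/etc/containers/policy.json")
+//	if err != nil {
+//		return err // report to the user and abort; do not hard-code a fallback
+//	}
+//	pc, err := signature.NewPolicyContext(policy)
+//	if err != nil {
+//		return err
+//	}
+//	defer pc.Destroy()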
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+	contents, err := os.ReadFile(fileName)
+	if err != nil {
+		return nil, err
+	}
+	policy, err := NewPolicyFromBytes(contents)
+	if err != nil {
+		return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err)
+	}
+	return policy, nil
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+	p := Policy{}
+	if err := json.Unmarshal(data, &p); err != nil {
+		return nil, InvalidPolicyFormatError(err.Error())
+	}
+	return &p, nil
+}
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (p *Policy) UnmarshalJSON(data []byte) error {
+	*p = Policy{}
+	transports := policyTransportsMap{}
+	if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+		switch key {
+		case "default":
+			return &p.Default
+		case "transports":
+			return &transports
+		default:
+			return nil
+		}
+	}); err != nil {
+		return err
+	}
+
+	if p.Default == nil {
+		return InvalidPolicyFormatError("Default policy is missing")
+	}
+	p.Transports = map[string]PolicyTransportScopes(transports)
+	return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
+// Compile-time check that policyTransportsMap implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+	// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+	// So, use a temporary map of pointers and convert.
+	tmpMap := map[string]*PolicyTransportScopes{}
+	if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+		// transport can be nil
+		transport := transports.Get(key)
+		// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+		if _, ok := tmpMap[key]; ok {
+			return nil
+		}
+		ptsWithTransport := policyTransportScopesWithTransport{
+			transport: transport,
+			dest:      &PolicyTransportScopes{}, // This allocates a new instance on each call.
+		}
+		tmpMap[key] = ptsWithTransport.dest
+		return &ptsWithTransport
+	}); err != nil {
+		return err
+	}
+	for key, ptr := range tmpMap {
+		(*m)[key] = *ptr
+	}
+	return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
+// We want to only use policyTransportScopesWithTransport.
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+	return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport if not nil.
+type policyTransportScopesWithTransport struct {
+	transport types.ImageTransport
+	dest      *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
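+//
+// Editor illustration (not upstream text): the object parsed here maps policy
+// scopes to requirement lists, e.g.
+//
+//	{
+//	    "docker.io/library": [{"type": "insecureAcceptAnything"}],
+//	    "": [{"type": "reject"}]
+//	}
+//
+// with each scope validated via ValidatePolicyConfigurationScope when the
+// transport is known.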
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. + tmpMap := map[string]*PolicyRequirements{} + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if _, ok := tmpMap[key]; ok { + return nil + } + if key != "" && m.transport != nil { + if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { + return nil + } + } + ptr := &PolicyRequirements{} // This allocates a new instance on each call. + tmpMap[key] = ptr + return ptr + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m.dest)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyRequirements implements json.Unmarshaler. +var _ json.Unmarshaler = (*PolicyRequirements)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + case prTypeSigstoreSigned: + res = &prSigstoreSigned{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. +func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
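+//
+// Editor note (illustration, not upstream text): only the exact object
+// {"type": "insecureAcceptAnything"} is accepted below; since parsing uses
+// ParanoidUnmarshalJSONObjectExactFields, an extra or misspelled field is an
+// error rather than being silently ignored.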
+func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prReject) UnmarshalJSON(data []byte) error { + *pr = prReject{} + var tmp prReject + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeReject { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *pr = *newPRReject() + return nil +} + +// newPRSignedBy returns a new prSignedBy if parameters are valid. +func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + if !keyType.IsValid() { + return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType)) + } + keySources := 0 + if keyPath != "" { + keySources++ + } + if keyPaths != nil { + keySources++ + } + if keyData != nil { + keySources++ + } + if keySources != 1 { + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified") + } + if signedIdentity == nil { + return nil, InvalidPolicyFormatError("signedIdentity not specified") + } + return &prSignedBy{ + prCommon: prCommon{Type: prTypeSignedBy}, + KeyType: keyType, + KeyPath: keyPath, + KeyPaths: keyPaths, + KeyData: keyData, + SignedIdentity: signedIdentity, + }, nil +} + +// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type. +func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity) +} + +// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath +func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPath(keyType, keyPath, signedIdentity) +} + +// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type. +func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity) +} + +// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths +func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity) +} + +// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type. 
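+//
+// For illustration only (editor-added sketch; the key path is an example value),
+// these NewPRSignedBy* helpers are used like:
+//
+//	req, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys,
+//		"/etc/containers/keys/trusted.gpg", signature.NewPRMMatchRepoDigestOrExact())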
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", nil, keyData, signedIdentity) +} + +// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData +func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyData(keyType, keyData, signedIdentity) +} + +// Compile-time check that prSignedBy implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prSignedBy) UnmarshalJSON(data []byte) error { + *pr = prSignedBy{} + var tmp prSignedBy + var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "type": + return &tmp.Type + case "keyType": + return &tmp.KeyType + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBy { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var res *prSignedBy + var err error + switch { + case gotKeyPath && !gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) + case !gotKeyPath && gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && gotKeyData: + res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && !gotKeyData: + return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present") + default: + return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present") + } + if err != nil { + return err + } + *pr = *res + + return nil +} + +// IsValid returns true iff kt is a recognized value +func (kt sbKeyType) IsValid() bool { + switch kt { + case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys, + SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + return true + default: + return false + } +} + +// Compile-time check that sbKeyType implements json.Unmarshaler. +var _ json.Unmarshaler = (*sbKeyType)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (kt *sbKeyType) UnmarshalJSON(data []byte) error { + *kt = sbKeyType("") + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if !sbKeyType(s).IsValid() { + return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s)) + } + *kt = sbKeyType(s) + return nil +} + +// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type. 
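+//
+// Editor illustration (not upstream text): in JSON form this requirement is, e.g.,
+//
+//	{"type": "signedBaseLayer", "baseLayerIdentity": {"type": "matchRepository"}}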
+func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) { + if baseLayerIdentity == nil { + return nil, InvalidPolicyFormatError("baseLayerIdentity not specified") + } + return &prSignedBaseLayer{ + prCommon: prCommon{Type: prTypeSignedBaseLayer}, + BaseLayerIdentity: baseLayerIdentity, + }, nil +} + +// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement. +func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedBaseLayer(baseLayerIdentity) +} + +// Compile-time check that prSignedBaseLayer implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBaseLayer)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { + *pr = prSignedBaseLayer{} + var tmp prSignedBaseLayer + var baseLayerIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "baseLayerIdentity": &baseLayerIdentity, + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBaseLayer { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) + if err != nil { + return err + } + res, err := newPRSignedBaseLayer(bli) + if err != nil { + // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid. + return err + } + *pr = *res + return nil +} + +// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. +func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { + var typeField prmCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyReferenceMatch + switch typeField.Type { + case prmTypeMatchExact: + res = &prmMatchExact{} + case prmTypeMatchRepoDigestOrExact: + res = &prmMatchRepoDigestOrExact{} + case prmTypeMatchRepository: + res = &prmMatchRepository{} + case prmTypeExactReference: + res = &prmExactReference{} + case prmTypeExactRepository: + res = &prmExactRepository{} + case prmTypeRemapIdentity: + res = &prmRemapIdentity{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRMMatchExact is NewPRMMatchExact, except it returns the private type. +func newPRMMatchExact() *prmMatchExact { + return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}} +} + +// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch. +func NewPRMMatchExact() PolicyReferenceMatch { + return newPRMMatchExact() +} + +// Compile-time check that prmMatchExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchExact{} + var tmp prmMatchExact + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchExact() + return nil +} + +// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type. 
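+//
+// Editor note (illustration, not upstream text): this is also the default
+// signedIdentity; both prSignedBy and prSigstoreSigned substitute
+// NewPRMMatchRepoDigestOrExact() when the JSON omits "signedIdentity".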
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact { + return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}} +} + +// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch. +func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch { + return newPRMMatchRepoDigestOrExact() +} + +// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepoDigestOrExact{} + var tmp prmMatchRepoDigestOrExact + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepoDigestOrExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchRepoDigestOrExact() + return nil +} + +// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type. +func newPRMMatchRepository() *prmMatchRepository { + return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} +} + +// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch. +func NewPRMMatchRepository() PolicyReferenceMatch { + return newPRMMatchRepository() +} + +// Compile-time check that prmMatchRepository implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepository)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepository{} + var tmp prmMatchRepository + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepository { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchRepository() + return nil +} + +// newPRMExactReference is NewPRMExactReference, except it returns the private type. +func newPRMExactReference(dockerReference string) (*prmExactReference, error) { + ref, err := reference.ParseNormalizedNamed(dockerReference) + if err != nil { + return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", dockerReference, err.Error())) + } + if reference.IsNameOnly(ref) { + return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference)) + } + return &prmExactReference{ + prmCommon: prmCommon{Type: prmTypeExactReference}, + DockerReference: dockerReference, + }, nil +} + +// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch. +func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) { + return newPRMExactReference(dockerReference) +} + +// Compile-time check that prmExactReference implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmExactReference)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
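+//
+// Editor illustration (not upstream text): the accepted JSON form is, e.g.,
+//
+//	{"type": "exactReference", "dockerReference": "docker.io/library/busybox:1.36"}
+//
+// newPRMExactReference requires a tag or digest, so a name-only value such as
+// "docker.io/library/busybox" is rejected with an InvalidPolicyFormatError.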
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+	*prm = prmExactReference{}
+	var tmp prmExactReference
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"type":            &tmp.Type,
+		"dockerReference": &tmp.DockerReference,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactReference {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
+	}
+
+	res, err := newPRMExactReference(tmp.DockerReference)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
+
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error()))
+	}
+	return &prmExactRepository{
+		prmCommon:        prmCommon{Type: prmTypeExactRepository},
+		DockerRepository: dockerRepository,
+	}, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+	return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmExactRepository{}
+	var tmp prmExactRepository
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"type":             &tmp.Type,
+		"dockerRepository": &tmp.DockerRepository,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
+	}
+
+	res, err := newPRMExactRepository(tmp.DockerRepository)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
+
+// Private objects for validateIdentityRemappingPrefix
+var (
+	// remapIdentityDomainRegexp matches exactly a reference domain (name[:port])
+	remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
+	// remapIdentityDomainPrefixRegexp matches a reference that starts with a domain;
+	// we need this because reference.NameRegexp accepts short names with docker.io implied.
+	remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/")
+	// remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
+	remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$")
+)
+
+// validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
+// for the Prefix or SignedPrefix values of prmRemapIdentity.
+// Note that it may not recognize _all_ invalid values.
+func validateIdentityRemappingPrefix(s string) error {
+	if remapIdentityDomainRegexp.MatchString(s) ||
+		(remapIdentityNameRegexp.MatchString(s) && remapIdentityDomainPrefixRegexp.MatchString(s)) {
+		// FIXME? This does not reject "shortname" nor "ns/shortname", because docker/reference
+		// does not provide an API for the short vs. long name logic.
+		// It will either not match, or fail in the ParseNamed call of
+		// prmRemapIdentity.remapReferencePrefix when trying to use such a prefix.
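+		// Editor illustration (not upstream text): "docker.io", "example.com:5000",
+		// and "example.com/ns/repo" all validate here, while per the FIXME above a
+		// bare "shortname" may also pass and only fail later in ParseNamed.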
+		return nil
+	}
+	return InvalidPolicyFormatError(fmt.Sprintf("prefix %q is not valid", s))
+}
+
+// newPRMRemapIdentity is NewPRMRemapIdentity, except it returns the private type.
+func newPRMRemapIdentity(prefix, signedPrefix string) (*prmRemapIdentity, error) {
+	if err := validateIdentityRemappingPrefix(prefix); err != nil {
+		return nil, err
+	}
+	if err := validateIdentityRemappingPrefix(signedPrefix); err != nil {
+		return nil, err
+	}
+	return &prmRemapIdentity{
+		prmCommon:    prmCommon{Type: prmTypeRemapIdentity},
+		Prefix:       prefix,
+		SignedPrefix: signedPrefix,
+	}, nil
+}
+
+// NewPRMRemapIdentity returns a new "remapIdentity" PolicyReferenceMatch.
+func NewPRMRemapIdentity(prefix, signedPrefix string) (PolicyReferenceMatch, error) {
+	return newPRMRemapIdentity(prefix, signedPrefix)
+}
+
+// Compile-time check that prmRemapIdentity implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
+	*prm = prmRemapIdentity{}
+	var tmp prmRemapIdentity
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"type":         &tmp.Type,
+		"prefix":       &tmp.Prefix,
+		"signedPrefix": &tmp.SignedPrefix,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeRemapIdentity {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
+	}
+
+	res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_config_sigstore.go b/tools/vendor/go.podman.io/image/v5/signature/policy_config_sigstore.go
new file mode 100644
index 000000000..87fb45558
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/policy_config_sigstore.go
@@ -0,0 +1,630 @@
+package signature
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"go.podman.io/image/v5/signature/internal"
+)
+
+// PRSigstoreSignedOption is a way to pass values to NewPRSigstoreSigned
+type PRSigstoreSignedOption func(*prSigstoreSigned) error
+
+// PRSigstoreSignedWithKeyPath specifies a value for the "keyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyPath != "" {
+			return InvalidPolicyFormatError(`"keyPath" already specified`)
+		}
+		pr.KeyPath = keyPath
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyPaths != nil {
+			return InvalidPolicyFormatError(`"keyPaths" already specified`)
+		}
+		if len(keyPaths) == 0 {
+			return InvalidPolicyFormatError(`"keyPaths" contains no entries`)
+		}
+		pr.KeyPaths = keyPaths
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyData != nil {
+			return InvalidPolicyFormatError(`"keyData" already specified`)
+		}
+		pr.KeyData = keyData
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned.
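+//
+// For illustration only (editor-added sketch; the key path is an example value),
+// options like these compose into a requirement via NewPRSigstoreSigned:
+//
+//	req, err := signature.NewPRSigstoreSigned(
+//		signature.PRSigstoreSignedWithKeyPath("/etc/containers/cosign.pub"),
+//		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMMatchRepoDigestOrExact()),
+//	)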
+func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyDatas != nil {
+			return InvalidPolicyFormatError(`"keyDatas" already specified`)
+		}
+		if len(keyDatas) == 0 {
+			return InvalidPolicyFormatError(`"keyDatas" contains no entries`)
+		}
+		pr.KeyDatas = keyDatas
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.Fulcio != nil {
+			return InvalidPolicyFormatError(`"fulcio" already specified`)
+		}
+		pr.Fulcio = fulcio
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithPKI specifies a value for the "pki" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithPKI(p PRSigstoreSignedPKI) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.PKI != nil {
+			return InvalidPolicyFormatError(`"pki" already specified`)
+		}
+		pr.PKI = p
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyPath != "" {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`)
+		}
+		pr.RekorPublicKeyPath = rekorPublicKeyPath
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublickeyPaths []string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyPaths != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPaths" already specified`)
+		}
+		if len(rekorPublickeyPaths) == 0 {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPaths" contains no entries`)
+		}
+		pr.RekorPublicKeyPaths = rekorPublickeyPaths
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyData != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`)
+		}
+		pr.RekorPublicKeyData = rekorPublicKeyData
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublicKeyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublickeyDatas [][]byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyDatas != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyDatas" already specified`)
+		}
+		if len(rekorPublickeyDatas) == 0 {
+			return InvalidPolicyFormatError(`"rekorPublicKeyDatas" contains no entries`)
+		}
+		pr.RekorPublicKeyDatas = rekorPublickeyDatas
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.SignedIdentity != nil {
+			return InvalidPolicyFormatError(`"signedIdentity" already specified`)
+		}
+		pr.SignedIdentity = signedIdentity
+		return nil
+	}
+}
+
+// newPRSigstoreSigned is NewPRSigstoreSigned, except it returns the private type.
+func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, error) {
+	res := prSigstoreSigned{
+		prCommon: prCommon{Type: prTypeSigstoreSigned},
+	}
+	for _, o := range options {
+		if err := o(&res); err != nil {
+			return nil, err
+		}
+	}
+
+	keySources := 0
+	if res.KeyPath != "" {
+		keySources++
+	}
+	if res.KeyPaths != nil {
+		keySources++
+	}
+	if res.KeyData != nil {
+		keySources++
+	}
+	if res.KeyDatas != nil {
+		keySources++
+	}
+	if res.Fulcio != nil {
+		keySources++
+	}
+	if res.PKI != nil {
+		keySources++
+	}
+	if keySources != 1 {
+		return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas, fulcio, and pki must be specified")
+	}
+
+	rekorSources := 0
+	if res.RekorPublicKeyPath != "" {
+		rekorSources++
+	}
+	if res.RekorPublicKeyPaths != nil {
+		rekorSources++
+	}
+	if res.RekorPublicKeyData != nil {
+		rekorSources++
+	}
+	if res.RekorPublicKeyDatas != nil {
+		rekorSources++
+	}
+	if rekorSources > 1 {
+		return nil, InvalidPolicyFormatError("at most one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas can be used simultaneously")
+	}
+	if res.Fulcio != nil && rekorSources == 0 {
+		return nil, InvalidPolicyFormatError("At least one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas must be specified if fulcio is used")
+	}
+	if res.PKI != nil && rekorSources > 0 {
+		return nil, InvalidPolicyFormatError("rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas are not supported for pki")
+	}
+
+	if res.SignedIdentity == nil {
+		return nil, InvalidPolicyFormatError("signedIdentity not specified")
+	}
+
+	return &res, nil
+}
+
+// NewPRSigstoreSigned returns a new "sigstoreSigned" PolicyRequirement based on options.
+func NewPRSigstoreSigned(options ...PRSigstoreSignedOption) (PolicyRequirement, error) {
+	return newPRSigstoreSigned(options...)
+}
+
+// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
+func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return NewPRSigstoreSigned(
+		PRSigstoreSignedWithKeyPath(keyPath),
+		PRSigstoreSignedWithSignedIdentity(signedIdentity),
+	)
+}
+
+// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
+func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return NewPRSigstoreSigned(
+		PRSigstoreSignedWithKeyData(keyData),
+		PRSigstoreSignedWithSignedIdentity(signedIdentity),
+	)
+}
+
+// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
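+//
+// Editor illustration (not upstream text): a typical JSON input parsed below is
+//
+//	{"type": "sigstoreSigned", "keyPath": "/etc/containers/cosign.pub",
+//	 "signedIdentity": {"type": "matchRepoDigestOrExact"}}
+//
+// where "signedIdentity" again defaults to matchRepoDigestOrExact when omitted.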
+func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { + *pr = prSigstoreSigned{} + var tmp prSigstoreSigned + var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio, gotPKI bool + var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool + var fulcio prSigstoreSignedFulcio + var pki prSigstoreSignedPKI + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "type": + return &tmp.Type + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "keyDatas": + gotKeyDatas = true + return &tmp.KeyDatas + case "fulcio": + gotFulcio = true + return &fulcio + case "rekorPublicKeyPath": + gotRekorPublicKeyPath = true + return &tmp.RekorPublicKeyPath + case "rekorPublicKeyPaths": + gotRekorPublicKeyPaths = true + return &tmp.RekorPublicKeyPaths + case "rekorPublicKeyData": + gotRekorPublicKeyData = true + return &tmp.RekorPublicKeyData + case "rekorPublicKeyDatas": + gotRekorPublicKeyDatas = true + return &tmp.RekorPublicKeyDatas + case "pki": + gotPKI = true + return &pki + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSigstoreSigned { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var opts []PRSigstoreSignedOption + if gotKeyPath { + opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath)) + } + if gotKeyPaths { + opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths)) + } + if gotKeyData { + opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData)) + } + if gotKeyDatas { + opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas)) + } + if gotFulcio { + opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio)) + } + if gotRekorPublicKeyPath { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath)) + } + if gotRekorPublicKeyPaths { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths)) + } + if gotRekorPublicKeyData { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData)) + } + if gotRekorPublicKeyDatas { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas)) + } + if gotPKI { + opts = append(opts, PRSigstoreSignedWithPKI(&pki)) + } + opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity)) + + res, err := newPRSigstoreSigned(opts...) 
+ if err != nil { + return err + } + *pr = *res + return nil +} + +// PRSigstoreSignedFulcioOption is a way to pass values to NewPRSigstoreSignedFulcio +type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error + +// PRSigstoreSignedFulcioWithCAPath specifies a value for the "caPath" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAPath != "" { + return InvalidPolicyFormatError(`"caPath" already specified`) + } + f.CAPath = caPath + return nil + } +} + +// PRSigstoreSignedFulcioWithCAData specifies a value for the "caData" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAData != nil { + return InvalidPolicyFormatError(`"caData" already specified`) + } + f.CAData = caData + return nil + } +} + +// PRSigstoreSignedFulcioWithOIDCIssuer specifies a value for the "oidcIssuer" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.OIDCIssuer != "" { + return InvalidPolicyFormatError(`"oidcIssuer" already specified`) + } + f.OIDCIssuer = oidcIssuer + return nil + } +} + +// PRSigstoreSignedFulcioWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.SubjectEmail != "" { + return InvalidPolicyFormatError(`"subjectEmail" already specified`) + } + f.SubjectEmail = subjectEmail + return nil + } +} + +// newPRSigstoreSignedFulcio is NewPRSigstoreSignedFulcio, except it returns the private type +func newPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (*prSigstoreSignedFulcio, error) { + res := prSigstoreSignedFulcio{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CAPath != "" && res.CAData != nil { + return nil, InvalidPolicyFormatError("caPath and caData cannot be used simultaneously") + } + if res.CAPath == "" && res.CAData == nil { + return nil, InvalidPolicyFormatError("At least one of caPath and caData must be specified") + } + if res.OIDCIssuer == "" { + return nil, InvalidPolicyFormatError("oidcIssuer not specified") + } + if res.SubjectEmail == "" { + return nil, InvalidPolicyFormatError("subjectEmail not specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedFulcio returns a PRSigstoreSignedFulcio based on options. +func NewPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (PRSigstoreSignedFulcio, error) { + return newPRSigstoreSignedFulcio(options...) +} + +// Compile-time check that prSigstoreSignedFulcio implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedFulcio)(nil) + +func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error { + *f = prSigstoreSignedFulcio{} + var tmp prSigstoreSignedFulcio + var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false... 
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "caPath": + gotCAPath = true + return &tmp.CAPath + case "caData": + gotCAData = true + return &tmp.CAData + case "oidcIssuer": + gotOIDCIssuer = true + return &tmp.OIDCIssuer + case "subjectEmail": + gotSubjectEmail = true + return &tmp.SubjectEmail + default: + return nil + } + }); err != nil { + return err + } + + var opts []PRSigstoreSignedFulcioOption + if gotCAPath { + opts = append(opts, PRSigstoreSignedFulcioWithCAPath(tmp.CAPath)) + } + if gotCAData { + opts = append(opts, PRSigstoreSignedFulcioWithCAData(tmp.CAData)) + } + if gotOIDCIssuer { + opts = append(opts, PRSigstoreSignedFulcioWithOIDCIssuer(tmp.OIDCIssuer)) + } + if gotSubjectEmail { + opts = append(opts, PRSigstoreSignedFulcioWithSubjectEmail(tmp.SubjectEmail)) + } + + res, err := newPRSigstoreSignedFulcio(opts...) + if err != nil { + return err + } + + *f = *res + return nil +} + +// PRSigstoreSignedPKIOption is a way to pass values to NewPRSigstoreSignedPKI +type PRSigstoreSignedPKIOption func(*prSigstoreSignedPKI) error + +// PRSigstoreSignedPKIWithCARootsPath specifies a value for the "caRootsPath" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCARootsPath(caRootsPath string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CARootsPath != "" { + return InvalidPolicyFormatError(`"caRootsPath" already specified`) + } + p.CARootsPath = caRootsPath + return nil + } +} + +// PRSigstoreSignedPKIWithCARootsData specifies a value for the "caRootsData" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCARootsData(caRootsData []byte) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CARootsData != nil { + return InvalidPolicyFormatError(`"caRootsData" already specified`) + } + p.CARootsData = caRootsData + return nil + } +} + +// PRSigstoreSignedPKIWithCAIntermediatesPath specifies a value for the "caIntermediatesPath" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCAIntermediatesPath(caIntermediatesPath string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CAIntermediatesPath != "" { + return InvalidPolicyFormatError(`"caIntermediatesPath" already specified`) + } + p.CAIntermediatesPath = caIntermediatesPath + return nil + } +} + +// PRSigstoreSignedPKIWithCAIntermediatesData specifies a value for the "caIntermediatesData" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCAIntermediatesData(caIntermediatesData []byte) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CAIntermediatesData != nil { + return InvalidPolicyFormatError(`"caIntermediatesData" already specified`) + } + p.CAIntermediatesData = caIntermediatesData + return nil + } +} + +// PRSigstoreSignedPKIWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithSubjectEmail(subjectEmail string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.SubjectEmail != "" { + return InvalidPolicyFormatError(`"subjectEmail" already specified`) + } + p.SubjectEmail = subjectEmail + return nil + } +} + +// PRSigstoreSignedPKIWithSubjectHostname specifies a value for the "subjectHostname" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithSubjectHostname(subjectHostname string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) 
error { + if p.SubjectHostname != "" { + return InvalidPolicyFormatError(`"subjectHostname" already specified`) + } + p.SubjectHostname = subjectHostname + return nil + } +} + +// newPRSigstoreSignedPKI is NewPRSigstoreSignedPKI, except it returns the private type +func newPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (*prSigstoreSignedPKI, error) { + res := prSigstoreSignedPKI{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CARootsPath != "" && res.CARootsData != nil { + return nil, InvalidPolicyFormatError("caRootsPath and caRootsData cannot be used simultaneously") + } + if res.CARootsPath == "" && res.CARootsData == nil { + return nil, InvalidPolicyFormatError("At least one of caRootsPath and caRootsData must be specified") + } + + if res.CAIntermediatesPath != "" && res.CAIntermediatesData != nil { + return nil, InvalidPolicyFormatError("caIntermediatesPath and caIntermediatesData cannot be used simultaneously") + } + + if res.SubjectEmail == "" && res.SubjectHostname == "" { + return nil, InvalidPolicyFormatError("At least one of subjectEmail, subjectHostname must be specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedPKI returns a PRSigstoreSignedPKI based on options. +func NewPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (PRSigstoreSignedPKI, error) { + return newPRSigstoreSignedPKI(options...) +} + +// Compile-time check that prSigstoreSignedPKI implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedPKI)(nil) + +func (p *prSigstoreSignedPKI) UnmarshalJSON(data []byte) error { + *p = prSigstoreSignedPKI{} + var tmp prSigstoreSignedPKI + var gotCARootsPath, gotCARootsData, gotCAIntermediatesPath, gotCAIntermediatesData, gotSubjectEmail, gotSubjectHostname bool + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "caRootsPath": + gotCARootsPath = true + return &tmp.CARootsPath + case "caRootsData": + gotCARootsData = true + return &tmp.CARootsData + case "caIntermediatesPath": + gotCAIntermediatesPath = true + return &tmp.CAIntermediatesPath + case "caIntermediatesData": + gotCAIntermediatesData = true + return &tmp.CAIntermediatesData + case "subjectEmail": + gotSubjectEmail = true + return &tmp.SubjectEmail + case "subjectHostname": + gotSubjectHostname = true + return &tmp.SubjectHostname + default: + return nil + } + }); err != nil { + return err + } + + var opts []PRSigstoreSignedPKIOption + if gotCARootsPath { + opts = append(opts, PRSigstoreSignedPKIWithCARootsPath(tmp.CARootsPath)) + } + if gotCARootsData { + opts = append(opts, PRSigstoreSignedPKIWithCARootsData(tmp.CARootsData)) + } + if gotCAIntermediatesPath { + opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesPath(tmp.CAIntermediatesPath)) + } + if gotCAIntermediatesData { + opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesData(tmp.CAIntermediatesData)) + } + if gotSubjectEmail { + opts = append(opts, PRSigstoreSignedPKIWithSubjectEmail(tmp.SubjectEmail)) + } + if gotSubjectHostname { + opts = append(opts, PRSigstoreSignedPKIWithSubjectHostname(tmp.SubjectHostname)) + } + + res, err := newPRSigstoreSignedPKI(opts...) 
+	if err != nil {
+		return err
+	}
+
+	*p = *res
+	return nil
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_eval.go b/tools/vendor/go.podman.io/image/v5/signature/policy_eval.go
new file mode 100644
index 000000000..2d0db05ae
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/policy_eval.go
@@ -0,0 +1,293 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/sirupsen/logrus"
+	"go.podman.io/image/v5/internal/private"
+	"go.podman.io/image/v5/internal/unparsedimage"
+	"go.podman.io/image/v5/types"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+	return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+	sarAccepted signatureAcceptanceResult = "sarAccepted"
+	sarRejected signatureAcceptanceResult = "sarRejected"
+	sarUnknown  signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+	// costly initialization like creating temporary GPG home directories and reading files.
+	// Setup() (someState, error)
+	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
+	// - sarAccepted if the signature has been verified against the appropriate public key
+	//   (where "appropriate public key" may depend on the contents of the signature);
+	//   in that case a parsed Signature should be returned.
+	// - sarRejected if the signature has not been verified;
+	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+	//   succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+	//   Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+	// but it does not necessarily mean that the contents of the signature are
+	// consistent with local policy.
+	// For example:
+	// - Do not use a true value to determine whether to run
+	//   a container based on this image; use IsRunningImageAllowed instead.
+	// - Just because a signature is accepted does not automatically mean the contents of the
+	//   signature are authorized to run code as root, or to affect system or cluster configuration.
+	isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+	// isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+ // WARNING: This validates signatures and the manifest, but does not download or validate the + // layers. Users must validate that the layers match their expected digests. + isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) +} + +// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. +// The type is public, but its implementation is private. +type PolicyReferenceMatch interface { + // matchesDockerReference decides whether a specific image identity is accepted for an image + // (or, usually, for the image's Reference().DockerReference()). Note that + // image.Reference().DockerReference() may be nil. + matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool +} + +// PolicyContext encapsulates a policy and possible cached state +// for speeding up its evaluation. +type PolicyContext struct { + Policy *Policy + state policyContextState // Internal consistency checking +} + +// policyContextState is used internally to verify the users are not misusing a PolicyContext. +type policyContextState string + +const ( + pcInitializing policyContextState = "Initializing" + pcReady policyContextState = "Ready" + pcInUse policyContextState = "InUse" + pcDestroying policyContextState = "Destroying" + pcDestroyed policyContextState = "Destroyed" +) + +// changeState changes pc.state, or fails if the state is unexpected +func (pc *PolicyContext) changeState(expected, new policyContextState) error { + if pc.state != expected { + return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state) + } + pc.state = new + return nil +} + +// NewPolicyContext sets up and initializes a context for the specified policy. +// The policy must not be modified while the context exists. FIXME: make a deep copy? +// If this function succeeds, the caller should call PolicyContext.Destroy() when done. +func NewPolicyContext(policy *Policy) (*PolicyContext, error) { + pc := &PolicyContext{Policy: policy, state: pcInitializing} + // FIXME: initialize + if err := pc.changeState(pcInitializing, pcReady); err != nil { + // Huh?! This should never fail, we didn't give the pointer to anybody. + // Just give up and leave unclean state around. + return nil, err + } + return pc, nil +} + +// Destroy should be called when the user of the context is done with it. +func (pc *PolicyContext) Destroy() error { + if err := pc.changeState(pcReady, pcDestroying); err != nil { + return err + } + // FIXME: destroy + return pc.changeState(pcDestroying, pcDestroyed) +} + +// policyIdentityLogName returns a string description of the image identity for policy purposes. +// ONLY use this for log messages, not for any decisions! +func policyIdentityLogName(ref types.ImageReference) string { + return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity() +} + +// requirementsForImageRef selects the appropriate requirements for ref. +func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements { + // Do we have a PolicyTransportScopes for this transport? + transportName := ref.Transport().Name() + if transportScopes, ok := pc.Policy.Transports[transportName]; ok { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if req, ok := transportScopes[identity]; ok { + logrus.Debugf(` Using transport %q policy section %q`, transportName, identity) + return req + } + + // Look for a match of the possible parent namespaces. 
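+		// Editor illustration (not upstream text): for an identity such as
+		// "docker.io/library/busybox", the namespaces tried here would be,
+		// roughly, "docker.io/library" and then "docker.io".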
+	for _, name := range ref.PolicyConfigurationNamespaces() {
+		if req, ok := transportScopes[name]; ok {
+			logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name)
+			return req
+		}
+	}
+
+	// Look for a default match for the transport.
+	if req, ok := transportScopes[""]; ok {
+		logrus.Debugf(` Using transport %q policy section ""`, transportName)
+		return req
+	}
+	}
+
+	logrus.Debugf(" Using default policy section")
+	return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+//   a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+//   signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			sigs = nil
+			finalErr = err
+		}
+	}()
+
+	image := unparsedimage.FromPublic(publicImage)
+
+	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	// FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!)
+	unverifiedSignatures, err := image.Signatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]*Signature, 0, len(unverifiedSignatures))
+	for sigNumber, sig := range unverifiedSignatures {
+		var acceptedSig *Signature // non-nil if accepted
+		rejected := false
+		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+		logrus.Debugf("Evaluating signature %d:", sigNumber)
+	interpretingReqs:
+		for reqNumber, req := range reqs {
+			// FIXME: Log the requirement itself? For now, we use just the number.
+			// FIXME: supply state
+			switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
+			case sarAccepted:
+				if as == nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+				if acceptedSig == nil {
+					acceptedSig = as
+				} else if *as != *acceptedSig { // Coverage: this should never happen
+					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) + rejected = true + acceptedSig = nil + break interpretingReqs + } + case sarRejected: + logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + case sarUnknown: + if err != nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) + default: // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) + rejected = true + break interpretingReqs + } + } + // This also handles the (invalid) case of empty reqs, by rejecting the signature. + if acceptedSig != nil && !rejected { + logrus.Debugf(" Overall: OK, signature accepted") + res = append(res, acceptedSig) + } else { + logrus.Debugf(" Overall: Signature not accepted") + } + } + return res, nil +} + +// IsRunningImageAllowed returns true iff the policy allows running the image. +// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation +// succeeded but the result was rejection. +// WARNING: This validates signatures and the manifest, but does not download or validate the +// layers. Users must validate that the layers match their expected digests. +func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return false, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + res = false + finalErr = err + } + }() + + image := unparsedimage.FromPublic(publicImage) + + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + if len(reqs) == 0 { + return false, PolicyRequirementError("List of verification policy requirements must not be empty") + } + + for reqNumber, req := range reqs { + // FIXME: supply state + allowed, err := req.isRunningImageAllowed(ctx, image) + if !allowed { + logrus.Debugf("Requirement %d: denied, done", reqNumber) + return false, err + } + logrus.Debugf(" Requirement %d: allowed", reqNumber) + } + // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. + logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_eval_baselayer.go b/tools/vendor/go.podman.io/image/v5/signature/policy_eval_baselayer.go new file mode 100644 index 000000000..f310342d1 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/policy_eval_baselayer.go @@ -0,0 +1,20 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "context" + + "github.com/sirupsen/logrus" + "go.podman.io/image/v5/internal/private" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? 
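For orientation, here is a minimal sketch (not part of this patch) of how a caller drives the PolicyContext API defined in policy_eval.go above; the prSignedBaseLayer evaluation continues below. It assumes NewPolicyFromFile (from policy_config.go, not shown in this hunk) and the alltransports and image helper packages exist under the same vendored go.podman.io/image/v5 module path.

// Sketch only, under the assumptions named above.
package main

import (
	"context"
	"fmt"
	"log"

	"go.podman.io/image/v5/image"
	"go.podman.io/image/v5/signature"
	"go.podman.io/image/v5/transports/alltransports"
)

func main() {
	ctx := context.Background()

	policy, err := signature.NewPolicyFromFile("/etc/containers/policy.json")
	if err != nil {
		log.Fatal(err)
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Destroy() // Required by the state machine above (pcReady -> pcDestroying -> pcDestroyed).

	ref, err := alltransports.ParseImageName("docker://quay.io/example/app:latest")
	if err != nil {
		log.Fatal(err)
	}
	src, err := ref.NewImageSource(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	// Validates signatures and the manifest only; layer digests must be checked by the caller.
	allowed, err := pc.IsRunningImageAllowed(ctx, image.UnparsedInstance(src, nil))
	fmt.Println("allowed:", allowed, "err:", err)
}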
+ logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_eval_signedby.go b/tools/vendor/go.podman.io/image/v5/signature/policy_eval_signedby.go new file mode 100644 index 000000000..21ed59494 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/policy_eval_signedby.go @@ -0,0 +1,116 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "context" + "errors" + "fmt" + + digest "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/internal/multierr" + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/manifest" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? + return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value %q`, string(pr.KeyType)) + default: + // This should never happen, newPRSignedBy ensures KeyType.IsValid() + return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value %q`, string(pr.KeyType)) + } + + // FIXME: move this to per-context initialization + const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified` + data, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: notOneSourceErrorText, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + }) + if err != nil { + return sarRejected, nil, err + } + if data == nil { + return sarRejected, nil, errors.New(notOneSourceErrorText) + } + + // FIXME: move this to per-context initialization + mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data) + if err != nil { + return sarRejected, nil, err + } + defer mech.Close() + if len(trustedIdentities) == 0 { + return sarRejected, nil, PolicyRequirementError("No public keys imported") + } + + signature, _, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ + acceptedKeyIdentities: trustedIdentities, + validateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) + } + return nil + }, + validateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, nil, err + } + + return sarAccepted, signature, nil +} + +func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME: Use image.UntrustedSignatures, use that to improve error messages + // (needs tests!) + sigs, err := image.Signatures(ctx) + if err != nil { + return false, err + } + var rejections []error + for _, s := range sigs { + var reason error + switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res { + case sarAccepted: + // One accepted signature is enough. 
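As a companion to the prSignedBy evaluation above (whose isRunningImageAllowed body continues below), this is a hedged sketch of a policy document that would exercise it. signature.NewPolicyFromBytes is assumed from policy_config.go, which is not part of this hunk; the repository and key path are placeholders.

// Sketch only, under the assumptions named above.
package main

import (
	"fmt"
	"log"

	"go.podman.io/image/v5/signature"
)

func main() {
	policyJSON := []byte(`{
		"default": [{"type": "reject"}],
		"transports": {
			"docker": {
				"quay.io/example": [{
					"type": "signedBy",
					"keyType": "GPGKeys",
					"keyPath": "/etc/pki/example-pubkey.gpg",
					"signedIdentity": {"type": "matchRepoDigestOrExact"}
				}]
			}
		}
	}`)
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed %d docker scope(s)\n", len(policy.Transports["docker"]))
}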
+			return true, nil
+		case sarRejected:
+			reason = err
+		case sarUnknown:
+			// Huh?! This should not happen at all; treat it as any other invalid value.
+			fallthrough
+		default:
+			reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
+		}
+		rejections = append(rejections, reason)
+	}
+	var summary error
+	switch len(rejections) {
+	case 0:
+		summary = PolicyRequirementError("A signature was required, but no signature exists")
+	case 1:
+		summary = rejections[0]
+	default:
+		summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error())
+	}
+	return false, summary
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_eval_sigstore.go b/tools/vendor/go.podman.io/image/v5/signature/policy_eval_sigstore.go
new file mode 100644
index 000000000..cee04dc4e
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/policy_eval_sigstore.go
@@ -0,0 +1,435 @@
+// Policy evaluation for prSigstoreSigned.
+
+package signature
+
+import (
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"go.podman.io/image/v5/internal/multierr"
+	"go.podman.io/image/v5/internal/private"
+	"go.podman.io/image/v5/internal/signature"
+	"go.podman.io/image/v5/manifest"
+	"go.podman.io/image/v5/signature/internal"
+)
+
+// configBytesSources contains configuration fields which may result in one or more []byte values
+type configBytesSources struct {
+	inconsistencyErrorMessage string   // Error to return if more than one source is set
+	path                      string   // …Path: a path to a file containing the data, or ""
+	paths                     []string // …Paths: paths to files containing the data, or nil
+	data                      []byte   // …Data: a single instance of the raw data, or nil
+	datas                     [][]byte // …Datas: the raw data, or nil // codespell:ignore datas
+}
+
+// loadBytesFromConfigSources ensures at most one of the sources in src is set,
+// and returns the referenced data, or nil if no source is set.
+func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) {
+	sources := 0
+	var data [][]byte // = nil
+	if src.path != "" {
+		sources++
+		d, err := os.ReadFile(src.path)
+		if err != nil {
+			return nil, err
+		}
+		data = [][]byte{d}
+	}
+	if src.paths != nil {
+		sources++
+		data = [][]byte{}
+		for _, path := range src.paths {
+			d, err := os.ReadFile(path)
+			if err != nil {
+				return nil, err
+			}
+			data = append(data, d)
+		}
+	}
+	if src.data != nil {
+		sources++
+		data = [][]byte{src.data}
+	}
+	if src.datas != nil { // codespell:ignore datas
+		sources++
+		data = src.datas // codespell:ignore datas
+	}
+	if sources > 1 {
+		return nil, errors.New(src.inconsistencyErrorMessage)
+	}
+	return data, nil
+}
+
+// prepareTrustRoot creates a fulcioTrustRoot from the input data.
+// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
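The "at most one source" pattern in loadBytesFromConfigSources above generalizes beyond this file; the sketch below reduces it to a standalone check (names are illustrative, not part of the patch). The Fulcio prepareTrustRoot implementation itself follows below.

// Sketch only: the mutually-exclusive-sources pattern in miniature.
package main

import (
	"errors"
	"fmt"
)

// exclusiveSource returns the single configured value, nil if none is set,
// or an error if more than one alternative is set.
func exclusiveSource(path string, data []byte) ([]byte, error) {
	sources := 0
	var out []byte
	if path != "" {
		sources++
		out = []byte("contents of " + path) // a real implementation would os.ReadFile(path)
	}
	if data != nil {
		sources++
		out = data
	}
	if sources > 1 {
		return nil, errors.New(`both "keyPath" and "keyData" specified`)
	}
	return out, nil
}

func main() {
	_, err := exclusiveSource("/etc/pki/key.pub", []byte("inline key"))
	fmt.Println(err) // both "keyPath" and "keyData" specified
}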
+func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { + caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`, + path: f.CAPath, + data: f.CAData, + }) + if err != nil { + return nil, err + } + if len(caCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" nor "caData"`) + } + certs := x509.NewCertPool() + if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok { + return nil, errors.New("error loading Fulcio CA certificates") + } + fulcio := fulcioTrustRoot{ + caCertificates: certs, + oidcIssuer: f.OIDCIssuer, + subjectEmail: f.SubjectEmail, + } + if err := fulcio.validate(); err != nil { + return nil, err + } + return &fulcio, nil +} + +// prepareTrustRoot creates a pkiTrustRoot from the input data. +// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.) +func (p *prSigstoreSignedPKI) prepareTrustRoot() (*pkiTrustRoot, error) { + caRootsCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caRootsPath" and "caRootsData" specified`, + path: p.CARootsPath, + data: p.CARootsData, + }) + if err != nil { + return nil, err + } + if len(caRootsCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: PKI specified with not exactly one of "caRootsPath" nor "caRootsData"`) + } + rootsCerts := x509.NewCertPool() + if ok := rootsCerts.AppendCertsFromPEM(caRootsCertPEMs[0]); !ok { + return nil, errors.New("error loading PKI CA Roots certificates") + } + pki := pkiTrustRoot{ + caRootsCertificates: rootsCerts, + subjectEmail: p.SubjectEmail, + subjectHostname: p.SubjectHostname, + } + caIntermediateCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caIntermediatesPath" and "caIntermediatesData" specified`, + path: p.CAIntermediatesPath, + data: p.CAIntermediatesData, + }) + if err != nil { + return nil, err + } + if caIntermediateCertPEMs != nil { + if len(caIntermediateCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: PKI specified with invalid value from "caIntermediatesPath" or "caIntermediatesData"`) + } + intermediatePool := x509.NewCertPool() + trustedIntermediates, err := cryptoutils.UnmarshalCertificatesFromPEM(caIntermediateCertPEMs[0]) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading trusted intermediate certificates: %v", err)) + } + for _, trustedIntermediateCert := range trustedIntermediates { + intermediatePool.AddCert(trustedIntermediateCert) + } + pki.caIntermediateCertificates = intermediatePool + } + + if err := pki.validate(); err != nil { + return nil, err + } + return &pki, nil +} + +// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy +type sigstoreSignedTrustRoot struct { + publicKeys []crypto.PublicKey + fulcio *fulcioTrustRoot + rekorPublicKeys []*ecdsa.PublicKey + pki *pkiTrustRoot +} + +func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) { + res := sigstoreSignedTrustRoot{} + + publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + datas: pr.KeyDatas, 
// codespell:ignore datas + }) + if err != nil { + return nil, err + } + if publicKeyPEMs != nil { + for index, keyData := range publicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData) + if err != nil { + return nil, fmt.Errorf("parsing public key %d: %w", index+1, err) + } + res.publicKeys = append(res.publicKeys, pk) + } + if len(res.publicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`) + } + } + + if pr.Fulcio != nil { + f, err := pr.Fulcio.prepareTrustRoot() + if err != nil { + return nil, err + } + res.fulcio = f + } + + rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "rekorPublicKeyPath" and "rekorPublicKeyData" specified`, + path: pr.RekorPublicKeyPath, + paths: pr.RekorPublicKeyPaths, + data: pr.RekorPublicKeyData, + datas: pr.RekorPublicKeyDatas, // codespell:ignore datas + }) + if err != nil { + return nil, err + } + if rekorPublicKeyPEMs != nil { + for index, pem := range rekorPublicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem) + if err != nil { + return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err) + } + pkECDSA, ok := pk.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1) + + } + res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA) + } + if len(res.rekorPublicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`) + } + } + + if pr.PKI != nil { + p, err := pr.PKI.prepareTrustRoot() + if err != nil { + return nil, err + } + res.pki = p + } + + return &res, nil +} + +func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // We don’t know of a single user of this API, and we might return unexpected values in Signature. + // For now, just punt. + return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore") +} + +func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) { + // FIXME: move this to per-context initialization + trustRoot, err := pr.prepareTrustRoot() + if err != nil { + return sarRejected, err + } + + untrustedAnnotations := sig.UntrustedAnnotations() + untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey) + } + untrustedPayload := sig.UntrustedPayload() + + keySources := 0 + if trustRoot.publicKeys != nil { + keySources++ + } + if trustRoot.fulcio != nil { + keySources++ + } + if trustRoot.pki != nil { + keySources++ + } + + var publicKeys []crypto.PublicKey + switch { + case keySources > 1: // newPRSigstoreSigned rejects more than one key sources. + return sarRejected, errors.New("Internal inconsistency: More than one of public key, Fulcio, or PKI specified") + case keySources == 0: // newPRSigstoreSigned rejects empty key sources. 
+ return sarRejected, errors.New("Internal inconsistency: A public key, Fulcio, or PKI must be specified.") + case trustRoot.publicKeys != nil: + if trustRoot.rekorPublicKeys != nil { + untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) + } + + var rekorFailures []string + for _, candidatePublicKey := range trustRoot.publicKeys { + // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies. + // FIXME: We could just generate DER instead of the full PEM text + recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey) + if err != nil { + // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail. + // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.) + return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err) + } + // We don’t care about the Rekor timestamp, just about log presence. + _, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload) + if err == nil { + publicKeys = append(publicKeys, candidatePublicKey) + break // The SET can only accept one public key entry, so if we found one, the rest either doesn’t match or is a duplicate + } + rekorFailures = append(rekorFailures, err.Error()) + } + if len(publicKeys) == 0 { + if len(rekorFailures) == 0 { + // Coverage: We have ensured that len(trustRoot.publicKeys) != 0, when nothing succeeds, there must be at least one failure. + return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures.`) + } + return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the RekorSET: %s", strings.Join(rekorFailures, ", "))) + } + } else { + publicKeys = trustRoot.publicKeys + } + + case trustRoot.fulcio != nil: + if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations. + return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key") + } + untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) + } + untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey) + } + var untrustedIntermediateChainBytes []byte + if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { + untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) + } + pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio, + []byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload) + if err != nil { + return sarRejected, err + } + publicKeys = []crypto.PublicKey{pk} + + case trustRoot.pki != nil: + if trustRoot.rekorPublicKeys != nil { // newPRSigstoreSigned rejects such combinations. 
+ return sarRejected, errors.New("Internal inconsistency: PKI specified with a Rekor public key") + } + untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey) + } + var untrustedIntermediateChainBytes []byte + if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { + untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) + } + pk, err := verifyPKI(trustRoot.pki, []byte(untrustedCert), untrustedIntermediateChainBytes) + if err != nil { + return sarRejected, err + } + publicKeys = []crypto.PublicKey{pk} + } + + if len(publicKeys) == 0 { + // Coverage: This should never happen, we ensured that trustRoot.publicKeys is non-empty if set, + // and we have already excluded the possibility in the switch above. + return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload") + } + signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ + ValidateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) + } + return nil + }, + ValidateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, err + } + if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values + return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen. + } + + return sarAccepted, nil +} + +func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + sigs, err := image.UntrustedSignatures(ctx) + if err != nil { + return false, err + } + var rejections []error + foundNonSigstoreSignatures := 0 + foundSigstoreNonAttachments := 0 + for _, s := range sigs { + sigstoreSig, ok := s.(signature.Sigstore) + if !ok { + foundNonSigstoreSignatures++ + continue + } + if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType { + foundSigstoreNonAttachments++ + continue + } + + var reason error + switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 { + // A nice message for the most common case. 
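For reference while reading the prSigstoreSigned evaluation above (its isRunningImageAllowed summary handling continues below), this is a hedged sketch of a policy document exercising the key-based path. NewPolicyFromBytes is assumed from policy_config.go, not shown in this hunk; per the code above, a "fulcio" block would additionally require one of the rekorPublicKeyPath/rekorPublicKeyPaths/rekorPublicKeyData/rekorPublicKeyDatas fields.

// Sketch only, under the assumptions named above.
package main

import (
	"fmt"
	"log"

	"go.podman.io/image/v5/signature"
)

func main() {
	policyJSON := []byte(`{
		"default": [{"type": "reject"}],
		"transports": {
			"docker": {
				"registry.example.com/team": [{
					"type": "sigstoreSigned",
					"keyPath": "/etc/containers/cosign.pub",
					"signedIdentity": {"type": "matchRepoDigestOrExact"}
				}]
			}
		}
	}`)
	if _, err := signature.NewPolicyFromBytes(policyJSON); err != nil {
		log.Fatal(err)
	}
	fmt.Println("sigstoreSigned policy parsed")
}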
+ summary = PolicyRequirementError("A signature was required, but no signature exists") + } else { + summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)", + foundNonSigstoreSignatures, foundSigstoreNonAttachments)) + } + case 1: + summary = rejections[0] + default: + summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error()) + } + return false, summary +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_eval_simple.go b/tools/vendor/go.podman.io/image/v5/signature/policy_eval_simple.go new file mode 100644 index 000000000..4ef35e3ad --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/policy_eval_simple.go @@ -0,0 +1,29 @@ +// Policy evaluation for the various simple PolicyRequirement types. + +package signature + +import ( + "context" + "fmt" + + "go.podman.io/image/v5/internal/private" + "go.podman.io/image/v5/transports" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. + return sarUnknown, nil, nil +} + +func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + return true, nil +} + +func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) +} + +func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_paths_common.go b/tools/vendor/go.podman.io/image/v5/signature/policy_paths_common.go new file mode 100644 index 000000000..038351cb7 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/policy_paths_common.go @@ -0,0 +1,7 @@ +//go:build !freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/etc/containers/policy.json" diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_paths_freebsd.go b/tools/vendor/go.podman.io/image/v5/signature/policy_paths_freebsd.go new file mode 100644 index 000000000..6a45a78fa --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/policy_paths_freebsd.go @@ -0,0 +1,7 @@ +//go:build freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. 
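The per-OS builtinDefaultPolicyPath constants in policy_paths_common.go and policy_paths_freebsd.go (the FreeBSD constant itself follows below) are typically consumed via DefaultPolicy, which is assumed here from policy_config.go and is not part of this hunk; passing a nil *types.SystemContext selects the system default path.

// Sketch only, under the assumptions named above.
package main

import (
	"fmt"
	"log"

	"go.podman.io/image/v5/signature"
)

func main() {
	policy, err := signature.DefaultPolicy(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("default policy has %d default requirement(s)\n", len(policy.Default))
}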
+const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json"
diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_reference_match.go b/tools/vendor/go.podman.io/image/v5/signature/policy_reference_match.go
new file mode 100644
index 000000000..0755c0399
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/policy_reference_match.go
@@ -0,0 +1,154 @@
+// PolicyReferenceMatch implementations.
+
+package signature
+
+import (
+	"fmt"
+	"strings"
+
+	"go.podman.io/image/v5/docker/reference"
+	"go.podman.io/image/v5/internal/private"
+	"go.podman.io/image/v5/transports"
+)
+
+// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
+func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+	r1 := image.Reference().DockerReference()
+	if r1 == nil {
+		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+			transports.ImageName(image.Reference())))
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+// matchRepoDigestOrExactReferenceValues implements prmMatchRepoDigestOrExact.matchesDockerReference
+// using reference.Named values.
+func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named) bool {
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(signature) {
+		return false
+	}
+	switch intended.(type) {
+	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+		return signature.String() == intended.String()
+	case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, via UnparsedImage.Manifest.
+ // Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest, + // we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms) + return signature.Name() == intended.Name() + default: // !reference.IsNameOnly(intended) + return false + } +} +func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return matchRepoDigestOrExactReferenceValues(intended, signature) +} + +func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// parseDockerReferences converts two reference strings into parsed entities, failing on any error +func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) { + r1, err := reference.ParseNormalizedNamed(s1) + if err != nil { + return nil, nil, err + } + r2, err := reference.ParseNormalizedNamed(s2) + if err != nil { + return nil, nil, err + } + return r1, r2, nil +} + +func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference) + if err != nil { + return false + } + // prm.DockerReference and signatureDockerReference should be exact; so, verify that now. + if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) { + return false + } + return signature.String() == intended.String() +} + +func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// refMatchesPrefix returns true if ref matches prm.Prefix. +func (prm *prmRemapIdentity) refMatchesPrefix(ref reference.Named) bool { + name := ref.Name() + switch { + case len(name) < len(prm.Prefix): + return false + case len(name) == len(prm.Prefix): + return name == prm.Prefix + case len(name) > len(prm.Prefix): + // We are matching only ref.Name(), not ref.String(), so the only separator we are + // expecting is '/': + // - '@' is only valid to separate a digest, i.e. not a part of ref.Name() + // - similarly ':' to mark a tag would not be a part of ref.Name(); it can be a part of a + // host:port domain syntax, but we don't treat that specially and require an exact match + // of the domain. + return strings.HasPrefix(name, prm.Prefix) && name[len(prm.Prefix)] == '/' + default: + panic("Internal error: impossible comparison outcome") + } +} + +// remapReferencePrefix returns the result of remapping ref, if it matches prm.Prefix +// or the original ref if it does not. 
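The prefix logic in refMatchesPrefix above only works because Name() strips tag and digest, so '/' is the only separator it must handle; the sketch below illustrates that anatomy using the vendored docker/reference package (remapReferencePrefix's implementation follows below).

// Sketch only: Name() vs. String() on a parsed reference.
package main

import (
	"fmt"
	"log"

	"go.podman.io/image/v5/docker/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("registry.example.com/ns/app:v1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ref.Name())                // registry.example.com/ns/app
	fmt.Println(ref.String())              // registry.example.com/ns/app:v1
	fmt.Println(reference.IsNameOnly(ref)) // false (it has a tag)
}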
+func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (reference.Named, error) { + if !prm.refMatchesPrefix(ref) { + return ref, nil + } + refString := ref.String() + newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1) + newParsedRef, err := reference.ParseNamed(newNamedRef) + if err != nil { + return nil, fmt.Errorf(`error rewriting reference from %q to %q: %w`, refString, newNamedRef, err) + } + return newParsedRef, nil +} + +func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + intended, err = prm.remapReferencePrefix(intended) + if err != nil { + return false + } + return matchRepoDigestOrExactReferenceValues(intended, signature) +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/policy_types.go b/tools/vendor/go.podman.io/image/v5/signature/policy_types.go new file mode 100644 index 000000000..2d107ed45 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/policy_types.go @@ -0,0 +1,253 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +// This defines types used to represent a signature verification policy in memory. +// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements +// built using the constructor functions provided in policy_config.go. + +package signature + +// NOTE: Keep this in sync with docs/containers-policy.json.5.md! + +// Policy defines requirements for considering a signature, or an image, valid. +type Policy struct { + // Default applies to any image which does not have a matching policy in Transports. + // Note that this can happen even if a matching PolicyTransportScopes exists in Transports + // if the image matches none of the scopes. + Default PolicyRequirements `json:"default"` + Transports map[string]PolicyTransportScopes `json:"transports"` +} + +// PolicyTransportScopes defines policies for images for a specific transport, +// for various scopes, the map keys. +// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.); +// there is one scope precisely matching to a single image, and namespace scopes as prefixes +// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]]) +// The empty scope, if exists, is considered a parent namespace of all other scopes. +// Most specific scope wins, duplication is prohibited (hard failure). +type PolicyTransportScopes map[string]PolicyRequirements + +// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature). +// Must not be empty, frequently will only contain a single element. +type PolicyRequirements []PolicyRequirement + +// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image. +// The type is public, but its definition is private. + +// prCommon is the common type field in a JSON encoding of PolicyRequirement. +type prCommon struct { + Type prTypeIdentifier `json:"type"` +} + +// prTypeIdentifier is string designating a kind of a PolicyRequirement. 
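To make the scope-matching rules documented above concrete (exact scope, then parent namespaces, then the empty scope, then Default, as implemented in requirementsForImageRef earlier), here is a hedged sketch constructing a Policy value directly; the NewPRReject and NewPRInsecureAcceptAnything constructors are assumed from policy_config.go, which is not part of this hunk. The prTypeIdentifier declaration continues below.

// Sketch only, under the assumptions named above.
package main

import (
	"fmt"

	"go.podman.io/image/v5/signature"
)

func main() {
	policy := &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRReject()},
		Transports: map[string]signature.PolicyTransportScopes{
			"docker": {
				// The empty scope acts as a parent namespace of all scopes in this transport.
				"": signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
			},
		},
	}
	fmt.Println(len(policy.Transports["docker"][""])) // 1
}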
+type prTypeIdentifier string
+
+const (
+	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+	prTypeReject                 prTypeIdentifier = "reject"
+	prTypeSignedBy               prTypeIdentifier = "signedBy"
+	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
+	prTypeSigstoreSigned         prTypeIdentifier = "sigstoreSigned"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+	prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+	prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+	prCommon
+
+	// KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X509Certificates” | “signedByX509CAs”
+	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+	KeyType sbKeyType `json:"keyType"`
+
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyPaths []string `json:"keyPaths,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+	// FIXME: PEM, DER?
+	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+	// FIXME: PEM, DER?
+	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+	prCommon
+	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
+	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity
+type prSigstoreSigned struct {
+	prCommon
+
+	// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	KeyPaths []string `json:"keyPaths,omitempty"`
+	// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+	// KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	KeyDatas [][]byte `json:"keyDatas,omitempty"`
+
+	// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
+	Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
+
+	// RekorPublicKeyPath is a pathname to a local file containing a public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
+	// RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"`
+	// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
+	// RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"`
+
+	// PKI specifies which PKI-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	PKI PRSigstoreSignedPKI `json:"pki,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	// Note that /usr/bin/cosign interoperability might require using repo-only matching.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// PRSigstoreSignedFulcio contains Fulcio configuration options for a "sigstoreSigned" PolicyRequirement.
+// This is a public type with a single private implementation.
+type PRSigstoreSignedFulcio interface {
+	// prepareTrustRoot creates a fulcioTrustRoot from the input data.
+	// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+	prepareTrustRoot() (*fulcioTrustRoot, error)
+}
+
+// prSigstoreSignedFulcio collects Fulcio configuration options for prSigstoreSigned
+type prSigstoreSignedFulcio struct {
+	// CAPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CAPath and CAData must be specified.
+	CAPath string `json:"caPath,omitempty"`
+	// CAData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CAPath and CAData must be specified.
+	CAData []byte `json:"caData,omitempty"`
+	// OIDCIssuer specifies the expected OIDC issuer, recorded by Fulcio into the generated certificates.
+	OIDCIssuer string `json:"oidcIssuer,omitempty"`
+	// SubjectEmail specifies the expected email address of the authenticated OIDC identity, recorded by Fulcio into the generated certificates.
+	SubjectEmail string `json:"subjectEmail,omitempty"`
+}
+
+// PRSigstoreSignedPKI contains PKI configuration options for a "sigstoreSigned" PolicyRequirement.
+type PRSigstoreSignedPKI interface {
+	// prepareTrustRoot creates a pkiTrustRoot from the input data.
+	// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.)
+	prepareTrustRoot() (*pkiTrustRoot, error)
+}
+
+// prSigstoreSignedPKI contains non-fulcio certificate PKI configuration options for prSigstoreSigned
+type prSigstoreSignedPKI struct {
+	// CARootsPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CARootsPath and CARootsData must be specified.
+	CARootsPath string `json:"caRootsPath,omitempty"`
+	// CARootsData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CARootsPath and CARootsData must be specified.
+	CARootsData []byte `json:"caRootsData,omitempty"`
+	// CAIntermediatesPath is a path to a file containing accepted CA intermediate certificates, in PEM format. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
+	CAIntermediatesPath string `json:"caIntermediatesPath,omitempty"`
+	// CAIntermediatesData contains accepted CA intermediate certificates in PEM format, all of that base64-encoded. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
+	CAIntermediatesData []byte `json:"caIntermediatesData,omitempty"`
+
+	// SubjectEmail specifies the expected email address imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
+	SubjectEmail string `json:"subjectEmail,omitempty"`
+	// SubjectHostname specifies the expected hostname imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
+	SubjectHostname string `json:"subjectHostname,omitempty"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+	Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+const (
+	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
+	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
+	prmTypeExactReference         prmTypeIdentifier = "exactReference"
+	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
+	prmTypeRemapIdentity          prmTypeIdentifier = "remapIdentity"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
+type prmMatchExact struct {
+	prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
+type prmMatchRepoDigestOrExact struct {
+	prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+	prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+	prmCommon
+	DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+	prmCommon
+	DockerRepository string `json:"dockerRepository"`
+}
+
+// prmRemapIdentity is a PolicyReferenceMatch with type = prmRemapIdentity: like prmMatchRepoDigestOrExact,
+// except that a namespace (at least a host:port, at most a single repository) is substituted before matching the two references.
+type prmRemapIdentity struct {
+	prmCommon
+	Prefix       string `json:"prefix"`
+	SignedPrefix string `json:"signedPrefix"`
+	// Possibly let the users make a choice for tag/digest matching behavior
+	// similar to prmMatchExact/prmMatchRepository?
+}
diff --git a/tools/vendor/go.podman.io/image/v5/signature/signer/signer.go b/tools/vendor/go.podman.io/image/v5/signature/signer/signer.go
new file mode 100644
index 000000000..0a70338aa
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/signer/signer.go
@@ -0,0 +1,9 @@
+package signer
+
+import "go.podman.io/image/v5/internal/signer"
+
+// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// It can only be created from within the containers/image package; it can’t be implemented externally.
+//
+// The owner of a Signer must call Close() when done.
+type Signer = signer.Signer
diff --git a/tools/vendor/go.podman.io/image/v5/signature/sigstore/copied.go b/tools/vendor/go.podman.io/image/v5/signature/sigstore/copied.go
new file mode 100644
index 000000000..2e510f60e
--- /dev/null
+++ b/tools/vendor/go.podman.io/image/v5/signature/sigstore/copied.go
@@ -0,0 +1,103 @@
+package sigstore
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+
+	"github.com/secure-systems-lab/go-securesystemslib/encrypted"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"github.com/sigstore/sigstore/pkg/signature"
+)
+
+// The following code was copied from github.com/sigstore.
+// FIXME: Eliminate that duplication.
+
+// Copyright 2021 The Sigstore Authors.
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const ( + // from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType. + cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY" + // from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType. + sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY" +) + +// from sigstore/cosign/pkg/cosign.loadPrivateKey +// FIXME: Do we need all of these key formats? +func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { + // Decrypt first + p, _ := pem.Decode(key) + if p == nil { + return nil, errors.New("invalid pem block") + } + if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType { + return nil, fmt.Errorf("unsupported pem type: %s", p.Type) + } + + x509Encoded, err := encrypted.Decrypt(p.Bytes, pass) + if err != nil { + return nil, fmt.Errorf("decrypt: %w", err) + } + + pk, err := x509.ParsePKCS8PrivateKey(x509Encoded) + if err != nil { + return nil, fmt.Errorf("parsing private key: %w", err) + } + switch pk := pk.(type) { + case *rsa.PrivateKey: + return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256) + case *ecdsa.PrivateKey: + return signature.LoadECDSASignerVerifier(pk, crypto.SHA256) + case ed25519.PrivateKey: + return signature.LoadED25519SignerVerifier(pk) + default: + return nil, errors.New("unsupported key type") + } +} + +// simplified from sigstore/cosign/pkg/cosign.marshalKeyPair +// loadPrivateKey always requires a encryption, so this always requires a passphrase. +func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, password []byte) (_privateKey []byte, _publicKey []byte, err error) { + x509Encoded, err := x509.MarshalPKCS8PrivateKey(privateKey) + if err != nil { + return nil, nil, fmt.Errorf("x509 encoding private key: %w", err) + } + + encBytes, err := encrypted.Encrypt(x509Encoded, password) + if err != nil { + return nil, nil, err + } + + // store in PEM format + privBytes := pem.EncodeToMemory(&pem.Block{ + Bytes: encBytes, + // Use the older “COSIGN” type name; as of 2023-03-30 cosign’s main branch generates “SIGSTORE” types, + // but a version of cosign that can accept them has not yet been released. + Type: cosignPrivateKeyPemType, + }) + + // Now do the public key + pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(publicKey) + if err != nil { + return nil, nil, err + } + + return privBytes, pubBytes, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/sigstore/generate.go b/tools/vendor/go.podman.io/image/v5/signature/sigstore/generate.go new file mode 100644 index 000000000..77520c123 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/sigstore/generate.go @@ -0,0 +1,35 @@ +package sigstore + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" +) + +// GenerateKeyPairResult is a struct to ensure the private and public parts can not be confused by the caller. 
+type GenerateKeyPairResult struct { + PublicKey []byte + PrivateKey []byte +} + +// GenerateKeyPair generates a public/private key pair usable for signing images using the sigstore format, +// and returns key representations suitable for storing in long-term files (with the private key encrypted using the provided passphrase). +// The specific key kind (e.g. algorithm, size), as well as the file format, are unspecified by this API, +// and can change with best practices over time. +func GenerateKeyPair(passphrase []byte) (*GenerateKeyPairResult, error) { + // https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes + // only requires ECDSA-P256 to be supported, so that’s what we must use. + rawKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + // Coverage: This can fail only if the randomness source fails + return nil, err + } + private, public, err := marshalKeyPair(rawKey, rawKey.Public(), passphrase) + if err != nil { + return nil, err + } + return &GenerateKeyPairResult{ + PublicKey: public, + PrivateKey: private, + }, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/sigstore/internal/signer.go b/tools/vendor/go.podman.io/image/v5/signature/sigstore/internal/signer.go new file mode 100644 index 000000000..5a4c244bd --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/sigstore/internal/signer.go @@ -0,0 +1,95 @@ +package internal + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + + sigstoreSignature "github.com/sigstore/sigstore/pkg/signature" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/internal/signature" + "go.podman.io/image/v5/manifest" + "go.podman.io/image/v5/signature/internal" +) + +type Option func(*SigstoreSigner) error + +// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures. +// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the +// dependency impact. +type SigstoreSigner struct { + PrivateKey sigstoreSignature.Signer // May be nil during initialization + SigningKeyOrCert []byte // For possible Rekor upload; always initialized together with PrivateKey + + // Fulcio results to include + FulcioGeneratedCertificate []byte // Or nil + FulcioGeneratedCertificateChain []byte // Or nil + + // Rekor state + RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +func (s *SigstoreSigner) ProgressMessage() string { + return "Signing image using a sigstore signature" +} + +// SignImageManifest creates a new signature for manifest m as dockerReference. +func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) { + if s.PrivateKey == nil { + return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner") + } + + if reference.IsNameOnly(dockerReference) { + return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) + } + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + // sigstore/cosign completely ignores dockerReference for actual policy decisions. 
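Since GenerateKeyPair above returns already-serialized, passphrase-encrypted key material, a caller only needs to persist the two byte slices; a minimal sketch follows (file names and passphrase are placeholders, and the SignImageManifest implementation continues below).

// Sketch only: persisting the result of GenerateKeyPair above.
package main

import (
	"log"
	"os"

	"go.podman.io/image/v5/signature/sigstore"
)

func main() {
	keys, err := sigstore.GenerateKeyPair([]byte("correct horse battery staple"))
	if err != nil {
		log.Fatal(err)
	}
	// The private key is passphrase-encrypted PEM already; still keep it mode 0600.
	if err := os.WriteFile("cosign.key", keys.PrivateKey, 0o600); err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("cosign.pub", keys.PublicKey, 0o644); err != nil {
		log.Fatal(err)
	}
}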
+ // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks. + // So, just do what simple signing does, and cosign won’t mind. + payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String()) + payloadBytes, err := json.Marshal(payloadData) + if err != nil { + return nil, err + } + + // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. + signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes)) + if err != nil { + return nil, fmt.Errorf("creating signature: %w", err) + } + base64Signature := base64.StdEncoding.EncodeToString(signatureBytes) + var rekorSETBytes []byte // = nil + if s.RekorUploader != nil { + set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes) + if err != nil { + return nil, err + } + rekorSETBytes = set + } + + annotations := map[string]string{ + signature.SigstoreSignatureAnnotationKey: base64Signature, + } + if s.FulcioGeneratedCertificate != nil { + annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate) + } + if s.FulcioGeneratedCertificateChain != nil { + annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain) + } + if rekorSETBytes != nil { + annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes) + } + return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil +} + +func (s *SigstoreSigner) Close() error { + return nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/sigstore/signer.go b/tools/vendor/go.podman.io/image/v5/signature/sigstore/signer.go new file mode 100644 index 000000000..80ebfb2a5 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/sigstore/signer.go @@ -0,0 +1,60 @@ +package sigstore + +import ( + "errors" + "fmt" + "os" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + internalSigner "go.podman.io/image/v5/internal/signer" + "go.podman.io/image/v5/signature/signer" + "go.podman.io/image/v5/signature/sigstore/internal" +) + +type Option = internal.Option + +func WithPrivateKeyFile(file string, passphrase []byte) Option { + return func(s *internal.SigstoreSigner) error { + if s.PrivateKey != nil { + return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures") + } + + if passphrase == nil { + return errors.New("private key passphrase not provided") + } + + privateKeyPEM, err := os.ReadFile(file) + if err != nil { + return fmt.Errorf("reading private key from %s: %w", file, err) + } + signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase) + if err != nil { + return fmt.Errorf("initializing private key: %w", err) + } + publicKey, err := signerVerifier.PublicKey() + if err != nil { + return fmt.Errorf("getting public key from private key: %w", err) + } + publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey) + if err != nil { + return fmt.Errorf("converting public key to PEM: %w", err) + } + s.PrivateKey = signerVerifier + s.SigningKeyOrCert = publicKeyPEM + return nil + } +} + +func NewSigner(opts ...Option) (*signer.Signer, error) { + s := internal.SigstoreSigner{} + for _, o := range opts { + if err := o(&s); err != nil { + return nil, err + } + } + if s.PrivateKey == nil { + return nil, errors.New("no private key source provided (neither a private key nor 
Fulcio) when preparing to create sigstore signatures") + } + + return internalSigner.NewSigner(&s), nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/simple.go b/tools/vendor/go.podman.io/image/v5/signature/simple.go new file mode 100644 index 000000000..8711f6908 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/simple.go @@ -0,0 +1,297 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json! + +package signature + +import ( + "encoding/json" + "errors" + "fmt" + "slices" + "time" + + digest "github.com/opencontainers/go-digest" + "go.podman.io/image/v5/signature/internal" + "go.podman.io/image/v5/version" +) + +const ( + signatureType = "atomic container signature" +) + +// InvalidSignatureError is returned when parsing an invalid signature. +type InvalidSignatureError = internal.InvalidSignatureError + +// Signature is a parsed content of a signature. +// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below. +type Signature struct { + DockerManifestDigest digest.Digest + DockerReference string // FIXME: more precise type? +} + +// untrustedSignature is a parsed content of a signature. +type untrustedSignature struct { + untrustedDockerManifestDigest digest.Digest + untrustedDockerReference string // FIXME: more precise type? + untrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + untrustedTimestamp *int64 +} + +// UntrustedSignatureInformation is information available in an untrusted signature. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +type UntrustedSignatureInformation struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + UntrustedTimestamp *time.Time + UntrustedShortKeyIdentifier string +} + +// newUntrustedSignature returns an untrustedSignature object with +// the specified primary contents and appropriate metadata. +func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. 
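[Editor's sketch, not part of the patch: stepping back to the exported sigstore API introduced above, this is how GenerateKeyPair, WithPrivateKeyFile, and NewSigner fit together. File names are illustrative.]

```go
package main

import (
	"log"
	"os"

	"go.podman.io/image/v5/signature/sigstore"
)

func main() {
	pass := []byte("file passphrase")

	// Generate an ECDSA-P256 key pair; the private half is returned
	// already encrypted with the passphrase.
	res, err := sigstore.GenerateKeyPair(pass)
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("cosign.key", res.PrivateKey, 0o600); err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("cosign.pub", res.PublicKey, 0o644); err != nil {
		log.Fatal(err)
	}

	// Build a Signer from the private key file; the same passphrase
	// is needed to decrypt it.
	s, err := sigstore.NewSigner(sigstore.WithPrivateKeyFile("cosign.key", pass))
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
}
```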
+	creatorID := "atomic " + version.Version
+	timestamp := time.Now().Unix()
+	return untrustedSignature{
+		untrustedDockerManifestDigest: dockerManifestDigest,
+		untrustedDockerReference:      dockerReference,
+		untrustedCreatorID:            &creatorID,
+		untrustedTimestamp:            &timestamp,
+	}
+}
+
+// A compile-time check that untrustedSignature and *untrustedSignature implement json.Marshaler
+var _ json.Marshaler = untrustedSignature{}
+var _ json.Marshaler = (*untrustedSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s untrustedSignature) MarshalJSON() ([]byte, error) {
+	if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+		return nil, errors.New("Unexpected empty signature content")
+	}
+	critical := map[string]any{
+		"type":     signatureType,
+		"image":    map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+	}
+	optional := map[string]any{}
+	if s.untrustedCreatorID != nil {
+		optional["creator"] = *s.untrustedCreatorID
+	}
+	if s.untrustedTimestamp != nil {
+		optional["timestamp"] = *s.untrustedTimestamp
+	}
+	signature := map[string]any{
+		"critical": critical,
+		"optional": optional,
+	}
+	return json.Marshal(signature)
+}
+
+// Compile-time check that untrustedSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*untrustedSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
+	return internal.JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
+// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
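[Editor's sketch, not part of the patch: the wire format produced by MarshalJSON above. All values here are illustrative; `creator` mirrors `"atomic " + version.Version` for this release.]

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The same shape MarshalJSON builds: a "critical" object that is
	// covered by the policy checks, and an "optional" object for metadata.
	sig := map[string]any{
		"critical": map[string]any{
			"type":     "atomic container signature",
			"image":    map[string]string{"docker-manifest-digest": "sha256:0123abc..."}, // illustrative digest
			"identity": map[string]string{"docker-reference": "docker.io/library/busybox:latest"},
		},
		"optional": map[string]any{
			"creator":   "atomic 5.37.0",
			"timestamp": int64(1700000000), // whole seconds only; fractions are rejected on parse
		},
	}
	out, _ := json.MarshalIndent(sig, "", "  ")
	fmt.Println(string(out))
}
```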
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
+		switch key {
+		case "creator":
+			gotCreatorID = true
+			return &creatorID
+		case "timestamp":
+			gotTimestamp = true
+			return &timestamp
+		default:
+			var ignore any
+			return &ignore
+		}
+	}); err != nil {
+		return err
+	}
+	if gotCreatorID {
+		s.untrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
+		}
+		s.untrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != signatureType {
+		return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+	}
+
+	var digestString string
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	digestValue, err := digest.Parse(digestString)
+	if err != nil {
+		return internal.NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+	}
+	s.untrustedDockerManifestDigest = digestValue
+
+	return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+		"docker-reference": &s.untrustedDockerReference,
+	})
+}
+
+// Sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
+	json, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
+		return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
+	}
+
+	if passphrase != "" {
+		return nil, errors.New("signing mechanism does not support passphrases")
+	}
+
+	return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
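[Editor's sketch, not part of the patch: how the acceptance-rule closures defined just below slot together. This is package-internal code (the type and verifyAndExtractSignature are unexported); mech, unverified, wantDigest, and wantRef are assumed caller-supplied, and the fingerprint is hypothetical.]

```go
func verifySketch(mech SigningMechanism, unverified []byte, wantDigest digest.Digest, wantRef string) (*Signature, error) {
	rules := signatureAcceptanceRules{
		acceptedKeyIdentities: []string{"0123456789ABCDEF0123456789ABCDEF01234567"}, // hypothetical fingerprint
		validateSignedDockerReference: func(ref string) error {
			if ref != wantRef {
				return fmt.Errorf("unexpected docker-reference %q", ref)
			}
			return nil
		},
		validateSignedDockerManifestDigest: func(d digest.Digest) error {
			if d != wantDigest {
				return fmt.Errorf("unexpected manifest digest %q", d)
			}
			return nil
		},
	}
	sig, _, err := verifyAndExtractSignature(mech, unverified, rules)
	return sig, err
}
```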
+type signatureAcceptanceRules struct { + acceptedKeyIdentities []string + validateSignedDockerReference func(string) error + validateSignedDockerManifestDigest func(digest.Digest) error +} + +// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components +// match expected values, both as specified by rules. +// Returns the signature, and an identity of the key that signed it. +func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, string, error) { + signed, keyIdentity, err := mech.Verify(unverifiedSignature) + if err != nil { + return nil, "", err + } + if !slices.Contains(rules.acceptedKeyIdentities, keyIdentity) { + withLookup, ok := mech.(signingMechanismWithVerificationIdentityLookup) + if !ok { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("signature by key %s is not accepted", keyIdentity)) + } + + primaryKey, err := withLookup.keyIdentityForVerificationKeyIdentity(keyIdentity) + if err != nil { + // Coverage: This only fails if lookup by keyIdentity fails, but we just found and used that key. + // Or maybe on some unexpected I/O error. + return nil, "", err + } + if !slices.Contains(rules.acceptedKeyIdentities, primaryKey) { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("signature by key %s of %s is not accepted", keyIdentity, primaryKey)) + } + keyIdentity = primaryKey + } + + var unmatchedSignature untrustedSignature + if err := json.Unmarshal(signed, &unmatchedSignature); err != nil { + return nil, "", internal.NewInvalidSignatureError(err.Error()) + } + if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil { + return nil, "", err + } + if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil { + return nil, "", err + } + // signatureAcceptanceRules have accepted this value. + return &Signature{ + DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest, + DockerReference: unmatchedSignature.untrustedDockerReference, + }, keyIdentity, nil +} + +// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature, +// WITHOUT doing any cryptographic verification. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) { + // NOTE: This should eventually do format autodetection. 
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) + if err != nil { + return nil, err + } + defer mech.Close() + + untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes) + if err != nil { + return nil, err + } + var untrustedDecodedContents untrustedSignature + if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil { + return nil, internal.NewInvalidSignatureError(err.Error()) + } + + var timestamp *time.Time // = nil + if untrustedDecodedContents.untrustedTimestamp != nil { + ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0) + timestamp = &ts + } + return &UntrustedSignatureInformation{ + UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest, + UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference, + UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID, + UntrustedTimestamp: timestamp, + UntrustedShortKeyIdentifier: shortKeyIdentifier, + }, nil +} diff --git a/tools/vendor/go.podman.io/image/v5/signature/simplesigning/signer.go b/tools/vendor/go.podman.io/image/v5/signature/simplesigning/signer.go new file mode 100644 index 000000000..038a255c5 --- /dev/null +++ b/tools/vendor/go.podman.io/image/v5/signature/simplesigning/signer.go @@ -0,0 +1,105 @@ +package simplesigning + +import ( + "context" + "errors" + "fmt" + "strings" + + "go.podman.io/image/v5/docker/reference" + internalSig "go.podman.io/image/v5/internal/signature" + internalSigner "go.podman.io/image/v5/internal/signer" + "go.podman.io/image/v5/signature" + "go.podman.io/image/v5/signature/signer" +) + +// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures. +type simpleSigner struct { + mech signature.SigningMechanism + keyFingerprint string + passphrase string // "" if not provided. +} + +type Option func(*simpleSigner) error + +// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint. +func WithKeyFingerprint(keyFingerprint string) Option { + return func(s *simpleSigner) error { + s.keyFingerprint = keyFingerprint + return nil + } +} + +// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key. +// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry. +func WithPassphrase(passphrase string) Option { + return func(s *simpleSigner) error { + // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior. + if strings.Contains(passphrase, "\n") { + return errors.New("invalid passphrase: must not contain a line break") + } + s.passphrase = passphrase + return nil + } +} + +// NewSigner returns a signature.Signer which creates “simple signing” signatures using the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg). +// +// The set of options must identify a key to sign with, probably using a WithKeyFingerprint. +// +// The caller must call Close() on the returned Signer. 
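[Editor's sketch, not part of the patch: combining the two options above with the NewSigner defined below. The fingerprint is hypothetical; with no WithPassphrase, gpg-agent/pinentry may prompt instead.]

```go
package main

import (
	"log"

	"go.podman.io/image/v5/signature/simplesigning"
)

func main() {
	s, err := simplesigning.NewSigner(
		simplesigning.WithKeyFingerprint("0123456789ABCDEF0123456789ABCDEF01234567"), // hypothetical
		simplesigning.WithPassphrase("secret"),
	)
	if err != nil {
		log.Fatal(err)
	}
	// NewSigner's contract: the caller must Close() the returned Signer.
	defer s.Close()
}
```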
+func NewSigner(opts ...Option) (*signer.Signer, error) { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return nil, fmt.Errorf("initializing GPG: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + mech.Close() + } + }() + if err := mech.SupportsSigning(); err != nil { + return nil, fmt.Errorf("Signing not supported: %w", err) + } + + s := simpleSigner{ + mech: mech, + } + for _, o := range opts { + if err := o(&s); err != nil { + return nil, err + } + } + if s.keyFingerprint == "" { + return nil, errors.New("no key identity provided for simple signing") + } + // Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that. + + succeeded = true + return internalSigner.NewSigner(&s), nil +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +func (s *simpleSigner) ProgressMessage() string { + return "Signing image using simple signing" +} + +// SignImageManifest creates a new signature for manifest m as dockerReference. +func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) { + if reference.IsNameOnly(dockerReference) { + return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) + } + simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{ + Passphrase: s.passphrase, + }) + if err != nil { + return nil, err + } + return internalSig.SimpleSigningFromBlob(simpleSig), nil +} + +func (s *simpleSigner) Close() error { + return s.mech.Close() +} diff --git a/tools/vendor/github.com/containers/image/v5/transports/stub.go b/tools/vendor/go.podman.io/image/v5/transports/stub.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/transports/stub.go rename to tools/vendor/go.podman.io/image/v5/transports/stub.go index 2c186a90c..ae44fd428 100644 --- a/tools/vendor/github.com/containers/image/v5/transports/stub.go +++ b/tools/vendor/go.podman.io/image/v5/transports/stub.go @@ -3,7 +3,7 @@ package transports import ( "fmt" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/types" ) // stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. diff --git a/tools/vendor/github.com/containers/image/v5/transports/transports.go b/tools/vendor/go.podman.io/image/v5/transports/transports.go similarity index 93% rename from tools/vendor/github.com/containers/image/v5/transports/transports.go rename to tools/vendor/go.podman.io/image/v5/transports/transports.go index 834f33b48..04f05292b 100644 --- a/tools/vendor/github.com/containers/image/v5/transports/transports.go +++ b/tools/vendor/go.podman.io/image/v5/transports/transports.go @@ -5,8 +5,8 @@ import ( "sort" "sync" - "github.com/containers/image/v5/internal/set" - "github.com/containers/image/v5/types" + "go.podman.io/image/v5/internal/set" + "go.podman.io/image/v5/types" ) // knownTransports is a registry of known ImageTransport instances. 
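[Editor's sketch, not part of the patch: querying the transport registry maintained in this file. Note that individual transports register themselves in their packages' init functions, so Get returns nil unless the relevant transport package has been linked in; deprecated names such as "atomic" and "ostree" still resolve via Get but are omitted from ListNames.]

```go
package main

import (
	"fmt"

	"go.podman.io/image/v5/transports"
)

func main() {
	// Non-deprecated transport names, sorted.
	for _, name := range transports.ListNames() {
		fmt.Println(name)
	}
	// Look one up by name; nil means it was not linked into this build.
	if t := transports.Get("docker"); t != nil {
		fmt.Println("docker transport is available:", t.Name())
	}
}
```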
@@ -72,7 +72,7 @@ func ImageName(ref types.ImageReference) string { return ref.Transport().Name() + ":" + ref.StringWithinTransport() } -var deprecatedTransports = set.NewWithValues("atomic") +var deprecatedTransports = set.NewWithValues("atomic", "ostree") // ListNames returns a list of non deprecated transport names. // Deprecated transports can be used, but are not presented to users. diff --git a/tools/vendor/github.com/containers/image/v5/types/types.go b/tools/vendor/go.podman.io/image/v5/types/types.go similarity index 97% rename from tools/vendor/github.com/containers/image/v5/types/types.go rename to tools/vendor/go.podman.io/image/v5/types/types.go index 7d6097346..41f1a632e 100644 --- a/tools/vendor/github.com/containers/image/v5/types/types.go +++ b/tools/vendor/go.podman.io/image/v5/types/types.go @@ -3,12 +3,13 @@ package types import ( "context" "io" + "net/url" "time" - "github.com/containers/image/v5/docker/reference" - compression "github.com/containers/image/v5/pkg/compression/types" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" + "go.podman.io/image/v5/docker/reference" + compression "go.podman.io/image/v5/pkg/compression/types" ) // ImageTransport is a top-level namespace for ways to store/load an image. @@ -241,6 +242,7 @@ type BlobInfoCache interface { // // WARNING: Various methods which return an object identified by digest generally do not // validate that the returned data actually matches that digest; this is the caller’s responsibility. +// See the individual methods’ documentation for potentially more details. type ImageSource interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. @@ -251,10 +253,17 @@ type ImageSource interface { // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + // + // WARNING: This is a raw access to the data as provided by the source; if the reference contains a digest, or instanceDigest is set, + // callers must enforce the digest match themselves, typically by using image.UnparsedInstance to access the manifest instead + // of calling this directly. (Compare the generic warning applicable to all of the [ImageSource] interface.) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + // + // WARNING: This is a raw access to the data as provided by the source; callers must validate the contents + // against the blob’s digest themselves. (Compare the generic warning applicable to all of the [ImageSource] interface.) GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. 
HasThreadSafeGetBlob() bool @@ -643,17 +652,22 @@ type SystemContext struct { // if true, a V1 ping attempt isn't done to give users a better error. Default is false. // Note that this field is used mainly to integrate containers/image into projectatomic/docker // in order to not break any existing docker's integration tests. + // Deprecated: The V1 container registry detection is no longer performed, so setting this flag has no effect. DockerDisableV1Ping bool // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list DockerDisableDestSchema1MIMETypes bool // If true, the physical pull source of docker transport images logged as info level DockerLogMirrorChoice bool // Directory to use for OSTree temporary files + // + // Deprecated: The OSTree transport has been removed. OSTreeTmpDirPath string // If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry. // Note that this requires writing blobs to temporary files, and takes more time than the default behavior, // when the digest for a blob is unknown. DockerRegistryPushPrecomputeDigests bool + // DockerProxyURL specifies proxy configuration schema (like socks5://username:password@ip:port) + DockerProxyURL *url.URL // === docker/daemon.Transport overrides === // A directory containing a CA certificate (ending with ".crt"), diff --git a/tools/vendor/github.com/containers/image/v5/version/version.go b/tools/vendor/go.podman.io/image/v5/version/version.go similarity index 93% rename from tools/vendor/github.com/containers/image/v5/version/version.go rename to tools/vendor/go.podman.io/image/v5/version/version.go index 64e468725..e577735ee 100644 --- a/tools/vendor/github.com/containers/image/v5/version/version.go +++ b/tools/vendor/go.podman.io/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 32 + VersionMinor = 37 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/tools/vendor/github.com/containers/storage/AUTHORS b/tools/vendor/go.podman.io/storage/AUTHORS similarity index 100% rename from tools/vendor/github.com/containers/storage/AUTHORS rename to tools/vendor/go.podman.io/storage/AUTHORS diff --git a/tools/vendor/github.com/containers/storage/LICENSE b/tools/vendor/go.podman.io/storage/LICENSE similarity index 100% rename from tools/vendor/github.com/containers/storage/LICENSE rename to tools/vendor/go.podman.io/storage/LICENSE diff --git a/tools/vendor/github.com/containers/storage/NOTICE b/tools/vendor/go.podman.io/storage/NOTICE similarity index 100% rename from tools/vendor/github.com/containers/storage/NOTICE rename to tools/vendor/go.podman.io/storage/NOTICE diff --git a/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go b/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go new file mode 100644 index 000000000..4f340ae3c --- /dev/null +++ b/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go @@ -0,0 +1,64 @@ +package rawfilelock + +import ( + "os" +) + +type LockType byte + +const ( + ReadLock LockType = iota + WriteLock +) + +type FileHandle = fileHandle + +// OpenLock opens a file for locking +// WARNING: This is the underlying file locking primitive of the OS; +// because closing FileHandle releases the lock, it is not suitable for use +// if there is any chance of two concurrent goroutines attempting to use the same lock. +// Most users should use the higher-level operations from internal/staging_lockfile or pkg/lockfile. +func OpenLock(path string, readOnly bool) (FileHandle, error) { + flags := os.O_CREATE + if readOnly { + flags |= os.O_RDONLY + } else { + flags |= os.O_RDWR + } + + fd, err := openHandle(path, flags) + if err == nil { + return fd, nil + } + + return fd, &os.PathError{Op: "open", Path: path, Err: err} +} + +// TryLockFile attempts to lock a file handle +func TryLockFile(fd FileHandle, lockType LockType) error { + return lockHandle(fd, lockType, true) +} + +// LockFile locks a file handle +func LockFile(fd FileHandle, lockType LockType) error { + return lockHandle(fd, lockType, false) +} + +// UnlockAndClose unlocks and closes a file handle +func UnlockAndCloseHandle(fd FileHandle) { + unlockAndCloseHandle(fd) +} + +// CloseHandle closes a file handle without unlocking +// +// WARNING: This is a last-resort function for error handling only! +// On Unix systems, closing a file descriptor automatically releases any locks, +// so "closing without unlocking" is impossible. This function will release +// the lock as a side effect of closing the file. +// +// This function should only be used in error paths where the lock state +// is already corrupted or when giving up on lock management entirely. +// Normal code should use UnlockAndCloseHandle instead. 
+func CloseHandle(fd FileHandle) { + closeHandle(fd) +} diff --git a/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go b/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go new file mode 100644 index 000000000..268554076 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go @@ -0,0 +1,49 @@ +//go:build !windows + +package rawfilelock + +import ( + "time" + + "golang.org/x/sys/unix" +) + +type fileHandle uintptr + +func openHandle(path string, mode int) (fileHandle, error) { + mode |= unix.O_CLOEXEC + fd, err := unix.Open(path, mode, 0o644) + return fileHandle(fd), err +} + +func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error { + fType := unix.F_RDLCK + if lType != ReadLock { + fType = unix.F_WRLCK + } + lk := unix.Flock_t{ + Type: int16(fType), + Whence: int16(unix.SEEK_SET), + Start: 0, + Len: 0, + } + cmd := unix.F_SETLKW + if nonblocking { + cmd = unix.F_SETLK + } + for { + err := unix.FcntlFlock(uintptr(fd), cmd, &lk) + if err == nil || nonblocking { + return err + } + time.Sleep(10 * time.Millisecond) + } +} + +func unlockAndCloseHandle(fd fileHandle) { + unix.Close(int(fd)) +} + +func closeHandle(fd fileHandle) { + unix.Close(int(fd)) +} diff --git a/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go b/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go new file mode 100644 index 000000000..9c0d692f8 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go @@ -0,0 +1,48 @@ +//go:build windows + +package rawfilelock + +import ( + "golang.org/x/sys/windows" +) + +const ( + reserved = 0 + allBytes = ^uint32(0) +) + +type fileHandle windows.Handle + +func openHandle(path string, mode int) (fileHandle, error) { + mode |= windows.O_CLOEXEC + fd, err := windows.Open(path, mode, windows.S_IWRITE) + return fileHandle(fd), err +} + +func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error { + flags := 0 + if lType != ReadLock { + flags = windows.LOCKFILE_EXCLUSIVE_LOCK + } + if nonblocking { + flags |= windows.LOCKFILE_FAIL_IMMEDIATELY + } + ol := new(windows.Overlapped) + if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { + if nonblocking { + return err + } + panic(err) + } + return nil +} + +func unlockAndCloseHandle(fd fileHandle) { + ol := new(windows.Overlapped) + windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) + closeHandle(fd) +} + +func closeHandle(fd fileHandle) { + windows.Close(windows.Handle(fd)) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/README.md b/tools/vendor/go.podman.io/storage/pkg/archive/README.md new file mode 100644 index 000000000..7307d9694 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. 
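[Editor's sketch, not part of the patch: the intended calling sequence for the rawfilelock primitives above. The package is internal to go.podman.io/storage, so code outside that module cannot import it and should use pkg/lockfile instead; this is illustrative only, with a hypothetical lock path.]

```go
package main

import (
	"log"

	"go.podman.io/storage/internal/rawfilelock" // internal: only importable within the storage module
)

func main() {
	// false = open read-write, as required for a write lock.
	fd, err := rawfilelock.OpenLock("/tmp/example.lock", false)
	if err != nil {
		log.Fatal(err)
	}
	// Try a non-blocking write lock first; fall back to blocking.
	if err := rawfilelock.TryLockFile(fd, rawfilelock.WriteLock); err != nil {
		if err := rawfilelock.LockFile(fd, rawfilelock.WriteLock); err != nil {
			log.Fatal(err)
		}
	}
	// Closing the handle releases the lock, so unlock and close together.
	defer rawfilelock.UnlockAndCloseHandle(fd)
	// ... critical section ...
}
```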
diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive.go b/tools/vendor/go.podman.io/storage/pkg/archive/archive.go new file mode 100644 index 000000000..5f8647af7 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive.go @@ -0,0 +1,1675 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + gzip "github.com/klauspost/pgzip" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" + "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/pools" + "go.podman.io/storage/pkg/promise" + "go.podman.io/storage/pkg/system" + "go.podman.io/storage/pkg/unshare" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + IgnoreChownErrors bool + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // This is additional data to be used by the converter. It will + // not survive a round trip through JSON, so it's primarily + // intended for generating archives (i.e., converting writes). + WhiteoutData any + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool + // ForceMask, if set, indicates the permission mask used for created files. + ForceMask *os.FileMode + // Timestamp, if set, will be set in each header as create/mod/access time + Timestamp *time.Time + } +) + +const PaxSchilyXattr = "SCHILY.xattr." + +const ( + tarExt = "tar" + solaris = "solaris" + windows = "windows" + darwin = "darwin" + freebsd = "freebsd" +) + +var xattrsToIgnore = map[string]any{ + "security.selinux": true, +} + +// Archiver allows the reuse of most utility functions of this package with a +// pluggable Untar function. To facilitate the passing of specific id mappings +// for untar, an archiver can be created with maps which will then be passed to +// Untar operations. If ChownOpts is set, its values are mapped using +// UntarIDMappings before being used to create files and directories on disk. 
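[Editor's sketch, not part of the patch: driving the TarOptions defined above through TarWithOptions, the public entry point this file's Tar wrapper is documented as delegating to. The source path and exclude pattern are illustrative.]

```go
package main

import (
	"io"
	"log"
	"os"

	"go.podman.io/storage/pkg/archive"
)

func main() {
	rc, err := archive.TarWithOptions("/srv/data", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"*.tmp"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	out, err := os.Create("data.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}
}
```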
+type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + TarIDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + UntarIDMappings *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, TarIDMappings: &idtools.IDMappings{}, UntarIDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +// overwriteError is used to differentiate errors related to attempting to +// overwrite a directory with a non-directory or vice-versa. When testing +// copying a file over a directory, this error is expected in order for the +// test to pass. +type overwriteError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz + // Zstd is zstd compression algorithm. + Zstd +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + Zstd: {0x28, 0xb5, 0x2f, 0xfd}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + + defer func() { + if Err != nil { + // In the normal case, the buffer is embedded in the ReadCloser return. + p.Put(buf) + } + }() + + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + cleanup := func() { + p.Put(buf) + } + if rc, canUse := tryProcFilter([]string{"pigz", "-d"}, buf, cleanup); canUse { + return rc, nil + } + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xz.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + case Zstd: + cleanup := func() { + p.Put(buf) + } + if rc, canUse := tryProcFilter([]string{"zstd", "-d"}, buf, cleanup); canUse { + return rc, nil + } + return zstdReader(buf) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (_ io.WriteCloser, Err error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + + defer func() { + if Err != nil { + p.Put(buf) + } + }() + + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Zstd: + return zstdWriter(dest) + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
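[Editor's sketch, not part of the patch: DetectCompression above only inspects the leading magic bytes, so a few-byte prefix is enough to classify a stream. The magic values are taken directly from the table in DetectCompression.]

```go
package main

import (
	"fmt"

	"go.podman.io/storage/pkg/archive"
)

func main() {
	samples := map[string][]byte{
		"gzip":  {0x1F, 0x8B, 0x08, 0x00},
		"bzip2": {0x42, 0x5A, 0x68, 0x39},
		"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
		"zstd":  {0x28, 0xB5, 0x2F, 0xFD},
		"plain": []byte("not compressed"),
	}
	for name, head := range samples {
		c := archive.DetectCompression(head)
		fmt.Printf("%-5s => %q\n", name, c.Extension())
	}
}
```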
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return tarExt + case Bzip2: + return tarExt + ".bz2" + case Gzip: + return tarExt + ".gz" + case Xz: + return tarExt + ".xz" + case Zstd: + return tarExt + ".zst" + } + return "" +} + +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() any { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi. +var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error + +func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + if sysStatOverride == nil { + return tar.FileInfoHeader(fi, link) + } + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + return hdr, sysStatOverride(fi, hdr) +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
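[Editor's sketch, not part of the patch: rewriting a single entry in a tar stream with the ReplaceFileTarWrapper defined above. File names and contents are illustrative; per the TarModifierFunc contract, a nil header means the entry was absent from the input and the modifier's result is appended at the end.]

```go
package main

import (
	"archive/tar"
	"io"
	"log"
	"os"

	"go.podman.io/storage/pkg/archive"
)

func main() {
	in, err := os.Open("in.tar") // illustrative input archive
	if err != nil {
		log.Fatal(err)
	}
	mods := map[string]archive.TarModifierFunc{
		"etc/motd": func(path string, hdr *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
			if hdr == nil { // entry not present in the input: synthesize it
				hdr = &tar.Header{Typeflag: tar.TypeReg, Mode: 0o644}
			}
			return hdr, []byte("welcome\n"), nil // wrapper fixes up Name and Size
		},
	}
	out := archive.ReplaceFileTarWrapper(in, mods) // takes ownership of in
	defer out.Close()
	if _, err := io.Copy(io.Discard, out); err != nil {
		log.Fatal(err)
	}
}
```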
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+	hdr, err := fileInfoHeaderNoLookups(fi, link)
+	if err != nil {
+		return nil, err
+	}
+	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+	name, err = canonicalTarName(name, fi.IsDir())
+	if err != nil {
+		return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err)
+	}
+	hdr.Name = name
+	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+		return nil, err
+	}
+	return hdr, nil
+}
+
+// readSecurityXattrToTarHeader reads security.capability and security.ima
+// xattrs from filesystem to a tar header
+func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+	if hdr.PAXRecords == nil {
+		hdr.PAXRecords = make(map[string]string)
+	}
+	for _, xattr := range []string{"security.capability", "security.ima"} {
+		capability, err := system.Lgetxattr(path, xattr)
+		if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
+			return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
+		}
+		if capability != nil {
+			hdr.PAXRecords[PaxSchilyXattr+xattr] = string(capability)
+		}
+	}
+	return nil
+}
+
+// readUserXattrToTarHeader reads user.* xattrs from filesystem to a tar header
+func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
+	xattrs, err := system.Llistxattr(path)
+	if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
+		return err
+	}
+	for _, key := range xattrs {
+		if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") {
+			value, err := system.Lgetxattr(path, key)
+			if err != nil {
+				if errors.Is(err, system.E2BIG) {
+					logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key)
+					continue
+				}
+				return err
+			}
+			if hdr.PAXRecords == nil {
+				hdr.PAXRecords = make(map[string]string)
+			}
+			hdr.PAXRecords[PaxSchilyXattr+key] = string(value)
+		}
+	}
+	return nil
+}
+
+type TarWhiteoutHandler interface {
+	Setxattr(path, name string, value []byte) error
+	Mknod(path string, mode uint32, dev int) error
+	Chown(path string, uid, gid int) error
+}
+
+type TarWhiteoutConverter interface {
+	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+	ConvertRead(*tar.Header, string) (bool, error)
+	ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error)
+}
+
+type tarWriter struct {
+	TarWriter *tar.Writer
+	Buffer    *bufio.Writer
+
+	// for hardlink mapping
+	SeenFiles  map[uint64]string
+	IDMappings *idtools.IDMappings
+	ChownOpts  *idtools.IDPair
+
+	// For packing and unpacking whiteout files in the
+	// non standard format. The whiteout files defined
+	// by the AUFS standard are used as the tar whiteout
+	// standard.
+	WhiteoutConverter TarWhiteoutConverter
+	// CopyPass indicates that the contents of any archive we're creating
+	// will instantly be extracted and written to disk, so we can deviate
+	// from the traditional behavior/format to get features like subsecond
+	// precision in timestamps.
+ CopyPass bool + + // Timestamp, if set, will be set in each header as create/mod/access time + Timestamp *time.Time +} + +func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter { + return &tarWriter{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + Timestamp: timestamp, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +// path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +type addFileData struct { + // The path from which to read contents. + path string + + // os.Stat for the above. + fi os.FileInfo + + // The file header of the above. + hdr *tar.Header + + // if present, an extra whiteout entry to write after the header. + extraWhiteout *tar.Header +} + +// prepareAddFile generates the tar file header(s) for adding a file +// from path as name to the tar archive, without writing to the +// tar stream. Thus, any error may be ignored without corrupting the +// tar file. A (nil, nil) return means that the file should be +// ignored for non-error reasons. +func (ta *tarWriter) prepareAddFile(path, name string) (*addFileData, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Infof("archive: skipping %q since it is a socket", path) + return nil, nil + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return nil, err + } + if err := readSecurityXattrToTarHeader(path, hdr); err != nil { + return nil, err + } + if err := readUserXattrToTarHeader(path, hdr); err != nil { + return nil, err + } + if err := ReadFileFlagsToTarHeader(path, hdr); err != nil { + return nil, err + } + if ta.CopyPass { + copyPassHeader(hdr) + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode := getInodeFromStat(fi.Sys()) + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This must be here for the writer math to add up! + } + } + + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. 
We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return nil, err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return nil, err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + // Don’t expose the user names from the local system; they probably don’t match the ta.ChownOpts value anyway, + // and they unnecessarily give recipients of the tar file potentially private data. + hdr.Uname = "" + hdr.Gname = "" + } + + // if override timestamp set, replace all times with this + if ta.Timestamp != nil { + hdr.ModTime = *ta.Timestamp + hdr.AccessTime = *ta.Timestamp + hdr.ChangeTime = *ta.Timestamp + } + + maybeTruncateHeaderModTime(hdr) + + result := &addFileData{ + path: path, + hdr: hdr, + fi: fi, + } + if ta.WhiteoutConverter != nil { + // The WhiteoutConverter suggests a generic mechanism, + // but this code is only used to convert between + // overlayfs (on-disk) and AUFS (in the tar file) + // whiteouts, and is initiated because the overlayfs + // storage driver returns OverlayWhiteoutFormat from + // Driver.getWhiteoutFormat(). + // + // For AUFS, a directory with all its contents deleted + // should be represented as a directory containing a + // magic whiteout empty regular file, hence the + // extraWhiteout header returned here. + result.extraWhiteout, err = ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return nil, err + } + } + + return result, nil +} + +// addFile performs the write. An error here corrupts the tar file. +func (ta *tarWriter) addFile(headers *addFileData) error { + hdr := headers.hdr + if headers.extraWhiteout != nil { + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // If we write hdr with hdr.Size > 0, we have + // to write the body before we can write the + // extraWhiteout header. This can only happen + // if the contract for WhiteoutConverter is + // not honored, so bail out. + return fmt.Errorf("tar: cannot use extra whiteout with non-empty file %s", hdr.Name) + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + hdr = headers.extraWhiteout + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + file, err := os.Open(headers.path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + if !headers.fi.IsDir() && hasHardlinks(headers.fi) { + ino := getInodeFromStat(headers.fi.Sys()) + if _, seen := ta.SeenFiles[ino]; !seen { + ta.SeenFiles[ino] = headers.hdr.Name + } + } + + return nil +} + +func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) + hdrInfo := hdr.FileInfo() + + typeFlag := hdr.Typeflag + mask := hdrInfo.Mode() + + // update also the implementation of ForceMask in pkg/chunked + if forceMask != nil { + mask = *forceMask + // If we have a forceMask, force the real type to either be a directory, + // a link, or a regular file. + if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink { + typeFlag = tar.TypeReg + } + } + + switch typeFlag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); err != nil || !fi.IsDir() { + if err := os.Mkdir(path, mask); err != nil { + return err + } + } + + case tar.TypeReg: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, mask) + if err != nil { + return err + } + if _, err := io.CopyBuffer(file, reader, buffer); err != nil { + file.Close() + return err + } + if err := file.Close(); err != nil { + return err + } + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + logrus.Debugf("Tar: Can't create device %v while running in user namespace", path) + return nil + } + fallthrough + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := handleLLink(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != windows { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID) + if err != nil { + if ignoreChownErrors { + fmt.Fprintf(os.Stderr, "Chown error detected. Ignoring due to ignoreChownErrors flag: %v\n", err) + } else { + return err + } + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo, forceMask); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. 
+ aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + + var errs []string + for key, value := range hdr.PAXRecords { + xattrKey, ok := strings.CutPrefix(key, PaxSchilyXattr) + if !ok { + continue + } + if _, found := xattrsToIgnore[xattrKey]; found { + continue + } + if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil { + if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { + // Ignore specific error cases: + // - ENOTSUP: Expected for graphdrivers lacking extended attribute support: + // - Legacy AUFS versions + // - FreeBSD with unsupported namespaces (trusted, security) + // - EPERM: Expected when operating within a user namespace + // All other errors will cause a failure. + errs = append(errs, err.Error()) + continue + } + return err + } + } + + if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") { + value := idtools.Stat{ + IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}, + Mode: hdrInfo.Mode(), + Major: int(hdr.Devmajor), + Minor: int(hdr.Devminor), + } + if err := idtools.SetContainersOverrideXattr(path, value); err != nil { + return err + } + } + + // We defer setting flags on directories until the end of + // Unpack or UnpackLayer in case setting them makes the + // directory immutable. + if hdr.Typeflag != tar.TypeDir { + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } + } + + if len(errs) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errs, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. This is a convenience wrapper for [TarWithOptions]. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +func tarWithOptionsTo(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) { + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. 
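+	// (On Windows, fixVolumePathPrefix prepends the `\\?\` long-path
+	// prefix via longpath.AddPrefix; elsewhere it returns srcPath
+	// unchanged.)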
+	srcPath = fixVolumePathPrefix(srcPath)
+	defer func() {
+		if err := dest.Close(); err != nil && result == nil {
+			result = err
+		}
+	}()
+
+	pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+	if err != nil {
+		return err
+	}
+
+	compressWriter, err := CompressStream(dest, options.Compression)
+	if err != nil {
+		return err
+	}
+
+	ta := newTarWriter(
+		idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+		compressWriter,
+		options.ChownOpts,
+		options.Timestamp,
+	)
+	ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
+	ta.CopyPass = options.CopyPass
+
+	includeFiles := options.IncludeFiles
+	defer func() {
+		if err := compressWriter.Close(); err != nil && result == nil {
+			result = err
+		}
+	}()
+
+	// this buffer is needed for the duration of this piped stream
+	defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+	// In general we log errors here but ignore them because
+	// during e.g. a diff operation the container can continue
+	// mutating the filesystem and we can see transient errors
+	// from this
+
+	stat, err := os.Lstat(srcPath)
+	if err != nil {
+		return err
+	}
+
+	if !stat.IsDir() {
+		// We can't later join a non-dir with any includes because the
+		// 'walk' will error if "file/." is stat-ed and "file" is not a
+		// directory. So, we must split the source path and use the
+		// basename as the include.
+		if len(includeFiles) > 0 {
+			logrus.Warn("Tar: Can't archive a file with includes")
+		}
+
+		dir, base := SplitPathDirEntry(srcPath)
+		srcPath = dir
+		includeFiles = []string{base}
+	}
+
+	if len(includeFiles) == 0 {
+		includeFiles = []string{"."}
+	}
+
+	seen := make(map[string]bool)
+
+	for _, include := range includeFiles {
+		rebaseName := options.RebaseNames[include]
+
+		walkRoot := getWalkRoot(srcPath, include)
+		if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
+			if err != nil {
+				logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+				return nil
+			}
+
+			relFilePath, err := filepath.Rel(srcPath, filePath)
+			if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
+				// Error getting relative path OR we are looking
+				// at the source directory path. Skip in both situations.
+				return nil //nolint: nilerr
+			}
+
+			if options.IncludeSourceDir && include == "." && relFilePath != "." {
+				relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+			}
+
+			skip := false
+
+			// If "include" is an exact match for the current file
+			// then even if there's an "excludePatterns" pattern that
+			// matches it, don't skip it. IOW, assume an explicit 'include'
+			// is asking for that file no matter what - which is true
+			// for some files, like .dockerignore and Dockerfile (sometimes)
+			if include != relFilePath {
+				matches, err := pm.IsMatch(relFilePath)
+				if err != nil {
+					return fmt.Errorf("matching %s: %w", relFilePath, err)
+				}
+				skip = matches
+			}
+
+			if skip {
+				// If we want to skip this file and it's a directory
+				// then we should first check to see if there's an
+				// excludes pattern (e.g. !dir/file) that starts with this
+				// dir. If so then we can't skip this dir.
+
+				// It's not a dir, so we can just return/skip.
+				if !d.IsDir() {
+					return nil
+				}
+
+				// No exceptions (!...)
in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + headers, err := ta.prepareAddFile(filePath, relFilePath) + if err != nil { + logrus.Errorf("Can't add file %s to tar: %s; skipping", filePath, err) + } else if headers != nil { + if err := ta.addFile(headers); err != nil { + return err + } + } + return nil + }); err != nil { + return err + } + } + return ta.TarWriter.Close() +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +// +// If used on a file system being modified concurrently, +// TarWithOptions will create a valid tar archive, but may leave out +// some files. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + pipeReader, pipeWriter := io.Pipe() + go func() { + err := tarWithOptionsTo(pipeWriter, srcPath, options) + if pipeErr := pipeWriter.CloseWithError(err); pipeErr != nil { + logrus.Errorf("Can't close pipe writer: %s", pipeErr) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + buffer := make([]byte, 1<<20) + + doChown := !options.NoLchown + if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. + doChown = false + } + var rootHdr *tar.Header + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. 
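+		// (filepath.Clean strips trailing separators from every name
+		// except the root itself, so a remaining trailing separator can
+		// only denote the root entry.)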
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+			if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) {
+				err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		if rel == "." {
+			rootHdr = hdr
+		}
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+
+		// If path exists, we almost always just want to remove and replace it.
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing directory with a non-directory from the archive.
+				return overwriteError(fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest))
+			}
+
+			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing non-directory with a directory from the archive.
+				return overwriteError(fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest))
+			}
+
+			if fi.IsDir() && hdr.Name == "." {
+				continue
+			}
+
+			if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
+				if err := os.RemoveAll(path); err != nil {
+					return err
+				}
+			}
+		}
+		trBuf.Reset(tr)
+
+		chownOpts := options.ChownOpts
+		if err := remapIDs(nil, idMappings, chownOpts, hdr); err != nil {
+			return err
+		}
+
+		if whiteoutConverter != nil {
+			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+			if err != nil {
+				return err
+			}
+			if !writeFile {
+				continue
+			}
+		}
+
+		if chownOpts != nil {
+			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+		}
+
+		if err = extractTarFileEntry(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+			return err
+		}
+
+		// Directory mtimes must be handled at the end to avoid further
+		// file creation in them from modifying the directory mtime
+		if hdr.Typeflag == tar.TypeDir {
+			dirs = append(dirs, hdr)
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+
+		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return err
+		}
+		if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil {
+			return err
+		}
+	}
+
+	if options.ForceMask != nil {
+		value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)}
+		if rootHdr != nil {
+			value.IDs.UID = rootHdr.Uid
+			value.IDs.GID = rootHdr.Gid
+			value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode)
+		}
+		if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+//
+//	identity (uncompressed), gzip, bzip2, xz.
+//
+// FIXME: specify behavior when target path exists vs. doesn't exist.
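+//
+// A minimal usage sketch (the archive path and destination below are
+// hypothetical, not part of this package):
+//
+//	f, err := os.Open("layer.tar.gz")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	// Compression is detected and undone internally.
+//	return Untar(f, "/path/to/dest", &TarOptions{})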
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+	if tarArchive == nil {
+		return fmt.Errorf("empty archive")
+	}
+	dest = filepath.Clean(dest)
+	if options == nil {
+		options = &TarOptions{}
+	}
+
+	r := tarArchive
+	if decompress {
+		decompressedArchive, err := DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+	logrus.Debugf("TarUntar(%s %s)", src, dst)
+	tarMappings := archiver.TarIDMappings
+	if tarMappings == nil {
+		tarMappings = &idtools.IDMappings{}
+	}
+	options := &TarOptions{
+		UIDMaps:     tarMappings.UIDs(),
+		GIDMaps:     tarMappings.GIDs(),
+		Compression: Uncompressed,
+		CopyPass:    true,
+		InUserNS:    unshare.IsRootless(),
+	}
+	archive, err := TarWithOptions(src, options)
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+	untarMappings := archiver.UntarIDMappings
+	if untarMappings == nil {
+		untarMappings = &idtools.IDMappings{}
+	}
+	options = &TarOptions{
+		UIDMaps:   untarMappings.UIDs(),
+		GIDMaps:   untarMappings.GIDs(),
+		ChownOpts: archiver.ChownOpts,
+		InUserNS:  unshare.IsRootless(),
+	}
+	return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars a file from path to a destination; src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+	archive, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+	untarMappings := archiver.UntarIDMappings
+	if untarMappings == nil {
+		untarMappings = &idtools.IDMappings{}
+	}
+	options := &TarOptions{
+		UIDMaps:   untarMappings.UIDs(),
+		GIDMaps:   untarMappings.GIDs(),
+		ChownOpts: archiver.ChownOpts,
+		InUserNS:  unshare.IsRootless(),
+	}
+	return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
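+//
+// A sketch of typical use (the paths are hypothetical):
+//
+//	a := NewArchiver(nil)
+//	if err := a.CopyWithTar("/some/src", "/some/dst"); err != nil {
+//		return err
+//	}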
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	if !srcSt.IsDir() {
+		return archiver.CopyFileWithTar(src, dst)
+	}
+
+	// if this archiver is set up with ID mapping we need to create
+	// the new destination directory with the remapped root UID/GID pair
+	// as owner
+	rootIDs := archiver.UntarIDMappings.RootPair()
+	if archiver.ChownOpts != nil {
+		rootIDs = *archiver.ChownOpts
+	}
+	// Create dst, copy src's content into it
+	logrus.Debugf("Creating dest directory: %s", dst)
+	if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
+		return err
+	}
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if srcSt.IsDir() {
+		return fmt.Errorf("can't copy a directory")
+	}
+
+	// Clean up the trailing slash. This must be done in an operating
+	// system specific manner.
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+		copyPassHeader(hdr)
+
+		if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil {
+			return err
+		}
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	options := &TarOptions{
+		UIDMaps:              archiver.UntarIDMappings.UIDs(),
+		GIDMaps:              archiver.UntarIDMappings.GIDs(),
+		ChownOpts:            archiver.ChownOpts,
+		InUserNS:             unshare.IsRootless(),
+		NoOverwriteDirNonDir: true,
+	}
+	err = archiver.Untar(r, filepath.Dir(dst), options)
+	if err != nil {
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, hdr *tar.Header) (err error) {
+	var uid, gid int
+	if chownOpts != nil {
+		uid, gid = chownOpts.UID, chownOpts.GID
+	} else {
+		if readIDMappings != nil && !readIDMappings.Empty() {
+			uid, gid, err = readIDMappings.ToContainer(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+			if err != nil {
+				return err
+			}
+		} else if runtime.GOOS == darwin {
+			uid, gid = hdr.Uid, hdr.Gid
+			if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
+				attrs := strings.Split(xstat, ":")
+				if len(attrs) >= 3 {
+					val, err := strconv.ParseUint(attrs[0], 10, 32)
+					if err == nil {
+						uid = int(val)
+					}
+					val, err = strconv.ParseUint(attrs[1], 10, 32)
+					if err == nil {
+						gid = int(val)
+					}
+				}
+			}
+		} else {
+			uid, gid = hdr.Uid, hdr.Gid
+		}
+	}
+	ids := idtools.IDPair{UID: uid, GID: gid}
+	if writeIDMappings != nil && !writeIDMappings.Empty() {
+		ids, err
= writeIDMappings.ToHost(ids) + if err != nil { + return err + } + } + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := os.CreateTemp(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, io.SeekStart); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewReader(header)) + _, err := r.Next() + return err == nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
+func UntarPath(src, dst string) error { + return NewDefaultArchiver().UntarPath(src, dst) +} + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +// NewArchiver returns a new Archiver +func NewArchiver(idMappings *idtools.IDMappings) *Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings} +} + +// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends +func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver { + if tarIDMappings == nil { + tarIDMappings = &idtools.IDMappings{} + } + if untarIDMappings == nil { + untarIDMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings} +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return fmt.Errorf("creating pipe extract data to %q: %w", dest, err) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := tar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) + } + hashWorker.Wait() + if err == nil && hashError != nil { + err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func 
UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} + +// TarPath returns a function which creates an archive of a specified +// location in the container's filesystem, mapping permissions using the +// container's ID maps +func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) { + tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + return func(path string) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{ + Compression: Uncompressed, + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + }) + } +} + +// GetOverlayXattrName returns the xattr used by the overlay driver with the +// given name. +// It uses the trusted.overlay prefix when running as root, and user.overlay +// in rootless mode. +func GetOverlayXattrName(name string) string { + if unshare.IsRootless() { + return fmt.Sprintf("user.overlay.%s", name) + } + return fmt.Sprintf("trusted.overlay.%s", name) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive_110.go b/tools/vendor/go.podman.io/storage/pkg/archive/archive_110.go new file mode 100644 index 000000000..db614cdd6 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive_110.go @@ -0,0 +1,22 @@ +//go:build go1.10 + +package archive + +import ( + "archive/tar" + "time" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { + if hdr.Format == tar.FormatUnknown { + // one of the first things archive/tar does is round this + // value, possibly up, if the format isn't specified, while we + // are much better equipped to handle truncation when scanning + // for changes between source and an extracted copy of this + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + } +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive_19.go b/tools/vendor/go.podman.io/storage/pkg/archive/archive_19.go new file mode 100644 index 000000000..304464fe7 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive_19.go @@ -0,0 +1,13 @@ +//go:build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive_bsd.go b/tools/vendor/go.podman.io/storage/pkg/archive/archive_bsd.go new file mode 100644 index 000000000..74e62331a --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive_bsd.go @@ -0,0 +1,18 @@ +//go:build netbsd || freebsd || darwin + +package archive + +import ( + "archive/tar" + "os" + + "golang.org/x/sys/unix" +) + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + return unix.Fchmodat(unix.AT_FDCWD, path, uint32(permissionsMask), unix.AT_SYMLINK_NOFOLLOW) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive_linux.go 
b/tools/vendor/go.podman.io/storage/pkg/archive/archive_linux.go new file mode 100644 index 000000000..fd7123bab --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive_linux.go @@ -0,0 +1,208 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func getOverlayOpaqueXattrName() string { + return GetOverlayXattrName("opaque") +} + +func GetWhiteoutConverter(format WhiteoutFormat, data any) TarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + if rolayers, ok := data.([]string); ok && len(rolayers) > 0 { + return overlayWhiteoutConverter{rolayers: rolayers} + } + return overlayWhiteoutConverter{rolayers: nil} + } + return nil +} + +type overlayWhiteoutConverter struct { + rolayers []string +} + +func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the whiteout prefix + opaque, err := system.Lgetxattr(path, getOverlayOpaqueXattrName()) + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.PAXRecords != nil { + delete(hdr.PAXRecords, PaxSchilyXattr+getOverlayOpaqueXattrName()) + } + // If there are no lower layers, then it can't have been deleted in this layer. + if len(o.rolayers) == 0 { + return nil, nil //nolint: nilnil + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, rolayer := range o.rolayers { + stat, statErr := os.Stat(filepath.Join(rolayer, hdr.Name)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return nil, nil //nolint: nilnil + } + } + // It's not whiteout, so it was there in the older layer, so we need to + // add a whiteout for this item in this layer. + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + break + } + for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory in a parent layer. + stat, statErr := os.Stat(filepath.Join(rolayer, dir)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // If it's whiteout for a parent directory, then the + // original directory wasn't inherited into this layer, + // so we don't need to emit whiteout for it. 
+ if isWhiteOut(stat) { + return nil, nil //nolint: nilnil + } + } + } + } + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := handler.Setxattr(dir, getOverlayOpaqueXattrName(), []byte{'y'}) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok { + originalPath := filepath.Join(dir, originalBase) + + if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + // If someone does: + // rm -rf /foo/bar + // in an image, some tools will generate a layer with: + // /.wh.foo + // /foo/.wh.bar + // and when doing the second mknod(), we will fail with + // ENOTDIR, since the previous /foo was mknod()'d as a + // character device node and not a directory. + if isENOTDIR(err) { + return false, nil + } + return false, err + } + if err := handler.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} + +type directHandler struct{} + +func (d directHandler) Setxattr(path, name string, value []byte) error { + return unix.Setxattr(path, name, value, 0) +} + +func (d directHandler) Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +func (d directHandler) Chown(path string, uid, gid int) error { + return idtools.SafeChown(path, uid, gid) +} + +func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + var handler directHandler + return o.ConvertReadWithHandler(hdr, path, handler) +} + +func isWhiteOut(stat os.FileInfo) bool { + s := stat.Sys().(*syscall.Stat_t) + return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 //nolint:unconvert +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + f, err := os.Stat(path) + if err != nil { + return 0, 0, 0, err + } + s, ok := f.Sys().(*syscall.Stat_t) + if ok { + return s.Uid, s.Gid, s.Mode & 0o7777, nil + } + return 0, 0, uint32(f.Mode()), nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive_other.go b/tools/vendor/go.podman.io/storage/pkg/archive/archive_other.go new file mode 100644 index 000000000..b342ff75e --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive_other.go @@ -0,0 +1,11 @@ +//go:build !linux + +package archive + +func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + return nil +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + return 0, 0, 0, nil +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive_unix.go 
b/tools/vendor/go.podman.io/storage/pkg/archive/archive_unix.go new file mode 100644 index 000000000..2d9d68de2 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive_unix.go @@ -0,0 +1,136 @@ +//go:build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func init() { + sysStatOverride = statUnix +} + +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. +// Adapted from Moby. +func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + } + + return nil +} + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
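+// On Unix this is the identity function; the Windows variant (see
+// archive_windows.go) adds the execute bit and masks permissions to 0o755.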
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat any) (inode uint64) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat any) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 0o7777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor)) +} + +// Hardlink without symlinks +func handleLLink(targetPath, path string) error { + // Note: on Linux, the link syscall will not follow symlinks. + // This behavior is implementation-dependent since + // POSIX.1-2008 so to make it clear that we need non-symlink + // following here we use the linkat syscall which has a flags + // field to select symlink following or not. + return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive_windows.go b/tools/vendor/go.podman.io/storage/pkg/archive/archive_windows.go new file mode 100644 index 000000000..1183f4a28 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive_windows.go @@ -0,0 +1,83 @@ +//go:build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0o111 + permPart &= 0o755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{0, 0}, nil +} + +// Hardlink without following symlinks +func handleLLink(targetPath string, path string) error { + return os.Link(targetPath, path) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/archive_zstd.go b/tools/vendor/go.podman.io/storage/pkg/archive/archive_zstd.go new file mode 100644 index 000000000..36b7118aa --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/archive_zstd.go @@ -0,0 +1,41 @@ +package archive + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/changes.go b/tools/vendor/go.podman.io/storage/pkg/archive/changes.go new file mode 100644 index 000000000..051ab6952 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/changes.go @@ -0,0 +1,505 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "maps" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/pools" + "go.podman.io/storage/pkg/system" +) + +// ChangeType represents the change type. 
+type ChangeType int
+
+const (
+	// ChangeModify represents the modify operation.
+	ChangeModify = iota
+	// ChangeAdd represents the add operation.
+	ChangeAdd
+	// ChangeDelete represents the delete operation.
+	ChangeDelete
+)
+
+func (c ChangeType) String() string {
+	switch c {
+	case ChangeModify:
+		return "C"
+	case ChangeAdd:
+		return "A"
+	case ChangeDelete:
+		return "D"
+	}
+	return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change can be a modify, an add, or a delete.
+// This is used for layer diff.
+type Change struct {
+	Path string
+	Kind ChangeType
+}
+
+func (change *Change) String() string {
+	return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// changesByPath implements sort.Interface.
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int           { return len(c) }
+func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
+
+// GNU tar and the Go tar writer don't have sub-second mtime precision,
+// which is problematic when we apply changes via tar files. We handle
+// this by comparing for exact times, *or* for the same second count
+// with either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+	return a.Equal(b) ||
+		(a.Unix() == b.Unix() &&
+			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+	return a.Sec == b.Sec &&
+		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+	skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+	if err != nil {
+		skip = true
+	}
+	return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+	f := filepath.Base(path)
+
+	// If there is a whiteout, then the file was removed
+	if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok {
+		return filepath.Join(filepath.Dir(path), originalFile), nil
+	}
+
+	return "", nil
+}
+
+func aufsWhiteoutPresent(root, path string) (bool, error) {
+	f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path))
+	err := fileutils.Exists(filepath.Join(root, f))
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) || isENOTDIR(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func isENOTDIR(err error) bool {
+	if err == nil {
+		return false
+	}
+	if err == syscall.ENOTDIR {
+		return true
+	}
+	if perror, ok := err.(*os.PathError); ok {
+		if errno, ok := perror.Err.(syscall.Errno); ok {
+			return errno == syscall.ENOTDIR
+		}
+	}
+	return false
+}
+
+type (
+	skipChange     func(string) (bool, error)
+	deleteChange   func(string, string, os.FileInfo) (string, error)
+	whiteoutChange func(string, string) (bool, error)
+)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) {
+	var (
+		changes     []Change
+		changedDirs = make(map[string]struct{})
+	)
+
+	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(rw, path)
+		if err != nil {
+			return err
+		}
+
+		// As
this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + layerScan: + for _, layer := range layers { + if wc != nil { + // ...Unless a lower layer also had whiteout for this directory or one of its parents, + // in which case, it's new + ignore, err := wc(layer, path) + if err != nil { + return err + } + if ignore { + break layerScan + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + ignore, err = wc(layer, dir) + if err != nil { + return err + } + if ignore { + break layerScan + } + } + } + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + tail := []Change{} + for parent != "/" { + if _, ok := changedDirs[parent]; !ok && parent != "/" { + tail = append([]Change{{Path: parent, Kind: ChangeModify}}, tail...) + changedDirs[parent] = struct{}{} + } + parent = filepath.Dir(parent) + } + changes = append(changes, tail...) + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + idMappings *idtools.IDMappings + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool + xattrs map[string]string + target string +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
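+	// (Callers pass paths rooted at os.PathSeparator, as produced by
+	// the walk in changes() above.)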
+ parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + maps.Copy(oldChildren, oldInfo.children) + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, oldInfo, newStat, info) || + !bytes.Equal(oldChild.capability, newChild.capability) || + oldChild.target != newChild.target || + !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
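+	// (Naming the root after the separator lets FileInfo.path() join
+	// children into absolute, OS-native paths.)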
+ root := &FileInfo{ + name: string(os.PathSeparator), + idMappings: idMappings, + children: make(map[string]*FileInfo), + target: "", + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { + var oldRoot, newRoot *FileInfo + if oldDir == "" { + emptyDir, err := os.MkdirTemp("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir, oldMappings, newMappings) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + headers, err := ta.prepareAddFile(path, change.Path[1:]) + if err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } else if headers != nil { + if err := ta.addFile(headers); err != nil { + writer.CloseWithError(err) + return + } + } + } + } + + // Make sure to check the error on Close. 
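+		// (tar.Writer.Close flushes the trailing zero blocks; a failure
+		// here leaves the archive truncated.)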
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed to close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/changes_linux.go b/tools/vendor/go.podman.io/storage/pkg/archive/changes_linux.go
new file mode 100644
index 000000000..343f3e686
--- /dev/null
+++ b/tools/vendor/go.podman.io/storage/pkg/archive/changes_linux.go
@@ -0,0 +1,404 @@
+package archive
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/sirupsen/logrus"
+	"go.podman.io/storage/pkg/idtools"
+	"go.podman.io/storage/pkg/system"
+	"golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. While this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save seconds on large
+// images.
+type walker struct {
+	dir1   string
+	dir2   string
+	root1  *FileInfo
+	root2  *FileInfo
+	idmap1 *idtools.IDMappings //nolint:unused
+	idmap2 *idtools.IDMappings //nolint:unused
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string, idmap1, idmap2 *idtools.IDMappings) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(idmap1),
+		root2: newRootFileInfo(idmap2),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: root.idMappings, + target: "", + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + if err != nil && !errors.Is(err, system.ENOTSUP) { + return err + } + xattrs, err := system.Llistxattr(cpath) + if err != nil && !errors.Is(err, system.ENOTSUP) { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + value, err := system.Lgetxattr(cpath, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key) + continue + } + return err + } + if info.xattrs == nil { + info.xattrs = make(map[string]string) + } + info.xattrs[key] = string(value) + } + } + if fi.Mode()&os.ModeSymlink != 0 { + info.target, err = os.Readlink(cpath) + if err != nil { + return err + } + } + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
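+	// For example, if names1 = [a, c, d] and names2 = [b, c], and c has the same
+	// inode and device in both trees, the merge below yields [a, b, d]: c is
+	// pruned because a matching inode proves the subtree is unchanged.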
+ var names []string + ix1 := 0 + ix2 := 0 + + for ix1 < len(names1) && ix2 < len(names2) { + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + builder := make([]byte, 0, dirent.Reclen) + for i := range len(dirent.Name) { + if dirent.Name[i] == 0 { + break + } + builder = append(builder, byte(dirent.Name[i])) + } + name := string(builder) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + dc := func(root, path string, fi os.FileInfo) (string, error) { + r, err := overlayDeletedFile(layers, root, path, fi) + if err != nil { + return "", fmt.Errorf("overlay deleted file query: %w", err) + } + return r, nil + } + return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) +} + +func overlayLowerContainsWhiteout(root, path string) (bool, error) { + // Whiteout for a file or directory has the same name, but is for a character + // device with major/minor of 0/0. + stat, err := os.Stat(filepath.Join(root, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return false, err + } + if err == nil && stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return true, nil + } + } + return false, nil +} + +func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) { + // If it's a whiteout item, then a file or directory with that name is removed by this layer. + if fi.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(fi) { + return path, nil + } + } + // After this we only need to pay attention to directories. + if !fi.IsDir() { + return "", nil + } + // If the directory isn't marked as opaque, then it's just a normal directory. + opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName()) + if err != nil { + return "", fmt.Errorf("failed querying overlay opaque xattr: %w", err) + } + if len(opaque) != 1 || opaque[0] != 'y' { + return "", err + } + // If there are no lower layers, then it can't have been deleted and recreated in this layer. + if len(layers) == 0 { + return "", err + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + // It's not whiteout, so it was there in the older layer, so it has to be + // marked as deleted in this layer. + return path, nil + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory. + stat, err := os.Stat(filepath.Join(layer, dir)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + } + } + } + + // We didn't find the same path in any older layers, so it was new in this one. 
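+	// An opaque directory that appears in no lower layer was created fresh in
+	// this layer, so there is no deletion to report for it.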
+ return "", nil +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/changes_other.go b/tools/vendor/go.podman.io/storage/pkg/archive/changes_other.go new file mode 100644 index 000000000..5fde4c738 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/changes_other.go @@ -0,0 +1,112 @@ +//go:build !linux + +package archive + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir, oldIDMap) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir, newIDMap) + errs <- err2 + }() + + // block until both routines have returned + for range 2 { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { + root := newRootFileInfo(idMappings) + + sourceStat, err := system.Lstat(sourceDir) + if err != nil { + return nil, err + } + + err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: idMappings, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + + // Don't cross mount points. This ignores file mounts to avoid + // generating a diff which deletes all files following the + // mount. 
+ if s.Dev() != sourceStat.Dev() && s.IsDir() { + return filepath.SkipDir + } + + info.stat = s + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/changes_unix.go b/tools/vendor/go.podman.io/storage/pkg/archive/changes_unix.go new file mode 100644 index 000000000..f07a135a4 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/changes_unix.go @@ -0,0 +1,51 @@ +//go:build !windows + +package archive + +import ( + "os" + "syscall" + + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + // Don't look at size for dirs, its not a good measure of change + oldUID, oldGID := oldStat.UID(), oldStat.GID() + uid, gid := newStat.UID(), newStat.GID() + if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil { + uid = uint32(cuid) + gid = uint32(cgid) + if oldInfo != nil { + if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil { + oldUID = uint32(oldcuid) + oldGID = uint32(oldcgid) + } + } + } + ownerChanged := uid != oldUID || gid != oldGID + if oldStat.Mode() != newStat.Mode() || + ownerChanged || + oldStat.Rdev() != newStat.Rdev() || + oldStat.Flags() != newStat.Flags() || + !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || + // Don't look at size for dirs, its not a good measure of change + ((oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR) && (oldStat.Size() != newStat.Size())) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/changes_windows.go b/tools/vendor/go.podman.io/storage/pkg/archive/changes_windows.go new file mode 100644 index 000000000..947ec2d22 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/changes_windows.go @@ -0,0 +1,29 @@ +package archive + +import ( + "os" + + "go.podman.io/storage/pkg/system" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mtim() != newStat.Mtim() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/copy.go b/tools/vendor/go.podman.io/storage/pkg/archive/copy.go new file mode 100644 index 000000000..308f132d5 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/copy.go @@ -0,0 +1,459 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "go.podman.io/storage/pkg/fileutils" +) + +// Errors used or returned by this file. 
+var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if err = fileutils.Lexists(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !filepath.IsAbs(linkTarget) { + // Join with the parent directory. 
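+			// A relative target is interpreted relative to the directory that
+			// contains the link: for example, a link /a/b whose target is "c"
+			// resolves to /a/c.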
+ dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, io.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. 
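+		// For example, copying a regular file to "dst/" when dst does not exist.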
+ return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. 
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. 
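+		// For example, copying "data/" where data is a symlink to "content":
+		// the archive entries are rooted at "content" and are rebased back to
+		// "data", the name the caller actually requested.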
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/copy_unix.go b/tools/vendor/go.podman.io/storage/pkg/archive/copy_unix.go new file mode 100644 index 000000000..f57928244 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +//go:build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/copy_windows.go b/tools/vendor/go.podman.io/storage/pkg/archive/copy_windows.go new file mode 100644 index 000000000..2b775b45c --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/diff.go b/tools/vendor/go.podman.io/storage/pkg/archive/diff.go new file mode 100644 index 000000000..355d65f21 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/diff.go @@ -0,0 +1,274 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/sirupsen/logrus" + "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/pools" + "go.podman.io/storage/pkg/system" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + buffer := make([]byte, 1<<20) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+		if runtime.GOOS == windows {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note: these operations are platform-specific, so the path separator must be too.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) {
+				err = os.MkdirAll(parentPath, 0o755)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+			// We don't want this directory, but we need the files in it so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := extractTarFileEntry(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note: these operations are platform-specific, so the path separator must be too.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				err := fileutils.Lexists(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						if err := resetImmutable(path, nil); err != nil {
+							return err
+						}
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := resetImmutable(originalPath, nil); err != nil {
+					return 0, err
+				}
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			//
+			// We always reset the immutable flag (if present) to allow metadata
+			// changes and to allow directory modification. The flag will be
+			// re-applied based on the contents of hdr either at the end for
+			// directories or in extractTarFileEntry otherwise.
+ if fi, err := os.Lstat(path); err == nil { + if err := resetImmutable(path, &fi); err != nil { + return 0, err + } + if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(nil, idMappings, options.ChownOpts, srcHdr); err != nil { + return 0, err + } + + if err := extractTarFileEntry(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer func() { + _, _ = system.Umask(oldmask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above. 
+ }() + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/fflags_bsd.go b/tools/vendor/go.podman.io/storage/pkg/archive/fflags_bsd.go new file mode 100644 index 000000000..829c95ef1 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/fflags_bsd.go @@ -0,0 +1,166 @@ +//go:build freebsd + +package archive + +import ( + "archive/tar" + "fmt" + "math/bits" + "os" + "strings" + "syscall" + + "go.podman.io/storage/pkg/system" +) + +const ( + paxSCHILYFflags = "SCHILY.fflags" +) + +var ( + flagNameToValue = map[string]uint32{ + "sappnd": system.SF_APPEND, + "sappend": system.SF_APPEND, + "arch": system.SF_ARCHIVED, + "archived": system.SF_ARCHIVED, + "schg": system.SF_IMMUTABLE, + "schange": system.SF_IMMUTABLE, + "simmutable": system.SF_IMMUTABLE, + "sunlnk": system.SF_NOUNLINK, + "sunlink": system.SF_NOUNLINK, + "snapshot": system.SF_SNAPSHOT, + "uappnd": system.UF_APPEND, + "uappend": system.UF_APPEND, + "uarch": system.UF_ARCHIVE, + "uarchive": system.UF_ARCHIVE, + "hidden": system.UF_HIDDEN, + "uhidden": system.UF_HIDDEN, + "uchg": system.UF_IMMUTABLE, + "uchange": system.UF_IMMUTABLE, + "uimmutable": system.UF_IMMUTABLE, + "uunlnk": system.UF_NOUNLINK, + "uunlink": system.UF_NOUNLINK, + "offline": system.UF_OFFLINE, + "uoffline": system.UF_OFFLINE, + "opaque": system.UF_OPAQUE, + "rdonly": system.UF_READONLY, + "urdonly": system.UF_READONLY, + "readonly": system.UF_READONLY, + "ureadonly": system.UF_READONLY, + "reparse": system.UF_REPARSE, + "ureparse": system.UF_REPARSE, + "sparse": system.UF_SPARSE, + "usparse": system.UF_SPARSE, + "system": system.UF_SYSTEM, + "usystem": system.UF_SYSTEM, + } + // Only include the short names for the reverse map + flagValueToName = map[uint32]string{ + system.SF_APPEND: "sappnd", + system.SF_ARCHIVED: "arch", + system.SF_IMMUTABLE: "schg", + system.SF_NOUNLINK: "sunlnk", + system.SF_SNAPSHOT: "snapshot", + system.UF_APPEND: "uappnd", + system.UF_ARCHIVE: "uarch", + system.UF_HIDDEN: "hidden", + system.UF_IMMUTABLE: "uchg", + system.UF_NOUNLINK: "uunlnk", + system.UF_OFFLINE: "offline", + system.UF_OPAQUE: "opaque", + system.UF_READONLY: "rdonly", + system.UF_REPARSE: "reparse", + system.UF_SPARSE: "sparse", + system.UF_SYSTEM: "system", + } +) + +func parseFileFlags(fflags string) (uint32, uint32, error) { + var set, clear uint32 = 0, 0 + for _, fflag := range strings.Split(fflags, ",") { + isClear := false + if clean, ok := strings.CutPrefix(fflag, "no"); ok { + isClear = true + fflag = clean + } + if value, ok := flagNameToValue[fflag]; ok { + if isClear { + clear |= value + } else { + set |= value + } + } else { + return 0, 0, fmt.Errorf("parsing file flags, unrecognised token: %s", fflag) + } + } + return set, clear, nil +} + +func formatFileFlags(fflags uint32) (string, error) { + res := []string{} + for fflags != 0 { + // Extract lowest set bit + fflag := uint32(1) << bits.TrailingZeros32(fflags) + if name, ok := flagValueToName[fflag]; ok { + res = append(res, name) + } else { + return "", fmt.Errorf("formatting file flags, unrecognised flag: %x", fflag) + } + fflags &= ^fflag + } + return strings.Join(res, ","), nil +} + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + st, err := system.Lstat(path) + if err != nil { + return err + } + fflags, err := formatFileFlags(st.Flags()) + if err != nil { + return err + } + if fflags != "" { + if hdr.PAXRecords == nil { 
+ hdr.PAXRecords = map[string]string{} + } + hdr.PAXRecords[paxSCHILYFflags] = fflags + } + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + if fflags, ok := hdr.PAXRecords[paxSCHILYFflags]; ok { + var set, clear uint32 + set, clear, err := parseFileFlags(fflags) + if err != nil { + return err + } + + // Apply the delta to the existing file flags + st, err := system.Lstat(path) + if err != nil { + return err + } + return system.Lchflags(path, (st.Flags() & ^clear)|set) + } + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + var flags uint32 + if fi != nil { + flags = (*fi).Sys().(*syscall.Stat_t).Flags + } else { + st, err := system.Lstat(path) + if err != nil { + return err + } + flags = st.Flags() + } + if flags&(system.SF_IMMUTABLE|system.UF_IMMUTABLE) != 0 { + flags &= ^(system.SF_IMMUTABLE | system.UF_IMMUTABLE) + return system.Lchflags(path, flags) + } + return nil +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/fflags_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/archive/fflags_unsupported.go new file mode 100644 index 000000000..5ad480f7c --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/fflags_unsupported.go @@ -0,0 +1,20 @@ +//go:build !freebsd + +package archive + +import ( + "archive/tar" + "os" +) + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + return nil +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/filter.go b/tools/vendor/go.podman.io/storage/pkg/archive/filter.go new file mode 100644 index 000000000..9902a1ef5 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/filter.go @@ -0,0 +1,73 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "os/exec" + "strings" + "sync" +) + +var filterPath sync.Map + +func getFilterPath(name string) string { + path, ok := filterPath.Load(name) + if ok { + return path.(string) + } + + path, err := exec.LookPath(name) + if err != nil { + path = "" + } + + filterPath.Store(name, path) + return path.(string) +} + +type errorRecordingReader struct { + r io.Reader + err error +} + +func (r *errorRecordingReader) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + if r.err == nil && err != io.EOF { + r.err = err + } + return n, err +} + +// tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout. +// cleanup() is a caller provided function that will be called when the command finishes running, regardless of +// whether it succeeds or fails. +// If the command is not found, it returns (nil, false) and the cleanup function is not called. +func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadCloser, bool) { + path := getFilterPath(args[0]) + if path == "" { + return nil, false + } + + var stderrBuf bytes.Buffer + + inputWithError := &errorRecordingReader{r: input} + + r, w := io.Pipe() + cmd := exec.Command(path, args[1:]...) 
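+	// Wire the filter process to the pipe: stdin reads from the caller (with
+	// read errors recorded so they take precedence over the filter's own
+	// failure), stdout feeds the returned ReadCloser, and stderr is buffered
+	// so it can be attached to any error.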
+ cmd.Stdin = inputWithError + cmd.Stdout = w + cmd.Stderr = &stderrBuf + go func() { + err := cmd.Run() + // if there is an error reading from input, prefer to return that error + if inputWithError.err != nil { + err = inputWithError.err + } else if err != nil && stderrBuf.Len() > 0 { + err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err) + } + w.CloseWithError(err) // CloseWithErr(nil) == Close() + cleanup() + }() + return r, true +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/time_linux.go b/tools/vendor/go.podman.io/storage/pkg/archive/time_linux.go new file mode 100644 index 000000000..3448569b1 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/time_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/archive/time_unsupported.go new file mode 100644 index 000000000..3555d753a --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +//go:build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/whiteouts.go b/tools/vendor/go.podman.io/storage/pkg/archive/whiteouts.go new file mode 100644 index 000000000..d20478a10 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/tools/vendor/go.podman.io/storage/pkg/archive/wrap.go b/tools/vendor/go.podman.io/storage/pkg/archive/wrap.go new file mode 100644 index 000000000..903befd76 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. 
+// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// - ./foo.txt with content "hello world" +// - ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/tools/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go b/tools/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go new file mode 100644 index 000000000..23bcbda51 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go @@ -0,0 +1,524 @@ +package compressor + +// NOTE: This is used from github.com/containers/image by callers that +// don't otherwise use containers/storage, so don't make this depend on any +// larger software like the graph drivers. + +import ( + "bufio" + "bytes" + "io" + + "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" + "go.podman.io/storage/pkg/chunked/internal/minimal" + "go.podman.io/storage/pkg/ioutils" +) + +const ( + RollsumBits = 16 + holesThreshold = int64(1 << 10) +) + +type holesFinder struct { + reader *bufio.Reader + zeros int64 + threshold int64 + + state int +} + +const ( + holesFinderStateRead = iota + holesFinderStateAccumulate + holesFinderStateFound + holesFinderStateEOF +) + +// readByte reads a single byte from the underlying reader. +// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil). +// If there are at least f.THRESHOLD consecutive zeros, then the +// return value is (N_CONSECUTIVE_ZEROS, '\x00'). +func (f *holesFinder) readByte() (int64, byte, error) { + for { + switch f.state { + // reading the file stream + case holesFinderStateRead: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + b, err := f.reader.ReadByte() + if err != nil { + return 0, b, err + } + + if b != 0 { + return 0, b, err + } + + f.zeros = 1 + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } else { + f.state = holesFinderStateAccumulate + } + // accumulating zeros, but still didn't reach the threshold + case holesFinderStateAccumulate: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + continue + } + return 0, b, err + } + + if b == 0 { + f.zeros++ + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } + } else { + if err := f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + } + // found a hole. 
Number of zeros >= threshold + case holesFinderStateFound: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + } + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + if b != 0 { + if err := f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + f.zeros++ + // reached EOF. Flush pending zeros if any. + case holesFinderStateEOF: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + return 0, 0, io.EOF + } + } +} + +type rollingChecksumReader struct { + reader *holesFinder + closed bool + rollsum *RollSum + pendingHole int64 + + // WrittenOut is the total number of bytes read from + // the stream. + WrittenOut int64 + + // IsLastChunkZeros tells whether the last generated + // chunk is a hole (made of consecutive zeros). If it + // is false, then the last chunk is a data chunk + // generated by the rolling checksum. + IsLastChunkZeros bool +} + +func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { + rc.IsLastChunkZeros = false + + if rc.pendingHole > 0 { + toCopy := min(rc.pendingHole, int64(len(b))) + rc.pendingHole -= toCopy + for i := int64(0); i < toCopy; i++ { + b[i] = 0 + } + + rc.WrittenOut += toCopy + + rc.IsLastChunkZeros = true + + // if there are no other zeros left, terminate the chunk + return rc.pendingHole == 0, int(toCopy), nil + } + + if rc.closed { + return false, 0, io.EOF + } + + for i := range b { + holeLen, n, err := rc.reader.readByte() + if err != nil { + if err == io.EOF { + rc.closed = true + if i == 0 { + return false, 0, err + } + return false, i, nil + } + // Report any other error type + return false, -1, err + } + if holeLen > 0 { + for j := int64(0); j < holeLen; j++ { + rc.rollsum.Roll(0) + } + rc.pendingHole = holeLen + return true, i, nil + } + b[i] = n + rc.WrittenOut++ + rc.rollsum.Roll(n) + if rc.rollsum.OnSplitWithBits(RollsumBits) { + return true, i + 1, nil + } + } + return false, len(b), nil +} + +type chunk struct { + ChunkOffset int64 + Offset int64 + Checksum string + ChunkSize int64 + ChunkType string +} + +type tarSplitData struct { + compressed *bytes.Buffer + digester digest.Digester + uncompressedCounter *ioutils.WriteCounter + zstd minimal.ZstdWriter + packer storage.Packer +} + +func newTarSplitData(createZstdWriter minimal.CreateZstdWriterFunc) (*tarSplitData, error) { + compressed := bytes.NewBuffer(nil) + digester := digest.Canonical.Digester() + + zstdWriter, err := createZstdWriter(io.MultiWriter(compressed, digester.Hash())) + if err != nil { + return nil, err + } + + uncompressedCounter := ioutils.NewWriteCounter(zstdWriter) + metaPacker := storage.NewJSONPacker(uncompressedCounter) + + return &tarSplitData{ + compressed: compressed, + digester: digester, + uncompressedCounter: uncompressedCounter, + zstd: zstdWriter, + packer: metaPacker, + }, nil +} + +func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, createZstdWriter minimal.CreateZstdWriterFunc) error { + // total written so far. 
Used to retrieve partial offsets in the file
+ dest := ioutils.NewWriteCounter(destFile)
+
+ tarSplitData, err := newTarSplitData(createZstdWriter)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if tarSplitData.zstd != nil {
+ tarSplitData.zstd.Close()
+ }
+ }()
+
+ its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
+ if err != nil {
+ return err
+ }
+
+ tr := tar.NewReader(its)
+ tr.RawAccounting = true
+
+ buf := make([]byte, 4096)
+
+ zstdWriter, err := createZstdWriter(dest)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if zstdWriter != nil {
+ zstdWriter.Close()
+ }
+ }()
+
+ restartCompression := func() (int64, error) {
+ var offset int64
+ if zstdWriter != nil {
+ if err := zstdWriter.Close(); err != nil {
+ return 0, err
+ }
+ offset = dest.Count
+ zstdWriter.Reset(dest)
+ }
+ return offset, nil
+ }
+
+ var metadata []minimal.FileMetadata
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ return err
+ }
+
+ payloadDigester := digest.Canonical.Digester()
+ chunkDigester := digest.Canonical.Digester()
+
+ // Now handle the payload, if any
+ startOffset := int64(0)
+ lastOffset := int64(0)
+ lastChunkOffset := int64(0)
+
+ checksum := ""
+
+ chunks := []chunk{}
+
+ hf := &holesFinder{
+ threshold: holesThreshold,
+ reader: bufio.NewReader(tr),
+ }
+
+ rcReader := &rollingChecksumReader{
+ reader: hf,
+ rollsum: NewRollSum(),
+ }
+
+ payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+ for {
+ mustSplit, read, errRead := rcReader.Read(buf)
+ if errRead != nil && errRead != io.EOF {
+ return errRead
+ }
+ // restart the compression only if there is a payload.
+ if read > 0 {
+ if startOffset == 0 {
+ startOffset, err = restartCompression()
+ if err != nil {
+ return err
+ }
+ lastOffset = startOffset
+ }
+
+ if _, err := payloadDest.Write(buf[:read]); err != nil {
+ return err
+ }
+ }
+ if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+ off, err := restartCompression()
+ if err != nil {
+ return err
+ }
+
+ chunkSize := rcReader.WrittenOut - lastChunkOffset
+ if chunkSize > 0 {
+ chunkType := minimal.ChunkTypeData
+ if rcReader.IsLastChunkZeros {
+ chunkType = minimal.ChunkTypeZeros
+ }
+
+ chunks = append(chunks, chunk{
+ ChunkOffset: lastChunkOffset,
+ Offset: lastOffset,
+ Checksum: chunkDigester.Digest().String(),
+ ChunkSize: chunkSize,
+ ChunkType: chunkType,
+ })
+ }
+
+ lastOffset = off
+ lastChunkOffset = rcReader.WrittenOut
+ chunkDigester = digest.Canonical.Digester()
+ payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+ }
+ if errRead == io.EOF {
+ if startOffset > 0 {
+ checksum = payloadDigester.Digest().String()
+ }
+ break
+ }
+ }
+
+ mainEntry, err := minimal.NewFileMetadata(hdr)
+ if err != nil {
+ return err
+ }
+ mainEntry.Digest = checksum
+ mainEntry.Offset = startOffset
+ mainEntry.EndOffset = lastOffset
+ entries := []minimal.FileMetadata{mainEntry}
+ for i := 1; i < len(chunks); i++ {
+ entries = append(entries, minimal.FileMetadata{
+ Type: minimal.TypeChunk,
+ Name: hdr.Name,
+ ChunkOffset: chunks[i].ChunkOffset,
+ })
+ }
+ if len(chunks) > 1 {
+ for i := range chunks {
+ entries[i].ChunkSize = chunks[i].ChunkSize
+ entries[i].Offset = chunks[i].Offset
+ entries[i].ChunkDigest = chunks[i].Checksum
+ entries[i].ChunkType = chunks[i].ChunkType
+ }
+ }
+ metadata = append(metadata, entries...)
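+ // entries[0] is the whole-file entry and entries[1:] are its TypeChunk
+ // entries; when the file was split into more than one chunk, the Chunk*
+ // fields were copied onto every entry above, including entries[0].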
+ } + + rawBytes := tr.RawBytes() + if _, err := zstdWriter.Write(rawBytes); err != nil { + zstdWriter.Close() + return err + } + + // make sure the entire tarball is flushed to the output as it might contain + // some trailing zeros that affect the checksum. + if _, err := io.Copy(zstdWriter, its); err != nil { + zstdWriter.Close() + return err + } + + if err := zstdWriter.Close(); err != nil { + return err + } + zstdWriter = nil + + if err := tarSplitData.zstd.Close(); err != nil { + return err + } + tarSplitData.zstd = nil + + ts := minimal.TarSplitData{ + Data: tarSplitData.compressed.Bytes(), + Digest: tarSplitData.digester.Digest(), + UncompressedSize: tarSplitData.uncompressedCounter.Count, + } + + return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, createZstdWriter) +} + +type zstdChunkedWriter struct { + tarSplitOut *io.PipeWriter + tarSplitErr chan error +} + +func (w zstdChunkedWriter) Close() error { + errClose := w.tarSplitOut.Close() + + if err := <-w.tarSplitErr; err != nil && err != io.EOF { + return err + } + return errClose +} + +func (w zstdChunkedWriter) Write(p []byte) (int, error) { + select { + case err := <-w.tarSplitErr: + w.tarSplitOut.Close() + return 0, err + default: + return w.tarSplitOut.Write(p) + } +} + +// makeZstdChunkedWriter writes a zstd compressed tarball where each file is +// compressed separately so it can be addressed separately. Idea based on CRFS: +// https://github.com/google/crfs +// The difference with CRFS is that the zstd compression is used instead of gzip. +// The reason for it is that zstd supports embedding metadata ignored by the decoder +// as part of the compressed stream. +// A manifest json file with all the metadata is appended at the end of the tarball +// stream, using zstd skippable frames. +// The final file will look like: +// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2] +// Where: +// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER] +// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] +// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER] +// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format. +func makeZstdChunkedWriter(out io.Writer, metadata map[string]string, createZstdWriter minimal.CreateZstdWriterFunc) (io.WriteCloser, error) { + ch := make(chan error, 1) + r, w := io.Pipe() + + go func() { + ch <- writeZstdChunkedStream(out, metadata, r, createZstdWriter) + _, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates. + r.Close() + close(ch) + }() + + return zstdChunkedWriter{ + tarSplitOut: w, + tarSplitErr: ch, + }, nil +} + +// ZstdCompressor is a CompressorFunc for the zstd compression algorithm. 
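+//
+// A caller producing a zstd:chunked blob might use it like this (a sketch;
+// outFile and tarStream are assumed to exist, and a nil level selects the
+// default compression level of 10):
+//
+// annotations := map[string]string{}
+// zw, err := ZstdCompressor(outFile, annotations, nil)
+// if err != nil {
+// return err
+// }
+// if _, err := io.Copy(zw, tarStream); err != nil {
+// zw.Close()
+// return err
+// }
+// if err := zw.Close(); err != nil {
+// return err
+// }
+// // annotations now holds the manifest-checksum and manifest/tar-split
+// // position keys needed by zstd:chunked consumers.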
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + l := 10 + level = &l + } + + createZstdWriter := func(dest io.Writer) (minimal.ZstdWriter, error) { + return minimal.ZstdWriterWithLevel(dest, *level) + } + + return makeZstdChunkedWriter(r, metadata, createZstdWriter) +} + +type noCompression struct { + dest io.Writer +} + +func (n *noCompression) Write(p []byte) (int, error) { + return n.dest.Write(p) +} + +func (n *noCompression) Close() error { + return nil +} + +func (n *noCompression) Flush() error { + return nil +} + +func (n *noCompression) Reset(dest io.Writer) { + n.dest = dest +} + +// NoCompression writes directly to the output file without any compression +// +// Such an output does not follow the zstd:chunked spec and cannot be generally consumed; this function +// only exists for internal purposes and should not be called from outside c/storage. +func NoCompression(r io.Writer, metadata map[string]string) (io.WriteCloser, error) { + createZstdWriter := func(dest io.Writer) (minimal.ZstdWriter, error) { + return &noCompression{dest: dest}, nil + } + return makeZstdChunkedWriter(r, metadata, createZstdWriter) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/chunked/compressor/rollsum.go b/tools/vendor/go.podman.io/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 000000000..59df6901e --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,85 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const ( + windowSize = 64 // Roll assumes windowSize is a power of 2 + charOffset = 31 +) + +const ( + blobBits = 13 + blobSize = 1 << blobBits // 8k +) + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. 
+func (rs *RollSum) OnSplitWithBits(n uint32) bool {
+ mask := (uint32(1) << n) - 1
+ return rs.s2&mask == (^uint32(0))&mask
+}
+
+func (rs *RollSum) Bits() int {
+ rsum := rs.Digest() >> (blobBits + 1)
+ return blobBits + bits.TrailingZeros32(^rsum)
+}
+
+func (rs *RollSum) Digest() uint32 {
+ return (rs.s1 << 16) | (rs.s2 & 0xffff)
+}
diff --git a/tools/vendor/go.podman.io/storage/pkg/chunked/internal/minimal/compression.go b/tools/vendor/go.podman.io/storage/pkg/chunked/internal/minimal/compression.go
new file mode 100644
index 000000000..256365759
--- /dev/null
+++ b/tools/vendor/go.podman.io/storage/pkg/chunked/internal/minimal/compression.go
@@ -0,0 +1,333 @@
+package minimal
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ jsoniter "github.com/json-iterator/go"
+ "github.com/klauspost/compress/zstd"
+ "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+ "go.podman.io/storage/pkg/archive"
+)
+
+// ZstdWriter is an interface that wraps standard io.WriteCloser and Reset() to reuse the compressor with a new writer.
+type ZstdWriter interface {
+ io.WriteCloser
+ Reset(dest io.Writer)
+}
+
+// CreateZstdWriterFunc is a function that creates a ZstdWriter for the provided destination writer.
+type CreateZstdWriterFunc func(dest io.Writer) (ZstdWriter, error)
+
+// TOC is short for Table of Contents and is used by the zstd:chunked
+// file format to effectively add an overall index into the contents
+// of a tarball; it also includes file metadata.
+type TOC struct {
+ // Version is currently expected to be 1
+ Version int `json:"version"`
+ // Entries is the list of file metadata in this TOC.
+ // The ordering in this array currently defaults to being the same
+ // as that of the tar stream; however, this should not be relied on.
+ Entries []FileMetadata `json:"entries"`
+ // TarSplitDigest is the checksum of the "tar-split" data which
+ // is included as a distinct skippable zstd frame before the TOC.
+ TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
+}
+
+// FileMetadata is an entry in the TOC that includes both generic file metadata
+// that duplicates what can be found in the tar header (and should match), but
+// also special/custom content (see below).
+//
+// Regular files may optionally be represented as a sequence of “chunks”,
+// which may be ChunkTypeData or ChunkTypeZeros (and ChunkTypeData boundaries
+// are heuristically determined to increase chance of chunk matching / reuse
+// similar to rsync). In that case, the regular file is represented
+// as an initial TypeReg entry (with all metadata for the file as a whole)
+// immediately followed by zero or more TypeChunk entries (containing only Type,
+// Name and Chunk* fields); if there is at least one TypeChunk entry, the Chunk*
+// fields are relevant in all of these entries, including the initial
+// TypeReg one.
+//
+// Note that the metadata here, when fetched by a zstd:chunked aware client,
+// is used instead of that in the tar stream. The contents of the tar stream
+// are not used in this scenario.
+type FileMetadata struct {
+ // If you add any fields, update ensureFileMetadataMatches as well!
+
+ // The metadata below largely duplicates that in the tar headers.
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Linkname string `json:"linkName,omitempty"`
+ Mode int64 `json:"mode,omitempty"`
+ Size int64 `json:"size,omitempty"`
+ UID int `json:"uid,omitempty"`
+ GID int `json:"gid,omitempty"`
+ ModTime *time.Time `json:"modtime,omitempty"`
+ AccessTime *time.Time `json:"accesstime,omitempty"`
+ ChangeTime *time.Time `json:"changetime,omitempty"`
+ Devmajor int64 `json:"devMajor,omitempty"`
+ Devminor int64 `json:"devMinor,omitempty"`
+ Xattrs map[string]string `json:"xattrs,omitempty"`
+ // Digest is a hexadecimal sha256 checksum of the file contents; it
+ // is empty for empty files
+ Digest string `json:"digest,omitempty"`
+ Offset int64 `json:"offset,omitempty"`
+ EndOffset int64 `json:"endOffset,omitempty"`
+
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+ ChunkType string `json:"chunkType,omitempty"`
+}
+
+const (
+ ChunkTypeData = ""
+ ChunkTypeZeros = "zeros"
+)
+
+const (
+ // The following types correspond to regular types of entries that can
+ // appear in a tar archive.
+ TypeReg = "reg"
+ TypeLink = "hardlink"
+ TypeChar = "char"
+ TypeBlock = "block"
+ TypeDir = "dir"
+ TypeFifo = "fifo"
+ TypeSymlink = "symlink"
+ // TypeChunk is special; in zstd:chunked not only are files individually
+ // compressed and indexable, there is a "rolling checksum" used to compute
+ // "chunks" of individual file contents, that are also added to the TOC
+ TypeChunk = "chunk"
+)
+
+var TarTypes = map[byte]string{
+ tar.TypeReg: TypeReg,
+ tar.TypeLink: TypeLink,
+ tar.TypeChar: TypeChar,
+ tar.TypeBlock: TypeBlock,
+ tar.TypeDir: TypeDir,
+ tar.TypeFifo: TypeFifo,
+ tar.TypeSymlink: TypeSymlink,
+}
+
+func GetType(t byte) (string, error) {
+ r, found := TarTypes[t]
+ if !found {
+ return "", fmt.Errorf("unknown tarball type: %v", t)
+ }
+ return r, nil
+}
+
+const (
+ // ManifestChecksumKey is a hexadecimal sha256 digest of the compressed manifest.
+ ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
+ // ManifestInfoKey is an annotation that signals the start of the TOC (manifest)
+ // contents which are embedded as a skippable zstd frame. It has a format of
+ // four decimal integers separated by `:` as follows:
+ // <offset>:<length>:<uncompressed length>:<type>
+ // The <type> is ManifestTypeCRFS which should have the value `1`.
+ ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
+ // TarSplitInfoKey is an annotation that signals the start of the "tar-split" metadata
+ // contents which are embedded as a skippable zstd frame. It has a format of
+ // three decimal integers separated by `:` as follows:
+ // <offset>:<length>:<uncompressed length>
+ TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
+
+ // TarSplitChecksumKey is no longer used and is replaced by the TOC.TarSplitDigest field instead.
+ // The value is retained here as a constant as a historical reference for older zstd:chunked images.
+ // TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
+
+ // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
+ ManifestTypeCRFS = 1
+
+ // FooterSizeSupported is the footer size supported by this implementation.
+ // Newer versions of the image format might increase this value, so reject
+ // any version that is not supported.
+ FooterSizeSupported = 64
+)
+
+var (
+ // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
+ // will ignore it.
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2 + skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18} + + ZstdChunkedFrameMagic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} +) + +func appendZstdSkippableFrame(dest io.Writer, data []byte) error { + if _, err := dest.Write(skippableFrameMagic); err != nil { + return err + } + + size := make([]byte, 4) + binary.LittleEndian.PutUint32(size, uint32(len(data))) + if _, err := dest.Write(size); err != nil { + return err + } + if _, err := dest.Write(data); err != nil { + return err + } + return nil +} + +type TarSplitData struct { + Data []byte + Digest digest.Digest + UncompressedSize int64 +} + +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, createZstdWriter CreateZstdWriterFunc) error { + // 8 is the size of the zstd skippable frame header + the frame size + const zstdSkippableFrameHeader = 8 + manifestOffset := offset + zstdSkippableFrameHeader + + toc := TOC{ + Version: 1, + Entries: metadata, + TarSplitDigest: tarSplitData.Digest, + } + + json := jsoniter.ConfigCompatibleWithStandardLibrary + // Generate the manifest + manifest, err := json.Marshal(toc) + if err != nil { + return err + } + + var compressedBuffer bytes.Buffer + zstdWriter, err := createZstdWriter(&compressedBuffer) + if err != nil { + return err + } + if _, err := zstdWriter.Write(manifest); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + compressedManifest := compressedBuffer.Bytes() + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(compressedManifest); err != nil { + return err + } + + outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String() + outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS) + if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil { + return err + } + + tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader + outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize) + if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil { + return err + } + + footer := ZstdChunkedFooterData{ + ManifestType: uint64(ManifestTypeCRFS), + Offset: manifestOffset, + LengthCompressed: uint64(len(compressedManifest)), + LengthUncompressed: uint64(len(manifest)), + OffsetTarSplit: tarSplitOffset, + LengthCompressedTarSplit: uint64(len(tarSplitData.Data)), + LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize), + } + + manifestDataLE := footerDataToBlob(footer) + + return appendZstdSkippableFrame(dest, manifestDataLE) +} + +func ZstdWriterWithLevel(dest io.Writer, level int) (ZstdWriter, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer. +// This footer exists to make the blobs self-describing, our implementation +// never reads it: +// Partial pull security hinges on the TOC digest, and that exists as a layer annotation; +// so we are relying on the layer annotations anyway, and doing so means we can avoid +// a round-trip to fetch this binary footer. 
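+//
+// For illustration only (not part of this package's API): a consumer that
+// did want the footer could decode the last FooterSizeSupported (64) bytes
+// of the blob, the payload of the final skippable frame produced by
+// footerDataToBlob below, as eight little-endian 8-byte slots:
+//
+// blob := data[len(data)-64:]
+// offset := binary.LittleEndian.Uint64(blob[0:8]) // Offset
+// lenCompressed := binary.LittleEndian.Uint64(blob[8:16]) // LengthCompressed
+// lenUncompressed := binary.LittleEndian.Uint64(blob[16:24]) // LengthUncompressed
+// manifestType := binary.LittleEndian.Uint64(blob[24:32]) // ManifestType
+// magic := blob[56:64] // must match ZstdChunkedFrameMagic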
+type ZstdChunkedFooterData struct {
+ ManifestType uint64
+
+ Offset uint64
+ LengthCompressed uint64
+ LengthUncompressed uint64
+
+ OffsetTarSplit uint64
+ LengthCompressedTarSplit uint64
+ LengthUncompressedTarSplit uint64
+ ChecksumAnnotationTarSplit string // Deprecated: This field is not a part of the footer and not used for any purpose.
+}
+
+func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
+ // Store the offset to the manifest and its size in LE order
+ manifestDataLE := make([]byte, FooterSizeSupported)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*0:], footer.Offset)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*1:], footer.LengthCompressed)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*2:], footer.LengthUncompressed)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*3:], footer.ManifestType)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*4:], footer.OffsetTarSplit)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*5:], footer.LengthCompressedTarSplit)
+ binary.LittleEndian.PutUint64(manifestDataLE[8*6:], footer.LengthUncompressedTarSplit)
+ copy(manifestDataLE[8*7:], ZstdChunkedFrameMagic)
+
+ return manifestDataLE
+}
+
+// timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
+func timeIfNotZero(t *time.Time) *time.Time {
+ if t == nil || t.IsZero() {
+ return nil
+ }
+ return t
+}
+
+// NewFileMetadata creates a basic FileMetadata entry for hdr.
+// The caller must set Digest, Offset/EndOffset, and the Chunk* values, separately.
+func NewFileMetadata(hdr *tar.Header) (FileMetadata, error) {
+ typ, err := GetType(hdr.Typeflag)
+ if err != nil {
+ return FileMetadata{}, err
+ }
+ xattrs := make(map[string]string)
+ for k, v := range hdr.PAXRecords {
+ xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr)
+ if !ok {
+ continue
+ }
+ xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v))
+ }
+ return FileMetadata{
+ Type: typ,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Mode: hdr.Mode,
+ Size: hdr.Size,
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: timeIfNotZero(&hdr.ModTime),
+ AccessTime: timeIfNotZero(&hdr.AccessTime),
+ ChangeTime: timeIfNotZero(&hdr.ChangeTime),
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: xattrs,
+ }, nil
+}
diff --git a/tools/vendor/go.podman.io/storage/pkg/chunked/toc/toc.go b/tools/vendor/go.podman.io/storage/pkg/chunked/toc/toc.go
new file mode 100644
index 000000000..7059b5406
--- /dev/null
+++ b/tools/vendor/go.podman.io/storage/pkg/chunked/toc/toc.go
@@ -0,0 +1,41 @@
+package toc
+
+import (
+ "errors"
+
+ digest "github.com/opencontainers/go-digest"
+ "go.podman.io/storage/pkg/chunked/internal/minimal"
+)
+
+// tocJSONDigestAnnotation is the annotation key for the digest of the estargz
+// TOC JSON.
+// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation
+// Duplicate it here to avoid a dependency on the package.
+const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
+// This function retrieves a digest that represents the content of a
+// table of contents (TOC) from the image's annotations.
+// This is an experimental feature and may be changed/removed in the future.
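+//
+// A sketch of typical use (layerAnnotations is assumed to come from an image
+// layer's annotations):
+//
+// d, err := GetTOCDigest(layerAnnotations)
+// if err != nil {
+// return err
+// }
+// if d != nil {
+// fmt.Println("TOC digest:", *d)
+// }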
+func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + d1, ok1 := annotations[tocJSONDigestAnnotation] + d2, ok2 := annotations[minimal.ManifestChecksumKey] + switch { + case ok1 && ok2: + return nil, errors.New("both zstd:chunked and eStargz TOC found") + case ok1: + d, err := digest.Parse(d1) + if err != nil { + return nil, err + } + return &d, nil + case ok2: + d, err := digest.Parse(d2) + if err != nil { + return nil, err + } + return &d, nil + default: + return nil, nil + } +} diff --git a/tools/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/exists_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/fileutils/exists_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/exists_unix.go similarity index 89% rename from tools/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go rename to tools/vendor/go.podman.io/storage/pkg/fileutils/exists_unix.go index 785b13317..04cfafcd5 100644 --- a/tools/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go +++ b/tools/vendor/go.podman.io/storage/pkg/fileutils/exists_unix.go @@ -13,7 +13,7 @@ import ( func Exists(path string) error { // It uses unix.Faccessat which is a faster operation compared to os.Stat for // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS) if err != nil { return &os.PathError{Op: "faccessat", Path: path, Err: err} } @@ -25,7 +25,7 @@ func Exists(path string) error { func Lexists(path string) error { // It uses unix.Faccessat which is a faster operation compared to os.Stat for // simply checking the existence of a file. 
- err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS) if err != nil { return &os.PathError{Op: "faccessat", Path: path, Err: err} } diff --git a/tools/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/exists_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go rename to tools/vendor/go.podman.io/storage/pkg/fileutils/exists_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go rename to tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go diff --git a/tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils_darwin.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go rename to tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils_darwin.go diff --git a/tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils_solaris.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go rename to tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils_solaris.go diff --git a/tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go rename to tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go rename to tools/vendor/go.podman.io/storage/pkg/fileutils/fileutils_windows.go diff --git a/tools/vendor/go.podman.io/storage/pkg/fileutils/reflink_linux.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/reflink_linux.go new file mode 100644 index 000000000..9f5c6c90b --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/fileutils/reflink_linux.go @@ -0,0 +1,20 @@ +package fileutils + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// ReflinkOrCopy attempts to reflink the source to the destination fd. +// If reflinking fails or is unsupported, it falls back to io.Copy(). +func ReflinkOrCopy(src, dst *os.File) error { + err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd())) + if err == nil { + return nil + } + + _, err = io.Copy(dst, src) + return err +} diff --git a/tools/vendor/go.podman.io/storage/pkg/fileutils/reflink_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/fileutils/reflink_unsupported.go new file mode 100644 index 000000000..c0a30e670 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/fileutils/reflink_unsupported.go @@ -0,0 +1,15 @@ +//go:build !linux + +package fileutils + +import ( + "io" + "os" +) + +// ReflinkOrCopy attempts to reflink the source to the destination fd. +// If reflinking fails or is unsupported, it falls back to io.Copy(). 
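+// On this !linux build there is no reflink ioctl available, so it always
+// takes the plain copy path.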
+func ReflinkOrCopy(src, dst *os.File) error { + _, err := io.Copy(dst, src) + return err +} diff --git a/tools/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/tools/vendor/go.podman.io/storage/pkg/homedir/homedir.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/homedir/homedir.go rename to tools/vendor/go.podman.io/storage/pkg/homedir/homedir.go diff --git a/tools/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/tools/vendor/go.podman.io/storage/pkg/homedir/homedir_unix.go similarity index 99% rename from tools/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go rename to tools/vendor/go.podman.io/storage/pkg/homedir/homedir_unix.go index f351b48bb..b088c2a85 100644 --- a/tools/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/tools/vendor/go.podman.io/storage/pkg/homedir/homedir_unix.go @@ -14,8 +14,8 @@ import ( "sync" "syscall" - "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" + "go.podman.io/storage/pkg/unshare" ) // Key returns the env var name for the user's home dir based on diff --git a/tools/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/tools/vendor/go.podman.io/storage/pkg/homedir/homedir_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go rename to tools/vendor/go.podman.io/storage/pkg/homedir/homedir_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/tools/vendor/go.podman.io/storage/pkg/idtools/idtools.go similarity index 96% rename from tools/vendor/github.com/containers/storage/pkg/idtools/idtools.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/idtools.go index 299bdbef7..6fcba9b33 100644 --- a/tools/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/tools/vendor/go.podman.io/storage/pkg/idtools/idtools.go @@ -14,8 +14,8 @@ import ( "sync" "syscall" - "github.com/containers/storage/pkg/system" "github.com/sirupsen/logrus" + "go.podman.io/storage/pkg/system" ) // IDMap contains a single entry for user namespace range remapping. 
An array @@ -429,25 +429,25 @@ func parseOverrideXattr(xstat []byte) (Stat, error) { var stat Stat attrs := strings.Split(string(xstat), ":") if len(attrs) < 3 { - return stat, fmt.Errorf("The number of parts in %s is less than 3", + return stat, fmt.Errorf("the number of parts in %s is less than 3", ContainersOverrideXattr) } value, err := strconv.ParseUint(attrs[0], 10, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse UID: %w", err) + return stat, fmt.Errorf("failed to parse UID: %w", err) } stat.IDs.UID = int(value) value, err = strconv.ParseUint(attrs[1], 10, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse GID: %w", err) + return stat, fmt.Errorf("failed to parse GID: %w", err) } stat.IDs.GID = int(value) value, err = strconv.ParseUint(attrs[2], 8, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse mode: %w", err) + return stat, fmt.Errorf("failed to parse mode: %w", err) } stat.Mode = os.FileMode(value) & os.ModePerm if value&0o1000 != 0 { @@ -484,7 +484,7 @@ func parseOverrideXattr(xstat []byte) (Stat, error) { return stat, err } } else { - return stat, fmt.Errorf("Invalid file type %s", typ) + return stat, fmt.Errorf("invalid file type %s", typ) } } return stat, nil @@ -494,18 +494,18 @@ func parseDevice(typ string) (int, int, error) { parts := strings.Split(typ, "-") // If there are more than 3 parts, just ignore them to be forward compatible if len(parts) < 3 { - return 0, 0, fmt.Errorf("Invalid device type %s", typ) + return 0, 0, fmt.Errorf("invalid device type %s", typ) } if parts[0] != "block" && parts[0] != "char" { - return 0, 0, fmt.Errorf("Invalid device type %s", typ) + return 0, 0, fmt.Errorf("invalid device type %s", typ) } major, err := strconv.Atoi(parts[1]) if err != nil { - return 0, 0, fmt.Errorf("Failed to parse major number: %w", err) + return 0, 0, fmt.Errorf("failed to parse major number: %w", err) } minor, err := strconv.Atoi(parts[2]) if err != nil { - return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err) + return 0, 0, fmt.Errorf("failed to parse minor number: %w", err) } return major, minor, nil } diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/tools/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go similarity index 91% rename from tools/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go index 2bd26d0e3..9a17f5701 100644 --- a/tools/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/tools/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go @@ -5,6 +5,7 @@ package idtools import ( "errors" "os/user" + "sync" "unsafe" ) @@ -13,16 +14,14 @@ import ( #include #include #include -const char *Prog = "storage"; -FILE *shadow_logfd = NULL; struct subid_range get_range(struct subid_range *ranges, int i) { - shadow_logfd = stderr; - return ranges[i]; + return ranges[i]; } #if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_init libsubid_init # define subid_get_uid_ranges get_subuid_ranges # define subid_get_gid_ranges get_subgid_ranges #endif @@ -30,6 +29,8 @@ struct subid_range get_range(struct subid_range *ranges, int i) */ import "C" +var onceInit sync.Once + func readSubid(username string, isUser bool) (ranges, error) { var ret ranges uidstr := "" @@ -42,6 +43,10 @@ func readSubid(username string, isUser bool) (ranges, error) { uidstr = u.Uid } + onceInit.Do(func() { + C.subid_init(C.CString("storage"), C.stderr) 
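+ // The subid_init call above registers the program name ("storage") and
+ // the stream libsubid uses for diagnostics; ABIs older than 4 map
+ // subid_init to libsubid_init via the cgo prologue. sync.Once ensures
+ // this setup runs at most once across readSubid calls.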
+ }) + cUsername := C.CString(username) defer C.free(unsafe.Pointer(cUsername)) diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/tools/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go similarity index 98% rename from tools/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go index 1da7dadbf..817b59aed 100644 --- a/tools/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/tools/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go @@ -12,9 +12,9 @@ import ( "sync" "syscall" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/system" "github.com/moby/sys/user" + "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/system" ) var ( diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/tools/vendor/go.podman.io/storage/pkg/idtools/idtools_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/idtools_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/parser.go b/tools/vendor/go.podman.io/storage/pkg/idtools/parser.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/idtools/parser.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/parser.go diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/tools/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go similarity index 99% rename from tools/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go index ac27718de..d2ff4466c 100644 --- a/tools/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ b/tools/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/containers/storage/pkg/regexp" + "go.podman.io/storage/pkg/regexp" ) // add a user and/or group to Linux /etc/passwd, /etc/group using standard diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/tools/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go rename to tools/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/buffer.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/buffer.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/buffer.go 
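The idtools change above replaces libsubid's implicit setup with an explicit one-time call guarded by sync.Once. A minimal, self-contained sketch of that pattern without cgo (initLibrary and lookup are illustrative names, not part of the patch):

    package main

    import (
        "fmt"
        "sync"
    )

    var once sync.Once

    // initLibrary stands in for a must-run-once setup call such as C.subid_init.
    func initLibrary() {
        fmt.Println("library initialized")
    }

    // lookup runs the one-time initialization before doing any real work;
    // sync.Once guarantees initLibrary executes at most once, even when
    // lookup is called from many goroutines.
    func lookup(name string) string {
        once.Do(initLibrary)
        return "subid ranges for " + name
    }

    func main() {
        fmt.Println(lookup("alice"))
        fmt.Println(lookup("bob")) // initLibrary does not run again
    }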
diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go similarity index 95% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go index 72a04f349..cf6058035 100644 --- a/tools/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go +++ b/tools/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go @@ -93,10 +93,7 @@ loop0: } // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } + nextCap := min(b.Cap()*2, maxCap) bp.buf = append(bp.buf, getBuffer(nextCap)) } bp.wait.Broadcast() @@ -178,7 +175,7 @@ func getBuffer(size int) *fixedBuffer { bufPoolsLock.Lock() pool, ok := bufPools[size] if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + pool = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }} bufPools[size] = pool } bufPoolsLock.Unlock() diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/fswriters.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/fswriters.go diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/fswriters_linux.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/fswriters_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/fswriters_other.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/fswriters_other.go diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/readers.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/readers.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/readers.go diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/temp_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/temp_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/temp_windows.go similarity index 87% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/temp_windows.go index 79837fb33..b60ad8f49 100644 --- a/tools/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go +++ b/tools/vendor/go.podman.io/storage/pkg/ioutils/temp_windows.go @@ -5,7 +5,7 @@ package ioutils import ( "os" - "github.com/containers/storage/pkg/longpath" + "go.podman.io/storage/pkg/longpath" ) // TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. 
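The bytespipe.go hunk above caps buffer growth with the Go 1.21 min builtin and keeps one sync.Pool per fixed buffer size, created lazily under a lock. A small runnable sketch of that per-size pooling scheme (bufPools and getBuf are illustrative names, not the vendored API):

    package main

    import (
        "fmt"
        "sync"
    )

    var (
        bufPoolsLock sync.Mutex
        bufPools     = map[int]*sync.Pool{}
    )

    // getBuf returns a zero-length buffer with the requested capacity, creating
    // the per-size pool on first use, mirroring getBuffer in bytespipe.go.
    func getBuf(size int) []byte {
        bufPoolsLock.Lock()
        pool, ok := bufPools[size]
        if !ok {
            pool = &sync.Pool{New: func() any { return make([]byte, 0, size) }}
            bufPools[size] = pool
        }
        bufPoolsLock.Unlock()
        return pool.Get().([]byte)
    }

    func main() {
        b := getBuf(32 * 1024)
        fmt.Println(cap(b)) // 32768
    }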
diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/writeflusher.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/writeflusher.go diff --git a/tools/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/tools/vendor/go.podman.io/storage/pkg/ioutils/writers.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/ioutils/writers.go rename to tools/vendor/go.podman.io/storage/pkg/ioutils/writers.go diff --git a/tools/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go b/tools/vendor/go.podman.io/storage/pkg/lockfile/lastwrite.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go rename to tools/vendor/go.podman.io/storage/pkg/lockfile/lastwrite.go diff --git a/tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile.go similarity index 92% rename from tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go rename to tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile.go index 52f6c0a62..52b4fe597 100644 --- a/tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile.go @@ -6,6 +6,8 @@ import ( "path/filepath" "sync" "time" + + "go.podman.io/storage/internal/rawfilelock" ) // A Locker represents a file lock where the file is used to cache an @@ -55,13 +57,6 @@ type Locker interface { AssertLockedForWriting() } -type lockType byte - -const ( - readLock lockType = iota - writeLock -) - // LockFile represents a file lock where the file is used to cache an // identifier of the last party that made changes to whatever's being protected // by the lock. @@ -79,12 +74,12 @@ type LockFile struct { stateMutex *sync.Mutex counter int64 lw LastWrite // A global value valid as of the last .Touch() or .Modified() - lockType lockType + lockType rawfilelock.LockType locked bool // The following fields are only modified on transitions between counter == 0 / counter != 0. // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. // In other cases, they need to be protected using stateMutex. - fd fileHandle + fd rawfilelock.FileHandle } var ( @@ -129,12 +124,12 @@ func (l *LockFile) Lock() { if l.ro { panic("can't take write lock on read-only lock file") } - l.lock(writeLock) + l.lock(rawfilelock.WriteLock) } // RLock locks the lockfile as a reader. func (l *LockFile) RLock() { - l.lock(readLock) + l.lock(rawfilelock.ReadLock) } // TryLock attempts to lock the lockfile as a writer. Panic if the lock is a read-only one. @@ -142,12 +137,12 @@ func (l *LockFile) TryLock() error { if l.ro { panic("can't take write lock on read-only lock file") } - return l.tryLock(writeLock) + return l.tryLock(rawfilelock.WriteLock) } // TryRLock attempts to lock the lockfile as a reader. func (l *LockFile) TryRLock() error { - return l.tryLock(readLock) + return l.tryLock(rawfilelock.ReadLock) } // Unlock unlocks the lockfile. @@ -172,9 +167,9 @@ func (l *LockFile) Unlock() { l.locked = false // Close the file descriptor on the last unlock, releasing the // file lock. 
- unlockAndCloseHandle(l.fd) + rawfilelock.UnlockAndCloseHandle(l.fd) } - if l.lockType == readLock { + if l.lockType == rawfilelock.ReadLock { l.rwMutex.RUnlock() } else { l.rwMutex.Unlock() @@ -206,7 +201,7 @@ func (l *LockFile) AssertLockedForWriting() { l.AssertLocked() // Like AssertLocked, don’t even bother with l.stateMutex. - if l.lockType == readLock { + if l.lockType == rawfilelock.ReadLock { panic("internal error: lock is not held for writing") } } @@ -273,7 +268,7 @@ func (l *LockFile) Touch() error { return err } l.stateMutex.Lock() - if !l.locked || (l.lockType == readLock) { + if !l.locked || (l.lockType == rawfilelock.ReadLock) { panic("attempted to update last-writer in lockfile without the write lock") } defer l.stateMutex.Unlock() @@ -324,6 +319,24 @@ func getLockfile(path string, ro bool) (*LockFile, error) { return lockFile, nil } +// openLock opens a lock file at the specified path, creating the parent directory if it does not exist. +func openLock(path string, readOnly bool) (rawfilelock.FileHandle, error) { + fd, err := rawfilelock.OpenLock(path, readOnly) + if err == nil { + return fd, nil + } + + // the directory of the lockfile seems to be removed, try to create it + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return fd, fmt.Errorf("creating lock file directory: %w", err) + } + + return openLock(path, readOnly) + } + return fd, &os.PathError{Op: "open", Path: path, Err: err} +} + // createLockFileForPath returns new *LockFile object, possibly (depending on the platform) // working inter-process and associated with the specified path. // @@ -343,11 +356,11 @@ func createLockFileForPath(path string, ro bool) (*LockFile, error) { if err != nil { return nil, err } - unlockAndCloseHandle(fd) + rawfilelock.UnlockAndCloseHandle(fd) - lType := writeLock + lType := rawfilelock.WriteLock if ro { - lType = readLock + lType = rawfilelock.ReadLock } return &LockFile{ @@ -362,40 +375,10 @@ func createLockFileForPath(path string, ro bool) (*LockFile, error) { }, nil } -// openLock opens the file at path and returns the corresponding file -// descriptor. The path is opened either read-only or read-write, -// depending on the value of ro argument. -// -// openLock will create the file and its parent directories, -// if necessary. -func openLock(path string, ro bool) (fd fileHandle, err error) { - flags := os.O_CREATE - if ro { - flags |= os.O_RDONLY - } else { - flags |= os.O_RDWR - } - fd, err = openHandle(path, flags) - if err == nil { - return fd, nil - } - - // the directory of the lockfile seems to be removed, try to create it - if os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { - return fd, fmt.Errorf("creating lock file directory: %w", err) - } - - return openLock(path, ro) - } - - return fd, &os.PathError{Op: "open", Path: path, Err: err} -} - // lock locks the lockfile via syscall based on the specified type and // command. -func (l *LockFile) lock(lType lockType) { - if lType == readLock { +func (l *LockFile) lock(lType rawfilelock.LockType) { + if lType == rawfilelock.ReadLock { l.rwMutex.RLock() } else { l.rwMutex.Lock() @@ -413,7 +396,7 @@ func (l *LockFile) lock(lType lockType) { // Optimization: only use the (expensive) syscall when // the counter is 0. In this case, we're either the first // reader lock or a writer lock. 
- if err := lockHandle(l.fd, lType, false); err != nil { + if err := rawfilelock.LockFile(l.fd, lType); err != nil { panic(err) } } @@ -424,10 +407,10 @@ func (l *LockFile) lock(lType lockType) { // lock locks the lockfile via syscall based on the specified type and // command. -func (l *LockFile) tryLock(lType lockType) error { +func (l *LockFile) tryLock(lType rawfilelock.LockType) error { var success bool var rwMutexUnlocker func() - if lType == readLock { + if lType == rawfilelock.ReadLock { success = l.rwMutex.TryRLock() rwMutexUnlocker = l.rwMutex.RUnlock } else { @@ -451,8 +434,8 @@ func (l *LockFile) tryLock(lType lockType) error { // Optimization: only use the (expensive) syscall when // the counter is 0. In this case, we're either the first // reader lock or a writer lock. - if err = lockHandle(l.fd, lType, true); err != nil { - closeHandle(fd) + if err = rawfilelock.TryLockFile(l.fd, lType); err != nil { + rawfilelock.CloseHandle(fd) rwMutexUnlocker() return err } diff --git a/tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile_unix.go similarity index 70% rename from tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go rename to tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile_unix.go index 885f2f88a..780ad8aef 100644 --- a/tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile_unix.go @@ -5,12 +5,10 @@ package lockfile import ( "time" - "github.com/containers/storage/pkg/system" + "go.podman.io/storage/pkg/system" "golang.org/x/sys/unix" ) -type fileHandle uintptr - // GetLastWrite returns a LastWrite value corresponding to current state of the lock. // This is typically called before (_not after_) loading the state when initializing a consumer // of the data protected by the lock. @@ -66,41 +64,3 @@ func (l *LockFile) TouchedSince(when time.Time) bool { touched := time.Unix(mtim.Unix()) return when.Before(touched) } - -func openHandle(path string, mode int) (fileHandle, error) { - mode |= unix.O_CLOEXEC - fd, err := unix.Open(path, mode, 0o644) - return fileHandle(fd), err -} - -func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error { - fType := unix.F_RDLCK - if lType != readLock { - fType = unix.F_WRLCK - } - lk := unix.Flock_t{ - Type: int16(fType), - Whence: int16(unix.SEEK_SET), - Start: 0, - Len: 0, - } - cmd := unix.F_SETLKW - if nonblocking { - cmd = unix.F_SETLK - } - for { - err := unix.FcntlFlock(uintptr(fd), cmd, &lk) - if err == nil || nonblocking { - return err - } - time.Sleep(10 * time.Millisecond) - } -} - -func unlockAndCloseHandle(fd fileHandle) { - unix.Close(int(fd)) -} - -func closeHandle(fd fileHandle) { - unix.Close(int(fd)) -} diff --git a/tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile_windows.go similarity index 71% rename from tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go rename to tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile_windows.go index 0cc1c50cc..e66f7bfbb 100644 --- a/tools/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/tools/vendor/go.podman.io/storage/pkg/lockfile/lockfile_windows.go @@ -14,8 +14,6 @@ const ( allBytes = ^uint32(0) ) -type fileHandle windows.Handle - // GetLastWrite returns a LastWrite value corresponding to current state of the lock. 
// This is typically called before (_not after_) loading the state when initializing a consumer // of the data protected by the lock. @@ -73,37 +71,3 @@ func (l *LockFile) TouchedSince(when time.Time) bool { } return when.Before(stat.ModTime()) } - -func openHandle(path string, mode int) (fileHandle, error) { - mode |= windows.O_CLOEXEC - fd, err := windows.Open(path, mode, windows.S_IWRITE) - return fileHandle(fd), err -} - -func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error { - flags := 0 - if lType != readLock { - flags = windows.LOCKFILE_EXCLUSIVE_LOCK - } - if nonblocking { - flags |= windows.LOCKFILE_FAIL_IMMEDIATELY - } - ol := new(windows.Overlapped) - if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { - if nonblocking { - return err - } - panic(err) - } - return nil -} - -func unlockAndCloseHandle(fd fileHandle) { - ol := new(windows.Overlapped) - windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) - closeHandle(fd) -} - -func closeHandle(fd fileHandle) { - windows.Close(windows.Handle(fd)) -} diff --git a/tools/vendor/github.com/containers/storage/pkg/longpath/longpath.go b/tools/vendor/go.podman.io/storage/pkg/longpath/longpath.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/longpath/longpath.go rename to tools/vendor/go.podman.io/storage/pkg/longpath/longpath.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/flags.go b/tools/vendor/go.podman.io/storage/pkg/mount/flags.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/flags.go rename to tools/vendor/go.podman.io/storage/pkg/mount/flags.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/mount/flags_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/mount/flags_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/tools/vendor/go.podman.io/storage/pkg/mount/flags_linux.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/flags_linux.go rename to tools/vendor/go.podman.io/storage/pkg/mount/flags_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/mount/flags_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/mount/flags_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/mount.go b/tools/vendor/go.podman.io/storage/pkg/mount/mount.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/mount.go rename to tools/vendor/go.podman.io/storage/pkg/mount/mount.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/tools/vendor/go.podman.io/storage/pkg/mount/mounter_linux.go similarity index 100% rename from 
tools/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go rename to tools/vendor/go.podman.io/storage/pkg/mount/mounter_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/mount/mounter_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/mount/mounter_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/tools/vendor/go.podman.io/storage/pkg/mount/mountinfo.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/mountinfo.go rename to tools/vendor/go.podman.io/storage/pkg/mount/mountinfo.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/tools/vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go rename to tools/vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/tools/vendor/go.podman.io/storage/pkg/mount/sharedsubtree_linux.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go rename to tools/vendor/go.podman.io/storage/pkg/mount/sharedsubtree_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/tools/vendor/go.podman.io/storage/pkg/mount/unmount_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go rename to tools/vendor/go.podman.io/storage/pkg/mount/unmount_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/mount/unmount_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/mount/unmount_unsupported.go diff --git a/tools/vendor/go.podman.io/storage/pkg/pools/pools.go b/tools/vendor/go.podman.io/storage/pkg/pools/pools.go new file mode 100644 index 000000000..78b729c2e --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "go.podman.io/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. 
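The package doc above describes the intent: recycle bufio buffers instead of allocating a fresh 32K buffer per copy. A minimal usage sketch (the import path matches the vendored location; the payload and destination are hypothetical):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"go.podman.io/storage/pkg/pools"
)

func main() {
	src := strings.NewReader("some payload")
	var dst bytes.Buffer

	// Copy borrows a pooled 32K bufio.Reader around src and returns
	// it to the pool when the copy finishes.
	n, err := pools.Copy(&dst, src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes: %q\n", n, dst.String())

	// Get/Put can also bracket a reader manually.
	buf := pools.BufioReader32KPool.Get(strings.NewReader("line one\n"))
	defer pools.BufioReader32KPool.Put(buf)
	line, err := buf.ReadString('\n')
	if err != nil {
		panic(err)
	}
	fmt.Print(line)
}
```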
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() any { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool *sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := &sync.Pool{ + New: func() any { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.WriteCloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/tools/vendor/go.podman.io/storage/pkg/promise/promise.go b/tools/vendor/go.podman.io/storage/pkg/promise/promise.go new file mode 100644 index 000000000..dd52b9082 --- /dev/null +++ b/tools/vendor/go.podman.io/storage/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps a function call in a goroutine, +// and returns a channel which will later return the function's return value.
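The doc comment above is the helper's entire contract: run f in a goroutine and deliver its error on a buffered channel, so the send never blocks. A hypothetical caller (import path as vendored):

```go
package main

import (
	"errors"
	"fmt"

	"go.podman.io/storage/pkg/promise"
)

func main() {
	// Kick off the work; promise.Go returns immediately.
	errCh := promise.Go(func() error {
		return errors.New("work failed")
	})

	// Do other things here, then join on the result whenever convenient.
	if err := <-errCh; err != nil {
		fmt.Println("got:", err)
	}
}
```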
+func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/tools/vendor/github.com/containers/storage/pkg/reexec/README.md b/tools/vendor/go.podman.io/storage/pkg/reexec/README.md similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/reexec/README.md rename to tools/vendor/go.podman.io/storage/pkg/reexec/README.md diff --git a/tools/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/reexec/command_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/reexec/command_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/tools/vendor/go.podman.io/storage/pkg/reexec/command_linux.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/reexec/command_linux.go rename to tools/vendor/go.podman.io/storage/pkg/reexec/command_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/tools/vendor/go.podman.io/storage/pkg/reexec/command_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/reexec/command_unix.go rename to tools/vendor/go.podman.io/storage/pkg/reexec/command_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/reexec/command_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/reexec/command_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/tools/vendor/go.podman.io/storage/pkg/reexec/command_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/reexec/command_windows.go rename to tools/vendor/go.podman.io/storage/pkg/reexec/command_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/tools/vendor/go.podman.io/storage/pkg/reexec/reexec.go similarity index 97% rename from tools/vendor/github.com/containers/storage/pkg/reexec/reexec.go rename to tools/vendor/go.podman.io/storage/pkg/reexec/reexec.go index 0c032e6c4..a1938cd4f 100644 --- a/tools/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/tools/vendor/go.podman.io/storage/pkg/reexec/reexec.go @@ -49,7 +49,7 @@ func panicIfNotInitialized() { } } -func naiveSelf() string { //nolint: unused +func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { if lp, err := exec.LookPath(name); err == nil { diff --git a/tools/vendor/github.com/containers/storage/pkg/regexp/regexp.go b/tools/vendor/go.podman.io/storage/pkg/regexp/regexp.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/regexp/regexp.go rename to tools/vendor/go.podman.io/storage/pkg/regexp/regexp.go diff --git a/tools/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go b/tools/vendor/go.podman.io/storage/pkg/regexp/regexp_dontprecompile.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go rename to tools/vendor/go.podman.io/storage/pkg/regexp/regexp_dontprecompile.go diff --git a/tools/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go 
b/tools/vendor/go.podman.io/storage/pkg/regexp/regexp_precompile.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go rename to tools/vendor/go.podman.io/storage/pkg/regexp/regexp_precompile.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/chmod.go b/tools/vendor/go.podman.io/storage/pkg/system/chmod.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/chmod.go rename to tools/vendor/go.podman.io/storage/pkg/system/chmod.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/chtimes.go b/tools/vendor/go.podman.io/storage/pkg/system/chtimes.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/chtimes.go rename to tools/vendor/go.podman.io/storage/pkg/system/chtimes.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/tools/vendor/go.podman.io/storage/pkg/system/chtimes_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go rename to tools/vendor/go.podman.io/storage/pkg/system/chtimes_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/chtimes_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/chtimes_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/errors.go b/tools/vendor/go.podman.io/storage/pkg/system/errors.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/errors.go rename to tools/vendor/go.podman.io/storage/pkg/system/errors.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/exitcode.go b/tools/vendor/go.podman.io/storage/pkg/system/exitcode.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/exitcode.go rename to tools/vendor/go.podman.io/storage/pkg/system/exitcode.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/system/extattr_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/extattr_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/system/extattr_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/system/extattr_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/init.go b/tools/vendor/go.podman.io/storage/pkg/system/init.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/init.go rename to tools/vendor/go.podman.io/storage/pkg/system/init.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/init_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/init_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/init_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/init_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go b/tools/vendor/go.podman.io/storage/pkg/system/lchflags_bsd.go similarity 
index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/lchflags_bsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/lchown.go b/tools/vendor/go.podman.io/storage/pkg/system/lchown.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/lchown.go rename to tools/vendor/go.podman.io/storage/pkg/system/lchown.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/tools/vendor/go.podman.io/storage/pkg/system/lcow_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/lcow_unix.go rename to tools/vendor/go.podman.io/storage/pkg/system/lcow_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/lcow_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/lcow_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/lcow_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/tools/vendor/go.podman.io/storage/pkg/system/lstat_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/lstat_unix.go rename to tools/vendor/go.podman.io/storage/pkg/system/lstat_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/lstat_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/lstat_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/lstat_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/meminfo.go b/tools/vendor/go.podman.io/storage/pkg/system/meminfo.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/meminfo.go rename to tools/vendor/go.podman.io/storage/pkg/system/meminfo.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/system/meminfo_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/meminfo_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/tools/vendor/go.podman.io/storage/pkg/system/meminfo_linux.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go rename to tools/vendor/go.podman.io/storage/pkg/system/meminfo_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/tools/vendor/go.podman.io/storage/pkg/system/meminfo_solaris.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go rename to tools/vendor/go.podman.io/storage/pkg/system/meminfo_solaris.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/system/meminfo_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/system/meminfo_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/meminfo_windows.go similarity index 100% rename 
from tools/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/meminfo_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/mknod.go b/tools/vendor/go.podman.io/storage/pkg/system/mknod.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/mknod.go rename to tools/vendor/go.podman.io/storage/pkg/system/mknod.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/system/mknod_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/mknod_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/mknod_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/mknod_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/mknod_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/path.go b/tools/vendor/go.podman.io/storage/pkg/system/path.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/path.go rename to tools/vendor/go.podman.io/storage/pkg/system/path.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/path_unix.go b/tools/vendor/go.podman.io/storage/pkg/system/path_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/path_unix.go rename to tools/vendor/go.podman.io/storage/pkg/system/path_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/path_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/path_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/path_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/path_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/process_unix.go b/tools/vendor/go.podman.io/storage/pkg/system/process_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/process_unix.go rename to tools/vendor/go.podman.io/storage/pkg/system/process_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/rm.go b/tools/vendor/go.podman.io/storage/pkg/system/rm.go similarity index 98% rename from tools/vendor/github.com/containers/storage/pkg/system/rm.go rename to tools/vendor/go.podman.io/storage/pkg/system/rm.go index 12243707a..c151c1449 100644 --- a/tools/vendor/github.com/containers/storage/pkg/system/rm.go +++ b/tools/vendor/go.podman.io/storage/pkg/system/rm.go @@ -7,8 +7,8 @@ import ( "syscall" "time" - "github.com/containers/storage/pkg/mount" "github.com/sirupsen/logrus" + "go.podman.io/storage/pkg/mount" ) // EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can diff --git a/tools/vendor/github.com/containers/storage/pkg/system/rm_common.go b/tools/vendor/go.podman.io/storage/pkg/system/rm_common.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/rm_common.go rename to tools/vendor/go.podman.io/storage/pkg/system/rm_common.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/system/rm_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go 
rename to tools/vendor/go.podman.io/storage/pkg/system/rm_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_common.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_common.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_common.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_common.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_darwin.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_darwin.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_darwin.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_linux.go similarity index 84% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_linux.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_linux.go index e3d13463f..0dee88d1b 100644 --- a/tools/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/tools/vendor/go.podman.io/storage/pkg/system/stat_linux.go @@ -9,9 +9,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: uint64(s.Rdev), + rdev: uint64(s.Rdev), //nolint:unconvert mtim: s.Mtim, - dev: uint64(s.Dev), + dev: uint64(s.Dev), //nolint:unconvert }, nil } diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_openbsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_openbsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_solaris.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_solaris.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_solaris.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_unix.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_unix.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/stat_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/stat_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/stat_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/tools/vendor/go.podman.io/storage/pkg/system/syscall_unix.go similarity index 100% rename from 
tools/vendor/github.com/containers/storage/pkg/system/syscall_unix.go rename to tools/vendor/go.podman.io/storage/pkg/system/syscall_unix.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/syscall_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/syscall_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/syscall_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/umask.go b/tools/vendor/go.podman.io/storage/pkg/system/umask.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/umask.go rename to tools/vendor/go.podman.io/storage/pkg/system/umask.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/tools/vendor/go.podman.io/storage/pkg/system/umask_windows.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/umask_windows.go rename to tools/vendor/go.podman.io/storage/pkg/system/umask_windows.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/system/utimes_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/utimes_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/tools/vendor/go.podman.io/storage/pkg/system/utimes_linux.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/utimes_linux.go rename to tools/vendor/go.podman.io/storage/pkg/system/utimes_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/system/utimes_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/system/utimes_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/tools/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go rename to tools/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/tools/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go rename to tools/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go diff --git a/tools/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/system/xattrs_unsupported.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/system/xattrs_unsupported.go diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go 
b/tools/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_cgo.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_cgo.go diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/tools/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_nocgo.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_nocgo.go diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare.c similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare.c rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare.c diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare.go diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_cgo.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare_cgo.go diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_darwin.go similarity index 97% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare_darwin.go index 5d0a7a683..a9daf714c 100644 --- a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go +++ b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_darwin.go @@ -5,8 +5,8 @@ package unshare import ( "os" - "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/runtime-spec/specs-go" + "go.podman.io/storage/pkg/idtools" ) const ( diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.c similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.c diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.go similarity index 98% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.go index 37a87fa5b..2b81f896b 100644 --- a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go +++ b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.go @@ -13,8 +13,8 @@ import ( "strconv" "syscall" - "github.com/containers/storage/pkg/reexec" "github.com/sirupsen/logrus" + "go.podman.io/storage/pkg/reexec" ) // Cmd wraps an exec.Cmd created by the reexec package in unshare(), diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_gccgo.go similarity index 100% rename from 
tools/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare_gccgo.go diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_linux.go similarity index 97% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare_linux.go index b45a6819a..7cb069c78 100644 --- a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_linux.go @@ -18,11 +18,11 @@ import ( "sync" "syscall" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/reexec" "github.com/moby/sys/capability" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" + "go.podman.io/storage/pkg/idtools" + "go.podman.io/storage/pkg/reexec" ) // Cmd wraps an exec.Cmd created by the reexec package in unshare(), and @@ -32,9 +32,9 @@ type Cmd struct { *exec.Cmd UnshareFlags int UseNewuidmap bool - UidMappings []specs.LinuxIDMapping // nolint: revive,golint + UidMappings []specs.LinuxIDMapping //nolint: revive UseNewgidmap bool - GidMappings []specs.LinuxIDMapping // nolint: revive,golint + GidMappings []specs.LinuxIDMapping //nolint: revive GidMappingsEnableSetgroups bool Setsid bool Setpgrp bool @@ -98,7 +98,7 @@ func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error return cap.Get(capability.EFFECTIVE, capid), nil } -func (c *Cmd) Start() error { +func (c *Cmd) Start() (retErr error) { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -167,6 +167,15 @@ func (c *Cmd) Start() error { return err } + // If the function fails from here, we need to make sure the + // child process is killed and properly cleaned up. + defer func() { + if retErr != nil { + _ = c.Cmd.Process.Kill() + _ = c.Cmd.Wait() + } + }() + // Close the ends of the pipes that the parent doesn't need. continueRead.Close() continueRead = nil @@ -240,7 +249,7 @@ func (c *Cmd) Start() error { if err != nil { return fmt.Errorf("finding newgidmap: %w", err) } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(g.String())...)...) g.Reset() cmd.Stdout = g cmd.Stderr = g @@ -258,7 +267,7 @@ func (c *Cmd) Start() error { } logrus.Warnf("Falling back to single mapping") g.Reset() - g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) + fmt.Fprintf(g, "0 %d 1\n", os.Getegid()) } } if !gidmapSet { @@ -300,7 +309,7 @@ func (c *Cmd) Start() error { if err != nil { return fmt.Errorf("finding newuidmap: %w", err) } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(u.String())...)...) 
u.Reset() cmd.Stdout = u cmd.Stderr = u @@ -319,7 +328,7 @@ func (c *Cmd) Start() error { logrus.Warnf("Falling back to single mapping") u.Reset() - u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) + fmt.Fprintf(u, "0 %d 1\n", os.Geteuid()) } } if !uidmapSet { @@ -459,7 +468,7 @@ type Runnable interface { Run() error } -func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname +func bailOnError(err error, format string, a ...any) { //nolint:revive,goprintffuncname if err != nil { if format != "" { logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported.go similarity index 97% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported.go index 05706b8fe..3b463627c 100644 --- a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go +++ b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported.go @@ -5,8 +5,8 @@ package unshare import ( "os" - "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/runtime-spec/specs-go" + "go.podman.io/storage/pkg/idtools" ) const ( diff --git a/tools/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/tools/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported_cgo.go similarity index 100% rename from tools/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go rename to tools/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported_cgo.go diff --git a/tools/vendor/go.yaml.in/yaml/v2/.travis.yml b/tools/vendor/go.yaml.in/yaml/v2/.travis.yml new file mode 100644 index 000000000..7348c50c0 --- /dev/null +++ b/tools/vendor/go.yaml.in/yaml/v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/tools/vendor/go.yaml.in/yaml/v2/LICENSE similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE rename to tools/vendor/go.yaml.in/yaml/v2/LICENSE diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/tools/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml rename to tools/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/tools/vendor/go.yaml.in/yaml/v2/NOTICE similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE rename to tools/vendor/go.yaml.in/yaml/v2/NOTICE diff --git a/tools/vendor/go.yaml.in/yaml/v2/README.md b/tools/vendor/go.yaml.in/yaml/v2/README.md new file mode 100644 index 000000000..c9388da42 --- /dev/null +++ b/tools/vendor/go.yaml.in/yaml/v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. 
+ +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *go.yaml.in/yaml/v2*. + +To install it, run: + + go get go.yaml.in/yaml/v2 + +API documentation +----------------- + +See: <https://pkg.go.dev/go.yaml.in/yaml/v2> + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy!
+b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/tools/vendor/go.yaml.in/yaml/v2/apic.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go rename to tools/vendor/go.yaml.in/yaml/v2/apic.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/tools/vendor/go.yaml.in/yaml/v2/decode.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go rename to tools/vendor/go.yaml.in/yaml/v2/decode.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/tools/vendor/go.yaml.in/yaml/v2/emitterc.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go rename to tools/vendor/go.yaml.in/yaml/v2/emitterc.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/tools/vendor/go.yaml.in/yaml/v2/encode.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go rename to tools/vendor/go.yaml.in/yaml/v2/encode.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/tools/vendor/go.yaml.in/yaml/v2/parserc.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go rename to tools/vendor/go.yaml.in/yaml/v2/parserc.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/tools/vendor/go.yaml.in/yaml/v2/readerc.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go rename to tools/vendor/go.yaml.in/yaml/v2/readerc.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/tools/vendor/go.yaml.in/yaml/v2/resolve.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go rename to tools/vendor/go.yaml.in/yaml/v2/resolve.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/tools/vendor/go.yaml.in/yaml/v2/scannerc.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go rename to tools/vendor/go.yaml.in/yaml/v2/scannerc.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/tools/vendor/go.yaml.in/yaml/v2/sorter.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go rename to tools/vendor/go.yaml.in/yaml/v2/sorter.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/tools/vendor/go.yaml.in/yaml/v2/writerc.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go rename to tools/vendor/go.yaml.in/yaml/v2/writerc.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/tools/vendor/go.yaml.in/yaml/v2/yaml.go similarity index 99% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go rename to tools/vendor/go.yaml.in/yaml/v2/yaml.go index 30813884c..5248e1263 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go +++ b/tools/vendor/go.yaml.in/yaml/v2/yaml.go @@ -2,7 +2,7 @@ // // Source code and other details for the project are available at GitHub: // -// https://github.com/go-yaml/yaml +// https://github.com/yaml/go-yaml // package yaml diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/tools/vendor/go.yaml.in/yaml/v2/yamlh.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go rename to tools/vendor/go.yaml.in/yaml/v2/yamlh.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/tools/vendor/go.yaml.in/yaml/v2/yamlprivateh.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go rename to tools/vendor/go.yaml.in/yaml/v2/yamlprivateh.go diff 
--git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE b/tools/vendor/go.yaml.in/yaml/v3/LICENSE similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE rename to tools/vendor/go.yaml.in/yaml/v3/LICENSE diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE b/tools/vendor/go.yaml.in/yaml/v3/NOTICE similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE rename to tools/vendor/go.yaml.in/yaml/v3/NOTICE diff --git a/tools/vendor/go.yaml.in/yaml/v3/README.md b/tools/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 000000000..15a85a635 --- /dev/null +++ b/tools/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: <https://pkg.go.dev/go.yaml.in/yaml/v3> + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data.
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go b/tools/vendor/go.yaml.in/yaml/v3/apic.go similarity index 99% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go rename to tools/vendor/go.yaml.in/yaml/v3/apic.go index ae7d049f1..05fd305da 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go +++ b/tools/vendor/go.yaml.in/yaml/v3/apic.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go b/tools/vendor/go.yaml.in/yaml/v3/decode.go similarity index 97% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go rename to tools/vendor/go.yaml.in/yaml/v3/decode.go index 0173b6982..02e2b17bf 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go +++ b/tools/vendor/go.yaml.in/yaml/v3/decode.go @@ -832,10 +832,10 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { if d.unmarshal(n.Content[i], k) { if mergedFields != nil { ki := k.Interface() - if mergedFields[ki] { + if d.getPossiblyUnhashableKey(mergedFields, ki) { continue } - mergedFields[ki] = true + d.setPossiblyUnhashableKey(mergedFields, ki, true) } kkind := k.Kind() if kkind == reflect.Interface { @@ -956,6 +956,24 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { mergedFields := d.mergedFields if mergedFields == nil { @@ -963,7 +981,7 @@ func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { for i := 0; i < len(parent.Content); i += 2 { k := reflect.New(ifaceType).Elem() if d.unmarshal(parent.Content[i], k) { - d.mergedFields[k.Interface()] = true + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) } } } diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go b/tools/vendor/go.yaml.in/yaml/v3/emitterc.go similarity index 98% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go rename to tools/vendor/go.yaml.in/yaml/v3/emitterc.go index 6ea0ae8c1..ab4e03ba7 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go +++ b/tools/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -162,10 +162,9 @@ func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { // Check if we need to accumulate more events before emitting. // // We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { if emitter.events_head == len(emitter.events) { return true @@ -485,6 +484,18 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") } +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + // Expect the root node. 
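The setPossiblyUnhashableKey/getPossiblyUnhashableKey helpers added to decode.go above exist because indexing a Go map with an unhashable key (a YAML sequence or mapping used as a merge key) panics at runtime; the helpers convert that panic into the decoder's normal failure path via recover. The same defensive pattern in isolation (a standalone sketch; setKey and its error text are illustrative, not the vendored names):

```go
package main

import "fmt"

// setKey records key in m, converting the runtime panic that
// unhashable keys (slices, maps, funcs) trigger into an error.
func setKey(m map[interface{}]bool, key interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("unusable map key: %v", r)
		}
	}()
	m[key] = true
	return nil
}

func main() {
	m := map[interface{}]bool{}
	fmt.Println(setKey(m, "scalar"))    // <nil>
	fmt.Println(setKey(m, []int{1, 2})) // unusable map key: runtime error: hash of unhashable type []int
}
```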
func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go b/tools/vendor/go.yaml.in/yaml/v3/encode.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go rename to tools/vendor/go.yaml.in/yaml/v3/encode.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go b/tools/vendor/go.yaml.in/yaml/v3/parserc.go similarity index 93% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go rename to tools/vendor/go.yaml.in/yaml/v3/parserc.go index 268558a0d..25fe82363 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go +++ b/tools/vendor/go.yaml.in/yaml/v3/parserc.go @@ -227,7 +227,8 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool // Parse the production: // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ +// +// ************ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -249,9 +250,12 @@ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// * +// +// * +// // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* +// +// ************************* func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { token := peek_token(parser) @@ -356,8 +360,8 @@ func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t // Parse the productions: // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** // +// *********** func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -379,9 +383,10 @@ func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -428,30 +433,41 @@ func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// // block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// // flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// // properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* +// +// ************************* +// // block_content ::= block_collection | flow_collection | SCALAR -// ****** +// +// ****** +// // flow_content ::= flow_collection | SCALAR -// ****** +// +// ****** func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() @@ -682,8 +698,8 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i // Parse the productions: // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* // +// ******************** *********** * ********* func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -740,7 +756,8 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e // Parse the productions: // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * +// +// *********** * func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -805,14 +822,14 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* // -// BLOCK-END -// ********* +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* // +// BLOCK-END +// ********* func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -881,13 +898,11 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// +// ((KEY block_node_or_indentless_sequence?)? // +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -915,16 +930,18 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev // Parse the productions: // flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -987,11 +1004,10 @@ func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_ev return true } -// // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * // +// *** * func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1011,8 +1027,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, ev // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * // +// ***** * func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1035,8 +1051,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1053,16 +1069,17 @@ func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, ev // Parse the productions: // flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * // +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - *** * func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -1128,8 +1145,7 @@ func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event // Parse the productions: // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// +// - ***** * func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { token := peek_token(parser) if token == nil { diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go b/tools/vendor/go.yaml.in/yaml/v3/readerc.go similarity index 99% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go rename to tools/vendor/go.yaml.in/yaml/v3/readerc.go index b7de0a89c..56af24536 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go +++ b/tools/vendor/go.yaml.in/yaml/v3/readerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go b/tools/vendor/go.yaml.in/yaml/v3/resolve.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go rename to tools/vendor/go.yaml.in/yaml/v3/resolve.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go b/tools/vendor/go.yaml.in/yaml/v3/scannerc.go similarity index 99% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go rename to tools/vendor/go.yaml.in/yaml/v3/scannerc.go index ca0070108..30b1f0892 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go +++ b/tools/vendor/go.yaml.in/yaml/v3/scannerc.go @@ -1614,11 +1614,11 @@ func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { // Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { // Eat '%'. start_mark := parser.mark @@ -1719,11 +1719,11 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool // Scan the directive name. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ // +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { // Consume the directive name. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1758,8 +1758,9 @@ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark // Scan the value of VERSION-DIRECTIVE. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ +// +// %YAML 1.1 # a comment \n +// ^^^^^^ func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { // Eat whitespaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1797,10 +1798,11 @@ const max_number_length = 2 // Scan the version number of VERSION-DIRECTIVE. // // Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { // Repeat while the next character is digit. @@ -1834,9 +1836,9 @@ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark // Scan the value of a TAG-DIRECTIVE token. // // Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { var handle_value, prefix_value []byte @@ -2847,7 +2849,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t continue } if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false @@ -2876,7 +2878,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t parser.comments = append(parser.comments, yaml_comment_t{ token_mark: token_mark, start_mark: start_mark, - line: text, + line: text, }) } return true @@ -2910,7 +2912,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo // the foot is the line below it. var foot_line = -1 if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 + foot_line = parser.mark.line - parser.newlines + 1 if parser.newlines == 0 && parser.mark.column > 1 { foot_line++ } @@ -2996,7 +2998,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo recent_empty = false // Consume until after the consumed comment line. - seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go b/tools/vendor/go.yaml.in/yaml/v3/sorter.go similarity index 100% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go rename to tools/vendor/go.yaml.in/yaml/v3/sorter.go diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go b/tools/vendor/go.yaml.in/yaml/v3/writerc.go similarity index 99% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go rename to tools/vendor/go.yaml.in/yaml/v3/writerc.go index b8a116bf9..266d0b092 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go +++ b/tools/vendor/go.yaml.in/yaml/v3/writerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go b/tools/vendor/go.yaml.in/yaml/v3/yaml.go similarity index 91% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go rename to tools/vendor/go.yaml.in/yaml/v3/yaml.go index 8cec6da48..0b101cd20 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go +++ b/tools/vendor/go.yaml.in/yaml/v3/yaml.go @@ -17,8 +17,7 @@ // // Source code and other details for the project are available at GitHub: // -// https://github.com/go-yaml/yaml -// +// https://github.com/yaml/go-yaml package yaml import ( @@ -75,16 +74,15 @@ type Marshaler interface { // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) // // See the documentation of Marshal for the format of tags and a list of // supported tag options. -// func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } @@ -185,36 +183,35 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // // The field tag format accepted is: // -// `(...) yaml:"[][,[,]]" (...)` +// `(...) yaml:"[][,[,]]" (...)` // // The following flags are currently supported: // -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. // -// flow Marshal using a flow style (useful for structs, -// sequences and maps). +// flow Marshal using a flow style (useful for structs, +// sequences and maps). // -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. // // In addition, if the key is "-", the field is ignored. // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() @@ -278,6 +275,16 @@ func (e *Encoder) SetIndent(spaces int) { e.encoder.indent = spaces } +// CompactSeqIndent makes it so that '- ' is considered part of the indentation. 
+func (e *Encoder) CompactSeqIndent() { + e.encoder.emitter.compact_sequence_indent = true +} + +// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. +func (e *Encoder) DefaultSeqIndent() { + e.encoder.emitter.compact_sequence_indent = false +} + // Close closes the encoder by writing any remaining data. // It does not write a stream terminating string "...". func (e *Encoder) Close() (err error) { @@ -358,22 +365,21 @@ const ( // // For example: // -// var person struct { -// Name string -// Address yaml.Node -// } -// err := yaml.Unmarshal(data, &person) -// -// Or by itself: +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) // -// var person Node -// err := yaml.Unmarshal(data, &person) +// Or by itself: // +// var person Node +// err := yaml.Unmarshal(data, &person) type Node struct { // Kind defines whether the node is a document, a mapping, a sequence, // a scalar value, or an alias to another node. The specific data type of // scalar nodes may be obtained via the ShortTag and LongTag methods. - Kind Kind + Kind Kind // Style allows customizing the apperance of the node in the tree. Style Style @@ -421,7 +427,6 @@ func (n *Node) IsZero() bool { n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 } - // LongTag returns the long form of the tag that indicates the data type for // the node. If the Tag field isn't explicitly defined, one will be computed // based on the node properties. diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go b/tools/vendor/go.yaml.in/yaml/v3/yamlh.go similarity index 99% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go rename to tools/vendor/go.yaml.in/yaml/v3/yamlh.go index 40c74de49..f59aa40f6 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go +++ b/tools/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -438,7 +438,9 @@ type yaml_document_t struct { // The number of written bytes should be set to the size_read variable. // // [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). +// +// yaml_parser_set_input(). +// // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. @@ -639,7 +641,6 @@ type yaml_parser_t struct { } type yaml_comment_t struct { - scan_mark yaml_mark_t // Position where scanning for comments started token_mark yaml_mark_t // Position after which tokens will be associated with this comment start_mark yaml_mark_t // Position of '#' comment mark @@ -659,13 +660,14 @@ type yaml_comment_t struct { // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). +// +// yaml_emitter_set_output(). +// // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. 
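The CompactSeqIndent/DefaultSeqIndent methods added above toggle the emitter's compact_sequence_indent flag. A minimal usage sketch, assuming the post-rename import path go.yaml.in/yaml/v3 and that compact mode emits the kubectl-style layout in which the '- ' marker occupies the parent indentation:

    package main

    import (
        "os"

        yaml "go.yaml.in/yaml/v3"
    )

    func main() {
        enc := yaml.NewEncoder(os.Stdout)
        enc.CompactSeqIndent() // '- ' is treated as part of the indentation
        if err := enc.Encode(map[string][]string{"args": {"serve", "--dev"}}); err != nil {
            panic(err)
        }
        _ = enc.Close()
    }

With CompactSeqIndent the sequence entries are expected to render with their '- ' markers at the same column as the key rather than indented beneath it; calling DefaultSeqIndent restores the indented form.
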
-// type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error type yaml_emitter_state_t int diff --git a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go b/tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go similarity index 97% rename from tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go rename to tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go index e88f9c54a..dea1ba961 100644 --- a/tools/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go +++ b/tools/vendor/go.yaml.in/yaml/v3/yamlprivateh.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE @@ -137,8 +137,8 @@ func is_crlf(b []byte, i int) bool { func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) return ( - // is_break: - b[i] == '\r' || // CR (#xD) + // is_break: + b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) @@ -151,8 +151,8 @@ func is_breakz(b []byte, i int) bool { func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) return ( - // is_space: - b[i] == ' ' || + // is_space: + b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) @@ -166,8 +166,8 @@ func is_spacez(b []byte, i int) bool { func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) return ( - // is_blank: - b[i] == ' ' || b[i] == '\t' || + // is_blank: + b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) diff --git a/tools/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/tools/vendor/golang.org/x/crypto/bcrypt/bcrypt.go index dc9311870..3e7f8df87 100644 --- a/tools/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/tools/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -50,7 +50,7 @@ func (ih InvalidHashPrefixError) Error() string { type InvalidCostError int func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed inclusive range %d..%d", int(ic), MinCost, MaxCost) } const ( diff --git a/tools/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/tools/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 000000000..d25979d9f --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,825 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. 
+func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. 
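AddASN1ObjectIdentifier above packs the first two OID components into a single base-128 value as 40*value1 + value2 and emits the remaining components as base-128 varints with the 0x80 continuation bit. A small self-contained check against a well-known OID (this is standard DER, verifiable with any ASN.1 decoder):

    package main

    import (
        "encoding/asn1"
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        b := cryptobyte.NewBuilder(nil)
        // 1.2.840.113549 (RSADSI): first octet 40*1+2 = 0x2a,
        // 840 = 6*128+72 -> 0x86 0x48, 113549 -> 0x86 0xf7 0x0d.
        b.AddASN1ObjectIdentifier(asn1.ObjectIdentifier{1, 2, 840, 113549})
        der, err := b.Bytes()
        if err != nil {
            panic(err)
        }
        fmt.Printf("%% x\n", der) // 06 06 2a 86 48 86 f7 0d
    }
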
+func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. 
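ReadASN1Integer pairs with the Builder's AddASN1Int64 from earlier in this file; negative values round-trip through the two's-complement handling in addASN1Signed and asn1Signed. A short round-trip sketch:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        b := cryptobyte.NewBuilder(nil)
        b.AddASN1Int64(-129) // minimal two's-complement DER: 02 02 ff 7f
        der, err := b.Bytes()
        if err != nil {
            panic(err)
        }

        var v int64
        s := cryptobyte.String(der)
        if s.ReadASN1Integer(&v) && s.Empty() {
            fmt.Println(v) // -129
        }
    }
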
+func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. 
+ // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + res = res.AddDate(-100, 0, 0) + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := bytes[0] + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/tools/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/tools/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 000000000..90ef6a241 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/tools/vendor/golang.org/x/crypto/cryptobyte/builder.go b/tools/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 000000000..cf254f5f1 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,350 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint48 appends a big-endian, 48-bit value to the byte string. +func (b *Builder) AddUint48(v uint64) { + b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. 
Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. 
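+		// DER length forms (ITU-T X.690, section 10.1): contents of 0..127
+		// bytes fit in the single reserved octet; anything longer needs a
+		// leading octet 0x80|n followed by n big-endian length octets,
+		// hence the fix-up below.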
+ if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. +// An attempt by a child builder passed to a continuation to unwrite bytes +// from its parent will panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n < 0 { + panic("cryptobyte: attempted to unwrite negative number of bytes") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/tools/vendor/golang.org/x/crypto/cryptobyte/string.go b/tools/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 000000000..4b0f8097f --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint48(out *uint64) bool { + v := s.read(6) + if v == nil { + return false + } + *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) + return true +} + +// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. +// It reports whether the read was successful. 
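These fixed-width readers combine with the length-prefixed helpers defined just below. A minimal parsing sketch:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        // A 16-bit big-endian length prefix (0x0003) followed by the payload.
        s := cryptobyte.String([]byte{0x00, 0x03, 'a', 'b', 'c'})
        var body cryptobyte.String
        if s.ReadUint16LengthPrefixed(&body) && s.Empty() {
            fmt.Printf("%s\n", body) // abc
        }
    }
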
+func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/tools/vendor/golang.org/x/crypto/internal/alias/alias.go b/tools/vendor/golang.org/x/crypto/internal/alias/alias.go new file mode 100644 index 000000000..551ff0c35 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/alias/alias.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego + +// Package alias implements memory aliasing tests. +package alias + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. 
+// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/tools/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/tools/vendor/golang.org/x/crypto/internal/alias/alias_purego.go new file mode 100644 index 000000000..6fe61b5c6 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/alias/alias_purego.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego + +// Package alias implements memory aliasing tests. +package alias + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/tools/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go new file mode 100644 index 000000000..8d99551fe --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !loong64 && !ppc64le && !ppc64 && !s390x) || !gc || purego + +package poly1305 + +type mac struct{ macGeneric } diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/tools/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go new file mode 100644 index 000000000..4aaea810a --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go @@ -0,0 +1,99 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. Authenticating two different +// messages with the same key allows an attacker to forge authenticators for other +// messages with the same key. +// +// Poly1305 was originally coupled with AES in order to make Poly1305-AES. 
AES was +// used with a fixed key in order to generate one-time keys from an nonce. +// However, in this package AES isn't used and the one-time key is specified +// directly. +package poly1305 + +import "crypto/subtle" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +const TagSize = 16 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := New(key) + h.Write(m) + h.Sum(out[:0]) +} + +// Verify returns true if mac is a valid authenticator for m with the given key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} + +// New returns a new MAC computing an authentication +// tag of all data written to it with the given key. +// This allows writing the message progressively instead +// of passing it as a single slice. Common users should use +// the Sum function instead. +// +// The key must be unique for each message, as authenticating +// two different messages with the same key allows an attacker +// to forge messages at will. +func New(key *[32]byte) *MAC { + m := &MAC{} + initialize(key, &m.macState) + return m +} + +// MAC is an io.Writer computing an authentication tag +// of the data written to it. +// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum or Verify causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum or Verify. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum or Verify") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return subtle.ConstantTimeCompare(expected, mac[:]) == 1 +} diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s new file mode 100644 index 000000000..133757384 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -0,0 +1,93 @@ +// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT. 
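The Sum/Verify/New surface documented in poly1305.go above lives in an internal package (importable only from within golang.org/x/crypto), so the following is an illustrative in-package example test rather than consumer code:

    package poly1305

    import "fmt"

    func ExampleSum() {
        var key [32]byte // must be unique per message in real use
        msg := []byte("attack at dawn")

        var tag [TagSize]byte
        Sum(&tag, msg, &key)
        fmt.Println(Verify(&tag, msg, &key))
        // Output: true
    }
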
+ +//go:build gc && !purego + +// func update(state *macState, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + MOVQ (DI), R8 + MOVQ 8(DI), R9 + MOVQ 16(DI), R10 + MOVQ 24(DI), R11 + MOVQ 32(DI), R12 + CMPQ R15, $0x10 + JB bytes_between_0_and_15 + +loop: + ADDQ (SI), R8 + ADCQ 8(SI), R9 + ADCQ $0x01, R10 + LEAQ 16(SI), SI + +multiply: + MOVQ R11, AX + MULQ R8 + MOVQ AX, BX + MOVQ DX, CX + MOVQ R11, AX + MULQ R9 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ R11, R13 + IMULQ R10, R13 + ADDQ DX, R13 + MOVQ R12, AX + MULQ R8 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ DX, R8 + MOVQ R12, R14 + IMULQ R10, R14 + MOVQ R12, AX + MULQ R9 + ADDQ AX, R13 + ADCQ DX, R14 + ADDQ R8, R13 + ADCQ $0x00, R14 + MOVQ BX, R8 + MOVQ CX, R9 + MOVQ R13, R10 + ANDQ $0x03, R10 + MOVQ R13, BX + ANDQ $-4, BX + ADDQ BX, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SHRQ $0x02, R14, R13 + SHRQ $0x02, R14 + ADDQ R13, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SUBQ $0x10, R15 + CMPQ R15, $0x10 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $0x00000001, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $0x08, BX, CX + SHLQ $0x08, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0x00, R10 + MOVQ $0x00000010, R15 + JMP multiply + +done: + MOVQ R8, (DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go new file mode 100644 index 000000000..315b84ac3 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go @@ -0,0 +1,47 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego && (amd64 || loong64 || ppc64 || ppc64le) + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go new file mode 100644 index 000000000..ec2202bd7 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -0,0 +1,312 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file provides the generic implementation of Sum and MAC. Other files +// might provide optimized assembly implementations of some of this code. 
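The mac.Write above, like macGeneric.Write in the generic file that follows, is an instance of a standard block-buffering pattern: top up a partially filled block, pass whole 16-byte blocks to the block function directly from the caller's slice, and stash the tail. A standalone sketch of just that pattern, with a hypothetical process function standing in for update:

package sketch

// bufferedWriter accumulates input and hands complete 16-byte blocks to
// process, buffering any trailing partial block for the next call.
type bufferedWriter struct {
	buffer [16]byte
	offset int
}

// process stands in for the real block function (update/updateGeneric);
// it is always called with a multiple of 16 bytes.
func process(block []byte) {}

func (w *bufferedWriter) Write(p []byte) (int, error) {
	nn := len(p)
	if w.offset > 0 {
		n := copy(w.buffer[w.offset:], p)
		if w.offset+n < len(w.buffer) {
			w.offset += n // still not a full block; keep buffering
			return nn, nil
		}
		p = p[n:]
		w.offset = 0
		process(w.buffer[:]) // flush the completed block
	}
	if n := len(p) - len(p)%len(w.buffer); n > 0 {
		process(p[:n]) // whole blocks straight from the caller's slice
		p = p[n:]
	}
	w.offset += copy(w.buffer[w.offset:], p) // stash the tail
	return nn, nil
}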
+
+package poly1305
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
+// for a 64-byte message is approximately
+//
+//	s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
+//
+// for some secret r and s. It can be computed sequentially like
+//
+//	for len(msg) > 0:
+//		h += read(msg, 16)
+//		h *= r
+//		h %= 2¹³⁰ - 5
+//	return h + s
+//
+// All the complexity is about doing performant constant-time math on numbers
+// larger than any available numeric type.
+
+func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
+	h := newMACGeneric(key)
+	h.Write(msg)
+	h.Sum(out)
+}
+
+func newMACGeneric(key *[32]byte) macGeneric {
+	m := macGeneric{}
+	initialize(key, &m.macState)
+	return m
+}
+
+// macState holds numbers in saturated 64-bit little-endian limbs. That is,
+// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
+type macState struct {
+	// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
+	// can grow larger during and after rounds. It must, however, remain below
+	// 2 * (2¹³⁰ - 5).
+	h [3]uint64
+	// r and s are the private key components.
+	r [2]uint64
+	s [2]uint64
+}
+
+type macGeneric struct {
+	macState
+
+	buffer [TagSize]byte
+	offset int
+}
+
+// Write splits the incoming message into TagSize chunks, and passes them to
+// update. It buffers incomplete chunks.
+func (h *macGeneric) Write(p []byte) (int, error) {
+	nn := len(p)
+	if h.offset > 0 {
+		n := copy(h.buffer[h.offset:], p)
+		if h.offset+n < TagSize {
+			h.offset += n
+			return nn, nil
+		}
+		p = p[n:]
+		h.offset = 0
+		updateGeneric(&h.macState, h.buffer[:])
+	}
+	if n := len(p) - (len(p) % TagSize); n > 0 {
+		updateGeneric(&h.macState, p[:n])
+		p = p[n:]
+	}
+	if len(p) > 0 {
+		h.offset += copy(h.buffer[h.offset:], p)
+	}
+	return nn, nil
+}
+
+// Sum flushes the last incomplete chunk from the buffer, if any, and generates
+// the MAC output. It does not modify its state, in order to allow for multiple
+// calls to Sum, even if no Write is allowed after Sum.
+func (h *macGeneric) Sum(out *[TagSize]byte) {
+	state := h.macState
+	if h.offset > 0 {
+		updateGeneric(&state, h.buffer[:h.offset])
+	}
+	finalize(out, &state.h, &state.s)
+}
+
+// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
+// clears some bits of the secret coefficient to make it possible to implement
+// multiplication more efficiently.
+const (
+	rMask0 = 0x0FFFFFFC0FFFFFFF
+	rMask1 = 0x0FFFFFFC0FFFFFFC
+)
+
+// initialize loads the 256-bit key into the two 128-bit secret values r and s.
+func initialize(key *[32]byte, m *macState) {
+	m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
+	m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
+	m.s[0] = binary.LittleEndian.Uint64(key[16:24])
+	m.s[1] = binary.LittleEndian.Uint64(key[24:32])
+}
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+	lo, hi uint64
+}
+
+func mul64(a, b uint64) uint128 {
+	hi, lo := bits.Mul64(a, b)
+	return uint128{lo, hi}
+}
+
+func add128(a, b uint128) uint128 {
+	lo, c := bits.Add64(a.lo, b.lo, 0)
+	hi, c := bits.Add64(a.hi, b.hi, c)
+	if c != 0 {
+		panic("poly1305: unexpected overflow")
+	}
+	return uint128{lo, hi}
+}
+
+func shiftRightBy2(a uint128) uint128 {
+	a.lo = a.lo>>2 | (a.hi&3)<<62
+	a.hi = a.hi >> 2
+	return a
+}
+
+// updateGeneric absorbs msg into the state.h accumulator.
For each chunk m of +// 128 bits of message, it computes +// +// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// +// If the msg length is not a multiple of TagSize, it assumes the last +// incomplete chunk is the final one. +func updateGeneric(state *macState, msg []byte) { + h0, h1, h2 := state.h[0], state.h[1], state.h[2] + r0, r1 := state.r[0], state.r[1] + + for len(msg) > 0 { + var c uint64 + + // For the first step, h + m, we use a chain of bits.Add64 intrinsics. + // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially + // reduced at the end of the multiplication below. + // + // The spec requires us to set a bit just above the message size, not to + // hide leading zeroes. For full chunks, that's 1 << 128, so we can just + // add 1 to the most significant (2¹²⁸) limb, h2. + if len(msg) >= TagSize { + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 + + msg = msg[TagSize:] + } else { + var buf [TagSize]byte + copy(buf[:], msg) + buf[len(msg)] = 1 + + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c + + msg = nil + } + + // Multiplication of big number limbs is similar to elementary school + // columnar multiplication. Instead of digits, there are 64-bit limbs. + // + // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. + // + // h2 h1 h0 x + // r1 r0 = + // ---------------- + // h2r0 h1r0 h0r0 <-- individual 128-bit products + // + h2r1 h1r1 h0r1 + // ------------------------ + // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs + // ------------------------ + // m3.hi m2.hi m1.hi m0.hi <-- carry propagation + // + m3.lo m2.lo m1.lo m0.lo + // ------------------------------- + // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs + // + // The main difference from pen-and-paper multiplication is that we do + // carry propagation in a separate step, as if we wrote two digit sums + // at first (the 128-bit limbs), and then carried the tens all at once. + + h0r0 := mul64(h0, r0) + h1r0 := mul64(h1, r0) + h2r0 := mul64(h2, r0) + h0r1 := mul64(h0, r1) + h1r1 := mul64(h1, r1) + h2r1 := mul64(h2, r1) + + // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their + // top 4 bits cleared by rMask{0,1}, we know that their product is not going + // to overflow 64 bits, so we can ignore the high part of the products. + // + // This also means that the product doesn't have a fifth limb (t4). + if h2r0.hi != 0 { + panic("poly1305: unexpected overflow") + } + if h2r1.hi != 0 { + panic("poly1305: unexpected overflow") + } + + m0 := h0r0 + m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again + m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. + m3 := h2r1 + + t0 := m0.lo + t1, c := bits.Add64(m1.lo, m0.hi, 0) + t2, c := bits.Add64(m2.lo, m1.hi, c) + t3, _ := bits.Add64(m3.lo, m2.hi, c) + + // Now we have the result as 4 64-bit limbs, and we need to reduce it + // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do + // a cheap partial reduction according to the reduction identity + // + // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 + // + // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is + // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the + // assumptions we make about h in the rest of the code. 
+ // + // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 + + // We split the final result at the 2¹³⁰ mark into h and cc, the carry. + // Note that the carry bits are effectively shifted left by 2, in other + // words, cc = c * 4 for the c in the reduction identity. + h0, h1, h2 = t0, t1, t2&maskLow2Bits + cc := uint128{t2 & maskNotLow2Bits, t3} + + // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. + + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) + h2 += c + + cc = shiftRightBy2(cc) + + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) + h2 += c + + // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most + // + // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 + } + + state.h[0], state.h[1], state.h[2] = h0, h1, h2 +} + +const ( + maskLow2Bits uint64 = 0x0000000000000003 + maskNotLow2Bits uint64 = ^maskLow2Bits +) + +// select64 returns x if v == 1 and y if v == 0, in constant time. +func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } + +// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. +const ( + p0 = 0xFFFFFFFFFFFFFFFB + p1 = 0xFFFFFFFFFFFFFFFF + p2 = 0x0000000000000003 +) + +// finalize completes the modular reduction of h and computes +// +// out = h + s mod 2¹²⁸ +func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { + h0, h1, h2 := h[0], h[1], h[2] + + // After the partial reduction in updateGeneric, h might be more than + // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction + // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the + // result if the subtraction underflows, and t otherwise. + + hMinusP0, b := bits.Sub64(h0, p0, 0) + hMinusP1, b := bits.Sub64(h1, p1, b) + _, b = bits.Sub64(h2, p2, b) + + // h = h if h < p else h - p + h0 = select64(b, h0, hMinusP0) + h1 = select64(b, h1, hMinusP1) + + // Finally, we compute the last Poly1305 step + // + // tag = h + s mod 2¹²⁸ + // + // by just doing a wide addition with the 128 low bits of h and discarding + // the overflow. + h0, c := bits.Add64(h0, s[0], 0) + h1, _ = bits.Add64(h1, s[1], c) + + binary.LittleEndian.PutUint64(out[0:8], h0) + binary.LittleEndian.PutUint64(out[8:16], h1) +} diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s new file mode 100644 index 000000000..bc8361da4 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
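Stepping back from the limb arithmetic of updateGeneric and finalize above: the whole computation fits in a few lines of arbitrary-precision code. A math/big sketch of the same function (clamp r, Horner-evaluate 16-byte blocks modulo 2¹³⁰ - 5, add s, keep the low 128 bits); the names are mine, and math/big is not constant time, so this is an executable description only, never a substitute:

package main

import "math/big"

// leInt reads b as a little-endian unsigned integer (big.Int is big-endian).
func leInt(b []byte) *big.Int {
	be := make([]byte, len(b))
	for i := range b {
		be[len(b)-1-i] = b[i]
	}
	return new(big.Int).SetBytes(be)
}

// sumBig mirrors updateGeneric and finalize in arbitrary precision. Unlike
// the limb code, big.Int reduces canonically, so finalize's conditional
// subtraction of 2¹³⁰ - 5 is not needed here.
func sumBig(msg []byte, key *[32]byte) (tag [16]byte) {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	// Clamp r with rMask0/rMask1, written here as little-endian mask bytes.
	mask := []byte{
		0xff, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f,
		0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f,
	}
	r := new(big.Int).And(leInt(key[:16]), leInt(mask))
	s := leInt(key[16:])

	h := new(big.Int)
	for len(msg) > 0 {
		n := len(msg)
		if n > 16 {
			n = 16
		}
		block := append(append([]byte{}, msg[:n]...), 1) // the 1 bit set just above the block
		h.Mod(h.Mul(h.Add(h, leInt(block)), r), p)       // h = (h + m) * r mod 2¹³⁰ - 5
		msg = msg[n:]
	}

	h.Mod(h.Add(h, s), new(big.Int).Lsh(big.NewInt(1), 128)) // tag = h + s mod 2¹²⁸
	be := h.FillBytes(make([]byte, 16))
	for i := range tag {
		tag[i] = be[15-i]
	}
	return tag
}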
+ +//go:build gc && !purego + +// func update(state *macState, msg []byte) +TEXT ·update(SB), $0-32 + MOVV state+0(FP), R4 + MOVV msg_base+8(FP), R5 + MOVV msg_len+16(FP), R6 + + MOVV $0x10, R7 + + MOVV (R4), R8 // h0 + MOVV 8(R4), R9 // h1 + MOVV 16(R4), R10 // h2 + MOVV 24(R4), R11 // r0 + MOVV 32(R4), R12 // r1 + + BLT R6, R7, bytes_between_0_and_15 + +loop: + MOVV (R5), R14 // msg[0:8] + MOVV 8(R5), R16 // msg[8:16] + ADDV R14, R8, R8 // h0 (x1 + y1 = z1', if z1' < x1 then z1' overflow) + ADDV R16, R9, R27 + SGTU R14, R8, R24 // h0.carry + SGTU R9, R27, R28 + ADDV R27, R24, R9 // h1 + SGTU R27, R9, R24 + OR R24, R28, R24 // h1.carry + ADDV $0x01, R24, R24 + ADDV R10, R24, R10 // h2 + + ADDV $16, R5, R5 // msg = msg[16:] + +multiply: + MULV R8, R11, R14 // h0r0.lo + MULHVU R8, R11, R15 // h0r0.hi + MULV R9, R11, R13 // h1r0.lo + MULHVU R9, R11, R16 // h1r0.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MULV R10, R11, R25 + ADDV R16, R25, R25 + MULV R8, R12, R13 // h0r1.lo + MULHVU R8, R12, R16 // h0r1.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MOVV R16, R8 + MULV R10, R12, R26 // h2r1 + MULV R9, R12, R13 // h1r1.lo + MULHVU R9, R12, R16 // h1r1.hi + ADDV R13, R25, R25 + ADDV R16, R26, R27 + SGTU R13, R25, R24 + ADDV R27, R24, R26 + ADDV R8, R25, R25 + SGTU R8, R25, R24 + ADDV R24, R26, R26 + AND $3, R25, R10 + AND $-4, R25, R17 + ADDV R17, R14, R8 + ADDV R26, R15, R27 + SGTU R17, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + SLLV $62, R26, R27 + SRLV $2, R25, R28 + SRLV $2, R26, R26 + OR R27, R28, R25 + ADDV R25, R8, R8 + ADDV R26, R9, R27 + SGTU R25, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + + SUBV $16, R6, R6 + BGE R6, R7, loop + +bytes_between_0_and_15: + BEQ R6, R0, done + MOVV $1, R14 + XOR R15, R15 + ADDV R6, R5, R5 + +flush_buffer: + MOVBU -1(R5), R25 + SRLV $56, R14, R24 + SLLV $8, R15, R28 + SLLV $8, R14, R14 + OR R24, R28, R15 + XOR R25, R14, R14 + SUBV $1, R6, R6 + SUBV $1, R5, R5 + BNE R6, R0, flush_buffer + + ADDV R14, R8, R8 + SGTU R14, R8, R24 + ADDV R15, R9, R27 + SGTU R15, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R10, R24, R10 + + MOVV $16, R6 + JMP multiply + +done: + MOVV R8, (R4) + MOVV R9, 8(R4) + MOVV R10, 16(R4) + RET diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s new file mode 100644 index 000000000..6899a1dab --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s @@ -0,0 +1,187 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego && (ppc64 || ppc64le) + +#include "textflag.h" + +// This was ported from the amd64 implementation. 
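One detail of the loong64 file above is worth spelling out: the architecture has no carry flag, so add-with-carry is emulated with SGTU (set on greater than, unsigned), exactly as the "(x1 + y1 = z1', if z1' < x1 then z1' overflow)" comment hints. The same idiom in plain Go, for reference:

package sketch

// add64 shows the comparison idiom the loong64 assembly uses in place of a
// carry flag: after z = x + y wraps modulo 2⁶⁴, a carry out occurred exactly
// when z < x. The assembly materializes this comparison with SGTU and chains
// it into the next limb, which is what bits.Add64 provides in the Go files.
func add64(x, y uint64) (z, carry uint64) {
	z = x + y
	if z < x {
		carry = 1
	}
	return z, carry
}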
+ +#ifdef GOARCH_ppc64le +#define LE_MOVD MOVD +#define LE_MOVWZ MOVWZ +#define LE_MOVHZ MOVHZ +#else +#define LE_MOVD MOVDBR +#define LE_MOVWZ MOVWBR +#define LE_MOVHZ MOVHBR +#endif + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + LE_MOVD (msg)( R0), t0; \ + LE_MOVD (msg)(R24), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADDE t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + ADDZE t3; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ + ADDC t0, h0, h0; \ + ADDE t3, t1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + MOVD $8, R24 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + + PCALIGN $16 +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP R5, $0 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + LE_MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + LE_MOVD (R4), R16 + + CMP R17, $0 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. + BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + LE_MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + LE_MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP R5, $0 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDZE R10, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go new file mode 100644 index 000000000..e1d033a49 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// updateVX is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +// +//go:noescape +func updateVX(state *macState, msg []byte) + +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. +type mac struct { + macState + + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) + } + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) + } else { + updateGeneric(&h.macState, p[:body]) + } + } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. + if cpu.S390X.HasVX && len(remainder) > 2*TagSize { + updateVX(&state, remainder) + } else if len(remainder) > 0 { + updateGeneric(&state, remainder) + } + finalize(out, &state.h, &state.s) +} diff --git a/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s new file mode 100644 index 000000000..0fe3a7c21 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s @@ -0,0 +1,503 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +// This implementation of Poly1305 uses the vector facility (vx) +// to process up to 2 blocks (32 bytes) per iteration using an +// algorithm based on the one described in: +// +// NEON crypto, Daniel J. Bernstein & Peter Schwabe +// https://cryptojedi.org/papers/neoncrypto-20120320.pdf +// +// This algorithm uses 5 26-bit limbs to represent a 130-bit +// value. These limbs are, for the most part, zero extended and +// placed into 64-bit vector register elements. Each vector +// register is 128-bits wide and so holds 2 of these elements. +// Using 26-bit limbs allows us plenty of headroom to accommodate +// accumulations before and after multiplication without +// overflowing either 32-bits (before multiplication) or 64-bits +// (after multiplication). +// +// In order to parallelise the operations required to calculate +// the sum we use two separate accumulators and then sum those +// in an extra final step. 
For compatibility with the generic
+// implementation we perform this summation at the end of every
+// updateVX call.
+//
+// To use two accumulators we must multiply the message blocks
+// by r² rather than r. Only the final message block should be
+// multiplied by r.
+//
+// Example:
+//
+// We want to calculate the sum (h) for a 64 byte message (m):
+//
+//	h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
+//
+// To do this we split the calculation into the even indices
+// and odd indices of the message. These form our SIMD 'lanes':
+//
+//	h = m[ 0:16]r⁴ + m[32:48]r² +   <- lane 0
+//	    m[16:32]r³ + m[48:64]r      <- lane 1
+//
+// To calculate this iteratively we refactor so that both lanes
+// are written in terms of r² and r:
+//
+//	h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
+//	    (m[16:32]r² + m[48:64])r    <- lane 1
+//	                ^             ^
+//	                |             coefficients for second iteration
+//	                coefficients for first iteration
+//
+// So in this case we would have two iterations. In the first
+// both lanes are multiplied by r². In the second only the
+// first lane is multiplied by r² and the second lane is
+// instead multiplied by r. This gives us the odd and even
+// powers of r that we need from the original equation.
+//
+// Notation:
+//
+//	h - accumulator
+//	r - key
+//	m - message
+//
+//	[a, b]       - SIMD register holding two 64-bit values
+//	[a, b, c, d] - SIMD register holding four 32-bit values
+//	xᵢ[n]        - limb n of variable x with bit width i
+//
+// Limbs are expressed in little endian order, so for 26-bit
+// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
+// will be the least significant limb.
+
+// masking constants
+#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
+#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
+
+// expansion constants (see EXPAND macro)
+#define EX0 V2
+#define EX1 V3
+#define EX2 V4
+
+// key (r², r or 1 depending on context)
+#define R_0 V5
+#define R_1 V6
+#define R_2 V7
+#define R_3 V8
+#define R_4 V9
+
+// precalculated coefficients (5r², 5r or 0 depending on context)
+#define R5_1 V10
+#define R5_2 V11
+#define R5_3 V12
+#define R5_4 V13
+
+// message block (m)
+#define M_0 V14
+#define M_1 V15
+#define M_2 V16
+#define M_3 V17
+#define M_4 V18
+
+// accumulator (h)
+#define H_0 V19
+#define H_1 V20
+#define H_2 V21
+#define H_3 V22
+#define H_4 V23
+
+// temporary registers (for short-lived values)
+#define T_0 V24
+#define T_1 V25
+#define T_2 V26
+#define T_3 V27
+#define T_4 V28
+
+GLOBL ·constants<>(SB), RODATA, $0x30
+// EX0
+DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
+DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
+// EX1
+DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
+DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
+// EX2
+DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
+DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
+
+// MULTIPLY multiplies each lane of f and g, partially reduced
+// modulo 2¹³⁰ - 5. The result, h, consists of partial products
+// in each lane that need to be reduced further to produce the
+// final result.
+//
+//	h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
+//
+// Note that the multiplication by 5 of the high bits is
+// achieved by precalculating the multiplication of four of the
+// g coefficients by 5. These are g51-g54.
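The even/odd lane refactoring described above is easy to sanity-check with ordinary modular arithmetic. A small math/big sketch (the names and toy values are mine) verifying that the two-accumulator form matches the serial Horner form for a four-block message:

package main

import (
	"fmt"
	"math/big"
)

// Checks that m0·r⁴ + m1·r³ + m2·r² + m3·r computed serially equals
// lane0 = (m0·r² + m2)·r² plus lane1 = (m1·r² + m3)·r.
func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
	mod := func(x *big.Int) *big.Int { return x.Mod(x, p) }

	r := big.NewInt(123456789) // stand-in for the clamped key
	m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}

	serial := new(big.Int)
	for _, mi := range m {
		mod(serial.Mul(serial.Add(serial, mi), r)) // h = (h + m) * r mod p
	}

	r2 := mod(new(big.Int).Mul(r, r))
	lane0 := new(big.Int).Mul(m[0], r2)
	mod(lane0.Mul(lane0.Add(lane0, m[2]), r2))
	lane1 := new(big.Int).Mul(m[1], r2)
	mod(lane1.Mul(lane1.Add(lane1, m[3]), r))

	fmt.Println(serial.Cmp(mod(lane0.Add(lane0, lane1))) == 0) // true
}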
+#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g4, h4 \ + VMLOF f0, g2, h2 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g3, T_4 \ + VMLOF f1, g1, T_2 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g0, h4, h4 \ + VMALOF f4, g53, h2, h2 \ + VAG T_0, h0, h0 \ + VAG T_3, h3, h3 \ + VAG T_1, h1, h1 \ + VAG T_4, h4, h4 \ + VAG T_2, h2, h2 + +// REDUCE performs the following carry operations in four +// stages, as specified in Bernstein & Schwabe: +// +// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] +// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] +// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] +// 4: h₂₆[3]->h₂₆[4] +// +// The result is that all of the limbs are limited to 26-bits +// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. +// +// Note that although each limb is aligned at 26-bit intervals +// they may contain values that exceed 2²⁶ - 1, hence the need +// to carry the excess bits in each limb. +#define REDUCE(h0, h1, h2, h3, h4) \ + VESRLG $26, h0, T_0 \ + VESRLG $26, h3, T_1 \ + VN MOD26, h0, h0 \ + VN MOD26, h3, h3 \ + VAG T_0, h1, h1 \ + VAG T_1, h4, h4 \ + VESRLG $26, h1, T_2 \ + VESRLG $26, h4, T_3 \ + VN MOD26, h1, h1 \ + VN MOD26, h4, h4 \ + VESLG $2, T_3, T_4 \ + VAG T_3, T_4, T_4 \ + VAG T_2, h2, h2 \ + VAG T_4, h0, h0 \ + VESRLG $26, h2, T_0 \ + VESRLG $26, h0, T_1 \ + VN MOD26, h2, h2 \ + VN MOD26, h0, h0 \ + VAG T_0, h3, h3 \ + VAG T_1, h1, h1 \ + VESRLG $26, h3, T_2 \ + VN MOD26, h3, h3 \ + VAG T_2, h4, h4 + +// EXPAND splits the 128-bit little-endian values in0 and in1 +// into 26-bit big-endian limbs and places the results into +// the first and second lane of d₂₆[0:4] respectively. +// +// The EX0, EX1 and EX2 constants are arrays of byte indices +// for permutation. The permutation both reverses the bytes +// in the input and ensures the bytes are copied into the +// destination limb ready to be shifted into their final +// position. 
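A scalar restatement of the REDUCE macro above may help when reading the vector code. A minimal sketch over five 26-bit limbs held in uint64s (the flat limb array is my representation, not the two-lane vector layout used here):

package sketch

// reduce26 mirrors the four REDUCE stages: h0→h1 and h3→h4, then h1→h2 and
// h4→h0 (the wrap-around carry is multiplied by 5 because 2¹³⁰ ≡ 5 mod
// 2¹³⁰ - 5), then h0→h1 and h2→h3, and finally h3→h4.
func reduce26(h *[5]uint64) {
	const mask26 = (1 << 26) - 1
	carry := func(from, to int) {
		c := h[from] >> 26
		h[from] &= mask26
		h[to] += c
	}
	carry(0, 1)
	carry(3, 4)
	carry(1, 2)
	c := h[4] >> 26 // carry out of the top limb wraps to the bottom, times 5
	h[4] &= mask26
	h[0] += c * 5
	carry(0, 1)
	carry(2, 3)
	carry(3, 4)
}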
+#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ + VPERM in0, in1, EX0, d0 \ + VPERM in0, in1, EX1, d2 \ + VPERM in0, in1, EX2, d4 \ + VESRLG $26, d0, d1 \ + VESRLG $30, d2, d3 \ + VESRLG $4, d2, d2 \ + VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] + VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] + VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] + VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] + VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] + +// func updateVX(state *macState, msg []byte) +TEXT ·updateVX(SB), NOSPLIT, $0 + MOVD state+0(FP), R1 + LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len + + // load EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + // generate masks + VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] + VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] + + // load h (accumulator) and r (key) from state + VZERO T_1 // [0, 0] + VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] + VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] + VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] + VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] + VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] + + // unpack h and r into 26-bit limbs + // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value + VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] + VZERO H_1 // [0, 0] + VZERO H_3 // [0, 0] + VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out + VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] + VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] + VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only + VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] + VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only + VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete + VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete + + // replicate r across all 4 vector elements + VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] + VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] + VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] + VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] + VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] + + // zero out lane 1 of h + VLEIG $1, $0, H_0 // [h₂₆[0], 0] + VLEIG $1, $0, H_1 // [h₂₆[1], 0] + VLEIG $1, $0, H_2 // [h₂₆[2], 0] + VLEIG $1, $0, H_3 // [h₂₆[3], 0] + VLEIG $1, $0, H_4 // [h₂₆[4], 0] + + // calculate 5r (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] + + // skip r² calculation if we are only calculating one block + CMPBLE R3, $16, skip + + // calculate r² + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) + REDUCE(M_0, M_1, M_2, M_3, M_4) + VGBM $0x0f0f, T_0 + VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] + VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] + VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] + VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] + VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] + + // calculate 5r² (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] + +loop: + CMPBLE 
R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients + + // load next 2 blocks from message + VLM (R2), T_0, T_1 + + // update message slice + SUB $32, R3 + MOVD $32(R2), R2 + + // unpack message blocks into 26-bit big-endian limbs + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // add 2¹²⁸ to each message block value + VLEIB $4, $1, M_4 + VLEIB $12, $1, M_4 + +multiply: + // accumulate the incoming message + VAG H_0, M_0, M_0 + VAG H_3, M_3, M_3 + VAG H_1, M_1, M_1 + VAG H_4, M_4, M_4 + VAG H_2, M_2, M_2 + + // multiply the accumulator by the key coefficient + MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + + // carry and partially reduce the partial products + REDUCE(H_0, H_1, H_2, H_3, H_4) + + CMPBNE R3, $0, loop + +finish: + // sum lane 0 and lane 1 and put the result in lane 1 + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_3, T_0, H_3 + VSUMQG H_1, T_0, H_1 + VSUMQG H_4, T_0, H_4 + VSUMQG H_2, T_0, H_2 + + // reduce again after summation + // TODO(mundaym): there might be a more efficient way to do this + // now that we only have 1 active lane. For example, we could + // simultaneously pack the values as we reduce them. + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 + // TODO(mundaym): in testing this final carry was unnecessary. + // Needs a proof before it can be removed though. + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG $26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2(2¹³⁰ - 5) + // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. + VESLG $26, H_1, H_1 + VESLG $26, H_3, H_3 + VO H_0, H_1, H_0 + VO H_2, H_3, H_2 + VESLG $4, H_2, H_2 + VLEIB $7, $48, H_1 + VSLB H_1, H_2, H_2 + VO H_0, H_2, H_0 + VLEIB $7, $104, H_1 + VSLB H_1, H_4, H_3 + VO H_3, H_0, H_0 + VLEIB $7, $24, H_1 + VSRLB H_1, H_4, H_1 + + // update state + VSTEG $1, H_0, 0(R1) + VSTEG $0, H_0, 8(R1) + VSTEG $1, H_1, 16(R1) + RET + +b2: // 2 or fewer blocks remaining + CMPBLE R3, $16, b1 + + // Load the 2 remaining blocks (17-32 bytes remaining). + MOVD $-17(R3), R0 // index of final byte to load modulo 16 + VL (R2), T_0 // load full 16 byte block + VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) + CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long + VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 + + // Split both blocks into 26-bit limbs in the appropriate lanes. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the second to last block. + VLEIB $4, $1, M_4 + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, M_4 + + // Finally, set up the coefficients for the final multiplication. + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. 
+	//
+	// We want lane 0 to be multiplied by r² so it can be kept the
+	// same. We want lane 1 to be multiplied by r so we need to move
+	// the saved r value into the 32-bit odd index in lane 1 by
+	// rotating the 64-bit lane by 32.
+	VGBM   $0x00ff, T_0         // [0, 0xffffffffffffffff] - mask lane 1 only
+	VERIMG $32, R_0, T_0, R_0   // [_, r²₂₆[0], _, r₂₆[0]]
+	VERIMG $32, R_1, T_0, R_1   // [_, r²₂₆[1], _, r₂₆[1]]
+	VERIMG $32, R_2, T_0, R_2   // [_, r²₂₆[2], _, r₂₆[2]]
+	VERIMG $32, R_3, T_0, R_3   // [_, r²₂₆[3], _, r₂₆[3]]
+	VERIMG $32, R_4, T_0, R_4   // [_, r²₂₆[4], _, r₂₆[4]]
+	VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
+	VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
+	VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
+	VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
+
+	MOVD $0, R3
+	BR   multiply
+
+skip:
+	CMPBEQ R3, $0, finish
+
+b1: // 1 block remaining
+
+	// Load the final block (1-16 bytes). This will be placed into
+	// lane 0.
+	MOVD $-1(R3), R0
+	VLL  R0, (R2), T_0 // pad to 16 bytes with zeros
+
+	// The Poly1305 algorithm requires that a 1 bit be appended to
+	// each message block. If the final block is less than 16 bytes
+	// long then it is easiest to insert the 1 before the message
+	// block is split into 26-bit limbs. If, on the other hand, the
+	// final message block is 16 bytes long then we append the 1 bit
+	// after expansion as normal.
+	MOVBZ  $1, R0
+	CMPBEQ R3, $16, 2(PC)
+	VLVGB  R3, R0, T_0
+
+	// Set the message block in lane 1 to the value 0 so that it
+	// can be accumulated without affecting the final result.
+	VZERO T_1
+
+	// Split the final message block into 26-bit limbs in lane 0.
+	// Lane 1 will contain 0.
+	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+	// Append a 1 byte to the end of the last block only if it is a
+	// full 16 byte block.
+	CMPBNE R3, $16, 2(PC)
+	VLEIB  $4, $1, M_4
+
+	// We have previously saved r and 5r in the 32-bit even indexes
+	// of the R_[0-4] and R5_[1-4] coefficient registers.
+	//
+	// We want lane 0 to be multiplied by r so we need to move the
+	// saved r value into the 32-bit odd index in lane 0. We want
+	// lane 1 to be set to the value 1. This makes multiplication
+	// a no-op. We do this by setting lane 1 in every register to 0
+	// and then just setting the 32-bit index 3 in R_0 to 1.
+	VZERO T_0
+	MOVD  $0, R0
+	MOVD  $0x10111213, R12
+	VLVGP R12, R0, T_1         // [_, 0x10111213, _, 0x00000000]
+	VPERM T_0, R_0, T_1, R_0   // [_, r₂₆[0], _, 0]
+	VPERM T_0, R_1, T_1, R_1   // [_, r₂₆[1], _, 0]
+	VPERM T_0, R_2, T_1, R_2   // [_, r₂₆[2], _, 0]
+	VPERM T_0, R_3, T_1, R_3   // [_, r₂₆[3], _, 0]
+	VPERM T_0, R_4, T_1, R_4   // [_, r₂₆[4], _, 0]
+	VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
+	VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
+	VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
+	VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]
+
+	// Set the value of lane 1 to be 1.
+	VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]
+
+	MOVD $0, R3
+	BR   multiply
diff --git a/tools/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/tools/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
new file mode 100644
index 000000000..1fe600ad0
--- /dev/null
+++ b/tools/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -0,0 +1,173 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package secretbox encrypts and authenticates small messages.
+ +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with +secret-key cryptography. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. +*/ +package secretbox + +import ( + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = poly1305.TagSize + +// setup produces a sub-key and Salsa20 counter given a nonce and key. +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { + // We use XSalsa20 for encryption so first we need to generate a + // key and nonce with HSalsa20. + var hNonce [16]byte + copy(hNonce[:], nonce[:]) + salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) + + // The final 8 bytes of the original nonce form the new nonce. + copy(counter[:], nonce[16:]) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// must not overlap message. The key and nonce pair must be unique for each +// distinct message and the output will be Overhead bytes longer than message. +func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + + ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) + if alias.AnyOverlap(out, message) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of message with the keystream generated from + // the first block. 
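+	// (The first 32 bytes of firstBlock were consumed above as the Poly1305
+	// key, so firstBlock[32:64] are the keystream bytes that pair with the
+	// first 32 bytes of the message.)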
+ firstMessageBlock := message + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + + tagOut := out + out = out[poly1305.TagSize:] + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + message = message[len(firstMessageBlock):] + ciphertext := out + out = out[len(firstMessageBlock):] + + // Now encrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, message, &counter, &subKey) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, ciphertext, &poly1305Key) + copy(tagOut, tag[:]) + + return ret +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { + if len(box) < Overhead { + return nil, false + } + + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + var tag [poly1305.TagSize]byte + copy(tag[:], box) + + if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { + return nil, false + } + + ret, out := sliceForAppend(out, len(box)-Overhead) + if alias.AnyOverlap(out, box) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of box with the keystream generated from + // the first block. + box = box[Overhead:] + firstMessageBlock := box + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + + box = box[len(firstMessageBlock):] + out = out[len(firstMessageBlock):] + + // Now decrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, box, &counter, &subKey) + + return ret, true +} diff --git a/tools/vendor/golang.org/x/crypto/ocsp/ocsp.go b/tools/vendor/golang.org/x/crypto/ocsp/ocsp.go new file mode 100644 index 000000000..e6c645e7c --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -0,0 +1,793 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses +// are signed messages attesting to the validity of a certificate for a small +// period of time. This is used to manage revocation for X.509 certificates. +package ocsp + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "strconv" + "time" +) + +var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) + +// ResponseStatus contains the result of an OCSP request. See +// https://tools.ietf.org/html/rfc6960#section-2.3 +type ResponseStatus int + +const ( + Success ResponseStatus = 0 + Malformed ResponseStatus = 1 + InternalError ResponseStatus = 2 + TryLater ResponseStatus = 3 + // Status code four is unused in OCSP. 
See + // https://tools.ietf.org/html/rfc6960#section-4.2.1 + SignatureRequired ResponseStatus = 5 + Unauthorized ResponseStatus = 6 +) + +func (r ResponseStatus) String() string { + switch r { + case Success: + return "success" + case Malformed: + return "malformed" + case InternalError: + return "internal error" + case TryLater: + return "try later" + case SignatureRequired: + return "signature required" + case Unauthorized: + return "unauthorized" + default: + return "unknown OCSP status: " + strconv.Itoa(int(r)) + } +} + +// ResponseError is an error that may be returned by ParseResponse to indicate +// that the response itself is an error, not just that it's indicating that a +// certificate is revoked, unknown, etc. +type ResponseError struct { + Status ResponseStatus +} + +func (r ResponseError) Error() string { + return "ocsp: error from server: " + r.Status.String() +} + +// These are internal structures that reflect the ASN.1 structure of an OCSP +// response. See RFC 2560, section 4.2. + +type certID struct { + HashAlgorithm pkix.AlgorithmIdentifier + NameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// https://tools.ietf.org/html/rfc2560#section-4.1.1 +type ocspRequest struct { + TBSRequest tbsRequest +} + +type tbsRequest struct { + Version int `asn1:"explicit,tag:0,default:0,optional"` + RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` + RequestList []request +} + +type request struct { + Cert certID +} + +type responseASN1 struct { + Status asn1.Enumerated + Response responseBytes `asn1:"explicit,tag:0,optional"` +} + +type responseBytes struct { + ResponseType asn1.ObjectIdentifier + Response []byte +} + +type basicResponse struct { + TBSResponseData responseData + SignatureAlgorithm pkix.AlgorithmIdentifier + Signature asn1.BitString + Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` +} + +type responseData struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:0,explicit,tag:0"` + RawResponderID asn1.RawValue + ProducedAt time.Time `asn1:"generalized"` + Responses []singleResponse +} + +type singleResponse struct { + CertID certID + Good asn1.Flag `asn1:"tag:0,optional"` + Revoked revokedInfo `asn1:"tag:1,optional"` + Unknown asn1.Flag `asn1:"tag:2,optional"` + ThisUpdate time.Time `asn1:"generalized"` + NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` + SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` +} + +type revokedInfo struct { + RevocationTime time.Time `asn1:"generalized"` + Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` +} + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 
3, 4} +) + +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +var signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash +}{ + {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, + {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType x509.PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = x509.RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.RawValue{ + Tag: 5, + } + + case *ecdsa.PublicKey: + pubType = x509.ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + default: + err = errors.New("x509: only RSA and ECDSA keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// TODO(agl): this is taken from crypto/x509 and so should probably be exported +// from crypto/x509 or crypto/x509/pkix. 
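For concreteness, signingParamsForPublicKey above maps an ECDSA P-256 key with no requested algorithm to SHA-256 and ecdsa-with-SHA256 via the curve switch. A hypothetical in-package test sketch (it calls the unexported function, so it would live in this package, assuming the file's imports plus testing):

func TestSigningParamsP256(t *testing.T) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatal(err)
	}
	// A zero requestedSigAlgo means "pick the default for this key type".
	hashFunc, sigAlgo, err := signingParamsForPublicKey(key.Public(), 0)
	if err != nil {
		t.Fatal(err)
	}
	if hashFunc != crypto.SHA256 || !sigAlgo.Algorithm.Equal(oidSignatureECDSAWithSHA256) {
		t.Errorf("got (%v, %v)", hashFunc, sigAlgo.Algorithm)
	}
}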
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { + for _, details := range signatureAlgorithmDetails { + if oid.Equal(details.oid) { + return details.algo + } + } + return x509.UnknownSignatureAlgorithm +} + +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target) { + return hash + } + } + return crypto.Hash(0) +} + +func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { + for hash, oid := range hashOIDs { + if hash == target { + return oid + } + } + return nil +} + +// This is the exposed reflection of the internal OCSP structures. + +// The status values that can be expressed in OCSP. See RFC 6960. +// These are used for the Response.Status field. +const ( + // Good means that the certificate is valid. + Good = 0 + // Revoked means that the certificate has been deliberately revoked. + Revoked = 1 + // Unknown means that the OCSP responder doesn't know about the certificate. + Unknown = 2 + // ServerFailed is unused and was never used (see + // https://go-review.googlesource.com/#/c/18944). ParseResponse will + // return a ResponseError when an error response is parsed. + ServerFailed = 3 +) + +// The enumerated reasons for revoking a certificate. See RFC 5280. +const ( + Unspecified = 0 + KeyCompromise = 1 + CACompromise = 2 + AffiliationChanged = 3 + Superseded = 4 + CessationOfOperation = 5 + CertificateHold = 6 + + RemoveFromCRL = 8 + PrivilegeWithdrawn = 9 + AACompromise = 10 +) + +// Request represents an OCSP request. See RFC 6960. +type Request struct { + HashAlgorithm crypto.Hash + IssuerNameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// Marshal marshals the OCSP request to ASN.1 DER encoded form. +func (req *Request) Marshal() ([]byte, error) { + hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm) + if hashAlg == nil { + return nil, errors.New("Unknown hash algorithm") + } + return asn1.Marshal(ocspRequest{ + tbsRequest{ + Version: 0, + RequestList: []request{ + { + Cert: certID{ + pkix.AlgorithmIdentifier{ + Algorithm: hashAlg, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + req.IssuerNameHash, + req.IssuerKeyHash, + req.SerialNumber, + }, + }, + }, + }, + }) +} + +// Response represents an OCSP response containing a single SingleResponse. See +// RFC 6960. +type Response struct { + Raw []byte + + // Status is one of {Good, Revoked, Unknown} + Status int + SerialNumber *big.Int + ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time + RevocationReason int + Certificate *x509.Certificate + // TBSResponseData contains the raw bytes of the signed response. If + // Certificate is nil then this can be used to verify Signature. + TBSResponseData []byte + Signature []byte + SignatureAlgorithm x509.SignatureAlgorithm + + // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash. + // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512. + // If zero, the default is crypto.SHA1. + IssuerHash crypto.Hash + + // RawResponderName optionally contains the DER-encoded subject of the + // responder certificate. Exactly one of RawResponderName and + // ResponderKeyHash is set. + RawResponderName []byte + // ResponderKeyHash optionally contains the SHA-1 hash of the + // responder's public key. Exactly one of RawResponderName and + // ResponderKeyHash is set. 
+ ResponderKeyHash []byte + + // Extensions contains raw X.509 extensions from the singleExtensions field + // of the OCSP response. When parsing certificates, this can be used to + // extract non-critical extensions that are not parsed by this package. When + // marshaling OCSP responses, the Extensions field is ignored, see + // ExtraExtensions. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any marshaled + // OCSP response (in the singleExtensions field). Values override any + // extensions that would otherwise be produced based on the other fields. The + // ExtraExtensions field is not populated when parsing certificates, see + // Extensions. + ExtraExtensions []pkix.Extension +} + +// These are pre-serialized error responses for the various non-success codes +// defined by OCSP. The Unauthorized code in particular can be used by an OCSP +// responder that supports only pre-signed responses as a response to requests +// for certificates with unknown status. See RFC 5019. +var ( + MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} + InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} + TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} + SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} + UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} +) + +// CheckSignatureFrom checks that the signature in resp is a valid signature +// from issuer. This should only be used if resp.Certificate is nil. Otherwise, +// the OCSP response contained an intermediate certificate that created the +// signature. That signature is checked by ParseResponse and only +// resp.Certificate remains to be validated. +func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { + return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) +} + +// ParseError results from an invalid OCSP response. +type ParseError string + +func (p ParseError) Error() string { + return string(p) +} + +// ParseRequest parses an OCSP request in DER form. It only supports +// requests for a single certificate. Signed requests are not supported. +// If a request includes a signature, it will result in a ParseError. +func ParseRequest(bytes []byte) (*Request, error) { + var req ocspRequest + rest, err := asn1.Unmarshal(bytes, &req) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP request") + } + + if len(req.TBSRequest.RequestList) == 0 { + return nil, ParseError("OCSP request contains no request body") + } + innerRequest := req.TBSRequest.RequestList[0] + + hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) + if hashFunc == crypto.Hash(0) { + return nil, ParseError("OCSP request uses unknown hash function") + } + + return &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: innerRequest.Cert.NameHash, + IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, + SerialNumber: innerRequest.Cert.SerialNumber, + }, nil +} + +// ParseResponse parses an OCSP response in DER form. The response must contain +// only one certificate status. To parse the status of a specific certificate +// from a response which may contain multiple statuses, use ParseResponseForCert +// instead. +// +// If the response contains an embedded certificate, then that certificate will +// be used to verify the response signature. 
If the response contains an +// embedded certificate and issuer is not nil, then issuer will be used to verify +// the signature on the embedded certificate. +// +// If the response does not contain an embedded certificate and issuer is not +// nil, then issuer will be used to verify the response signature. +// +// Invalid responses and parse failures will result in a ParseError. +// Error responses will result in a ResponseError. +func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { + return ParseResponseForCert(bytes, nil, issuer) +} + +// ParseResponseForCert acts identically to ParseResponse, except it supports +// parsing responses that contain multiple statuses. If the response contains +// multiple statuses and cert is not nil, then ParseResponseForCert will return +// the first status which contains a matching serial, otherwise it will return an +// error. If cert is nil, then the first status in the response will be returned. +func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) { + var resp responseASN1 + rest, err := asn1.Unmarshal(bytes, &resp) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP response") + } + + if status := ResponseStatus(resp.Status); status != Success { + return nil, ResponseError{status} + } + + if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { + return nil, ParseError("bad OCSP response type") + } + + var basicResp basicResponse + rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP response") + } + + if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 { + return nil, ParseError("OCSP response contains bad number of responses") + } + + var singleResp singleResponse + if cert == nil { + singleResp = basicResp.TBSResponseData.Responses[0] + } else { + match := false + for _, resp := range basicResp.TBSResponseData.Responses { + if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 { + singleResp = resp + match = true + break + } + } + if !match { + return nil, ParseError("no response matching the supplied certificate") + } + } + + ret := &Response{ + Raw: bytes, + TBSResponseData: basicResp.TBSResponseData.Raw, + Signature: basicResp.Signature.RightAlign(), + SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), + Extensions: singleResp.SingleExtensions, + SerialNumber: singleResp.CertID.SerialNumber, + ProducedAt: basicResp.TBSResponseData.ProducedAt, + ThisUpdate: singleResp.ThisUpdate, + NextUpdate: singleResp.NextUpdate, + } + + // Handle the ResponderID CHOICE tag. ResponderID can be flattened into + // TBSResponseData once https://go-review.googlesource.com/34503 has been + // released. 
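+	//
+	// For reference, the CHOICE distinguished by the tag switch below is
+	// defined in RFC 6960 as (paraphrased here for review, not upstream
+	// text):
+	//
+	//	ResponderID ::= CHOICE {
+	//	   byName  [1] Name,
+	//	   byKey   [2] KeyHash }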
+	rawResponderID := basicResp.TBSResponseData.RawResponderID
+	switch rawResponderID.Tag {
+	case 1: // Name
+		var rdn pkix.RDNSequence
+		if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 {
+			return nil, ParseError("invalid responder name")
+		}
+		ret.RawResponderName = rawResponderID.Bytes
+	case 2: // KeyHash
+		if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 {
+			return nil, ParseError("invalid responder key hash")
+		}
+	default:
+		return nil, ParseError("invalid responder id tag")
+	}
+
+	if len(basicResp.Certificates) > 0 {
+		// Responders should only send a single certificate (if they
+		// send any) that connects the responder's certificate to the
+		// original issuer. We accept responses with multiple
+		// certificates due to a number of responders sending them[1],
+		// but ignore all but the first.
+		//
+		// [1] https://github.com/golang/go/issues/21527
+		ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
+			return nil, ParseError("bad signature on embedded certificate: " + err.Error())
+		}
+
+		if issuer != nil {
+			if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
+				return nil, ParseError("bad OCSP signature: " + err.Error())
+			}
+		}
+	} else if issuer != nil {
+		if err := ret.CheckSignatureFrom(issuer); err != nil {
+			return nil, ParseError("bad OCSP signature: " + err.Error())
+		}
+	}
+
+	for _, ext := range singleResp.SingleExtensions {
+		if ext.Critical {
+			return nil, ParseError("unsupported critical extension")
+		}
+	}
+
+	for h, oid := range hashOIDs {
+		if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) {
+			ret.IssuerHash = h
+			break
+		}
+	}
+	if ret.IssuerHash == 0 {
+		return nil, ParseError("unsupported issuer hash algorithm")
+	}
+
+	switch {
+	case bool(singleResp.Good):
+		ret.Status = Good
+	case bool(singleResp.Unknown):
+		ret.Status = Unknown
+	default:
+		ret.Status = Revoked
+		ret.RevokedAt = singleResp.Revoked.RevocationTime
+		ret.RevocationReason = int(singleResp.Revoked.Reason)
+	}
+
+	return ret, nil
+}
+
+// RequestOptions contains options for constructing OCSP requests.
+type RequestOptions struct {
+	// Hash contains the hash function that should be used when
+	// constructing the OCSP request. If zero, SHA-1 will be used.
+	Hash crypto.Hash
+}
+
+func (opts *RequestOptions) hash() crypto.Hash {
+	if opts == nil || opts.Hash == 0 {
+		// SHA-1 is nearly universally used in OCSP.
+		return crypto.SHA1
+	}
+	return opts.Hash
+}
+
+// CreateRequest returns a DER-encoded OCSP request for the status of cert. If
+// opts is nil then sensible defaults are used.
+func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
+	hashFunc := opts.hash()
+
+	// OCSP seems to be the only place where these raw hash identifiers are
+	// used.
I took the following from + // http://msdn.microsoft.com/en-us/library/ff635603.aspx + _, ok := hashOIDs[hashFunc] + if !ok { + return nil, x509.ErrUnsupportedAlgorithm + } + + if !hashFunc.Available() { + return nil, x509.ErrUnsupportedAlgorithm + } + h := opts.hash().New() + + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + req := &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: cert.SerialNumber, + } + return req.Marshal() +} + +// CreateResponse returns a DER-encoded OCSP response with the specified contents. +// The fields in the response are populated as follows: +// +// The responder cert is used to populate the responder's name field, and the +// certificate itself is provided alongside the OCSP response signature. +// +// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields. +// +// The template is used to populate the SerialNumber, Status, RevokedAt, +// RevocationReason, ThisUpdate, and NextUpdate fields. +// +// If template.IssuerHash is not set, SHA1 will be used. +// +// The ProducedAt date is automatically set to the current date, to the nearest minute. +func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + if template.IssuerHash == 0 { + template.IssuerHash = crypto.SHA1 + } + hashOID := getOIDFromHashAlgorithm(template.IssuerHash) + if hashOID == nil { + return nil, errors.New("unsupported issuer hash algorithm") + } + + if !template.IssuerHash.Available() { + return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash) + } + h := template.IssuerHash.New() + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + innerResponse := singleResponse{ + CertID: certID{ + HashAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + NameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: template.SerialNumber, + }, + ThisUpdate: template.ThisUpdate.UTC(), + NextUpdate: template.NextUpdate.UTC(), + SingleExtensions: template.ExtraExtensions, + } + + switch template.Status { + case Good: + innerResponse.Good = true + case Unknown: + innerResponse.Unknown = true + case Revoked: + innerResponse.Revoked = revokedInfo{ + RevocationTime: template.RevokedAt.UTC(), + Reason: asn1.Enumerated(template.RevocationReason), + } + } + + rawResponderID := asn1.RawValue{ + Class: 2, // context-specific + Tag: 1, // Name (explicit tag) + IsCompound: true, + Bytes: responderCert.RawSubject, + } + tbsResponseData := responseData{ + Version: 0, + RawResponderID: rawResponderID, + ProducedAt: time.Now().Truncate(time.Minute).UTC(), + Responses: []singleResponse{innerResponse}, + } + + tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) + if err != nil { + return nil, err + } + + hashFunc, signatureAlgorithm, 
err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + responseHash := hashFunc.New() + responseHash.Write(tbsResponseDataDER) + signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc) + if err != nil { + return nil, err + } + + response := basicResponse{ + TBSResponseData: tbsResponseData, + SignatureAlgorithm: signatureAlgorithm, + Signature: asn1.BitString{ + Bytes: signature, + BitLength: 8 * len(signature), + }, + } + if template.Certificate != nil { + response.Certificates = []asn1.RawValue{ + {FullBytes: template.Certificate.Raw}, + } + } + responseDER, err := asn1.Marshal(response) + if err != nil { + return nil, err + } + + return asn1.Marshal(responseASN1{ + Status: asn1.Enumerated(Success), + Response: responseBytes{ + ResponseType: idPKIXOCSPBasic, + Response: responseDER, + }, + }) +} diff --git a/tools/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/tools/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 000000000..75df77406 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,150 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +// +// Deprecated: this package exposes unsafe low-level operations. New applications +// should consider using the AEAD construction in golang.org/x/crypto/chacha20poly1305 +// instead. Existing users should migrate to golang.org/x/crypto/salsa20. +package salsa + +import "math/bits" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. 
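+//
+// A typical use of this core (an illustrative sketch added in review, not
+// upstream code) is NaCl-style XSalsa20 subkey derivation, where the first
+// 16 bytes of a 24-byte nonce are mixed with the long-term key:
+//
+//	var subKey [32]byte
+//	var hNonce [16]byte
+//	copy(hNonce[:], nonce[:16]) // nonce is a hypothetical *[24]byte
+//	HSalsa20(&subKey, &hNonce, key, &Sigma)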
+func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + 
out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} diff --git a/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 000000000..7ec7bb39b --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,201 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "math/bits" + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. +func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= 
bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 000000000..e76b44fe5 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +package salsa + +//go:noescape + +// salsa2020XORKeyStream is implemented in salsa20_amd64.s. +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). 
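+//
+// A minimal usage sketch (added in review; src, nonce, and key are
+// hypothetical names):
+//
+//	var counter [16]byte
+//	copy(counter[:8], nonce[:]) // 8-byte nonce; bytes 8-15 hold the block counter
+//	dst := make([]byte, len(src))
+//	XORKeyStream(dst, src, &counter, &key)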
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s new file mode 100644 index 000000000..3883e0ec2 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -0,0 +1,880 @@ +// Code generated by command: go run salsa20_amd64_asm.go -out ../salsa20_amd64.s -pkg salsa. DO NOT EDIT. + +//go:build amd64 && !purego && gc + +// func salsa2020XORKeyStream(out *byte, in *byte, n uint64, nonce *byte, key *byte) +// Requires: SSE2 +TEXT ·salsa2020XORKeyStream(SB), $456-40 + // This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + MOVQ n+16(FP), DX + MOVQ nonce+24(FP), CX + MOVQ key+32(FP), R8 + MOVQ SP, R12 + ADDQ $0x1f, R12 + ANDQ $-32, R12 + MOVQ DX, R9 + MOVQ CX, DX + MOVQ R8, R10 + CMPQ R9, $0x00 + JBE DONE + MOVL 20(R10), CX + MOVL (R10), R8 + MOVL (DX), AX + MOVL 16(R10), R11 + MOVL CX, (R12) + MOVL R8, 4(R12) + MOVL AX, 8(R12) + MOVL R11, 12(R12) + MOVL 8(DX), CX + MOVL 24(R10), R8 + MOVL 4(R10), AX + MOVL 4(DX), R11 + MOVL CX, 16(R12) + MOVL R8, 20(R12) + MOVL AX, 24(R12) + MOVL R11, 28(R12) + MOVL 12(DX), CX + MOVL 12(R10), DX + MOVL 28(R10), R8 + MOVL 8(R10), AX + MOVL DX, 32(R12) + MOVL CX, 36(R12) + MOVL R8, 40(R12) + MOVL AX, 44(R12) + MOVQ $0x61707865, DX + MOVQ $0x3320646e, CX + MOVQ $0x79622d32, R8 + MOVQ $0x6b206574, AX + MOVL DX, 48(R12) + MOVL CX, 52(R12) + MOVL R8, 56(R12) + MOVL AX, 60(R12) + CMPQ R9, $0x00000100 + JB BYTESBETWEEN1AND255 + MOVOA 48(R12), X0 + PSHUFL $0x55, X0, X1 + PSHUFL $0xaa, X0, X2 + PSHUFL $0xff, X0, X3 + PSHUFL $0x00, X0, X0 + MOVOA X1, 64(R12) + MOVOA X2, 80(R12) + MOVOA X3, 96(R12) + MOVOA X0, 112(R12) + MOVOA (R12), X0 + PSHUFL $0xaa, X0, X1 + PSHUFL $0xff, X0, X2 + PSHUFL $0x00, X0, X3 + PSHUFL $0x55, X0, X0 + MOVOA X1, 128(R12) + MOVOA X2, 144(R12) + MOVOA X3, 160(R12) + MOVOA X0, 176(R12) + MOVOA 16(R12), X0 + PSHUFL $0xff, X0, X1 + PSHUFL $0x55, X0, X2 + PSHUFL $0xaa, X0, X0 + MOVOA X1, 192(R12) + MOVOA X2, 208(R12) + MOVOA X0, 224(R12) + MOVOA 32(R12), X0 + PSHUFL $0x00, X0, X1 + PSHUFL $0xaa, X0, X2 + PSHUFL $0xff, X0, X0 + MOVOA X1, 240(R12) + MOVOA X2, 256(R12) + MOVOA X0, 272(R12) + +BYTESATLEAST256: + MOVL 16(R12), DX + MOVL 36(R12), CX + MOVL DX, 288(R12) + MOVL CX, 304(R12) + SHLQ $0x20, CX + ADDQ CX, DX + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 292(R12) + MOVL CX, 308(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 296(R12) + MOVL CX, 312(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 300(R12) + MOVL CX, 316(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 16(R12) + MOVL CX, 36(R12) + MOVQ R9, 352(R12) + MOVQ $0x00000014, DX + MOVOA 64(R12), X0 + MOVOA 80(R12), X1 + MOVOA 96(R12), X2 + MOVOA 256(R12), X3 + MOVOA 272(R12), X4 + MOVOA 128(R12), X5 + MOVOA 144(R12), X6 + MOVOA 176(R12), X7 + MOVOA 192(R12), X8 + MOVOA 208(R12), X9 + MOVOA 224(R12), X10 + MOVOA 304(R12), X11 + MOVOA 112(R12), X12 + MOVOA 160(R12), X13 + MOVOA 240(R12), X14 + MOVOA 288(R12), X15 + +MAINLOOP1: + MOVOA X1, 320(R12) + MOVOA X2, 336(R12) + MOVOA X13, X1 + PADDL X12, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X14 + PSRLL $0x19, X2 + PXOR X2, X14 + MOVOA X7, X1 + PADDL X0, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, 
X11 + PSRLL $0x19, X2 + PXOR X2, X11 + MOVOA X12, X1 + PADDL X14, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X15 + PSRLL $0x17, X2 + PXOR X2, X15 + MOVOA X0, X1 + PADDL X11, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X9 + PSRLL $0x17, X2 + PXOR X2, X9 + MOVOA X14, X1 + PADDL X15, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X13 + PSRLL $0x13, X2 + PXOR X2, X13 + MOVOA X11, X1 + PADDL X9, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X7 + PSRLL $0x13, X2 + PXOR X2, X7 + MOVOA X15, X1 + PADDL X13, X1 + MOVOA X1, X2 + PSLLL $0x12, X1 + PXOR X1, X12 + PSRLL $0x0e, X2 + PXOR X2, X12 + MOVOA 320(R12), X1 + MOVOA X12, 320(R12) + MOVOA X9, X2 + PADDL X7, X2 + MOVOA X2, X12 + PSLLL $0x12, X2 + PXOR X2, X0 + PSRLL $0x0e, X12 + PXOR X12, X0 + MOVOA X5, X2 + PADDL X1, X2 + MOVOA X2, X12 + PSLLL $0x07, X2 + PXOR X2, X3 + PSRLL $0x19, X12 + PXOR X12, X3 + MOVOA 336(R12), X2 + MOVOA X0, 336(R12) + MOVOA X6, X0 + PADDL X2, X0 + MOVOA X0, X12 + PSLLL $0x07, X0 + PXOR X0, X4 + PSRLL $0x19, X12 + PXOR X12, X4 + MOVOA X1, X0 + PADDL X3, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X10 + PSRLL $0x17, X12 + PXOR X12, X10 + MOVOA X2, X0 + PADDL X4, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X8 + PSRLL $0x17, X12 + PXOR X12, X8 + MOVOA X3, X0 + PADDL X10, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X5 + PSRLL $0x13, X12 + PXOR X12, X5 + MOVOA X4, X0 + PADDL X8, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X6 + PSRLL $0x13, X12 + PXOR X12, X6 + MOVOA X10, X0 + PADDL X5, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X1 + PSRLL $0x0e, X12 + PXOR X12, X1 + MOVOA 320(R12), X0 + MOVOA X1, 320(R12) + MOVOA X4, X1 + PADDL X0, X1 + MOVOA X1, X12 + PSLLL $0x07, X1 + PXOR X1, X7 + PSRLL $0x19, X12 + PXOR X12, X7 + MOVOA X8, X1 + PADDL X6, X1 + MOVOA X1, X12 + PSLLL $0x12, X1 + PXOR X1, X2 + PSRLL $0x0e, X12 + PXOR X12, X2 + MOVOA 336(R12), X12 + MOVOA X2, 336(R12) + MOVOA X14, X1 + PADDL X12, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X5 + PSRLL $0x19, X2 + PXOR X2, X5 + MOVOA X0, X1 + PADDL X7, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X10 + PSRLL $0x17, X2 + PXOR X2, X10 + MOVOA X12, X1 + PADDL X5, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X8 + PSRLL $0x17, X2 + PXOR X2, X8 + MOVOA X7, X1 + PADDL X10, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X4 + PSRLL $0x13, X2 + PXOR X2, X4 + MOVOA X5, X1 + PADDL X8, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X14 + PSRLL $0x13, X2 + PXOR X2, X14 + MOVOA X10, X1 + PADDL X4, X1 + MOVOA X1, X2 + PSLLL $0x12, X1 + PXOR X1, X0 + PSRLL $0x0e, X2 + PXOR X2, X0 + MOVOA 320(R12), X1 + MOVOA X0, 320(R12) + MOVOA X8, X0 + PADDL X14, X0 + MOVOA X0, X2 + PSLLL $0x12, X0 + PXOR X0, X12 + PSRLL $0x0e, X2 + PXOR X2, X12 + MOVOA X11, X0 + PADDL X1, X0 + MOVOA X0, X2 + PSLLL $0x07, X0 + PXOR X0, X6 + PSRLL $0x19, X2 + PXOR X2, X6 + MOVOA 336(R12), X2 + MOVOA X12, 336(R12) + MOVOA X3, X0 + PADDL X2, X0 + MOVOA X0, X12 + PSLLL $0x07, X0 + PXOR X0, X13 + PSRLL $0x19, X12 + PXOR X12, X13 + MOVOA X1, X0 + PADDL X6, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X15 + PSRLL $0x17, X12 + PXOR X12, X15 + MOVOA X2, X0 + PADDL X13, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X9 + PSRLL $0x17, X12 + PXOR X12, X9 + MOVOA X6, X0 + PADDL X15, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X11 + PSRLL $0x13, X12 + PXOR X12, X11 + MOVOA X13, X0 + PADDL X9, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X3 + PSRLL $0x13, X12 + PXOR X12, X3 + MOVOA X15, X0 + PADDL X11, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X1 + PSRLL $0x0e, X12 + PXOR X12, X1 + MOVOA X9, 
X0 + PADDL X3, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X2 + PSRLL $0x0e, X12 + PXOR X12, X2 + MOVOA 320(R12), X12 + MOVOA 336(R12), X0 + SUBQ $0x02, DX + JA MAINLOOP1 + PADDL 112(R12), X12 + PADDL 176(R12), X7 + PADDL 224(R12), X10 + PADDL 272(R12), X4 + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL (SI), DX + XORL 4(SI), CX + XORL 8(SI), R8 + XORL 12(SI), R9 + MOVL DX, (DI) + MOVL CX, 4(DI) + MOVL R8, 8(DI) + MOVL R9, 12(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL 64(SI), DX + XORL 68(SI), CX + XORL 72(SI), R8 + XORL 76(SI), R9 + MOVL DX, 64(DI) + MOVL CX, 68(DI) + MOVL R8, 72(DI) + MOVL R9, 76(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL 128(SI), DX + XORL 132(SI), CX + XORL 136(SI), R8 + XORL 140(SI), R9 + MOVL DX, 128(DI) + MOVL CX, 132(DI) + MOVL R8, 136(DI) + MOVL R9, 140(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + XORL 192(SI), DX + XORL 196(SI), CX + XORL 200(SI), R8 + XORL 204(SI), R9 + MOVL DX, 192(DI) + MOVL CX, 196(DI) + MOVL R8, 200(DI) + MOVL R9, 204(DI) + PADDL 240(R12), X14 + PADDL 64(R12), X0 + PADDL 128(R12), X5 + PADDL 192(R12), X8 + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 16(SI), DX + XORL 20(SI), CX + XORL 24(SI), R8 + XORL 28(SI), R9 + MOVL DX, 16(DI) + MOVL CX, 20(DI) + MOVL R8, 24(DI) + MOVL R9, 28(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 80(SI), DX + XORL 84(SI), CX + XORL 88(SI), R8 + XORL 92(SI), R9 + MOVL DX, 80(DI) + MOVL CX, 84(DI) + MOVL R8, 88(DI) + MOVL R9, 92(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 144(SI), DX + XORL 148(SI), CX + XORL 152(SI), R8 + XORL 156(SI), R9 + MOVL DX, 144(DI) + MOVL CX, 148(DI) + MOVL R8, 152(DI) + MOVL R9, 156(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + XORL 208(SI), DX + XORL 212(SI), CX + XORL 216(SI), R8 + XORL 220(SI), R9 + MOVL DX, 208(DI) + MOVL CX, 212(DI) + MOVL R8, 216(DI) + MOVL R9, 220(DI) + PADDL 288(R12), X15 + PADDL 304(R12), X11 + PADDL 80(R12), X1 + PADDL 144(R12), X6 + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 32(SI), DX + XORL 36(SI), CX + XORL 40(SI), R8 + XORL 44(SI), R9 + MOVL DX, 32(DI) + MOVL CX, 36(DI) + MOVL R8, 40(DI) + MOVL R9, 44(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 96(SI), DX + XORL 100(SI), CX + XORL 104(SI), R8 + XORL 108(SI), R9 + MOVL DX, 96(DI) + MOVL CX, 100(DI) + MOVL R8, 104(DI) + MOVL R9, 108(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 160(SI), DX + XORL 164(SI), CX + XORL 168(SI), R8 + XORL 172(SI), R9 + MOVL DX, 160(DI) + MOVL CX, 164(DI) + MOVL R8, 168(DI) + MOVL R9, 172(DI) + MOVD 
X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + XORL 224(SI), DX + XORL 228(SI), CX + XORL 232(SI), R8 + XORL 236(SI), R9 + MOVL DX, 224(DI) + MOVL CX, 228(DI) + MOVL R8, 232(DI) + MOVL R9, 236(DI) + PADDL 160(R12), X13 + PADDL 208(R12), X9 + PADDL 256(R12), X3 + PADDL 96(R12), X2 + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 48(SI), DX + XORL 52(SI), CX + XORL 56(SI), R8 + XORL 60(SI), R9 + MOVL DX, 48(DI) + MOVL CX, 52(DI) + MOVL R8, 56(DI) + MOVL R9, 60(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 112(SI), DX + XORL 116(SI), CX + XORL 120(SI), R8 + XORL 124(SI), R9 + MOVL DX, 112(DI) + MOVL CX, 116(DI) + MOVL R8, 120(DI) + MOVL R9, 124(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 176(SI), DX + XORL 180(SI), CX + XORL 184(SI), R8 + XORL 188(SI), R9 + MOVL DX, 176(DI) + MOVL CX, 180(DI) + MOVL R8, 184(DI) + MOVL R9, 188(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + XORL 240(SI), DX + XORL 244(SI), CX + XORL 248(SI), R8 + XORL 252(SI), R9 + MOVL DX, 240(DI) + MOVL CX, 244(DI) + MOVL R8, 248(DI) + MOVL R9, 252(DI) + MOVQ 352(R12), R9 + SUBQ $0x00000100, R9 + ADDQ $0x00000100, SI + ADDQ $0x00000100, DI + CMPQ R9, $0x00000100 + JAE BYTESATLEAST256 + CMPQ R9, $0x00 + JBE DONE + +BYTESBETWEEN1AND255: + CMPQ R9, $0x40 + JAE NOCOPY + MOVQ DI, DX + LEAQ 360(R12), DI + MOVQ R9, CX + REP; MOVSB + LEAQ 360(R12), DI + LEAQ 360(R12), SI + +NOCOPY: + MOVQ R9, 352(R12) + MOVOA 48(R12), X0 + MOVOA (R12), X1 + MOVOA 16(R12), X2 + MOVOA 32(R12), X3 + MOVOA X1, X4 + MOVQ $0x00000014, CX + +MAINLOOP2: + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X3 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X3, X3 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X1 + PSHUFL $0x4e, X2, X2 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X1, X1 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X1 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X1, X1 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X3 + PSHUFL $0x4e, X2, X2 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X3, X3 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X3 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X3, X3 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X1 + PSHUFL $0x4e, X2, X2 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X1, X1 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X1 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL 
$0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X1, X1 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X3 + PSHUFL $0x4e, X2, X2 + PXOR X6, X3 + SUBQ $0x04, CX + PADDL X3, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PXOR X7, X7 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X3, X3 + PXOR X6, X0 + JA MAINLOOP2 + PADDL 48(R12), X0 + PADDL (R12), X1 + PADDL 16(R12), X2 + PADDL 32(R12), X3 + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL (SI), CX + XORL 48(SI), R8 + XORL 32(SI), R9 + XORL 16(SI), AX + MOVL CX, (DI) + MOVL R8, 48(DI) + MOVL R9, 32(DI) + MOVL AX, 16(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL 20(SI), CX + XORL 4(SI), R8 + XORL 52(SI), R9 + XORL 36(SI), AX + MOVL CX, 20(DI) + MOVL R8, 4(DI) + MOVL R9, 52(DI) + MOVL AX, 36(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL 40(SI), CX + XORL 24(SI), R8 + XORL 8(SI), R9 + XORL 56(SI), AX + MOVL CX, 40(DI) + MOVL R8, 24(DI) + MOVL R9, 8(DI) + MOVL AX, 56(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + XORL 60(SI), CX + XORL 44(SI), R8 + XORL 28(SI), R9 + XORL 12(SI), AX + MOVL CX, 60(DI) + MOVL R8, 44(DI) + MOVL R9, 28(DI) + MOVL AX, 12(DI) + MOVQ 352(R12), R9 + MOVL 16(R12), CX + MOVL 36(R12), R8 + ADDQ $0x01, CX + SHLQ $0x20, R8 + ADDQ R8, CX + MOVQ CX, R8 + SHRQ $0x20, R8 + MOVL CX, 16(R12) + MOVL R8, 36(R12) + CMPQ R9, $0x40 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI, SI + MOVQ DX, DI + MOVQ R9, CX + REP; MOVSB + +BYTESATLEAST64: +DONE: + RET + +BYTESATLEAST65: + SUBQ $0x40, R9 + ADDQ $0x40, DI + ADDQ $0x40, SI + JMP BYTESBETWEEN1AND255 diff --git a/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go new file mode 100644 index 000000000..9448760f2 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package salsa + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + genericXORKeyStream(out, in, counter, key) +} diff --git a/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 000000000..e5cdb9a25 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,233 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "math/bits" + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. 
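+//
+// The unrolled double rounds below repeatedly apply the Salsa20
+// quarter-round which, written as an equivalent helper (an editorial
+// illustration, not part of this file), is:
+//
+//	func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
+//		b ^= bits.RotateLeft32(a+d, 7)
+//		c ^= bits.RotateLeft32(b+a, 9)
+//		d ^= bits.RotateLeft32(c+b, 13)
+//		a ^= bits.RotateLeft32(d+c, 18)
+//		return a, b, c, d
+//	}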
+func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] 
= byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// genericXORKeyStream is the generic implementation of XORKeyStream to be used +// when no assembly implementation is available. +func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key, &Sigma) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 8; i < 16; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key, &Sigma) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/tools/vendor/golang.org/x/crypto/sha3/doc.go b/tools/vendor/golang.org/x/crypto/sha3/doc.go new file mode 100644 index 000000000..bbf391fe6 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/doc.go @@ -0,0 +1,66 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// All types in this package also implement [encoding.BinaryMarshaler], +// [encoding.BinaryAppender] and [encoding.BinaryUnmarshaler] to marshal and +// unmarshal the internal state of the hash. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// # Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. 
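+//
+// For example (an illustrative sketch added in review; NewShake256 is
+// defined elsewhere in this package, outside this hunk):
+//
+//	h := NewShake256()
+//	h.Write(secretKey)
+//	h.Write(message)
+//	mac := make([]byte, 32)
+//	h.Read(mac)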
+// +// # Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// # The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 diff --git a/tools/vendor/golang.org/x/crypto/sha3/hashes.go b/tools/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 000000000..31fffbe04 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,128 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +import ( + "crypto" + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() hash.Hash { + return new224() +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() hash.Hash { + return new256() +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. 
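+//
+// Usage sketch (added in review; the same pattern applies to the other New*
+// constructors in this file):
+//
+//	h := New384()
+//	h.Write(data)     // data is a hypothetical []byte
+//	sum := h.Sum(nil) // 48-byte digest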
+func New384() hash.Hash { + return new384() +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() hash.Hash { + return new512() +} + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + +const ( + dsbyteSHA3 = 0b00000110 + dsbyteKeccak = 0b00000001 + dsbyteShake = 0b00011111 + dsbyteCShake = 0b00000100 + + // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in + // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. + rateK256 = (1600 - 256) / 8 + rateK448 = (1600 - 448) / 8 + rateK512 = (1600 - 512) / 8 + rateK768 = (1600 - 768) / 8 + rateK1024 = (1600 - 1024) / 8 +) + +func new224Generic() *state { + return &state{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3} +} + +func new256Generic() *state { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3} +} + +func new384Generic() *state { + return &state{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3} +} + +func new512Generic() *state { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3} +} + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} +} + +// NewLegacyKeccak512 creates a new Keccak-512 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} +} + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. +func Sum384(data []byte) (digest [48]byte) { + h := New384() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/tools/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/tools/vendor/golang.org/x/crypto/sha3/hashes_noasm.go new file mode 100644 index 000000000..9d85fb621 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/hashes_noasm.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !gc || purego || !s390x + +package sha3 + +func new224() *state { + return new224Generic() +} + +func new256() *state { + return new256Generic() +} + +func new384() *state { + return new384Generic() +} + +func new512() *state { + return new512Generic() +} diff --git a/tools/vendor/golang.org/x/crypto/sha3/keccakf.go b/tools/vendor/golang.org/x/crypto/sha3/keccakf.go new file mode 100644 index 000000000..ce48b1dd3 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -0,0 +1,414 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package sha3 + +import "math/bits" + +// rc stores the round constants for use in the ι step. +var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. 
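+	// (Reviewer note, not upstream text: the "5 steps" are the θ, ρ, π,
+	// χ, and ι steps of the Keccak round function. Below, bc0..bc4 hold
+	// the θ column parities and d0..d4 the θ adjustments; the
+	// bits.RotateLeft64 calls implement ρ, the shuffled indices implement
+	// π, the bc0^(bc2&^bc1) pattern is χ, and XORing rc[i] into a[0] is ι.)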
+
+		// Round 1
+		bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+		bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+		bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+		bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+		bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+		d0 = bc4 ^ (bc1<<1 | bc1>>63)
+		d1 = bc0 ^ (bc2<<1 | bc2>>63)
+		d2 = bc1 ^ (bc3<<1 | bc3>>63)
+		d3 = bc2 ^ (bc4<<1 | bc4>>63)
+		d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+		bc0 = a[0] ^ d0
+		t = a[6] ^ d1
+		bc1 = bits.RotateLeft64(t, 44)
+		t = a[12] ^ d2
+		bc2 = bits.RotateLeft64(t, 43)
+		t = a[18] ^ d3
+		bc3 = bits.RotateLeft64(t, 21)
+		t = a[24] ^ d4
+		bc4 = bits.RotateLeft64(t, 14)
+		a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
+		a[6] = bc1 ^ (bc3 &^ bc2)
+		a[12] = bc2 ^ (bc4 &^ bc3)
+		a[18] = bc3 ^ (bc0 &^ bc4)
+		a[24] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[10] ^ d0
+		bc2 = bits.RotateLeft64(t, 3)
+		t = a[16] ^ d1
+		bc3 = bits.RotateLeft64(t, 45)
+		t = a[22] ^ d2
+		bc4 = bits.RotateLeft64(t, 61)
+		t = a[3] ^ d3
+		bc0 = bits.RotateLeft64(t, 28)
+		t = a[9] ^ d4
+		bc1 = bits.RotateLeft64(t, 20)
+		a[10] = bc0 ^ (bc2 &^ bc1)
+		a[16] = bc1 ^ (bc3 &^ bc2)
+		a[22] = bc2 ^ (bc4 &^ bc3)
+		a[3] = bc3 ^ (bc0 &^ bc4)
+		a[9] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[20] ^ d0
+		bc4 = bits.RotateLeft64(t, 18)
+		t = a[1] ^ d1
+		bc0 = bits.RotateLeft64(t, 1)
+		t = a[7] ^ d2
+		bc1 = bits.RotateLeft64(t, 6)
+		t = a[13] ^ d3
+		bc2 = bits.RotateLeft64(t, 25)
+		t = a[19] ^ d4
+		bc3 = bits.RotateLeft64(t, 8)
+		a[20] = bc0 ^ (bc2 &^ bc1)
+		a[1] = bc1 ^ (bc3 &^ bc2)
+		a[7] = bc2 ^ (bc4 &^ bc3)
+		a[13] = bc3 ^ (bc0 &^ bc4)
+		a[19] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[5] ^ d0
+		bc1 = bits.RotateLeft64(t, 36)
+		t = a[11] ^ d1
+		bc2 = bits.RotateLeft64(t, 10)
+		t = a[17] ^ d2
+		bc3 = bits.RotateLeft64(t, 15)
+		t = a[23] ^ d3
+		bc4 = bits.RotateLeft64(t, 56)
+		t = a[4] ^ d4
+		bc0 = bits.RotateLeft64(t, 27)
+		a[5] = bc0 ^ (bc2 &^ bc1)
+		a[11] = bc1 ^ (bc3 &^ bc2)
+		a[17] = bc2 ^ (bc4 &^ bc3)
+		a[23] = bc3 ^ (bc0 &^ bc4)
+		a[4] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[15] ^ d0
+		bc3 = bits.RotateLeft64(t, 41)
+		t = a[21] ^ d1
+		bc4 = bits.RotateLeft64(t, 2)
+		t = a[2] ^ d2
+		bc0 = bits.RotateLeft64(t, 62)
+		t = a[8] ^ d3
+		bc1 = bits.RotateLeft64(t, 55)
+		t = a[14] ^ d4
+		bc2 = bits.RotateLeft64(t, 39)
+		a[15] = bc0 ^ (bc2 &^ bc1)
+		a[21] = bc1 ^ (bc3 &^ bc2)
+		a[2] = bc2 ^ (bc4 &^ bc3)
+		a[8] = bc3 ^ (bc0 &^ bc4)
+		a[14] = bc4 ^ (bc1 &^ bc0)
+
+		// Round 2
+		bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+		bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+		bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+		bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+		bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+		d0 = bc4 ^ (bc1<<1 | bc1>>63)
+		d1 = bc0 ^ (bc2<<1 | bc2>>63)
+		d2 = bc1 ^ (bc3<<1 | bc3>>63)
+		d3 = bc2 ^ (bc4<<1 | bc4>>63)
+		d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+		bc0 = a[0] ^ d0
+		t = a[16] ^ d1
+		bc1 = bits.RotateLeft64(t, 44)
+		t = a[7] ^ d2
+		bc2 = bits.RotateLeft64(t, 43)
+		t = a[23] ^ d3
+		bc3 = bits.RotateLeft64(t, 21)
+		t = a[14] ^ d4
+		bc4 = bits.RotateLeft64(t, 14)
+		a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
+		a[16] = bc1 ^ (bc3 &^ bc2)
+		a[7] = bc2 ^ (bc4 &^ bc3)
+		a[23] = bc3 ^ (bc0 &^ bc4)
+		a[14] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[20] ^ d0
+		bc2 = bits.RotateLeft64(t, 3)
+		t = a[11] ^ d1
+		bc3 = bits.RotateLeft64(t, 45)
+		t = a[2] ^ d2
+		bc4 = bits.RotateLeft64(t, 61)
+		t = a[18] ^ d3
+		bc0 = bits.RotateLeft64(t, 28)
+		t = a[9] ^ d4
+		bc1 = bits.RotateLeft64(t, 20)
+		a[20] = bc0 ^ (bc2 &^ bc1)
+		a[11] = bc1 ^ (bc3 &^ bc2)
+		a[2] = bc2 ^ (bc4 &^ bc3)
+		a[18] = bc3 ^ (bc0 &^ bc4)
+		a[9] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[15] ^ d0
+		bc4 = bits.RotateLeft64(t, 18)
+		t = a[6] ^ d1
+		bc0 = bits.RotateLeft64(t, 1)
+		t = a[22] ^ d2
+		bc1 = bits.RotateLeft64(t, 6)
+		t = a[13] ^ d3
+		bc2 = bits.RotateLeft64(t, 25)
+		t = a[4] ^ d4
+		bc3 = bits.RotateLeft64(t, 8)
+		a[15] = bc0 ^ (bc2 &^ bc1)
+		a[6] = bc1 ^ (bc3 &^ bc2)
+		a[22] = bc2 ^ (bc4 &^ bc3)
+		a[13] = bc3 ^ (bc0 &^ bc4)
+		a[4] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[10] ^ d0
+		bc1 = bits.RotateLeft64(t, 36)
+		t = a[1] ^ d1
+		bc2 = bits.RotateLeft64(t, 10)
+		t = a[17] ^ d2
+		bc3 = bits.RotateLeft64(t, 15)
+		t = a[8] ^ d3
+		bc4 = bits.RotateLeft64(t, 56)
+		t = a[24] ^ d4
+		bc0 = bits.RotateLeft64(t, 27)
+		a[10] = bc0 ^ (bc2 &^ bc1)
+		a[1] = bc1 ^ (bc3 &^ bc2)
+		a[17] = bc2 ^ (bc4 &^ bc3)
+		a[8] = bc3 ^ (bc0 &^ bc4)
+		a[24] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[5] ^ d0
+		bc3 = bits.RotateLeft64(t, 41)
+		t = a[21] ^ d1
+		bc4 = bits.RotateLeft64(t, 2)
+		t = a[12] ^ d2
+		bc0 = bits.RotateLeft64(t, 62)
+		t = a[3] ^ d3
+		bc1 = bits.RotateLeft64(t, 55)
+		t = a[19] ^ d4
+		bc2 = bits.RotateLeft64(t, 39)
+		a[5] = bc0 ^ (bc2 &^ bc1)
+		a[21] = bc1 ^ (bc3 &^ bc2)
+		a[12] = bc2 ^ (bc4 &^ bc3)
+		a[3] = bc3 ^ (bc0 &^ bc4)
+		a[19] = bc4 ^ (bc1 &^ bc0)
+
+		// Round 3
+		bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+		bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+		bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+		bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+		bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+		d0 = bc4 ^ (bc1<<1 | bc1>>63)
+		d1 = bc0 ^ (bc2<<1 | bc2>>63)
+		d2 = bc1 ^ (bc3<<1 | bc3>>63)
+		d3 = bc2 ^ (bc4<<1 | bc4>>63)
+		d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+		bc0 = a[0] ^ d0
+		t = a[11] ^ d1
+		bc1 = bits.RotateLeft64(t, 44)
+		t = a[22] ^ d2
+		bc2 = bits.RotateLeft64(t, 43)
+		t = a[8] ^ d3
+		bc3 = bits.RotateLeft64(t, 21)
+		t = a[19] ^ d4
+		bc4 = bits.RotateLeft64(t, 14)
+		a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
+		a[11] = bc1 ^ (bc3 &^ bc2)
+		a[22] = bc2 ^ (bc4 &^ bc3)
+		a[8] = bc3 ^ (bc0 &^ bc4)
+		a[19] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[15] ^ d0
+		bc2 = bits.RotateLeft64(t, 3)
+		t = a[1] ^ d1
+		bc3 = bits.RotateLeft64(t, 45)
+		t = a[12] ^ d2
+		bc4 = bits.RotateLeft64(t, 61)
+		t = a[23] ^ d3
+		bc0 = bits.RotateLeft64(t, 28)
+		t = a[9] ^ d4
+		bc1 = bits.RotateLeft64(t, 20)
+		a[15] = bc0 ^ (bc2 &^ bc1)
+		a[1] = bc1 ^ (bc3 &^ bc2)
+		a[12] = bc2 ^ (bc4 &^ bc3)
+		a[23] = bc3 ^ (bc0 &^ bc4)
+		a[9] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[5] ^ d0
+		bc4 = bits.RotateLeft64(t, 18)
+		t = a[16] ^ d1
+		bc0 = bits.RotateLeft64(t, 1)
+		t = a[2] ^ d2
+		bc1 = bits.RotateLeft64(t, 6)
+		t = a[13] ^ d3
+		bc2 = bits.RotateLeft64(t, 25)
+		t = a[24] ^ d4
+		bc3 = bits.RotateLeft64(t, 8)
+		a[5] = bc0 ^ (bc2 &^ bc1)
+		a[16] = bc1 ^ (bc3 &^ bc2)
+		a[2] = bc2 ^ (bc4 &^ bc3)
+		a[13] = bc3 ^ (bc0 &^ bc4)
+		a[24] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[20] ^ d0
+		bc1 = bits.RotateLeft64(t, 36)
+		t = a[6] ^ d1
+		bc2 = bits.RotateLeft64(t, 10)
+		t = a[17] ^ d2
+		bc3 = bits.RotateLeft64(t, 15)
+		t = a[3] ^ d3
+		bc4 = bits.RotateLeft64(t, 56)
+		t = a[14] ^ d4
+		bc0 = bits.RotateLeft64(t, 27)
+		a[20] = bc0 ^ (bc2 &^ bc1)
+		a[6] = bc1 ^ (bc3 &^ bc2)
+		a[17] = bc2 ^ (bc4 &^ bc3)
+		a[3] = bc3 ^ (bc0 &^ bc4)
+		a[14] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[10] ^ d0
+		bc3 = bits.RotateLeft64(t, 41)
+		t = a[21] ^ d1
+		bc4 = bits.RotateLeft64(t, 2)
+		t = a[7] ^ d2
+		bc0 = bits.RotateLeft64(t, 62)
+		t = a[18] ^ d3
+		bc1 = bits.RotateLeft64(t, 55)
+		t = a[4] ^ d4
+		bc2 = bits.RotateLeft64(t, 39)
+		a[10] = bc0 ^ (bc2 &^ bc1)
+		a[21] = bc1 ^ (bc3 &^ bc2)
+		a[7] = bc2 ^ (bc4 &^ bc3)
+		a[18] = bc3 ^ (bc0 &^ bc4)
+		a[4] = bc4 ^ (bc1 &^ bc0)
+
+		// Round 4
+		bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+		bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+		bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+		bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+		bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+		d0 = bc4 ^ (bc1<<1 | bc1>>63)
+		d1 = bc0 ^ (bc2<<1 | bc2>>63)
+		d2 = bc1 ^ (bc3<<1 | bc3>>63)
+		d3 = bc2 ^ (bc4<<1 | bc4>>63)
+		d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+		bc0 = a[0] ^ d0
+		t = a[1] ^ d1
+		bc1 = bits.RotateLeft64(t, 44)
+		t = a[2] ^ d2
+		bc2 = bits.RotateLeft64(t, 43)
+		t = a[3] ^ d3
+		bc3 = bits.RotateLeft64(t, 21)
+		t = a[4] ^ d4
+		bc4 = bits.RotateLeft64(t, 14)
+		a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
+		a[1] = bc1 ^ (bc3 &^ bc2)
+		a[2] = bc2 ^ (bc4 &^ bc3)
+		a[3] = bc3 ^ (bc0 &^ bc4)
+		a[4] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[5] ^ d0
+		bc2 = bits.RotateLeft64(t, 3)
+		t = a[6] ^ d1
+		bc3 = bits.RotateLeft64(t, 45)
+		t = a[7] ^ d2
+		bc4 = bits.RotateLeft64(t, 61)
+		t = a[8] ^ d3
+		bc0 = bits.RotateLeft64(t, 28)
+		t = a[9] ^ d4
+		bc1 = bits.RotateLeft64(t, 20)
+		a[5] = bc0 ^ (bc2 &^ bc1)
+		a[6] = bc1 ^ (bc3 &^ bc2)
+		a[7] = bc2 ^ (bc4 &^ bc3)
+		a[8] = bc3 ^ (bc0 &^ bc4)
+		a[9] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[10] ^ d0
+		bc4 = bits.RotateLeft64(t, 18)
+		t = a[11] ^ d1
+		bc0 = bits.RotateLeft64(t, 1)
+		t = a[12] ^ d2
+		bc1 = bits.RotateLeft64(t, 6)
+		t = a[13] ^ d3
+		bc2 = bits.RotateLeft64(t, 25)
+		t = a[14] ^ d4
+		bc3 = bits.RotateLeft64(t, 8)
+		a[10] = bc0 ^ (bc2 &^ bc1)
+		a[11] = bc1 ^ (bc3 &^ bc2)
+		a[12] = bc2 ^ (bc4 &^ bc3)
+		a[13] = bc3 ^ (bc0 &^ bc4)
+		a[14] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[15] ^ d0
+		bc1 = bits.RotateLeft64(t, 36)
+		t = a[16] ^ d1
+		bc2 = bits.RotateLeft64(t, 10)
+		t = a[17] ^ d2
+		bc3 = bits.RotateLeft64(t, 15)
+		t = a[18] ^ d3
+		bc4 = bits.RotateLeft64(t, 56)
+		t = a[19] ^ d4
+		bc0 = bits.RotateLeft64(t, 27)
+		a[15] = bc0 ^ (bc2 &^ bc1)
+		a[16] = bc1 ^ (bc3 &^ bc2)
+		a[17] = bc2 ^ (bc4 &^ bc3)
+		a[18] = bc3 ^ (bc0 &^ bc4)
+		a[19] = bc4 ^ (bc1 &^ bc0)
+
+		t = a[20] ^ d0
+		bc3 = bits.RotateLeft64(t, 41)
+		t = a[21] ^ d1
+		bc4 = bits.RotateLeft64(t, 2)
+		t = a[22] ^ d2
+		bc0 = bits.RotateLeft64(t, 62)
+		t = a[23] ^ d3
+		bc1 = bits.RotateLeft64(t, 55)
+		t = a[24] ^ d4
+		bc2 = bits.RotateLeft64(t, 39)
+		a[20] = bc0 ^ (bc2 &^ bc1)
+		a[21] = bc1 ^ (bc3 &^ bc2)
+		a[22] = bc2 ^ (bc4 &^ bc3)
+		a[23] = bc3 ^ (bc0 &^ bc4)
+		a[24] = bc4 ^ (bc1 &^ bc0)
+	}
+}
diff --git a/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
new file mode 100644
index 000000000..b908696be
--- /dev/null
+++ b/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+
+package sha3
+
+// This function is implemented in keccakf_amd64.s.
+
+//go:noescape
+
+func keccakF1600(a *[25]uint64)
diff --git a/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
new file mode 100644
index 000000000..99e2f16e9
--- /dev/null
+++ b/tools/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
@@ -0,0 +1,5419 @@
+// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT.
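For orientation: the unrolled Go rounds above and the generated assembly below both implement the standard Keccak-f[1600] round of FIPS 202, which applies four steps (theta, rho+pi, chi, iota) to a 5x5 state of 64-bit lanes stored as a[x+5*y]. The following non-unrolled sketch is editorial, not part of the vendored files; the names round, rotc, and piln are illustrative.

package sha3sketch // illustrative only; not part of the patch

import "math/bits"

// Standard rho rotation amounts and pi lane order (FIPS 202).
var rotc = [24]int{1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44}
var piln = [24]int{10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1}

// round applies one Keccak-f[1600] round with round constant rc.
func round(a *[25]uint64, rc uint64) {
	// theta: XOR each lane with the parity of the two adjacent columns.
	var c [5]uint64
	for x := 0; x < 5; x++ {
		c[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
	}
	for x := 0; x < 5; x++ {
		d := c[(x+4)%5] ^ bits.RotateLeft64(c[(x+1)%5], 1)
		for y := 0; y < 25; y += 5 {
			a[x+y] ^= d
		}
	}
	// rho + pi: rotate every lane and move it to its permuted slot.
	t := a[1]
	for i := 0; i < 24; i++ {
		j := piln[i]
		t, a[j] = a[j], bits.RotateLeft64(t, rotc[i])
	}
	// chi: within each row, a[x] ^= ^a[x+1] & a[x+2] (AND-NOT is &^ in Go).
	for y := 0; y < 25; y += 5 {
		var row [5]uint64
		copy(row[:], a[y:y+5])
		for x := 0; x < 5; x++ {
			a[y+x] = row[x] ^ (row[(x+2)%5] &^ row[(x+1)%5])
		}
	}
	// iota: fold the round constant into lane (0, 0).
	a[0] ^= rc
}

In the unrolled code above these steps are fused: each bits.RotateLeft64 call is rho applied while gathering the lanes a chi row needs, the shuffled indices encode pi, and rc[i]..rc[i+3] supply iota for the four rounds of each loop iteration.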
+ +//go:build amd64 && !purego && gc + +// func keccakF1600(a *[25]uint64) +TEXT ·keccakF1600(SB), $200-8 + MOVQ a+0(FP), DI + + // Convert the user state into an internal state + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) + + // Execute the KeccakF permutation + MOVQ (DI), SI + MOVQ 8(DI), BP + MOVQ 32(DI), R15 + XORQ 40(DI), SI + XORQ 48(DI), BP + XORQ 72(DI), R15 + XORQ 80(DI), SI + XORQ 88(DI), BP + XORQ 112(DI), R15 + XORQ 120(DI), SI + XORQ 128(DI), BP + XORQ 152(DI), R15 + XORQ 160(DI), SI + XORQ 168(DI), BP + MOVQ 176(DI), DX + MOVQ 184(DI), R8 + XORQ 192(DI), R15 + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + 
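+	// Note (editorial, not generated output): each unrolled round reads all
+	// 25 lanes from one buffer and writes the other, ping-ponging between
+	// the caller's state at 0(DI) and the 200-byte scratch frame at 0(SP),
+	// so no lane is overwritten before every reader has consumed it. The
+	// five "Result" blocks (b, g, k, m, s) each emit one output row; the
+	// MOVQ $const, AX / XORQ AX sequence in "Result b" folds in the round
+	// constant (iota), and the NOTQ conversion at entry keeps selected
+	// lanes complemented so chi can be computed with fewer NOTs.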
+ // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000008082, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ 
R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000000000808a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ 
R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008000, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, 
R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000808b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 
136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ 
R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + 
NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008009, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + 
XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000008a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + 
MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000088, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX 
+ ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080008009, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + 
ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000008000000a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) 
+ XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000008000808b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + 
XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000000000008b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 
184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008089, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ 
R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008003, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, 
AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008002, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 
+ ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000000080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + 
XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000800a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 
88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000008000000a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 
40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ 
R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, 
R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 
104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008008, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + NOP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + NOP + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + NOP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + NOP + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + NOP + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + NOP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, 
R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + NOP + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + NOP + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + NOP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + NOP + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + NOP + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + NOP + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + NOP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Revert the internal state to the user state + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) + RET diff --git a/tools/vendor/golang.org/x/crypto/sha3/sha3.go b/tools/vendor/golang.org/x/crypto/sha3/sha3.go new file mode 100644 index 000000000..6658c4447 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/sha3.go @@ -0,0 +1,244 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "unsafe" + + "golang.org/x/sys/cpu" +) + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +type state struct { + a [1600 / 8]byte // main state of the hash + + // a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR + // into before running the permutation. If squeezing, it's the remaining + // output to produce before running the permutation. + n, rate int + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). 
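As a quick check on the bit arithmetic in the dsbyte comment above, here is a minimal standalone Go sketch — not part of the vendored patch — that rebuilds the two values from the little-endian suffix bits plus the first bit of the pad10*1 rule:

	package main

	import "fmt"

	// dsbyte packs a domain-separation suffix (LSB-first bit order)
	// together with the leading "1" bit of the pad10*1 padding rule.
	func dsbyte(suffix string) byte {
		var b byte
		for i := 0; i < len(suffix); i++ {
			if suffix[i] == '1' {
				b |= 1 << i
			}
		}
		b |= 1 << len(suffix) // first "1" bit of the padding
		return b
	}

	func main() {
		fmt.Printf("SHA-3: %#x  SHAKE: %#x\n", dsbyte("01"), dsbyte("1111")) // 0x6 0x1f
	}

This reproduces the 0x06 and 0x1f constants the comment derives.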
+ // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the buffer indexes, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.n = 0 +} + +func (d *state) clone() *state { + ret := *d + return &ret +} + +// permute applies the KeccakF-1600 permutation. +func (d *state) permute() { + var a *[25]uint64 + if cpu.IsBigEndian { + a = new([25]uint64) + for i := range a { + a[i] = binary.LittleEndian.Uint64(d.a[i*8:]) + } + } else { + a = (*[25]uint64)(unsafe.Pointer(&d.a)) + } + + keccakF1600(a) + d.n = 0 + + if cpu.IsBigEndian { + for i := range a { + binary.LittleEndian.PutUint64(d.a[i*8:], a[i]) + } + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute() { + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in the sponge because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.a[d.n] ^= d.dsbyte + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + d.a[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing +} + +// Write absorbs more data into the hash's state. It panics if any +// output has already been read. +func (d *state) Write(p []byte) (n int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + + n = len(p) + + for len(p) > 0 { + x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p) + d.n += x + p = p[x:] + + // If the sponge is full, apply the permutation. + if d.n == d.rate { + d.permute() + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute() + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + // Apply the permutation if we've squeezed the sponge dry. + if d.n == d.rate { + d.permute() + } + + x := copy(out, d.a[d.n:d.rate]) + d.n += x + out = out[x:] + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. It panics if any output has already been read. +func (d *state) Sum(in []byte) []byte { + if d.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation + dup.Read(hash) + return append(in, hash...) 
+} + +const ( + magicSHA3 = "sha\x08" + magicShake = "sha\x09" + magicCShake = "sha\x0a" + magicKeccak = "sha\x0b" + // magic || rate || main state || n || sponge direction + marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1 +) + +func (d *state) MarshalBinary() ([]byte, error) { + return d.AppendBinary(make([]byte, 0, marshaledSize)) +} + +func (d *state) AppendBinary(b []byte) ([]byte, error) { + switch d.dsbyte { + case dsbyteSHA3: + b = append(b, magicSHA3...) + case dsbyteShake: + b = append(b, magicShake...) + case dsbyteCShake: + b = append(b, magicCShake...) + case dsbyteKeccak: + b = append(b, magicKeccak...) + default: + panic("unknown dsbyte") + } + // rate is at most 168, and n is at most rate. + b = append(b, byte(d.rate)) + b = append(b, d.a[:]...) + b = append(b, byte(d.n), byte(d.state)) + return b, nil +} + +func (d *state) UnmarshalBinary(b []byte) error { + if len(b) != marshaledSize { + return errors.New("sha3: invalid hash state") + } + + magic := string(b[:len(magicSHA3)]) + b = b[len(magicSHA3):] + switch { + case magic == magicSHA3 && d.dsbyte == dsbyteSHA3: + case magic == magicShake && d.dsbyte == dsbyteShake: + case magic == magicCShake && d.dsbyte == dsbyteCShake: + case magic == magicKeccak && d.dsbyte == dsbyteKeccak: + default: + return errors.New("sha3: invalid hash state identifier") + } + + rate := int(b[0]) + b = b[1:] + if rate != d.rate { + return errors.New("sha3: invalid hash state function") + } + + copy(d.a[:], b) + b = b[len(d.a):] + + n, state := int(b[0]), spongeDirection(b[1]) + if n > d.rate { + return errors.New("sha3: invalid hash state") + } + d.n = n + if state != spongeAbsorbing && state != spongeSqueezing { + return errors.New("sha3: invalid hash state") + } + d.state = state + + return nil +} diff --git a/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.go new file mode 100644 index 000000000..00d8034ae --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -0,0 +1,303 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package sha3 + +// This file contains code for using the 'compute intermediate +// message digest' (KIMD) and 'compute last message digest' (KLMD) +// instructions to compute SHA-3 and SHAKE hashes on IBM Z. + +import ( + "hash" + + "golang.org/x/sys/cpu" +) + +// codes represent 7-bit KIMD/KLMD function codes as defined in +// the Principles of Operation. +type code uint64 + +const ( + // function codes for KIMD/KLMD + sha3_224 code = 32 + sha3_256 = 33 + sha3_384 = 34 + sha3_512 = 35 + shake_128 = 36 + shake_256 = 37 + nopad = 0x100 +) + +// kimd is a wrapper for the 'compute intermediate message digest' instruction. +// src must be a multiple of the rate for the given function code. +// +//go:noescape +func kimd(function code, chain *[200]byte, src []byte) + +// klmd is a wrapper for the 'compute last message digest' instruction. +// src padding is handled by the instruction. 
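The MarshalBinary/UnmarshalBinary pair defined above for the generic state makes a partially absorbed hash checkpointable. A hedged usage sketch, assuming the generic (non-s390x) implementation — hence the comma-ok assertions, since the hardware-backed state below does not implement the marshaler interfaces:

	package main

	import (
		"bytes"
		"encoding"
		"fmt"

		"golang.org/x/crypto/sha3"
	)

	func main() {
		h := sha3.New256()
		h.Write([]byte("streamed part one, "))

		// Checkpoint mid-stream.
		m, ok := h.(encoding.BinaryMarshaler)
		if !ok {
			return // hardware-backed state is not marshalable
		}
		snap, _ := m.MarshalBinary()

		// Resume from the snapshot in a fresh hash.
		h2 := sha3.New256()
		if u, ok := h2.(encoding.BinaryUnmarshaler); ok {
			u.UnmarshalBinary(snap)
		}

		h.Write([]byte("part two"))
		h2.Write([]byte("part two"))
		fmt.Println(bytes.Equal(h.Sum(nil), h2.Sum(nil))) // true
	}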
+// +//go:noescape +func klmd(function code, chain *[200]byte, dst, src []byte) + +type asmState struct { + a [200]byte // 1600 bit state + buf []byte // care must be taken to ensure cap(buf) is a multiple of rate + rate int // equivalent to block size + storage [3072]byte // underlying storage for buf + outputLen int // output length for full security + function code // KIMD/KLMD function code + state spongeDirection // whether the sponge is absorbing or squeezing +} + +func newAsmState(function code) *asmState { + var s asmState + s.function = function + switch function { + case sha3_224: + s.rate = 144 + s.outputLen = 28 + case sha3_256: + s.rate = 136 + s.outputLen = 32 + case sha3_384: + s.rate = 104 + s.outputLen = 48 + case sha3_512: + s.rate = 72 + s.outputLen = 64 + case shake_128: + s.rate = 168 + s.outputLen = 32 + case shake_256: + s.rate = 136 + s.outputLen = 64 + default: + panic("sha3: unrecognized function code") + } + + // limit s.buf size to a multiple of s.rate + s.resetBuf() + return &s +} + +func (s *asmState) clone() *asmState { + c := *s + c.buf = c.storage[:len(s.buf):cap(s.buf)] + return &c +} + +// copyIntoBuf copies b into buf. It will panic if there is not enough space to +// store all of b. +func (s *asmState) copyIntoBuf(b []byte) { + bufLen := len(s.buf) + s.buf = s.buf[:len(s.buf)+len(b)] + copy(s.buf[bufLen:], b) +} + +// resetBuf points buf at storage, sets the length to 0 and sets cap to be a +// multiple of the rate. +func (s *asmState) resetBuf() { + max := (cap(s.storage) / s.rate) * s.rate + s.buf = s.storage[:0:max] +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (s *asmState) Write(b []byte) (int, error) { + if s.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + length := len(b) + for len(b) > 0 { + if len(s.buf) == 0 && len(b) >= cap(s.buf) { + // Hash the data directly and push any remaining bytes + // into the buffer. + remainder := len(b) % s.rate + kimd(s.function, &s.a, b[:len(b)-remainder]) + if remainder != 0 { + s.copyIntoBuf(b[len(b)-remainder:]) + } + return length, nil + } + + if len(s.buf) == cap(s.buf) { + // flush the buffer + kimd(s.function, &s.a, s.buf) + s.buf = s.buf[:0] + } + + // copy as much as we can into the buffer + n := len(b) + if len(b) > cap(s.buf)-len(s.buf) { + n = cap(s.buf) - len(s.buf) + } + s.copyIntoBuf(b[:n]) + b = b[n:] + } + return length, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (s *asmState) Read(out []byte) (n int, err error) { + // The 'compute last message digest' instruction only stores the digest + // at the first operand (dst) for SHAKE functions. 
+ if s.function != shake_128 && s.function != shake_256 { + panic("sha3: can only call Read for SHAKE functions") + } + + n = len(out) + + // need to pad if we were absorbing + if s.state == spongeAbsorbing { + s.state = spongeSqueezing + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 + s.buf = s.buf[:0] + return + } + + // write hash into buffer + max := cap(s.buf) + if max > len(out) { + max = (len(out)/s.rate)*s.rate + s.rate + } + klmd(s.function, &s.a, s.buf[:max], s.buf) + s.buf = s.buf[:max] + } + + for len(out) > 0 { + // flush the buffer + if len(s.buf) != 0 { + c := copy(out, s.buf) + out = out[c:] + s.buf = s.buf[c:] + continue + } + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function|nopad, &s.a, out, nil) + return + } + + // write hash into buffer + s.resetBuf() + if cap(s.buf) > len(out) { + s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] + } + klmd(s.function|nopad, &s.a, s.buf, nil) + } + return +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (s *asmState) Sum(b []byte) []byte { + if s.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Copy the state to preserve the original. + a := s.a + + // Hash the buffer. Note that we don't clear it because we + // aren't updating the state. + switch s.function { + case sha3_224, sha3_256, sha3_384, sha3_512: + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) + case shake_128, shake_256: + d := make([]byte, s.outputLen, 64) + klmd(s.function, &a, d, s.buf) + return append(b, d[:s.outputLen]...) + default: + panic("sha3: unknown function") + } +} + +// Reset resets the Hash to its initial state. +func (s *asmState) Reset() { + for i := range s.a { + s.a[i] = 0 + } + s.resetBuf() + s.state = spongeAbsorbing +} + +// Size returns the number of bytes Sum will return. +func (s *asmState) Size() int { + return s.outputLen +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (s *asmState) BlockSize() int { + return s.rate +} + +// Clone returns a copy of the ShakeHash in its current state. +func (s *asmState) Clone() ShakeHash { + return s.clone() +} + +// new224 returns an assembly implementation of SHA3-224 if available, +// otherwise it returns a generic implementation. +func new224() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_224) + } + return new224Generic() +} + +// new256 returns an assembly implementation of SHA3-256 if available, +// otherwise it returns a generic implementation. +func new256() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_256) + } + return new256Generic() +} + +// new384 returns an assembly implementation of SHA3-384 if available, +// otherwise it returns a generic implementation. +func new384() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_384) + } + return new384Generic() +} + +// new512 returns an assembly implementation of SHA3-512 if available, +// otherwise it returns a generic implementation. +func new512() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_512) + } + return new512Generic() +} + +// newShake128 returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns a generic implementation. 
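The constructors above all follow the same pattern: prefer the KIMD/KLMD-backed state when the CPU advertises SHA-3 support, otherwise fall back to the generic sponge. Either way, callers consume SHAKE output through Read rather than Sum; a hedged sketch using the package's public constructor (defined later in this patch):

	package main

	import (
		"fmt"

		"golang.org/x/crypto/sha3"
	)

	func main() {
		h := sha3.NewShake128()
		h.Write([]byte("seed material"))

		var out [64]byte
		h.Read(out[:16]) // squeeze the first 16 bytes
		h.Read(out[16:]) // keep squeezing; the sponge state advances
		fmt.Printf("%x\n", out[:])
		// Note: once Read has been called, a subsequent Write panics.
	}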
+func newShake128() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_128) + } + return newShake128Generic() +} + +// newShake256 returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns a generic implementation. +func newShake256() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_256) + } + return newShake256Generic() +} diff --git a/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.s new file mode 100644 index 000000000..826b862c7 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/tools/vendor/golang.org/x/crypto/sha3/shake.go b/tools/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 000000000..a6b3a4281 --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,193 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "bytes" + "encoding/binary" + "errors" + "hash" + "io" + "math/bits" +) + +// ShakeHash defines the interface to hash functions that support +// arbitrary-length output. When used as a plain [hash.Hash], it +// produces minimum-length outputs that provide full-strength generic +// security. +type ShakeHash interface { + hash.Hash + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error, but subsequent calls to Write or Sum + // will panic. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash +} + +// cSHAKE specific context +type cshakeState struct { + *state // SHA-3 state context and Read/Write operations + + // initBlock is the cSHAKE specific initialization set of bytes. It is initialized + // by newCShake function and stores concatenation of N followed by S, encoded + // by the method specified in 3.3 of [1]. + // It is stored here in order for Reset() to be able to put context into + // initial state. 
+ initBlock []byte +} + +func bytepad(data []byte, rate int) []byte { + out := make([]byte, 0, 9+len(data)+rate-1) + out = append(out, leftEncode(uint64(rate))...) + out = append(out, data...) + if padlen := rate - len(out)%rate; padlen < rate { + out = append(out, make([]byte, padlen)...) + } + return out +} + +func leftEncode(x uint64) []byte { + // Let n be the smallest positive integer for which 2^(8n) > x. + n := (bits.Len64(x) + 7) / 8 + if n == 0 { + n = 1 + } + // Return n || x with n as a byte and x an n bytes in big-endian order. + b := make([]byte, 9) + binary.BigEndian.PutUint64(b[1:], x) + b = b[9-n-1:] + b[0] = byte(n) + return b +} + +func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { + c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} + c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) + c.initBlock = append(c.initBlock, N...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) + c.initBlock = append(c.initBlock, S...) + c.Write(bytepad(c.initBlock, c.rate)) + return &c +} + +// Reset resets the hash to initial state. +func (c *cshakeState) Reset() { + c.state.Reset() + c.Write(bytepad(c.initBlock, c.rate)) +} + +// Clone returns copy of a cSHAKE context within its current state. +func (c *cshakeState) Clone() ShakeHash { + b := make([]byte, len(c.initBlock)) + copy(b, c.initBlock) + return &cshakeState{state: c.clone(), initBlock: b} +} + +// Clone returns copy of SHAKE context within its current state. +func (c *state) Clone() ShakeHash { + return c.clone() +} + +func (c *cshakeState) MarshalBinary() ([]byte, error) { + return c.AppendBinary(make([]byte, 0, marshaledSize+len(c.initBlock))) +} + +func (c *cshakeState) AppendBinary(b []byte) ([]byte, error) { + b, err := c.state.AppendBinary(b) + if err != nil { + return nil, err + } + b = append(b, c.initBlock...) + return b, nil +} + +func (c *cshakeState) UnmarshalBinary(b []byte) error { + if len(b) <= marshaledSize { + return errors.New("sha3: invalid hash state") + } + if err := c.state.UnmarshalBinary(b[:marshaledSize]); err != nil { + return err + } + c.initBlock = bytes.Clone(b[marshaledSize:]) + return nil +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() ShakeHash { + return newShake128() +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { + return newShake256() +} + +func newShake128Generic() *state { + return &state{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake} +} + +func newShake256Generic() *state { + return &state{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake} +} + +// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, +// a customizable variant of SHAKE128. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake128. 
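Before the constructor itself, a hedged usage sketch of the property the doc comment above describes: the same input hashed under two different customization strings S yields unrelated outputs (the string values here are illustrative only):

	package main

	import (
		"fmt"

		"golang.org/x/crypto/sha3"
	)

	func main() {
		msg := []byte("payload")
		for _, domain := range []string{"email-mac", "kdf"} {
			// N empty: plain cSHAKE, separated only by S.
			h := sha3.NewCShake256(nil, []byte(domain))
			h.Write(msg)
			out := make([]byte, 32)
			h.Read(out)
			fmt.Printf("%-9s %x\n", domain, out)
		}
	}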
+func NewCShake128(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake128() + } + return newCShake(N, S, rateK256, 32, dsbyteCShake) +} + +// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, +// a customizable variant of SHAKE256. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake256. +func NewCShake256(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake256() + } + return newCShake(N, S, rateK512, 64, dsbyteCShake) +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. +func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} diff --git a/tools/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/tools/vendor/golang.org/x/crypto/sha3/shake_noasm.go new file mode 100644 index 000000000..4276ba4ab --- /dev/null +++ b/tools/vendor/golang.org/x/crypto/sha3/shake_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func newShake128() *state { + return newShake128Generic() +} + +func newShake256() *state { + return newShake256Generic() +} diff --git a/tools/vendor/golang.org/x/exp/maps/maps.go b/tools/vendor/golang.org/x/exp/maps/maps.go index c25939b92..4a9747ef4 100644 --- a/tools/vendor/golang.org/x/exp/maps/maps.go +++ b/tools/vendor/golang.org/x/exp/maps/maps.go @@ -7,17 +7,13 @@ package maps import "maps" -// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of -// these functions except Keys and Values should be annotated -// (provisionally with "//go:fix inline") so that tools can safely and -// automatically replace calls to exp/maps with calls to std maps by -// inlining them. - // Keys returns the keys of the map m. // The keys will be in an indeterminate order. +// +// The simplest true equivalent using the standard library is: +// +// slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)) func Keys[M ~map[K]V, K comparable, V any](m M) []K { - // The simplest true equivalent using std is: - // return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)). r := make([]K, 0, len(m)) for k := range m { @@ -28,9 +24,11 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K { // Values returns the values of the map m. // The values will be in an indeterminate order. +// +// The simplest true equivalent using the standard library is: +// +// slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)) func Values[M ~map[K]V, K comparable, V any](m M) []V { - // The simplest true equivalent using std is: - // return slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)). r := make([]V, 0, len(m)) for _, v := range m { @@ -41,23 +39,31 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V { // Equal reports whether two maps contain the same key/value pairs. // Values are compared using ==. 
+// +//go:fix inline func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { return maps.Equal(m1, m2) } // EqualFunc is like Equal, but compares values using eq. // Keys are still compared with ==. +// +//go:fix inline func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { return maps.EqualFunc(m1, m2, eq) } // Clear removes all entries from m, leaving it empty. +// +//go:fix inline func Clear[M ~map[K]V, K comparable, V any](m M) { clear(m) } // Clone returns a copy of m. This is a shallow clone: // the new keys and values are set using ordinary assignment. +// +//go:fix inline func Clone[M ~map[K]V, K comparable, V any](m M) M { return maps.Clone(m) } @@ -66,11 +72,15 @@ func Clone[M ~map[K]V, K comparable, V any](m M) M { // When a key in src is already present in dst, // the value in dst will be overwritten by the value associated // with the key in src. +// +//go:fix inline func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { maps.Copy(dst, src) } // DeleteFunc deletes any key/value pairs from m for which del returns true. +// +//go:fix inline func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { maps.DeleteFunc(m, del) } diff --git a/tools/vendor/golang.org/x/exp/slices/slices.go b/tools/vendor/golang.org/x/exp/slices/slices.go index 757383ea1..da0df370d 100644 --- a/tools/vendor/golang.org/x/exp/slices/slices.go +++ b/tools/vendor/golang.org/x/exp/slices/slices.go @@ -10,16 +10,13 @@ import ( "slices" ) -// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of -// these functions should be annotated (provisionally with "//go:fix -// inline") so that tools can safely and automatically replace calls -// to exp/slices with calls to std slices by inlining them. - // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. +// +//go:fix inline func Equal[S ~[]E, E comparable](s1, s2 S) bool { return slices.Equal(s1, s2) } @@ -29,6 +26,8 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool { // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. +// +//go:fix inline func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { return slices.EqualFunc(s1, s2, eq) } @@ -40,6 +39,8 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// +//go:fix inline func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int { return slices.Compare(s1, s2) } @@ -49,29 +50,39 @@ func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int { // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). +// +//go:fix inline func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { return slices.CompareFunc(s1, s2, cmp) } // Index returns the index of the first occurrence of v in s, // or -1 if not present. 
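These //go:fix inline annotations mark each wrapper as mechanically inlinable, so fix-aware tooling can rewrite exp/slices callers to the standard library. A small demonstration that the two calls are interchangeable, using Index from just above (its body follows below):

```go
package main

import (
	"fmt"
	stdslices "slices"

	expslices "golang.org/x/exp/slices"
)

func main() {
	users := []string{"ann", "admin", "bo"}

	// The exp wrapper simply forwards to the standard library, so a
	// tool honoring //go:fix inline rewrites the first call into the
	// second and drops the exp import.
	fmt.Println(expslices.Index(users, "admin")) // 1
	fmt.Println(stdslices.Index(users, "admin")) // 1
}
```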
+// +//go:fix inline func Index[S ~[]E, E comparable](s S, v E) int { return slices.Index(s, v) } // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. +// +//go:fix inline func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { return slices.IndexFunc(s, f) } // Contains reports whether v is present in s. +// +//go:fix inline func Contains[S ~[]E, E comparable](s S, v E) bool { return slices.Contains(s, v) } // ContainsFunc reports whether at least one // element e of s satisfies f(e). +// +//go:fix inline func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { return slices.ContainsFunc(s, f) } @@ -83,6 +94,8 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { // and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). +// +//go:fix inline func Insert[S ~[]E, E any](s S, i int, v ...E) S { return slices.Insert(s, i, v...) } @@ -92,6 +105,8 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { // Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. // Delete zeroes the elements s[len(s)-(j-i):len(s)]. +// +//go:fix inline func Delete[S ~[]E, E any](s S, i, j int) S { return slices.Delete(s, i, j) } @@ -99,6 +114,8 @@ func Delete[S ~[]E, E any](s S, i, j int) S { // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. // DeleteFunc zeroes the elements between the new length and the original length. +// +//go:fix inline func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { return slices.DeleteFunc(s, del) } @@ -106,12 +123,16 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. // When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. +// +//go:fix inline func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { return slices.Replace(s, i, j, v...) } // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. +// +//go:fix inline func Clone[S ~[]E, E any](s S) S { return slices.Clone(s) } @@ -121,6 +142,8 @@ func Clone[S ~[]E, E any](s S) S { // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. // Compact zeroes the elements between the new length and the original length. +// +//go:fix inline func Compact[S ~[]E, E comparable](s S) S { return slices.Compact(s) } @@ -128,6 +151,8 @@ func Compact[S ~[]E, E comparable](s S) S { // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. // CompactFunc zeroes the elements between the new length and the original length. +// +//go:fix inline func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { return slices.CompactFunc(s, eq) } @@ -136,16 +161,22 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { // another n elements. After Grow(n), at least n elements can be appended // to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. +// +//go:fix inline func Grow[S ~[]E, E any](s S, n int) S { return slices.Grow(s, n) } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. 
+// +//go:fix inline func Clip[S ~[]E, E any](s S) S { return slices.Clip(s) } // Reverse reverses the elements of the slice in place. +// +//go:fix inline func Reverse[S ~[]E, E any](s S) { slices.Reverse(s) } diff --git a/tools/vendor/golang.org/x/exp/slices/sort.go b/tools/vendor/golang.org/x/exp/slices/sort.go index e270a7465..bd91a8d40 100644 --- a/tools/vendor/golang.org/x/exp/slices/sort.go +++ b/tools/vendor/golang.org/x/exp/slices/sort.go @@ -9,11 +9,10 @@ import ( "slices" ) -// TODO(adonovan): add a "//go:fix inline" annotation to each function -// in this file; see https://go.dev/issue/32816. - // Sort sorts a slice of any ordered type in ascending order. // When sorting floating-point numbers, NaNs are ordered before other values. +// +//go:fix inline func Sort[S ~[]E, E cmp.Ordered](x S) { slices.Sort(x) } @@ -27,23 +26,31 @@ func Sort[S ~[]E, E cmp.Ordered](x S) { // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. // To indicate 'uncomparable', return 0 from the function. +// +//go:fix inline func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { slices.SortFunc(x, cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal // elements, using cmp to compare elements in the same way as [SortFunc]. +// +//go:fix inline func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { slices.SortStableFunc(x, cmp) } // IsSorted reports whether x is sorted in ascending order. +// +//go:fix inline func IsSorted[S ~[]E, E cmp.Ordered](x S) bool { return slices.IsSorted(x) } // IsSortedFunc reports whether x is sorted in ascending order, with cmp as the // comparison function as defined by [SortFunc]. +// +//go:fix inline func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { return slices.IsSortedFunc(x, cmp) } @@ -51,6 +58,8 @@ func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { // Min returns the minimal value in x. It panics if x is empty. // For floating-point numbers, Min propagates NaNs (any NaN value in x // forces the output to be NaN). +// +//go:fix inline func Min[S ~[]E, E cmp.Ordered](x S) E { return slices.Min(x) } @@ -58,6 +67,8 @@ func Min[S ~[]E, E cmp.Ordered](x S) E { // MinFunc returns the minimal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one minimal element // according to the cmp function, MinFunc returns the first one. +// +//go:fix inline func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { return slices.MinFunc(x, cmp) } @@ -65,6 +76,8 @@ func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { // Max returns the maximal value in x. It panics if x is empty. // For floating-point E, Max propagates NaNs (any NaN value in x // forces the output to be NaN). +// +//go:fix inline func Max[S ~[]E, E cmp.Ordered](x S) E { return slices.Max(x) } @@ -72,6 +85,8 @@ func Max[S ~[]E, E cmp.Ordered](x S) E { // MaxFunc returns the maximal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one maximal element // according to the cmp function, MaxFunc returns the first one. +// +//go:fix inline func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { return slices.MaxFunc(x, cmp) } @@ -80,6 +95,8 @@ func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. 
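The BinarySearch contract described in the comment above (which continues below) is easiest to see with concrete calls, found versus not-found:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	xs := []int{1, 3, 5}

	// Found: the index of the match, and true.
	fmt.Println(slices.BinarySearch(xs, 3)) // 1 true

	// Not found: the position where 4 would be inserted, and false.
	fmt.Println(slices.BinarySearch(xs, 4)) // 2 false
}
```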
The slice must be sorted in increasing order. +// +//go:fix inline func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) { return slices.BinarySearch(x, target) } @@ -91,6 +108,8 @@ func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) { // or a positive number if the slice element follows the target. // cmp must implement the same ordering as the slice, such that if // cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +// +//go:fix inline func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { return slices.BinarySearchFunc(x, target, cmp) } diff --git a/tools/vendor/golang.org/x/mod/modfile/rule.go b/tools/vendor/golang.org/x/mod/modfile/rule.go index 3e4a1d0ab..a86ee4fd8 100644 --- a/tools/vendor/golang.org/x/mod/modfile/rule.go +++ b/tools/vendor/golang.org/x/mod/modfile/rule.go @@ -20,10 +20,11 @@ package modfile import ( + "cmp" "errors" "fmt" "path/filepath" - "sort" + "slices" "strconv" "strings" "unicode" @@ -44,6 +45,7 @@ type File struct { Replace []*Replace Retract []*Retract Tool []*Tool + Ignore []*Ignore Syntax *FileSyntax } @@ -100,6 +102,12 @@ type Tool struct { Syntax *Line } +// An Ignore is a single ignore statement. +type Ignore struct { + Path string + Syntax *Line +} + // A VersionInterval represents a range of versions with upper and lower bounds. // Intervals are closed: both bounds are included. When Low is equal to High, // the interval may refer to a single version ('v1.2.3') or an interval @@ -304,7 +312,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse }) } continue - case "module", "godebug", "require", "exclude", "replace", "retract", "tool": + case "module", "godebug", "require", "exclude", "replace", "retract", "tool", "ignore": for _, l := range x.Line { f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) } @@ -337,7 +345,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a // and simply ignore those statements. if !strict { switch verb { - case "go", "module", "retract", "require": + case "go", "module", "retract", "require", "ignore": // want these even for dependency go.mods default: return @@ -531,6 +539,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a Path: s, Syntax: line, }) + + case "ignore": + if len(args) != 1 { + errorf("ignore directive expects exactly one argument") + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Ignore = append(f.Ignore, &Ignore{ + Path: s, + Syntax: line, + }) } } @@ -1619,6 +1642,36 @@ func (f *File) DropTool(path string) error { return nil } +// AddIgnore adds a new ignore directive with the given path. +// It does nothing if the ignore line already exists. +func (f *File) AddIgnore(path string) error { + for _, t := range f.Ignore { + if t.Path == path { + return nil + } + } + + f.Ignore = append(f.Ignore, &Ignore{ + Path: path, + Syntax: f.Syntax.addLine(nil, "ignore", path), + }) + + f.SortBlocks() + return nil +} + +// DropIgnore removes a ignore directive with the given path. +// It does nothing if no such ignore directive exists. 
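The new ignore directive support reads most clearly end to end. A minimal sketch against the API added above, with illustrative go.mod contents (DropIgnore's body follows below):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte("module example.com/m\n\ngo 1.24\n")
	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		panic(err)
	}

	// AddIgnore appends an ignore line; repeating the same path is a
	// no-op, and removeDups would drop duplicates in any case.
	_ = f.AddIgnore("node_modules")
	_ = f.AddIgnore("node_modules")

	out, err := f.Format()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // now includes: ignore node_modules
}
```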
+func (f *File) DropIgnore(path string) error { + for _, t := range f.Ignore { + if t.Path == path { + t.Syntax.markRemoved() + *t = Ignore{} + } + } + return nil +} + func (f *File) SortBlocks() { f.removeDups() // otherwise sorting is unsafe @@ -1633,15 +1686,13 @@ func (f *File) SortBlocks() { if !ok { continue } - less := lineLess + less := compareLine if block.Token[0] == "exclude" && useSemanticSortForExclude { - less = lineExcludeLess + less = compareLineExclude } else if block.Token[0] == "retract" { - less = lineRetractLess + less = compareLineRetract } - sort.SliceStable(block.Line, func(i, j int) bool { - return less(block.Line[i], block.Line[j]) - }) + slices.SortStableFunc(block.Line, less) } } @@ -1657,10 +1708,10 @@ func (f *File) SortBlocks() { // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. func (f *File) removeDups() { - removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool) + removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool, &f.Ignore) } -func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) { +func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool, ignore *[]*Ignore) { kill := make(map[*Line]bool) // Remove duplicate excludes. @@ -1719,6 +1770,24 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, to *tool = newTool } + if ignore != nil { + haveIgnore := make(map[string]bool) + for _, i := range *ignore { + if haveIgnore[i.Path] { + kill[i.Syntax] = true + continue + } + haveIgnore[i.Path] = true + } + var newIgnore []*Ignore + for _, i := range *ignore { + if !kill[i.Syntax] { + newIgnore = append(newIgnore, i) + } + } + *ignore = newIgnore + } + // Duplicate require and retract directives are not removed. // Drop killed statements from the syntax tree. @@ -1746,39 +1815,38 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, to syntax.Stmt = stmts } -// lineLess returns whether li should be sorted before lj. It sorts -// lexicographically without assigning any special meaning to tokens. -func lineLess(li, lj *Line) bool { +// compareLine compares li and lj. It sorts lexicographically without assigning +// any special meaning to tokens. +func compareLine(li, lj *Line) int { for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { if li.Token[k] != lj.Token[k] { - return li.Token[k] < lj.Token[k] + return cmp.Compare(li.Token[k], lj.Token[k]) } } - return len(li.Token) < len(lj.Token) + return cmp.Compare(len(li.Token), len(lj.Token)) } -// lineExcludeLess reports whether li should be sorted before lj for lines in -// an "exclude" block. -func lineExcludeLess(li, lj *Line) bool { +// compareLineExclude compares li and lj for lines in an "exclude" block. +func compareLineExclude(li, lj *Line) int { if len(li.Token) != 2 || len(lj.Token) != 2 { // Not a known exclude specification. // Fall back to sorting lexicographically. - return lineLess(li, lj) + return compareLine(li, lj) } // An exclude specification has two tokens: ModulePath and Version. // Compare module path by string order and version by semver rules. if pi, pj := li.Token[0], lj.Token[0]; pi != pj { - return pi < pj + return cmp.Compare(pi, pj) } - return semver.Compare(li.Token[1], lj.Token[1]) < 0 + return semver.Compare(li.Token[1], lj.Token[1]) } -// lineRetractLess returns whether li should be sorted before lj for lines in -// a "retract" block. It treats each line as a version interval. 
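The mechanical pattern in these hunks, converting a sort.SliceStable less(i, j) callback into a three-way comparison for slices.SortStableFunc, is worth isolating; this sketch reproduces compareLine's token-by-token logic over plain string slices:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

func main() {
	lines := [][]string{
		{"b", "v1"},
		{"a", "v2"},
		{"a"},
	}

	// Lexicographic token comparison, then shorter-before-longer:
	// the same shape as compareLine above, with cmp.Compare turning
	// each "<" test into a -1/0/+1 result.
	slices.SortStableFunc(lines, func(li, lj []string) int {
		for k := 0; k < len(li) && k < len(lj); k++ {
			if li[k] != lj[k] {
				return cmp.Compare(li[k], lj[k])
			}
		}
		return cmp.Compare(len(li), len(lj))
	})
	fmt.Println(lines) // [[a] [a v2] [b v1]]
}
```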
Single versions -// are compared as if they were intervals with the same low and high version. +// compareLineRetract compares li and lj for lines in a "retract" block. +// It treats each line as a version interval. Single versions are compared as +// if they were intervals with the same low and high version. // Intervals are sorted in descending order, first by low version, then by -// high version, using semver.Compare. -func lineRetractLess(li, lj *Line) bool { +// high version, using [semver.Compare]. +func compareLineRetract(li, lj *Line) int { interval := func(l *Line) VersionInterval { if len(l.Token) == 1 { return VersionInterval{Low: l.Token[0], High: l.Token[0]} @@ -1792,9 +1860,9 @@ func lineRetractLess(li, lj *Line) bool { vii := interval(li) vij := interval(lj) if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 { - return cmp > 0 + return -cmp } - return semver.Compare(vii.High, vij.High) > 0 + return -semver.Compare(vii.High, vij.High) } // checkCanonicalVersion returns a non-nil error if vers is not a canonical diff --git a/tools/vendor/golang.org/x/mod/modfile/work.go b/tools/vendor/golang.org/x/mod/modfile/work.go index 5387d0c26..09df5ea3c 100644 --- a/tools/vendor/golang.org/x/mod/modfile/work.go +++ b/tools/vendor/golang.org/x/mod/modfile/work.go @@ -6,7 +6,7 @@ package modfile import ( "fmt" - "sort" + "slices" "strings" ) @@ -315,9 +315,7 @@ func (f *WorkFile) SortBlocks() { if !ok { continue } - sort.SliceStable(block.Line, func(i, j int) bool { - return lineLess(block.Line[i], block.Line[j]) - }) + slices.SortStableFunc(block.Line, compareLine) } } @@ -331,5 +329,5 @@ func (f *WorkFile) SortBlocks() { // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. func (f *WorkFile) removeDups() { - removeDups(f.Syntax, nil, &f.Replace, nil) + removeDups(f.Syntax, nil, &f.Replace, nil, nil) } diff --git a/tools/vendor/golang.org/x/mod/module/module.go b/tools/vendor/golang.org/x/mod/module/module.go index 2a364b229..16e1aa7ab 100644 --- a/tools/vendor/golang.org/x/mod/module/module.go +++ b/tools/vendor/golang.org/x/mod/module/module.go @@ -96,10 +96,11 @@ package module // Changes to the semantics in this file require approval from rsc. import ( + "cmp" "errors" "fmt" "path" - "sort" + "slices" "strings" "unicode" "unicode/utf8" @@ -657,17 +658,15 @@ func CanonicalVersion(v string) string { // optionally followed by a tie-breaking suffix introduced by a slash character, // like in "v0.0.1/go.mod". func Sort(list []Version) { - sort.Slice(list, func(i, j int) bool { - mi := list[i] - mj := list[j] - if mi.Path != mj.Path { - return mi.Path < mj.Path + slices.SortFunc(list, func(i, j Version) int { + if i.Path != j.Path { + return strings.Compare(i.Path, j.Path) } // To help go.sum formatting, allow version/file. // Compare semver prefix by semver rules, // file by string order. 
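module.Sort's ordering, path first, then semver prefix, then the optional file suffix, is shown below as a quick sketch with made-up module paths (the comparison body continues in the next hunk):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	list := []module.Version{
		{Path: "example.com/b", Version: "v1.0.0"},
		{Path: "example.com/a", Version: "v1.2.0/go.mod"},
		{Path: "example.com/a", Version: "v1.2.0"},
	}

	// Path order first; for equal paths the semver prefix decides,
	// and the "/go.mod" suffix used in go.sum breaks the final tie.
	module.Sort(list)
	for _, m := range list {
		fmt.Println(m.Path, m.Version)
	}
	// example.com/a v1.2.0
	// example.com/a v1.2.0/go.mod
	// example.com/b v1.0.0
}
```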
- vi := mi.Version - vj := mj.Version + vi := i.Version + vj := j.Version var fi, fj string if k := strings.Index(vi, "/"); k >= 0 { vi, fi = vi[:k], vi[k:] @@ -676,9 +675,9 @@ func Sort(list []Version) { vj, fj = vj[:k], vj[k:] } if vi != vj { - return semver.Compare(vi, vj) < 0 + return semver.Compare(vi, vj) } - return fi < fj + return cmp.Compare(fi, fj) }) } diff --git a/tools/vendor/golang.org/x/mod/semver/semver.go b/tools/vendor/golang.org/x/mod/semver/semver.go index 9a2dfd33a..628f8fd68 100644 --- a/tools/vendor/golang.org/x/mod/semver/semver.go +++ b/tools/vendor/golang.org/x/mod/semver/semver.go @@ -22,7 +22,10 @@ // as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. package semver -import "sort" +import ( + "slices" + "strings" +) // parsed returns the parsed form of a semantic version string. type parsed struct { @@ -154,19 +157,22 @@ func Max(v, w string) string { // ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string -func (vs ByVersion) Len() int { return len(vs) } -func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } -func (vs ByVersion) Less(i, j int) bool { - cmp := Compare(vs[i], vs[j]) - if cmp != 0 { - return cmp < 0 - } - return vs[i] < vs[j] -} +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { return compareVersion(vs[i], vs[j]) < 0 } -// Sort sorts a list of semantic version strings using [ByVersion]. +// Sort sorts a list of semantic version strings using [Compare] and falls back +// to use [strings.Compare] if both versions are considered equal. func Sort(list []string) { - sort.Sort(ByVersion(list)) + slices.SortFunc(list, compareVersion) +} + +func compareVersion(a, b string) int { + cmp := Compare(a, b) + if cmp != 0 { + return cmp + } + return strings.Compare(a, b) } func parse(v string) (p parsed, ok bool) { diff --git a/tools/vendor/golang.org/x/net/context/context.go b/tools/vendor/golang.org/x/net/context/context.go index db1c95fab..d3cb95175 100644 --- a/tools/vendor/golang.org/x/net/context/context.go +++ b/tools/vendor/golang.org/x/net/context/context.go @@ -6,7 +6,7 @@ // cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name [context], and migrating to it can be done automatically with [go fix]. +// name [context]. // // Incoming requests to a server should create a [Context], and outgoing // calls to servers should accept a Context. The chain of function @@ -38,8 +38,6 @@ // // See https://go.dev/blog/context for example code for a server that uses // Contexts. -// -// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs package context import ( @@ -51,36 +49,37 @@ import ( // API boundaries. // // Context's methods may be called by multiple goroutines simultaneously. +// +//go:fix inline type Context = context.Context // Canceled is the error returned by [Context.Err] when the context is canceled // for some reason other than its deadline passing. +// +//go:fix inline var Canceled = context.Canceled // DeadlineExceeded is the error returned by [Context.Err] when the context is canceled // due to its deadline passing. +// +//go:fix inline var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. 
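Back in semver: Sort now falls back to strings.Compare when Compare reports equality, for example versions differing only in build metadata, so the resulting order is total and deterministic. A quick illustration:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	vs := []string{"v1.10.0", "v1.2.0+meta", "v1.2.0"}

	// Compare ignores build metadata, so the +meta entry ties with
	// v1.2.0 and plain string order breaks the tie; v1.10.0 still
	// sorts last by semver rules despite "1.10" < "1.2" as strings.
	semver.Sort(vs)
	fmt.Println(vs) // [v1.2.0 v1.2.0+meta v1.10.0]
}
```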
It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming // requests. -func Background() Context { - return background -} +// +//go:fix inline +func Background() Context { return context.Background() } // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). -func TODO() Context { - return todo -} - -var ( - background = context.Background() - todo = context.TODO() -) +// +//go:fix inline +func TODO() Context { return context.TODO() } // A CancelFunc tells an operation to abandon its work. // A CancelFunc does not wait for the work to stop. @@ -95,6 +94,8 @@ type CancelFunc = context.CancelFunc // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this [Context] complete. +// +//go:fix inline func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { return context.WithCancel(parent) } @@ -108,6 +109,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this [Context] complete. +// +//go:fix inline func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { return context.WithDeadline(parent, d) } @@ -122,6 +125,8 @@ func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { // defer cancel() // releases resources if slowOperation completes before timeout elapses // return slowOperation(ctx) // } +// +//go:fix inline func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return context.WithTimeout(parent, timeout) } @@ -139,6 +144,8 @@ func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { // interface{}, context keys often have concrete type // struct{}. Alternatively, exported context key variables' static // type should be a pointer or interface. 
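Since Context here is a type alias and every wrapper now forwards to the standard library, the package interoperates freely with std context; a minimal sketch (WithValue's annotation follows below):

```go
package main

import (
	"fmt"
	"time"

	netctx "golang.org/x/net/context"
)

func main() {
	// netctx.Context is an alias for context.Context, so values flow
	// between the two packages without conversion; //go:fix inline
	// lets tooling finish the migration mechanically.
	ctx, cancel := netctx.WithTimeout(netctx.Background(), 50*time.Millisecond)
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err()) // context deadline exceeded
}
```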
+// +//go:fix inline func WithValue(parent Context, key, val interface{}) Context { return context.WithValue(parent, key, val) } diff --git a/tools/vendor/golang.org/x/net/html/escape.go b/tools/vendor/golang.org/x/net/html/escape.go index 04c6bec21..12f227370 100644 --- a/tools/vendor/golang.org/x/net/html/escape.go +++ b/tools/vendor/golang.org/x/net/html/escape.go @@ -299,7 +299,7 @@ func escape(w writer, s string) error { case '\r': esc = " " default: - panic("unrecognized escape character") + panic("html: unrecognized escape character") } s = s[i+1:] if _, err := w.WriteString(esc); err != nil { diff --git a/tools/vendor/golang.org/x/net/html/parse.go b/tools/vendor/golang.org/x/net/html/parse.go index 518ee4c94..88fc0056a 100644 --- a/tools/vendor/golang.org/x/net/html/parse.go +++ b/tools/vendor/golang.org/x/net/html/parse.go @@ -136,7 +136,7 @@ func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { return -1 } default: - panic("unreachable") + panic(fmt.Sprintf("html: internal error: indexOfElementInScope unknown scope: %d", s)) } } switch s { @@ -179,7 +179,7 @@ func (p *parser) clearStackToContext(s scope) { return } default: - panic("unreachable") + panic(fmt.Sprintf("html: internal error: clearStackToContext unknown scope: %d", s)) } } } @@ -231,7 +231,14 @@ func (p *parser) addChild(n *Node) { } if n.Type == ElementNode { - p.oe = append(p.oe, n) + p.insertOpenElement(n) + } +} + +func (p *parser) insertOpenElement(n *Node) { + p.oe = append(p.oe, n) + if len(p.oe) > 512 { + panic("html: open stack of elements exceeds 512 nodes") } } @@ -810,7 +817,7 @@ func afterHeadIM(p *parser) bool { p.im = inFramesetIM return true case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: - p.oe = append(p.oe, p.head) + p.insertOpenElement(p.head) defer p.oe.remove(p.head) return inHeadIM(p) case a.Head: @@ -1678,7 +1685,7 @@ func inTableBodyIM(p *parser) bool { return inTableIM(p) } -// Section 12.2.6.4.14. +// Section 13.2.6.4.14. func inRowIM(p *parser) bool { switch p.tok.Type { case StartTagToken: @@ -1690,7 +1697,9 @@ func inRowIM(p *parser) bool { p.im = inCellIM return true case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr: - if p.popUntil(tableScope, a.Tr) { + if p.elementInScope(tableScope, a.Tr) { + p.clearStackToContext(tableRowScope) + p.oe.pop() p.im = inTableBodyIM return false } @@ -1700,22 +1709,28 @@ func inRowIM(p *parser) bool { case EndTagToken: switch p.tok.DataAtom { case a.Tr: - if p.popUntil(tableScope, a.Tr) { + if p.elementInScope(tableScope, a.Tr) { + p.clearStackToContext(tableRowScope) + p.oe.pop() p.im = inTableBodyIM return true } // Ignore the token. return true case a.Table: - if p.popUntil(tableScope, a.Tr) { + if p.elementInScope(tableScope, a.Tr) { + p.clearStackToContext(tableRowScope) + p.oe.pop() p.im = inTableBodyIM return false } // Ignore the token. return true case a.Tbody, a.Tfoot, a.Thead: - if p.elementInScope(tableScope, p.tok.DataAtom) { - p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String()) + if p.elementInScope(tableScope, p.tok.DataAtom) && p.elementInScope(tableScope, a.Tr) { + p.clearStackToContext(tableRowScope) + p.oe.pop() + p.im = inTableBodyIM return false } // Ignore the token. 
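Beyond the inRowIM spec-tracking changes, the notable behavioral change above is insertOpenElement capping the open-element stack at 512 nodes; the recover added in the next hunk turns that panic into an error from Parse. A sketch of the observable effect, assuming this patched parser:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// 1000 nested <div>s exceed the 512-element cap, so Parse now
	// returns a depth-limit error instead of consuming memory.
	deep := strings.Repeat("<div>", 1000)
	_, err := html.Parse(strings.NewReader(deep))
	fmt.Println(err) // non-nil: open element stack limit exceeded
}
```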
@@ -2222,16 +2237,20 @@ func parseForeignContent(p *parser) bool { p.acknowledgeSelfClosingTag() } case EndTagToken: + if strings.EqualFold(p.oe[len(p.oe)-1].Data, p.tok.Data) { + p.oe = p.oe[:len(p.oe)-1] + return true + } for i := len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].Namespace == "" { - return p.im(p) - } if strings.EqualFold(p.oe[i].Data, p.tok.Data) { p.oe = p.oe[:i] + return true + } + if i > 0 && p.oe[i-1].Namespace == "" { break } } - return true + return p.im(p) default: // Ignore the token. } @@ -2312,9 +2331,13 @@ func (p *parser) parseCurrentToken() { } } -func (p *parser) parse() error { +func (p *parser) parse() (err error) { + defer func() { + if panicErr := recover(); panicErr != nil { + err = fmt.Errorf("%s", panicErr) + } + }() // Iterate until EOF. Any other error will cause an early return. - var err error for err != io.EOF { // CDATA sections are allowed only in foreign content. n := p.oe.top() @@ -2343,6 +2366,8 @@ func (p *parser) parse() error { // s. Conversely, explicit s in r's data can be silently dropped, // with no corresponding node in the resulting tree. // +// Parse will reject HTML that is nested deeper than 512 elements. +// // The input is assumed to be UTF-8 encoded. func Parse(r io.Reader) (*Node, error) { return ParseWithOptions(r) diff --git a/tools/vendor/golang.org/x/net/html/render.go b/tools/vendor/golang.org/x/net/html/render.go index e8c123345..0157d89e1 100644 --- a/tools/vendor/golang.org/x/net/html/render.go +++ b/tools/vendor/golang.org/x/net/html/render.go @@ -184,7 +184,7 @@ func render1(w writer, n *Node) error { return err } - // Add initial newline where there is danger of a newline beging ignored. + // Add initial newline where there is danger of a newline being ignored. if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") { switch n.Data { case "pre", "listing", "textarea": diff --git a/tools/vendor/golang.org/x/net/http2/config.go b/tools/vendor/golang.org/x/net/http2/config.go index ca645d9a1..8a7a89d01 100644 --- a/tools/vendor/golang.org/x/net/http2/config.go +++ b/tools/vendor/golang.org/x/net/http2/config.go @@ -27,6 +27,7 @@ import ( // - If the resulting value is zero or out of range, use a default. type http2Config struct { MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool MaxDecoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32 MaxReadFrameSize uint32 @@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, CountError: h2.CountError, } - fillNetHTTPServerConfig(&conf, h1) + fillNetHTTPConfig(&conf, h1.HTTP2) setConfigDefaults(&conf, true) return conf } @@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { // (the net/http Transport). 
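Assuming the Go 1.24 net/http surface, fillNetHTTPConfig below copies knobs from the public http.HTTP2Config into the internal http2Config; setting them on the server side looks like this sketch (configFromTransport's body follows):

```go
package main

import (
	"net/http"
	"time"
)

func main() {
	// HTTP/2 options now live on http.Server.HTTP2 and are merged
	// into the internal config by fillNetHTTPConfig; zero values
	// keep the defaults.
	srv := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,
			PingTimeout:          15 * time.Second,
		},
	}
	_ = srv // srv.ListenAndServeTLS(...) would apply these settings
}
```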
func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ - MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, - MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, - MaxReadFrameSize: h2.MaxReadFrameSize, - SendPingTimeout: h2.ReadIdleTimeout, - PingTimeout: h2.PingTimeout, - WriteByteTimeout: h2.WriteByteTimeout, + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, } // Unlike most config fields, where out-of-range values revert to the default, @@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config { } if h2.t1 != nil { - fillNetHTTPTransportConfig(&conf, h2.t1) + fillNetHTTPConfig(&conf, h2.t1.HTTP2) } setConfigDefaults(&conf, false) return conf @@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 { const typicalHeaders = 10 // conservative return n + typicalHeaders*perFieldOverhead } + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if http2ConfigStrictMaxConcurrentRequests(h2) { + conf.StrictMaxConcurrentRequests = true + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/tools/vendor/golang.org/x/net/http2/config_go124.go b/tools/vendor/golang.org/x/net/http2/config_go124.go deleted file mode 100644 index 5b516c55f..000000000 --- a/tools/vendor/golang.org/x/net/http2/config_go124.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.24 - -package http2 - -import "net/http" - -// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { - fillNetHTTPConfig(conf, srv.HTTP2) -} - -// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { - fillNetHTTPConfig(conf, tr.HTTP2) -} - -func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { - if h2 == nil { - return - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxEncoderHeaderTableSize != 0 { - conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) - } - if h2.MaxDecoderHeaderTableSize != 0 { - conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxReadFrameSize != 0 { - conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) - } - if h2.MaxReceiveBufferPerConnection != 0 { - conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) - } - if h2.MaxReceiveBufferPerStream != 0 { - conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) - } - if h2.SendPingTimeout != 0 { - conf.SendPingTimeout = h2.SendPingTimeout - } - if h2.PingTimeout != 0 { - conf.PingTimeout = h2.PingTimeout - } - if h2.WriteByteTimeout != 0 { - conf.WriteByteTimeout = h2.WriteByteTimeout - } - if h2.PermitProhibitedCipherSuites { - conf.PermitProhibitedCipherSuites = true - } - if h2.CountError != nil { - conf.CountError = h2.CountError - } -} diff --git a/tools/vendor/golang.org/x/net/http2/config_go125.go b/tools/vendor/golang.org/x/net/http2/config_go125.go new file mode 100644 index 000000000..b4373fe33 --- /dev/null +++ b/tools/vendor/golang.org/x/net/http2/config_go125.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return false +} diff --git a/tools/vendor/golang.org/x/net/http2/config_go126.go b/tools/vendor/golang.org/x/net/http2/config_go126.go new file mode 100644 index 000000000..6b071c149 --- /dev/null +++ b/tools/vendor/golang.org/x/net/http2/config_go126.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} diff --git a/tools/vendor/golang.org/x/net/http2/config_pre_go124.go b/tools/vendor/golang.org/x/net/http2/config_pre_go124.go deleted file mode 100644 index 060fd6c64..000000000 --- a/tools/vendor/golang.org/x/net/http2/config_pre_go124.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.24 - -package http2 - -import "net/http" - -// Pre-Go 1.24 fallback. -// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. 
- -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} - -func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/tools/vendor/golang.org/x/net/http2/frame.go b/tools/vendor/golang.org/x/net/http2/frame.go index 97bd8b06f..93bcaab03 100644 --- a/tools/vendor/golang.org/x/net/http2/frame.go +++ b/tools/vendor/golang.org/x/net/http2/frame.go @@ -39,7 +39,7 @@ const ( FrameContinuation FrameType = 0x9 ) -var frameName = map[FrameType]string{ +var frameNames = [...]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", @@ -53,10 +53,10 @@ var frameName = map[FrameType]string{ } func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s + if int(t) < len(frameNames) { + return frameNames[t] } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t) } // Flags is a bitmask of HTTP/2 flags. @@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{ // might be 0). type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) -var frameParsers = map[FrameType]frameParser{ +var frameParsers = [...]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, @@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{ } func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f + if int(t) < len(frameParsers) { + return frameParsers[t] } return parseUnknownFrame } @@ -347,7 +347,7 @@ func (fr *Framer) maxHeaderListSize() uint32 { func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -509,7 +509,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } return nil, ErrFrameTooLarge } @@ -1152,6 +1152,15 @@ type PriorityFrame struct { PriorityParam } +var defaultRFC9218Priority = PriorityParam{ + incremental: 0, + urgency: 3, +} + +// Note that HTTP/2 has had two different prioritization schemes, and +// PriorityParam struct below is a superset of both schemes. The exported +// symbols are from RFC 7540 and the non-exported ones are from RFC 9218. + // PriorityParam are the stream prioritzation parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the @@ -1167,6 +1176,20 @@ type PriorityParam struct { // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 + + // "The urgency (u) parameter value is Integer (see Section 3.3.1 of + // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of + // priority. The default is 3." + urgency uint8 + + // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of + // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed + // incrementally, i.e., provide some meaningful output as chunks of the + // response arrive." + // + // We use uint8 (i.e. 
0 is false, 1 is true) instead of bool so we can + // avoid unnecessary type conversions and because either type takes 1 byte. + incremental uint8 } func (p PriorityParam) IsZero() bool { diff --git a/tools/vendor/golang.org/x/net/http2/gotrack.go b/tools/vendor/golang.org/x/net/http2/gotrack.go index 9933c9f8c..9921ca096 100644 --- a/tools/vendor/golang.org/x/net/http2/gotrack.go +++ b/tools/vendor/golang.org/x/net/http2/gotrack.go @@ -15,21 +15,32 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" +// Setting DebugGoroutines to false during a test to disable goroutine debugging +// results in race detector complaints when a test leaves goroutines running before +// returning. Tests shouldn't do this, of course, but when they do it generally shows +// up as infrequent, hard-to-debug flakes. (See #66519.) +// +// Disable goroutine debugging during individual tests with an atomic bool. +// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition +// here is harmless.) +var disableDebugGoroutines atomic.Bool + type goroutineLock uint64 func newGoroutineLock() goroutineLock { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() != uint64(g) { @@ -38,7 +49,7 @@ func (g goroutineLock) check() { } func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() == uint64(g) { diff --git a/tools/vendor/golang.org/x/net/http2/http2.go b/tools/vendor/golang.org/x/net/http2/http2.go index 6c18ea230..105fe12fe 100644 --- a/tools/vendor/golang.org/x/net/http2/http2.go +++ b/tools/vendor/golang.org/x/net/http2/http2.go @@ -11,13 +11,10 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. package http2 // import "golang.org/x/net/http2" import ( "bufio" - "context" "crypto/tls" "errors" "fmt" @@ -37,7 +34,6 @@ var ( VerboseLogs bool logFrameWrites bool logFrameReads bool - inTests bool // Enabling extended CONNECT by causes browsers to attempt to use // WebSockets-over-HTTP/2. This results in problems when the server's websocket @@ -257,15 +253,13 @@ func (cw closeWaiter) Wait() { // idle memory usage with many connections. 
type bufferedWriter struct { _ incomparable - group synctestGroupInterface // immutable - conn net.Conn // immutable - bw *bufio.Writer // non-nil when data is buffered - byteTimeout time.Duration // immutable, WriteByteTimeout + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { +func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter { return &bufferedWriter{ - group: group, conn: conn, byteTimeout: timeout, } @@ -316,24 +310,18 @@ func (w *bufferedWriter) Flush() error { type bufferedWriterTimeoutWriter bufferedWriter func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { - return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) + return writeWithByteTimeout(w.conn, w.byteTimeout, p) } // writeWithByteTimeout writes to conn. // If more than timeout passes without any bytes being written to the connection, // the write fails. -func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { +func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { if timeout <= 0 { return conn.Write(p) } for { - var now time.Time - if group == nil { - now = time.Now() - } else { - now = group.Now() - } - conn.SetWriteDeadline(now.Add(timeout)) + conn.SetWriteDeadline(time.Now().Add(timeout)) nn, err := conn.Write(p[n:]) n += nn if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { @@ -419,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() - -// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. -// It's defined as an interface here to let us keep synctestGroup entirely test-only -// and not a part of non-test builds. -type synctestGroupInterface interface { - Join() - Now() time.Time - NewTimer(d time.Duration) timer - AfterFunc(d time.Duration, f func()) timer - ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) -} diff --git a/tools/vendor/golang.org/x/net/http2/server.go b/tools/vendor/golang.org/x/net/http2/server.go index 51fca38f6..bdc5520eb 100644 --- a/tools/vendor/golang.org/x/net/http2/server.go +++ b/tools/vendor/golang.org/x/net/http2/server.go @@ -176,44 +176,15 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState - - // Synchronization group used for testing. - // Outside of tests, this is nil. - group synctestGroupInterface -} - -func (s *Server) markNewGoroutine() { - if s.group != nil { - s.group.Join() - } -} - -func (s *Server) now() time.Time { - if s.group != nil { - return s.group.Now() - } - return time.Now() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (s *Server) newTimer(d time.Duration) timer { - if s.group != nil { - return s.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
-func (s *Server) afterFunc(d time.Duration, f func()) timer { - if s.group != nil { - return s.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} } type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} + + // Pool of error channels. This is per-Server rather than global + // because channels can't be reused across synctest bubbles. + errChanPool sync.Pool } func (s *serverInternalState) registerConn(sc *serverConn) { @@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() { s.mu.Unlock() } +// Global error channel pool used for uninitialized Servers. +// We use a per-Server pool when possible to avoid using channels across synctest bubbles. +var errChanPool = sync.Pool{ + New: func() any { return make(chan error, 1) }, +} + +func (s *serverInternalState) getErrChan() chan error { + if s == nil { + return errChanPool.Get().(chan error) // Server used without calling ConfigureServer + } + return s.errChanPool.Get().(chan error) +} + +func (s *serverInternalState) putErrChan(ch chan error) { + if s == nil { + errChanPool.Put(ch) // Server used without calling ConfigureServer + return + } + s.errChanPool.Put(ch) +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. @@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + conf.state = &serverInternalState{ + activeConns: make(map[*serverConn]struct{}), + errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}, + } if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout @@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. 
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + if opts == nil { + opts = &ServeConnOpts{} + } s.serveConn(c, opts, nil) } @@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), + bw: newBufferedWriter(c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -638,11 +636,11 @@ type serverConn struct { pingSent bool sentPingData [8]byte goAwayCode ErrCode - shutdownTimer timer // nil until used - idleTimer timer // nil if unused + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused readIdleTimeout time.Duration pingTimeout time.Duration - readIdleTimer timer // nil if unused + readIdleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -687,12 +685,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline timer // nil if unused - writeDeadline timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused + writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -848,7 +846,6 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - sc.srv.markNewGoroutine() gate := make(chan struct{}) gateDone := func() { gate <- struct{}{} } for { @@ -881,7 +878,6 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. 
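The per-Server pool above exists because a one-shot error channel must not be reused across synctest bubbles; the reuse pattern itself, shown here with a hypothetical do helper rather than the vendored code, is just this:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// One-slot error channels are pooled and reused once drained,
// avoiding an allocation per handler write.
var errChanPool = sync.Pool{
	New: func() any { return make(chan error, 1) },
}

func do(work func() error) error {
	ch := errChanPool.Get().(chan error)
	go func() { ch <- work() }()
	err := <-ch
	errChanPool.Put(ch) // drained, so safe to reuse
	return err
}

func main() {
	fmt.Println(do(func() error { return nil }))
	fmt.Println(do(func() error { return errors.New("boom") }))
}
```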
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { - sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } if conf.SendPingTimeout > 0 { sc.readIdleTimeout = conf.SendPingTimeout - sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) defer sc.readIdleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() - lastFrameTime := sc.srv.now() + lastFrameTime := time.Now() loopNum := 0 for { loopNum++ @@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: - lastFrameTime = sc.srv.now() + lastFrameTime = time.Now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1077,7 +1073,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { } pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) - now := sc.srv.now() + now := time.Now() if pingAt.After(now) { // We received frames since arming the ping timer. // Reset it for the next possible timeout. @@ -1141,10 +1137,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C(): + case <-timer.C: return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1156,10 +1152,6 @@ func (sc *serverConn) readPreface() error { } } -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - var writeDataPool = sync.Pool{ New: func() interface{} { return new(writeData) }, } @@ -1167,7 +1159,7 @@ var writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) + ch := sc.srv.state.getErrChan() writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ @@ -1199,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return errStreamClosed } } - errChanPool.Put(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { writeDataPool.Put(writeArg) } @@ -1513,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -2118,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. 
if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2216,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2405,7 +2397,6 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2454,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = errChanPool.Get().(chan error) + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, @@ -2466,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro if errc != nil { select { case err := <-errc: - errChanPool.Put(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return errClientDisconnected @@ -2573,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) { if err == io.EOF { b.sawEOF = true } - if b.conn == nil && inTests { + if b.conn == nil { return } b.conn.noteBodyReadFromHandler(b.stream, n, err) @@ -2702,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = rws.conn.srv.now().UTC().Format(http.TimeFormat) + date = time.Now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2824,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. 
st.onReadTimeout() @@ -2840,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(sc.srv.now())) + st.readDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -2850,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2866,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) + st.writeDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -3147,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { method: opts.Method, url: u, header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), + done: sc.srv.state.getErrChan(), } select { @@ -3164,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { case <-st.cw: return errStreamClosed case err := <-msg.done: - errChanPool.Put(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } diff --git a/tools/vendor/golang.org/x/net/http2/timer.go b/tools/vendor/golang.org/x/net/http2/timer.go deleted file mode 100644 index 0b1c17b81..000000000 --- a/tools/vendor/golang.org/x/net/http2/timer.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import "time" - -// A timer is a time.Timer, as an interface which can be replaced in tests. -type timer = interface { - C() <-chan time.Time - Reset(d time.Duration) bool - Stop() bool -} - -// timeTimer adapts a time.Timer to the timer interface. 
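[Editor's sketch] The Push path above now borrows its done channel from the per-server pool as well. From handler code, server push is reached through the standard http.Pusher interface; a small sketch (asset path and cert/key paths are hypothetical, and Push requires an HTTP/2 connection):

package main

import "net/http"

func pushHandler(w http.ResponseWriter, r *http.Request) {
	if p, ok := w.(http.Pusher); ok {
		// Push errors (unsupported protocol, connection closing, too many
		// streams) are advisory; the main response proceeds regardless.
		_ = p.Push("/static/app.css", nil)
	}
	_, _ = w.Write([]byte("<html>...</html>"))
}

func main() {
	_ = http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", // hypothetical paths
		http.HandlerFunc(pushHandler))
}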
-type timeTimer struct { - *time.Timer -} - -func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/tools/vendor/golang.org/x/net/http2/transport.go b/tools/vendor/golang.org/x/net/http2/transport.go index f26356b9c..be759b606 100644 --- a/tools/vendor/golang.org/x/net/http2/transport.go +++ b/tools/vendor/golang.org/x/net/http2/transport.go @@ -193,50 +193,6 @@ type Transport struct { type transportTestHooks struct { newclientconn func(*ClientConn) - group synctestGroupInterface -} - -func (t *Transport) markNewGoroutine() { - if t != nil && t.transportTestHooks != nil { - t.transportTestHooks.group.Join() - } -} - -func (t *Transport) now() time.Time { - if t != nil && t.transportTestHooks != nil { - return t.transportTestHooks.group.Now() - } - return time.Now() -} - -func (t *Transport) timeSince(when time.Time) time.Duration { - if t != nil && t.transportTestHooks != nil { - return t.now().Sub(when) - } - return time.Since(when) -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (t *Transport) newTimer(d time.Duration) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (t *Transport) afterFunc(d time.Duration, f func()) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} -} - -func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.ContextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -366,7 +322,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer timer + idleTimer *time.Timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -399,6 +355,7 @@ type ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration extendedConnectAllowed bool + strictMaxConcurrentStreams bool // rstStreamPingsBlocked works around an unfortunate gRPC behavior. // gRPC strictly limits the number of PING frames that it will receive. 
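[Editor's note] The deleted timer.go above was a test seam: an interface matching time.Timer's shape plus a thin adapter, so tests could substitute a synthetic clock while production code used the real one. Reconstructed here for reference, since the rest of this diff replaces every use with *time.Timer directly:

package http2sketch // illustrative package name

import "time"

// timer is the shape both *time.Timer and a test fake can satisfy.
type timer interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

// timeTimer adapts *time.Timer, whose C is a struct field rather than a
// method, to the interface above.
type timeTimer struct{ *time.Timer }

func (t timeTimer) C() <-chan time.Time { return t.Timer.C }

var _ timer = timeTimer{} // compile-time check that the adapter conforms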
@@ -534,14 +491,12 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { - cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() } type stickyErrWriter struct { - group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -551,7 +506,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + n, err = writeWithByteTimeout(sew.conn, sew.timeout, p) *sew.err = err return n, err } @@ -650,9 +605,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - tm := t.newTimer(d) + tm := time.NewTimer(d) select { - case <-tm.C(): + case <-tm.C: t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): @@ -699,6 +654,7 @@ var ( errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnNotEstablished = errors.New("http2: client conn could not be established") errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -829,7 +785,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialWindowSize: 65535, // spec default initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, seenSettingsChan: make(chan struct{}), @@ -838,14 +795,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - lastActive: t.now(), + lastActive: time.Now(), } - var group synctestGroupInterface if t.transportTestHooks != nil { - t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn - group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -857,7 +811,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ - group: group, conn: c, timeout: conf.WriteByteTimeout, err: &cc.werr, @@ -906,7 +859,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // Start the idle timer after the connection is fully initialized. 
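[Editor's sketch] RoundTripOpt's retry loop above keeps the jittered exponential backoff but now waits on a plain time.Timer. The delay computation, extracted as a standalone helper for illustration only:

package main

import (
	"fmt"
	mathrand "math/rand"
	"time"
)

// retryDelay mirrors the hunk above: 2^(retry-1) seconds plus up to 10%
// random jitter. Note that time.Duration(backoff) truncates the fraction,
// so the result is always a whole number of seconds.
func retryDelay(retry uint) time.Duration {
	backoff := float64(uint(1) << (retry - 1))
	backoff += backoff * (0.1 * mathrand.Float64())
	return time.Second * time.Duration(backoff)
}

func main() {
	for retry := uint(1); retry <= 4; retry++ {
		fmt.Println(retry, retryDelay(retry)) // 1s, 2s, 4s, 8s
	}
}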
if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } go cc.readLoop() @@ -917,7 +870,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1067,7 +1020,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before @@ -1120,7 +1073,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1186,7 +1139,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { - cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1257,8 +1209,7 @@ func (cc *ClientConn) closeForError(err error) { // // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { - err := errors.New("http2: client connection force closed via ClientConn.Close") - cc.closeForError(err) + cc.closeForError(errClientConnForceClosed) return nil } @@ -1427,7 +1378,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { - cs.cc.t.markNewGoroutine() err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1558,9 +1508,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.t.newTimer(d) + timer := time.NewTimer(d) defer timer.Stop() - respHeaderTimer = timer.C() + respHeaderTimer = timer.C respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1753,7 +1703,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { // Return a fatal error which aborts the retry loop. 
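[Editor's sketch] In tooIdleLocked above, the Round(0) is load-bearing: it strips the monotonic clock reading so time.Since compares wall times, which keeps advancing across a VM or laptop suspend even when monotonic time is frozen (exactly the case the comment describes). A two-line illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()    // carries both wall and monotonic readings
	wall := t.Round(0) // Round(0) drops the monotonic reading
	// time.Since(wall) is now a pure wall-clock difference.
	fmt.Println(time.Since(wall) >= 0)
}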
return errClientConnNotEstablished } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } @@ -2092,10 +2042,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = cc.t.now() + cc.lastIdle = time.Now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2121,7 +2071,6 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { - cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2188,9 +2137,9 @@ func (rl *clientConnReadLoop) cleanup() { if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { unusedWaitTime = cc.idleTimeout } - idleTime := cc.t.now().Sub(cc.lastActive) + idleTime := time.Now().Sub(cc.lastActive) if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { - cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) } else { @@ -2250,9 +2199,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.readIdleTimeout - var t timer + var t *time.Timer if readIdleTimeout != 0 { - t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2998,7 +2947,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error { var pingError error errc := make(chan struct{}) go func() { - cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3228,7 +3176,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = cc.t.timeSince(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/tools/vendor/golang.org/x/net/http2/writesched.go b/tools/vendor/golang.org/x/net/http2/writesched.go index cc893adc2..4d3890f99 100644 --- a/tools/vendor/golang.org/x/net/http2/writesched.go +++ b/tools/vendor/golang.org/x/net/http2/writesched.go @@ -42,6 +42,8 @@ type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority PriorityParam } // FrameWriteRequest is a request to write a frame. diff --git a/tools/vendor/golang.org/x/net/http2/writesched_priority.go b/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go similarity index 78% rename from tools/vendor/golang.org/x/net/http2/writesched_priority.go rename to tools/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index f6783339d..6d24d6a1b 100644 --- a/tools/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -11,7 +11,7 @@ import ( ) // RFC 7540, Section 5.3.5: the default weight is 16. 
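[Editor's sketch] The readLoop health check above is armed by two exported knobs on http2.Transport. A minimal client configuration, assuming the target servers speak HTTP/2 (timeout values are arbitrary):

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func newClient() *http.Client {
	t2 := &http2.Transport{
		ReadIdleTimeout: 30 * time.Second, // send a PING after 30s without any frame read
		PingTimeout:     15 * time.Second, // close the conn if the PING gets no reply
	}
	return &http.Client{Transport: t2}
}

func main() { _ = newClient() }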
-const priorityDefaultWeight = 15 // 16 = 15 + 1 +const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. type PriorityWriteSchedulerConfig struct { @@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler } } - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), + ws := &priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*priorityNodeRFC7540), maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, @@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler return ws } -type priorityNodeState int +type priorityNodeStateRFC7540 int const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle + priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota + priorityNodeClosedRFC7540 + priorityNodeIdleRFC7540 ) -// priorityNode is a node in an HTTP/2 priority tree. +// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. // Each node is associated with a single stream ID. // See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree +type priorityNodeRFC7540 struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings + parent *priorityNodeRFC7540 + kids *priorityNodeRFC7540 // start of the kids list + prev, next *priorityNodeRFC7540 // doubly-linked list of siblings } -func (n *priorityNode) setParent(parent *priorityNode) { +func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) { if n == parent { panic("setParent to self") } @@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) { } } -func (n *priorityNode) addBytes(b int64) { +func (n *priorityNodeRFC7540) addBytes(b int64) { n.bytes += b for ; n != nil; n = n.parent { n.subtreeBytes += b @@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) { // // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { +func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool { if !n.q.empty() && f(n, openParent) { return true } @@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f // Don't consider the root "open" when updating openParent since // we can't send data frames on the root stream (only control frames). 
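[Editor's sketch] The rename above frees the unsuffixed names for the RFC 9218 scheduler added later in this diff; the exported constructor is unchanged, so a server can still select the RFC 7540 priority tree explicitly. A sketch (cert/key paths hypothetical):

package main

import (
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443"}
	_ = http2.ConfigureServer(srv, &http2.Server{
		// A nil config means defaults; the constructor now returns the
		// renamed RFC 7540 tree implementation.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	})
	_ = srv.ListenAndServeTLS("cert.pem", "key.pem") // hypothetical paths
}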
if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) + openParent = openParent || (n.state == priorityNodeOpenRFC7540) } // Common case: only one kid or all kids have the same weight. @@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f *tmp = append(*tmp, n.kids) n.kids.setParent(nil) } - sort.Sort(sortPriorityNodeSiblings(*tmp)) + sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp)) for i := len(*tmp) - 1; i >= 0; i-- { (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids } @@ -207,11 +207,11 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f return false } -type sortPriorityNodeSiblings []*priorityNode +type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540 -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { +func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } +func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) @@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool { return bi/bk <= wi/wk } -type priorityWriteScheduler struct { +type priorityWriteSchedulerRFC7540 struct { // root is the root of the priority tree, where root.id = 0. // The root queues control frames that are not associated with any stream. - root priorityNode + root priorityNodeRFC7540 // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode + nodes map[uint32]*priorityNodeRFC7540 // maxID is the maximum stream id in nodes. maxID uint32 @@ -239,7 +239,7 @@ type priorityWriteScheduler struct { // lists of nodes that have been closed or are idle, but are kept in // the tree for improved prioritization. When the lengths exceed either // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode + closedNodes, idleNodes []*priorityNodeRFC7540 // From the config. maxClosedNodesInTree int @@ -248,19 +248,19 @@ type priorityWriteScheduler struct { enableWriteThrottle bool // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode + tmp []*priorityNodeRFC7540 // pool of empty queues for reuse. queuePool writeQueuePool } -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { +func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) { // The stream may be currently idle but cannot be opened or closed. 
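[Editor's sketch] The Less rule above serves the sibling subtree that has sent the fewest bytes relative to its weight (RFC 7540 §5.3.2 and §5.3.4). A standalone restatement with one worked case; this is a re-derivation for illustration, not the package's code:

package main

import "fmt"

// less reports whether sibling i should be served before sibling k, given
// each subtree's effective weight (stored weight+1) and bytes already sent.
func less(wi, bi, wk, bk float64) bool {
	if bi == 0 && bk == 0 {
		return wi >= wk // nothing sent yet: higher weight goes first
	}
	if bk == 0 {
		return false // k has sent nothing, so k goes first
	}
	return bi/bk <= wi/wk // fewer bytes per unit of weight wins
}

func main() {
	// Equal bytes sent (32 each), weights 16 vs 8: the weight-16 subtree is next.
	fmt.Println(less(16, 32, 8, 32)) // true
}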
if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { + if curr.state != priorityNodeIdleRFC7540 { panic(fmt.Sprintf("stream %d already opened", streamID)) } - curr.state = priorityNodeOpen + curr.state = priorityNodeOpenRFC7540 return } @@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream if parent == nil { parent = &ws.root } - n := &priorityNode{ + n := &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeOpenRFC7540, } n.setParent(parent) ws.nodes[streamID] = n @@ -285,19 +285,19 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream } } -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { +func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { if streamID == 0 { panic("violation of WriteScheduler interface: cannot close stream 0") } if ws.nodes[streamID] == nil { panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) } - if ws.nodes[streamID].state != priorityNodeOpen { + if ws.nodes[streamID].state != priorityNodeOpenRFC7540 { panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) } n := ws.nodes[streamID] - n.state = priorityNodeClosed + n.state = priorityNodeClosedRFC7540 n.addBytes(-n.bytes) q := n.q @@ -310,7 +310,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { } } -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { +func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) { if streamID == 0 { panic("adjustPriority on root") } @@ -324,11 +324,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit return } ws.maxID = streamID - n = &priorityNode{ + n = &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeIdleRFC7540, } n.setParent(&ws.root) ws.nodes[streamID] = n @@ -340,7 +340,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit parent := ws.nodes[priority.StreamDep] if parent == nil { n.setParent(&ws.root) - n.weight = priorityDefaultWeight + n.weight = priorityDefaultWeightRFC7540 return } @@ -381,8 +381,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit n.weight = priority.Weight } -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode +func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) { + var n *priorityNodeRFC7540 if wr.isControl() { n = &ws.root } else { @@ -401,8 +401,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { n.q.push(wr) } -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { +func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool { limit := int32(math.MaxInt32) if openParent { limit = ws.writeThrottleLimit @@ -428,7 +428,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { return wr, ok } -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n 
*priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) { if maxSize == 0 { return } @@ -442,7 +442,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max *list = append(*list, n) } -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) { for n.kids != nil { n.kids.setParent(n.parent) } diff --git a/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go new file mode 100644 index 000000000..9b5b8808e --- /dev/null +++ b/tools/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go @@ -0,0 +1,209 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type streamMetadata struct { + location *writeQueue + priority PriorityParam +} + +type priorityWriteSchedulerRFC9218 struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // heads contain the head of a circular list of streams. + // We put these heads within a nested array that represents urgency and + // incremental, as defined in + // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters. + // 8 represents u=0 up to u=7, and 2 represents i=false and i=true. + heads [8][2]*writeQueue + + // streams contains a mapping between each stream ID and their metadata, so + // we can quickly locate them when needing to, for example, adjust their + // priority. + streams map[uint32]streamMetadata + + // queuePool are empty queues for reuse. + queuePool writeQueuePool + + // prioritizeIncremental is used to determine whether we should prioritize + // incremental streams or not, when urgency is the same in a given Pop() + // call. + prioritizeIncremental bool +} + +func newPriorityWriteSchedulerRFC9128() WriteScheduler { + ws := &priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]streamMetadata), + } + return ws +} + +func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. 
+ ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. + u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } + + // Update the metadata. + ws.streams[streamID] = streamMetadata{ + location: q, + priority: priority, + } +} + +func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. highest urgency level). + for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. + if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. 
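[Editor's sketch] OpenStream, CloseStream, and AdjustStream above all manipulate the same circular doubly-linked ring rooted at heads[u][i]. The two primitive operations, isolated with illustrative types (not the package's writeQueue):

package main

import "fmt"

type node struct {
	id         int
	prev, next *node
}

// insertTail places n at the logical end of the ring: just before head.
// A one-element ring links to itself.
func insertTail(head **node, n *node) {
	if *head == nil {
		*head, n.prev, n.next = n, n, n
		return
	}
	n.prev, n.next = (*head).prev, *head
	n.prev.next = n
	n.next.prev = n
}

// unlink removes n, advancing head past it when necessary.
func unlink(head **node, n *node) {
	if n.next == n { // n was the only element
		*head = nil
		return
	}
	n.prev.next = n.next
	n.next.prev = n.prev
	if *head == n {
		*head = n.next
	}
}

func main() {
	var head *node
	a, b := &node{id: 1}, &node{id: 2}
	insertTail(&head, a)
	insertTail(&head, b)
	unlink(&head, a)
	fmt.Println(head.id) // 2
}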
+ ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return FrameWriteRequest{}, false +} diff --git a/tools/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/tools/vendor/golang.org/x/net/http2/writesched_roundrobin.go index 54fe86322..737cff9ec 100644 --- a/tools/vendor/golang.org/x/net/http2/writesched_roundrobin.go +++ b/tools/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/tools/vendor/golang.org/x/net/internal/httpcommon/request.go b/tools/vendor/golang.org/x/net/internal/httpcommon/request.go index 4b7055317..1e10f89eb 100644 --- a/tools/vendor/golang.org/x/net/internal/httpcommon/request.go +++ b/tools/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -51,7 +51,7 @@ type EncodeHeadersParam struct { DefaultUserAgent string } -// EncodeHeadersParam is the result of EncodeHeaders. +// EncodeHeadersResult is the result of EncodeHeaders. type EncodeHeadersResult struct { HasBody bool HasTrailers bool @@ -399,7 +399,7 @@ type ServerRequestResult struct { // If the request should be rejected, this is a short string suitable for passing // to the http2 package's CountError function. - // It might be a bit odd to return errors this way rather than returing an error, + // It might be a bit odd to return errors this way rather than returning an error, // but this ensures we don't forget to include a CountError reason. InvalidReason string } diff --git a/tools/vendor/golang.org/x/net/internal/socks/socks.go b/tools/vendor/golang.org/x/net/internal/socks/socks.go index 84fcc32b6..8eedb84ce 100644 --- a/tools/vendor/golang.org/x/net/internal/socks/socks.go +++ b/tools/vendor/golang.org/x/net/internal/socks/socks.go @@ -297,7 +297,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, b = append(b, up.Username...) b = append(b, byte(len(up.Password))) b = append(b, up.Password...) - // TODO(mikio): handle IO deadlines and cancelation if + // TODO(mikio): handle IO deadlines and cancellation if // necessary if _, err := rw.Write(b); err != nil { return err diff --git a/tools/vendor/golang.org/x/net/trace/events.go b/tools/vendor/golang.org/x/net/trace/events.go index c646a6952..3aaffdd1f 100644 --- a/tools/vendor/golang.org/x/net/trace/events.go +++ b/tools/vendor/golang.org/x/net/trace/events.go @@ -508,7 +508,7 @@ const eventsHTML = ` {{$el.When}} {{$el.ElapsedTime}} - {{$el.Title}} + {{$el.Title}} {{if $.Expanded}} diff --git a/tools/vendor/golang.org/x/oauth2/internal/doc.go b/tools/vendor/golang.org/x/oauth2/internal/doc.go index 03265e888..8c7c475f2 100644 --- a/tools/vendor/golang.org/x/oauth2/internal/doc.go +++ b/tools/vendor/golang.org/x/oauth2/internal/doc.go @@ -2,5 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. +// Package internal contains support packages for [golang.org/x/oauth2]. 
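[Editor's sketch] The prioritizeIncremental toggle in Pop above is subtle: within one urgency level, successive Pop calls flip which i-bucket is tried first, so incremental streams (served round-robin) and the first non-incremental stream (served to completion) each get roughly half the bandwidth. A reduced restatement of just the flip, for illustration:

package main

import "fmt"

func main() {
	prioritizeIncremental := false
	order := func() [2]int {
		prioritizeIncremental = !prioritizeIncremental
		if prioritizeIncremental {
			return [2]int{1, 0} // try incremental (i=1) first on this Pop
		}
		return [2]int{0, 1} // try non-incremental (i=0) first on the next
	}
	fmt.Println(order(), order(), order()) // [1 0] [0 1] [1 0]
}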
package internal diff --git a/tools/vendor/golang.org/x/oauth2/internal/oauth2.go b/tools/vendor/golang.org/x/oauth2/internal/oauth2.go index 14989beaf..71ea6ad1f 100644 --- a/tools/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/tools/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -13,7 +13,7 @@ import ( ) // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an [*rsa.PrivateKey]. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. diff --git a/tools/vendor/golang.org/x/oauth2/internal/token.go b/tools/vendor/golang.org/x/oauth2/internal/token.go index e83ddeef0..8389f2462 100644 --- a/tools/vendor/golang.org/x/oauth2/internal/token.go +++ b/tools/vendor/golang.org/x/oauth2/internal/token.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "mime" "net/http" @@ -26,9 +25,9 @@ import ( // the requests to access protected resources on the OAuth 2.0 // provider's backend. // -// This type is a mirror of oauth2.Token and exists to break +// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break // an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. +// should convert this Token into an [golang.org/x/oauth2.Token] before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. @@ -50,9 +49,16 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // Raw optionally contains extra metadata from the server // when updating a token. - Raw interface{} + Raw any } // tokenJSON is the struct representing the HTTP response from OAuth2 @@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - // AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. type AuthStyle int @@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { return c } +type authStyleCacheKey struct { + url string + clientID string +} + // AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that @@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { // small. type AuthStyleCache struct { mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL + m map[authStyleCacheKey]AuthStyle } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. 
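[Editor's sketch] The new ExpiresIn field above carries the raw "expires_in" seconds, and per its comment, deriving an absolute Expiry is left to the application. A hedged sketch using the public oauth2.Token, which gains the same field later in this diff:

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{AccessToken: "example", ExpiresIn: 3600}
	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
		// Anchor the relative lifetime at "now"; the wire format
		// provides no better time base.
		tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
	fmt.Println(tok.Expiry.After(time.Now())) // true
}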
-func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { +func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) { c.mu.Lock() defer c.mu.Unlock() - style, ok = c.m[tokenURL] + style, ok = c.m[authStyleCacheKey{tokenURL, clientID}] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { +func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) { c.mu.Lock() defer c.mu.Unlock() if c.m == nil { - c.m = make(map[string]AuthStyle) + c.m = make(map[authStyleCacheKey]AuthStyle) } - c.m[tokenURL] = v + c.m[authStyleCacheKey{tokenURL, clientID}] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values { } func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 + needsAuthStyleProbe := authStyle == AuthStyleUnknown if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, clientID, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. @@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), - Raw: make(map[string]interface{}), + ExpiresIn: int64(tj.ExpiresIn), + Raw: make(map[string]any), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } diff --git a/tools/vendor/golang.org/x/oauth2/internal/transport.go b/tools/vendor/golang.org/x/oauth2/internal/transport.go index b9db01ddf..afc0aeb27 100644 --- a/tools/vendor/golang.org/x/oauth2/internal/transport.go +++ b/tools/vendor/golang.org/x/oauth2/internal/transport.go @@ -9,8 +9,8 @@ import ( "net/http" ) -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate an [*http.Client] value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be diff --git a/tools/vendor/golang.org/x/oauth2/oauth2.go b/tools/vendor/golang.org/x/oauth2/oauth2.go index eacdd7fd9..de34feb84 100644 --- a/tools/vendor/golang.org/x/oauth2/oauth2.go +++ b/tools/vendor/golang.org/x/oauth2/oauth2.go @@ -22,9 +22,9 @@ import ( ) // NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). +// your own [context.Context]. 
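[Editor's sketch] The cache above is now keyed by (tokenURL, clientID), so two clients of the same provider no longer share a probed auth style. Callers can sidestep the probe and the cache entirely by declaring the style up front; endpoint URLs and credentials below are hypothetical:

package main

import "golang.org/x/oauth2"

func newConfig() *oauth2.Config {
	return &oauth2.Config{
		ClientID:     "my-client-id",     // hypothetical
		ClientSecret: "my-client-secret", // hypothetical
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/authorize", // hypothetical
			TokenURL: "https://auth.example.com/token",     // hypothetical
			// Declaring the style avoids the AuthStyleUnknown probe above.
			AuthStyle: oauth2.AuthStyleInParams,
		},
	}
}

func main() { _ = newConfig() }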
// -// Deprecated: Use context.Background() or context.TODO() instead. +// Deprecated: Use [context.Background] or [context.TODO] instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. @@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). +// For the client credentials 2-legged OAuth2 flow, see the +// [golang.org/x/oauth2/clientcredentials] package. type Config struct { // ClientID is the application's ID. ClientID string @@ -46,7 +46,7 @@ type Config struct { // ClientSecret is the application's secret. ClientSecret string - // Endpoint contains the resource server's token endpoint + // Endpoint contains the authorization server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. @@ -135,7 +135,7 @@ type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters // to a provider's authorization endpoint. func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} @@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // request and callback. The authorization server includes this value when // redirecting the user agent back to the client. // -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. +// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well +// as [ApprovalForce]. // // To protect against CSRF attacks, opts should include a PKCE challenge // (S256ChallengeOption). Not all servers support PKCE. An alternative is to @@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. // -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state") if you are +// The code will be in the [http.Request.FormValue]("code"). Before +// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are // using it to protect against CSRF attacks. 
// // If using PKCE to protect against CSRF attacks, opts should include a @@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { tkr := &tokenRefresher{ ctx: ctx, @@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { } } -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// tokenRefresher is a TokenSource that makes "grant_type=refresh_token" // HTTP requests to renew a token using a RefreshToken. type tokenRefresher struct { ctx context.Context // used to get HTTP requests @@ -305,8 +305,7 @@ type reuseTokenSource struct { } // Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. +// refresh the current token and return the new one. func (s *reuseTokenSource) Token() (*Token, error) { s.mu.Lock() defer s.mu.Unlock() @@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { return t, nil } -// StaticTokenSource returns a TokenSource that always returns the same token. +// StaticTokenSource returns a [TokenSource] that always returns the same token. // Because the provided token t is never refreshed, StaticTokenSource is only // useful for tokens that never expire. func StaticTokenSource(t *Token) TokenSource { @@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) { return s.t, nil } -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate a [*http.Client] value with a context. var HTTPClient internal.ContextKey -// NewClient creates an *http.Client from a Context and TokenSource. +// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource]. // The returned client is not valid beyond the lifetime of the context. // -// Note that if a custom *http.Client is provided via the Context it +// Note that if a custom [*http.Client] is provided via the [context.Context] it // is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. +// [*http.Client] returned from NewClient. // // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. This exists to support related OAuth2 @@ -368,7 +367,7 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { } } -// ReuseTokenSource returns a TokenSource which repeatedly returns the +// ReuseTokenSource returns a [TokenSource] which repeatedly returns the // same token as long as it's valid, starting with t. // When its cached token is invalid, a new token is obtained from src. // @@ -376,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { // (such as a file on disk) between runs of a program, rather than // obtaining new tokens unnecessarily. 
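[Editor's sketch] reuseTokenSource above is the machinery behind Config.TokenSource and NewClient; StaticTokenSource covers the non-expiring case called out in its doc. Typical wiring:

package main

import (
	"context"

	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	// For a token that never expires (e.g. a personal access token),
	// StaticTokenSource skips the refresh machinery entirely.
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
	client := oauth2.NewClient(ctx, src) // adds Authorization to each request
	_ = client
}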
// -// The initial token t may be nil, in which case the TokenSource is +// The initial token t may be nil, in which case the [TokenSource] is // wrapped in a caching version if it isn't one already. This also // means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. +// [TokenSource] without adverse effects. func ReuseTokenSource(t *Token, src TokenSource) TokenSource { // Don't wrap a reuseTokenSource in itself. That would work, // but cause an unnecessary number of mutex operations. @@ -397,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the +// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { diff --git a/tools/vendor/golang.org/x/oauth2/pkce.go b/tools/vendor/golang.org/x/oauth2/pkce.go index 6a95da975..cea8374d5 100644 --- a/tools/vendor/golang.org/x/oauth2/pkce.go +++ b/tools/vendor/golang.org/x/oauth2/pkce.go @@ -1,6 +1,7 @@ // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package oauth2 import ( @@ -20,9 +21,9 @@ const ( // This follows recommendations in RFC 7636. // // A fresh verifier should be generated for each authorization. -// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange -// (or Config.DeviceAccessToken). +// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] +// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken] +// with [VerifierOption]. func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be // used to create a 32-octet sequence. The octet sequence is then @@ -36,22 +37,22 @@ func GenerateVerifier() string { return base64.RawURLEncoding.EncodeToString(data) } -// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be -// passed to Config.Exchange or Config.DeviceAccessToken only. +// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be +// passed to [Config.Exchange] or [Config.DeviceAccessToken]. func VerifierOption(verifier string) AuthCodeOption { return setParam{k: codeVerifierKey, v: verifier} } // S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. // -// Prefer to use S256ChallengeOption where possible. +// Prefer to use [S256ChallengeOption] where possible. func S256ChallengeFromVerifier(verifier string) string { sha := sha256.Sum256([]byte(verifier)) return base64.RawURLEncoding.EncodeToString(sha[:]) } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth +// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. 
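[Editor's sketch] The reworded PKCE docs above describe a single verifier threaded through both ends of the flow. Concretely (in a real flow the verifier must be persisted between issuing the redirect and handling the callback):

package oauthexample

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func exchange(ctx context.Context, conf *oauth2.Config, code string) (*oauth2.Token, error) {
	// One fresh verifier per authorization, per the docs above.
	verifier := oauth2.GenerateVerifier()
	// The S256 challenge rides on the authorization request...
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)
	// ...and possession of the verifier is proved at exchange time.
	return conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))
}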
func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/tools/vendor/golang.org/x/oauth2/token.go b/tools/vendor/golang.org/x/oauth2/token.go index 8c31136c4..239ec3296 100644 --- a/tools/vendor/golang.org/x/oauth2/token.go +++ b/tools/vendor/golang.org/x/oauth2/token.go @@ -44,7 +44,7 @@ type Token struct { // Expiry is the optional expiration time of the access token. // - // If zero, TokenSource implementations will reuse the same + // If zero, [TokenSource] implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` @@ -58,7 +58,7 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. - raw interface{} + raw any // expiryDelta is used to calculate when a token is considered // expired, by subtracting from Expiry. If zero, defaultExpiryDelta @@ -86,16 +86,16 @@ func (t *Token) Type() string { // SetAuthHeader sets the Authorization header to r using the access // token in t. // -// This method is unnecessary when using Transport or an HTTP Client +// This method is unnecessary when using [Transport] or an HTTP Client // returned by this package. func (t *Token) SetAuthHeader(r *http.Request) { r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) } -// WithExtra returns a new Token that's a clone of t, but using the +// WithExtra returns a new [Token] that's a clone of t, but using the // provided raw extra map. This is only intended for use by packages // implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { +func (t *Token) WithExtra(extra any) *Token { t2 := new(Token) *t2 = *t t2.raw = extra @@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra returns an extra field. // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { +func (t *Token) Extra(key string) any { + if raw, ok := t.raw.(map[string]any); ok { return raw[key] } @@ -163,6 +163,7 @@ func tokenFromInternal(t *internal.Token) *Token { TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, + ExpiresIn: t.ExpiresIn, raw: t.Raw, } } diff --git a/tools/vendor/golang.org/x/oauth2/transport.go b/tools/vendor/golang.org/x/oauth2/transport.go index 90657915f..8bbebbac9 100644 --- a/tools/vendor/golang.org/x/oauth2/transport.go +++ b/tools/vendor/golang.org/x/oauth2/transport.go @@ -11,12 +11,12 @@ import ( "sync" ) -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. +// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests, +// wrapping a base [http.RoundTripper] and adding an Authorization header +// with a token from the supplied [TokenSource]. // // Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. +// higher-level [Config.Client] method instead. type Transport struct { // Source supplies the token to add to outgoing requests' // Authorization headers. 
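[Editor's sketch] Extra above is the documented way to read non-standard fields from a token response, for example an OIDC provider's id_token (assumption: the provider returns one):

package oauthexample

import "golang.org/x/oauth2"

// idTokenFrom pulls the OIDC id_token, if any, out of a token response.
func idTokenFrom(tok *oauth2.Token) (string, bool) {
	raw, ok := tok.Extra("id_token").(string)
	return raw, ok
}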
@@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } - req2 := cloneRequest(req) // per RoundTripper contract + req2 := req.Clone(req.Context()) token.SetAuthHeader(req2) // req.Body is assumed to be closed by the base RoundTripper. @@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper { } return http.DefaultTransport } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} diff --git a/tools/vendor/golang.org/x/sync/errgroup/errgroup.go b/tools/vendor/golang.org/x/sync/errgroup/errgroup.go index f8c3c0926..1d8cffae8 100644 --- a/tools/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/tools/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -61,12 +61,14 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// // The first call to Go must happen before a Wait. // It blocks until the new goroutine can be added without the number of -// active goroutines in the group exceeding the configured limit. +// goroutines in the group exceeding the configured limit. // -// The first call to return a non-nil error cancels the group's context, if the -// group was created by calling WithContext. The error will be returned by Wait. +// The first goroutine in the group that returns a non-nil error will +// cancel the associated Context, if any. The error will be returned +// by Wait. func (g *Group) Go(f func() error) { if g.sem != nil { g.sem <- token{} @@ -76,6 +78,18 @@ func (g *Group) Go(f func() error) { go func() { defer g.done() + // It is tempting to propagate panics from f() + // up to the goroutine that calls Wait, but + // it creates more problems than it solves: + // - it delays panics arbitrarily, + // making bugs harder to detect; + // - it turns f's panic stack into a mere value, + // hiding it from crash-monitoring tools; + // - it risks deadlocks that hide the panic entirely, + // if f's panic leaves the program in a state + // that prevents the Wait call from being reached. + // See #53757, #74275, #74304, #74306. + if err := f(); err != nil { g.errOnce.Do(func() { g.err = err diff --git a/tools/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/tools/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 000000000..269e173ca --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/tools/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/tools/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 000000000..ec2acfe54 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
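[Editor's sketch] The revised errgroup docs above clarify that the limit applies to all goroutines in the group and that the first failing goroutine cancels the WithContext context. In practice (URLs illustrative):

package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // Go blocks once four goroutines are in flight

	urls := []string{"https://example.com/", "https://example.org/"}
	for _, u := range urls {
		u := u // capture per iteration (needed before Go 1.22)
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err // first failure cancels ctx for the others
			}
			return resp.Body.Close()
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}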
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/tools/vendor/golang.org/x/sys/cpu/byteorder.go b/tools/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 000000000..271055be0 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. +func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", + "loong64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", + "riscv", "riscv64", + "sh": + return littleEndian{} + case "armbe", "arm64be", + "m68k", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "shbe", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu.go b/tools/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 000000000..63541994e --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,338 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +import ( + "os" + "strings" +) + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. 
/proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. 
+ HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + HasDIT bool // Data Independent Timing support + HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
+var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// The booleans in Loong64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +var Loong64 struct { + _ CacheLinePad + HasLSX bool // support 128-bit vector extension + HasLASX bool // support 256-bit vector extension + HasCRC32 bool // support CRC instruction + HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction + HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. 
+var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. +var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + HasZvbb bool // Vector Basic Bit-manipulation + HasZvbc bool // Vector Carryless Multiplication + HasZvkb bool // Vector Cryptography Bit-manipulation + HasZvkt bool // Vector Data-Independent Execution Latency + HasZvkg bool // Vector GCM/GMAC + HasZvkn bool // NIST Algorithm Suite (AES/SHA256/SHA512) + HasZvknc bool // NIST Algorithm Suite with carryless multiply + HasZvkng bool // NIST Algorithm Suite with GCM + HasZvks bool // ShangMi Algorithm Suite + HasZvksc bool // ShangMi Algorithm Suite with carryless multiplication + HasZvksg bool // ShangMi Algorithm Suite with GCM + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." 
{ + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_aix.go b/tools/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 000000000..9bf0c32eb --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_arm.go b/tools/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 000000000..301b752e9 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. 
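processOptions above wires up the cpu.* GODEBUG options: cpu.all toggles everything at once, features marked Required (such as sse2 on amd64) cannot be disabled, and unknown keys produce a diagnostic. A small sketch of exercising it, assuming an amd64 host:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Run as: GODEBUG=cpu.avx2=off,cpu.sse2=off ./prog
	// avx2 is cleared by processOptions; sse2 stays set because it
	// is marked Required on amd64, and a diagnostic is printed
	// instead of disabling it.
	fmt.Println("AVX2:", cpu.X86.HasAVX2)
	fmt.Println("SSE2:", cpu.X86.HasSSE2)
}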
+const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 000000000..af2aa99f9 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,194 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "runtime" + +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. 
+const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + {Name: "dit", Feature: &ARM64.HasDIT}, + {Name: "i8mm", Feature: &ARM64.HasI8MM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd", "openbsd": + doinit() + default: + // Many platforms don't seem to allow reading these registers. + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + switch extractBits(isar1, 52, 55) { + case 1: + ARM64.HasI8MM = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = 
true + + parseARM64SVERegister(getzfr0()) + } + + switch extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 000000000..22cc99844 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/tools/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 000000000..b838cb9e9 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. 
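parseARM64SystemRegisters above decodes the ARM64 ID registers four bits at a time via extractBits. A standalone sketch of the same field extraction, with an assumed register value:

package main

import "fmt"

// extractBits mirrors the helper above: it returns bits start..end
// (inclusive) of data, shifted down to the low bits.
func extractBits(data uint64, start, end uint) uint {
	return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
}

func main() {
	// Assumed ID_AA64ISAR0_EL1 value with the AES field (bits 4-7)
	// set to 2, meaning AES and PMULL are both implemented.
	var isar0 uint64 = 0x2 << 4
	switch extractBits(isar0, 4, 7) {
	case 1:
		fmt.Println("AES")
	case 2:
		fmt.Println("AES+PMULL") // printed for this value
	}
}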
+func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 000000000..6ac6e1efb --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 +func getzfr0() uint64 diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 000000000..c8ae6ddc1 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 000000000..32a44514e --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +package cpu + +// cpuid is implemented in cpu_gc_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s b/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s new file mode 100644 index 000000000..ce208ce6d --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
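darwinKernelVersionCheck above hand-parses the kernel's "major.minor.patch" release string to avoid pulling in dependencies, then ends with a one-line three-way comparison that is easy to misread. A sketch of that comparison in isolation, with assumed inputs:

package main

import "fmt"

// atLeast reports whether version a.b.c is >= major.minor.patch,
// mirroring the final comparison in darwinKernelVersionCheck.
func atLeast(a, b, c, major, minor, patch int) bool {
	return a > major || a == major && (b > minor || b == minor && c >= patch)
}

func main() {
	fmt.Println(atLeast(21, 3, 0, 21, 3, 0)) // true: exactly 21.3.0
	fmt.Println(atLeast(20, 6, 0, 21, 3, 0)) // false: older major
	fmt.Println(atLeast(22, 0, 0, 21, 3, 0)) // true: newer major wins
}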
+ +//go:build (386 || amd64 || amd64p32) && gc + +#include "textflag.h" + +// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), NOSPLIT, $0-24 + MOVL eaxArg+0(FP), AX + MOVL ecxArg+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv() (eax, edx uint32) +TEXT ·xgetbv(SB), NOSPLIT, $0-8 + MOVL $0, CX + XGETBV + MOVL AX, eax+0(FP) + MOVL DX, edx+4(FP) + RET diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go new file mode 100644 index 000000000..7f1946780 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 000000000..9526d2ce3 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c new file mode 100644 index 000000000..3f73a05dc --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +#include +#include +#include + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. 
+void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; +} + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 000000000..170d21ddf --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 000000000..743eb5435 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 000000000..2057006dc --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 000000000..f1caf0f78 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,120 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "strings" + "syscall" +) + +// HWCAP/HWCAP2 bits. These are exposed by Linux. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + + hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 +) + +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + +func doinit() { + if err := readHWCAP(); err != nil { + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. 
+ // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go new file mode 100644 index 000000000..4f3411432 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel. +const ( + hwcap_LOONGARCH_LSX = 1 << 4 + hwcap_LOONGARCH_LASX = 1 << 5 +) + +func doinit() { + // TODO: Features that require kernel support like LSX and LASX can + // be detected here once needed in std library or by the compiler. + Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX) + Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX) +} + +func hwcIsSet(hwc uint, val uint) bool { + return hwc&val != 0 +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 000000000..4686c1d54 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. 
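doinit above falls back in two directions when /proc/self/auxv is unreadable: direct MRS register reads on 4.11+ kernels (which can trap and emulate them), and /proc/cpuinfo parsing on older kernels. The parseRelease helper it relies on lives elsewhere in the package; the following is a simplified, hypothetical stand-in showing just the kernel-version gate, with an assumed release string:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMajorMinor is a simplified stand-in for the package's
// parseRelease helper: it reads the leading "major.minor" of a
// kernel release string such as "4.11.0-rc1".
func parseMajorMinor(release string) (int, int, bool) {
	parts := strings.SplitN(release, ".", 3)
	if len(parts) < 2 {
		return 0, 0, false
	}
	major, err1 := strconv.Atoi(parts[0])
	minor, err2 := strconv.Atoi(parts[1])
	if err1 != nil || err2 != nil {
		return 0, 0, false
	}
	return major, minor, true
}

func main() {
	release := "4.4.180" // assumed value, as on older Synology kernels
	major, minor, ok := parseMajorMinor(release)
	canEmulateCPUID := ok && (major > 4 || major == 4 && minor >= 11)
	fmt.Println(canEmulateCPUID) // false: fall back to /proc/cpuinfo
}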
+const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 000000000..a428dec9c --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 + +package cpu + +func doinit() {} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 000000000..197188e67 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 000000000..ad741536f --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,160 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. 
+// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_EXT_ZVBB = 0x20000 + riscv_HWPROBE_EXT_ZVBC = 0x40000 + riscv_HWPROBE_EXT_ZVKB = 0x80000 + riscv_HWPROBE_EXT_ZVKG = 0x100000 + riscv_HWPROBE_EXT_ZVKNED = 0x200000 + riscv_HWPROBE_EXT_ZVKNHB = 0x800000 + riscv_HWPROBE_EXT_ZVKSED = 0x1000000 + riscv_HWPROBE_EXT_ZVKSH = 0x2000000 + riscv_HWPROBE_EXT_ZVKT = 0x4000000 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. 
+ if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + RISCV64.HasZvbb = isSet(v, riscv_HWPROBE_EXT_ZVBB) + RISCV64.HasZvbc = isSet(v, riscv_HWPROBE_EXT_ZVBC) + RISCV64.HasZvkb = isSet(v, riscv_HWPROBE_EXT_ZVKB) + RISCV64.HasZvkg = isSet(v, riscv_HWPROBE_EXT_ZVKG) + RISCV64.HasZvkt = isSet(v, riscv_HWPROBE_EXT_ZVKT) + // Cryptography shorthand extensions + RISCV64.HasZvkn = isSet(v, riscv_HWPROBE_EXT_ZVKNED) && + isSet(v, riscv_HWPROBE_EXT_ZVKNHB) && RISCV64.HasZvkb && RISCV64.HasZvkt + RISCV64.HasZvknc = RISCV64.HasZvkn && RISCV64.HasZvbc + RISCV64.HasZvkng = RISCV64.HasZvkn && RISCV64.HasZvkg + RISCV64.HasZvks = isSet(v, riscv_HWPROBE_EXT_ZVKSED) && + isSet(v, riscv_HWPROBE_EXT_ZVKSH) && RISCV64.HasZvkb && RISCV64.HasZvkt + RISCV64.HasZvksc = RISCV64.HasZvks && RISCV64.HasZvbc + RISCV64.HasZvksg = RISCV64.HasZvks && RISCV64.HasZvkg + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 000000000..1517ac61d --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
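Once doinit above has populated RISCV64 (via riscv_hwprobe on 6.4+ kernels, with the HWCAP fallback for the C extension), consumers simply branch on the booleans. A minimal consumer sketch, illustrative only:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// HasV is only set for RVV 1.0, since riscv_hwprobe reports
	// versioned extensions; the incompatible 0.7.1 draft never
	// sets it.
	switch {
	case cpu.RISCV64.HasV && cpu.RISCV64.HasZvbb:
		fmt.Println("vector + Zvbb path")
	case cpu.RISCV64.HasV:
		fmt.Println("plain RVV 1.0 path")
	default:
		fmt.Println("scalar path")
	}
}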
+ +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 000000000..45ecb29ae --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package cpu + +const cacheLineSize = 64 + +// Bit fields for CPUCFG registers, Related reference documents: +// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg +const ( + // CPUCFG1 bits + cpucfg1_CRC32 = 1 << 25 + + // CPUCFG2 bits + cpucfg2_LAM_BH = 1 << 27 + cpucfg2_LAMCAS = 1 << 28 +) + +func initOptions() { + options = []option{ + {Name: "lsx", Feature: &Loong64.HasLSX}, + {Name: "lasx", Feature: &Loong64.HasLASX}, + {Name: "crc32", Feature: &Loong64.HasCRC32}, + {Name: "lam_bh", Feature: &Loong64.HasLAM_BH}, + {Name: "lamcas", Feature: &Loong64.HasLAMCAS}, + } + + // The CPUCFG data on Loong64 only reflects the hardware capabilities, + // not the kernel support status, so features such as LSX and LASX that + // require kernel support cannot be obtained from the CPUCFG data. + // + // These features only require hardware capability support and do not + // require kernel specific support, so they can be obtained directly + // through CPUCFG + cfg1 := get_cpucfg(1) + cfg2 := get_cpucfg(2) + + Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32) + Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS) + Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH) +} + +func get_cpucfg(reg uint32) uint32 + +func cfgIsSet(cfg uint32, val uint32) bool { + return cfg&val != 0 +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.s new file mode 100644 index 000000000..71cbaf1ce --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_loong64.s @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func get_cpucfg(reg uint32) uint32 +TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0 + MOVW reg+0(FP), R5 + // CPUCFG R5, R4 = 0x00006ca4 + WORD $0x00006ca4 + MOVW R4, ret+8(FP) + RET diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000..fedb00cc4 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build mips64 || mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/tools/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 000000000..ffb4ec7eb --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 000000000..ebfb3fc8e --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+ for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go new file mode 100644 index 000000000..85b64d5cc --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h. 
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 000000000..054ba05d6 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 000000000..e9ecf2a45 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm + +package cpu + +func archInit() {} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 000000000..5341e7f88 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && !netbsd && !openbsd && arm64 + +package cpu + +func doinit() {} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 000000000..5f8f2419a --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
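
On both NetBSD and OpenBSD above, the package ends up holding raw ID_AA64ISAR0/ID_AA64ISAR1 register values and hands them to parseARM64SystemRegisters, which lives in cpu_arm64.go outside this hunk. That function decodes 4-bit ID fields; a sketch of that decoding style, with a made-up register value (the field encoding for AES shown here follows the Arm architecture manual):

package main

import "fmt"

// extractBits pulls an inclusive bit range out of an ID register, in the
// style of the helper parseARM64SystemRegisters relies on.
func extractBits(data uint64, start, end uint) uint {
	return uint(data>>start) & ((1 << (end - start + 1)) - 1)
}

func main() {
	// Made-up ID_AA64ISAR0 value: AES field (bits 4-7) set to 2, defined
	// as AES plus PMULL support.
	var isar0 uint64 = 0x2 << 4

	switch extractBits(isar0, 4, 7) {
	case 1:
		fmt.Println("AES")
	case 2:
		fmt.Println("AES and PMULL")
	default:
		fmt.Println("no AES instructions")
	}
}
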
+ +//go:build !linux && (mips64 || mips64le) + +package cpu + +func archInit() { + Initialized = true +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 000000000..89608fba2 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 000000000..5ab87808f --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/tools/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 000000000..a0fd7e2f7 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 000000000..c14f12b14 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/tools/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 000000000..0f617aef5 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
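
The cpu_other_*.go stubs above lean on Go's build constraints: for every GOOS/GOARCH combination exactly one archInit (or doinit) is compiled in, with the negated tags (!linux && riscv64, and so on) selecting a conservative fallback that reports nothing rather than guessing. A two-file sketch of the same pattern, with hypothetical file, package, and variable names:

// file: detect_linux.go (hypothetical)
//go:build linux

package feature

var initialized bool

// archInit does real detection where the kernel exposes the data (auxv,
// sysctl, /proc, ...).
func archInit() { initialized = true }

// file: detect_other.go (hypothetical)
//go:build !linux

package feature

var initialized bool

// archInit is the fallback for every other OS: leave all optional
// features unset.
func archInit() { initialized = true }

Only one of the two files is ever part of a given build, so each carries its own copy of the shared declarations it needs.
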
+ +//go:build riscv64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + // RISC-V Cryptography Extensions + {Name: "zvbb", Feature: &RISCV64.HasZvbb}, + {Name: "zvbc", Feature: &RISCV64.HasZvbc}, + {Name: "zvkb", Feature: &RISCV64.HasZvkb}, + {Name: "zvkg", Feature: &RISCV64.HasZvkg}, + {Name: "zvkt", Feature: &RISCV64.HasZvkt}, + {Name: "zvkn", Feature: &RISCV64.HasZvkn}, + {Name: "zvknc", Feature: &RISCV64.HasZvknc}, + {Name: "zvkng", Feature: &RISCV64.HasZvkng}, + {Name: "zvks", Feature: &RISCV64.HasZvks}, + {Name: "zvksc", Feature: &RISCV64.HasZvksc}, + {Name: "zvksg", Feature: &RISCV64.HasZvksg}, + } +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 000000000..5881b8833 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. 
+type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
+ } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 000000000..1fb4b7013 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/tools/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 000000000..384787ea3 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. 
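
Back in cpu_s390x.go above, every facility and function-code lookup funnels through bitIsSet, which numbers bits MSB-first to match the layout of the z/Architecture facility list and query results. A self-contained check of that indexing, using a hand-built bit vector rather than a real STFLE result:

package main

import "fmt"

// bitIsSet mirrors the s390x helper above: bit index 0 is the leftmost
// (most significant) bit of the first doubleword.
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
	var list [4]uint64
	list[0] = 1 << 63 // set facility bit 0 (z architecture mode)

	fmt.Println(bitIsSet(list[:], 0))   // true
	fmt.Println(bitIsSet(list[:], 129)) // false: vector facility bit unset
}
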
+ +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/tools/vendor/golang.org/x/sys/cpu/cpu_x86.go b/tools/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000..1e642f330 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,162 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 || amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasCX16 = isSet(13, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. 
+	if X86.HasOSXSAVE {
+		eax, _ := xgetbv()
+		// Check if XMM and YMM registers have OS support.
+		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+
+		if runtime.GOOS == "darwin" {
+			// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
+			osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
+		} else {
+			// Check if OPMASK and ZMM registers have OS support.
+			osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
+		}
+	}
+
+	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
+	X86.HasBMI1 = isSet(3, ebx7)
+	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+	X86.HasBMI2 = isSet(8, ebx7)
+	X86.HasERMS = isSet(9, ebx7)
+	X86.HasRDSEED = isSet(18, ebx7)
+	X86.HasADX = isSet(19, ebx7)
+
+	X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because the AVX-512 foundation is the core required extension
+	if X86.HasAVX512 {
+		X86.HasAVX512F = true
+		X86.HasAVX512CD = isSet(28, ebx7)
+		X86.HasAVX512ER = isSet(27, ebx7)
+		X86.HasAVX512PF = isSet(26, ebx7)
+		X86.HasAVX512VL = isSet(31, ebx7)
+		X86.HasAVX512BW = isSet(30, ebx7)
+		X86.HasAVX512DQ = isSet(17, ebx7)
+		X86.HasAVX512IFMA = isSet(21, ebx7)
+		X86.HasAVX512VBMI = isSet(1, ecx7)
+		X86.HasAVX5124VNNIW = isSet(2, edx7)
+		X86.HasAVX5124FMAPS = isSet(3, edx7)
+		X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
+		X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
+		X86.HasAVX512VNNI = isSet(11, ecx7)
+		X86.HasAVX512GFNI = isSet(8, ecx7)
+		X86.HasAVX512VAES = isSet(9, ecx7)
+		X86.HasAVX512VBMI2 = isSet(6, ecx7)
+		X86.HasAVX512BITALG = isSet(12, ecx7)
+	}
+
+	X86.HasAMXTile = isSet(24, edx7)
+	X86.HasAMXInt8 = isSet(25, edx7)
+	X86.HasAMXBF16 = isSet(22, edx7)
+
+	// These features depend on the second level of extended features.
+	if eax7 >= 1 {
+		eax71, _, _, edx71 := cpuid(7, 1)
+		if X86.HasAVX512 {
+			X86.HasAVX512BF16 = isSet(5, eax71)
+		}
+		if X86.HasAVX {
+			X86.HasAVXIFMA = isSet(23, eax71)
+			X86.HasAVXVNNI = isSet(4, eax71)
+			X86.HasAVXVNNIInt8 = isSet(4, edx71)
+		}
+	}
+}
+
+func isSet(bitpos uint, value uint32) bool {
+	return value&(1<<bitpos) != 0
+}
diff --git a/tools/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/tools/vendor/golang.org/x/sys/cpu/hwcap_linux.go
new file mode 100644
--- /dev/null
+++ b/tools/vendor/golang.org/x/sys/cpu/hwcap_linux.go
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import "os"
+
+const (
+	_AT_HWCAP  = 16
+	_AT_HWCAP2 = 26
+
+	procAuxv = "/proc/self/auxv"
+
+	uintSize = int(32 << (^uint(0) >> 63))
+)
+
+// For platforms that don't have a 'cpuid' equivalent we use HWCAP/HWCAP2.
+// These are initialized in cpu_$GOARCH.go
+// and should not be changed after they are initialized.
+var hwCap uint
+var hwCap2 uint
+
+func readHWCAP() error {
+	// For Go 1.21+, get auxv from the Go runtime.
+	if a := getAuxv(); len(a) > 0 {
+		for len(a) >= 2 {
+			tag, val := a[0], uint(a[1])
+			a = a[2:]
+			switch tag {
+			case _AT_HWCAP:
+				hwCap = val
+			case _AT_HWCAP2:
+				hwCap2 = val
+			}
+		}
+		return nil
+	}
+
+	buf, err := os.ReadFile(procAuxv)
+	if err != nil {
+		// e.g. on android /proc/self/auxv is not accessible, so silently
+		// ignore the error and leave Initialized = false. On some
+		// architectures (e.g. arm64) doinit() implements a fallback
+		// readout and will set Initialized = true again.
+ return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff --git a/tools/vendor/golang.org/x/sys/cpu/parse.go b/tools/vendor/golang.org/x/sys/cpu/parse.go new file mode 100644 index 000000000..56a7e1a17 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/parse.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := range len(rel) { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := range len(rel) { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/tools/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/tools/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 000000000..4cd64c704 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/tools/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/tools/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 000000000..5f92ac9a2 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
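
parse.go above strips pre-release/build suffixes and tolerates a missing minor or patch component. A standalone sketch with two sample inputs; the function body is reproduced from the file above so the example compiles on its own, and the version strings are arbitrary:

package main

import (
	"fmt"
	"strconv"
)

// Local copy of parseRelease from parse.go above.
func parseRelease(rel string) (major, minor, patch int, ok bool) {
	// Strip anything after a dash or plus.
	for i := 0; i < len(rel); i++ {
		if rel[i] == '-' || rel[i] == '+' {
			rel = rel[:i]
			break
		}
	}
	next := func() (int, bool) {
		for i := 0; i < len(rel); i++ {
			if rel[i] == '.' {
				ver, err := strconv.Atoi(rel[:i])
				rel = rel[i+1:]
				return ver, err == nil
			}
		}
		ver, err := strconv.Atoi(rel)
		rel = ""
		return ver, err == nil
	}
	if major, ok = next(); !ok || rel == "" {
		return
	}
	if minor, ok = next(); !ok || rel == "" {
		return
	}
	patch, ok = next()
	return
}

func main() {
	fmt.Println(parseRelease("6.8.0-41-generic")) // 6 8 0 true
	fmt.Println(parseRelease("5.10"))             // 5 10 0 true (patch elided)
}
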
+ +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/tools/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/tools/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 000000000..4c9788ea8 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/tools/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/tools/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 000000000..1b9ccb091 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. + +//go:build aix && gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/tools/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/tools/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 000000000..e8b6cdbe9 --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/tools/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/tools/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go new file mode 100644 index 000000000..4d0888b0c --- /dev/null +++ b/tools/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go @@ -0,0 +1,98 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on Darwin without depending on x/sys/unix. + +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. + +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/tools/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/tools/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go deleted file mode 100644 index 73687de74..000000000 --- a/tools/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
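
darwinOSRelease above fills a fixed 256-byte buffer with a NUL-terminated release string via the CTL_KERN/KERN_OSRELEASE MIB. A caller has to trim at the first NUL before using the value; a small sketch with a made-up release string standing in for the sysctl result:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Simulate the buffer darwinOSRelease would fill (sample value only).
	var release [256]byte
	copy(release[:], "23.6.0\x00")

	// Trim at the first NUL, as a caller of darwinOSRelease must.
	s := string(release[:bytes.IndexByte(release[:], 0)])
	fmt.Println(s) // 23.6.0
}
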
- -//go:build go1.5 - -package plan9 - -import "syscall" - -func fixwd() { - syscall.Fixwd() -} - -func Getwd() (wd string, err error) { - return syscall.Getwd() -} - -func Chdir(path string) error { - return syscall.Chdir(path) -} diff --git a/tools/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/tools/vendor/golang.org/x/sys/plan9/pwd_plan9.go index fb9458218..7a76489db 100644 --- a/tools/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/tools/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,22 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !go1.5 - package plan9 +import "syscall" + func fixwd() { + syscall.Fixwd() } func Getwd() (wd string, err error) { - fd, err := open(".", O_RDONLY) - if err != nil { - return "", err - } - defer Close(fd) - return Fd2path(fd) + return syscall.Getwd() } func Chdir(path string) error { - return chdir(path) + return syscall.Chdir(path) } diff --git a/tools/vendor/golang.org/x/sys/unix/affinity_linux.go b/tools/vendor/golang.org/x/sys/unix/affinity_linux.go index 6e5c81acd..3ea470387 100644 --- a/tools/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/tools/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error { // Zero clears the set s, so that it contains no CPUs. func (s *CPUSet) Zero() { + clear(s[:]) +} + +// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity] +// will silently ignore any invalid CPU bits in [CPUSet] so this is an +// efficient way of resetting the CPU affinity of a process. +func (s *CPUSet) Fill() { for i := range s { - s[i] = 0 + s[i] = ^cpuMask(0) } } diff --git a/tools/vendor/golang.org/x/sys/unix/fdset.go b/tools/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18cd..62ed12645 100644 --- a/tools/vendor/golang.org/x/sys/unix/fdset.go +++ b/tools/vendor/golang.org/x/sys/unix/fdset.go @@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool { // Zero clears the set fds. func (fds *FdSet) Zero() { - for i := range fds.Bits { - fds.Bits[i] = 0 - } + clear(fds.Bits[:]) } diff --git a/tools/vendor/golang.org/x/sys/unix/ifreq_linux.go b/tools/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae4..309f5a2b0 100644 --- a/tools/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/tools/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) { // clear zeroes the ifreq's union field to prevent trailing garbage data from // being sent to the kernel if an ifreq is reused. func (ifr *Ifreq) clear() { - for i := range ifr.raw.Ifru { - ifr.raw.Ifru[i] = 0 - } + clear(ifr.raw.Ifru[:]) } // TODO(mdlayher): export as IfreqData? For now we can provide helpers such as diff --git a/tools/vendor/golang.org/x/sys/unix/mkall.sh b/tools/vendor/golang.org/x/sys/unix/mkall.sh index e6f31d374..d0ed61191 100644 --- a/tools/vendor/golang.org/x/sys/unix/mkall.sh +++ b/tools/vendor/golang.org/x/sys/unix/mkall.sh @@ -49,6 +49,7 @@ esac if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) + set -e $cmd docker build --tag generate:$GOOS $GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." 
&& pwd):/build generate:$GOOS exit diff --git a/tools/vendor/golang.org/x/sys/unix/mkerrors.sh b/tools/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c3..d1c8b2640 100644 --- a/tools/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/tools/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -349,6 +349,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' diff --git a/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go b/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad3..7838ca5db 100644 --- a/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/tools/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b == 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git 
a/tools/vendor/golang.org/x/sys/unix/syscall_linux.go b/tools/vendor/golang.org/x/sys/unix/syscall_linux.go index 4958a6570..9439af961 100644 --- a/tools/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/tools/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } diff --git a/tools/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/tools/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099a..34a467697 100644 --- a/tools/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/tools/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/tools/vendor/golang.org/x/sys/unix/syscall_solaris.go b/tools/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc395547..18a3d9bda 100644 --- a/tools/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/tools/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4f432bfe8..b6db27d93 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -319,6 +319,7 @@ const ( AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_INTEGRITY_USERSPACE = 0x710 AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f AUDIT_IPE_ACCESS = 0x58c @@ -327,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -491,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -527,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -554,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -843,9 +849,9 @@ 
const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2023-03-01)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x30 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -936,11 +942,10 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 + ETHTOOL_FAMILY_NAME = "ethtool" + ETHTOOL_FAMILY_VERSION = 0x1 ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_LLRS = 0x20 @@ -1203,13 +1208,18 @@ const ( FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 FAN_EPIDFD = -0x2 + FAN_ERRNO_BITS = 0x8 + FAN_ERRNO_MASK = 0xff + FAN_ERRNO_SHIFT = 0x18 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 + FAN_EVENT_INFO_TYPE_RANGE = 0x6 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 @@ -1224,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1240,6 +1253,7 @@ const ( FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 + FAN_PRE_ACCESS = 0x100000 FAN_Q_OVERFLOW = 0x4000 FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 @@ -1247,6 +1261,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1266,6 +1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1574,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1625,7 +1640,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1687,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1809,7 +1822,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2485,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2644,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 
0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2724,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2787,7 +2813,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e + RTA_MAX = 0x1f RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -2864,10 +2890,12 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELANYCAST = 0x3d RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELLINKPROP = 0x6d RTM_DELMDB = 0x55 + RTM_DELMULTICAST = 0x39 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 @@ -2917,11 +2945,13 @@ const ( RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWANYCAST = 0x3c RTM_NEWCACHEREPORT = 0x60 RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWLINKPROP = 0x6c RTM_NEWMDB = 0x54 + RTM_NEWMULTICAST = 0x38 RTM_NEWNDUSEROPT = 0x44 RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 @@ -2970,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -2987,11 +3018,12 @@ const ( RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 RWF_ATOMIC = 0x40 + RWF_DONTCACHE = 0x80 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x7f + RWF_SUPPORTED = 0xff RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3271,6 +3303,7 @@ const ( STATX_BTIME = 0x800 STATX_CTIME = 0x80 STATX_DIOALIGN = 0x2000 + STATX_DIO_READ_ALIGN = 0x20000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3322,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xe + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3392,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3503,6 +3534,7 @@ const ( TP_STATUS_WRONG_FORMAT = 0x4 TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 + UBI_IOCECNFO = 0xc01c6f06 UDF_SUPER_MAGIC = 0x15013346 UDP_CORK = 0x1 UDP_ENCAP = 0x64 @@ -3515,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3559,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3673,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 75207613c..1c37f9fbc 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 
0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -372,6 +374,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c68acda53..6f54d34ae 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -373,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a8c607ab8..783ec5c12 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -378,6 +380,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 18563dd8d..ca83d3ba1 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 22912cdaa..607e611c0 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -365,6 +367,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + 
SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 29344eb37..b9cb5bd3c 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 20d51fb96..65b078a63 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 321b60902..5298a3033 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9bacdf1e2..7bc557c87 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -371,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c22427261..152399bb0 100644 --- 
a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -426,6 +428,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6270c8ee1..1a1ce2409 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9966c1941..4231a1fb5 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -430,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 848e5fcc4..21c0e9526 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -362,6 +364,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 669b2adb8..f00d1cd7c 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa 
SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -434,6 +436,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 4834e5751..bc8d539e6 100644 --- a/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/tools/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 @@ -473,6 +475,7 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVMARK = 0x54 + SO_RCVPRIORITY = 0x5b SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 diff --git a/tools/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/tools/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413c..b4609c20c 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/tools/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -221,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -371,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff30..aca56ee49 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695..2ea1ef58c 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ 
const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e502974..d22c8af31 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51..5ee264ae9 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a1..f9f03ebf5 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336..87c2118e8 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b996227..391ad102f 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e..565615775 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc2..0482b52e3 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb..71806f08f 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go 
b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b44636502..e35a71058 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c18..2aea47670 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 840539169..6c9bb4e56 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d..680bc9915 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9..620f27105 100644 --- a/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/tools/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go index a46abe647..944e75a11 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -114,8 +114,10 @@ type Statx_t struct { Atomic_write_unit_min uint32 Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 + Dio_read_offset_align uint32 + Atomic_write_unit_max_opt uint32 _ [1]uint32 - _ [9]uint64 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -629,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -686,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -737,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -2226,8 +2241,11 @@ const ( NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_INNER_HEADER = 0x3 + NFT_PAYLOAD_TUN_HEADER 
= 0x4 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_CSUM_SCTP = 0x2 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 @@ -2314,6 +2332,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2594,8 +2617,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3041,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 + TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS = 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3075,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { @@ -3802,7 +3854,16 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2d + ETHTOOL_MSG_PLCA_GET_CFG = 0x27 + ETHTOOL_MSG_PLCA_SET_CFG = 0x28 + ETHTOOL_MSG_PLCA_GET_STATUS = 0x29 + ETHTOOL_MSG_MM_GET = 0x2a + ETHTOOL_MSG_MM_SET = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c + ETHTOOL_MSG_PHY_GET = 0x2d + ETHTOOL_MSG_TSCONFIG_GET = 0x2e + ETHTOOL_MSG_TSCONFIG_SET = 0x2f + ETHTOOL_MSG_USER_MAX = 0x2f ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3903,17 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2e + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27 + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28 + ETHTOOL_MSG_PLCA_NTF = 0x29 + ETHTOOL_MSG_MM_GET_REPLY = 0x2a + ETHTOOL_MSG_MM_NTF = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c + ETHTOOL_MSG_PHY_GET_REPLY = 0x2d + ETHTOOL_MSG_PHY_NTF = 0x2e + ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f + ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30 + ETHTOOL_MSG_KERNEL_MAX = 0x30 ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3949,7 +4020,12 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0x10 + ETHTOOL_A_RINGS_RX_PUSH = 0xe + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10 + ETHTOOL_A_RINGS_HDS_THRESH = 0x11 + ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12 + ETHTOOL_A_RINGS_MAX = 0x12 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4015,7 +4091,9 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x6 + ETHTOOL_A_TSINFO_STATS = 0x6 + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 + 
ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4101,6 +4179,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4613,6 +4704,7 @@ const ( NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 + NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4623,6 +4715,7 @@ const ( NL80211_ATTR_BSS_BASIC_RATES = 0x24 NL80211_ATTR_BSS = 0x2f NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147 NL80211_ATTR_BSS_HT_OPMODE = 0x6d NL80211_ATTR_BSSID = 0xf5 NL80211_ATTR_BSS_SELECT = 0xe3 @@ -4682,6 +4775,7 @@ const ( NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EMA_RNR_ELEMS = 0x145 NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa @@ -4717,6 +4811,7 @@ const ( NL80211_ATTR_HIDDEN_SSID = 0x7e NL80211_ATTR_HT_CAPABILITY = 0x1f NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144 NL80211_ATTR_IE_ASSOC_RESP = 0x80 NL80211_ATTR_IE = 0x2a NL80211_ATTR_IE_PROBE_RESP = 0x7f @@ -4747,9 +4842,10 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14d + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 @@ -4774,9 +4870,12 @@ const ( NL80211_ATTR_MGMT_SUBTYPE = 0x29 NL80211_ATTR_MLD_ADDR = 0x13a NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_DISABLED = 0x146 NL80211_ATTR_MLO_LINK_ID = 0x139 NL80211_ATTR_MLO_LINKS = 0x138 NL80211_ATTR_MLO_SUPPORT = 0x13b + NL80211_ATTR_MLO_TTLM_DLINK = 0x148 + NL80211_ATTR_MLO_TTLM_ULINK = 0x149 NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4809,12 +4908,14 @@ const ( NL80211_ATTR_PORT_AUTHORIZED = 0x103 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_POWER_RULE_PSD = 0x8 NL80211_ATTR_PREV_BSSID = 0x4f NL80211_ATTR_PRIVACY = 0x46 NL80211_ATTR_PROBE_RESP = 0x91 NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_PUNCT_BITMAP = 0x142 NL80211_ATTR_QOS_MAP = 0xc7 NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 @@ -4943,7 +5044,9 @@ const ( NL80211_ATTR_WIPHY_FREQ = 0x26 NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RADIOS = 0x14b NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 @@ -4978,6 +5081,8 @@ const ( NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_S1G_CAPA = 0xd + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 @@ -5001,6 +5106,10 @@ const ( NL80211_BSS_BEACON_INTERVAL = 0x4 NL80211_BSS_BEACON_TSF = 0xd NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2 + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1 + NL80211_BSS_CANNOT_USE_REASONS = 0x18 + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2 NL80211_BSS_CAPABILITY = 0x5 NL80211_BSS_CHAIN_SIGNAL = 0x13 NL80211_BSS_CHAN_WIDTH_10 = 0x1 @@ -5032,6 +5141,9 @@ const ( NL80211_BSS_STATUS = 0x9 NL80211_BSS_STATUS_IBSS_JOINED = 0x2 NL80211_BSS_TSF = 0x3 + NL80211_BSS_USE_FOR = 0x17 + NL80211_BSS_USE_FOR_MLD_LINK = 0x2 + NL80211_BSS_USE_FOR_NORMAL = 0x1 NL80211_CHAN_HT20 = 0x1 NL80211_CHAN_HT40MINUS = 0x2 NL80211_CHAN_HT40PLUS = 0x3 @@ -5117,7 +5229,8 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9b + NL80211_CMD_LINKS_REMOVED = 0x9a + NL80211_CMD_MAX = 0x9d NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5161,6 +5274,7 @@ const ( NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f NL80211_CMD_SET_FILS_AAD = 0x92 + NL80211_CMD_SET_HW_TIMESTAMP = 0x99 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -5180,6 +5294,7 @@ const ( NL80211_CMD_SET_SAR_SPECS = 0x8c NL80211_CMD_SET_STATION = 0x12 NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 NL80211_CMD_SET_WDS_PEER = 0x42 NL80211_CMD_SET_WIPHY = 0x2 @@ -5247,6 +5362,7 @@ const ( NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 @@ -5262,6 +5378,7 @@ const ( NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43 NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 @@ -5281,9 +5398,12 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42 + NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41 NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_PUNCT = 0x3e NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 @@ -5295,8 +5415,10 @@ const ( NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_NAN = 0x3f NL80211_EXT_FEATURE_SECURE_RTT = 0x38 NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44 NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 NL80211_EXT_FEATURE_TXQS = 0x1c NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 @@ -5343,7 +5465,10 @@ const ( NL80211_FREQUENCY_ATTR_2MHZ = 0x16 NL80211_FREQUENCY_ATTR_4MHZ = 0x17 NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21 + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 
0xd + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 NL80211_FREQUENCY_ATTR_DISABLED = 0x2 @@ -5351,12 +5476,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 @@ -5364,8 +5491,11 @@ const ( NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_OFFSET = 0x14 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_PSD = 0x1c NL80211_FREQUENCY_ATTR_RADAR = 0x5 NL80211_FREQUENCY_ATTR_WMM = 0x12 NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 @@ -5430,6 +5560,7 @@ const ( NL80211_IFTYPE_STATION = 0x2 NL80211_IFTYPE_UNSPECIFIED = 0x0 NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN_32 = 0x20 NL80211_KCK_EXT_LEN = 0x18 NL80211_KCK_LEN = 0x10 NL80211_KEK_EXT_LEN = 0x20 @@ -5458,9 +5589,10 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -5703,11 +5835,16 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d + NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19 + NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 @@ -5753,6 +5890,8 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_S1G_MCS = 0x17 + NL80211_RATE_INFO_S1G_NSS = 0x18 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 NL80211_RATE_INFO_VHT_NSS = 0x7 @@ -5770,14 +5909,19 @@ const ( NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_REPLAY_CTR = 0x3 NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000 NL80211_RRF_AUTO_BW = 0x800 NL80211_RRF_DFS = 0x10 + NL80211_RRF_DFS_CONCURRENT = 0x200000 NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 NL80211_RRF_NO_320MHZ = 0x40000 + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_EHT = 0x80000 NL80211_RRF_NO_HE = 0x20000 NL80211_RRF_NO_HT40 = 0x6000 NL80211_RRF_NO_HT40MINUS = 0x2000 @@ 
-5788,7 +5932,10 @@ const ( NL80211_RRF_NO_IR = 0x80 NL80211_RRF_NO_OFDM = 0x1 NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000 NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PSD = 0x100000 NL80211_RRF_PTMP_ONLY = 0x40 NL80211_RRF_PTP_ONLY = 0x20 NL80211_RXMGMT_FLAG_ANSWERED = 0x1 @@ -5849,6 +5996,7 @@ const ( NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_SPP_AMSDU = 0x8 NL80211_STA_FLAG_TDLS_PEER = 0x6 NL80211_STA_FLAG_WME = 0x3 NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 @@ -6007,6 +6155,13 @@ const ( NL80211_VHT_CAPABILITY_LEN = 0xc NL80211_VHT_NSS_MAX = 0x8 NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2 + NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1 + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3 + NL80211_WIPHY_RADIO_ATTR_MAX = 0x4 + NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1 NL80211_WMMR_AIFSN = 0x3 NL80211_WMMR_CW_MAX = 0x2 NL80211_WMMR_CW_MIN = 0x1 @@ -6038,6 +6193,7 @@ const ( NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14 NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fd402da43..485f2d3a1 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,7 +282,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -338,6 +338,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index eb7a5e186..ecbd1ad8b 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -351,6 +351,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d78ac108b..02f0463a4 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ 
b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,7 +273,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -329,6 +329,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cd06d47f1..6f4d400d2 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -330,6 +330,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2f28fe26c..cd532cfa5 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -331,6 +331,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 71d6cac2f..413362085 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max 
uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 8596d4535..eaa37eb71 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index cd60ea186..98ae6a1e4 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index b0ae420c4..cae196159 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 835972875..6ce3b4e02 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,7 +285,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -341,6 +341,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + 
Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 69eb6a5c6..c7429c6a1 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5f583cb62..4bf4baf4c 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ad05b51a6..e9709d70a 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -358,6 +358,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cf3ce9003..fb44268ca 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -353,6 +353,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max 
uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 590b56739..9c38265c7 100644 --- a/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/tools/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -335,6 +335,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/tools/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/tools/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go index fc1835d8a..bc1ce4360 100644 --- a/tools/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ b/tools/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -52,7 +52,7 @@ var ( ) func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procRegConnectRegistryW.Addr(), uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -60,7 +60,7 @@ func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall } func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + r0, _, _ := syscall.SyscallN(procRegCreateKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -68,7 +68,7 @@ func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class * } func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + r0, _, _ := syscall.SyscallN(procRegDeleteKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -76,7 +76,7 @@ func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { } func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + r0, _, _ := syscall.SyscallN(procRegDeleteValueW.Addr(), uintptr(key), 
uintptr(unsafe.Pointer(name))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -84,7 +84,7 @@ func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { } func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -92,7 +92,7 @@ func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint3 } func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + r0, _, _ := syscall.SyscallN(procRegLoadMUIStringW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -100,7 +100,7 @@ func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint } func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + r0, _, _ := syscall.SyscallN(procRegSetValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -108,7 +108,7 @@ func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype } func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) diff --git a/tools/vendor/golang.org/x/sys/windows/security_windows.go b/tools/vendor/golang.org/x/sys/windows/security_windows.go index b6e1ab76f..a8b0364c7 100644 --- a/tools/vendor/golang.org/x/sys/windows/security_windows.go +++ b/tools/vendor/golang.org/x/sys/windows/security_windows.go @@ -1303,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE return nil, err } if absoluteSDSize > 0 { - absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + absoluteSD = new(SECURITY_DESCRIPTOR) + if unsafe.Sizeof(*absoluteSD) < 
uintptr(absoluteSDSize) { + panic("sizeof(SECURITY_DESCRIPTOR) too small") + } } var ( dacl *ACL @@ -1312,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE group *SID ) if daclSize > 0 { - dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize)))) } if saclSize > 0 { - sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize)))) } if ownerSize > 0 { - owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize)))) } if groupSize > 0 { - group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize)))) } + // We call into Windows via makeAbsoluteSD, which sets up + // pointers within absoluteSD that point to other chunks of memory + // we pass into makeAbsoluteSD, and that happens outside the view of the GC. + // We therefore take some care here to then verify the pointers are as we expect + // and set them explicitly in view of the GC. See https://go.dev/issue/73199. + // TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575. err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + if err != nil { + // Don't return absoluteSD, which might be partially initialized. + return nil, err + } + // Before using any fields, verify absoluteSD is in the format we expect according to Windows. + // See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors + absControl, _, err := absoluteSD.Control() + if err != nil { + panic("absoluteSD: " + err.Error()) + } + if absControl&SE_SELF_RELATIVE != 0 { + panic("absoluteSD not in absolute format") + } + if absoluteSD.dacl != dacl { + panic("dacl pointer mismatch") + } + if absoluteSD.sacl != sacl { + panic("sacl pointer mismatch") + } + if absoluteSD.owner != owner { + panic("owner pointer mismatch") + } + if absoluteSD.group != group { + panic("group pointer mismatch") + } + absoluteSD.dacl = dacl + absoluteSD.sacl = sacl + absoluteSD.owner = owner + absoluteSD.group = group + return } diff --git a/tools/vendor/golang.org/x/sys/windows/syscall_windows.go b/tools/vendor/golang.org/x/sys/windows/syscall_windows.go index 4a3254386..bd5133730 100644 --- a/tools/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/tools/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys 
Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW @@ -870,6 +872,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom //sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo //sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW +//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW //sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname //sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname //sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs @@ -1698,8 +1701,9 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) { // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. func (s *NTUnicodeString) Slice() []uint16 { - slice := unsafe.Slice(s.Buffer, s.MaximumLength) - return slice[:s.Length] + // Note: this rounds the length down, if it happens + // to (incorrectly) be odd. Probably safer than rounding up. + return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2] } func (s *NTUnicodeString) String() string { diff --git a/tools/vendor/golang.org/x/sys/windows/types_windows.go b/tools/vendor/golang.org/x/sys/windows/types_windows.go index ad67df2fd..358be3c7f 100644 --- a/tools/vendor/golang.org/x/sys/windows/types_windows.go +++ b/tools/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. +const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 @@ -1976,6 +1992,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 @@ -2700,6 +2722,8 @@ type CommTimeouts struct { // NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. type NTUnicodeString struct { + // Note: Length and MaximumLength are in *bytes*, not uint16s. + // They should always be even. 
Length uint16 MaximumLength uint16 Buffer *uint16 @@ -3628,3 +3652,213 @@ const ( KLF_NOTELLSHELL = 0x00000080 KLF_SETFORPROCESS = 0x00000100 ) + +// Virtual Key codes +// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes +const ( + VK_LBUTTON = 0x01 + VK_RBUTTON = 0x02 + VK_CANCEL = 0x03 + VK_MBUTTON = 0x04 + VK_XBUTTON1 = 0x05 + VK_XBUTTON2 = 0x06 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_CLEAR = 0x0C + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_PAUSE = 0x13 + VK_CAPITAL = 0x14 + VK_KANA = 0x15 + VK_HANGEUL = 0x15 + VK_HANGUL = 0x15 + VK_IME_ON = 0x16 + VK_JUNJA = 0x17 + VK_FINAL = 0x18 + VK_HANJA = 0x19 + VK_KANJI = 0x19 + VK_IME_OFF = 0x1A + VK_ESCAPE = 0x1B + VK_CONVERT = 0x1C + VK_NONCONVERT = 0x1D + VK_ACCEPT = 0x1E + VK_MODECHANGE = 0x1F + VK_SPACE = 0x20 + VK_PRIOR = 0x21 + VK_NEXT = 0x22 + VK_END = 0x23 + VK_HOME = 0x24 + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_SELECT = 0x29 + VK_PRINT = 0x2A + VK_EXECUTE = 0x2B + VK_SNAPSHOT = 0x2C + VK_INSERT = 0x2D + VK_DELETE = 0x2E + VK_HELP = 0x2F + VK_LWIN = 0x5B + VK_RWIN = 0x5C + VK_APPS = 0x5D + VK_SLEEP = 0x5F + VK_NUMPAD0 = 0x60 + VK_NUMPAD1 = 0x61 + VK_NUMPAD2 = 0x62 + VK_NUMPAD3 = 0x63 + VK_NUMPAD4 = 0x64 + VK_NUMPAD5 = 0x65 + VK_NUMPAD6 = 0x66 + VK_NUMPAD7 = 0x67 + VK_NUMPAD8 = 0x68 + VK_NUMPAD9 = 0x69 + VK_MULTIPLY = 0x6A + VK_ADD = 0x6B + VK_SEPARATOR = 0x6C + VK_SUBTRACT = 0x6D + VK_DECIMAL = 0x6E + VK_DIVIDE = 0x6F + VK_F1 = 0x70 + VK_F2 = 0x71 + VK_F3 = 0x72 + VK_F4 = 0x73 + VK_F5 = 0x74 + VK_F6 = 0x75 + VK_F7 = 0x76 + VK_F8 = 0x77 + VK_F9 = 0x78 + VK_F10 = 0x79 + VK_F11 = 0x7A + VK_F12 = 0x7B + VK_F13 = 0x7C + VK_F14 = 0x7D + VK_F15 = 0x7E + VK_F16 = 0x7F + VK_F17 = 0x80 + VK_F18 = 0x81 + VK_F19 = 0x82 + VK_F20 = 0x83 + VK_F21 = 0x84 + VK_F22 = 0x85 + VK_F23 = 0x86 + VK_F24 = 0x87 + VK_NUMLOCK = 0x90 + VK_SCROLL = 0x91 + VK_OEM_NEC_EQUAL = 0x92 + VK_OEM_FJ_JISHO = 0x92 + VK_OEM_FJ_MASSHOU = 0x93 + VK_OEM_FJ_TOUROKU = 0x94 + VK_OEM_FJ_LOYA = 0x95 + VK_OEM_FJ_ROYA = 0x96 + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 + VK_LMENU = 0xA4 + VK_RMENU = 0xA5 + VK_BROWSER_BACK = 0xA6 + VK_BROWSER_FORWARD = 0xA7 + VK_BROWSER_REFRESH = 0xA8 + VK_BROWSER_STOP = 0xA9 + VK_BROWSER_SEARCH = 0xAA + VK_BROWSER_FAVORITES = 0xAB + VK_BROWSER_HOME = 0xAC + VK_VOLUME_MUTE = 0xAD + VK_VOLUME_DOWN = 0xAE + VK_VOLUME_UP = 0xAF + VK_MEDIA_NEXT_TRACK = 0xB0 + VK_MEDIA_PREV_TRACK = 0xB1 + VK_MEDIA_STOP = 0xB2 + VK_MEDIA_PLAY_PAUSE = 0xB3 + VK_LAUNCH_MAIL = 0xB4 + VK_LAUNCH_MEDIA_SELECT = 0xB5 + VK_LAUNCH_APP1 = 0xB6 + VK_LAUNCH_APP2 = 0xB7 + VK_OEM_1 = 0xBA + VK_OEM_PLUS = 0xBB + VK_OEM_COMMA = 0xBC + VK_OEM_MINUS = 0xBD + VK_OEM_PERIOD = 0xBE + VK_OEM_2 = 0xBF + VK_OEM_3 = 0xC0 + VK_OEM_4 = 0xDB + VK_OEM_5 = 0xDC + VK_OEM_6 = 0xDD + VK_OEM_7 = 0xDE + VK_OEM_8 = 0xDF + VK_OEM_AX = 0xE1 + VK_OEM_102 = 0xE2 + VK_ICO_HELP = 0xE3 + VK_ICO_00 = 0xE4 + VK_PROCESSKEY = 0xE5 + VK_ICO_CLEAR = 0xE6 + VK_OEM_RESET = 0xE9 + VK_OEM_JUMP = 0xEA + VK_OEM_PA1 = 0xEB + VK_OEM_PA2 = 0xEC + VK_OEM_PA3 = 0xED + VK_OEM_WSCTRL = 0xEE + VK_OEM_CUSEL = 0xEF + VK_OEM_ATTN = 0xF0 + VK_OEM_FINISH = 0xF1 + VK_OEM_COPY = 0xF2 + VK_OEM_AUTO = 0xF3 + VK_OEM_ENLW = 0xF4 + VK_OEM_BACKTAB = 0xF5 + VK_ATTN = 0xF6 + VK_CRSEL = 0xF7 + VK_EXSEL = 0xF8 + VK_EREOF = 0xF9 + VK_PLAY = 0xFA + VK_ZOOM = 0xFB + VK_NONAME = 0xFC + VK_PA1 = 0xFD + VK_OEM_CLEAR = 0xFE +) + +// Mouse button constants. 
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001 + RIGHTMOST_BUTTON_PRESSED = 0x0002 + FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004 + FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008 + FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010 +) + +// Control key state constants. +// https://docs.microsoft.com/en-us/windows/console/key-event-record-str +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 + LEFT_ALT_PRESSED = 0x0002 + LEFT_CTRL_PRESSED = 0x0008 + NUMLOCK_ON = 0x0020 + RIGHT_ALT_PRESSED = 0x0001 + RIGHT_CTRL_PRESSED = 0x0004 + SCROLLLOCK_ON = 0x0040 + SHIFT_PRESSED = 0x0010 +) + +// Mouse event record event flags. +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + MOUSE_MOVED = 0x0001 + DOUBLE_CLICK = 0x0002 + MOUSE_WHEELED = 0x0004 + MOUSE_HWHEELED = 0x0008 +) + +// Input Record Event Types +// https://learn.microsoft.com/en-us/windows/console/input-record-str +const ( + FOCUS_EVENT = 0x0010 + KEY_EVENT = 0x0001 + MENU_EVENT = 0x0008 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 +) diff --git a/tools/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/tools/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 01c0716c2..426151a01 100644 --- a/tools/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/tools/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -238,6 +238,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ -284,6 +285,7 @@ var ( procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -511,6 +513,7 @@ var ( procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW") procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -545,25 +548,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return }
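From this point to the end of the generated file, every hunk applies the same mechanical rewrite: the fixed-arity syscall.Syscall, Syscall6, Syscall9, Syscall12, and Syscall15 helpers, which required an explicit argument count and trailing zero padding, become calls to the variadic syscall.SyscallN (available since Go 1.18). A minimal standalone sketch of the new form on a Windows build; kernel32's Beep is an arbitrary illustration and is not part of this patch:

package main

import "syscall"

var (
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
	procBeep    = modkernel32.NewProc("Beep") // BOOL Beep(DWORD freq, DWORD duration)
)

func main() {
	// SyscallN takes the proc address plus exactly the real arguments:
	// no argument-count parameter and no trailing zeros, unlike Syscall6.
	r1, _, e1 := syscall.SyscallN(procBeep.Addr(), 750, 300)
	if r1 == 0 { // Beep returns FALSE (0) on failure
		panic(e1)
	}
}

func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen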
uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -573,7 +576,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -585,7 +588,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -593,7 +596,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), 
uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -601,7 +604,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -609,7 +612,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -617,7 +620,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -625,7 +628,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), 
uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -633,7 +636,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -641,7 +644,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -649,7 +652,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -657,7 +660,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if r1 == 0 { err = errnoErr(e1) } @@ -674,7 +677,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -682,7 +685,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -690,7 +693,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, 
_, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -702,7 +705,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -710,7 +713,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -719,7 +722,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -727,7 +730,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -735,7 +738,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -743,7 +746,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -751,7 +754,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -759,7 +762,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -767,7 +770,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -775,7 +778,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = 
errnoErr(e1) } @@ -783,7 +786,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -791,13 +794,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { err = errnoErr(e1) } @@ -805,7 +808,7 @@ func FreeSid(sid *SID) (err error) { } func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { - r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) if r1 == 0 { err = errnoErr(e1) } @@ -813,7 +816,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { } func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -828,7 +831,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -836,7 +839,7 @@ func 
_getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -852,7 +855,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -866,7 +869,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -875,7 +878,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -885,7 +888,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau if *ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -894,7 +897,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -910,7 +913,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), 
uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -920,7 +923,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -928,25 +931,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -954,7 +957,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -962,7 +965,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), 
uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -978,7 +981,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -986,7 +989,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -995,25 +998,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1021,7 +1024,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), 
uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1029,7 +1032,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1037,7 +1040,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1045,7 +1048,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1053,7 +1056,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1061,7 +1064,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), 
uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1069,7 +1072,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1078,7 +1081,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1091,7 +1094,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1099,7 +1102,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1107,7 +1110,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1119,7 +1122,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1127,7 +1130,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func 
QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1135,7 +1138,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1143,7 +1146,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1151,7 +1154,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1159,7 +1162,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1175,7 +1178,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1183,7 +1186,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := 
syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1191,7 +1194,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1199,7 +1202,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1207,7 +1210,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1216,7 +1219,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), 
uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1225,7 +1228,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1233,7 +1236,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1241,7 +1244,7 @@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1249,7 +1252,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1266,7 +1269,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1274,7 +1277,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest 
SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1290,7 +1293,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1302,7 +1305,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1314,7 +1317,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1322,7 +1325,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1335,7 +1338,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1343,7 +1346,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), 
uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1351,7 +1354,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1359,7 +1362,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1367,7 +1370,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1375,7 +1378,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1383,7 +1386,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1391,7 +1394,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1399,7 +1402,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1407,7 +1410,7 @@ func 
CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1416,7 +1419,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1424,13 +1427,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1439,7 +1442,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1448,7 +1451,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = 
(*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1457,18 +1460,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1476,7 +1479,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1484,13 +1487,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1499,7 +1502,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1508,7 +1511,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1520,7 +1523,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1529,7 +1532,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1537,7 +1540,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1545,7 +1548,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, } func CryptQueryObject(objectType 
uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1553,7 +1556,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1561,7 +1564,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1570,7 +1573,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1585,7 +1588,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), 
uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1593,12 +1596,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype)) return } func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1606,7 +1609,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1614,7 +1617,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1622,7 +1625,7 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { } func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1630,7 +1633,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter } func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1638,7 +1641,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { } func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1646,7 +1649,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, 
pdwBestIfIndex *uint32) (errcod } func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1654,7 +1657,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1662,7 +1665,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1674,7 +1677,7 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1686,7 +1689,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1694,7 +1697,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext } func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path))) cookie = uintptr(r0) if cookie == 0 { err = errnoErr(e1) @@ -1703,7 +1706,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) { } func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1711,7 +1714,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1719,7 +1722,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, 
uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1727,7 +1730,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1735,7 +1738,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1743,7 +1746,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1751,12 +1754,12 @@ func CloseHandle(handle Handle) (err error) { } func ClosePseudoConsole(console Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console)) return } func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1764,7 +1767,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { } func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa))) if r1 == 0 { err = errnoErr(e1) } @@ -1772,7 +1775,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { } func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1781,7 +1784,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d } func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), 
uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1790,7 +1793,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat } func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1799,7 +1802,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS } func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1808,7 +1811,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes } func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1816,7 +1819,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr } func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1825,7 +1828,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr } func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1834,7 +1837,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, } func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - 
r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1847,7 +1850,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 if initialOwner { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1856,7 +1859,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 } func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1865,7 +1868,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u } func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -1877,7 +1880,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -1885,7 +1888,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA } func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { - 
r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole))) if r0 != 0 { hr = syscall.Errno(r0) } @@ -1893,7 +1896,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons } func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1901,7 +1904,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u } func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1910,7 +1913,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er } func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { err = errnoErr(e1) } @@ -1918,7 +1921,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err } func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -1926,12 +1929,12 @@ func DeleteFile(path *uint16) (err error) { } func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist))) return } func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint))) if r1 == 0 { err = errnoErr(e1) } @@ -1939,7 +1942,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { } func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), 
uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1947,7 +1950,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff } func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } @@ -1959,7 +1962,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP if bInheritHandle { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions)) if r1 == 0 { err = errnoErr(e1) } @@ -1967,7 +1970,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP } func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc)) if r1 == 0 { err = errnoErr(e1) } @@ -1975,12 +1978,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { } func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode)) return } func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -1989,7 +1992,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, } func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1997,7 +2000,7 @@ func FindClose(handle Handle) (err error) { } func FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2018,7 +2021,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter if watchSubtree { _p1 = 1 } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), 
uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2027,7 +2030,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter } func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2036,7 +2039,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro } func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2045,7 +2048,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b } func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2054,7 +2057,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er } func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2062,7 +2065,7 @@ func FindNextChangeNotification(handle Handle) (err error) { } func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2070,7 +2073,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) { } func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2078,7 +2081,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin } func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + r1, _, e1 := 
syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2086,7 +2089,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) } func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType)) resInfo = Handle(r0) if resInfo == 0 { err = errnoErr(e1) @@ -2095,7 +2098,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, } func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume)) if r1 == 0 { err = errnoErr(e1) } @@ -2103,7 +2106,15 @@ func FindVolumeClose(findVolume Handle) (err error) { } func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FlushConsoleInputBuffer(console Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console)) if r1 == 0 { err = errnoErr(e1) } @@ -2111,7 +2122,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2119,7 +2130,7 @@ func FlushFileBuffers(handle Handle) (err error) { } func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -2131,7 +2142,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2140,7 +2151,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu } func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs))) if r1 == 0 { err = errnoErr(e1) } @@ -2148,7 +2159,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) { } func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2156,7 +2167,7 @@ func FreeLibrary(handle Handle) (err error) { } func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID)) if r1 == 0 { err = errnoErr(e1) } @@ -2164,19 +2175,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro } func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetACP.Addr()) acp = uint32(r0) return } func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat))) if r1 == 0 { err = errnoErr(e1) } @@ -2184,7 +2195,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { } func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -2192,7 +2203,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) { } func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -2200,13 +2211,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr()) cmd = (*uint16)(unsafe.Pointer(r0)) return } func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2214,7 +2225,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { } func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2222,7 +2233,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { } func GetConsoleCP() (cp uint32, err error) { - r0, _, e1 := 
syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2231,7 +2242,7 @@ func GetConsoleCP() (cp uint32, err error) { } func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode))) if r1 == 0 { err = errnoErr(e1) } @@ -2239,7 +2250,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { } func GetConsoleOutputCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2248,7 +2259,7 @@ func GetConsoleOutputCP() (cp uint32, err error) { } func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2256,7 +2267,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) ( } func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2265,19 +2276,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { } func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr()) pid = uint32(r0) return } func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr()) id = uint32(r0) return } func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -2285,13 +2296,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6 } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName))) driveType = uint32(r0) return } func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + r0, _, e1 := 
syscall.SyscallN(procGetEnvironmentStringsW.Addr()) envs = (*uint16)(unsafe.Pointer(r0)) if envs == nil { err = errnoErr(e1) @@ -2300,7 +2311,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) { } func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2309,7 +2320,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32 } func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode))) if r1 == 0 { err = errnoErr(e1) } @@ -2317,7 +2328,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { } func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2325,7 +2336,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { } func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name))) attrs = uint32(r0) if attrs == INVALID_FILE_ATTRIBUTES { err = errnoErr(e1) @@ -2334,7 +2345,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) { } func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2342,7 +2353,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e } func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -2350,7 +2361,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, } func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), 
uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -2358,7 +2369,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2367,7 +2378,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) { } func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2376,7 +2387,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32 } func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2385,13 +2396,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( } func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr()) size = uintptr(r0) return } func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLastError.Addr()) if r0 != 0 { lasterr = syscall.Errno(r0) } @@ -2399,7 +2410,7 @@ func GetLastError() (lasterr error) { } func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2408,7 +2419,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err } func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr()) drivesBitMask = uint32(r0) if drivesBitMask == 0 { err = errnoErr(e1) @@ -2417,7 +2428,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) { } func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2426,13 +2437,13 @@ func 
GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er } func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2441,7 +2452,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, } func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) if r1 == 0 { err = errnoErr(e1) } @@ -2449,7 +2460,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er } func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2457,7 +2468,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro } func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2465,7 +2476,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m } func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) if r1 == 0 { err = errnoErr(e1) } @@ -2473,7 +2484,15 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 } func 
GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents))) if r1 == 0 { err = errnoErr(e1) } @@ -2485,7 +2504,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -2493,7 +2512,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa } func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process)) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -2511,7 +2530,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { } func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname))) proc = uintptr(r0) if proc == 0 { err = errnoErr(e1) @@ -2520,7 +2539,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { } func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process)) id = uint32(r0) if id == 0 { err = errnoErr(e1) @@ -2529,7 +2548,7 @@ func GetProcessId(process Handle) (id uint32, err error) { } func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2537,7 +2556,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin } func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -2545,7 +2564,7 @@ func GetProcessShutdownParameters(level *uint32, 
flags *uint32) (err error) { } func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) if r1 == 0 { err = errnoErr(e1) } @@ -2553,12 +2572,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, } func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags))) return } func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } @@ -2566,7 +2585,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl } func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2575,12 +2594,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin } func getStartupInfo(startupInfo *StartupInfo) { - syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo))) return } func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2589,7 +2608,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) { } func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2598,7 
+2617,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2606,17 +2625,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2625,7 +2644,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro } func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2634,7 +2653,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { } func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2642,13 +2661,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr()) ms = uint64(r0) return } func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi))) rc = uint32(r0) if rc == 0xffffffff { err = errnoErr(e1) @@ -2657,7 +2676,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { } func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2665,7 +2684,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16 } func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetVersion.Addr()) ver = uint32(r0) if ver == 0 { err = errnoErr(e1) @@ -2674,7 +2693,7 @@ func GetVersion() (ver uint32, err error) { } func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2682,7 +2701,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN } func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2690,7 +2709,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), 
uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } @@ -2698,7 +2717,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint } func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2706,7 +2725,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui } func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -2714,7 +2733,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16 } func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2723,7 +2742,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2735,7 +2754,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) { if *isWow64 { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0))) *isWow64 = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -2748,7 +2767,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1 if err != nil { return } - r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { err = errnoErr(e1) } @@ -2765,7 +2784,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e } func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, 
e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2783,7 +2802,7 @@ func LoadLibrary(libname string) (handle Handle, err error) { } func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2792,7 +2811,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { } func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo)) resData = Handle(r0) if resData == 0 { err = errnoErr(e1) @@ -2801,7 +2820,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { } func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length)) ptr = uintptr(r0) if ptr == 0 { err = errnoErr(e1) @@ -2810,7 +2829,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { } func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem)) handle = Handle(r0) if handle != 0 { err = errnoErr(e1) @@ -2819,7 +2838,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) { } func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2827,7 +2846,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt } func LockResource(resData Handle) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2836,7 +2855,7 @@ func LockResource(resData Handle) (addr uintptr, err error) { } func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2845,7 +2864,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui } func 
Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2853,7 +2872,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2861,7 +2880,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -2869,7 +2888,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { } func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to))) if r1 == 0 { err = errnoErr(e1) } @@ -2877,7 +2896,7 @@ func MoveFile(from *uint16, to *uint16) (err error) { } func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) @@ -2890,7 +2909,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2903,7 +2922,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2916,7 +2935,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), 
uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2929,7 +2948,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2938,7 +2957,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand } func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2946,7 +2965,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla } func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2954,7 +2973,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2962,7 +2981,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid))) if r1 == 0 { err = errnoErr(e1) } @@ -2970,7 +2989,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { } func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2978,7 +2997,7 @@ func PulseEvent(event Handle) (err error) { } func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -2986,7 +3005,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) { } func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), 
uintptr(max)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2995,7 +3014,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3 } func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -3003,7 +3022,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size } func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -3011,7 +3030,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -3023,7 +3042,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -3035,7 +3054,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3043,7 +3062,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := 
syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -3051,7 +3070,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -3059,7 +3078,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3067,7 +3086,7 @@ func RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -3075,7 +3094,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3083,7 +3102,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -3091,7 +3110,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -3100,7 +3119,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3108,7 +3127,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 == 0 { err = errnoErr(e1) } @@ -3116,7 +3135,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ 
-3124,7 +3143,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -3132,7 +3151,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func SetConsoleCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3140,7 +3159,7 @@ func SetConsoleCP(cp uint32) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3148,7 +3167,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) if r1 == 0 { err = errnoErr(e1) } @@ -3156,7 +3175,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetConsoleOutputCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3164,7 +3183,7 @@ func SetConsoleOutputCP(cp uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3172,7 +3191,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3189,7 +3208,7 @@ func SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3197,7 +3216,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3205,7 +3224,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := 
syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = errnoErr(e1) } @@ -3213,13 +3232,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3227,7 +3246,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3235,7 +3254,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3243,7 +3262,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3251,7 +3270,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3260,7 +3279,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3268,7 +3287,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, 
uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3276,7 +3295,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3284,7 +3303,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3293,7 +3312,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3301,7 +3320,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3313,7 +3332,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3321,7 +3340,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3329,7 +3348,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), 
uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3337,7 +3356,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3345,7 +3364,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3353,7 +3372,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3361,7 +3380,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3369,7 +3388,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3382,13 +3401,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = errnoErr(e1) } @@ -3396,7 +3415,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = 
errnoErr(e1) } @@ -3404,7 +3423,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3412,7 +3431,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3420,7 +3439,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3428,7 +3447,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3 } func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr)) if r1 == 0 { err = errnoErr(e1) } @@ -3436,7 +3455,7 @@ func UnmapViewOfFile(addr uintptr) (err error) { } func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize))) if r1 == 0 { err = errnoErr(e1) } @@ -3444,7 +3463,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, } func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect)) value = uintptr(r0) if value == 0 { err = errnoErr(e1) @@ -3453,7 +3472,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3 } func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype)) if r1 == 0 { 
err = errnoErr(e1) } @@ -3461,7 +3480,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { } func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3469,7 +3488,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) { } func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect))) if r1 == 0 { err = errnoErr(e1) } @@ -3477,7 +3496,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect } func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect))) if r1 == 0 { err = errnoErr(e1) } @@ -3485,7 +3504,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3493,7 +3512,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt } func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3501,7 +3520,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat } func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3509,13 +3528,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { } func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr()) sessionID = uint32(r0) return } func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + r1, _, 
+	r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3527,7 +3546,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
 	if waitAll {
 		_p0 = 1
 	}
-	r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds))
 	event = uint32(r0)
 	if event == 0xffffffff {
 		err = errnoErr(e1)
@@ -3536,7 +3555,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
 }
 
 func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0)
+	r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds))
 	event = uint32(r0)
 	if event == 0xffffffff {
 		err = errnoErr(e1)
@@ -3545,7 +3564,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32,
 }
 
 func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) {
-	r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0)
+	r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3557,7 +3576,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
 	if len(buf) > 0 {
 		_p0 = &buf[0]
 	}
-	r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+	r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3565,7 +3584,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
 }
 
 func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0)
+	r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3573,7 +3592,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size
 }
 
 func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0)
+	r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3581,12 +3600,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32
 }
 
 func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
-	syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0)
+	syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)))
 	return
 }
 
 func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3594,7 +3613,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint
 }
 
 func NetApiBufferFree(buf *byte) (neterr error) {
-	r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3602,7 +3621,7 @@ func NetApiBufferFree(buf *byte) (neterr error) {
 }
 
 func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
-	r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+	r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3610,7 +3629,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete
 }
 
 func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
-	r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+	r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3618,7 +3637,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr
 }
 
 func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
-	r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3626,7 +3645,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by
 }
 
 func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0)
+	r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3634,7 +3653,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO
 }
 
 func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
-	r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+	r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3642,7 +3661,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i
 }
 
 func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0)
+	r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3650,7 +3669,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe
 }
 
 func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3658,7 +3677,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf
 }
 
 func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0)
+	r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class))
	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3666,7 +3685,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, 
 }
 
 func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3674,7 +3693,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P
 }
 
 func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
+	r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3682,13 +3701,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL
 }
 
 func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) {
-	r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
+	r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
 	ret = r0 != 0
 	return
 }
 
 func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3696,13 +3715,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
 }
 
 func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) {
-	r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)))
 	ret = r0 != 0
 	return
 }
 
 func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3710,7 +3729,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile
 }
 
 func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3718,18 +3737,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString
 }
 
 func RtlGetCurrentPeb() (peb *PEB) {
-	r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr())
 	peb = (*PEB)(unsafe.Pointer(r0))
 	return
 }
 
 func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
-	syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
+	syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
 	return
 }
 
 func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3737,23 +3756,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
 }
 
 func RtlInitString(destinationString *NTString, sourceString *byte) {
-	syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+	syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
 	return
 }
 
 func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
-	syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+	syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
 	return
 }
 
 func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
-	r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus))
 	ret = syscall.Errno(r0)
 	return
 }
 
 func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
-	r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
+	r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3761,7 +3780,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
 }
 
 func coCreateGuid(pguid *GUID) (ret error) {
-	r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3769,7 +3788,7 @@ func coCreateGuid(pguid *GUID) (ret error) {
 }
 
 func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
-	r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3777,7 +3796,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable *
 }
 
 func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
-	r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0)
+	r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3785,23 +3804,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
 }
 
 func CoTaskMemFree(address unsafe.Pointer) {
-	syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
+	syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address))
 	return
 }
 
 func CoUninitialize() {
-	syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0)
+	syscall.SyscallN(procCoUninitialize.Addr())
 	return
 }
 
 func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
-	r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+	r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
 	chars = int32(r0)
 	return
 }
 
 func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3809,7 +3828,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin
 }
 
 func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0)
+	r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3817,7 +3836,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u
 }
 
 func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
+	r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3825,7 +3844,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err
 }
 
 func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3833,7 +3852,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin
 }
 
 func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3841,7 +3860,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u
 }
 
 func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3849,7 +3868,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb
 }
 
 func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb))
+	r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3861,7 +3880,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb
 	if ret != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0)
+	r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3873,12 +3892,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) {
 	if err != nil {
 		return
 	}
-	syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0)
+	syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription))
 	return
 }
 
 func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
+	r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
 	if r1&0xff == 0 {
 		err = errnoErr(e1)
 	}
@@ -3886,7 +3905,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er
 }
 
 func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
+	r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)))
 	if r1&0xff == 0 {
 		err = errnoErr(e1)
 	}
@@ -3894,7 +3913,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint
 }
 
 func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+	r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3902,7 +3921,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
 }
 
 func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3910,7 +3929,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf
 }
 
 func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3918,7 +3937,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
 }
 
 func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3926,7 +3945,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu
 }
 
 func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3934,7 +3953,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz
 }
 
 func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
-	r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	handle = DevInfo(r0)
 	if handle == DevInfo(InvalidHandle) {
 		err = errnoErr(e1)
@@ -3943,7 +3962,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN
 }
 
 func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3951,7 +3970,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI
 }
 
 func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3959,7 +3978,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
 }
 
 func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+	r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3967,7 +3986,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo
 }
 
 func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3975,7 +3994,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo
 }
 
 func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3983,7 +4002,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d
 }
 
 func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
-	r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	handle = DevInfo(r0)
 	if handle == DevInfo(InvalidHandle) {
 		err = errnoErr(e1)
@@ -3992,7 +4011,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp
 }
 
 func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4000,7 +4019,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
 }
 
 func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4008,7 +4027,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa
 }
 
 func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4016,7 +4035,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
 }
 
 func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4024,7 +4043,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
 }
 
 func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4032,7 +4051,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4040,7 +4059,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
 }
 
 func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4048,7 +4067,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
 }
 
 func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4056,7 +4075,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4064,7 +4083,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
+	r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
 	key = Handle(r0)
 	if key == InvalidHandle {
 		err = errnoErr(e1)
@@ -4073,7 +4092,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc
 }
 
 func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4081,7 +4100,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
 }
 
 func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4089,7 +4108,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
 }
 
 func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4097,7 +4116,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
 }
 
 func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4105,7 +4124,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4113,7 +4132,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4121,7 +4140,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er
 }
 
 func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
-	r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
+	r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)))
 	argv = (**uint16)(unsafe.Pointer(r0))
 	if argv == nil {
 		err = errnoErr(e1)
@@ -4130,7 +4149,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
 }
 
 func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) {
-	r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -4138,7 +4157,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u
 }
 
 func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
+	r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
 	if r1 <= 32 {
 		err = errnoErr(e1)
 	}
@@ -4146,12 +4165,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui
 }
 
 func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) {
-	syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param))
+	syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param))
 	return
 }
 
 func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
-	r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0)
+	r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4159,7 +4178,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
 }
 
 func ExitWindowsEx(flags uint32, reason uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0)
+	r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4167,7 +4186,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) {
 }
 
 func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
+	r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
 	copied = int32(r0)
 	if copied == 0 {
 		err = errnoErr(e1)
@@ -4176,19 +4195,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e
 }
 
 func GetDesktopWindow() (hwnd HWND) {
-	r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr())
 	hwnd = HWND(r0)
 	return
 }
 
 func GetForegroundWindow() (hwnd HWND) {
-	r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr())
 	hwnd = HWND(r0)
 	return
 }
 
 func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4196,19 +4215,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
 }
 
 func GetKeyboardLayout(tid uint32) (hkl Handle) {
-	r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid))
 	hkl = Handle(r0)
 	return
 }
 
 func GetShellWindow() (shellWindow HWND) {
-	r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr())
 	shellWindow = HWND(r0)
 	return
 }
 
 func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0)
+	r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid)))
 	tid = uint32(r0)
 	if tid == 0 {
 		err = errnoErr(e1)
@@ -4217,25 +4236,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
 }
 
 func IsWindow(hwnd HWND) (isWindow bool) {
-	r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0)
+	r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd))
 	isWindow = r0 != 0
 	return
 }
 
 func IsWindowUnicode(hwnd HWND) (isUnicode bool) {
-	r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0)
+	r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd))
	isUnicode = r0 != 0
 	return
 }
 
 func IsWindowVisible(hwnd HWND) (isVisible bool) {
-	r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0)
+	r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd))
 	isVisible = r0 != 0
 	return
 }
 
 func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0)
+	r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags))
 	hkl = Handle(r0)
 	if hkl == 0 {
 		err = errnoErr(e1)
@@ -4244,7 +4263,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
 }
 
 func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
-	r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype))
 	ret = int32(r0)
 	if ret == 0 {
 		err = errnoErr(e1)
@@ -4253,13 +4272,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i
 }
 
 func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) {
-	r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0)
+	r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl))
 	ret = int32(r0)
 	return
 }
 
 func UnloadKeyboardLayout(hkl Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4271,7 +4290,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
 	if inheritExisting {
 		_p0 = 1
 	}
-	r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+	r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4279,7 +4298,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
 }
 
 func DestroyEnvironmentBlock(block *uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4287,7 +4306,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) {
 }
 
 func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
+	r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4304,7 +4323,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32
 }
 
 func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0)
+	r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)))
 	bufSize = uint32(r0)
 	if bufSize == 0 {
 		err = errnoErr(e1)
@@ -4322,7 +4341,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u
 }
 
 func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4339,7 +4358,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer
 }
 
 func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4347,7 +4366,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint
 }
 
 func TimeBeginPeriod(period uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+	r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period))
 	if r1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -4355,7 +4374,7 @@ func TimeBeginPeriod(period uint32) (err error) {
 }
 
 func TimeEndPeriod(period uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+	r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period))
 	if r1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -4363,7 +4382,7 @@ func TimeEndPeriod(period uint32) (err error) {
 }
 
 func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
-	r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
+	r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -4371,12 +4390,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error)
 }
 
 func FreeAddrInfoW(addrinfo *AddrinfoW) {
-	syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
+	syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo)))
 	return
 }
 
 func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) {
-	r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)))
 	if r0 != 0 {
 		sockerr = syscall.Errno(r0)
 	}
@@ -4384,15 +4403,23 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul
 }
 
 func WSACleanup() (err error) {
-	r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0)
+	r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr())
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
-	r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
+	r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
 	n = int32(r0)
 	if n == -1 {
 		err = errnoErr(e1)
@@ -4405,7 +4432,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
 	if wait {
 		_p0 = 1
 	}
-	r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+	r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f } func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == socket_error { err = errnoErr(e1) } @@ -4421,7 +4448,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo } func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) if r1 == socket_error { err = errnoErr(e1) } @@ -4429,7 +4456,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) } func WSALookupServiceEnd(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle)) if r1 == socket_error { err = errnoErr(e1) } @@ -4437,7 +4464,7 @@ func WSALookupServiceEnd(handle Handle) (err error) { } func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet))) if r1 == socket_error { err = errnoErr(e1) } @@ -4445,7 +4472,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS } func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4453,7 +4480,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32 } func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), 
uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4461,7 +4488,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui } func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4469,7 +4496,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, } func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4477,7 +4504,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32 } func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4486,7 +4513,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, } func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4494,7 +4521,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { } func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), 
uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4502,7 +4529,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { } func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s)) if r1 == socket_error { err = errnoErr(e1) } @@ -4510,7 +4537,7 @@ func Closesocket(s Handle) (err error) { } func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4527,7 +4554,7 @@ func GetHostByName(name string) (h *Hostent, err error) { } func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name))) h = (*Hostent)(unsafe.Pointer(r0)) if h == nil { err = errnoErr(e1) @@ -4536,7 +4563,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) { } func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4553,7 +4580,7 @@ func GetProtoByName(name string) (p *Protoent, err error) { } func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name))) p = (*Protoent)(unsafe.Pointer(r0)) if p == nil { err = errnoErr(e1) @@ -4576,7 +4603,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4585,7 +4612,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4593,7 +4620,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if 
r1 == socket_error { err = errnoErr(e1) } @@ -4601,7 +4628,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4609,7 +4636,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4619,7 +4646,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4632,7 +4659,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4640,7 +4667,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4648,7 +4675,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4656,7 +4683,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4665,7 +4692,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := 
syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4673,12 +4700,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/tools/vendor/golang.org/x/term/term_windows.go b/tools/vendor/golang.org/x/term/term_windows.go index df6bf948e..0ddd81c02 100644 --- a/tools/vendor/golang.org/x/term/term_windows.go +++ b/tools/vendor/golang.org/x/term/term_windows.go @@ -20,12 +20,14 @@ func isTerminal(fd int) bool { return err == nil } +// This is intended to be used on a console input handle. +// See https://learn.microsoft.com/en-us/windows/console/setconsolemode func makeRaw(fd int) (*State, error) { var st uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { return nil, err } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT) raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err diff --git a/tools/vendor/golang.org/x/term/terminal.go b/tools/vendor/golang.org/x/term/terminal.go index 14f89470a..bddb2e2ae 100644 --- a/tools/vendor/golang.org/x/term/terminal.go +++ b/tools/vendor/golang.org/x/term/terminal.go @@ -6,6 +6,7 @@ package term import ( "bytes" + "fmt" "io" "runtime" "strconv" @@ -36,6 +37,26 @@ var vt100EscapeCodes = EscapeCodes{ Reset: []byte{keyEscape, '[', '0', 'm'}, } +// A History provides a (possibly bounded) queue of input lines read by [Terminal.ReadLine]. +type History interface { + // Add will be called by [Terminal.ReadLine] to add + // a new, most recent entry to the history. + // It is allowed to drop any entry, including + // the entry being added (e.g., if it's deemed an invalid entry), + // the least-recent entry (e.g., to keep the history bounded), + // or any other entry. + Add(entry string) + + // Len returns the number of entries in the history. + Len() int + + // At returns an entry from the history. + // Index 0 is the most-recently added entry and + // index Len()-1 is the least-recently added entry. + // If index is < 0 or >= Len(), it panics. + At(idx int) string +} + // Terminal contains the state for running a VT100 terminal that is capable of // reading lines of input. type Terminal struct { @@ -86,9 +107,14 @@ type Terminal struct { remainder []byte inBuf [256]byte - // history contains previously entered commands so that they can be - // accessed with the up and down keys. - history stRingBuffer + // History records and retrieves lines of input read by [ReadLine] which + // a user can retrieve and navigate using the up and down arrow keys. + // + // It is not safe to call ReadLine concurrently with any methods on History. 
+ // + // [NewTerminal] sets this to a default implementation that records the + // last 100 lines of input. + History History // historyIndex stores the currently accessed history entry, where zero // means the immediately previous entry. historyIndex int @@ -111,6 +137,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal { termHeight: 24, echo: true, historyIndex: -1, + History: &stRingBuffer{}, } } @@ -119,6 +146,7 @@ const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' + keyLF = '\n' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota @@ -450,10 +478,27 @@ func visualLength(runes []rune) int { return length } +// historyAt unlocks the terminal and relocks it while calling History.At. +func (t *Terminal) historyAt(idx int) (string, bool) { + t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. + defer t.lock.Lock() // panic in At (or Len) protection. + if idx < 0 || idx >= t.History.Len() { + return "", false + } + return t.History.At(idx), true +} + +// historyAdd unlocks the terminal and relocks it while calling History.Add. +func (t *Terminal) historyAdd(entry string) { + t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. + defer t.lock.Lock() // panic in Add protection. + t.History.Add(entry) +} + // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { + if t.pasteActive && key != keyEnter && key != keyLF { t.addKeyToLine(key) return } @@ -497,7 +542,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.pos = len(t.line) t.moveCursorToPos(t.pos) case keyUp: - entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + entry, ok := t.historyAt(t.historyIndex + 1) if !ok { return "", false } @@ -516,14 +561,14 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.setLine(runes, len(runes)) t.historyIndex-- default: - entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + entry, ok := t.historyAt(t.historyIndex - 1) if ok { t.historyIndex-- runes := []rune(entry) t.setLine(runes, len(runes)) } } - case keyEnter: + case keyEnter, keyLF: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) @@ -768,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) { if !t.pasteActive { lineIsPasted = false } + // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line. + if key == keyEnter && len(rest) > 0 && rest[0] == keyLF { + rest = rest[1:] + } line, lineOk = t.handleKey(key) } if len(rest) > 0 { @@ -781,7 +830,7 @@ func (t *Terminal) readLine() (line string, err error) { if lineOk { if t.echo { t.historyIndex = -1 - t.history.Add(line) + t.historyAdd(line) } if lineIsPasted { err = ErrPasteIndicator @@ -938,19 +987,23 @@ func (s *stRingBuffer) Add(a string) { } } -// NthPreviousEntry returns the value passed to the nth previous call to Add. +func (s *stRingBuffer) Len() int { + return s.size +} + +// At returns the value passed to the nth previous call to Add. // If n is zero then the immediately prior value is returned, if one, then the // next most recent, and so on. If such an element doesn't exist then ok is // false.
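The History interface and the historyAt/historyAdd shims above make the line history pluggable; NewTerminal installs the package's stRingBuffer (last 100 lines) by default. A hedged sketch of a custom implementation against the bumped x/term API: the sliceHistory type and the stdio adapter are illustrative, not from the patch.

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

// sliceHistory is an unbounded History that keeps every line read.
type sliceHistory struct {
	entries []string // entries[len(entries)-1] is the most recent
}

func (h *sliceHistory) Add(entry string) { h.entries = append(h.entries, entry) }
func (h *sliceHistory) Len() int         { return len(h.entries) }

// At(0) must return the most recently added entry, per the contract above.
func (h *sliceHistory) At(idx int) string { return h.entries[len(h.entries)-1-idx] }

var _ term.History = (*sliceHistory)(nil) // compile-time interface check

// stdio adapts stdin/stdout into the io.ReadWriter NewTerminal expects.
type stdio struct{}

func (stdio) Read(p []byte) (int, error)  { return os.Stdin.Read(p) }
func (stdio) Write(p []byte) (int, error) { return os.Stdout.Write(p) }

func main() {
	t := term.NewTerminal(stdio{}, "> ")
	t.History = &sliceHistory{} // replace the default 100-line ring buffer
	line, err := t.ReadLine()
	fmt.Println(line, err)
}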
-func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { +func (s *stRingBuffer) At(n int) string { if n < 0 || n >= s.size { - return "", false + panic(fmt.Sprintf("term: history index [%d] out of range [0,%d)", n, s.size)) } index := s.head - n if index < 0 { index += s.max } - return s.entries[index], true + return s.entries[index] } // readPasswordLine reads from reader until it finds \n or io.EOF. diff --git a/tools/vendor/golang.org/x/text/unicode/bidi/core.go b/tools/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547b..fb8273236 100644 --- a/tools/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/tools/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/tools/vendor/golang.org/x/time/rate/rate.go b/tools/vendor/golang.org/x/time/rate/rate.go index 93a798ab6..794b2e32b 100644 --- a/tools/vendor/golang.org/x/time/rate/rate.go +++ b/tools/vendor/golang.org/x/time/rate/rate.go @@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, tokens := lim.advance(t) // does not mutate lim + tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -186,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, tokens := r.lim.advance(t) + tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -307,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -324,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -347,7 +347,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) } } - t, tokens := lim.advance(t) + tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -380,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) return r } -// advance calculates and returns an updated state for lim resulting from the passage of time. +// advance calculates and returns an updated number of tokens for lim +// resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. 
-func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -396,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, tokens + return tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration @@ -405,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. + if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/tools/vendor/golang.org/x/time/rate/sometimes.go b/tools/vendor/golang.org/x/time/rate/sometimes.go index 6ba99ddb6..9b8393269 100644 --- a/tools/vendor/golang.org/x/time/rate/sometimes.go +++ b/tools/vendor/golang.org/x/time/rate/sometimes.go @@ -61,7 +61,9 @@ func (s *Sometimes) Do(f func()) { (s.Every > 0 && s.count%s.Every == 0) || (s.Interval > 0 && time.Since(s.last) >= s.Interval) { f() - s.last = time.Now() + if s.Interval > 0 { + s.last = time.Now() + } } s.count++ } diff --git a/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 6e34df461..0fb4e7eea 100644 --- a/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/tools/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -113,7 +113,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // childrenOf elides the FuncType node beneath FuncDecl. // Add it back here for TypeParams, Params, Results, // all FieldLists). But we don't add it back for the "func" token - // even though it is is the tree at FuncDecl.Type.Func. + // even though it is the tree at FuncDecl.Type.Func. if decl, ok := node.(*ast.FuncDecl); ok { if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { path = append(path, decl.Type) @@ -207,6 +207,9 @@ func childrenOf(n ast.Node) []ast.Node { return false // no recursion }) + // TODO(adonovan): be more careful about missing (!Pos.Valid) + // tokens in trees produced from invalid input. + // Then add fake Nodes for bare tokens. switch n := n.(type) { case *ast.ArrayType: @@ -226,9 +229,12 @@ func childrenOf(n ast.Node) []ast.Node { children = append(children, tok(n.OpPos, len(n.Op.String()))) case *ast.BlockStmt: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("}"))) + if n.Lbrace.IsValid() { + children = append(children, tok(n.Lbrace, len("{"))) + } + if n.Rbrace.IsValid() { + children = append(children, tok(n.Rbrace, len("}"))) + } case *ast.BranchStmt: children = append(children, @@ -304,9 +310,12 @@ func childrenOf(n ast.Node) []ast.Node { // TODO(adonovan): Field.{Doc,Comment,Tag}? 
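The durationFromTokens hunk above exists because tokens/limit can produce a float64 far larger than the roughly 292 years representable by int64 nanoseconds, and an unchecked float-to-Duration conversion would overflow. A standalone re-derivation of the guarded conversion; rate.InfDuration is stood in by math.MaxInt64, and the numbers are chosen only to force the overflow.

package main

import (
	"fmt"
	"math"
	"time"
)

func durationFromTokens(limit, tokens float64) time.Duration {
	if limit <= 0 {
		return time.Duration(math.MaxInt64) // stand-in for rate.InfDuration
	}
	duration := (tokens / limit) * float64(time.Second)
	// Cap at the maximum representable int64, as the patched code does.
	if duration > float64(math.MaxInt64) {
		return time.Duration(math.MaxInt64)
	}
	return time.Duration(duration)
}

func main() {
	// 1e8 tokens at a limit of 1e-300 tokens/sec: ~1e317 ns, far past int64.
	fmt.Println(durationFromTokens(1e-300, 1e8)) // capped, not garbage
}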
case *ast.FieldList: - children = append(children, - tok(n.Opening, len("(")), // or len("[") - tok(n.Closing, len(")"))) // or len("]") + if n.Opening.IsValid() { + children = append(children, tok(n.Opening, len("("))) + } + if n.Closing.IsValid() { + children = append(children, tok(n.Closing, len(")"))) + } case *ast.File: // TODO test: Doc diff --git a/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go index a6b5ed0a8..5e5601aa4 100644 --- a/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/tools/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -9,6 +9,7 @@ import ( "fmt" "go/ast" "go/token" + "slices" "strconv" "strings" ) @@ -186,7 +187,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() first.Specs = append(first.Specs, spec) } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) i-- } diff --git a/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 5c8dbbb7a..4ad054930 100644 --- a/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -67,6 +67,10 @@ var abort = new(int) // singleton, to signal termination of Apply // // The methods Replace, Delete, InsertBefore, and InsertAfter // can be used to change the AST without disrupting Apply. +// +// This type is not to be confused with [inspector.Cursor] from +// package [golang.org/x/tools/go/ast/inspector], which provides +// stateless navigation of immutable syntax trees. type Cursor struct { parent ast.Node name string diff --git a/tools/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/tools/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 65fe2628e..7b90bc923 100644 --- a/tools/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/tools/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -193,10 +193,7 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, return pkg, err default: - l := len(data) - if l > 10 { - l = 10 - } + l := min(len(data), 10) return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) } } diff --git a/tools/vendor/golang.org/x/tools/go/packages/doc.go b/tools/vendor/golang.org/x/tools/go/packages/doc.go index f1931d10e..366aab6b2 100644 --- a/tools/vendor/golang.org/x/tools/go/packages/doc.go +++ b/tools/vendor/golang.org/x/tools/go/packages/doc.go @@ -76,6 +76,8 @@ uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. +See also [golang.org/x/tools/go/packages/internal/linecount] +for an example application. 
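Several hunks in this patch (astutil/imports.go above, sortimports.go later) replace the classic append(s[:i], s[i+1:]...) splice with slices.Delete, available since Go 1.21. The two forms are equivalent for removing a single element; a tiny sketch:

package main

import (
	"fmt"
	"slices"
)

func main() {
	decls := []string{"decl-a", "decl-b", "decl-c"}
	i := 1
	// Was: decls = append(decls[:i], decls[i+1:]...)
	decls = slices.Delete(decls, i, i+1) // removes decls[i:i+1]
	fmt.Println(decls) // [decl-a decl-c]
}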
# The driver protocol diff --git a/tools/vendor/golang.org/x/tools/go/packages/external.go b/tools/vendor/golang.org/x/tools/go/packages/external.go index 91bd62e83..f37bc6510 100644 --- a/tools/vendor/golang.org/x/tools/go/packages/external.go +++ b/tools/vendor/golang.org/x/tools/go/packages/external.go @@ -90,7 +90,7 @@ func findExternalDriver(cfg *Config) driver { const toolPrefix = "GOPACKAGESDRIVER=" tool := "" for _, env := range cfg.Env { - if val := strings.TrimPrefix(env, toolPrefix); val != env { + if val, ok := strings.CutPrefix(env, toolPrefix); ok { tool = val } } diff --git a/tools/vendor/golang.org/x/tools/go/packages/golist.go b/tools/vendor/golang.org/x/tools/go/packages/golist.go index 0458b4f9c..89f89dd2d 100644 --- a/tools/vendor/golang.org/x/tools/go/packages/golist.go +++ b/tools/vendor/golang.org/x/tools/go/packages/golist.go @@ -224,13 +224,22 @@ extractQueries: return response.dr, nil } +// abs returns an absolute representation of path, based on cfg.Dir. +func (cfg *Config) abs(path string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + // In case cfg.Dir is relative, pass it to filepath.Abs. + return filepath.Abs(filepath.Join(cfg.Dir, path)) +} + func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) // Pass absolute path of directory to go list so that it knows to treat it as a directory, // not a package path. - pattern, err := filepath.Abs(fdir) + pattern, err := state.cfg.abs(fdir) if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } @@ -703,9 +712,8 @@ func (state *golistState) getGoVersion() (int, error) { // getPkgPath finds the package path of a directory if it's relative to a root // directory. func (state *golistState) getPkgPath(dir string) (string, bool, error) { - absDir, err := filepath.Abs(dir) - if err != nil { - return "", false, err + if !filepath.IsAbs(dir) { + panic("non-absolute dir passed to getPkgPath") } roots, err := state.determineRootDirs() if err != nil { @@ -715,7 +723,7 @@ func (state *golistState) getPkgPath(dir string) (string, bool, error) { for rdir, rpath := range roots { // Make sure that the directory is in the module, // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, rdir) { + if !strings.HasPrefix(dir, rdir) { continue } // TODO(matloob): This doesn't properly handle symlinks. @@ -851,8 +859,6 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { cfg := state.cfg return gocommand.Invocation{ BuildFlags: cfg.BuildFlags, - ModFile: cfg.modFile, - ModFlag: cfg.modFlag, CleanEnv: cfg.Env != nil, Env: cfg.Env, Logf: cfg.Logf, diff --git a/tools/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/tools/vendor/golang.org/x/tools/go/packages/golist_overlay.go index d823c474a..d9d5a45cd 100644 --- a/tools/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/tools/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -55,7 +55,7 @@ func (state *golistState) determineRootDirsModules() (map[string]string, error) } if mod.Dir != "" && mod.Path != "" { // This is a valid module; add it to the map. 
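The findExternalDriver hunk above swaps strings.TrimPrefix plus a val != env comparison for strings.CutPrefix (Go 1.20), which reports directly whether the prefix was present. The same rewrite in isolation, with illustrative data:

package main

import (
	"fmt"
	"strings"
)

func main() {
	env := []string{"HOME=/home/user", "GOPACKAGESDRIVER=/usr/local/bin/driver"}
	const toolPrefix = "GOPACKAGESDRIVER="
	tool := ""
	for _, e := range env {
		// Was: if val := strings.TrimPrefix(e, toolPrefix); val != e {
		if val, ok := strings.CutPrefix(e, toolPrefix); ok {
			tool = val
		}
	}
	fmt.Println(tool) // /usr/local/bin/driver
}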
- absDir, err := filepath.Abs(mod.Dir) + absDir, err := state.cfg.abs(mod.Dir) if err != nil { return nil, err } diff --git a/tools/vendor/golang.org/x/tools/go/packages/packages.go b/tools/vendor/golang.org/x/tools/go/packages/packages.go index 6665a04c1..060ab08ef 100644 --- a/tools/vendor/golang.org/x/tools/go/packages/packages.go +++ b/tools/vendor/golang.org/x/tools/go/packages/packages.go @@ -229,14 +229,6 @@ type Config struct { // consistent package metadata about unsaved files. However, // drivers may vary in their level of support for overlays. Overlay map[string][]byte - - // -- Hidden configuration fields only for use in x/tools -- - - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string } // Load loads and returns the Go packages named by the given patterns. @@ -569,12 +561,6 @@ func init() { packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.SetModFile = func(config any, value string) { - config.(*Config).modFile = value - } - packagesinternal.SetModFlag = func(config any, value string) { - config.(*Config).modFlag = value - } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) } diff --git a/tools/vendor/golang.org/x/tools/go/packages/visit.go b/tools/vendor/golang.org/x/tools/go/packages/visit.go index df14ffd94..af6a60d75 100644 --- a/tools/vendor/golang.org/x/tools/go/packages/visit.go +++ b/tools/vendor/golang.org/x/tools/go/packages/visit.go @@ -5,9 +5,11 @@ package packages import ( + "cmp" "fmt" + "iter" "os" - "sort" + "slices" ) // Visit visits all the packages in the import graph whose roots are @@ -16,6 +18,20 @@ import ( // package's dependencies have been visited (postorder). // The boolean result of pre(pkg) determines whether // the imports of package pkg are visited. +// +// Example: +// +// pkgs, err := Load(...) +// if err != nil { ... } +// Visit(pkgs, nil, func(pkg *Package) { +// log.Println(pkg) +// }) +// +// In most cases, it is more convenient to use [Postorder]: +// +// for pkg := range Postorder(pkgs) { +// log.Println(pkg) +// } func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { seen := make(map[*Package]bool) var visit func(*Package) @@ -24,13 +40,8 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { seen[pkg] = true if pre == nil || pre(pkg) { - paths := make([]string, 0, len(pkg.Imports)) - for path := range pkg.Imports { - paths = append(paths, path) - } - sort.Strings(paths) // Imports is a map, this makes visit stable - for _, path := range paths { - visit(pkg.Imports[path]) + for _, imp := range sorted(pkg.Imports) { // for determinism + visit(imp) } } @@ -50,7 +61,7 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { func PrintErrors(pkgs []*Package) int { var n int errModules := make(map[*Module]bool) - Visit(pkgs, nil, func(pkg *Package) { + for pkg := range Postorder(pkgs) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ @@ -63,6 +74,60 @@ func PrintErrors(pkgs []*Package) int { fmt.Fprintln(os.Stderr, mod.Error.Err) n++ } - }) + } return n } + +// Postorder returns an iterator over the packages in +// the import graph whose roots are pkgs. +// Packages are enumerated in dependencies-first order.
+func Postorder(pkgs []*Package) iter.Seq[*Package] { + return func(yield func(*Package) bool) { + seen := make(map[*Package]bool) + var visit func(*Package) bool + visit = func(pkg *Package) bool { + if !seen[pkg] { + seen[pkg] = true + for _, imp := range sorted(pkg.Imports) { // for determinism + if !visit(imp) { + return false + } + } + if !yield(pkg) { + return false + } + } + return true + } + for _, pkg := range pkgs { + if !visit(pkg) { + break + } + } + } +} + +// -- copied from golang.org/x/tools/gopls/internal/util/moremaps -- + +// sorted returns an iterator over the entries of m in key order. +func sorted[M ~map[K]V, K cmp.Ordered, V any](m M) iter.Seq2[K, V] { + // TODO(adonovan): use maps.Sorted if proposal #68598 is accepted. + return func(yield func(K, V) bool) { + keys := keySlice(m) + slices.Sort(keys) + for _, k := range keys { + if !yield(k, m[k]) { + break + } + } + } +} + +// keySlice returns the keys of the map M, like slices.Collect(maps.Keys(m)). +func keySlice[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} diff --git a/tools/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/tools/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 16ed3c178..6c0c74968 100644 --- a/tools/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/tools/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -603,7 +603,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { type hasTypeParams interface { TypeParams() *types.TypeParamList } - // abstraction of *types.{Named,TypeParam} + // abstraction of *types.{Alias,Named,TypeParam} type hasObj interface { Obj() *types.TypeName } @@ -698,7 +698,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } else if false && aliases.Enabled() { // The Enabled check is too expensive, so for now we // simply assume that aliases are not enabled. - // TODO(adonovan): replace with "if true {" when go1.24 is assured. + // + // Now that go1.24 is assured, we should be able to + // replace this with "if true {", but it causes tests + // to fail. TODO(adonovan): investigate. return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) } diff --git a/tools/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/tools/vendor/golang.org/x/tools/go/types/typeutil/callee.go index 754380351..5f10f56cb 100644 --- a/tools/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ b/tools/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -7,45 +7,23 @@ package typeutil import ( "go/ast" "go/types" - - "golang.org/x/tools/internal/typeparams" + _ "unsafe" // for linkname ) // Callee returns the named target of a function call, if any: // a function, method, builtin, or variable. // // Functions and methods may potentially have type parameters. +// +// Note: for calls of instantiated functions and methods, Callee returns +// the corresponding generic function or method on the generic type. func Callee(info *types.Info, call *ast.CallExpr) types.Object { - fun := ast.Unparen(call.Fun) - - // Look through type instantiation if necessary. - isInstance := false - switch fun.(type) { - case *ast.IndexExpr, *ast.IndexListExpr: - // When extracting the callee from an *IndexExpr, we need to check that - // it is a *types.Func and not a *types.Var. - // Example: Don't match a slice m within the expression `m[0]()`.
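The new Postorder iterator above gives callers the same dependencies-first traversal that PrintErrors now uses internally. A hedged usage sketch against the bumped x/tools; the load pattern is chosen only for the example.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports}
	pkgs, err := packages.Load(cfg, "./...") // illustrative pattern
	if err != nil {
		log.Fatal(err)
	}
	// Each package is yielded only after all of its imports.
	for pkg := range packages.Postorder(pkgs) {
		fmt.Println(pkg.PkgPath)
	}
}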
- isInstance = true - fun, _, _, _ = typeparams.UnpackIndexExpr(fun) - } - - var obj types.Object - switch fun := fun.(type) { - case *ast.Ident: - obj = info.Uses[fun] // type, var, builtin, or declared func - case *ast.SelectorExpr: - if sel, ok := info.Selections[fun]; ok { - obj = sel.Obj() // method or field - } else { - obj = info.Uses[fun.Sel] // qualified identifier? - } + obj := info.Uses[usedIdent(info, call.Fun)] + if obj == nil { + return nil } if _, ok := obj.(*types.TypeName); ok { - return nil // T(x) is a conversion, not a call - } - // A Func is required to match instantiations. - if _, ok := obj.(*types.Func); isInstance && !ok { - return nil // Was not a Func. + return nil } return obj } @@ -56,13 +34,52 @@ func Callee(info *types.Info, call *ast.CallExpr) types.Object { // Note: for calls of instantiated functions and methods, StaticCallee returns // the corresponding generic function or method on the generic type. func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { - if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { - return f + obj := info.Uses[usedIdent(info, call.Fun)] + fn, _ := obj.(*types.Func) + if fn == nil || interfaceMethod(fn) { + return nil + } + return fn +} + +// usedIdent is the implementation of [internal/typesinternal.UsedIdent]. +// It returns the identifier associated with e. +// See typesinternal.UsedIdent for a fuller description. +// This function should live in typesinternal, but cannot because it would +// create an import cycle. +// +//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent +func usedIdent(info *types.Info, e ast.Expr) *ast.Ident { + if info.Types == nil || info.Uses == nil { + panic("one of info.Types or info.Uses is nil; both must be populated") + } + // Look through type instantiation if necessary. + switch d := ast.Unparen(e).(type) { + case *ast.IndexExpr: + if info.Types[d.Index].IsType() { + e = d.X + } + case *ast.IndexListExpr: + e = d.X + } + + switch e := ast.Unparen(e).(type) { + // info.Uses always has the object we want, even for selector expressions. + // We don't need info.Selections. + // See go/types/recording.go:recordSelection. + case *ast.Ident: + return e + case *ast.SelectorExpr: + return e.Sel } return nil } +// interfaceMethod reports whether its argument is a method of an interface. +// This function should live in typesinternal, but cannot because it would create an import cycle. +// +//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod func interfaceMethod(f *types.Func) bool { - recv := f.Type().(*types.Signature).Recv() + recv := f.Signature().Recv() return recv != nil && types.IsInterface(recv.Type()) } diff --git a/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go b/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go index b6d542c64..f035a0b6b 100644 --- a/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -11,7 +11,6 @@ import ( "fmt" "go/types" "hash/maphash" - "unsafe" "golang.org/x/tools/internal/typeparams" ) @@ -380,22 +379,8 @@ var theSeed = maphash.MakeSeed() func (hasher) hashTypeName(tname *types.TypeName) uint32 { // Since types.Identical uses == to compare TypeNames, // the Hash function uses maphash.Comparable. - // TODO(adonovan): or will, when it becomes available in go1.24. - // In the meantime we use the pointer's numeric value. 
- // - // hash := maphash.Comparable(theSeed, tname) - // - // (Another approach would be to hash the name and package - // path, and whether or not it is a package-level typename. It - // is rare for a package to define multiple local types with - // the same name.) - ptr := uintptr(unsafe.Pointer(tname)) - if unsafe.Sizeof(ptr) == 8 { - hash := uint64(ptr) - return uint32(hash ^ (hash >> 32)) - } else { - return uint32(ptr) - } + hash := maphash.Comparable(theSeed, tname) + return uint32(hash ^ (hash >> 32)) } // shallowHash computes a hash of t without looking at any of its diff --git a/tools/vendor/golang.org/x/tools/internal/event/label/label.go b/tools/vendor/golang.org/x/tools/internal/event/label/label.go index 7c00ca2a6..92a391057 100644 --- a/tools/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/tools/vendor/golang.org/x/tools/internal/event/label/label.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "reflect" + "slices" "unsafe" ) @@ -154,10 +155,8 @@ func (f *filter) Valid(index int) bool { func (f *filter) Label(index int) Label { l := f.underlying.Label(index) - for _, f := range f.keys { - if l.Key() == f { - return Label{} - } + if slices.Contains(f.keys, l.Key()) { + return Label{} } return l } diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 253d6493c..780873e3a 100644 --- a/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -236,6 +236,7 @@ import ( "io" "math/big" "reflect" + "slices" "sort" "strconv" "strings" @@ -271,10 +272,10 @@ import ( // file system, be sure to include a cryptographic digest of the executable in // the key to avoid version skew. // -// If the provided reportf func is non-nil, it will be used for reporting bugs -// encountered during export. -// TODO(rfindley): remove reportf when we are confident enough in the new -// objectpath encoding. +// If the provided reportf func is non-nil, it is used for reporting +// bugs (e.g. recovered panics) encountered during export, enabling us +// to obtain via telemetry the stack that would otherwise be lost by +// merely returning an error. func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { // In principle this operation can only fail if out.Write fails, // but that's impossible for bytes.Buffer---and as a matter of @@ -283,7 +284,7 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) // TODO(adonovan): use byte slices throughout, avoiding copying. const bundle, shallow = false, true var out bytes.Buffer - err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, reportf) return out.Bytes(), err } @@ -323,20 +324,27 @@ const bundleVersion = 0 // so that calls to IImportData can override with a provided package path. func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { const bundle, shallow = false, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, nil) } // IExportBundle writes an indexed export bundle for pkgs to out. 
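The hashTypeName change above can finally use maphash.Comparable (Go 1.24) to hash the TypeName's pointer identity, replacing the unsafe pointer-truncation workaround it deletes. The same fold-to-32-bits pattern as a standalone sketch, with an illustrative stand-in type:

package main

import (
	"fmt"
	"hash/maphash"
)

type typeName struct{ name string } // illustrative stand-in for *types.TypeName

var theSeed = maphash.MakeSeed()

// hash32 hashes any comparable value (here a pointer, so identity, not
// contents) and folds the 64-bit result to 32 bits, as hashTypeName does.
func hash32[T comparable](v T) uint32 {
	h := maphash.Comparable(theSeed, v)
	return uint32(h ^ (h >> 32))
}

func main() {
	a, b := &typeName{"T"}, &typeName{"T"}
	// Same pointer: equal hashes. Equal contents but distinct pointers:
	// hashes almost surely differ, matching ==-based identity.
	fmt.Println(hash32(a) == hash32(a), hash32(a) == hash32(b))
}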
func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { const bundle, shallow = true, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs, nil) } -func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package, reportf ReportFunc) (err error) { if !debug { defer func() { if e := recover(); e != nil { + // Report the stack via telemetry (see #71067). + if reportf != nil { + reportf("panic in exporter") + } if ierr, ok := e.(internalError); ok { + // internalError usually means we exported a + // bad go/types data structure: a violation + // of an implicit precondition of Export. err = ierr return } @@ -458,7 +466,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) w.uint64(size) // Sort the set of needed offsets. Duplicates are harmless. - sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) + slices.Sort(needed) lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) @@ -812,7 +820,7 @@ func (p *iexporter) doDecl(obj types.Object) { n := named.NumMethods() w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { m := named.Method(i) w.pos(m.Pos()) w.string(m.Name()) @@ -1089,7 +1097,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.pkg(fieldPkg) w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { f := t.Field(i) if w.p.shallow { w.objectPath(f) @@ -1138,7 +1146,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) - for i := 0; i < nt; i++ { + for i := range nt { term := t.Term(i) w.bool(term.Tilde()) w.typ(term.Type(), pkg) @@ -1267,7 +1275,7 @@ func tparamName(exportName string) string { func (w *exportWriter) paramList(tup *types.Tuple) { n := tup.Len() w.uint64(uint64(n)) - for i := 0; i < n; i++ { + for i := range n { w.param(tup.At(i)) } } diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index bc6c9741e..82e6c9d2d 100644 --- a/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -16,6 +16,7 @@ import ( "go/types" "io" "math/big" + "slices" "sort" "strings" @@ -314,7 +315,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte pkgs = pkgList[:1] // record all referenced packages as imports - list := append(([]*types.Package)(nil), pkgList[1:]...) + list := slices.Clone(pkgList[1:]) sort.Sort(byPath(list)) pkgs[0].SetImports(list) } diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go deleted file mode 100644 index 7586bfaca..000000000 --- a/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 && !go1.24 - -package gcimporter - -import ( - "go/token" - "go/types" - "unsafe" -) - -// TODO(rfindley): delete this workaround once go1.24 is assured. 
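Two mechanical modernizations recur in the iexport.go hunks above: sort.Slice with a less-func gives way to slices.Sort for ordered element types, and counted loops become range-over-int (Go 1.22). Reduced to a sketch:

package main

import (
	"fmt"
	"slices"
)

func main() {
	needed := []uint64{42, 7, 19, 7} // duplicates are harmless, as the code notes
	// Was: sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
	slices.Sort(needed)

	n := len(needed)
	for i := range n { // was: for i := 0; i < n; i++
		fmt.Println(i, needed[i])
	}
}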
- -func init() { - // Update markBlack so that it correctly sets the color - // of imported TypeNames. - // - // See the doc comment for markBlack for details. - - type color uint32 - const ( - white color = iota - black - grey - ) - type object struct { - _ *types.Scope - _ token.Pos - _ *types.Package - _ string - _ types.Type - _ uint32 - color_ color - _ token.Pos - } - type typeName struct { - object - } - - // If the size of types.TypeName changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) - var _ [-delta * delta]int - - markBlack = func(obj *types.TypeName) { - type uP = unsafe.Pointer - var ptr *typeName - *(*uP)(uP(&ptr)) = uP(obj) - ptr.color_ = black - } -} diff --git a/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 7ea901344..58721202d 100644 --- a/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/tools/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -141,7 +141,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde // Wait for all in-progress go commands to return before proceeding, // to avoid load concurrency errors. - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { select { case <-ctx.Done(): return ctx.Err(), ctx.Err() diff --git a/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 984b79c2a..5252144d0 100644 --- a/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -14,6 +14,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "strings" "sync" "time" @@ -195,10 +196,8 @@ func (w *walker) getIgnoredDirs(path string) []string { // shouldSkipDir reports whether the file should be skipped or not. func (w *walker) shouldSkipDir(dir string) bool { - for _, ignoredDir := range w.ignoredDirs { - if dir == ignoredDir { - return true - } + if slices.Contains(w.ignoredDirs, dir) { + return true } if w.skip != nil { // Check with the user specified callback. diff --git a/tools/vendor/golang.org/x/tools/internal/imports/fix.go b/tools/vendor/golang.org/x/tools/internal/imports/fix.go index 737a9bfae..50b6ca51a 100644 --- a/tools/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/tools/vendor/golang.org/x/tools/internal/imports/fix.go @@ -27,6 +27,8 @@ import ( "unicode" "unicode/utf8" + "maps" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" @@ -289,8 +291,8 @@ func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) erro return nil } -// if there is a trailing major version, remove it -func withoutVersion(nm string) string { +// WithoutVersion removes a trailing major version, if there is one. 
+func WithoutVersion(nm string) string { if v := path.Base(nm); len(v) > 0 && v[0] == 'v' { if _, err := strconv.Atoi(v[1:]); err == nil { // this is, for instance, called with rand/v2 and returns rand @@ -312,7 +314,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { } known := p.knownPackages[imp.ImportPath] if known != nil && known.Name != "" { - return withoutVersion(known.Name) + return WithoutVersion(known.Name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -585,7 +587,7 @@ func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, f srcDir := filepath.Dir(abs) if logf != nil { - logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir) + logf("fixImports(filename=%q), srcDir=%q ...", filename, srcDir) } // First pass: looking only at f, and using the naive algorithm to @@ -968,9 +970,7 @@ func (e *ProcessEnv) CopyConfig() *ProcessEnv { resolver: nil, Env: map[string]string{}, } - for k, v := range e.Env { - copy.Env[k] = v - } + maps.Copy(copy.Env, e.Env) return copy } @@ -1003,9 +1003,7 @@ func (e *ProcessEnv) init() error { if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { return err } - for k, v := range goEnv { - e.Env[k] = v - } + maps.Copy(e.Env, goEnv) e.initialized = true return nil } diff --git a/tools/vendor/golang.org/x/tools/internal/imports/imports.go b/tools/vendor/golang.org/x/tools/internal/imports/imports.go index 2215a1288..b5f5218b5 100644 --- a/tools/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/tools/vendor/golang.org/x/tools/internal/imports/imports.go @@ -93,7 +93,7 @@ func FixImports(ctx context.Context, filename string, src []byte, goroot string, // env is needed. func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { // Don't use parse() -- we don't care about fragments or statement lists - // here, and we need to work with unparseable files. + // here, and we need to work with unparsable files. fileSet := token.NewFileSet() parserMode := parser.SkipObjectResolution if opt.Comments { diff --git a/tools/vendor/golang.org/x/tools/internal/imports/mod.go b/tools/vendor/golang.org/x/tools/internal/imports/mod.go index 8555e3f83..df94ec818 100644 --- a/tools/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/tools/vendor/golang.org/x/tools/internal/imports/mod.go @@ -13,6 +13,7 @@ import ( "path" "path/filepath" "regexp" + "slices" "sort" "strconv" "strings" @@ -150,8 +151,8 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe Path: "", Dir: filepath.Join(filepath.Dir(goWork), "vendor"), } - r.modsByModPath = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) - r.modsByDir = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) + r.modsByModPath = append(slices.Clone(mainModsVendor), r.dummyVendorMod) + r.modsByDir = append(slices.Clone(mainModsVendor), r.dummyVendorMod) } } else { // Vendor mode is off, so run go list -m ... to find everything. diff --git a/tools/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/tools/vendor/golang.org/x/tools/internal/imports/mod_cache.go index b1192696b..b96c9d4bf 100644 --- a/tools/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/tools/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -128,7 +128,7 @@ func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener // are going to be. Setting an arbitrary limit makes it much easier. 
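The newly exported WithoutVersion above strips a trailing major-version path element; the hunk shows the prefix check as unchanged context, while the final return is inferred here from its doc comment ("called with rand/v2 and returns rand"). A plausible standalone reconstruction, not the vendored implementation:

package main

import (
	"fmt"
	"path"
	"strconv"
)

// withoutVersion drops a final path element of the form vN (N numeric).
func withoutVersion(nm string) string {
	if v := path.Base(nm); len(v) > 0 && v[0] == 'v' {
		if _, err := strconv.Atoi(v[1:]); err == nil {
			return path.Dir(nm) // inferred: rand/v2 -> rand
		}
	}
	return nm
}

func main() {
	fmt.Println(withoutVersion("rand/v2")) // rand
	fmt.Println(withoutVersion("fmt"))     // fmt (no version suffix)
}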
const maxInFlight = 10 sema := make(chan struct{}, maxInFlight) - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { sema <- struct{}{} } @@ -156,7 +156,7 @@ func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener d.mu.Lock() delete(d.listeners, cookie) d.mu.Unlock() - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { <-sema } } diff --git a/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go b/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go index da8194fd9..67c17bc43 100644 --- a/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/tools/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -11,6 +11,7 @@ import ( "go/ast" "go/token" "log" + "slices" "sort" "strconv" ) @@ -30,7 +31,7 @@ func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { if len(d.Specs) == 0 { // Empty import block, remove it. - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) } if !d.Lparen.IsValid() { @@ -91,7 +92,7 @@ func mergeImports(f *ast.File) { spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() first.Specs = append(first.Specs, spec) } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + f.Decls = slices.Delete(f.Decls, i, i+1) i-- } } diff --git a/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go index 05229f06c..ca745d4a1 100644 --- a/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go +++ b/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -15,6 +15,10 @@ import ( // This code is here rather than in the modindex package // to avoid import loops +// TODO(adonovan): this code is only used by a test in this package. +// Can we delete it? Or is there a plan to call NewIndexSource from +// cmd/goimports? + // implements Source using modindex, so only for module cache. // // this is perhaps over-engineered. A new Index is read at first use. @@ -22,8 +26,8 @@ import ( // is read if the index changed. It is not clear the Mutex is needed. type IndexSource struct { modcachedir string - mutex sync.Mutex - ix *modindex.Index + mu sync.Mutex + index *modindex.Index // (access via getIndex) expires time.Time } @@ -39,13 +43,14 @@ func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths } func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { - if err := s.maybeReadIndex(); err != nil { + index, err := s.getIndex() + if err != nil { return nil, err } var cs []modindex.Candidate for pkg, nms := range missing { for nm := range nms { - x := s.ix.Lookup(pkg, nm, false) + x := index.Lookup(pkg, nm, false) cs = append(cs, x...) } } @@ -74,30 +79,22 @@ func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, mi return ans, nil } -func (s *IndexSource) maybeReadIndex() error { - s.mutex.Lock() - defer s.mutex.Unlock() - - var readIndex bool - if time.Now().After(s.expires) { - ok, err := modindex.Update(s.modcachedir) - if err != nil { - return err - } - if ok { - readIndex = true - } - } +func (s *IndexSource) getIndex() (*modindex.Index, error) { + s.mu.Lock() + defer s.mu.Unlock() - if readIndex || s.ix == nil { - ix, err := modindex.ReadIndex(s.modcachedir) + // (s.index = nil => s.expires is zero, + // so the first condition is strictly redundant. + // But it makes the postcondition very clear.) 
+ if s.index == nil || time.Now().After(s.expires) { + index, err := modindex.Update(s.modcachedir) if err != nil { - return err + return nil, err } - s.ix = ix - // for now refresh every 15 minutes - s.expires = time.Now().Add(time.Minute * 15) + s.index = index + s.expires = index.ValidAt.Add(15 * time.Minute) // (refresh period) } + // Inv: s.index != nil - return nil + return s.index, nil } diff --git a/tools/vendor/golang.org/x/tools/internal/modindex/directories.go b/tools/vendor/golang.org/x/tools/internal/modindex/directories.go index 1e1a02f23..9a963744b 100644 --- a/tools/vendor/golang.org/x/tools/internal/modindex/directories.go +++ b/tools/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "regexp" - "slices" "strings" "sync" "time" @@ -20,50 +19,48 @@ import ( ) type directory struct { - path Relpath + path string // relative to GOMODCACHE importPath string version string // semantic version - syms []symbol } -// filterDirs groups the directories by import path, -// sorting the ones with the same import path by semantic version, -// most recent first. -func byImportPath(dirs []Relpath) (map[string][]*directory, error) { - ans := make(map[string][]*directory) // key is import path - for _, d := range dirs { - ip, sv, err := DirToImportPathVersion(d) +// bestDirByImportPath returns the best directory for each import +// path, where "best" means most recent semantic version. These import +// paths are inferred from the GOMODCACHE-relative dir names in dirs. +func bestDirByImportPath(dirs []string) (map[string]directory, error) { + dirsByPath := make(map[string]directory) + for _, dir := range dirs { + importPath, version, err := dirToImportPathVersion(dir) if err != nil { return nil, err } - ans[ip] = append(ans[ip], &directory{ - path: d, - importPath: ip, - version: sv, - }) - } - for k, v := range ans { - semanticSort(v) - ans[k] = v + new := directory{ + path: dir, + importPath: importPath, + version: version, + } + if old, ok := dirsByPath[importPath]; !ok || compareDirectory(new, old) < 0 { + dirsByPath[importPath] = new + } } - return ans, nil + return dirsByPath, nil } -// sort the directories by semantic version, latest first -func semanticSort(v []*directory) { - slices.SortFunc(v, func(l, r *directory) int { - if n := semver.Compare(l.version, r.version); n != 0 { - return -n // latest first - } - return strings.Compare(string(l.path), string(r.path)) - }) +// compareDirectory defines an ordering of path@version directories, +// by descending version, then by ascending path. +func compareDirectory(x, y directory) int { + if sign := -semver.Compare(x.version, y.version); sign != 0 { + return sign // latest first + } + return strings.Compare(string(x.path), string(y.path)) } // modCacheRegexp splits a relpathpath into module, module version, and package. var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) -// DirToImportPathVersion computes import path and semantic version -func DirToImportPathVersion(dir Relpath) (string, string, error) { +// dirToImportPathVersion computes import path and semantic version +// from a GOMODCACHE-relative directory name. +func dirToImportPathVersion(dir string) (string, string, error) { m := modCacheRegexp.FindStringSubmatch(string(dir)) // m[1] is the module path // m[2] is the version major.minor.patch(-
     				if len(flds) >= 4 {
     					sig := strings.Split(flds[3], " ")
    -					for i := 0; i < len(sig); i++ {
    +					for i := range sig {
     						// $ cannot otherwise occur. removing the spaces
     						// almost works, but for chan struct{}, e.g.
     						sig[i] = strings.Replace(sig[i], "$", " ", -1)
    @@ -136,7 +136,7 @@ func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
     
     func toFields(sig []string) []Field {
     	ans := make([]Field, len(sig)/2)
    -	for i := 0; i < len(ans); i++ {
    +	for i := range ans {
     		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
     	}
     	return ans
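
A recurring mechanical change in the hunks above (invoke.go, walk.go, mod_cache.go, lookup.go) is the Go 1.22 "range over int" modernization of counted loops. A minimal sketch of the equivalent forms, using hypothetical values rather than code from this patch:

    package main

    import "fmt"

    func main() {
    	const n = 3

    	// Pre-Go 1.22 counted loop, the form removed throughout this patch.
    	for i := 0; i < n; i++ {
    		fmt.Println("counted", i)
    	}

    	// Go 1.22+: range over an int when the index is still needed.
    	for i := range n {
    		fmt.Println("ranged", i)
    	}

    	// Go 1.22+: no loop variable at all, as in the maxInFlight loops.
    	for range n {
    		fmt.Println("tick")
    	}
    }
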
    diff --git a/tools/vendor/golang.org/x/tools/internal/modindex/modindex.go b/tools/vendor/golang.org/x/tools/internal/modindex/modindex.go
    index 355a53e71..5fa285d98 100644
    --- a/tools/vendor/golang.org/x/tools/internal/modindex/modindex.go
    +++ b/tools/vendor/golang.org/x/tools/internal/modindex/modindex.go
    @@ -2,17 +2,21 @@
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
     
    -// Package modindex contains code for building and searching an index to
    -// the Go module cache. The directory containing the index, returned by
     -// IndexDir(), contains a file index-name-<ver> that contains the name
    +// Package modindex contains code for building and searching an
    +// [Index] of the Go module cache.
    +package modindex
    +
    +// The directory containing the index, returned by
     +// [IndexDir], contains a file index-name-<ver> that contains the name
     // of the current index. We believe writing that short file is atomic.
    -// ReadIndex reads that file to get the file name of the index.
    +// [Read] reads that file to get the file name of the index.
     // WriteIndex writes an index with a unique name and then
      // writes that name into a new version of index-name-<ver>.
      // (<ver> stands for the CurrentVersion of the index format.)
    -package modindex
     
     import (
    +	"maps"
    +	"os"
     	"path/filepath"
     	"slices"
     	"strings"
    @@ -21,144 +25,95 @@ import (
     	"golang.org/x/mod/semver"
     )
     
    -// Create always creates a new index for the go module cache that is in cachedir.
    -func Create(cachedir string) error {
    -	_, err := indexModCache(cachedir, true)
    -	return err
    -}
    -
    -// Update the index for the go module cache that is in cachedir,
    -// If there is no existing index it will build one.
    -// If there are changed directories since the last index, it will
    -// write a new one and return true. Otherwise it returns false.
    -func Update(cachedir string) (bool, error) {
    -	return indexModCache(cachedir, false)
    +// Update updates the index for the specified Go
    +// module cache directory, creating it as needed.
    +// On success it returns the current index.
    +func Update(gomodcache string) (*Index, error) {
    +	prev, err := Read(gomodcache)
    +	if err != nil {
    +		if !os.IsNotExist(err) {
    +			return nil, err
    +		}
    +		prev = nil
    +	}
    +	return update(gomodcache, prev)
     }
     
    -// indexModCache writes an index current as of when it is called.
    -// If clear is true the index is constructed from all of GOMODCACHE
    -// otherwise the index is constructed from the last previous index
    -// and the updates to the cache. It returns true if it wrote an index,
    -// false otherwise.
    -func indexModCache(cachedir string, clear bool) (bool, error) {
    -	cachedir, err := filepath.Abs(cachedir)
    +// update builds, writes, and returns the current index.
    +//
    +// If old is nil, the new index is built from all of GOMODCACHE;
    +// otherwise it is built from the old index plus cache updates
    +// since the previous index's time.
    +func update(gomodcache string, old *Index) (*Index, error) {
    +	gomodcache, err := filepath.Abs(gomodcache)
     	if err != nil {
    -		return false, err
    +		return nil, err
     	}
    -	cd := Abspath(cachedir)
    -	future := time.Now().Add(24 * time.Hour) // safely in the future
    -	ok, err := modindexTimed(future, cd, clear)
    +	new, changed, err := build(gomodcache, old)
     	if err != nil {
    -		return false, err
    +		return nil, err
     	}
    -	return ok, nil
    -}
    -
    -// modindexTimed writes an index current as of onlyBefore.
    -// If clear is true the index is constructed from all of GOMODCACHE
    -// otherwise the index is constructed from the last previous index
    -// and all the updates to the cache before onlyBefore.
    -// It returns true if it wrote a new index, false if it wrote nothing.
    -func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
    -	var curIndex *Index
    -	if !clear {
    -		var err error
    -		curIndex, err = ReadIndex(string(cachedir))
    -		if clear && err != nil {
    -			return false, err
    +	if old == nil || changed {
    +		if err := write(gomodcache, new); err != nil {
    +			return nil, err
     		}
    -		// TODO(pjw): check that most of those directories still exist
    -	}
    -	cfg := &work{
    -		onlyBefore: onlyBefore,
    -		oldIndex:   curIndex,
    -		cacheDir:   cachedir,
    -	}
    -	if curIndex != nil {
    -		cfg.onlyAfter = curIndex.Changed
    -	}
    -	if err := cfg.buildIndex(); err != nil {
    -		return false, err
     	}
    -	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
    -		// no changes from existing curIndex, don't write a new index
    -		return false, nil
    -	}
    -	if err := cfg.writeIndex(); err != nil {
    -		return false, err
    -	}
    -	return true, nil
    -}
    -
    -type work struct {
    -	onlyBefore time.Time // do not use directories later than this
    -	onlyAfter  time.Time // only interested in directories after this
    -	// directories from before onlyAfter come from oldIndex
    -	oldIndex *Index
    -	newIndex *Index
    -	cacheDir Abspath
    +	return new, nil
     }
     
    -func (w *work) buildIndex() error {
    -	// The effective date of the new index should be at least
    -	// slightly earlier than when the directories are scanned
    -	// so set it now.
    -	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
    -	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
    -	if len(dirs) == 0 {
    -		return nil
    +// build returns a new index for the specified Go module cache (an
    +// absolute path).
    +//
    +// If an old index is provided, only directories more recent than it
     +// are scanned; older directories are provided by the old
    +// Index.
    +//
    +// The boolean result indicates whether new entries were found.
    +func build(gomodcache string, old *Index) (*Index, bool, error) {
    +	// Set the time window.
    +	var start time.Time // = dawn of time
    +	if old != nil {
    +		start = old.ValidAt
     	}
    -	newdirs, err := byImportPath(dirs)
    +	now := time.Now()
    +	end := now.Add(24 * time.Hour) // safely in the future
    +
    +	// Enumerate GOMODCACHE package directories.
    +	// Choose the best (latest) package for each import path.
    +	pkgDirs := findDirs(gomodcache, start, end)
    +	dirByPath, err := bestDirByImportPath(pkgDirs)
     	if err != nil {
    -		return err
    +		return nil, false, err
     	}
    -	// for each import path it might occur only in newdirs,
    -	// only in w.oldIndex, or in both.
    -	// If it occurs in both, use the semantically later one
    -	if w.oldIndex != nil {
    -		for _, e := range w.oldIndex.Entries {
    -			found, ok := newdirs[e.ImportPath]
    -			if !ok {
    -				w.newIndex.Entries = append(w.newIndex.Entries, e)
    -				continue // use this one, there is no new one
    -			}
    -			if semver.Compare(found[0].version, e.Version) > 0 {
    -				// use the new one
    -			} else {
    -				// use the old one, forget the new one
    -				w.newIndex.Entries = append(w.newIndex.Entries, e)
    -				delete(newdirs, e.ImportPath)
    +
    +	// For each import path it might occur only in
    +	// dirByPath, only in old, or in both.
    +	// If both, use the semantically later one.
    +	var entries []Entry
    +	if old != nil {
    +		for _, entry := range old.Entries {
    +			dir, ok := dirByPath[entry.ImportPath]
    +			if !ok || semver.Compare(dir.version, entry.Version) <= 0 {
    +				// New dir is missing or not more recent; use old entry.
    +				entries = append(entries, entry)
    +				delete(dirByPath, entry.ImportPath)
     			}
     		}
     	}
    -	// get symbol information for all the new diredtories
    -	getSymbols(w.cacheDir, newdirs)
    -	// assemble the new index entries
    -	for k, v := range newdirs {
    -		d := v[0]
    -		pkg, names := processSyms(d.syms)
    -		if pkg == "" {
    -			continue // PJW: does this ever happen?
    -		}
    -		entry := Entry{
    -			PkgName:    pkg,
    -			Dir:        d.path,
    -			ImportPath: k,
    -			Version:    d.version,
    -			Names:      names,
    -		}
    -		w.newIndex.Entries = append(w.newIndex.Entries, entry)
    -	}
    -	// sort the entries in the new index
    -	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
    -		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
    +
    +	// Extract symbol information for all the new directories.
    +	newEntries := extractSymbols(gomodcache, maps.Values(dirByPath))
    +	entries = append(entries, newEntries...)
    +	slices.SortFunc(entries, func(x, y Entry) int {
    +		if n := strings.Compare(x.PkgName, y.PkgName); n != 0 {
     			return n
     		}
    -		return strings.Compare(l.ImportPath, r.ImportPath)
    +		return strings.Compare(x.ImportPath, y.ImportPath)
     	})
    -	return nil
    -}
     
    -func (w *work) writeIndex() error {
    -	return writeIndex(w.cacheDir, w.newIndex)
    +	return &Index{
    +		GOMODCACHE: gomodcache,
    +		ValidAt:    now, // time before the directories were scanned
    +		Entries:    entries,
    +	}, len(newEntries) > 0, nil
     }
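
The rewritten build/compareDirectory logic orders path@version directories with golang.org/x/mod/semver. A small sketch of that ordering, assuming x/mod is available on the module path; the version strings are made up:

    package main

    import (
    	"fmt"
    	"slices"

    	"golang.org/x/mod/semver"
    )

    func main() {
    	versions := []string{"v1.2.0", "v1.10.0", "v1.2.0-pre"}

    	// Negating semver.Compare sorts descending (latest first),
    	// the same sign flip compareDirectory applies.
    	slices.SortFunc(versions, func(x, y string) int {
    		return -semver.Compare(x, y)
    	})

    	fmt.Println(versions) // [v1.10.0 v1.2.0 v1.2.0-pre]
    }
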
    diff --git a/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go b/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go
    index b918529d4..fe24db9b1 100644
    --- a/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go
    +++ b/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go
    @@ -10,11 +10,13 @@ import (
     	"go/parser"
     	"go/token"
     	"go/types"
    +	"iter"
     	"os"
     	"path/filepath"
     	"runtime"
     	"slices"
     	"strings"
    +	"sync"
     
     	"golang.org/x/sync/errgroup"
     )
    @@ -30,45 +32,69 @@ import (
     type symbol struct {
     	pkg  string // name of the symbols's package
     	name string // declared name
    -	kind string // T, C, V, or F, follwed by D if deprecated
    +	kind string // T, C, V, or F, followed by D if deprecated
     	sig  string // signature information, for F
     }
     
    -// find the symbols for the best directories
    -func getSymbols(cd Abspath, dirs map[string][]*directory) {
    +// extractSymbols returns a (new, unordered) array of Entries, one for
    +// each provided package directory, describing its exported symbols.
    +func extractSymbols(cwd string, dirs iter.Seq[directory]) []Entry {
    +	var (
    +		mu      sync.Mutex
    +		entries []Entry
    +	)
    +
     	var g errgroup.Group
     	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
    -	for _, vv := range dirs {
    -		// throttling some day?
    -		d := vv[0]
    +	for dir := range dirs {
     		g.Go(func() error {
    -			thedir := filepath.Join(string(cd), string(d.path))
    +			thedir := filepath.Join(cwd, string(dir.path))
     			mode := parser.SkipObjectResolution | parser.ParseComments
     
    -			fi, err := os.ReadDir(thedir)
    +			// Parse all Go files in dir and extract symbols.
    +			dirents, err := os.ReadDir(thedir)
     			if err != nil {
     				return nil // log this someday?
     			}
    -			for _, fx := range fi {
    -				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
    +			var syms []symbol
    +			for _, dirent := range dirents {
    +				if !strings.HasSuffix(dirent.Name(), ".go") ||
    +					strings.HasSuffix(dirent.Name(), "_test.go") {
     					continue
     				}
    -				fname := filepath.Join(thedir, fx.Name())
    +				fname := filepath.Join(thedir, dirent.Name())
     				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
     				if err != nil {
     					continue // ignore errors, someday log them?
     				}
    -				d.syms = append(d.syms, getFileExports(tr)...)
    +				syms = append(syms, getFileExports(tr)...)
    +			}
    +
    +			// Create an entry for the package.
    +			pkg, names := processSyms(syms)
    +			if pkg != "" {
    +				mu.Lock()
    +				defer mu.Unlock()
    +				entries = append(entries, Entry{
    +					PkgName:    pkg,
    +					Dir:        dir.path,
    +					ImportPath: dir.importPath,
    +					Version:    dir.version,
    +					Names:      names,
    +				})
     			}
    +
     			return nil
     		})
     	}
    -	g.Wait()
    +	g.Wait() // ignore error
    +
    +	return entries
     }
     
     func getFileExports(f *ast.File) []symbol {
     	pkg := f.Name.Name
    -	if pkg == "main" {
    +	if pkg == "main" || pkg == "" {
     		return nil
     	}
     	var ans []symbol
    @@ -110,7 +136,7 @@ func getFileExports(f *ast.File) []symbol {
     				// The only place a $ can occur seems to be in a struct tag, which
     				// can be an arbitrary string literal, and ExprString does not presently
     				// print struct tags. So for this to happen the type of a formal parameter
    -				// has to be a explict struct, e.g. foo(x struct{a int "$"}) and ExprString
     +				// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
     				// would have to show the struct tag. Even testing for this case seems
     				// a waste of effort, but let's remember the possibility
     				if strings.Contains(tp, "$") {
    @@ -202,17 +228,18 @@ func processSyms(syms []symbol) (string, []string) {
     	pkg := syms[0].pkg
     	var names []string
     	for _, s := range syms {
    +		if s.pkg != pkg {
     +			// Symbols came from two files in the same dir
    +			// with different package declarations.
    +			continue
    +		}
     		var nx string
    -		if s.pkg == pkg {
    -			if s.sig != "" {
    -				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
    -			} else {
    -				nx = fmt.Sprintf("%s %s", s.name, s.kind)
    -			}
    -			names = append(names, nx)
    +		if s.sig != "" {
    +			nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
     		} else {
    -			continue // PJW: do we want to keep track of these?
    +			nx = fmt.Sprintf("%s %s", s.name, s.kind)
     		}
    +		names = append(names, nx)
     	}
     	return pkg, names
     }
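
extractSymbols above is an instance of a common bounded fan-out pattern: an errgroup.Group capped with SetLimit, a mutex guarding the shared result slice, and a deliberately ignored Wait error. A stripped-down sketch of the same shape; the squaring step is a hypothetical stand-in for parsing one directory:

    package main

    import (
    	"fmt"
    	"runtime"
    	"sync"

    	"golang.org/x/sync/errgroup"
    )

    func main() {
    	var (
    		mu      sync.Mutex
    		results []int
    	)

    	var g errgroup.Group
    	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2)) // same bound as extractSymbols

    	for i := range 10 {
    		g.Go(func() error {
    			v := i * i // hypothetical per-directory work
    			mu.Lock()
    			defer mu.Unlock()
    			results = append(results, v)
    			return nil
    		})
    	}
    	g.Wait() // error ignored, as in the patch

    	fmt.Println(len(results)) // 10
    }
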
    diff --git a/tools/vendor/golang.org/x/tools/internal/modindex/types.go b/tools/vendor/golang.org/x/tools/internal/modindex/types.go
    deleted file mode 100644
    index ece448863..000000000
    --- a/tools/vendor/golang.org/x/tools/internal/modindex/types.go
    +++ /dev/null
    @@ -1,25 +0,0 @@
    -// Copyright 2024 The Go Authors. All rights reserved.
    -// Use of this source code is governed by a BSD-style
    -// license that can be found in the LICENSE file.
    -
    -package modindex
    -
    -import (
    -	"strings"
    -)
    -
    -// some special types to avoid confusions
    -
    -// distinguish various types of directory names. It's easy to get confused.
    -type Abspath string // absolute paths
    -type Relpath string // paths with GOMODCACHE prefix removed
    -
    -func toRelpath(cachedir Abspath, s string) Relpath {
    -	if strings.HasPrefix(s, string(cachedir)) {
    -		if s == string(cachedir) {
    -			return Relpath("")
    -		}
    -		return Relpath(s[len(cachedir)+1:])
    -	}
    -	return Relpath(s)
    -}
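
With the Abspath/Relpath wrappers deleted, the package now passes plain strings documented as GOMODCACHE-relative. For reference, the standard library can derive such a relative path directly; the cache location below is hypothetical and the output assumes Unix-style separators:

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    func main() {
    	gomodcache := "/home/user/go/pkg/mod"
    	dir := "/home/user/go/pkg/mod/golang.org/x/mod@v0.21.0/semver"

    	// filepath.Rel covers the prefix trimming that the removed
    	// toRelpath helper did by hand.
    	rel, err := filepath.Rel(gomodcache, dir)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(rel) // golang.org/x/mod@v0.21.0/semver
    }
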
    diff --git a/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    index 25ebab663..929b470be 100644
    --- a/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    +++ b/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
    @@ -5,6 +5,8 @@
     // Package packagesinternal exposes internal-only fields from go/packages.
     package packagesinternal
     
    +import "fmt"
    +
     var GetDepsErrors = func(p any) []*PackageError { return nil }
     
     type PackageError struct {
    @@ -13,8 +15,9 @@ type PackageError struct {
     	Err         string   // the error itself
     }
     
    +func (err PackageError) String() string {
    +	return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
    +}
    +
     var TypecheckCgo int
     var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
    -
    -var SetModFlag = func(config any, value string) {}
    -var SetModFile = func(config any, value string) {}
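
The String method added above makes PackageError satisfy fmt.Stringer, so %s and %v formatting pick it up automatically. A self-contained illustration with a local copy of the type and made-up field values:

    package main

    import "fmt"

    type PackageError struct {
    	ImportStack []string
    	Pos         string
    	Err         string // the error itself
    }

    func (err PackageError) String() string {
    	return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
    }

    func main() {
    	e := PackageError{
    		ImportStack: []string{"a", "b"},
    		Pos:         "a.go:1:1",
    		Err:         "import cycle",
    	}
    	fmt.Println(e) // a.go:1:1: import cycle (import stack: [a b])
    }
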
    diff --git a/tools/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/tools/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    index f6cb37c5c..c0aba26c4 100644
    --- a/tools/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    +++ b/tools/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
    @@ -259,7 +259,7 @@ func (r *Decoder) rawUvarint() uint64 {
     func readUvarint(r *strings.Reader) (uint64, error) {
     	var x uint64
     	var s uint
    -	for i := 0; i < binary.MaxVarintLen64; i++ {
    +	for i := range binary.MaxVarintLen64 {
     		b, err := r.ReadByte()
     		if err != nil {
     			if i > 0 && err == io.EOF {
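
The loop bound in readUvarint is binary.MaxVarintLen64, the worst-case byte length of an unsigned varint. A quick round trip with the standard encoder shows that bound being hit; the value is arbitrary:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"math"
    )

    func main() {
    	buf := make([]byte, binary.MaxVarintLen64)

    	// The largest uint64 needs the full 10 bytes.
    	n := binary.PutUvarint(buf, math.MaxUint64)
    	fmt.Println(n) // 10

    	v, _ := binary.Uvarint(buf[:n])
    	fmt.Println(v == math.MaxUint64) // true
    }
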
    diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go b/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go
    index 7cca431cd..96ad6c582 100644
    --- a/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go
    +++ b/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go
    @@ -12,348 +12,354 @@ type pkginfo struct {
     }
     
     var deps = [...]pkginfo{
    -	{"archive/tar", "\x03k\x03E5\x01\v\x01#\x01\x01\x02\x05\t\x02\x01\x02\x02\v"},
    -	{"archive/zip", "\x02\x04a\a\x16\x0205\x01+\x05\x01\x10\x03\x02\r\x04"},
    -	{"bufio", "\x03k}E\x13"},
    -	{"bytes", "n+R\x03\fG\x02\x02"},
    +	{"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"},
    +	{"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"},
    +	{"bufio", "\x03k\x83\x01D\x14"},
    +	{"bytes", "n*Y\x03\fG\x02\x02"},
     	{"cmp", ""},
    -	{"compress/bzip2", "\x02\x02\xe7\x01B"},
    -	{"compress/flate", "\x02l\x03z\r\x024\x01\x03"},
    -	{"compress/gzip", "\x02\x04a\a\x03\x15eT"},
    -	{"compress/lzw", "\x02l\x03z"},
    -	{"compress/zlib", "\x02\x04a\a\x03\x13\x01f"},
    -	{"container/heap", "\xae\x02"},
    +	{"compress/bzip2", "\x02\x02\xed\x01A"},
    +	{"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"},
    +	{"compress/gzip", "\x02\x04a\a\x03\x14lT"},
    +	{"compress/lzw", "\x02l\x03\x80\x01"},
    +	{"compress/zlib", "\x02\x04a\a\x03\x12\x01m"},
    +	{"container/heap", "\xb3\x02"},
     	{"container/list", ""},
     	{"container/ring", ""},
    -	{"context", "n\\h\x01\f"},
    -	{"crypto", "\x84\x01gD"},
    -	{"crypto/aes", "\x10\n\a\x8e\x02"},
    -	{"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1d,Q"},
    -	{"crypto/des", "\x10\x13\x1d.,\x95\x01\x03"},
    -	{"crypto/dsa", "@\x04*}\x0e"},
    -	{"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1d}"},
    -	{"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1d}\x0e\x04K\x01"},
    -	{"crypto/ed25519", "\x0e\x1c\x16\n\a\x1d}D"},
    -	{"crypto/elliptic", "0>}\x0e9"},
    -	{"crypto/fips140", " \x05\x91\x01"},
    -	{"crypto/hkdf", "-\x12\x01.\x16"},
    -	{"crypto/hmac", "\x1a\x14\x11\x01\x113"},
    -	{"crypto/internal/boring", "\x0e\x02\rg"},
    -	{"crypto/internal/boring/bbig", "\x1a\xdf\x01L"},
    -	{"crypto/internal/boring/bcache", "\xb3\x02\x12"},
    +	{"context", "n\\m\x01\r"},
    +	{"crypto", "\x83\x01nC"},
    +	{"crypto/aes", "\x10\n\a\x93\x02"},
    +	{"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"},
    +	{"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"},
    +	{"crypto/dsa", "A\x04)\x83\x01\r"},
    +	{"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"},
    +	{"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"},
    +	{"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"},
    +	{"crypto/elliptic", "0>\x83\x01\r9"},
    +	{"crypto/fips140", " \x05"},
    +	{"crypto/hkdf", "-\x13\x01-\x15"},
    +	{"crypto/hmac", "\x1a\x14\x12\x01\x111"},
    +	{"crypto/internal/boring", "\x0e\x02\rf"},
    +	{"crypto/internal/boring/bbig", "\x1a\xe4\x01M"},
    +	{"crypto/internal/boring/bcache", "\xb8\x02\x13"},
     	{"crypto/internal/boring/sig", ""},
    -	{"crypto/internal/cryptotest", "\x03\r\n)\x0e\x1a\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\f\x05\n"},
    -	{"crypto/internal/entropy", "E"},
    -	{"crypto/internal/fips140", ">0}9\f\x15"},
    -	{"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05+\x8c\x015"},
    -	{"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06+\x8a\x01"},
    -	{"crypto/internal/fips140/alias", "\xc5\x02"},
    -	{"crypto/internal/fips140/bigmod", "%\x17\x01\x06+\x8c\x01"},
    -	{"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"},
    -	{"crypto/internal/fips140/check/checktest", "%\xff\x01!"},
    -	{"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01)}\x0f8"},
    -	{"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f2}\x0f8"},
    -	{"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068}G"},
    -	{"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc1\x01\x03"},
    -	{"crypto/internal/fips140/edwards25519", "%\a\f\x042\x8c\x018"},
    -	{"crypto/internal/fips140/edwards25519/field", "%\x13\x042\x8c\x01"},
    -	{"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:"},
    -	{"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018"},
    -	{"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x042"},
    -	{"crypto/internal/fips140/nistec", "%\f\a\x042\x8c\x01*\x0e\x13"},
    -	{"crypto/internal/fips140/nistec/fiat", "%\x136\x8c\x01"},
    -	{"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:"},
    -	{"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026}G"},
    -	{"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06+\x8c\x01"},
    -	{"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x011\x8c\x01K"},
    -	{"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06+\x8c\x01"},
    -	{"crypto/internal/fips140/ssh", " \x05"},
    -	{"crypto/internal/fips140/subtle", "#\x19\xbe\x01"},
    -	{"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028"},
    -	{"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b2"},
    +	{"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"},
    +	{"crypto/internal/entropy", "F"},
    +	{"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"},
    +	{"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"},
    +	{"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"},
    +	{"crypto/internal/fips140/alias", "\xcb\x02"},
    +	{"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"},
    +	{"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"},
    +	{"crypto/internal/fips140/check/checktest", "%\x85\x02!"},
    +	{"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"},
    +	{"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"},
    +	{"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"},
    +	{"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"},
    +	{"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"},
    +	{"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"},
    +	{"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"},
    +	{"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"},
    +	{"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"},
    +	{"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"},
    +	{"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"},
    +	{"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"},
    +	{"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"},
    +	{"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"},
    +	{"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"},
    +	{"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"},
    +	{"crypto/internal/fips140/ssh", "%^"},
    +	{"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"},
    +	{"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"},
    +	{"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"},
    +	{"crypto/internal/fips140cache", "\xaa\x02\r&"},
     	{"crypto/internal/fips140deps", ""},
    -	{"crypto/internal/fips140deps/byteorder", "\x9a\x01"},
    +	{"crypto/internal/fips140deps/byteorder", "\x99\x01"},
     	{"crypto/internal/fips140deps/cpu", "\xae\x01\a"},
     	{"crypto/internal/fips140deps/godebug", "\xb6\x01"},
    -	{"crypto/internal/fips140hash", "5\x1a5\xc1\x01"},
    -	{"crypto/internal/fips140only", "'\r\x01\x01N25"},
    +	{"crypto/internal/fips140hash", "5\x1b3\xc8\x01"},
    +	{"crypto/internal/fips140only", "'\r\x01\x01M3;"},
     	{"crypto/internal/fips140test", ""},
    -	{"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d$,`M"},
    -	{"crypto/internal/impl", "\xb0\x02"},
    -	{"crypto/internal/randutil", "\xeb\x01\x12"},
    -	{"crypto/internal/sysrand", "\xd7\x01@\x1b\x01\f\x06"},
    +	{"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"},
    +	{"crypto/internal/impl", "\xb5\x02"},
    +	{"crypto/internal/randutil", "\xf1\x01\x12"},
    +	{"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"},
     	{"crypto/internal/sysrand/internal/seccomp", "n"},
    -	{"crypto/md5", "\x0e2.\x16\x16`"},
    +	{"crypto/md5", "\x0e3-\x15\x16g"},
     	{"crypto/mlkem", "/"},
    -	{"crypto/pbkdf2", "2\r\x01.\x16"},
    -	{"crypto/rand", "\x1a\x06\a\x19\x04\x01)}\x0eL"},
    -	{"crypto/rc4", "#\x1d.\xc1\x01"},
    -	{"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1d\x03\x1325\r\x01"},
    -	{"crypto/sha1", "\x0e\f&.\x16\x16\x14L"},
    -	{"crypto/sha256", "\x0e\f\x1aP"},
    -	{"crypto/sha3", "\x0e'O\xc1\x01"},
    -	{"crypto/sha512", "\x0e\f\x1cN"},
    -	{"crypto/subtle", "8\x98\x01T"},
    -	{"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x18\x02\x03\x13\x16\x14\b5\x16\x16\r\t\x01\x01\x01\x02\x01\f\x06\x02\x01"},
    -	{"crypto/tls/internal/fips140tls", " \x93\x02"},
    -	{"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x01\x0e\x06\x02\x02\x03E5\x03\t\x01\x01\x01\a\x10\x05\t\x05\v\x01\x02\r\x02\x01\x01\x02\x03\x01"},
    -	{"crypto/x509/internal/macos", "\x03k'\x8f\x01\v\x10\x06"},
    -	{"crypto/x509/pkix", "d\x06\a\x88\x01F"},
    -	{"database/sql", "\x03\nK\x16\x03z\f\x06\"\x05\t\x02\x03\x01\f\x02\x02\x02"},
    -	{"database/sql/driver", "\ra\x03\xae\x01\x10\x10"},
    -	{"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03`\x18\x02\x01+\x10\x1e"},
    -	{"debug/dwarf", "\x03d\a\x03z1\x12\x01\x01"},
    -	{"debug/elf", "\x03\x06Q\r\a\x03`\x19\x01,\x18\x01\x15"},
    -	{"debug/gosym", "\x03d\n\xbd\x01\x01\x01\x02"},
    -	{"debug/macho", "\x03\x06Q\r\n`\x1a,\x18\x01"},
    -	{"debug/pe", "\x03\x06Q\r\a\x03`\x1a,\x18\x01\x15"},
    -	{"debug/plan9obj", "g\a\x03`\x1a,"},
    -	{"embed", "n+:\x18\x01S"},
    +	{"crypto/pbkdf2", "2\x0e\x01-\x15"},
    +	{"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"},
    +	{"crypto/rc4", "#\x1e-\xc6\x01"},
    +	{"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"},
    +	{"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"},
    +	{"crypto/sha256", "\x0e\f\x1aO"},
    +	{"crypto/sha3", "\x0e'N\xc8\x01"},
    +	{"crypto/sha512", "\x0e\f\x1cM"},
    +	{"crypto/subtle", "8\x9b\x01W"},
    +	{"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"},
    +	{"crypto/tls/internal/fips140tls", "\x17\xa1\x02"},
    +	{"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"},
    +	{"crypto/x509/pkix", "d\x06\a\x8d\x01G"},
    +	{"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"},
    +	{"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"},
    +	{"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"},
    +	{"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"},
    +	{"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"},
    +	{"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"},
    +	{"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"},
    +	{"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"},
    +	{"debug/plan9obj", "g\a\x03e\x1b,"},
    +	{"embed", "n*@\x19\x01S"},
     	{"embed/internal/embedtest", ""},
     	{"encoding", ""},
    -	{"encoding/ascii85", "\xeb\x01D"},
    -	{"encoding/asn1", "\x03k\x03\x87\x01\x01&\x0e\x02\x01\x0f\x03\x01"},
    -	{"encoding/base32", "\xeb\x01B\x02"},
    -	{"encoding/base64", "\x9a\x01QB\x02"},
    -	{"encoding/binary", "n}\r'\x0e\x05"},
    -	{"encoding/csv", "\x02\x01k\x03zE\x11\x02"},
    -	{"encoding/gob", "\x02`\x05\a\x03`\x1a\f\x01\x02\x1d\b\x13\x01\x0e\x02"},
    -	{"encoding/hex", "n\x03zB\x03"},
    -	{"encoding/json", "\x03\x01^\x04\b\x03z\r'\x0e\x02\x01\x02\x0f\x01\x01\x02"},
    -	{"encoding/pem", "\x03c\b}B\x03"},
    -	{"encoding/xml", "\x02\x01_\f\x03z4\x05\v\x01\x02\x0f\x02"},
    -	{"errors", "\xca\x01{"},
    -	{"expvar", "kK9\t\n\x15\r\t\x02\x03\x01\x10"},
    -	{"flag", "b\f\x03z,\b\x05\t\x02\x01\x0f"},
    -	{"fmt", "nE8\r\x1f\b\x0e\x02\x03\x11"},
    -	{"go/ast", "\x03\x01m\x0f\x01j\x03)\b\x0e\x02\x01"},
    -	{"go/ast/internal/tests", ""},
    -	{"go/build", "\x02\x01k\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\t\x02\x01\x11\x02\x02"},
    -	{"go/build/constraint", "n\xc1\x01\x01\x11\x02"},
    -	{"go/constant", "q\x10w\x01\x015\x01\x02\x11"},
    -	{"go/doc", "\x04m\x01\x06\t=-1\x11\x02\x01\x11\x02"},
    -	{"go/doc/comment", "\x03n\xbc\x01\x01\x01\x01\x11\x02"},
    -	{"go/format", "\x03n\x01\f\x01\x02jE"},
    -	{"go/importer", "t\a\x01\x01\x04\x01i9"},
    -	{"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x05\v\x01g\x02,\x01\x05\x12\x01\v\b"},
    -	{"go/internal/gcimporter", "\x02o\x10\x01/\x05\x0e',\x16\x03\x02"},
    -	{"go/internal/srcimporter", "q\x01\x02\n\x03\x01i,\x01\x05\x13\x02\x13"},
    -	{"go/parser", "\x03k\x03\x01\x03\v\x01j\x01+\x06\x13"},
    -	{"go/printer", "q\x01\x03\x03\tj\r\x1f\x16\x02\x01\x02\n\x05\x02"},
    -	{"go/scanner", "\x03n\x10j2\x11\x01\x12\x02"},
    -	{"go/token", "\x04m\xbc\x01\x02\x03\x01\x0e\x02"},
    -	{"go/types", "\x03\x01\x06d\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\t\x01\x01\x01\x02\x01\x0e\x02\x02"},
    -	{"go/version", "\xbb\x01u"},
    -	{"hash", "\xeb\x01"},
    -	{"hash/adler32", "n\x16\x16"},
    -	{"hash/crc32", "n\x16\x16\x14\x84\x01\x01"},
    -	{"hash/crc64", "n\x16\x16\x98\x01"},
    -	{"hash/fnv", "n\x16\x16`"},
    -	{"hash/maphash", "\x95\x01\x05\x1b\x03@M"},
    -	{"html", "\xb0\x02\x02\x11"},
    -	{"html/template", "\x03h\x06\x19,5\x01\v \x05\x01\x02\x03\r\x01\x02\v\x01\x03\x02"},
    -	{"image", "\x02l\x1f^\x0f5\x03\x01"},
    +	{"encoding/ascii85", "\xf1\x01C"},
    +	{"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"},
    +	{"encoding/base32", "\xf1\x01A\x02"},
    +	{"encoding/base64", "\x99\x01XA\x02"},
    +	{"encoding/binary", "n\x83\x01\f(\r\x05"},
    +	{"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"},
    +	{"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"},
    +	{"encoding/hex", "n\x03\x80\x01A\x03"},
    +	{"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"},
    +	{"encoding/pem", "\x03c\b\x83\x01A\x03"},
    +	{"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"},
    +	{"errors", "\xca\x01\x81\x01"},
    +	{"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"},
    +	{"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"},
    +	{"fmt", "nE>\f \b\r\x02\x03\x12"},
    +	{"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"},
    +	{"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"},
    +	{"go/build/constraint", "n\xc6\x01\x01\x12\x02"},
    +	{"go/constant", "q\x0f}\x01\x024\x01\x02\x12"},
    +	{"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"},
    +	{"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"},
    +	{"go/format", "\x03n\x01\v\x01\x02qD"},
    +	{"go/importer", "s\a\x01\x01\x04\x01p9"},
    +	{"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"},
    +	{"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"},
    +	{"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"},
    +	{"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"},
    +	{"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"},
    +	{"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"},
    +	{"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"},
    +	{"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"},
    +	{"go/version", "\xbb\x01z"},
    +	{"hash", "\xf1\x01"},
    +	{"hash/adler32", "n\x15\x16"},
    +	{"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"},
    +	{"hash/crc64", "n\x15\x16\x9e\x01"},
    +	{"hash/fnv", "n\x15\x16g"},
    +	{"hash/maphash", "\x83\x01\x11!\x03\x93\x01"},
    +	{"html", "\xb5\x02\x02\x12"},
    +	{"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"},
    +	{"image", "\x02l\x1ee\x0f4\x03\x01"},
     	{"image/color", ""},
    -	{"image/color/palette", "\x8d\x01"},
    -	{"image/draw", "\x8c\x01\x01\x04"},
    -	{"image/gif", "\x02\x01\x05f\x03\x1b\x01\x01\x01\vQ"},
    -	{"image/internal/imageutil", "\x8c\x01"},
    -	{"image/jpeg", "\x02l\x1e\x01\x04Z"},
    -	{"image/png", "\x02\a^\n\x13\x02\x06\x01^D"},
    -	{"index/suffixarray", "\x03d\a}\r*\v\x01"},
    -	{"internal/abi", "\xb5\x01\x90\x01"},
    -	{"internal/asan", "\xc5\x02"},
    -	{"internal/bisect", "\xa4\x02\x0e\x01"},
    -	{"internal/buildcfg", "qG_\x06\x02\x05\v\x01"},
    -	{"internal/bytealg", "\xae\x01\x97\x01"},
    +	{"image/color/palette", "\x8c\x01"},
    +	{"image/draw", "\x8b\x01\x01\x04"},
    +	{"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"},
    +	{"image/internal/imageutil", "\x8b\x01"},
    +	{"image/jpeg", "\x02l\x1d\x01\x04a"},
    +	{"image/png", "\x02\a^\n\x12\x02\x06\x01eC"},
    +	{"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"},
    +	{"internal/abi", "\xb5\x01\x96\x01"},
    +	{"internal/asan", "\xcb\x02"},
    +	{"internal/bisect", "\xaa\x02\r\x01"},
    +	{"internal/buildcfg", "qGe\x06\x02\x05\n\x01"},
    +	{"internal/bytealg", "\xae\x01\x9d\x01"},
     	{"internal/byteorder", ""},
     	{"internal/cfg", ""},
    -	{"internal/chacha8rand", "\x9a\x01\x1b\x90\x01"},
    +	{"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"},
    +	{"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"},
     	{"internal/copyright", ""},
     	{"internal/coverage", ""},
     	{"internal/coverage/calloc", ""},
    -	{"internal/coverage/cfile", "k\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01$\x01\x1e,\x06\a\v\x01\x03\f\x06"},
    -	{"internal/coverage/cformat", "\x04m-\x04I\f6\x01\x02\f"},
    -	{"internal/coverage/cmerge", "q-Z"},
    -	{"internal/coverage/decodecounter", "g\n-\v\x02@,\x18\x16"},
    -	{"internal/coverage/decodemeta", "\x02e\n\x17\x16\v\x02@,"},
    -	{"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02>\f \x16"},
    -	{"internal/coverage/encodemeta", "\x02\x01d\n\x13\x04\x16\r\x02>,."},
    -	{"internal/coverage/pods", "\x04m-y\x06\x05\v\x02\x01"},
    -	{"internal/coverage/rtcov", "\xc5\x02"},
    -	{"internal/coverage/slicereader", "g\nzZ"},
    -	{"internal/coverage/slicewriter", "qz"},
    -	{"internal/coverage/stringtab", "q8\x04>"},
    +	{"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"},
    +	{"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"},
    +	{"internal/coverage/cmerge", "q-_"},
    +	{"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"},
    +	{"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"},
    +	{"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"},
    +	{"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."},
    +	{"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"},
    +	{"internal/coverage/rtcov", "\xcb\x02"},
    +	{"internal/coverage/slicereader", "g\n\x80\x01Z"},
    +	{"internal/coverage/slicewriter", "q\x80\x01"},
    +	{"internal/coverage/stringtab", "q8\x04D"},
     	{"internal/coverage/test", ""},
     	{"internal/coverage/uleb128", ""},
    -	{"internal/cpu", "\xc5\x02"},
    -	{"internal/dag", "\x04m\xbc\x01\x03"},
    -	{"internal/diff", "\x03n\xbd\x01\x02"},
    -	{"internal/exportdata", "\x02\x01k\x03\x03]\x1a,\x01\x05\x12\x01\x02"},
    -	{"internal/filepathlite", "n+:\x19A"},
    -	{"internal/fmtsort", "\x04\x9b\x02\x0e"},
    -	{"internal/fuzz", "\x03\nA\x19\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\v\x01\x02\x01\x01\v\x04\x02"},
    +	{"internal/cpu", "\xcb\x02"},
    +	{"internal/dag", "\x04m\xc1\x01\x03"},
    +	{"internal/diff", "\x03n\xc2\x01\x02"},
    +	{"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"},
    +	{"internal/filepathlite", "n*@\x1a@"},
    +	{"internal/fmtsort", "\x04\xa1\x02\r"},
    +	{"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"},
     	{"internal/goarch", ""},
    -	{"internal/godebug", "\x97\x01 {\x01\x12"},
    +	{"internal/godebug", "\x96\x01!\x80\x01\x01\x13"},
     	{"internal/godebugs", ""},
     	{"internal/goexperiment", ""},
     	{"internal/goos", ""},
    -	{"internal/goroot", "\x97\x02\x01\x05\x13\x02"},
    +	{"internal/goroot", "\x9d\x02\x01\x05\x12\x02"},
     	{"internal/gover", "\x04"},
     	{"internal/goversion", ""},
     	{"internal/itoa", ""},
    -	{"internal/lazyregexp", "\x97\x02\v\x0e\x02"},
    -	{"internal/lazytemplate", "\xeb\x01,\x19\x02\v"},
    -	{"internal/msan", "\xc5\x02"},
    +	{"internal/lazyregexp", "\x9d\x02\v\r\x02"},
    +	{"internal/lazytemplate", "\xf1\x01,\x18\x02\f"},
    +	{"internal/msan", "\xcb\x02"},
     	{"internal/nettrace", ""},
    -	{"internal/obscuretestdata", "f\x85\x01,"},
    +	{"internal/obscuretestdata", "f\x8b\x01,"},
     	{"internal/oserror", "n"},
    -	{"internal/pkgbits", "\x03K\x19\a\x03\x05\vj\x0e\x1e\r\v\x01"},
    +	{"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"},
     	{"internal/platform", ""},
    -	{"internal/poll", "nO\x1a\x149\x0e\x01\x01\v\x06"},
    -	{"internal/profile", "\x03\x04g\x03z7\f\x01\x01\x0f"},
    +	{"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"},
    +	{"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"},
     	{"internal/profilerecord", ""},
    -	{"internal/race", "\x95\x01\xb0\x01"},
    -	{"internal/reflectlite", "\x95\x01 3\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"},
    +	{"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"},
    +	{"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"},
    +	{"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"},
    +	{"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"},
    +	{"net/http/httptrace", "\rFnF\x14\n "},
    +	{"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"},
    +	{"net/http/internal", "\x02\x01k\x03\x80\x01"},
    +	{"net/http/internal/ascii", "\xb5\x02\x12"},
    +	{"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"},
    +	{"net/http/internal/testcert", "\xb5\x02"},
    +	{"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"},
    +	{"net/internal/cgotest", ""},
    +	{"net/internal/socktest", "q\xc6\x01\x02"},
    +	{"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"},
    +	{"net/netip", "\x04j*\x01$@\x034\x16"},
    +	{"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"},
    +	{"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"},
    +	{"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"},
    +	{"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"},
    +	{"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"},
    +	{"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"},
    +	{"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"},
    +	{"os/exec/internal/fdtest", "\xb9\x02"},
    +	{"os/signal", "\r\x90\x02\x15\x05\x02"},
    +	{"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"},
    +	{"path", "n*\xb1\x01"},
    +	{"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"},
    +	{"plugin", "n"},
    +	{"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"},
     	{"reflect/internal/example1", ""},
     	{"reflect/internal/example2", ""},
    -	{"regexp", "\x03\xe8\x018\n\x02\x01\x02\x0f\x02"},
    -	{"regexp/syntax", "\xad\x02\x01\x01\x01\x11\x02"},
    -	{"runtime", "\x95\x01\x04\x01\x02\f\x06\a\x02\x01\x01\x0f\x04\x01\x01\x01\x01\x03\x0fc"},
    -	{"runtime/cgo", "\xd0\x01b\x01\x12"},
    -	{"runtime/coverage", "\xa0\x01K"},
    -	{"runtime/debug", "qUQ\r\t\x02\x01\x0f\x06"},
    -	{"runtime/internal/wasitest", ""},
    -	{"runtime/metrics", "\xb7\x01A,!"},
    -	{"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03$3#\r\x1f\r\t\x01\x01\x01\x02\x02\b\x03\x06"},
    -	{"runtime/race", ""},
    -	{"runtime/trace", "\rdz9\x0e\x01\x12"},
    -	{"slices", "\x04\xea\x01\fK"},
    -	{"sort", "\xca\x0103"},
    -	{"strconv", "n+:%\x02I"},
    -	{"strings", "n'\x04:\x18\x03\f8\x0f\x02\x02"},
    +	{"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"},
    +	{"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"},
    +	{"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"},
    +	{"runtime/coverage", "\xa0\x01Q"},
    +	{"runtime/debug", "qUW\r\b\x02\x01\x10\x06"},
    +	{"runtime/metrics", "\xb7\x01F-!"},
    +	{"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"},
    +	{"runtime/race", "\xb0\x02"},
    +	{"runtime/race/internal/amd64v1", ""},
    +	{"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"},
    +	{"slices", "\x04\xf0\x01\fK"},
    +	{"sort", "\xca\x0162"},
    +	{"strconv", "n*@%\x03I"},
    +	{"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"},
     	{"structs", ""},
    -	{"sync", "\xc9\x01\vP\x0f\x12"},
    -	{"sync/atomic", "\xc5\x02"},
    -	{"syscall", "n'\x01\x03\x01\x1b\b\x03\x03\x06[\x0e\x01\x12"},
    -	{"testing", "\x03\na\x02\x01X\x0f\x13\r\x04\x1b\x06\x02\x05\x03\x05\x01\x02\x01\x02\x01\f\x02\x02\x02"},
    -	{"testing/fstest", "n\x03z\x01\v%\x11\x03\b\a"},
    -	{"testing/internal/testdeps", "\x02\v\xa7\x01'\x10,\x03\x05\x03\b\x06\x02\r"},
    -	{"testing/iotest", "\x03k\x03z\x04"},
    -	{"testing/quick", "p\x01\x87\x01\x04#\x11\x0f"},
    -	{"testing/slogtest", "\ra\x03\x80\x01.\x05\x11\n"},
    -	{"text/scanner", "\x03nz,*\x02"},
    -	{"text/tabwriter", "qzX"},
    -	{"text/template", "n\x03B8\x01\v\x1f\x01\x05\x01\x02\x05\f\x02\f\x03\x02"},
    -	{"text/template/parse", "\x03n\xb3\x01\v\x01\x11\x02"},
    -	{"time", "n+\x1d\x1d'*\x0e\x02\x11"},
    -	{"time/tzdata", "n\xc6\x01\x11"},
    +	{"sync", "\xc9\x01\x10\x01P\x0e\x13"},
    +	{"sync/atomic", "\xcb\x02"},
    +	{"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"},
    +	{"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"},
    +	{"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"},
    +	{"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"},
    +	{"testing/iotest", "\x03k\x03\x80\x01\x04"},
    +	{"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"},
    +	{"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"},
    +	{"testing/synctest", "\xda\x01`\x11"},
    +	{"text/scanner", "\x03n\x80\x01,*\x02"},
    +	{"text/tabwriter", "q\x80\x01X"},
    +	{"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"},
    +	{"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"},
    +	{"time", "n*\x1e\"(*\r\x02\x12"},
    +	{"time/tzdata", "n\xcb\x01\x12"},
     	{"unicode", ""},
     	{"unicode/utf16", ""},
     	{"unicode/utf8", ""},
    -	{"unique", "\x95\x01>\x01P\x0e\x13\x12"},
    +	{"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"},
     	{"unsafe", ""},
    -	{"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x8c\x01*&"},
    -	{"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xd8\x01\x04\x01"},
    -	{"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x88\x01& \n"},
    +	{"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"},
    +	{"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"},
    +	{"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"},
     	{"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
    -	{"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"},
    -	{"vendor/golang.org/x/crypto/internal/poly1305", "Q\x16\x93\x01"},
    +	{"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"},
    +	{"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"},
     	{"vendor/golang.org/x/net/dns/dnsmessage", "n"},
    -	{"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x14\x1b\x13\r"},
    -	{"vendor/golang.org/x/net/http/httpproxy", "n\x03\x90\x01\x15\x01\x19\x13\r"},
    -	{"vendor/golang.org/x/net/http2/hpack", "\x03k\x03zG"},
    -	{"vendor/golang.org/x/net/idna", "q\x87\x018\x13\x10\x02\x01"},
    -	{"vendor/golang.org/x/net/nettest", "\x03d\a\x03z\x11\x05\x16\x01\f\v\x01\x02\x02\x01\n"},
    -	{"vendor/golang.org/x/sys/cpu", "\x97\x02\r\v\x01\x15"},
    -	{"vendor/golang.org/x/text/secure/bidirule", "n\xd5\x01\x11\x01"},
    -	{"vendor/golang.org/x/text/transform", "\x03k}X"},
    -	{"vendor/golang.org/x/text/unicode/bidi", "\x03\bf~?\x15"},
    -	{"vendor/golang.org/x/text/unicode/norm", "g\nzG\x11\x11"},
    -	{"weak", "\x95\x01\x8f\x01!"},
    +	{"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"},
    +	{"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"},
    +	{"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"},
    +	{"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"},
    +	{"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"},
    +	{"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"},
    +	{"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"},
    +	{"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"},
    +	{"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"},
    +	{"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"},
    +	{"weak", "\x94\x01\x96\x01!"},
     }
    diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    index 00776a31b..c1faa50d3 100644
    --- a/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    +++ b/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go
    @@ -8,17647 +8,17719 @@ package stdlib
     
     var PackageSymbols = map[string][]Symbol{
     	"archive/tar": {
    -		{"(*Header).FileInfo", Method, 1},
    -		{"(*Reader).Next", Method, 0},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Writer).AddFS", Method, 22},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Write", Method, 0},
    -		{"(*Writer).WriteHeader", Method, 0},
    -		{"(Format).String", Method, 10},
    -		{"ErrFieldTooLong", Var, 0},
    -		{"ErrHeader", Var, 0},
    -		{"ErrInsecurePath", Var, 20},
    -		{"ErrWriteAfterClose", Var, 0},
    -		{"ErrWriteTooLong", Var, 0},
    -		{"FileInfoHeader", Func, 1},
    -		{"FileInfoNames", Type, 23},
    -		{"Format", Type, 10},
    -		{"FormatGNU", Const, 10},
    -		{"FormatPAX", Const, 10},
    -		{"FormatUSTAR", Const, 10},
    -		{"FormatUnknown", Const, 10},
    -		{"Header", Type, 0},
    -		{"Header.AccessTime", Field, 0},
    -		{"Header.ChangeTime", Field, 0},
    -		{"Header.Devmajor", Field, 0},
    -		{"Header.Devminor", Field, 0},
    -		{"Header.Format", Field, 10},
    -		{"Header.Gid", Field, 0},
    -		{"Header.Gname", Field, 0},
    -		{"Header.Linkname", Field, 0},
    -		{"Header.ModTime", Field, 0},
    -		{"Header.Mode", Field, 0},
    -		{"Header.Name", Field, 0},
    -		{"Header.PAXRecords", Field, 10},
    -		{"Header.Size", Field, 0},
    -		{"Header.Typeflag", Field, 0},
    -		{"Header.Uid", Field, 0},
    -		{"Header.Uname", Field, 0},
    -		{"Header.Xattrs", Field, 3},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"Reader", Type, 0},
    -		{"TypeBlock", Const, 0},
    -		{"TypeChar", Const, 0},
    -		{"TypeCont", Const, 0},
    -		{"TypeDir", Const, 0},
    -		{"TypeFifo", Const, 0},
    -		{"TypeGNULongLink", Const, 1},
    -		{"TypeGNULongName", Const, 1},
    -		{"TypeGNUSparse", Const, 3},
    -		{"TypeLink", Const, 0},
    -		{"TypeReg", Const, 0},
    -		{"TypeRegA", Const, 0},
    -		{"TypeSymlink", Const, 0},
    -		{"TypeXGlobalHeader", Const, 0},
    -		{"TypeXHeader", Const, 0},
    -		{"Writer", Type, 0},
    +		{"(*Header).FileInfo", Method, 1, ""},
    +		{"(*Reader).Next", Method, 0, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Writer).AddFS", Method, 22, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"(*Writer).WriteHeader", Method, 0, ""},
    +		{"(Format).String", Method, 10, ""},
    +		{"ErrFieldTooLong", Var, 0, ""},
    +		{"ErrHeader", Var, 0, ""},
    +		{"ErrInsecurePath", Var, 20, ""},
    +		{"ErrWriteAfterClose", Var, 0, ""},
    +		{"ErrWriteTooLong", Var, 0, ""},
    +		{"FileInfoHeader", Func, 1, "func(fi fs.FileInfo, link string) (*Header, error)"},
    +		{"FileInfoNames", Type, 23, ""},
    +		{"Format", Type, 10, ""},
    +		{"FormatGNU", Const, 10, ""},
    +		{"FormatPAX", Const, 10, ""},
    +		{"FormatUSTAR", Const, 10, ""},
    +		{"FormatUnknown", Const, 10, ""},
    +		{"Header", Type, 0, ""},
    +		{"Header.AccessTime", Field, 0, ""},
    +		{"Header.ChangeTime", Field, 0, ""},
    +		{"Header.Devmajor", Field, 0, ""},
    +		{"Header.Devminor", Field, 0, ""},
    +		{"Header.Format", Field, 10, ""},
    +		{"Header.Gid", Field, 0, ""},
    +		{"Header.Gname", Field, 0, ""},
    +		{"Header.Linkname", Field, 0, ""},
    +		{"Header.ModTime", Field, 0, ""},
    +		{"Header.Mode", Field, 0, ""},
    +		{"Header.Name", Field, 0, ""},
    +		{"Header.PAXRecords", Field, 10, ""},
    +		{"Header.Size", Field, 0, ""},
    +		{"Header.Typeflag", Field, 0, ""},
    +		{"Header.Uid", Field, 0, ""},
    +		{"Header.Uname", Field, 0, ""},
    +		{"Header.Xattrs", Field, 3, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) *Reader"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"Reader", Type, 0, ""},
    +		{"TypeBlock", Const, 0, ""},
    +		{"TypeChar", Const, 0, ""},
    +		{"TypeCont", Const, 0, ""},
    +		{"TypeDir", Const, 0, ""},
    +		{"TypeFifo", Const, 0, ""},
    +		{"TypeGNULongLink", Const, 1, ""},
    +		{"TypeGNULongName", Const, 1, ""},
    +		{"TypeGNUSparse", Const, 3, ""},
    +		{"TypeLink", Const, 0, ""},
    +		{"TypeReg", Const, 0, ""},
    +		{"TypeRegA", Const, 0, ""},
    +		{"TypeSymlink", Const, 0, ""},
    +		{"TypeXGlobalHeader", Const, 0, ""},
    +		{"TypeXHeader", Const, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"archive/zip": {
    -		{"(*File).DataOffset", Method, 2},
    -		{"(*File).FileInfo", Method, 0},
    -		{"(*File).ModTime", Method, 0},
    -		{"(*File).Mode", Method, 0},
    -		{"(*File).Open", Method, 0},
    -		{"(*File).OpenRaw", Method, 17},
    -		{"(*File).SetModTime", Method, 0},
    -		{"(*File).SetMode", Method, 0},
    -		{"(*FileHeader).FileInfo", Method, 0},
    -		{"(*FileHeader).ModTime", Method, 0},
    -		{"(*FileHeader).Mode", Method, 0},
    -		{"(*FileHeader).SetModTime", Method, 0},
    -		{"(*FileHeader).SetMode", Method, 0},
    -		{"(*ReadCloser).Close", Method, 0},
    -		{"(*ReadCloser).Open", Method, 16},
    -		{"(*ReadCloser).RegisterDecompressor", Method, 6},
    -		{"(*Reader).Open", Method, 16},
    -		{"(*Reader).RegisterDecompressor", Method, 6},
    -		{"(*Writer).AddFS", Method, 22},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Copy", Method, 17},
    -		{"(*Writer).Create", Method, 0},
    -		{"(*Writer).CreateHeader", Method, 0},
    -		{"(*Writer).CreateRaw", Method, 17},
    -		{"(*Writer).Flush", Method, 4},
    -		{"(*Writer).RegisterCompressor", Method, 6},
    -		{"(*Writer).SetComment", Method, 10},
    -		{"(*Writer).SetOffset", Method, 5},
    -		{"Compressor", Type, 2},
    -		{"Decompressor", Type, 2},
    -		{"Deflate", Const, 0},
    -		{"ErrAlgorithm", Var, 0},
    -		{"ErrChecksum", Var, 0},
    -		{"ErrFormat", Var, 0},
    -		{"ErrInsecurePath", Var, 20},
    -		{"File", Type, 0},
    -		{"File.FileHeader", Field, 0},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.CRC32", Field, 0},
    -		{"FileHeader.Comment", Field, 0},
    -		{"FileHeader.CompressedSize", Field, 0},
    -		{"FileHeader.CompressedSize64", Field, 1},
    -		{"FileHeader.CreatorVersion", Field, 0},
    -		{"FileHeader.ExternalAttrs", Field, 0},
    -		{"FileHeader.Extra", Field, 0},
    -		{"FileHeader.Flags", Field, 0},
    -		{"FileHeader.Method", Field, 0},
    -		{"FileHeader.Modified", Field, 10},
    -		{"FileHeader.ModifiedDate", Field, 0},
    -		{"FileHeader.ModifiedTime", Field, 0},
    -		{"FileHeader.Name", Field, 0},
    -		{"FileHeader.NonUTF8", Field, 10},
    -		{"FileHeader.ReaderVersion", Field, 0},
    -		{"FileHeader.UncompressedSize", Field, 0},
    -		{"FileHeader.UncompressedSize64", Field, 1},
    -		{"FileInfoHeader", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"OpenReader", Func, 0},
    -		{"ReadCloser", Type, 0},
    -		{"ReadCloser.Reader", Field, 0},
    -		{"Reader", Type, 0},
    -		{"Reader.Comment", Field, 0},
    -		{"Reader.File", Field, 0},
    -		{"RegisterCompressor", Func, 2},
    -		{"RegisterDecompressor", Func, 2},
    -		{"Store", Const, 0},
    -		{"Writer", Type, 0},
    +		{"(*File).DataOffset", Method, 2, ""},
    +		{"(*File).FileInfo", Method, 0, ""},
    +		{"(*File).ModTime", Method, 0, ""},
    +		{"(*File).Mode", Method, 0, ""},
    +		{"(*File).Open", Method, 0, ""},
    +		{"(*File).OpenRaw", Method, 17, ""},
    +		{"(*File).SetModTime", Method, 0, ""},
    +		{"(*File).SetMode", Method, 0, ""},
    +		{"(*FileHeader).FileInfo", Method, 0, ""},
    +		{"(*FileHeader).ModTime", Method, 0, ""},
    +		{"(*FileHeader).Mode", Method, 0, ""},
    +		{"(*FileHeader).SetModTime", Method, 0, ""},
    +		{"(*FileHeader).SetMode", Method, 0, ""},
    +		{"(*ReadCloser).Close", Method, 0, ""},
    +		{"(*ReadCloser).Open", Method, 16, ""},
    +		{"(*ReadCloser).RegisterDecompressor", Method, 6, ""},
    +		{"(*Reader).Open", Method, 16, ""},
    +		{"(*Reader).RegisterDecompressor", Method, 6, ""},
    +		{"(*Writer).AddFS", Method, 22, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Copy", Method, 17, ""},
    +		{"(*Writer).Create", Method, 0, ""},
    +		{"(*Writer).CreateHeader", Method, 0, ""},
    +		{"(*Writer).CreateRaw", Method, 17, ""},
    +		{"(*Writer).Flush", Method, 4, ""},
    +		{"(*Writer).RegisterCompressor", Method, 6, ""},
    +		{"(*Writer).SetComment", Method, 10, ""},
    +		{"(*Writer).SetOffset", Method, 5, ""},
    +		{"Compressor", Type, 2, ""},
    +		{"Decompressor", Type, 2, ""},
    +		{"Deflate", Const, 0, ""},
    +		{"ErrAlgorithm", Var, 0, ""},
    +		{"ErrChecksum", Var, 0, ""},
    +		{"ErrFormat", Var, 0, ""},
    +		{"ErrInsecurePath", Var, 20, ""},
    +		{"File", Type, 0, ""},
    +		{"File.FileHeader", Field, 0, ""},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.CRC32", Field, 0, ""},
    +		{"FileHeader.Comment", Field, 0, ""},
    +		{"FileHeader.CompressedSize", Field, 0, ""},
    +		{"FileHeader.CompressedSize64", Field, 1, ""},
    +		{"FileHeader.CreatorVersion", Field, 0, ""},
    +		{"FileHeader.ExternalAttrs", Field, 0, ""},
    +		{"FileHeader.Extra", Field, 0, ""},
    +		{"FileHeader.Flags", Field, 0, ""},
    +		{"FileHeader.Method", Field, 0, ""},
    +		{"FileHeader.Modified", Field, 10, ""},
    +		{"FileHeader.ModifiedDate", Field, 0, ""},
    +		{"FileHeader.ModifiedTime", Field, 0, ""},
    +		{"FileHeader.Name", Field, 0, ""},
    +		{"FileHeader.NonUTF8", Field, 10, ""},
    +		{"FileHeader.ReaderVersion", Field, 0, ""},
    +		{"FileHeader.UncompressedSize", Field, 0, ""},
    +		{"FileHeader.UncompressedSize64", Field, 1, ""},
    +		{"FileInfoHeader", Func, 0, "func(fi fs.FileInfo) (*FileHeader, error)"},
    +		{"NewReader", Func, 0, "func(r io.ReaderAt, size int64) (*Reader, error)"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"OpenReader", Func, 0, "func(name string) (*ReadCloser, error)"},
    +		{"ReadCloser", Type, 0, ""},
    +		{"ReadCloser.Reader", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Reader.Comment", Field, 0, ""},
    +		{"Reader.File", Field, 0, ""},
    +		{"RegisterCompressor", Func, 2, "func(method uint16, comp Compressor)"},
    +		{"RegisterDecompressor", Func, 2, "func(method uint16, dcomp Decompressor)"},
    +		{"Store", Const, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"bufio": {
    -		{"(*Reader).Buffered", Method, 0},
    -		{"(*Reader).Discard", Method, 5},
    -		{"(*Reader).Peek", Method, 0},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).ReadByte", Method, 0},
    -		{"(*Reader).ReadBytes", Method, 0},
    -		{"(*Reader).ReadLine", Method, 0},
    -		{"(*Reader).ReadRune", Method, 0},
    -		{"(*Reader).ReadSlice", Method, 0},
    -		{"(*Reader).ReadString", Method, 0},
    -		{"(*Reader).Reset", Method, 2},
    -		{"(*Reader).Size", Method, 10},
    -		{"(*Reader).UnreadByte", Method, 0},
    -		{"(*Reader).UnreadRune", Method, 0},
    -		{"(*Reader).WriteTo", Method, 1},
    -		{"(*Scanner).Buffer", Method, 6},
    -		{"(*Scanner).Bytes", Method, 1},
    -		{"(*Scanner).Err", Method, 1},
    -		{"(*Scanner).Scan", Method, 1},
    -		{"(*Scanner).Split", Method, 1},
    -		{"(*Scanner).Text", Method, 1},
    -		{"(*Writer).Available", Method, 0},
    -		{"(*Writer).AvailableBuffer", Method, 18},
    -		{"(*Writer).Buffered", Method, 0},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).ReadFrom", Method, 1},
    -		{"(*Writer).Reset", Method, 2},
    -		{"(*Writer).Size", Method, 10},
    -		{"(*Writer).Write", Method, 0},
    -		{"(*Writer).WriteByte", Method, 0},
    -		{"(*Writer).WriteRune", Method, 0},
    -		{"(*Writer).WriteString", Method, 0},
    -		{"(ReadWriter).Available", Method, 0},
    -		{"(ReadWriter).AvailableBuffer", Method, 18},
    -		{"(ReadWriter).Discard", Method, 5},
    -		{"(ReadWriter).Flush", Method, 0},
    -		{"(ReadWriter).Peek", Method, 0},
    -		{"(ReadWriter).Read", Method, 0},
    -		{"(ReadWriter).ReadByte", Method, 0},
    -		{"(ReadWriter).ReadBytes", Method, 0},
    -		{"(ReadWriter).ReadFrom", Method, 1},
    -		{"(ReadWriter).ReadLine", Method, 0},
    -		{"(ReadWriter).ReadRune", Method, 0},
    -		{"(ReadWriter).ReadSlice", Method, 0},
    -		{"(ReadWriter).ReadString", Method, 0},
    -		{"(ReadWriter).UnreadByte", Method, 0},
    -		{"(ReadWriter).UnreadRune", Method, 0},
    -		{"(ReadWriter).Write", Method, 0},
    -		{"(ReadWriter).WriteByte", Method, 0},
    -		{"(ReadWriter).WriteRune", Method, 0},
    -		{"(ReadWriter).WriteString", Method, 0},
    -		{"(ReadWriter).WriteTo", Method, 1},
    -		{"ErrAdvanceTooFar", Var, 1},
    -		{"ErrBadReadCount", Var, 15},
    -		{"ErrBufferFull", Var, 0},
    -		{"ErrFinalToken", Var, 6},
    -		{"ErrInvalidUnreadByte", Var, 0},
    -		{"ErrInvalidUnreadRune", Var, 0},
    -		{"ErrNegativeAdvance", Var, 1},
    -		{"ErrNegativeCount", Var, 0},
    -		{"ErrTooLong", Var, 1},
    -		{"MaxScanTokenSize", Const, 1},
    -		{"NewReadWriter", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"NewReaderSize", Func, 0},
    -		{"NewScanner", Func, 1},
    -		{"NewWriter", Func, 0},
    -		{"NewWriterSize", Func, 0},
    -		{"ReadWriter", Type, 0},
    -		{"ReadWriter.Reader", Field, 0},
    -		{"ReadWriter.Writer", Field, 0},
    -		{"Reader", Type, 0},
    -		{"ScanBytes", Func, 1},
    -		{"ScanLines", Func, 1},
    -		{"ScanRunes", Func, 1},
    -		{"ScanWords", Func, 1},
    -		{"Scanner", Type, 1},
    -		{"SplitFunc", Type, 1},
    -		{"Writer", Type, 0},
    +		{"(*Reader).Buffered", Method, 0, ""},
    +		{"(*Reader).Discard", Method, 5, ""},
    +		{"(*Reader).Peek", Method, 0, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).ReadByte", Method, 0, ""},
    +		{"(*Reader).ReadBytes", Method, 0, ""},
    +		{"(*Reader).ReadLine", Method, 0, ""},
    +		{"(*Reader).ReadRune", Method, 0, ""},
    +		{"(*Reader).ReadSlice", Method, 0, ""},
    +		{"(*Reader).ReadString", Method, 0, ""},
    +		{"(*Reader).Reset", Method, 2, ""},
    +		{"(*Reader).Size", Method, 10, ""},
    +		{"(*Reader).UnreadByte", Method, 0, ""},
    +		{"(*Reader).UnreadRune", Method, 0, ""},
    +		{"(*Reader).WriteTo", Method, 1, ""},
    +		{"(*Scanner).Buffer", Method, 6, ""},
    +		{"(*Scanner).Bytes", Method, 1, ""},
    +		{"(*Scanner).Err", Method, 1, ""},
    +		{"(*Scanner).Scan", Method, 1, ""},
    +		{"(*Scanner).Split", Method, 1, ""},
    +		{"(*Scanner).Text", Method, 1, ""},
    +		{"(*Writer).Available", Method, 0, ""},
    +		{"(*Writer).AvailableBuffer", Method, 18, ""},
    +		{"(*Writer).Buffered", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).ReadFrom", Method, 1, ""},
    +		{"(*Writer).Reset", Method, 2, ""},
    +		{"(*Writer).Size", Method, 10, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"(*Writer).WriteByte", Method, 0, ""},
    +		{"(*Writer).WriteRune", Method, 0, ""},
    +		{"(*Writer).WriteString", Method, 0, ""},
    +		{"(ReadWriter).Available", Method, 0, ""},
    +		{"(ReadWriter).AvailableBuffer", Method, 18, ""},
    +		{"(ReadWriter).Discard", Method, 5, ""},
    +		{"(ReadWriter).Flush", Method, 0, ""},
    +		{"(ReadWriter).Peek", Method, 0, ""},
    +		{"(ReadWriter).Read", Method, 0, ""},
    +		{"(ReadWriter).ReadByte", Method, 0, ""},
    +		{"(ReadWriter).ReadBytes", Method, 0, ""},
    +		{"(ReadWriter).ReadFrom", Method, 1, ""},
    +		{"(ReadWriter).ReadLine", Method, 0, ""},
    +		{"(ReadWriter).ReadRune", Method, 0, ""},
    +		{"(ReadWriter).ReadSlice", Method, 0, ""},
    +		{"(ReadWriter).ReadString", Method, 0, ""},
    +		{"(ReadWriter).UnreadByte", Method, 0, ""},
    +		{"(ReadWriter).UnreadRune", Method, 0, ""},
    +		{"(ReadWriter).Write", Method, 0, ""},
    +		{"(ReadWriter).WriteByte", Method, 0, ""},
    +		{"(ReadWriter).WriteRune", Method, 0, ""},
    +		{"(ReadWriter).WriteString", Method, 0, ""},
    +		{"(ReadWriter).WriteTo", Method, 1, ""},
    +		{"ErrAdvanceTooFar", Var, 1, ""},
    +		{"ErrBadReadCount", Var, 15, ""},
    +		{"ErrBufferFull", Var, 0, ""},
    +		{"ErrFinalToken", Var, 6, ""},
    +		{"ErrInvalidUnreadByte", Var, 0, ""},
    +		{"ErrInvalidUnreadRune", Var, 0, ""},
    +		{"ErrNegativeAdvance", Var, 1, ""},
    +		{"ErrNegativeCount", Var, 0, ""},
    +		{"ErrTooLong", Var, 1, ""},
    +		{"MaxScanTokenSize", Const, 1, ""},
    +		{"NewReadWriter", Func, 0, "func(r *Reader, w *Writer) *ReadWriter"},
    +		{"NewReader", Func, 0, "func(rd io.Reader) *Reader"},
    +		{"NewReaderSize", Func, 0, "func(rd io.Reader, size int) *Reader"},
    +		{"NewScanner", Func, 1, "func(r io.Reader) *Scanner"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"NewWriterSize", Func, 0, "func(w io.Writer, size int) *Writer"},
    +		{"ReadWriter", Type, 0, ""},
    +		{"ReadWriter.Reader", Field, 0, ""},
    +		{"ReadWriter.Writer", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"ScanBytes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
    +		{"ScanLines", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
    +		{"ScanRunes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
    +		{"ScanWords", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
    +		{"Scanner", Type, 1, ""},
    +		{"SplitFunc", Type, 1, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"bytes": {
    -		{"(*Buffer).Available", Method, 21},
    -		{"(*Buffer).AvailableBuffer", Method, 21},
    -		{"(*Buffer).Bytes", Method, 0},
    -		{"(*Buffer).Cap", Method, 5},
    -		{"(*Buffer).Grow", Method, 1},
    -		{"(*Buffer).Len", Method, 0},
    -		{"(*Buffer).Next", Method, 0},
    -		{"(*Buffer).Read", Method, 0},
    -		{"(*Buffer).ReadByte", Method, 0},
    -		{"(*Buffer).ReadBytes", Method, 0},
    -		{"(*Buffer).ReadFrom", Method, 0},
    -		{"(*Buffer).ReadRune", Method, 0},
    -		{"(*Buffer).ReadString", Method, 0},
    -		{"(*Buffer).Reset", Method, 0},
    -		{"(*Buffer).String", Method, 0},
    -		{"(*Buffer).Truncate", Method, 0},
    -		{"(*Buffer).UnreadByte", Method, 0},
    -		{"(*Buffer).UnreadRune", Method, 0},
    -		{"(*Buffer).Write", Method, 0},
    -		{"(*Buffer).WriteByte", Method, 0},
    -		{"(*Buffer).WriteRune", Method, 0},
    -		{"(*Buffer).WriteString", Method, 0},
    -		{"(*Buffer).WriteTo", Method, 0},
    -		{"(*Reader).Len", Method, 0},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).ReadAt", Method, 0},
    -		{"(*Reader).ReadByte", Method, 0},
    -		{"(*Reader).ReadRune", Method, 0},
    -		{"(*Reader).Reset", Method, 7},
    -		{"(*Reader).Seek", Method, 0},
    -		{"(*Reader).Size", Method, 5},
    -		{"(*Reader).UnreadByte", Method, 0},
    -		{"(*Reader).UnreadRune", Method, 0},
    -		{"(*Reader).WriteTo", Method, 1},
    -		{"Buffer", Type, 0},
    -		{"Clone", Func, 20},
    -		{"Compare", Func, 0},
    -		{"Contains", Func, 0},
    -		{"ContainsAny", Func, 7},
    -		{"ContainsFunc", Func, 21},
    -		{"ContainsRune", Func, 7},
    -		{"Count", Func, 0},
    -		{"Cut", Func, 18},
    -		{"CutPrefix", Func, 20},
    -		{"CutSuffix", Func, 20},
    -		{"Equal", Func, 0},
    -		{"EqualFold", Func, 0},
    -		{"ErrTooLarge", Var, 0},
    -		{"Fields", Func, 0},
    -		{"FieldsFunc", Func, 0},
    -		{"FieldsFuncSeq", Func, 24},
    -		{"FieldsSeq", Func, 24},
    -		{"HasPrefix", Func, 0},
    -		{"HasSuffix", Func, 0},
    -		{"Index", Func, 0},
    -		{"IndexAny", Func, 0},
    -		{"IndexByte", Func, 0},
    -		{"IndexFunc", Func, 0},
    -		{"IndexRune", Func, 0},
    -		{"Join", Func, 0},
    -		{"LastIndex", Func, 0},
    -		{"LastIndexAny", Func, 0},
    -		{"LastIndexByte", Func, 5},
    -		{"LastIndexFunc", Func, 0},
    -		{"Lines", Func, 24},
    -		{"Map", Func, 0},
    -		{"MinRead", Const, 0},
    -		{"NewBuffer", Func, 0},
    -		{"NewBufferString", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"Reader", Type, 0},
    -		{"Repeat", Func, 0},
    -		{"Replace", Func, 0},
    -		{"ReplaceAll", Func, 12},
    -		{"Runes", Func, 0},
    -		{"Split", Func, 0},
    -		{"SplitAfter", Func, 0},
    -		{"SplitAfterN", Func, 0},
    -		{"SplitAfterSeq", Func, 24},
    -		{"SplitN", Func, 0},
    -		{"SplitSeq", Func, 24},
    -		{"Title", Func, 0},
    -		{"ToLower", Func, 0},
    -		{"ToLowerSpecial", Func, 0},
    -		{"ToTitle", Func, 0},
    -		{"ToTitleSpecial", Func, 0},
    -		{"ToUpper", Func, 0},
    -		{"ToUpperSpecial", Func, 0},
    -		{"ToValidUTF8", Func, 13},
    -		{"Trim", Func, 0},
    -		{"TrimFunc", Func, 0},
    -		{"TrimLeft", Func, 0},
    -		{"TrimLeftFunc", Func, 0},
    -		{"TrimPrefix", Func, 1},
    -		{"TrimRight", Func, 0},
    -		{"TrimRightFunc", Func, 0},
    -		{"TrimSpace", Func, 0},
    -		{"TrimSuffix", Func, 1},
    +		{"(*Buffer).Available", Method, 21, ""},
    +		{"(*Buffer).AvailableBuffer", Method, 21, ""},
    +		{"(*Buffer).Bytes", Method, 0, ""},
    +		{"(*Buffer).Cap", Method, 5, ""},
    +		{"(*Buffer).Grow", Method, 1, ""},
    +		{"(*Buffer).Len", Method, 0, ""},
    +		{"(*Buffer).Next", Method, 0, ""},
    +		{"(*Buffer).Read", Method, 0, ""},
    +		{"(*Buffer).ReadByte", Method, 0, ""},
    +		{"(*Buffer).ReadBytes", Method, 0, ""},
    +		{"(*Buffer).ReadFrom", Method, 0, ""},
    +		{"(*Buffer).ReadRune", Method, 0, ""},
    +		{"(*Buffer).ReadString", Method, 0, ""},
    +		{"(*Buffer).Reset", Method, 0, ""},
    +		{"(*Buffer).String", Method, 0, ""},
    +		{"(*Buffer).Truncate", Method, 0, ""},
    +		{"(*Buffer).UnreadByte", Method, 0, ""},
    +		{"(*Buffer).UnreadRune", Method, 0, ""},
    +		{"(*Buffer).Write", Method, 0, ""},
    +		{"(*Buffer).WriteByte", Method, 0, ""},
    +		{"(*Buffer).WriteRune", Method, 0, ""},
    +		{"(*Buffer).WriteString", Method, 0, ""},
    +		{"(*Buffer).WriteTo", Method, 0, ""},
    +		{"(*Reader).Len", Method, 0, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).ReadAt", Method, 0, ""},
    +		{"(*Reader).ReadByte", Method, 0, ""},
    +		{"(*Reader).ReadRune", Method, 0, ""},
    +		{"(*Reader).Reset", Method, 7, ""},
    +		{"(*Reader).Seek", Method, 0, ""},
    +		{"(*Reader).Size", Method, 5, ""},
    +		{"(*Reader).UnreadByte", Method, 0, ""},
    +		{"(*Reader).UnreadRune", Method, 0, ""},
    +		{"(*Reader).WriteTo", Method, 1, ""},
    +		{"Buffer", Type, 0, ""},
    +		{"Clone", Func, 20, "func(b []byte) []byte"},
    +		{"Compare", Func, 0, "func(a []byte, b []byte) int"},
    +		{"Contains", Func, 0, "func(b []byte, subslice []byte) bool"},
    +		{"ContainsAny", Func, 7, "func(b []byte, chars string) bool"},
    +		{"ContainsFunc", Func, 21, "func(b []byte, f func(rune) bool) bool"},
    +		{"ContainsRune", Func, 7, "func(b []byte, r rune) bool"},
    +		{"Count", Func, 0, "func(s []byte, sep []byte) int"},
    +		{"Cut", Func, 18, "func(s []byte, sep []byte) (before []byte, after []byte, found bool)"},
    +		{"CutPrefix", Func, 20, "func(s []byte, prefix []byte) (after []byte, found bool)"},
    +		{"CutSuffix", Func, 20, "func(s []byte, suffix []byte) (before []byte, found bool)"},
    +		{"Equal", Func, 0, "func(a []byte, b []byte) bool"},
    +		{"EqualFold", Func, 0, "func(s []byte, t []byte) bool"},
    +		{"ErrTooLarge", Var, 0, ""},
    +		{"Fields", Func, 0, "func(s []byte) [][]byte"},
    +		{"FieldsFunc", Func, 0, "func(s []byte, f func(rune) bool) [][]byte"},
    +		{"FieldsFuncSeq", Func, 24, "func(s []byte, f func(rune) bool) iter.Seq[[]byte]"},
    +		{"FieldsSeq", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
    +		{"HasPrefix", Func, 0, "func(s []byte, prefix []byte) bool"},
    +		{"HasSuffix", Func, 0, "func(s []byte, suffix []byte) bool"},
    +		{"Index", Func, 0, "func(s []byte, sep []byte) int"},
    +		{"IndexAny", Func, 0, "func(s []byte, chars string) int"},
    +		{"IndexByte", Func, 0, "func(b []byte, c byte) int"},
    +		{"IndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
    +		{"IndexRune", Func, 0, "func(s []byte, r rune) int"},
    +		{"Join", Func, 0, "func(s [][]byte, sep []byte) []byte"},
    +		{"LastIndex", Func, 0, "func(s []byte, sep []byte) int"},
    +		{"LastIndexAny", Func, 0, "func(s []byte, chars string) int"},
    +		{"LastIndexByte", Func, 5, "func(s []byte, c byte) int"},
    +		{"LastIndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
    +		{"Lines", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
    +		{"Map", Func, 0, "func(mapping func(r rune) rune, s []byte) []byte"},
    +		{"MinRead", Const, 0, ""},
    +		{"NewBuffer", Func, 0, "func(buf []byte) *Buffer"},
    +		{"NewBufferString", Func, 0, "func(s string) *Buffer"},
    +		{"NewReader", Func, 0, "func(b []byte) *Reader"},
    +		{"Reader", Type, 0, ""},
    +		{"Repeat", Func, 0, "func(b []byte, count int) []byte"},
    +		{"Replace", Func, 0, "func(s []byte, old []byte, new []byte, n int) []byte"},
    +		{"ReplaceAll", Func, 12, "func(s []byte, old []byte, new []byte) []byte"},
    +		{"Runes", Func, 0, "func(s []byte) []rune"},
    +		{"Split", Func, 0, "func(s []byte, sep []byte) [][]byte"},
    +		{"SplitAfter", Func, 0, "func(s []byte, sep []byte) [][]byte"},
    +		{"SplitAfterN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
    +		{"SplitAfterSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
    +		{"SplitN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
    +		{"SplitSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
    +		{"Title", Func, 0, "func(s []byte) []byte"},
    +		{"ToLower", Func, 0, "func(s []byte) []byte"},
    +		{"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
    +		{"ToTitle", Func, 0, "func(s []byte) []byte"},
    +		{"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
    +		{"ToUpper", Func, 0, "func(s []byte) []byte"},
    +		{"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
    +		{"ToValidUTF8", Func, 13, "func(s []byte, replacement []byte) []byte"},
    +		{"Trim", Func, 0, "func(s []byte, cutset string) []byte"},
    +		{"TrimFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
    +		{"TrimLeft", Func, 0, "func(s []byte, cutset string) []byte"},
    +		{"TrimLeftFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
    +		{"TrimPrefix", Func, 1, "func(s []byte, prefix []byte) []byte"},
    +		{"TrimRight", Func, 0, "func(s []byte, cutset string) []byte"},
    +		{"TrimRightFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
    +		{"TrimSpace", Func, 0, "func(s []byte) []byte"},
    +		{"TrimSuffix", Func, 1, "func(s []byte, suffix []byte) []byte"},
     	},
     	"cmp": {
    -		{"Compare", Func, 21},
    -		{"Less", Func, 21},
    -		{"Or", Func, 22},
    -		{"Ordered", Type, 21},
    +		{"Compare", Func, 21, "func[T Ordered](x T, y T) int"},
    +		{"Less", Func, 21, "func[T Ordered](x T, y T) bool"},
    +		{"Or", Func, 22, "func[T comparable](vals ...T) T"},
    +		{"Ordered", Type, 21, ""},
     	},
     	"compress/bzip2": {
    -		{"(StructuralError).Error", Method, 0},
    -		{"NewReader", Func, 0},
    -		{"StructuralError", Type, 0},
    +		{"(StructuralError).Error", Method, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"StructuralError", Type, 0, ""},
     	},
     	"compress/flate": {
    -		{"(*ReadError).Error", Method, 0},
    -		{"(*WriteError).Error", Method, 0},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Reset", Method, 2},
    -		{"(*Writer).Write", Method, 0},
    -		{"(CorruptInputError).Error", Method, 0},
    -		{"(InternalError).Error", Method, 0},
    -		{"BestCompression", Const, 0},
    -		{"BestSpeed", Const, 0},
    -		{"CorruptInputError", Type, 0},
    -		{"DefaultCompression", Const, 0},
    -		{"HuffmanOnly", Const, 7},
    -		{"InternalError", Type, 0},
    -		{"NewReader", Func, 0},
    -		{"NewReaderDict", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"NewWriterDict", Func, 0},
    -		{"NoCompression", Const, 0},
    -		{"ReadError", Type, 0},
    -		{"ReadError.Err", Field, 0},
    -		{"ReadError.Offset", Field, 0},
    -		{"Reader", Type, 0},
    -		{"Resetter", Type, 4},
    -		{"WriteError", Type, 0},
    -		{"WriteError.Err", Field, 0},
    -		{"WriteError.Offset", Field, 0},
    -		{"Writer", Type, 0},
    +		{"(*ReadError).Error", Method, 0, ""},
    +		{"(*WriteError).Error", Method, 0, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Reset", Method, 2, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"(CorruptInputError).Error", Method, 0, ""},
    +		{"(InternalError).Error", Method, 0, ""},
    +		{"BestCompression", Const, 0, ""},
    +		{"BestSpeed", Const, 0, ""},
    +		{"CorruptInputError", Type, 0, ""},
    +		{"DefaultCompression", Const, 0, ""},
    +		{"HuffmanOnly", Const, 7, ""},
    +		{"InternalError", Type, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) io.ReadCloser"},
    +		{"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) io.ReadCloser"},
    +		{"NewWriter", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
    +		{"NewWriterDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
    +		{"NoCompression", Const, 0, ""},
    +		{"ReadError", Type, 0, ""},
    +		{"ReadError.Err", Field, 0, ""},
    +		{"ReadError.Offset", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Resetter", Type, 4, ""},
    +		{"WriteError", Type, 0, ""},
    +		{"WriteError.Err", Field, 0, ""},
    +		{"WriteError.Offset", Field, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"compress/gzip": {
    -		{"(*Reader).Close", Method, 0},
    -		{"(*Reader).Multistream", Method, 4},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).Reset", Method, 3},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Flush", Method, 1},
    -		{"(*Writer).Reset", Method, 2},
    -		{"(*Writer).Write", Method, 0},
    -		{"BestCompression", Const, 0},
    -		{"BestSpeed", Const, 0},
    -		{"DefaultCompression", Const, 0},
    -		{"ErrChecksum", Var, 0},
    -		{"ErrHeader", Var, 0},
    -		{"Header", Type, 0},
    -		{"Header.Comment", Field, 0},
    -		{"Header.Extra", Field, 0},
    -		{"Header.ModTime", Field, 0},
    -		{"Header.Name", Field, 0},
    -		{"Header.OS", Field, 0},
    -		{"HuffmanOnly", Const, 8},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"NewWriterLevel", Func, 0},
    -		{"NoCompression", Const, 0},
    -		{"Reader", Type, 0},
    -		{"Reader.Header", Field, 0},
    -		{"Writer", Type, 0},
    -		{"Writer.Header", Field, 0},
    +		{"(*Reader).Close", Method, 0, ""},
    +		{"(*Reader).Multistream", Method, 4, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).Reset", Method, 3, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 1, ""},
    +		{"(*Writer).Reset", Method, 2, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"BestCompression", Const, 0, ""},
    +		{"BestSpeed", Const, 0, ""},
    +		{"DefaultCompression", Const, 0, ""},
    +		{"ErrChecksum", Var, 0, ""},
    +		{"ErrHeader", Var, 0, ""},
    +		{"Header", Type, 0, ""},
    +		{"Header.Comment", Field, 0, ""},
    +		{"Header.Extra", Field, 0, ""},
    +		{"Header.ModTime", Field, 0, ""},
    +		{"Header.Name", Field, 0, ""},
    +		{"Header.OS", Field, 0, ""},
    +		{"HuffmanOnly", Const, 8, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) (*Reader, error)"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
    +		{"NoCompression", Const, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Reader.Header", Field, 0, ""},
    +		{"Writer", Type, 0, ""},
    +		{"Writer.Header", Field, 0, ""},
     	},
     	"compress/lzw": {
    -		{"(*Reader).Close", Method, 17},
    -		{"(*Reader).Read", Method, 17},
    -		{"(*Reader).Reset", Method, 17},
    -		{"(*Writer).Close", Method, 17},
    -		{"(*Writer).Reset", Method, 17},
    -		{"(*Writer).Write", Method, 17},
    -		{"LSB", Const, 0},
    -		{"MSB", Const, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"Order", Type, 0},
    -		{"Reader", Type, 17},
    -		{"Writer", Type, 17},
    +		{"(*Reader).Close", Method, 17, ""},
    +		{"(*Reader).Read", Method, 17, ""},
    +		{"(*Reader).Reset", Method, 17, ""},
    +		{"(*Writer).Close", Method, 17, ""},
    +		{"(*Writer).Reset", Method, 17, ""},
    +		{"(*Writer).Write", Method, 17, ""},
    +		{"LSB", Const, 0, ""},
    +		{"MSB", Const, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader, order Order, litWidth int) io.ReadCloser"},
    +		{"NewWriter", Func, 0, "func(w io.Writer, order Order, litWidth int) io.WriteCloser"},
    +		{"Order", Type, 0, ""},
    +		{"Reader", Type, 17, ""},
    +		{"Writer", Type, 17, ""},
     	},
     	"compress/zlib": {
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Reset", Method, 2},
    -		{"(*Writer).Write", Method, 0},
    -		{"BestCompression", Const, 0},
    -		{"BestSpeed", Const, 0},
    -		{"DefaultCompression", Const, 0},
    -		{"ErrChecksum", Var, 0},
    -		{"ErrDictionary", Var, 0},
    -		{"ErrHeader", Var, 0},
    -		{"HuffmanOnly", Const, 8},
    -		{"NewReader", Func, 0},
    -		{"NewReaderDict", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"NewWriterLevel", Func, 0},
    -		{"NewWriterLevelDict", Func, 0},
    -		{"NoCompression", Const, 0},
    -		{"Resetter", Type, 4},
    -		{"Writer", Type, 0},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Reset", Method, 2, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"BestCompression", Const, 0, ""},
    +		{"BestSpeed", Const, 0, ""},
    +		{"DefaultCompression", Const, 0, ""},
    +		{"ErrChecksum", Var, 0, ""},
    +		{"ErrDictionary", Var, 0, ""},
    +		{"ErrHeader", Var, 0, ""},
    +		{"HuffmanOnly", Const, 8, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) (io.ReadCloser, error)"},
    +		{"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) (io.ReadCloser, error)"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
    +		{"NewWriterLevelDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
    +		{"NoCompression", Const, 0, ""},
    +		{"Resetter", Type, 4, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"container/heap": {
    -		{"Fix", Func, 2},
    -		{"Init", Func, 0},
    -		{"Interface", Type, 0},
    -		{"Pop", Func, 0},
    -		{"Push", Func, 0},
    -		{"Remove", Func, 0},
    +		{"Fix", Func, 2, "func(h Interface, i int)"},
    +		{"Init", Func, 0, "func(h Interface)"},
    +		{"Interface", Type, 0, ""},
    +		{"Pop", Func, 0, "func(h Interface) any"},
    +		{"Push", Func, 0, "func(h Interface, x any)"},
    +		{"Remove", Func, 0, "func(h Interface, i int) any"},
     	},
     	"container/list": {
    -		{"(*Element).Next", Method, 0},
    -		{"(*Element).Prev", Method, 0},
    -		{"(*List).Back", Method, 0},
    -		{"(*List).Front", Method, 0},
    -		{"(*List).Init", Method, 0},
    -		{"(*List).InsertAfter", Method, 0},
    -		{"(*List).InsertBefore", Method, 0},
    -		{"(*List).Len", Method, 0},
    -		{"(*List).MoveAfter", Method, 2},
    -		{"(*List).MoveBefore", Method, 2},
    -		{"(*List).MoveToBack", Method, 0},
    -		{"(*List).MoveToFront", Method, 0},
    -		{"(*List).PushBack", Method, 0},
    -		{"(*List).PushBackList", Method, 0},
    -		{"(*List).PushFront", Method, 0},
    -		{"(*List).PushFrontList", Method, 0},
    -		{"(*List).Remove", Method, 0},
    -		{"Element", Type, 0},
    -		{"Element.Value", Field, 0},
    -		{"List", Type, 0},
    -		{"New", Func, 0},
    +		{"(*Element).Next", Method, 0, ""},
    +		{"(*Element).Prev", Method, 0, ""},
    +		{"(*List).Back", Method, 0, ""},
    +		{"(*List).Front", Method, 0, ""},
    +		{"(*List).Init", Method, 0, ""},
    +		{"(*List).InsertAfter", Method, 0, ""},
    +		{"(*List).InsertBefore", Method, 0, ""},
    +		{"(*List).Len", Method, 0, ""},
    +		{"(*List).MoveAfter", Method, 2, ""},
    +		{"(*List).MoveBefore", Method, 2, ""},
    +		{"(*List).MoveToBack", Method, 0, ""},
    +		{"(*List).MoveToFront", Method, 0, ""},
    +		{"(*List).PushBack", Method, 0, ""},
    +		{"(*List).PushBackList", Method, 0, ""},
    +		{"(*List).PushFront", Method, 0, ""},
    +		{"(*List).PushFrontList", Method, 0, ""},
    +		{"(*List).Remove", Method, 0, ""},
    +		{"Element", Type, 0, ""},
    +		{"Element.Value", Field, 0, ""},
    +		{"List", Type, 0, ""},
    +		{"New", Func, 0, "func() *List"},
     	},
     	"container/ring": {
    -		{"(*Ring).Do", Method, 0},
    -		{"(*Ring).Len", Method, 0},
    -		{"(*Ring).Link", Method, 0},
    -		{"(*Ring).Move", Method, 0},
    -		{"(*Ring).Next", Method, 0},
    -		{"(*Ring).Prev", Method, 0},
    -		{"(*Ring).Unlink", Method, 0},
    -		{"New", Func, 0},
    -		{"Ring", Type, 0},
    -		{"Ring.Value", Field, 0},
    +		{"(*Ring).Do", Method, 0, ""},
    +		{"(*Ring).Len", Method, 0, ""},
    +		{"(*Ring).Link", Method, 0, ""},
    +		{"(*Ring).Move", Method, 0, ""},
    +		{"(*Ring).Next", Method, 0, ""},
    +		{"(*Ring).Prev", Method, 0, ""},
    +		{"(*Ring).Unlink", Method, 0, ""},
    +		{"New", Func, 0, "func(n int) *Ring"},
    +		{"Ring", Type, 0, ""},
    +		{"Ring.Value", Field, 0, ""},
     	},
     	"context": {
    -		{"AfterFunc", Func, 21},
    -		{"Background", Func, 7},
    -		{"CancelCauseFunc", Type, 20},
    -		{"CancelFunc", Type, 7},
    -		{"Canceled", Var, 7},
    -		{"Cause", Func, 20},
    -		{"Context", Type, 7},
    -		{"DeadlineExceeded", Var, 7},
    -		{"TODO", Func, 7},
    -		{"WithCancel", Func, 7},
    -		{"WithCancelCause", Func, 20},
    -		{"WithDeadline", Func, 7},
    -		{"WithDeadlineCause", Func, 21},
    -		{"WithTimeout", Func, 7},
    -		{"WithTimeoutCause", Func, 21},
    -		{"WithValue", Func, 7},
    -		{"WithoutCancel", Func, 21},
    +		{"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"},
    +		{"Background", Func, 7, "func() Context"},
    +		{"CancelCauseFunc", Type, 20, ""},
    +		{"CancelFunc", Type, 7, ""},
    +		{"Canceled", Var, 7, ""},
    +		{"Cause", Func, 20, "func(c Context) error"},
    +		{"Context", Type, 7, ""},
    +		{"DeadlineExceeded", Var, 7, ""},
    +		{"TODO", Func, 7, "func() Context"},
    +		{"WithCancel", Func, 7, "func(parent Context) (ctx Context, cancel CancelFunc)"},
    +		{"WithCancelCause", Func, 20, "func(parent Context) (ctx Context, cancel CancelCauseFunc)"},
    +		{"WithDeadline", Func, 7, "func(parent Context, d time.Time) (Context, CancelFunc)"},
    +		{"WithDeadlineCause", Func, 21, "func(parent Context, d time.Time, cause error) (Context, CancelFunc)"},
    +		{"WithTimeout", Func, 7, "func(parent Context, timeout time.Duration) (Context, CancelFunc)"},
    +		{"WithTimeoutCause", Func, 21, "func(parent Context, timeout time.Duration, cause error) (Context, CancelFunc)"},
    +		{"WithValue", Func, 7, "func(parent Context, key any, val any) Context"},
    +		{"WithoutCancel", Func, 21, "func(parent Context) Context"},
     	},
     	"crypto": {
    -		{"(Hash).Available", Method, 0},
    -		{"(Hash).HashFunc", Method, 4},
    -		{"(Hash).New", Method, 0},
    -		{"(Hash).Size", Method, 0},
    -		{"(Hash).String", Method, 15},
    -		{"BLAKE2b_256", Const, 9},
    -		{"BLAKE2b_384", Const, 9},
    -		{"BLAKE2b_512", Const, 9},
    -		{"BLAKE2s_256", Const, 9},
    -		{"Decrypter", Type, 5},
    -		{"DecrypterOpts", Type, 5},
    -		{"Hash", Type, 0},
    -		{"MD4", Const, 0},
    -		{"MD5", Const, 0},
    -		{"MD5SHA1", Const, 0},
    -		{"PrivateKey", Type, 0},
    -		{"PublicKey", Type, 2},
    -		{"RIPEMD160", Const, 0},
    -		{"RegisterHash", Func, 0},
    -		{"SHA1", Const, 0},
    -		{"SHA224", Const, 0},
    -		{"SHA256", Const, 0},
    -		{"SHA384", Const, 0},
    -		{"SHA3_224", Const, 4},
    -		{"SHA3_256", Const, 4},
    -		{"SHA3_384", Const, 4},
    -		{"SHA3_512", Const, 4},
    -		{"SHA512", Const, 0},
    -		{"SHA512_224", Const, 5},
    -		{"SHA512_256", Const, 5},
    -		{"Signer", Type, 4},
    -		{"SignerOpts", Type, 4},
    +		{"(Hash).Available", Method, 0, ""},
    +		{"(Hash).HashFunc", Method, 4, ""},
    +		{"(Hash).New", Method, 0, ""},
    +		{"(Hash).Size", Method, 0, ""},
    +		{"(Hash).String", Method, 15, ""},
    +		{"BLAKE2b_256", Const, 9, ""},
    +		{"BLAKE2b_384", Const, 9, ""},
    +		{"BLAKE2b_512", Const, 9, ""},
    +		{"BLAKE2s_256", Const, 9, ""},
    +		{"Decrypter", Type, 5, ""},
    +		{"DecrypterOpts", Type, 5, ""},
    +		{"Hash", Type, 0, ""},
    +		{"MD4", Const, 0, ""},
    +		{"MD5", Const, 0, ""},
    +		{"MD5SHA1", Const, 0, ""},
    +		{"MessageSigner", Type, 25, ""},
    +		{"PrivateKey", Type, 0, ""},
    +		{"PublicKey", Type, 2, ""},
    +		{"RIPEMD160", Const, 0, ""},
    +		{"RegisterHash", Func, 0, "func(h Hash, f func() hash.Hash)"},
    +		{"SHA1", Const, 0, ""},
    +		{"SHA224", Const, 0, ""},
    +		{"SHA256", Const, 0, ""},
    +		{"SHA384", Const, 0, ""},
    +		{"SHA3_224", Const, 4, ""},
    +		{"SHA3_256", Const, 4, ""},
    +		{"SHA3_384", Const, 4, ""},
    +		{"SHA3_512", Const, 4, ""},
    +		{"SHA512", Const, 0, ""},
    +		{"SHA512_224", Const, 5, ""},
    +		{"SHA512_256", Const, 5, ""},
    +		{"SignMessage", Func, 25, "func(signer Signer, rand io.Reader, msg []byte, opts SignerOpts) (signature []byte, err error)"},
    +		{"Signer", Type, 4, ""},
    +		{"SignerOpts", Type, 4, ""},
     	},
     	"crypto/aes": {
    -		{"(KeySizeError).Error", Method, 0},
    -		{"BlockSize", Const, 0},
    -		{"KeySizeError", Type, 0},
    -		{"NewCipher", Func, 0},
    +		{"(KeySizeError).Error", Method, 0, ""},
    +		{"BlockSize", Const, 0, ""},
    +		{"KeySizeError", Type, 0, ""},
    +		{"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
     	},
     	"crypto/cipher": {
    -		{"(StreamReader).Read", Method, 0},
    -		{"(StreamWriter).Close", Method, 0},
    -		{"(StreamWriter).Write", Method, 0},
    -		{"AEAD", Type, 2},
    -		{"Block", Type, 0},
    -		{"BlockMode", Type, 0},
    -		{"NewCBCDecrypter", Func, 0},
    -		{"NewCBCEncrypter", Func, 0},
    -		{"NewCFBDecrypter", Func, 0},
    -		{"NewCFBEncrypter", Func, 0},
    -		{"NewCTR", Func, 0},
    -		{"NewGCM", Func, 2},
    -		{"NewGCMWithNonceSize", Func, 5},
    -		{"NewGCMWithRandomNonce", Func, 24},
    -		{"NewGCMWithTagSize", Func, 11},
    -		{"NewOFB", Func, 0},
    -		{"Stream", Type, 0},
    -		{"StreamReader", Type, 0},
    -		{"StreamReader.R", Field, 0},
    -		{"StreamReader.S", Field, 0},
    -		{"StreamWriter", Type, 0},
    -		{"StreamWriter.Err", Field, 0},
    -		{"StreamWriter.S", Field, 0},
    -		{"StreamWriter.W", Field, 0},
    +		{"(StreamReader).Read", Method, 0, ""},
    +		{"(StreamWriter).Close", Method, 0, ""},
    +		{"(StreamWriter).Write", Method, 0, ""},
    +		{"AEAD", Type, 2, ""},
    +		{"Block", Type, 0, ""},
    +		{"BlockMode", Type, 0, ""},
    +		{"NewCBCDecrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
    +		{"NewCBCEncrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
    +		{"NewCFBDecrypter", Func, 0, "func(block Block, iv []byte) Stream"},
    +		{"NewCFBEncrypter", Func, 0, "func(block Block, iv []byte) Stream"},
    +		{"NewCTR", Func, 0, "func(block Block, iv []byte) Stream"},
    +		{"NewGCM", Func, 2, "func(cipher Block) (AEAD, error)"},
    +		{"NewGCMWithNonceSize", Func, 5, "func(cipher Block, size int) (AEAD, error)"},
    +		{"NewGCMWithRandomNonce", Func, 24, "func(cipher Block) (AEAD, error)"},
    +		{"NewGCMWithTagSize", Func, 11, "func(cipher Block, tagSize int) (AEAD, error)"},
    +		{"NewOFB", Func, 0, "func(b Block, iv []byte) Stream"},
    +		{"Stream", Type, 0, ""},
    +		{"StreamReader", Type, 0, ""},
    +		{"StreamReader.R", Field, 0, ""},
    +		{"StreamReader.S", Field, 0, ""},
    +		{"StreamWriter", Type, 0, ""},
    +		{"StreamWriter.Err", Field, 0, ""},
    +		{"StreamWriter.S", Field, 0, ""},
    +		{"StreamWriter.W", Field, 0, ""},
     	},
     	"crypto/des": {
    -		{"(KeySizeError).Error", Method, 0},
    -		{"BlockSize", Const, 0},
    -		{"KeySizeError", Type, 0},
    -		{"NewCipher", Func, 0},
    -		{"NewTripleDESCipher", Func, 0},
    +		{"(KeySizeError).Error", Method, 0, ""},
    +		{"BlockSize", Const, 0, ""},
    +		{"KeySizeError", Type, 0, ""},
    +		{"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
    +		{"NewTripleDESCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
     	},
     	"crypto/dsa": {
    -		{"ErrInvalidPublicKey", Var, 0},
    -		{"GenerateKey", Func, 0},
    -		{"GenerateParameters", Func, 0},
    -		{"L1024N160", Const, 0},
    -		{"L2048N224", Const, 0},
    -		{"L2048N256", Const, 0},
    -		{"L3072N256", Const, 0},
    -		{"ParameterSizes", Type, 0},
    -		{"Parameters", Type, 0},
    -		{"Parameters.G", Field, 0},
    -		{"Parameters.P", Field, 0},
    -		{"Parameters.Q", Field, 0},
    -		{"PrivateKey", Type, 0},
    -		{"PrivateKey.PublicKey", Field, 0},
    -		{"PrivateKey.X", Field, 0},
    -		{"PublicKey", Type, 0},
    -		{"PublicKey.Parameters", Field, 0},
    -		{"PublicKey.Y", Field, 0},
    -		{"Sign", Func, 0},
    -		{"Verify", Func, 0},
    +		{"ErrInvalidPublicKey", Var, 0, ""},
    +		{"GenerateKey", Func, 0, "func(priv *PrivateKey, rand io.Reader) error"},
    +		{"GenerateParameters", Func, 0, "func(params *Parameters, rand io.Reader, sizes ParameterSizes) error"},
    +		{"L1024N160", Const, 0, ""},
    +		{"L2048N224", Const, 0, ""},
    +		{"L2048N256", Const, 0, ""},
    +		{"L3072N256", Const, 0, ""},
    +		{"ParameterSizes", Type, 0, ""},
    +		{"Parameters", Type, 0, ""},
    +		{"Parameters.G", Field, 0, ""},
    +		{"Parameters.P", Field, 0, ""},
    +		{"Parameters.Q", Field, 0, ""},
    +		{"PrivateKey", Type, 0, ""},
    +		{"PrivateKey.PublicKey", Field, 0, ""},
    +		{"PrivateKey.X", Field, 0, ""},
    +		{"PublicKey", Type, 0, ""},
    +		{"PublicKey.Parameters", Field, 0, ""},
    +		{"PublicKey.Y", Field, 0, ""},
    +		{"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
    +		{"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
     	},
     	"crypto/ecdh": {
    -		{"(*PrivateKey).Bytes", Method, 20},
    -		{"(*PrivateKey).Curve", Method, 20},
    -		{"(*PrivateKey).ECDH", Method, 20},
    -		{"(*PrivateKey).Equal", Method, 20},
    -		{"(*PrivateKey).Public", Method, 20},
    -		{"(*PrivateKey).PublicKey", Method, 20},
    -		{"(*PublicKey).Bytes", Method, 20},
    -		{"(*PublicKey).Curve", Method, 20},
    -		{"(*PublicKey).Equal", Method, 20},
    -		{"Curve", Type, 20},
    -		{"P256", Func, 20},
    -		{"P384", Func, 20},
    -		{"P521", Func, 20},
    -		{"PrivateKey", Type, 20},
    -		{"PublicKey", Type, 20},
    -		{"X25519", Func, 20},
    +		{"(*PrivateKey).Bytes", Method, 20, ""},
    +		{"(*PrivateKey).Curve", Method, 20, ""},
    +		{"(*PrivateKey).ECDH", Method, 20, ""},
    +		{"(*PrivateKey).Equal", Method, 20, ""},
    +		{"(*PrivateKey).Public", Method, 20, ""},
    +		{"(*PrivateKey).PublicKey", Method, 20, ""},
    +		{"(*PublicKey).Bytes", Method, 20, ""},
    +		{"(*PublicKey).Curve", Method, 20, ""},
    +		{"(*PublicKey).Equal", Method, 20, ""},
    +		{"Curve", Type, 20, ""},
    +		{"P256", Func, 20, "func() Curve"},
    +		{"P384", Func, 20, "func() Curve"},
    +		{"P521", Func, 20, "func() Curve"},
    +		{"PrivateKey", Type, 20, ""},
    +		{"PublicKey", Type, 20, ""},
    +		{"X25519", Func, 20, "func() Curve"},
     	},
     	"crypto/ecdsa": {
    -		{"(*PrivateKey).ECDH", Method, 20},
    -		{"(*PrivateKey).Equal", Method, 15},
    -		{"(*PrivateKey).Public", Method, 4},
    -		{"(*PrivateKey).Sign", Method, 4},
    -		{"(*PublicKey).ECDH", Method, 20},
    -		{"(*PublicKey).Equal", Method, 15},
    -		{"(PrivateKey).Add", Method, 0},
    -		{"(PrivateKey).Double", Method, 0},
    -		{"(PrivateKey).IsOnCurve", Method, 0},
    -		{"(PrivateKey).Params", Method, 0},
    -		{"(PrivateKey).ScalarBaseMult", Method, 0},
    -		{"(PrivateKey).ScalarMult", Method, 0},
    -		{"(PublicKey).Add", Method, 0},
    -		{"(PublicKey).Double", Method, 0},
    -		{"(PublicKey).IsOnCurve", Method, 0},
    -		{"(PublicKey).Params", Method, 0},
    -		{"(PublicKey).ScalarBaseMult", Method, 0},
    -		{"(PublicKey).ScalarMult", Method, 0},
    -		{"GenerateKey", Func, 0},
    -		{"PrivateKey", Type, 0},
    -		{"PrivateKey.D", Field, 0},
    -		{"PrivateKey.PublicKey", Field, 0},
    -		{"PublicKey", Type, 0},
    -		{"PublicKey.Curve", Field, 0},
    -		{"PublicKey.X", Field, 0},
    -		{"PublicKey.Y", Field, 0},
    -		{"Sign", Func, 0},
    -		{"SignASN1", Func, 15},
    -		{"Verify", Func, 0},
    -		{"VerifyASN1", Func, 15},
    +		{"(*PrivateKey).Bytes", Method, 25, ""},
    +		{"(*PrivateKey).ECDH", Method, 20, ""},
    +		{"(*PrivateKey).Equal", Method, 15, ""},
    +		{"(*PrivateKey).Public", Method, 4, ""},
    +		{"(*PrivateKey).Sign", Method, 4, ""},
    +		{"(*PublicKey).Bytes", Method, 25, ""},
    +		{"(*PublicKey).ECDH", Method, 20, ""},
    +		{"(*PublicKey).Equal", Method, 15, ""},
    +		{"(PrivateKey).Add", Method, 0, ""},
    +		{"(PrivateKey).Double", Method, 0, ""},
    +		{"(PrivateKey).IsOnCurve", Method, 0, ""},
    +		{"(PrivateKey).Params", Method, 0, ""},
    +		{"(PrivateKey).ScalarBaseMult", Method, 0, ""},
    +		{"(PrivateKey).ScalarMult", Method, 0, ""},
    +		{"(PublicKey).Add", Method, 0, ""},
    +		{"(PublicKey).Double", Method, 0, ""},
    +		{"(PublicKey).IsOnCurve", Method, 0, ""},
    +		{"(PublicKey).Params", Method, 0, ""},
    +		{"(PublicKey).ScalarBaseMult", Method, 0, ""},
    +		{"(PublicKey).ScalarMult", Method, 0, ""},
    +		{"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"},
    +		{"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"},
    +		{"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"},
    +		{"PrivateKey", Type, 0, ""},
    +		{"PrivateKey.D", Field, 0, ""},
    +		{"PrivateKey.PublicKey", Field, 0, ""},
    +		{"PublicKey", Type, 0, ""},
    +		{"PublicKey.Curve", Field, 0, ""},
    +		{"PublicKey.X", Field, 0, ""},
    +		{"PublicKey.Y", Field, 0, ""},
    +		{"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
    +		{"SignASN1", Func, 15, "func(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"},
    +		{"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
    +		{"VerifyASN1", Func, 15, "func(pub *PublicKey, hash []byte, sig []byte) bool"},
     	},
     	"crypto/ed25519": {
    -		{"(*Options).HashFunc", Method, 20},
    -		{"(PrivateKey).Equal", Method, 15},
    -		{"(PrivateKey).Public", Method, 13},
    -		{"(PrivateKey).Seed", Method, 13},
    -		{"(PrivateKey).Sign", Method, 13},
    -		{"(PublicKey).Equal", Method, 15},
    -		{"GenerateKey", Func, 13},
    -		{"NewKeyFromSeed", Func, 13},
    -		{"Options", Type, 20},
    -		{"Options.Context", Field, 20},
    -		{"Options.Hash", Field, 20},
    -		{"PrivateKey", Type, 13},
    -		{"PrivateKeySize", Const, 13},
    -		{"PublicKey", Type, 13},
    -		{"PublicKeySize", Const, 13},
    -		{"SeedSize", Const, 13},
    -		{"Sign", Func, 13},
    -		{"SignatureSize", Const, 13},
    -		{"Verify", Func, 13},
    -		{"VerifyWithOptions", Func, 20},
    +		{"(*Options).HashFunc", Method, 20, ""},
    +		{"(PrivateKey).Equal", Method, 15, ""},
    +		{"(PrivateKey).Public", Method, 13, ""},
    +		{"(PrivateKey).Seed", Method, 13, ""},
    +		{"(PrivateKey).Sign", Method, 13, ""},
    +		{"(PublicKey).Equal", Method, 15, ""},
    +		{"GenerateKey", Func, 13, "func(rand io.Reader) (PublicKey, PrivateKey, error)"},
    +		{"NewKeyFromSeed", Func, 13, "func(seed []byte) PrivateKey"},
    +		{"Options", Type, 20, ""},
    +		{"Options.Context", Field, 20, ""},
    +		{"Options.Hash", Field, 20, ""},
    +		{"PrivateKey", Type, 13, ""},
    +		{"PrivateKeySize", Const, 13, ""},
    +		{"PublicKey", Type, 13, ""},
    +		{"PublicKeySize", Const, 13, ""},
    +		{"SeedSize", Const, 13, ""},
    +		{"Sign", Func, 13, "func(privateKey PrivateKey, message []byte) []byte"},
    +		{"SignatureSize", Const, 13, ""},
    +		{"Verify", Func, 13, "func(publicKey PublicKey, message []byte, sig []byte) bool"},
    +		{"VerifyWithOptions", Func, 20, "func(publicKey PublicKey, message []byte, sig []byte, opts *Options) error"},
     	},
     	"crypto/elliptic": {
    -		{"(*CurveParams).Add", Method, 0},
    -		{"(*CurveParams).Double", Method, 0},
    -		{"(*CurveParams).IsOnCurve", Method, 0},
    -		{"(*CurveParams).Params", Method, 0},
    -		{"(*CurveParams).ScalarBaseMult", Method, 0},
    -		{"(*CurveParams).ScalarMult", Method, 0},
    -		{"Curve", Type, 0},
    -		{"CurveParams", Type, 0},
    -		{"CurveParams.B", Field, 0},
    -		{"CurveParams.BitSize", Field, 0},
    -		{"CurveParams.Gx", Field, 0},
    -		{"CurveParams.Gy", Field, 0},
    -		{"CurveParams.N", Field, 0},
    -		{"CurveParams.Name", Field, 5},
    -		{"CurveParams.P", Field, 0},
    -		{"GenerateKey", Func, 0},
    -		{"Marshal", Func, 0},
    -		{"MarshalCompressed", Func, 15},
    -		{"P224", Func, 0},
    -		{"P256", Func, 0},
    -		{"P384", Func, 0},
    -		{"P521", Func, 0},
    -		{"Unmarshal", Func, 0},
    -		{"UnmarshalCompressed", Func, 15},
    +		{"(*CurveParams).Add", Method, 0, ""},
    +		{"(*CurveParams).Double", Method, 0, ""},
    +		{"(*CurveParams).IsOnCurve", Method, 0, ""},
    +		{"(*CurveParams).Params", Method, 0, ""},
    +		{"(*CurveParams).ScalarBaseMult", Method, 0, ""},
    +		{"(*CurveParams).ScalarMult", Method, 0, ""},
    +		{"Curve", Type, 0, ""},
    +		{"CurveParams", Type, 0, ""},
    +		{"CurveParams.B", Field, 0, ""},
    +		{"CurveParams.BitSize", Field, 0, ""},
    +		{"CurveParams.Gx", Field, 0, ""},
    +		{"CurveParams.Gy", Field, 0, ""},
    +		{"CurveParams.N", Field, 0, ""},
    +		{"CurveParams.Name", Field, 5, ""},
    +		{"CurveParams.P", Field, 0, ""},
    +		{"GenerateKey", Func, 0, "func(curve Curve, rand io.Reader) (priv []byte, x *big.Int, y *big.Int, err error)"},
    +		{"Marshal", Func, 0, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
    +		{"MarshalCompressed", Func, 15, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
    +		{"P224", Func, 0, "func() Curve"},
    +		{"P256", Func, 0, "func() Curve"},
    +		{"P384", Func, 0, "func() Curve"},
    +		{"P521", Func, 0, "func() Curve"},
    +		{"Unmarshal", Func, 0, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
    +		{"UnmarshalCompressed", Func, 15, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
     	},
     	"crypto/fips140": {
    -		{"Enabled", Func, 24},
    +		{"Enabled", Func, 24, "func() bool"},
     	},
     	"crypto/hkdf": {
    -		{"Expand", Func, 24},
    -		{"Extract", Func, 24},
    -		{"Key", Func, 24},
    +		{"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"},
    +		{"Extract", Func, 24, "func[H hash.Hash](h func() H, secret []byte, salt []byte) ([]byte, error)"},
    +		{"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, secret []byte, salt []byte, info string, keyLength int) ([]byte, error)"},
     	},
     	"crypto/hmac": {
    -		{"Equal", Func, 1},
    -		{"New", Func, 0},
    +		{"Equal", Func, 1, "func(mac1 []byte, mac2 []byte) bool"},
    +		{"New", Func, 0, "func(h func() hash.Hash, key []byte) hash.Hash"},
     	},
     	"crypto/md5": {
    -		{"BlockSize", Const, 0},
    -		{"New", Func, 0},
    -		{"Size", Const, 0},
    -		{"Sum", Func, 2},
    +		{"BlockSize", Const, 0, ""},
    +		{"New", Func, 0, "func() hash.Hash"},
    +		{"Size", Const, 0, ""},
    +		{"Sum", Func, 2, "func(data []byte) [16]byte"},
     	},
     	"crypto/mlkem": {
    -		{"(*DecapsulationKey1024).Bytes", Method, 24},
    -		{"(*DecapsulationKey1024).Decapsulate", Method, 24},
    -		{"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
    -		{"(*DecapsulationKey768).Bytes", Method, 24},
    -		{"(*DecapsulationKey768).Decapsulate", Method, 24},
    -		{"(*DecapsulationKey768).EncapsulationKey", Method, 24},
    -		{"(*EncapsulationKey1024).Bytes", Method, 24},
    -		{"(*EncapsulationKey1024).Encapsulate", Method, 24},
    -		{"(*EncapsulationKey768).Bytes", Method, 24},
    -		{"(*EncapsulationKey768).Encapsulate", Method, 24},
    -		{"CiphertextSize1024", Const, 24},
    -		{"CiphertextSize768", Const, 24},
    -		{"DecapsulationKey1024", Type, 24},
    -		{"DecapsulationKey768", Type, 24},
    -		{"EncapsulationKey1024", Type, 24},
    -		{"EncapsulationKey768", Type, 24},
    -		{"EncapsulationKeySize1024", Const, 24},
    -		{"EncapsulationKeySize768", Const, 24},
    -		{"GenerateKey1024", Func, 24},
    -		{"GenerateKey768", Func, 24},
    -		{"NewDecapsulationKey1024", Func, 24},
    -		{"NewDecapsulationKey768", Func, 24},
    -		{"NewEncapsulationKey1024", Func, 24},
    -		{"NewEncapsulationKey768", Func, 24},
    -		{"SeedSize", Const, 24},
    -		{"SharedKeySize", Const, 24},
    +		{"(*DecapsulationKey1024).Bytes", Method, 24, ""},
    +		{"(*DecapsulationKey1024).Decapsulate", Method, 24, ""},
    +		{"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""},
    +		{"(*DecapsulationKey768).Bytes", Method, 24, ""},
    +		{"(*DecapsulationKey768).Decapsulate", Method, 24, ""},
    +		{"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""},
    +		{"(*EncapsulationKey1024).Bytes", Method, 24, ""},
    +		{"(*EncapsulationKey1024).Encapsulate", Method, 24, ""},
    +		{"(*EncapsulationKey768).Bytes", Method, 24, ""},
    +		{"(*EncapsulationKey768).Encapsulate", Method, 24, ""},
    +		{"CiphertextSize1024", Const, 24, ""},
    +		{"CiphertextSize768", Const, 24, ""},
    +		{"DecapsulationKey1024", Type, 24, ""},
    +		{"DecapsulationKey768", Type, 24, ""},
    +		{"EncapsulationKey1024", Type, 24, ""},
    +		{"EncapsulationKey768", Type, 24, ""},
    +		{"EncapsulationKeySize1024", Const, 24, ""},
    +		{"EncapsulationKeySize768", Const, 24, ""},
    +		{"GenerateKey1024", Func, 24, "func() (*DecapsulationKey1024, error)"},
    +		{"GenerateKey768", Func, 24, "func() (*DecapsulationKey768, error)"},
    +		{"NewDecapsulationKey1024", Func, 24, "func(seed []byte) (*DecapsulationKey1024, error)"},
    +		{"NewDecapsulationKey768", Func, 24, "func(seed []byte) (*DecapsulationKey768, error)"},
    +		{"NewEncapsulationKey1024", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey1024, error)"},
    +		{"NewEncapsulationKey768", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey768, error)"},
    +		{"SeedSize", Const, 24, ""},
    +		{"SharedKeySize", Const, 24, ""},
     	},
     	"crypto/pbkdf2": {
    -		{"Key", Func, 24},
    +		{"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"},
     	},
     	"crypto/rand": {
    -		{"Int", Func, 0},
    -		{"Prime", Func, 0},
    -		{"Read", Func, 0},
    -		{"Reader", Var, 0},
    -		{"Text", Func, 24},
    +		{"Int", Func, 0, "func(rand io.Reader, max *big.Int) (n *big.Int, err error)"},
    +		{"Prime", Func, 0, "func(rand io.Reader, bits int) (*big.Int, error)"},
    +		{"Read", Func, 0, "func(b []byte) (n int, err error)"},
    +		{"Reader", Var, 0, ""},
    +		{"Text", Func, 24, "func() string"},
     	},
     	"crypto/rc4": {
    -		{"(*Cipher).Reset", Method, 0},
    -		{"(*Cipher).XORKeyStream", Method, 0},
    -		{"(KeySizeError).Error", Method, 0},
    -		{"Cipher", Type, 0},
    -		{"KeySizeError", Type, 0},
    -		{"NewCipher", Func, 0},
    +		{"(*Cipher).Reset", Method, 0, ""},
    +		{"(*Cipher).XORKeyStream", Method, 0, ""},
    +		{"(KeySizeError).Error", Method, 0, ""},
    +		{"Cipher", Type, 0, ""},
    +		{"KeySizeError", Type, 0, ""},
    +		{"NewCipher", Func, 0, "func(key []byte) (*Cipher, error)"},
     	},
     	"crypto/rsa": {
    -		{"(*PSSOptions).HashFunc", Method, 4},
    -		{"(*PrivateKey).Decrypt", Method, 5},
    -		{"(*PrivateKey).Equal", Method, 15},
    -		{"(*PrivateKey).Precompute", Method, 0},
    -		{"(*PrivateKey).Public", Method, 4},
    -		{"(*PrivateKey).Sign", Method, 4},
    -		{"(*PrivateKey).Size", Method, 11},
    -		{"(*PrivateKey).Validate", Method, 0},
    -		{"(*PublicKey).Equal", Method, 15},
    -		{"(*PublicKey).Size", Method, 11},
    -		{"CRTValue", Type, 0},
    -		{"CRTValue.Coeff", Field, 0},
    -		{"CRTValue.Exp", Field, 0},
    -		{"CRTValue.R", Field, 0},
    -		{"DecryptOAEP", Func, 0},
    -		{"DecryptPKCS1v15", Func, 0},
    -		{"DecryptPKCS1v15SessionKey", Func, 0},
    -		{"EncryptOAEP", Func, 0},
    -		{"EncryptPKCS1v15", Func, 0},
    -		{"ErrDecryption", Var, 0},
    -		{"ErrMessageTooLong", Var, 0},
    -		{"ErrVerification", Var, 0},
    -		{"GenerateKey", Func, 0},
    -		{"GenerateMultiPrimeKey", Func, 0},
    -		{"OAEPOptions", Type, 5},
    -		{"OAEPOptions.Hash", Field, 5},
    -		{"OAEPOptions.Label", Field, 5},
    -		{"OAEPOptions.MGFHash", Field, 20},
    -		{"PKCS1v15DecryptOptions", Type, 5},
    -		{"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5},
    -		{"PSSOptions", Type, 2},
    -		{"PSSOptions.Hash", Field, 4},
    -		{"PSSOptions.SaltLength", Field, 2},
    -		{"PSSSaltLengthAuto", Const, 2},
    -		{"PSSSaltLengthEqualsHash", Const, 2},
    -		{"PrecomputedValues", Type, 0},
    -		{"PrecomputedValues.CRTValues", Field, 0},
    -		{"PrecomputedValues.Dp", Field, 0},
    -		{"PrecomputedValues.Dq", Field, 0},
    -		{"PrecomputedValues.Qinv", Field, 0},
    -		{"PrivateKey", Type, 0},
    -		{"PrivateKey.D", Field, 0},
    -		{"PrivateKey.Precomputed", Field, 0},
    -		{"PrivateKey.Primes", Field, 0},
    -		{"PrivateKey.PublicKey", Field, 0},
    -		{"PublicKey", Type, 0},
    -		{"PublicKey.E", Field, 0},
    -		{"PublicKey.N", Field, 0},
    -		{"SignPKCS1v15", Func, 0},
    -		{"SignPSS", Func, 2},
    -		{"VerifyPKCS1v15", Func, 0},
    -		{"VerifyPSS", Func, 2},
    +		{"(*PSSOptions).HashFunc", Method, 4, ""},
    +		{"(*PrivateKey).Decrypt", Method, 5, ""},
    +		{"(*PrivateKey).Equal", Method, 15, ""},
    +		{"(*PrivateKey).Precompute", Method, 0, ""},
    +		{"(*PrivateKey).Public", Method, 4, ""},
    +		{"(*PrivateKey).Sign", Method, 4, ""},
    +		{"(*PrivateKey).Size", Method, 11, ""},
    +		{"(*PrivateKey).Validate", Method, 0, ""},
    +		{"(*PublicKey).Equal", Method, 15, ""},
    +		{"(*PublicKey).Size", Method, 11, ""},
    +		{"CRTValue", Type, 0, ""},
    +		{"CRTValue.Coeff", Field, 0, ""},
    +		{"CRTValue.Exp", Field, 0, ""},
    +		{"CRTValue.R", Field, 0, ""},
    +		{"DecryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error)"},
    +		{"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"},
    +		{"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"},
    +		{"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"},
    +		{"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"},
    +		{"ErrDecryption", Var, 0, ""},
    +		{"ErrMessageTooLong", Var, 0, ""},
    +		{"ErrVerification", Var, 0, ""},
    +		{"GenerateKey", Func, 0, "func(random io.Reader, bits int) (*PrivateKey, error)"},
    +		{"GenerateMultiPrimeKey", Func, 0, "func(random io.Reader, nprimes int, bits int) (*PrivateKey, error)"},
    +		{"OAEPOptions", Type, 5, ""},
    +		{"OAEPOptions.Hash", Field, 5, ""},
    +		{"OAEPOptions.Label", Field, 5, ""},
    +		{"OAEPOptions.MGFHash", Field, 20, ""},
    +		{"PKCS1v15DecryptOptions", Type, 5, ""},
    +		{"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5, ""},
    +		{"PSSOptions", Type, 2, ""},
    +		{"PSSOptions.Hash", Field, 4, ""},
    +		{"PSSOptions.SaltLength", Field, 2, ""},
    +		{"PSSSaltLengthAuto", Const, 2, ""},
    +		{"PSSSaltLengthEqualsHash", Const, 2, ""},
    +		{"PrecomputedValues", Type, 0, ""},
    +		{"PrecomputedValues.CRTValues", Field, 0, ""},
    +		{"PrecomputedValues.Dp", Field, 0, ""},
    +		{"PrecomputedValues.Dq", Field, 0, ""},
    +		{"PrecomputedValues.Qinv", Field, 0, ""},
    +		{"PrivateKey", Type, 0, ""},
    +		{"PrivateKey.D", Field, 0, ""},
    +		{"PrivateKey.Precomputed", Field, 0, ""},
    +		{"PrivateKey.Primes", Field, 0, ""},
    +		{"PrivateKey.PublicKey", Field, 0, ""},
    +		{"PublicKey", Type, 0, ""},
    +		{"PublicKey.E", Field, 0, ""},
    +		{"PublicKey.N", Field, 0, ""},
    +		{"SignPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)"},
    +		{"SignPSS", Func, 2, "func(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"},
    +		{"VerifyPKCS1v15", Func, 0, "func(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error"},
    +		{"VerifyPSS", Func, 2, "func(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error"},
     	},
     	"crypto/sha1": {
    -		{"BlockSize", Const, 0},
    -		{"New", Func, 0},
    -		{"Size", Const, 0},
    -		{"Sum", Func, 2},
    +		{"BlockSize", Const, 0, ""},
    +		{"New", Func, 0, "func() hash.Hash"},
    +		{"Size", Const, 0, ""},
    +		{"Sum", Func, 2, "func(data []byte) [20]byte"},
     	},
     	"crypto/sha256": {
    -		{"BlockSize", Const, 0},
    -		{"New", Func, 0},
    -		{"New224", Func, 0},
    -		{"Size", Const, 0},
    -		{"Size224", Const, 0},
    -		{"Sum224", Func, 2},
    -		{"Sum256", Func, 2},
    +		{"BlockSize", Const, 0, ""},
    +		{"New", Func, 0, "func() hash.Hash"},
    +		{"New224", Func, 0, "func() hash.Hash"},
    +		{"Size", Const, 0, ""},
    +		{"Size224", Const, 0, ""},
    +		{"Sum224", Func, 2, "func(data []byte) [28]byte"},
    +		{"Sum256", Func, 2, "func(data []byte) [32]byte"},
     	},
     	"crypto/sha3": {
    -		{"(*SHA3).AppendBinary", Method, 24},
    -		{"(*SHA3).BlockSize", Method, 24},
    -		{"(*SHA3).MarshalBinary", Method, 24},
    -		{"(*SHA3).Reset", Method, 24},
    -		{"(*SHA3).Size", Method, 24},
    -		{"(*SHA3).Sum", Method, 24},
    -		{"(*SHA3).UnmarshalBinary", Method, 24},
    -		{"(*SHA3).Write", Method, 24},
    -		{"(*SHAKE).AppendBinary", Method, 24},
    -		{"(*SHAKE).BlockSize", Method, 24},
    -		{"(*SHAKE).MarshalBinary", Method, 24},
    -		{"(*SHAKE).Read", Method, 24},
    -		{"(*SHAKE).Reset", Method, 24},
    -		{"(*SHAKE).UnmarshalBinary", Method, 24},
    -		{"(*SHAKE).Write", Method, 24},
    -		{"New224", Func, 24},
    -		{"New256", Func, 24},
    -		{"New384", Func, 24},
    -		{"New512", Func, 24},
    -		{"NewCSHAKE128", Func, 24},
    -		{"NewCSHAKE256", Func, 24},
    -		{"NewSHAKE128", Func, 24},
    -		{"NewSHAKE256", Func, 24},
    -		{"SHA3", Type, 24},
    -		{"SHAKE", Type, 24},
    -		{"Sum224", Func, 24},
    -		{"Sum256", Func, 24},
    -		{"Sum384", Func, 24},
    -		{"Sum512", Func, 24},
    -		{"SumSHAKE128", Func, 24},
    -		{"SumSHAKE256", Func, 24},
    +		{"(*SHA3).AppendBinary", Method, 24, ""},
    +		{"(*SHA3).BlockSize", Method, 24, ""},
    +		{"(*SHA3).Clone", Method, 25, ""},
    +		{"(*SHA3).MarshalBinary", Method, 24, ""},
    +		{"(*SHA3).Reset", Method, 24, ""},
    +		{"(*SHA3).Size", Method, 24, ""},
    +		{"(*SHA3).Sum", Method, 24, ""},
    +		{"(*SHA3).UnmarshalBinary", Method, 24, ""},
    +		{"(*SHA3).Write", Method, 24, ""},
    +		{"(*SHAKE).AppendBinary", Method, 24, ""},
    +		{"(*SHAKE).BlockSize", Method, 24, ""},
    +		{"(*SHAKE).MarshalBinary", Method, 24, ""},
    +		{"(*SHAKE).Read", Method, 24, ""},
    +		{"(*SHAKE).Reset", Method, 24, ""},
    +		{"(*SHAKE).UnmarshalBinary", Method, 24, ""},
    +		{"(*SHAKE).Write", Method, 24, ""},
    +		{"New224", Func, 24, "func() *SHA3"},
    +		{"New256", Func, 24, "func() *SHA3"},
    +		{"New384", Func, 24, "func() *SHA3"},
    +		{"New512", Func, 24, "func() *SHA3"},
    +		{"NewCSHAKE128", Func, 24, "func(N []byte, S []byte) *SHAKE"},
    +		{"NewCSHAKE256", Func, 24, "func(N []byte, S []byte) *SHAKE"},
    +		{"NewSHAKE128", Func, 24, "func() *SHAKE"},
    +		{"NewSHAKE256", Func, 24, "func() *SHAKE"},
    +		{"SHA3", Type, 24, ""},
    +		{"SHAKE", Type, 24, ""},
    +		{"Sum224", Func, 24, "func(data []byte) [28]byte"},
    +		{"Sum256", Func, 24, "func(data []byte) [32]byte"},
    +		{"Sum384", Func, 24, "func(data []byte) [48]byte"},
    +		{"Sum512", Func, 24, "func(data []byte) [64]byte"},
    +		{"SumSHAKE128", Func, 24, "func(data []byte, length int) []byte"},
    +		{"SumSHAKE256", Func, 24, "func(data []byte, length int) []byte"},
     	},
     	"crypto/sha512": {
    -		{"BlockSize", Const, 0},
    -		{"New", Func, 0},
    -		{"New384", Func, 0},
    -		{"New512_224", Func, 5},
    -		{"New512_256", Func, 5},
    -		{"Size", Const, 0},
    -		{"Size224", Const, 5},
    -		{"Size256", Const, 5},
    -		{"Size384", Const, 0},
    -		{"Sum384", Func, 2},
    -		{"Sum512", Func, 2},
    -		{"Sum512_224", Func, 5},
    -		{"Sum512_256", Func, 5},
    +		{"BlockSize", Const, 0, ""},
    +		{"New", Func, 0, "func() hash.Hash"},
    +		{"New384", Func, 0, "func() hash.Hash"},
    +		{"New512_224", Func, 5, "func() hash.Hash"},
    +		{"New512_256", Func, 5, "func() hash.Hash"},
    +		{"Size", Const, 0, ""},
    +		{"Size224", Const, 5, ""},
    +		{"Size256", Const, 5, ""},
    +		{"Size384", Const, 0, ""},
    +		{"Sum384", Func, 2, "func(data []byte) [48]byte"},
    +		{"Sum512", Func, 2, "func(data []byte) [64]byte"},
    +		{"Sum512_224", Func, 5, "func(data []byte) [28]byte"},
    +		{"Sum512_256", Func, 5, "func(data []byte) [32]byte"},
     	},
     	"crypto/subtle": {
    -		{"ConstantTimeByteEq", Func, 0},
    -		{"ConstantTimeCompare", Func, 0},
    -		{"ConstantTimeCopy", Func, 0},
    -		{"ConstantTimeEq", Func, 0},
    -		{"ConstantTimeLessOrEq", Func, 2},
    -		{"ConstantTimeSelect", Func, 0},
    -		{"WithDataIndependentTiming", Func, 24},
    -		{"XORBytes", Func, 20},
    +		{"ConstantTimeByteEq", Func, 0, "func(x uint8, y uint8) int"},
    +		{"ConstantTimeCompare", Func, 0, "func(x []byte, y []byte) int"},
    +		{"ConstantTimeCopy", Func, 0, "func(v int, x []byte, y []byte)"},
    +		{"ConstantTimeEq", Func, 0, "func(x int32, y int32) int"},
    +		{"ConstantTimeLessOrEq", Func, 2, "func(x int, y int) int"},
    +		{"ConstantTimeSelect", Func, 0, "func(v int, x int, y int) int"},
    +		{"WithDataIndependentTiming", Func, 24, "func(f func())"},
    +		{"XORBytes", Func, 20, "func(dst []byte, x []byte, y []byte) int"},
     	},
     	"crypto/tls": {
    -		{"(*CertificateRequestInfo).Context", Method, 17},
    -		{"(*CertificateRequestInfo).SupportsCertificate", Method, 14},
    -		{"(*CertificateVerificationError).Error", Method, 20},
    -		{"(*CertificateVerificationError).Unwrap", Method, 20},
    -		{"(*ClientHelloInfo).Context", Method, 17},
    -		{"(*ClientHelloInfo).SupportsCertificate", Method, 14},
    -		{"(*ClientSessionState).ResumptionState", Method, 21},
    -		{"(*Config).BuildNameToCertificate", Method, 0},
    -		{"(*Config).Clone", Method, 8},
    -		{"(*Config).DecryptTicket", Method, 21},
    -		{"(*Config).EncryptTicket", Method, 21},
    -		{"(*Config).SetSessionTicketKeys", Method, 5},
    -		{"(*Conn).Close", Method, 0},
    -		{"(*Conn).CloseWrite", Method, 8},
    -		{"(*Conn).ConnectionState", Method, 0},
    -		{"(*Conn).Handshake", Method, 0},
    -		{"(*Conn).HandshakeContext", Method, 17},
    -		{"(*Conn).LocalAddr", Method, 0},
    -		{"(*Conn).NetConn", Method, 18},
    -		{"(*Conn).OCSPResponse", Method, 0},
    -		{"(*Conn).Read", Method, 0},
    -		{"(*Conn).RemoteAddr", Method, 0},
    -		{"(*Conn).SetDeadline", Method, 0},
    -		{"(*Conn).SetReadDeadline", Method, 0},
    -		{"(*Conn).SetWriteDeadline", Method, 0},
    -		{"(*Conn).VerifyHostname", Method, 0},
    -		{"(*Conn).Write", Method, 0},
    -		{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
    -		{"(*Dialer).Dial", Method, 15},
    -		{"(*Dialer).DialContext", Method, 15},
    -		{"(*ECHRejectionError).Error", Method, 23},
    -		{"(*QUICConn).Close", Method, 21},
    -		{"(*QUICConn).ConnectionState", Method, 21},
    -		{"(*QUICConn).HandleData", Method, 21},
    -		{"(*QUICConn).NextEvent", Method, 21},
    -		{"(*QUICConn).SendSessionTicket", Method, 21},
    -		{"(*QUICConn).SetTransportParameters", Method, 21},
    -		{"(*QUICConn).Start", Method, 21},
    -		{"(*QUICConn).StoreSession", Method, 23},
    -		{"(*SessionState).Bytes", Method, 21},
    -		{"(AlertError).Error", Method, 21},
    -		{"(ClientAuthType).String", Method, 15},
    -		{"(CurveID).String", Method, 15},
    -		{"(QUICEncryptionLevel).String", Method, 21},
    -		{"(RecordHeaderError).Error", Method, 6},
    -		{"(SignatureScheme).String", Method, 15},
    -		{"AlertError", Type, 21},
    -		{"Certificate", Type, 0},
    -		{"Certificate.Certificate", Field, 0},
    -		{"Certificate.Leaf", Field, 0},
    -		{"Certificate.OCSPStaple", Field, 0},
    -		{"Certificate.PrivateKey", Field, 0},
    -		{"Certificate.SignedCertificateTimestamps", Field, 5},
    -		{"Certificate.SupportedSignatureAlgorithms", Field, 14},
    -		{"CertificateRequestInfo", Type, 8},
    -		{"CertificateRequestInfo.AcceptableCAs", Field, 8},
    -		{"CertificateRequestInfo.SignatureSchemes", Field, 8},
    -		{"CertificateRequestInfo.Version", Field, 14},
    -		{"CertificateVerificationError", Type, 20},
    -		{"CertificateVerificationError.Err", Field, 20},
    -		{"CertificateVerificationError.UnverifiedCertificates", Field, 20},
    -		{"CipherSuite", Type, 14},
    -		{"CipherSuite.ID", Field, 14},
    -		{"CipherSuite.Insecure", Field, 14},
    -		{"CipherSuite.Name", Field, 14},
    -		{"CipherSuite.SupportedVersions", Field, 14},
    -		{"CipherSuiteName", Func, 14},
    -		{"CipherSuites", Func, 14},
    -		{"Client", Func, 0},
    -		{"ClientAuthType", Type, 0},
    -		{"ClientHelloInfo", Type, 4},
    -		{"ClientHelloInfo.CipherSuites", Field, 4},
    -		{"ClientHelloInfo.Conn", Field, 8},
    -		{"ClientHelloInfo.Extensions", Field, 24},
    -		{"ClientHelloInfo.ServerName", Field, 4},
    -		{"ClientHelloInfo.SignatureSchemes", Field, 8},
    -		{"ClientHelloInfo.SupportedCurves", Field, 4},
    -		{"ClientHelloInfo.SupportedPoints", Field, 4},
    -		{"ClientHelloInfo.SupportedProtos", Field, 8},
    -		{"ClientHelloInfo.SupportedVersions", Field, 8},
    -		{"ClientSessionCache", Type, 3},
    -		{"ClientSessionState", Type, 3},
    -		{"Config", Type, 0},
    -		{"Config.Certificates", Field, 0},
    -		{"Config.CipherSuites", Field, 0},
    -		{"Config.ClientAuth", Field, 0},
    -		{"Config.ClientCAs", Field, 0},
    -		{"Config.ClientSessionCache", Field, 3},
    -		{"Config.CurvePreferences", Field, 3},
    -		{"Config.DynamicRecordSizingDisabled", Field, 7},
    -		{"Config.EncryptedClientHelloConfigList", Field, 23},
    -		{"Config.EncryptedClientHelloKeys", Field, 24},
    -		{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
    -		{"Config.GetCertificate", Field, 4},
    -		{"Config.GetClientCertificate", Field, 8},
    -		{"Config.GetConfigForClient", Field, 8},
    -		{"Config.InsecureSkipVerify", Field, 0},
    -		{"Config.KeyLogWriter", Field, 8},
    -		{"Config.MaxVersion", Field, 2},
    -		{"Config.MinVersion", Field, 2},
    -		{"Config.NameToCertificate", Field, 0},
    -		{"Config.NextProtos", Field, 0},
    -		{"Config.PreferServerCipherSuites", Field, 1},
    -		{"Config.Rand", Field, 0},
    -		{"Config.Renegotiation", Field, 7},
    -		{"Config.RootCAs", Field, 0},
    -		{"Config.ServerName", Field, 0},
    -		{"Config.SessionTicketKey", Field, 1},
    -		{"Config.SessionTicketsDisabled", Field, 1},
    -		{"Config.Time", Field, 0},
    -		{"Config.UnwrapSession", Field, 21},
    -		{"Config.VerifyConnection", Field, 15},
    -		{"Config.VerifyPeerCertificate", Field, 8},
    -		{"Config.WrapSession", Field, 21},
    -		{"Conn", Type, 0},
    -		{"ConnectionState", Type, 0},
    -		{"ConnectionState.CipherSuite", Field, 0},
    -		{"ConnectionState.DidResume", Field, 1},
    -		{"ConnectionState.ECHAccepted", Field, 23},
    -		{"ConnectionState.HandshakeComplete", Field, 0},
    -		{"ConnectionState.NegotiatedProtocol", Field, 0},
    -		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
    -		{"ConnectionState.OCSPResponse", Field, 5},
    -		{"ConnectionState.PeerCertificates", Field, 0},
    -		{"ConnectionState.ServerName", Field, 0},
    -		{"ConnectionState.SignedCertificateTimestamps", Field, 5},
    -		{"ConnectionState.TLSUnique", Field, 4},
    -		{"ConnectionState.VerifiedChains", Field, 0},
    -		{"ConnectionState.Version", Field, 3},
    -		{"CurveID", Type, 3},
    -		{"CurveP256", Const, 3},
    -		{"CurveP384", Const, 3},
    -		{"CurveP521", Const, 3},
    -		{"Dial", Func, 0},
    -		{"DialWithDialer", Func, 3},
    -		{"Dialer", Type, 15},
    -		{"Dialer.Config", Field, 15},
    -		{"Dialer.NetDialer", Field, 15},
    -		{"ECDSAWithP256AndSHA256", Const, 8},
    -		{"ECDSAWithP384AndSHA384", Const, 8},
    -		{"ECDSAWithP521AndSHA512", Const, 8},
    -		{"ECDSAWithSHA1", Const, 10},
    -		{"ECHRejectionError", Type, 23},
    -		{"ECHRejectionError.RetryConfigList", Field, 23},
    -		{"Ed25519", Const, 13},
    -		{"EncryptedClientHelloKey", Type, 24},
    -		{"EncryptedClientHelloKey.Config", Field, 24},
    -		{"EncryptedClientHelloKey.PrivateKey", Field, 24},
    -		{"EncryptedClientHelloKey.SendAsRetry", Field, 24},
    -		{"InsecureCipherSuites", Func, 14},
    -		{"Listen", Func, 0},
    -		{"LoadX509KeyPair", Func, 0},
    -		{"NewLRUClientSessionCache", Func, 3},
    -		{"NewListener", Func, 0},
    -		{"NewResumptionState", Func, 21},
    -		{"NoClientCert", Const, 0},
    -		{"PKCS1WithSHA1", Const, 8},
    -		{"PKCS1WithSHA256", Const, 8},
    -		{"PKCS1WithSHA384", Const, 8},
    -		{"PKCS1WithSHA512", Const, 8},
    -		{"PSSWithSHA256", Const, 8},
    -		{"PSSWithSHA384", Const, 8},
    -		{"PSSWithSHA512", Const, 8},
    -		{"ParseSessionState", Func, 21},
    -		{"QUICClient", Func, 21},
    -		{"QUICConfig", Type, 21},
    -		{"QUICConfig.EnableSessionEvents", Field, 23},
    -		{"QUICConfig.TLSConfig", Field, 21},
    -		{"QUICConn", Type, 21},
    -		{"QUICEncryptionLevel", Type, 21},
    -		{"QUICEncryptionLevelApplication", Const, 21},
    -		{"QUICEncryptionLevelEarly", Const, 21},
    -		{"QUICEncryptionLevelHandshake", Const, 21},
    -		{"QUICEncryptionLevelInitial", Const, 21},
    -		{"QUICEvent", Type, 21},
    -		{"QUICEvent.Data", Field, 21},
    -		{"QUICEvent.Kind", Field, 21},
    -		{"QUICEvent.Level", Field, 21},
    -		{"QUICEvent.SessionState", Field, 23},
    -		{"QUICEvent.Suite", Field, 21},
    -		{"QUICEventKind", Type, 21},
    -		{"QUICHandshakeDone", Const, 21},
    -		{"QUICNoEvent", Const, 21},
    -		{"QUICRejectedEarlyData", Const, 21},
    -		{"QUICResumeSession", Const, 23},
    -		{"QUICServer", Func, 21},
    -		{"QUICSessionTicketOptions", Type, 21},
    -		{"QUICSessionTicketOptions.EarlyData", Field, 21},
    -		{"QUICSessionTicketOptions.Extra", Field, 23},
    -		{"QUICSetReadSecret", Const, 21},
    -		{"QUICSetWriteSecret", Const, 21},
    -		{"QUICStoreSession", Const, 23},
    -		{"QUICTransportParameters", Const, 21},
    -		{"QUICTransportParametersRequired", Const, 21},
    -		{"QUICWriteData", Const, 21},
    -		{"RecordHeaderError", Type, 6},
    -		{"RecordHeaderError.Conn", Field, 12},
    -		{"RecordHeaderError.Msg", Field, 6},
    -		{"RecordHeaderError.RecordHeader", Field, 6},
    -		{"RenegotiateFreelyAsClient", Const, 7},
    -		{"RenegotiateNever", Const, 7},
    -		{"RenegotiateOnceAsClient", Const, 7},
    -		{"RenegotiationSupport", Type, 7},
    -		{"RequestClientCert", Const, 0},
    -		{"RequireAndVerifyClientCert", Const, 0},
    -		{"RequireAnyClientCert", Const, 0},
    -		{"Server", Func, 0},
    -		{"SessionState", Type, 21},
    -		{"SessionState.EarlyData", Field, 21},
    -		{"SessionState.Extra", Field, 21},
    -		{"SignatureScheme", Type, 8},
    -		{"TLS_AES_128_GCM_SHA256", Const, 12},
    -		{"TLS_AES_256_GCM_SHA384", Const, 12},
    -		{"TLS_CHACHA20_POLY1305_SHA256", Const, 12},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2},
    -		{"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5},
    -		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8},
    -		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14},
    -		{"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2},
    -		{"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0},
    -		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0},
    -		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8},
    -		{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2},
    -		{"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1},
    -		{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5},
    -		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8},
    -		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14},
    -		{"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0},
    -		{"TLS_FALLBACK_SCSV", Const, 4},
    -		{"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0},
    -		{"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0},
    -		{"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8},
    -		{"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6},
    -		{"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1},
    -		{"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6},
    -		{"TLS_RSA_WITH_RC4_128_SHA", Const, 0},
    -		{"VerifyClientCertIfGiven", Const, 0},
    -		{"VersionName", Func, 21},
    -		{"VersionSSL30", Const, 2},
    -		{"VersionTLS10", Const, 2},
    -		{"VersionTLS11", Const, 2},
    -		{"VersionTLS12", Const, 2},
    -		{"VersionTLS13", Const, 12},
    -		{"X25519", Const, 8},
    -		{"X25519MLKEM768", Const, 24},
    -		{"X509KeyPair", Func, 0},
    +		{"(*CertificateRequestInfo).Context", Method, 17, ""},
    +		{"(*CertificateRequestInfo).SupportsCertificate", Method, 14, ""},
    +		{"(*CertificateVerificationError).Error", Method, 20, ""},
    +		{"(*CertificateVerificationError).Unwrap", Method, 20, ""},
    +		{"(*ClientHelloInfo).Context", Method, 17, ""},
    +		{"(*ClientHelloInfo).SupportsCertificate", Method, 14, ""},
    +		{"(*ClientSessionState).ResumptionState", Method, 21, ""},
    +		{"(*Config).BuildNameToCertificate", Method, 0, ""},
    +		{"(*Config).Clone", Method, 8, ""},
    +		{"(*Config).DecryptTicket", Method, 21, ""},
    +		{"(*Config).EncryptTicket", Method, 21, ""},
    +		{"(*Config).SetSessionTicketKeys", Method, 5, ""},
    +		{"(*Conn).Close", Method, 0, ""},
    +		{"(*Conn).CloseWrite", Method, 8, ""},
    +		{"(*Conn).ConnectionState", Method, 0, ""},
    +		{"(*Conn).Handshake", Method, 0, ""},
    +		{"(*Conn).HandshakeContext", Method, 17, ""},
    +		{"(*Conn).LocalAddr", Method, 0, ""},
    +		{"(*Conn).NetConn", Method, 18, ""},
    +		{"(*Conn).OCSPResponse", Method, 0, ""},
    +		{"(*Conn).Read", Method, 0, ""},
    +		{"(*Conn).RemoteAddr", Method, 0, ""},
    +		{"(*Conn).SetDeadline", Method, 0, ""},
    +		{"(*Conn).SetReadDeadline", Method, 0, ""},
    +		{"(*Conn).SetWriteDeadline", Method, 0, ""},
    +		{"(*Conn).VerifyHostname", Method, 0, ""},
    +		{"(*Conn).Write", Method, 0, ""},
    +		{"(*ConnectionState).ExportKeyingMaterial", Method, 11, ""},
    +		{"(*Dialer).Dial", Method, 15, ""},
    +		{"(*Dialer).DialContext", Method, 15, ""},
    +		{"(*ECHRejectionError).Error", Method, 23, ""},
    +		{"(*QUICConn).Close", Method, 21, ""},
    +		{"(*QUICConn).ConnectionState", Method, 21, ""},
    +		{"(*QUICConn).HandleData", Method, 21, ""},
    +		{"(*QUICConn).NextEvent", Method, 21, ""},
    +		{"(*QUICConn).SendSessionTicket", Method, 21, ""},
    +		{"(*QUICConn).SetTransportParameters", Method, 21, ""},
    +		{"(*QUICConn).Start", Method, 21, ""},
    +		{"(*QUICConn).StoreSession", Method, 23, ""},
    +		{"(*SessionState).Bytes", Method, 21, ""},
    +		{"(AlertError).Error", Method, 21, ""},
    +		{"(ClientAuthType).String", Method, 15, ""},
    +		{"(CurveID).String", Method, 15, ""},
    +		{"(QUICEncryptionLevel).String", Method, 21, ""},
    +		{"(RecordHeaderError).Error", Method, 6, ""},
    +		{"(SignatureScheme).String", Method, 15, ""},
    +		{"AlertError", Type, 21, ""},
    +		{"Certificate", Type, 0, ""},
    +		{"Certificate.Certificate", Field, 0, ""},
    +		{"Certificate.Leaf", Field, 0, ""},
    +		{"Certificate.OCSPStaple", Field, 0, ""},
    +		{"Certificate.PrivateKey", Field, 0, ""},
    +		{"Certificate.SignedCertificateTimestamps", Field, 5, ""},
    +		{"Certificate.SupportedSignatureAlgorithms", Field, 14, ""},
    +		{"CertificateRequestInfo", Type, 8, ""},
    +		{"CertificateRequestInfo.AcceptableCAs", Field, 8, ""},
    +		{"CertificateRequestInfo.SignatureSchemes", Field, 8, ""},
    +		{"CertificateRequestInfo.Version", Field, 14, ""},
    +		{"CertificateVerificationError", Type, 20, ""},
    +		{"CertificateVerificationError.Err", Field, 20, ""},
    +		{"CertificateVerificationError.UnverifiedCertificates", Field, 20, ""},
    +		{"CipherSuite", Type, 14, ""},
    +		{"CipherSuite.ID", Field, 14, ""},
    +		{"CipherSuite.Insecure", Field, 14, ""},
    +		{"CipherSuite.Name", Field, 14, ""},
    +		{"CipherSuite.SupportedVersions", Field, 14, ""},
    +		{"CipherSuiteName", Func, 14, "func(id uint16) string"},
    +		{"CipherSuites", Func, 14, "func() []*CipherSuite"},
    +		{"Client", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
    +		{"ClientAuthType", Type, 0, ""},
    +		{"ClientHelloInfo", Type, 4, ""},
    +		{"ClientHelloInfo.CipherSuites", Field, 4, ""},
    +		{"ClientHelloInfo.Conn", Field, 8, ""},
    +		{"ClientHelloInfo.Extensions", Field, 24, ""},
    +		{"ClientHelloInfo.ServerName", Field, 4, ""},
    +		{"ClientHelloInfo.SignatureSchemes", Field, 8, ""},
    +		{"ClientHelloInfo.SupportedCurves", Field, 4, ""},
    +		{"ClientHelloInfo.SupportedPoints", Field, 4, ""},
    +		{"ClientHelloInfo.SupportedProtos", Field, 8, ""},
    +		{"ClientHelloInfo.SupportedVersions", Field, 8, ""},
    +		{"ClientSessionCache", Type, 3, ""},
    +		{"ClientSessionState", Type, 3, ""},
    +		{"Config", Type, 0, ""},
    +		{"Config.Certificates", Field, 0, ""},
    +		{"Config.CipherSuites", Field, 0, ""},
    +		{"Config.ClientAuth", Field, 0, ""},
    +		{"Config.ClientCAs", Field, 0, ""},
    +		{"Config.ClientSessionCache", Field, 3, ""},
    +		{"Config.CurvePreferences", Field, 3, ""},
    +		{"Config.DynamicRecordSizingDisabled", Field, 7, ""},
    +		{"Config.EncryptedClientHelloConfigList", Field, 23, ""},
    +		{"Config.EncryptedClientHelloKeys", Field, 24, ""},
    +		{"Config.EncryptedClientHelloRejectionVerify", Field, 23, ""},
    +		{"Config.GetCertificate", Field, 4, ""},
    +		{"Config.GetClientCertificate", Field, 8, ""},
    +		{"Config.GetConfigForClient", Field, 8, ""},
    +		{"Config.GetEncryptedClientHelloKeys", Field, 25, ""},
    +		{"Config.InsecureSkipVerify", Field, 0, ""},
    +		{"Config.KeyLogWriter", Field, 8, ""},
    +		{"Config.MaxVersion", Field, 2, ""},
    +		{"Config.MinVersion", Field, 2, ""},
    +		{"Config.NameToCertificate", Field, 0, ""},
    +		{"Config.NextProtos", Field, 0, ""},
    +		{"Config.PreferServerCipherSuites", Field, 1, ""},
    +		{"Config.Rand", Field, 0, ""},
    +		{"Config.Renegotiation", Field, 7, ""},
    +		{"Config.RootCAs", Field, 0, ""},
    +		{"Config.ServerName", Field, 0, ""},
    +		{"Config.SessionTicketKey", Field, 1, ""},
    +		{"Config.SessionTicketsDisabled", Field, 1, ""},
    +		{"Config.Time", Field, 0, ""},
    +		{"Config.UnwrapSession", Field, 21, ""},
    +		{"Config.VerifyConnection", Field, 15, ""},
    +		{"Config.VerifyPeerCertificate", Field, 8, ""},
    +		{"Config.WrapSession", Field, 21, ""},
    +		{"Conn", Type, 0, ""},
    +		{"ConnectionState", Type, 0, ""},
    +		{"ConnectionState.CipherSuite", Field, 0, ""},
    +		{"ConnectionState.CurveID", Field, 25, ""},
    +		{"ConnectionState.DidResume", Field, 1, ""},
    +		{"ConnectionState.ECHAccepted", Field, 23, ""},
    +		{"ConnectionState.HandshakeComplete", Field, 0, ""},
    +		{"ConnectionState.NegotiatedProtocol", Field, 0, ""},
    +		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""},
    +		{"ConnectionState.OCSPResponse", Field, 5, ""},
    +		{"ConnectionState.PeerCertificates", Field, 0, ""},
    +		{"ConnectionState.ServerName", Field, 0, ""},
    +		{"ConnectionState.SignedCertificateTimestamps", Field, 5, ""},
    +		{"ConnectionState.TLSUnique", Field, 4, ""},
    +		{"ConnectionState.VerifiedChains", Field, 0, ""},
    +		{"ConnectionState.Version", Field, 3, ""},
    +		{"CurveID", Type, 3, ""},
    +		{"CurveP256", Const, 3, ""},
    +		{"CurveP384", Const, 3, ""},
    +		{"CurveP521", Const, 3, ""},
    +		{"Dial", Func, 0, "func(network string, addr string, config *Config) (*Conn, error)"},
    +		{"DialWithDialer", Func, 3, "func(dialer *net.Dialer, network string, addr string, config *Config) (*Conn, error)"},
    +		{"Dialer", Type, 15, ""},
    +		{"Dialer.Config", Field, 15, ""},
    +		{"Dialer.NetDialer", Field, 15, ""},
    +		{"ECDSAWithP256AndSHA256", Const, 8, ""},
    +		{"ECDSAWithP384AndSHA384", Const, 8, ""},
    +		{"ECDSAWithP521AndSHA512", Const, 8, ""},
    +		{"ECDSAWithSHA1", Const, 10, ""},
    +		{"ECHRejectionError", Type, 23, ""},
    +		{"ECHRejectionError.RetryConfigList", Field, 23, ""},
    +		{"Ed25519", Const, 13, ""},
    +		{"EncryptedClientHelloKey", Type, 24, ""},
    +		{"EncryptedClientHelloKey.Config", Field, 24, ""},
    +		{"EncryptedClientHelloKey.PrivateKey", Field, 24, ""},
    +		{"EncryptedClientHelloKey.SendAsRetry", Field, 24, ""},
    +		{"InsecureCipherSuites", Func, 14, "func() []*CipherSuite"},
    +		{"Listen", Func, 0, "func(network string, laddr string, config *Config) (net.Listener, error)"},
    +		{"LoadX509KeyPair", Func, 0, "func(certFile string, keyFile string) (Certificate, error)"},
    +		{"NewLRUClientSessionCache", Func, 3, "func(capacity int) ClientSessionCache"},
    +		{"NewListener", Func, 0, "func(inner net.Listener, config *Config) net.Listener"},
    +		{"NewResumptionState", Func, 21, "func(ticket []byte, state *SessionState) (*ClientSessionState, error)"},
    +		{"NoClientCert", Const, 0, ""},
    +		{"PKCS1WithSHA1", Const, 8, ""},
    +		{"PKCS1WithSHA256", Const, 8, ""},
    +		{"PKCS1WithSHA384", Const, 8, ""},
    +		{"PKCS1WithSHA512", Const, 8, ""},
    +		{"PSSWithSHA256", Const, 8, ""},
    +		{"PSSWithSHA384", Const, 8, ""},
    +		{"PSSWithSHA512", Const, 8, ""},
    +		{"ParseSessionState", Func, 21, "func(data []byte) (*SessionState, error)"},
    +		{"QUICClient", Func, 21, "func(config *QUICConfig) *QUICConn"},
    +		{"QUICConfig", Type, 21, ""},
    +		{"QUICConfig.EnableSessionEvents", Field, 23, ""},
    +		{"QUICConfig.TLSConfig", Field, 21, ""},
    +		{"QUICConn", Type, 21, ""},
    +		{"QUICEncryptionLevel", Type, 21, ""},
    +		{"QUICEncryptionLevelApplication", Const, 21, ""},
    +		{"QUICEncryptionLevelEarly", Const, 21, ""},
    +		{"QUICEncryptionLevelHandshake", Const, 21, ""},
    +		{"QUICEncryptionLevelInitial", Const, 21, ""},
    +		{"QUICEvent", Type, 21, ""},
    +		{"QUICEvent.Data", Field, 21, ""},
    +		{"QUICEvent.Kind", Field, 21, ""},
    +		{"QUICEvent.Level", Field, 21, ""},
    +		{"QUICEvent.SessionState", Field, 23, ""},
    +		{"QUICEvent.Suite", Field, 21, ""},
    +		{"QUICEventKind", Type, 21, ""},
    +		{"QUICHandshakeDone", Const, 21, ""},
    +		{"QUICNoEvent", Const, 21, ""},
    +		{"QUICRejectedEarlyData", Const, 21, ""},
    +		{"QUICResumeSession", Const, 23, ""},
    +		{"QUICServer", Func, 21, "func(config *QUICConfig) *QUICConn"},
    +		{"QUICSessionTicketOptions", Type, 21, ""},
    +		{"QUICSessionTicketOptions.EarlyData", Field, 21, ""},
    +		{"QUICSessionTicketOptions.Extra", Field, 23, ""},
    +		{"QUICSetReadSecret", Const, 21, ""},
    +		{"QUICSetWriteSecret", Const, 21, ""},
    +		{"QUICStoreSession", Const, 23, ""},
    +		{"QUICTransportParameters", Const, 21, ""},
    +		{"QUICTransportParametersRequired", Const, 21, ""},
    +		{"QUICWriteData", Const, 21, ""},
    +		{"RecordHeaderError", Type, 6, ""},
    +		{"RecordHeaderError.Conn", Field, 12, ""},
    +		{"RecordHeaderError.Msg", Field, 6, ""},
    +		{"RecordHeaderError.RecordHeader", Field, 6, ""},
    +		{"RenegotiateFreelyAsClient", Const, 7, ""},
    +		{"RenegotiateNever", Const, 7, ""},
    +		{"RenegotiateOnceAsClient", Const, 7, ""},
    +		{"RenegotiationSupport", Type, 7, ""},
    +		{"RequestClientCert", Const, 0, ""},
    +		{"RequireAndVerifyClientCert", Const, 0, ""},
    +		{"RequireAnyClientCert", Const, 0, ""},
    +		{"Server", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
    +		{"SessionState", Type, 21, ""},
    +		{"SessionState.EarlyData", Field, 21, ""},
    +		{"SessionState.Extra", Field, 21, ""},
    +		{"SignatureScheme", Type, 8, ""},
    +		{"TLS_AES_128_GCM_SHA256", Const, 12, ""},
    +		{"TLS_AES_256_GCM_SHA384", Const, 12, ""},
    +		{"TLS_CHACHA20_POLY1305_SHA256", Const, 12, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
    +		{"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2, ""},
    +		{"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
    +		{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
    +		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
    +		{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
    +		{"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0, ""},
    +		{"TLS_FALLBACK_SCSV", Const, 4, ""},
    +		{"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
    +		{"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
    +		{"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
    +		{"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6, ""},
    +		{"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
    +		{"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6, ""},
    +		{"TLS_RSA_WITH_RC4_128_SHA", Const, 0, ""},
    +		{"VerifyClientCertIfGiven", Const, 0, ""},
    +		{"VersionName", Func, 21, "func(version uint16) string"},
    +		{"VersionSSL30", Const, 2, ""},
    +		{"VersionTLS10", Const, 2, ""},
    +		{"VersionTLS11", Const, 2, ""},
    +		{"VersionTLS12", Const, 2, ""},
    +		{"VersionTLS13", Const, 12, ""},
    +		{"X25519", Const, 8, ""},
    +		{"X25519MLKEM768", Const, 24, ""},
    +		{"X509KeyPair", Func, 0, "func(certPEMBlock []byte, keyPEMBlock []byte) (Certificate, error)"},
     	},
     	"crypto/x509": {
    -		{"(*CertPool).AddCert", Method, 0},
    -		{"(*CertPool).AddCertWithConstraint", Method, 22},
    -		{"(*CertPool).AppendCertsFromPEM", Method, 0},
    -		{"(*CertPool).Clone", Method, 19},
    -		{"(*CertPool).Equal", Method, 19},
    -		{"(*CertPool).Subjects", Method, 0},
    -		{"(*Certificate).CheckCRLSignature", Method, 0},
    -		{"(*Certificate).CheckSignature", Method, 0},
    -		{"(*Certificate).CheckSignatureFrom", Method, 0},
    -		{"(*Certificate).CreateCRL", Method, 0},
    -		{"(*Certificate).Equal", Method, 0},
    -		{"(*Certificate).Verify", Method, 0},
    -		{"(*Certificate).VerifyHostname", Method, 0},
    -		{"(*CertificateRequest).CheckSignature", Method, 5},
    -		{"(*OID).UnmarshalBinary", Method, 23},
    -		{"(*OID).UnmarshalText", Method, 23},
    -		{"(*RevocationList).CheckSignatureFrom", Method, 19},
    -		{"(CertificateInvalidError).Error", Method, 0},
    -		{"(ConstraintViolationError).Error", Method, 0},
    -		{"(HostnameError).Error", Method, 0},
    -		{"(InsecureAlgorithmError).Error", Method, 6},
    -		{"(OID).AppendBinary", Method, 24},
    -		{"(OID).AppendText", Method, 24},
    -		{"(OID).Equal", Method, 22},
    -		{"(OID).EqualASN1OID", Method, 22},
    -		{"(OID).MarshalBinary", Method, 23},
    -		{"(OID).MarshalText", Method, 23},
    -		{"(OID).String", Method, 22},
    -		{"(PublicKeyAlgorithm).String", Method, 10},
    -		{"(SignatureAlgorithm).String", Method, 6},
    -		{"(SystemRootsError).Error", Method, 1},
    -		{"(SystemRootsError).Unwrap", Method, 16},
    -		{"(UnhandledCriticalExtension).Error", Method, 0},
    -		{"(UnknownAuthorityError).Error", Method, 0},
    -		{"CANotAuthorizedForExtKeyUsage", Const, 10},
    -		{"CANotAuthorizedForThisName", Const, 0},
    -		{"CertPool", Type, 0},
    -		{"Certificate", Type, 0},
    -		{"Certificate.AuthorityKeyId", Field, 0},
    -		{"Certificate.BasicConstraintsValid", Field, 0},
    -		{"Certificate.CRLDistributionPoints", Field, 2},
    -		{"Certificate.DNSNames", Field, 0},
    -		{"Certificate.EmailAddresses", Field, 0},
    -		{"Certificate.ExcludedDNSDomains", Field, 9},
    -		{"Certificate.ExcludedEmailAddresses", Field, 10},
    -		{"Certificate.ExcludedIPRanges", Field, 10},
    -		{"Certificate.ExcludedURIDomains", Field, 10},
    -		{"Certificate.ExtKeyUsage", Field, 0},
    -		{"Certificate.Extensions", Field, 2},
    -		{"Certificate.ExtraExtensions", Field, 2},
    -		{"Certificate.IPAddresses", Field, 1},
    -		{"Certificate.InhibitAnyPolicy", Field, 24},
    -		{"Certificate.InhibitAnyPolicyZero", Field, 24},
    -		{"Certificate.InhibitPolicyMapping", Field, 24},
    -		{"Certificate.InhibitPolicyMappingZero", Field, 24},
    -		{"Certificate.IsCA", Field, 0},
    -		{"Certificate.Issuer", Field, 0},
    -		{"Certificate.IssuingCertificateURL", Field, 2},
    -		{"Certificate.KeyUsage", Field, 0},
    -		{"Certificate.MaxPathLen", Field, 0},
    -		{"Certificate.MaxPathLenZero", Field, 4},
    -		{"Certificate.NotAfter", Field, 0},
    -		{"Certificate.NotBefore", Field, 0},
    -		{"Certificate.OCSPServer", Field, 2},
    -		{"Certificate.PermittedDNSDomains", Field, 0},
    -		{"Certificate.PermittedDNSDomainsCritical", Field, 0},
    -		{"Certificate.PermittedEmailAddresses", Field, 10},
    -		{"Certificate.PermittedIPRanges", Field, 10},
    -		{"Certificate.PermittedURIDomains", Field, 10},
    -		{"Certificate.Policies", Field, 22},
    -		{"Certificate.PolicyIdentifiers", Field, 0},
    -		{"Certificate.PolicyMappings", Field, 24},
    -		{"Certificate.PublicKey", Field, 0},
    -		{"Certificate.PublicKeyAlgorithm", Field, 0},
    -		{"Certificate.Raw", Field, 0},
    -		{"Certificate.RawIssuer", Field, 0},
    -		{"Certificate.RawSubject", Field, 0},
    -		{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
    -		{"Certificate.RawTBSCertificate", Field, 0},
    -		{"Certificate.RequireExplicitPolicy", Field, 24},
    -		{"Certificate.RequireExplicitPolicyZero", Field, 24},
    -		{"Certificate.SerialNumber", Field, 0},
    -		{"Certificate.Signature", Field, 0},
    -		{"Certificate.SignatureAlgorithm", Field, 0},
    -		{"Certificate.Subject", Field, 0},
    -		{"Certificate.SubjectKeyId", Field, 0},
    -		{"Certificate.URIs", Field, 10},
    -		{"Certificate.UnhandledCriticalExtensions", Field, 5},
    -		{"Certificate.UnknownExtKeyUsage", Field, 0},
    -		{"Certificate.Version", Field, 0},
    -		{"CertificateInvalidError", Type, 0},
    -		{"CertificateInvalidError.Cert", Field, 0},
    -		{"CertificateInvalidError.Detail", Field, 10},
    -		{"CertificateInvalidError.Reason", Field, 0},
    -		{"CertificateRequest", Type, 3},
    -		{"CertificateRequest.Attributes", Field, 3},
    -		{"CertificateRequest.DNSNames", Field, 3},
    -		{"CertificateRequest.EmailAddresses", Field, 3},
    -		{"CertificateRequest.Extensions", Field, 3},
    -		{"CertificateRequest.ExtraExtensions", Field, 3},
    -		{"CertificateRequest.IPAddresses", Field, 3},
    -		{"CertificateRequest.PublicKey", Field, 3},
    -		{"CertificateRequest.PublicKeyAlgorithm", Field, 3},
    -		{"CertificateRequest.Raw", Field, 3},
    -		{"CertificateRequest.RawSubject", Field, 3},
    -		{"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3},
    -		{"CertificateRequest.RawTBSCertificateRequest", Field, 3},
    -		{"CertificateRequest.Signature", Field, 3},
    -		{"CertificateRequest.SignatureAlgorithm", Field, 3},
    -		{"CertificateRequest.Subject", Field, 3},
    -		{"CertificateRequest.URIs", Field, 10},
    -		{"CertificateRequest.Version", Field, 3},
    -		{"ConstraintViolationError", Type, 0},
    -		{"CreateCertificate", Func, 0},
    -		{"CreateCertificateRequest", Func, 3},
    -		{"CreateRevocationList", Func, 15},
    -		{"DSA", Const, 0},
    -		{"DSAWithSHA1", Const, 0},
    -		{"DSAWithSHA256", Const, 0},
    -		{"DecryptPEMBlock", Func, 1},
    -		{"ECDSA", Const, 1},
    -		{"ECDSAWithSHA1", Const, 1},
    -		{"ECDSAWithSHA256", Const, 1},
    -		{"ECDSAWithSHA384", Const, 1},
    -		{"ECDSAWithSHA512", Const, 1},
    -		{"Ed25519", Const, 13},
    -		{"EncryptPEMBlock", Func, 1},
    -		{"ErrUnsupportedAlgorithm", Var, 0},
    -		{"Expired", Const, 0},
    -		{"ExtKeyUsage", Type, 0},
    -		{"ExtKeyUsageAny", Const, 0},
    -		{"ExtKeyUsageClientAuth", Const, 0},
    -		{"ExtKeyUsageCodeSigning", Const, 0},
    -		{"ExtKeyUsageEmailProtection", Const, 0},
    -		{"ExtKeyUsageIPSECEndSystem", Const, 1},
    -		{"ExtKeyUsageIPSECTunnel", Const, 1},
    -		{"ExtKeyUsageIPSECUser", Const, 1},
    -		{"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10},
    -		{"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10},
    -		{"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1},
    -		{"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1},
    -		{"ExtKeyUsageOCSPSigning", Const, 0},
    -		{"ExtKeyUsageServerAuth", Const, 0},
    -		{"ExtKeyUsageTimeStamping", Const, 0},
    -		{"HostnameError", Type, 0},
    -		{"HostnameError.Certificate", Field, 0},
    -		{"HostnameError.Host", Field, 0},
    -		{"IncompatibleUsage", Const, 1},
    -		{"IncorrectPasswordError", Var, 1},
    -		{"InsecureAlgorithmError", Type, 6},
    -		{"InvalidReason", Type, 0},
    -		{"IsEncryptedPEMBlock", Func, 1},
    -		{"KeyUsage", Type, 0},
    -		{"KeyUsageCRLSign", Const, 0},
    -		{"KeyUsageCertSign", Const, 0},
    -		{"KeyUsageContentCommitment", Const, 0},
    -		{"KeyUsageDataEncipherment", Const, 0},
    -		{"KeyUsageDecipherOnly", Const, 0},
    -		{"KeyUsageDigitalSignature", Const, 0},
    -		{"KeyUsageEncipherOnly", Const, 0},
    -		{"KeyUsageKeyAgreement", Const, 0},
    -		{"KeyUsageKeyEncipherment", Const, 0},
    -		{"MD2WithRSA", Const, 0},
    -		{"MD5WithRSA", Const, 0},
    -		{"MarshalECPrivateKey", Func, 2},
    -		{"MarshalPKCS1PrivateKey", Func, 0},
    -		{"MarshalPKCS1PublicKey", Func, 10},
    -		{"MarshalPKCS8PrivateKey", Func, 10},
    -		{"MarshalPKIXPublicKey", Func, 0},
    -		{"NameConstraintsWithoutSANs", Const, 10},
    -		{"NameMismatch", Const, 8},
    -		{"NewCertPool", Func, 0},
    -		{"NoValidChains", Const, 24},
    -		{"NotAuthorizedToSign", Const, 0},
    -		{"OID", Type, 22},
    -		{"OIDFromInts", Func, 22},
    -		{"PEMCipher", Type, 1},
    -		{"PEMCipher3DES", Const, 1},
    -		{"PEMCipherAES128", Const, 1},
    -		{"PEMCipherAES192", Const, 1},
    -		{"PEMCipherAES256", Const, 1},
    -		{"PEMCipherDES", Const, 1},
    -		{"ParseCRL", Func, 0},
    -		{"ParseCertificate", Func, 0},
    -		{"ParseCertificateRequest", Func, 3},
    -		{"ParseCertificates", Func, 0},
    -		{"ParseDERCRL", Func, 0},
    -		{"ParseECPrivateKey", Func, 1},
    -		{"ParseOID", Func, 23},
    -		{"ParsePKCS1PrivateKey", Func, 0},
    -		{"ParsePKCS1PublicKey", Func, 10},
    -		{"ParsePKCS8PrivateKey", Func, 0},
    -		{"ParsePKIXPublicKey", Func, 0},
    -		{"ParseRevocationList", Func, 19},
    -		{"PolicyMapping", Type, 24},
    -		{"PolicyMapping.IssuerDomainPolicy", Field, 24},
    -		{"PolicyMapping.SubjectDomainPolicy", Field, 24},
    -		{"PublicKeyAlgorithm", Type, 0},
    -		{"PureEd25519", Const, 13},
    -		{"RSA", Const, 0},
    -		{"RevocationList", Type, 15},
    -		{"RevocationList.AuthorityKeyId", Field, 19},
    -		{"RevocationList.Extensions", Field, 19},
    -		{"RevocationList.ExtraExtensions", Field, 15},
    -		{"RevocationList.Issuer", Field, 19},
    -		{"RevocationList.NextUpdate", Field, 15},
    -		{"RevocationList.Number", Field, 15},
    -		{"RevocationList.Raw", Field, 19},
    -		{"RevocationList.RawIssuer", Field, 19},
    -		{"RevocationList.RawTBSRevocationList", Field, 19},
    -		{"RevocationList.RevokedCertificateEntries", Field, 21},
    -		{"RevocationList.RevokedCertificates", Field, 15},
    -		{"RevocationList.Signature", Field, 19},
    -		{"RevocationList.SignatureAlgorithm", Field, 15},
    -		{"RevocationList.ThisUpdate", Field, 15},
    -		{"RevocationListEntry", Type, 21},
    -		{"RevocationListEntry.Extensions", Field, 21},
    -		{"RevocationListEntry.ExtraExtensions", Field, 21},
    -		{"RevocationListEntry.Raw", Field, 21},
    -		{"RevocationListEntry.ReasonCode", Field, 21},
    -		{"RevocationListEntry.RevocationTime", Field, 21},
    -		{"RevocationListEntry.SerialNumber", Field, 21},
    -		{"SHA1WithRSA", Const, 0},
    -		{"SHA256WithRSA", Const, 0},
    -		{"SHA256WithRSAPSS", Const, 8},
    -		{"SHA384WithRSA", Const, 0},
    -		{"SHA384WithRSAPSS", Const, 8},
    -		{"SHA512WithRSA", Const, 0},
    -		{"SHA512WithRSAPSS", Const, 8},
    -		{"SetFallbackRoots", Func, 20},
    -		{"SignatureAlgorithm", Type, 0},
    -		{"SystemCertPool", Func, 7},
    -		{"SystemRootsError", Type, 1},
    -		{"SystemRootsError.Err", Field, 7},
    -		{"TooManyConstraints", Const, 10},
    -		{"TooManyIntermediates", Const, 0},
    -		{"UnconstrainedName", Const, 10},
    -		{"UnhandledCriticalExtension", Type, 0},
    -		{"UnknownAuthorityError", Type, 0},
    -		{"UnknownAuthorityError.Cert", Field, 8},
    -		{"UnknownPublicKeyAlgorithm", Const, 0},
    -		{"UnknownSignatureAlgorithm", Const, 0},
    -		{"VerifyOptions", Type, 0},
    -		{"VerifyOptions.CertificatePolicies", Field, 24},
    -		{"VerifyOptions.CurrentTime", Field, 0},
    -		{"VerifyOptions.DNSName", Field, 0},
    -		{"VerifyOptions.Intermediates", Field, 0},
    -		{"VerifyOptions.KeyUsages", Field, 1},
    -		{"VerifyOptions.MaxConstraintComparisions", Field, 10},
    -		{"VerifyOptions.Roots", Field, 0},
    +		{"(*CertPool).AddCert", Method, 0, ""},
    +		{"(*CertPool).AddCertWithConstraint", Method, 22, ""},
    +		{"(*CertPool).AppendCertsFromPEM", Method, 0, ""},
    +		{"(*CertPool).Clone", Method, 19, ""},
    +		{"(*CertPool).Equal", Method, 19, ""},
    +		{"(*CertPool).Subjects", Method, 0, ""},
    +		{"(*Certificate).CheckCRLSignature", Method, 0, ""},
    +		{"(*Certificate).CheckSignature", Method, 0, ""},
    +		{"(*Certificate).CheckSignatureFrom", Method, 0, ""},
    +		{"(*Certificate).CreateCRL", Method, 0, ""},
    +		{"(*Certificate).Equal", Method, 0, ""},
    +		{"(*Certificate).Verify", Method, 0, ""},
    +		{"(*Certificate).VerifyHostname", Method, 0, ""},
    +		{"(*CertificateRequest).CheckSignature", Method, 5, ""},
    +		{"(*OID).UnmarshalBinary", Method, 23, ""},
    +		{"(*OID).UnmarshalText", Method, 23, ""},
    +		{"(*RevocationList).CheckSignatureFrom", Method, 19, ""},
    +		{"(CertificateInvalidError).Error", Method, 0, ""},
    +		{"(ConstraintViolationError).Error", Method, 0, ""},
    +		{"(HostnameError).Error", Method, 0, ""},
    +		{"(InsecureAlgorithmError).Error", Method, 6, ""},
    +		{"(OID).AppendBinary", Method, 24, ""},
    +		{"(OID).AppendText", Method, 24, ""},
    +		{"(OID).Equal", Method, 22, ""},
    +		{"(OID).EqualASN1OID", Method, 22, ""},
    +		{"(OID).MarshalBinary", Method, 23, ""},
    +		{"(OID).MarshalText", Method, 23, ""},
    +		{"(OID).String", Method, 22, ""},
    +		{"(PublicKeyAlgorithm).String", Method, 10, ""},
    +		{"(SignatureAlgorithm).String", Method, 6, ""},
    +		{"(SystemRootsError).Error", Method, 1, ""},
    +		{"(SystemRootsError).Unwrap", Method, 16, ""},
    +		{"(UnhandledCriticalExtension).Error", Method, 0, ""},
    +		{"(UnknownAuthorityError).Error", Method, 0, ""},
    +		{"CANotAuthorizedForExtKeyUsage", Const, 10, ""},
    +		{"CANotAuthorizedForThisName", Const, 0, ""},
    +		{"CertPool", Type, 0, ""},
    +		{"Certificate", Type, 0, ""},
    +		{"Certificate.AuthorityKeyId", Field, 0, ""},
    +		{"Certificate.BasicConstraintsValid", Field, 0, ""},
    +		{"Certificate.CRLDistributionPoints", Field, 2, ""},
    +		{"Certificate.DNSNames", Field, 0, ""},
    +		{"Certificate.EmailAddresses", Field, 0, ""},
    +		{"Certificate.ExcludedDNSDomains", Field, 9, ""},
    +		{"Certificate.ExcludedEmailAddresses", Field, 10, ""},
    +		{"Certificate.ExcludedIPRanges", Field, 10, ""},
    +		{"Certificate.ExcludedURIDomains", Field, 10, ""},
    +		{"Certificate.ExtKeyUsage", Field, 0, ""},
    +		{"Certificate.Extensions", Field, 2, ""},
    +		{"Certificate.ExtraExtensions", Field, 2, ""},
    +		{"Certificate.IPAddresses", Field, 1, ""},
    +		{"Certificate.InhibitAnyPolicy", Field, 24, ""},
    +		{"Certificate.InhibitAnyPolicyZero", Field, 24, ""},
    +		{"Certificate.InhibitPolicyMapping", Field, 24, ""},
    +		{"Certificate.InhibitPolicyMappingZero", Field, 24, ""},
    +		{"Certificate.IsCA", Field, 0, ""},
    +		{"Certificate.Issuer", Field, 0, ""},
    +		{"Certificate.IssuingCertificateURL", Field, 2, ""},
    +		{"Certificate.KeyUsage", Field, 0, ""},
    +		{"Certificate.MaxPathLen", Field, 0, ""},
    +		{"Certificate.MaxPathLenZero", Field, 4, ""},
    +		{"Certificate.NotAfter", Field, 0, ""},
    +		{"Certificate.NotBefore", Field, 0, ""},
    +		{"Certificate.OCSPServer", Field, 2, ""},
    +		{"Certificate.PermittedDNSDomains", Field, 0, ""},
    +		{"Certificate.PermittedDNSDomainsCritical", Field, 0, ""},
    +		{"Certificate.PermittedEmailAddresses", Field, 10, ""},
    +		{"Certificate.PermittedIPRanges", Field, 10, ""},
    +		{"Certificate.PermittedURIDomains", Field, 10, ""},
    +		{"Certificate.Policies", Field, 22, ""},
    +		{"Certificate.PolicyIdentifiers", Field, 0, ""},
    +		{"Certificate.PolicyMappings", Field, 24, ""},
    +		{"Certificate.PublicKey", Field, 0, ""},
    +		{"Certificate.PublicKeyAlgorithm", Field, 0, ""},
    +		{"Certificate.Raw", Field, 0, ""},
    +		{"Certificate.RawIssuer", Field, 0, ""},
    +		{"Certificate.RawSubject", Field, 0, ""},
    +		{"Certificate.RawSubjectPublicKeyInfo", Field, 0, ""},
    +		{"Certificate.RawTBSCertificate", Field, 0, ""},
    +		{"Certificate.RequireExplicitPolicy", Field, 24, ""},
    +		{"Certificate.RequireExplicitPolicyZero", Field, 24, ""},
    +		{"Certificate.SerialNumber", Field, 0, ""},
    +		{"Certificate.Signature", Field, 0, ""},
    +		{"Certificate.SignatureAlgorithm", Field, 0, ""},
    +		{"Certificate.Subject", Field, 0, ""},
    +		{"Certificate.SubjectKeyId", Field, 0, ""},
    +		{"Certificate.URIs", Field, 10, ""},
    +		{"Certificate.UnhandledCriticalExtensions", Field, 5, ""},
    +		{"Certificate.UnknownExtKeyUsage", Field, 0, ""},
    +		{"Certificate.Version", Field, 0, ""},
    +		{"CertificateInvalidError", Type, 0, ""},
    +		{"CertificateInvalidError.Cert", Field, 0, ""},
    +		{"CertificateInvalidError.Detail", Field, 10, ""},
    +		{"CertificateInvalidError.Reason", Field, 0, ""},
    +		{"CertificateRequest", Type, 3, ""},
    +		{"CertificateRequest.Attributes", Field, 3, ""},
    +		{"CertificateRequest.DNSNames", Field, 3, ""},
    +		{"CertificateRequest.EmailAddresses", Field, 3, ""},
    +		{"CertificateRequest.Extensions", Field, 3, ""},
    +		{"CertificateRequest.ExtraExtensions", Field, 3, ""},
    +		{"CertificateRequest.IPAddresses", Field, 3, ""},
    +		{"CertificateRequest.PublicKey", Field, 3, ""},
    +		{"CertificateRequest.PublicKeyAlgorithm", Field, 3, ""},
    +		{"CertificateRequest.Raw", Field, 3, ""},
    +		{"CertificateRequest.RawSubject", Field, 3, ""},
    +		{"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3, ""},
    +		{"CertificateRequest.RawTBSCertificateRequest", Field, 3, ""},
    +		{"CertificateRequest.Signature", Field, 3, ""},
    +		{"CertificateRequest.SignatureAlgorithm", Field, 3, ""},
    +		{"CertificateRequest.Subject", Field, 3, ""},
    +		{"CertificateRequest.URIs", Field, 10, ""},
    +		{"CertificateRequest.Version", Field, 3, ""},
    +		{"ConstraintViolationError", Type, 0, ""},
    +		{"CreateCertificate", Func, 0, "func(rand io.Reader, template *Certificate, parent *Certificate, pub any, priv any) ([]byte, error)"},
    +		{"CreateCertificateRequest", Func, 3, "func(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error)"},
    +		{"CreateRevocationList", Func, 15, "func(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error)"},
    +		{"DSA", Const, 0, ""},
    +		{"DSAWithSHA1", Const, 0, ""},
    +		{"DSAWithSHA256", Const, 0, ""},
    +		{"DecryptPEMBlock", Func, 1, "func(b *pem.Block, password []byte) ([]byte, error)"},
    +		{"ECDSA", Const, 1, ""},
    +		{"ECDSAWithSHA1", Const, 1, ""},
    +		{"ECDSAWithSHA256", Const, 1, ""},
    +		{"ECDSAWithSHA384", Const, 1, ""},
    +		{"ECDSAWithSHA512", Const, 1, ""},
    +		{"Ed25519", Const, 13, ""},
    +		{"EncryptPEMBlock", Func, 1, "func(rand io.Reader, blockType string, data []byte, password []byte, alg PEMCipher) (*pem.Block, error)"},
    +		{"ErrUnsupportedAlgorithm", Var, 0, ""},
    +		{"Expired", Const, 0, ""},
    +		{"ExtKeyUsage", Type, 0, ""},
    +		{"ExtKeyUsageAny", Const, 0, ""},
    +		{"ExtKeyUsageClientAuth", Const, 0, ""},
    +		{"ExtKeyUsageCodeSigning", Const, 0, ""},
    +		{"ExtKeyUsageEmailProtection", Const, 0, ""},
    +		{"ExtKeyUsageIPSECEndSystem", Const, 1, ""},
    +		{"ExtKeyUsageIPSECTunnel", Const, 1, ""},
    +		{"ExtKeyUsageIPSECUser", Const, 1, ""},
    +		{"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10, ""},
    +		{"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10, ""},
    +		{"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1, ""},
    +		{"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1, ""},
    +		{"ExtKeyUsageOCSPSigning", Const, 0, ""},
    +		{"ExtKeyUsageServerAuth", Const, 0, ""},
    +		{"ExtKeyUsageTimeStamping", Const, 0, ""},
    +		{"HostnameError", Type, 0, ""},
    +		{"HostnameError.Certificate", Field, 0, ""},
    +		{"HostnameError.Host", Field, 0, ""},
    +		{"IncompatibleUsage", Const, 1, ""},
    +		{"IncorrectPasswordError", Var, 1, ""},
    +		{"InsecureAlgorithmError", Type, 6, ""},
    +		{"InvalidReason", Type, 0, ""},
    +		{"IsEncryptedPEMBlock", Func, 1, "func(b *pem.Block) bool"},
    +		{"KeyUsage", Type, 0, ""},
    +		{"KeyUsageCRLSign", Const, 0, ""},
    +		{"KeyUsageCertSign", Const, 0, ""},
    +		{"KeyUsageContentCommitment", Const, 0, ""},
    +		{"KeyUsageDataEncipherment", Const, 0, ""},
    +		{"KeyUsageDecipherOnly", Const, 0, ""},
    +		{"KeyUsageDigitalSignature", Const, 0, ""},
    +		{"KeyUsageEncipherOnly", Const, 0, ""},
    +		{"KeyUsageKeyAgreement", Const, 0, ""},
    +		{"KeyUsageKeyEncipherment", Const, 0, ""},
    +		{"MD2WithRSA", Const, 0, ""},
    +		{"MD5WithRSA", Const, 0, ""},
    +		{"MarshalECPrivateKey", Func, 2, "func(key *ecdsa.PrivateKey) ([]byte, error)"},
    +		{"MarshalPKCS1PrivateKey", Func, 0, "func(key *rsa.PrivateKey) []byte"},
    +		{"MarshalPKCS1PublicKey", Func, 10, "func(key *rsa.PublicKey) []byte"},
    +		{"MarshalPKCS8PrivateKey", Func, 10, "func(key any) ([]byte, error)"},
    +		{"MarshalPKIXPublicKey", Func, 0, "func(pub any) ([]byte, error)"},
    +		{"NameConstraintsWithoutSANs", Const, 10, ""},
    +		{"NameMismatch", Const, 8, ""},
    +		{"NewCertPool", Func, 0, "func() *CertPool"},
    +		{"NoValidChains", Const, 24, ""},
    +		{"NotAuthorizedToSign", Const, 0, ""},
    +		{"OID", Type, 22, ""},
    +		{"OIDFromInts", Func, 22, "func(oid []uint64) (OID, error)"},
    +		{"PEMCipher", Type, 1, ""},
    +		{"PEMCipher3DES", Const, 1, ""},
    +		{"PEMCipherAES128", Const, 1, ""},
    +		{"PEMCipherAES192", Const, 1, ""},
    +		{"PEMCipherAES256", Const, 1, ""},
    +		{"PEMCipherDES", Const, 1, ""},
    +		{"ParseCRL", Func, 0, "func(crlBytes []byte) (*pkix.CertificateList, error)"},
    +		{"ParseCertificate", Func, 0, "func(der []byte) (*Certificate, error)"},
    +		{"ParseCertificateRequest", Func, 3, "func(asn1Data []byte) (*CertificateRequest, error)"},
    +		{"ParseCertificates", Func, 0, "func(der []byte) ([]*Certificate, error)"},
    +		{"ParseDERCRL", Func, 0, "func(derBytes []byte) (*pkix.CertificateList, error)"},
    +		{"ParseECPrivateKey", Func, 1, "func(der []byte) (*ecdsa.PrivateKey, error)"},
    +		{"ParseOID", Func, 23, "func(oid string) (OID, error)"},
    +		{"ParsePKCS1PrivateKey", Func, 0, "func(der []byte) (*rsa.PrivateKey, error)"},
    +		{"ParsePKCS1PublicKey", Func, 10, "func(der []byte) (*rsa.PublicKey, error)"},
    +		{"ParsePKCS8PrivateKey", Func, 0, "func(der []byte) (key any, err error)"},
    +		{"ParsePKIXPublicKey", Func, 0, "func(derBytes []byte) (pub any, err error)"},
    +		{"ParseRevocationList", Func, 19, "func(der []byte) (*RevocationList, error)"},
    +		{"PolicyMapping", Type, 24, ""},
    +		{"PolicyMapping.IssuerDomainPolicy", Field, 24, ""},
    +		{"PolicyMapping.SubjectDomainPolicy", Field, 24, ""},
    +		{"PublicKeyAlgorithm", Type, 0, ""},
    +		{"PureEd25519", Const, 13, ""},
    +		{"RSA", Const, 0, ""},
    +		{"RevocationList", Type, 15, ""},
    +		{"RevocationList.AuthorityKeyId", Field, 19, ""},
    +		{"RevocationList.Extensions", Field, 19, ""},
    +		{"RevocationList.ExtraExtensions", Field, 15, ""},
    +		{"RevocationList.Issuer", Field, 19, ""},
    +		{"RevocationList.NextUpdate", Field, 15, ""},
    +		{"RevocationList.Number", Field, 15, ""},
    +		{"RevocationList.Raw", Field, 19, ""},
    +		{"RevocationList.RawIssuer", Field, 19, ""},
    +		{"RevocationList.RawTBSRevocationList", Field, 19, ""},
    +		{"RevocationList.RevokedCertificateEntries", Field, 21, ""},
    +		{"RevocationList.RevokedCertificates", Field, 15, ""},
    +		{"RevocationList.Signature", Field, 19, ""},
    +		{"RevocationList.SignatureAlgorithm", Field, 15, ""},
    +		{"RevocationList.ThisUpdate", Field, 15, ""},
    +		{"RevocationListEntry", Type, 21, ""},
    +		{"RevocationListEntry.Extensions", Field, 21, ""},
    +		{"RevocationListEntry.ExtraExtensions", Field, 21, ""},
    +		{"RevocationListEntry.Raw", Field, 21, ""},
    +		{"RevocationListEntry.ReasonCode", Field, 21, ""},
    +		{"RevocationListEntry.RevocationTime", Field, 21, ""},
    +		{"RevocationListEntry.SerialNumber", Field, 21, ""},
    +		{"SHA1WithRSA", Const, 0, ""},
    +		{"SHA256WithRSA", Const, 0, ""},
    +		{"SHA256WithRSAPSS", Const, 8, ""},
    +		{"SHA384WithRSA", Const, 0, ""},
    +		{"SHA384WithRSAPSS", Const, 8, ""},
    +		{"SHA512WithRSA", Const, 0, ""},
    +		{"SHA512WithRSAPSS", Const, 8, ""},
    +		{"SetFallbackRoots", Func, 20, "func(roots *CertPool)"},
    +		{"SignatureAlgorithm", Type, 0, ""},
    +		{"SystemCertPool", Func, 7, "func() (*CertPool, error)"},
    +		{"SystemRootsError", Type, 1, ""},
    +		{"SystemRootsError.Err", Field, 7, ""},
    +		{"TooManyConstraints", Const, 10, ""},
    +		{"TooManyIntermediates", Const, 0, ""},
    +		{"UnconstrainedName", Const, 10, ""},
    +		{"UnhandledCriticalExtension", Type, 0, ""},
    +		{"UnknownAuthorityError", Type, 0, ""},
    +		{"UnknownAuthorityError.Cert", Field, 8, ""},
    +		{"UnknownPublicKeyAlgorithm", Const, 0, ""},
    +		{"UnknownSignatureAlgorithm", Const, 0, ""},
    +		{"VerifyOptions", Type, 0, ""},
    +		{"VerifyOptions.CertificatePolicies", Field, 24, ""},
    +		{"VerifyOptions.CurrentTime", Field, 0, ""},
    +		{"VerifyOptions.DNSName", Field, 0, ""},
    +		{"VerifyOptions.Intermediates", Field, 0, ""},
    +		{"VerifyOptions.KeyUsages", Field, 1, ""},
    +		{"VerifyOptions.MaxConstraintComparisions", Field, 10, ""},
    +		{"VerifyOptions.Roots", Field, 0, ""},
     	},
     	"crypto/x509/pkix": {
    -		{"(*CertificateList).HasExpired", Method, 0},
    -		{"(*Name).FillFromRDNSequence", Method, 0},
    -		{"(Name).String", Method, 10},
    -		{"(Name).ToRDNSequence", Method, 0},
    -		{"(RDNSequence).String", Method, 10},
    -		{"AlgorithmIdentifier", Type, 0},
    -		{"AlgorithmIdentifier.Algorithm", Field, 0},
    -		{"AlgorithmIdentifier.Parameters", Field, 0},
    -		{"AttributeTypeAndValue", Type, 0},
    -		{"AttributeTypeAndValue.Type", Field, 0},
    -		{"AttributeTypeAndValue.Value", Field, 0},
    -		{"AttributeTypeAndValueSET", Type, 3},
    -		{"AttributeTypeAndValueSET.Type", Field, 3},
    -		{"AttributeTypeAndValueSET.Value", Field, 3},
    -		{"CertificateList", Type, 0},
    -		{"CertificateList.SignatureAlgorithm", Field, 0},
    -		{"CertificateList.SignatureValue", Field, 0},
    -		{"CertificateList.TBSCertList", Field, 0},
    -		{"Extension", Type, 0},
    -		{"Extension.Critical", Field, 0},
    -		{"Extension.Id", Field, 0},
    -		{"Extension.Value", Field, 0},
    -		{"Name", Type, 0},
    -		{"Name.CommonName", Field, 0},
    -		{"Name.Country", Field, 0},
    -		{"Name.ExtraNames", Field, 5},
    -		{"Name.Locality", Field, 0},
    -		{"Name.Names", Field, 0},
    -		{"Name.Organization", Field, 0},
    -		{"Name.OrganizationalUnit", Field, 0},
    -		{"Name.PostalCode", Field, 0},
    -		{"Name.Province", Field, 0},
    -		{"Name.SerialNumber", Field, 0},
    -		{"Name.StreetAddress", Field, 0},
    -		{"RDNSequence", Type, 0},
    -		{"RelativeDistinguishedNameSET", Type, 0},
    -		{"RevokedCertificate", Type, 0},
    -		{"RevokedCertificate.Extensions", Field, 0},
    -		{"RevokedCertificate.RevocationTime", Field, 0},
    -		{"RevokedCertificate.SerialNumber", Field, 0},
    -		{"TBSCertificateList", Type, 0},
    -		{"TBSCertificateList.Extensions", Field, 0},
    -		{"TBSCertificateList.Issuer", Field, 0},
    -		{"TBSCertificateList.NextUpdate", Field, 0},
    -		{"TBSCertificateList.Raw", Field, 0},
    -		{"TBSCertificateList.RevokedCertificates", Field, 0},
    -		{"TBSCertificateList.Signature", Field, 0},
    -		{"TBSCertificateList.ThisUpdate", Field, 0},
    -		{"TBSCertificateList.Version", Field, 0},
    +		{"(*CertificateList).HasExpired", Method, 0, ""},
    +		{"(*Name).FillFromRDNSequence", Method, 0, ""},
    +		{"(Name).String", Method, 10, ""},
    +		{"(Name).ToRDNSequence", Method, 0, ""},
    +		{"(RDNSequence).String", Method, 10, ""},
    +		{"AlgorithmIdentifier", Type, 0, ""},
    +		{"AlgorithmIdentifier.Algorithm", Field, 0, ""},
    +		{"AlgorithmIdentifier.Parameters", Field, 0, ""},
    +		{"AttributeTypeAndValue", Type, 0, ""},
    +		{"AttributeTypeAndValue.Type", Field, 0, ""},
    +		{"AttributeTypeAndValue.Value", Field, 0, ""},
    +		{"AttributeTypeAndValueSET", Type, 3, ""},
    +		{"AttributeTypeAndValueSET.Type", Field, 3, ""},
    +		{"AttributeTypeAndValueSET.Value", Field, 3, ""},
    +		{"CertificateList", Type, 0, ""},
    +		{"CertificateList.SignatureAlgorithm", Field, 0, ""},
    +		{"CertificateList.SignatureValue", Field, 0, ""},
    +		{"CertificateList.TBSCertList", Field, 0, ""},
    +		{"Extension", Type, 0, ""},
    +		{"Extension.Critical", Field, 0, ""},
    +		{"Extension.Id", Field, 0, ""},
    +		{"Extension.Value", Field, 0, ""},
    +		{"Name", Type, 0, ""},
    +		{"Name.CommonName", Field, 0, ""},
    +		{"Name.Country", Field, 0, ""},
    +		{"Name.ExtraNames", Field, 5, ""},
    +		{"Name.Locality", Field, 0, ""},
    +		{"Name.Names", Field, 0, ""},
    +		{"Name.Organization", Field, 0, ""},
    +		{"Name.OrganizationalUnit", Field, 0, ""},
    +		{"Name.PostalCode", Field, 0, ""},
    +		{"Name.Province", Field, 0, ""},
    +		{"Name.SerialNumber", Field, 0, ""},
    +		{"Name.StreetAddress", Field, 0, ""},
    +		{"RDNSequence", Type, 0, ""},
    +		{"RelativeDistinguishedNameSET", Type, 0, ""},
    +		{"RevokedCertificate", Type, 0, ""},
    +		{"RevokedCertificate.Extensions", Field, 0, ""},
    +		{"RevokedCertificate.RevocationTime", Field, 0, ""},
    +		{"RevokedCertificate.SerialNumber", Field, 0, ""},
    +		{"TBSCertificateList", Type, 0, ""},
    +		{"TBSCertificateList.Extensions", Field, 0, ""},
    +		{"TBSCertificateList.Issuer", Field, 0, ""},
    +		{"TBSCertificateList.NextUpdate", Field, 0, ""},
    +		{"TBSCertificateList.Raw", Field, 0, ""},
    +		{"TBSCertificateList.RevokedCertificates", Field, 0, ""},
    +		{"TBSCertificateList.Signature", Field, 0, ""},
    +		{"TBSCertificateList.ThisUpdate", Field, 0, ""},
    +		{"TBSCertificateList.Version", Field, 0, ""},
     	},
     	"database/sql": {
    -		{"(*ColumnType).DatabaseTypeName", Method, 8},
    -		{"(*ColumnType).DecimalSize", Method, 8},
    -		{"(*ColumnType).Length", Method, 8},
    -		{"(*ColumnType).Name", Method, 8},
    -		{"(*ColumnType).Nullable", Method, 8},
    -		{"(*ColumnType).ScanType", Method, 8},
    -		{"(*Conn).BeginTx", Method, 9},
    -		{"(*Conn).Close", Method, 9},
    -		{"(*Conn).ExecContext", Method, 9},
    -		{"(*Conn).PingContext", Method, 9},
    -		{"(*Conn).PrepareContext", Method, 9},
    -		{"(*Conn).QueryContext", Method, 9},
    -		{"(*Conn).QueryRowContext", Method, 9},
    -		{"(*Conn).Raw", Method, 13},
    -		{"(*DB).Begin", Method, 0},
    -		{"(*DB).BeginTx", Method, 8},
    -		{"(*DB).Close", Method, 0},
    -		{"(*DB).Conn", Method, 9},
    -		{"(*DB).Driver", Method, 0},
    -		{"(*DB).Exec", Method, 0},
    -		{"(*DB).ExecContext", Method, 8},
    -		{"(*DB).Ping", Method, 1},
    -		{"(*DB).PingContext", Method, 8},
    -		{"(*DB).Prepare", Method, 0},
    -		{"(*DB).PrepareContext", Method, 8},
    -		{"(*DB).Query", Method, 0},
    -		{"(*DB).QueryContext", Method, 8},
    -		{"(*DB).QueryRow", Method, 0},
    -		{"(*DB).QueryRowContext", Method, 8},
    -		{"(*DB).SetConnMaxIdleTime", Method, 15},
    -		{"(*DB).SetConnMaxLifetime", Method, 6},
    -		{"(*DB).SetMaxIdleConns", Method, 1},
    -		{"(*DB).SetMaxOpenConns", Method, 2},
    -		{"(*DB).Stats", Method, 5},
    -		{"(*Null).Scan", Method, 22},
    -		{"(*NullBool).Scan", Method, 0},
    -		{"(*NullByte).Scan", Method, 17},
    -		{"(*NullFloat64).Scan", Method, 0},
    -		{"(*NullInt16).Scan", Method, 17},
    -		{"(*NullInt32).Scan", Method, 13},
    -		{"(*NullInt64).Scan", Method, 0},
    -		{"(*NullString).Scan", Method, 0},
    -		{"(*NullTime).Scan", Method, 13},
    -		{"(*Row).Err", Method, 15},
    -		{"(*Row).Scan", Method, 0},
    -		{"(*Rows).Close", Method, 0},
    -		{"(*Rows).ColumnTypes", Method, 8},
    -		{"(*Rows).Columns", Method, 0},
    -		{"(*Rows).Err", Method, 0},
    -		{"(*Rows).Next", Method, 0},
    -		{"(*Rows).NextResultSet", Method, 8},
    -		{"(*Rows).Scan", Method, 0},
    -		{"(*Stmt).Close", Method, 0},
    -		{"(*Stmt).Exec", Method, 0},
    -		{"(*Stmt).ExecContext", Method, 8},
    -		{"(*Stmt).Query", Method, 0},
    -		{"(*Stmt).QueryContext", Method, 8},
    -		{"(*Stmt).QueryRow", Method, 0},
    -		{"(*Stmt).QueryRowContext", Method, 8},
    -		{"(*Tx).Commit", Method, 0},
    -		{"(*Tx).Exec", Method, 0},
    -		{"(*Tx).ExecContext", Method, 8},
    -		{"(*Tx).Prepare", Method, 0},
    -		{"(*Tx).PrepareContext", Method, 8},
    -		{"(*Tx).Query", Method, 0},
    -		{"(*Tx).QueryContext", Method, 8},
    -		{"(*Tx).QueryRow", Method, 0},
    -		{"(*Tx).QueryRowContext", Method, 8},
    -		{"(*Tx).Rollback", Method, 0},
    -		{"(*Tx).Stmt", Method, 0},
    -		{"(*Tx).StmtContext", Method, 8},
    -		{"(IsolationLevel).String", Method, 11},
    -		{"(Null).Value", Method, 22},
    -		{"(NullBool).Value", Method, 0},
    -		{"(NullByte).Value", Method, 17},
    -		{"(NullFloat64).Value", Method, 0},
    -		{"(NullInt16).Value", Method, 17},
    -		{"(NullInt32).Value", Method, 13},
    -		{"(NullInt64).Value", Method, 0},
    -		{"(NullString).Value", Method, 0},
    -		{"(NullTime).Value", Method, 13},
    -		{"ColumnType", Type, 8},
    -		{"Conn", Type, 9},
    -		{"DB", Type, 0},
    -		{"DBStats", Type, 5},
    -		{"DBStats.Idle", Field, 11},
    -		{"DBStats.InUse", Field, 11},
    -		{"DBStats.MaxIdleClosed", Field, 11},
    -		{"DBStats.MaxIdleTimeClosed", Field, 15},
    -		{"DBStats.MaxLifetimeClosed", Field, 11},
    -		{"DBStats.MaxOpenConnections", Field, 11},
    -		{"DBStats.OpenConnections", Field, 5},
    -		{"DBStats.WaitCount", Field, 11},
    -		{"DBStats.WaitDuration", Field, 11},
    -		{"Drivers", Func, 4},
    -		{"ErrConnDone", Var, 9},
    -		{"ErrNoRows", Var, 0},
    -		{"ErrTxDone", Var, 0},
    -		{"IsolationLevel", Type, 8},
    -		{"LevelDefault", Const, 8},
    -		{"LevelLinearizable", Const, 8},
    -		{"LevelReadCommitted", Const, 8},
    -		{"LevelReadUncommitted", Const, 8},
    -		{"LevelRepeatableRead", Const, 8},
    -		{"LevelSerializable", Const, 8},
    -		{"LevelSnapshot", Const, 8},
    -		{"LevelWriteCommitted", Const, 8},
    -		{"Named", Func, 8},
    -		{"NamedArg", Type, 8},
    -		{"NamedArg.Name", Field, 8},
    -		{"NamedArg.Value", Field, 8},
    -		{"Null", Type, 22},
    -		{"Null.V", Field, 22},
    -		{"Null.Valid", Field, 22},
    -		{"NullBool", Type, 0},
    -		{"NullBool.Bool", Field, 0},
    -		{"NullBool.Valid", Field, 0},
    -		{"NullByte", Type, 17},
    -		{"NullByte.Byte", Field, 17},
    -		{"NullByte.Valid", Field, 17},
    -		{"NullFloat64", Type, 0},
    -		{"NullFloat64.Float64", Field, 0},
    -		{"NullFloat64.Valid", Field, 0},
    -		{"NullInt16", Type, 17},
    -		{"NullInt16.Int16", Field, 17},
    -		{"NullInt16.Valid", Field, 17},
    -		{"NullInt32", Type, 13},
    -		{"NullInt32.Int32", Field, 13},
    -		{"NullInt32.Valid", Field, 13},
    -		{"NullInt64", Type, 0},
    -		{"NullInt64.Int64", Field, 0},
    -		{"NullInt64.Valid", Field, 0},
    -		{"NullString", Type, 0},
    -		{"NullString.String", Field, 0},
    -		{"NullString.Valid", Field, 0},
    -		{"NullTime", Type, 13},
    -		{"NullTime.Time", Field, 13},
    -		{"NullTime.Valid", Field, 13},
    -		{"Open", Func, 0},
    -		{"OpenDB", Func, 10},
    -		{"Out", Type, 9},
    -		{"Out.Dest", Field, 9},
    -		{"Out.In", Field, 9},
    -		{"RawBytes", Type, 0},
    -		{"Register", Func, 0},
    -		{"Result", Type, 0},
    -		{"Row", Type, 0},
    -		{"Rows", Type, 0},
    -		{"Scanner", Type, 0},
    -		{"Stmt", Type, 0},
    -		{"Tx", Type, 0},
    -		{"TxOptions", Type, 8},
    -		{"TxOptions.Isolation", Field, 8},
    -		{"TxOptions.ReadOnly", Field, 8},
    +		{"(*ColumnType).DatabaseTypeName", Method, 8, ""},
    +		{"(*ColumnType).DecimalSize", Method, 8, ""},
    +		{"(*ColumnType).Length", Method, 8, ""},
    +		{"(*ColumnType).Name", Method, 8, ""},
    +		{"(*ColumnType).Nullable", Method, 8, ""},
    +		{"(*ColumnType).ScanType", Method, 8, ""},
    +		{"(*Conn).BeginTx", Method, 9, ""},
    +		{"(*Conn).Close", Method, 9, ""},
    +		{"(*Conn).ExecContext", Method, 9, ""},
    +		{"(*Conn).PingContext", Method, 9, ""},
    +		{"(*Conn).PrepareContext", Method, 9, ""},
    +		{"(*Conn).QueryContext", Method, 9, ""},
    +		{"(*Conn).QueryRowContext", Method, 9, ""},
    +		{"(*Conn).Raw", Method, 13, ""},
    +		{"(*DB).Begin", Method, 0, ""},
    +		{"(*DB).BeginTx", Method, 8, ""},
    +		{"(*DB).Close", Method, 0, ""},
    +		{"(*DB).Conn", Method, 9, ""},
    +		{"(*DB).Driver", Method, 0, ""},
    +		{"(*DB).Exec", Method, 0, ""},
    +		{"(*DB).ExecContext", Method, 8, ""},
    +		{"(*DB).Ping", Method, 1, ""},
    +		{"(*DB).PingContext", Method, 8, ""},
    +		{"(*DB).Prepare", Method, 0, ""},
    +		{"(*DB).PrepareContext", Method, 8, ""},
    +		{"(*DB).Query", Method, 0, ""},
    +		{"(*DB).QueryContext", Method, 8, ""},
    +		{"(*DB).QueryRow", Method, 0, ""},
    +		{"(*DB).QueryRowContext", Method, 8, ""},
    +		{"(*DB).SetConnMaxIdleTime", Method, 15, ""},
    +		{"(*DB).SetConnMaxLifetime", Method, 6, ""},
    +		{"(*DB).SetMaxIdleConns", Method, 1, ""},
    +		{"(*DB).SetMaxOpenConns", Method, 2, ""},
    +		{"(*DB).Stats", Method, 5, ""},
    +		{"(*Null).Scan", Method, 22, ""},
    +		{"(*NullBool).Scan", Method, 0, ""},
    +		{"(*NullByte).Scan", Method, 17, ""},
    +		{"(*NullFloat64).Scan", Method, 0, ""},
    +		{"(*NullInt16).Scan", Method, 17, ""},
    +		{"(*NullInt32).Scan", Method, 13, ""},
    +		{"(*NullInt64).Scan", Method, 0, ""},
    +		{"(*NullString).Scan", Method, 0, ""},
    +		{"(*NullTime).Scan", Method, 13, ""},
    +		{"(*Row).Err", Method, 15, ""},
    +		{"(*Row).Scan", Method, 0, ""},
    +		{"(*Rows).Close", Method, 0, ""},
    +		{"(*Rows).ColumnTypes", Method, 8, ""},
    +		{"(*Rows).Columns", Method, 0, ""},
    +		{"(*Rows).Err", Method, 0, ""},
    +		{"(*Rows).Next", Method, 0, ""},
    +		{"(*Rows).NextResultSet", Method, 8, ""},
    +		{"(*Rows).Scan", Method, 0, ""},
    +		{"(*Stmt).Close", Method, 0, ""},
    +		{"(*Stmt).Exec", Method, 0, ""},
    +		{"(*Stmt).ExecContext", Method, 8, ""},
    +		{"(*Stmt).Query", Method, 0, ""},
    +		{"(*Stmt).QueryContext", Method, 8, ""},
    +		{"(*Stmt).QueryRow", Method, 0, ""},
    +		{"(*Stmt).QueryRowContext", Method, 8, ""},
    +		{"(*Tx).Commit", Method, 0, ""},
    +		{"(*Tx).Exec", Method, 0, ""},
    +		{"(*Tx).ExecContext", Method, 8, ""},
    +		{"(*Tx).Prepare", Method, 0, ""},
    +		{"(*Tx).PrepareContext", Method, 8, ""},
    +		{"(*Tx).Query", Method, 0, ""},
    +		{"(*Tx).QueryContext", Method, 8, ""},
    +		{"(*Tx).QueryRow", Method, 0, ""},
    +		{"(*Tx).QueryRowContext", Method, 8, ""},
    +		{"(*Tx).Rollback", Method, 0, ""},
    +		{"(*Tx).Stmt", Method, 0, ""},
    +		{"(*Tx).StmtContext", Method, 8, ""},
    +		{"(IsolationLevel).String", Method, 11, ""},
    +		{"(Null).Value", Method, 22, ""},
    +		{"(NullBool).Value", Method, 0, ""},
    +		{"(NullByte).Value", Method, 17, ""},
    +		{"(NullFloat64).Value", Method, 0, ""},
    +		{"(NullInt16).Value", Method, 17, ""},
    +		{"(NullInt32).Value", Method, 13, ""},
    +		{"(NullInt64).Value", Method, 0, ""},
    +		{"(NullString).Value", Method, 0, ""},
    +		{"(NullTime).Value", Method, 13, ""},
    +		{"ColumnType", Type, 8, ""},
    +		{"Conn", Type, 9, ""},
    +		{"DB", Type, 0, ""},
    +		{"DBStats", Type, 5, ""},
    +		{"DBStats.Idle", Field, 11, ""},
    +		{"DBStats.InUse", Field, 11, ""},
    +		{"DBStats.MaxIdleClosed", Field, 11, ""},
    +		{"DBStats.MaxIdleTimeClosed", Field, 15, ""},
    +		{"DBStats.MaxLifetimeClosed", Field, 11, ""},
    +		{"DBStats.MaxOpenConnections", Field, 11, ""},
    +		{"DBStats.OpenConnections", Field, 5, ""},
    +		{"DBStats.WaitCount", Field, 11, ""},
    +		{"DBStats.WaitDuration", Field, 11, ""},
    +		{"Drivers", Func, 4, "func() []string"},
    +		{"ErrConnDone", Var, 9, ""},
    +		{"ErrNoRows", Var, 0, ""},
    +		{"ErrTxDone", Var, 0, ""},
    +		{"IsolationLevel", Type, 8, ""},
    +		{"LevelDefault", Const, 8, ""},
    +		{"LevelLinearizable", Const, 8, ""},
    +		{"LevelReadCommitted", Const, 8, ""},
    +		{"LevelReadUncommitted", Const, 8, ""},
    +		{"LevelRepeatableRead", Const, 8, ""},
    +		{"LevelSerializable", Const, 8, ""},
    +		{"LevelSnapshot", Const, 8, ""},
    +		{"LevelWriteCommitted", Const, 8, ""},
    +		{"Named", Func, 8, "func(name string, value any) NamedArg"},
    +		{"NamedArg", Type, 8, ""},
    +		{"NamedArg.Name", Field, 8, ""},
    +		{"NamedArg.Value", Field, 8, ""},
    +		{"Null", Type, 22, ""},
    +		{"Null.V", Field, 22, ""},
    +		{"Null.Valid", Field, 22, ""},
    +		{"NullBool", Type, 0, ""},
    +		{"NullBool.Bool", Field, 0, ""},
    +		{"NullBool.Valid", Field, 0, ""},
    +		{"NullByte", Type, 17, ""},
    +		{"NullByte.Byte", Field, 17, ""},
    +		{"NullByte.Valid", Field, 17, ""},
    +		{"NullFloat64", Type, 0, ""},
    +		{"NullFloat64.Float64", Field, 0, ""},
    +		{"NullFloat64.Valid", Field, 0, ""},
    +		{"NullInt16", Type, 17, ""},
    +		{"NullInt16.Int16", Field, 17, ""},
    +		{"NullInt16.Valid", Field, 17, ""},
    +		{"NullInt32", Type, 13, ""},
    +		{"NullInt32.Int32", Field, 13, ""},
    +		{"NullInt32.Valid", Field, 13, ""},
    +		{"NullInt64", Type, 0, ""},
    +		{"NullInt64.Int64", Field, 0, ""},
    +		{"NullInt64.Valid", Field, 0, ""},
    +		{"NullString", Type, 0, ""},
    +		{"NullString.String", Field, 0, ""},
    +		{"NullString.Valid", Field, 0, ""},
    +		{"NullTime", Type, 13, ""},
    +		{"NullTime.Time", Field, 13, ""},
    +		{"NullTime.Valid", Field, 13, ""},
    +		{"Open", Func, 0, "func(driverName string, dataSourceName string) (*DB, error)"},
    +		{"OpenDB", Func, 10, "func(c driver.Connector) *DB"},
    +		{"Out", Type, 9, ""},
    +		{"Out.Dest", Field, 9, ""},
    +		{"Out.In", Field, 9, ""},
    +		{"RawBytes", Type, 0, ""},
    +		{"Register", Func, 0, "func(name string, driver driver.Driver)"},
    +		{"Result", Type, 0, ""},
    +		{"Row", Type, 0, ""},
    +		{"Rows", Type, 0, ""},
    +		{"Scanner", Type, 0, ""},
    +		{"Stmt", Type, 0, ""},
    +		{"Tx", Type, 0, ""},
    +		{"TxOptions", Type, 8, ""},
    +		{"TxOptions.Isolation", Field, 8, ""},
    +		{"TxOptions.ReadOnly", Field, 8, ""},
     	},
     	"database/sql/driver": {
    -		{"(NotNull).ConvertValue", Method, 0},
    -		{"(Null).ConvertValue", Method, 0},
    -		{"(RowsAffected).LastInsertId", Method, 0},
    -		{"(RowsAffected).RowsAffected", Method, 0},
    -		{"Bool", Var, 0},
    -		{"ColumnConverter", Type, 0},
    -		{"Conn", Type, 0},
    -		{"ConnBeginTx", Type, 8},
    -		{"ConnPrepareContext", Type, 8},
    -		{"Connector", Type, 10},
    -		{"DefaultParameterConverter", Var, 0},
    -		{"Driver", Type, 0},
    -		{"DriverContext", Type, 10},
    -		{"ErrBadConn", Var, 0},
    -		{"ErrRemoveArgument", Var, 9},
    -		{"ErrSkip", Var, 0},
    -		{"Execer", Type, 0},
    -		{"ExecerContext", Type, 8},
    -		{"Int32", Var, 0},
    -		{"IsScanValue", Func, 0},
    -		{"IsValue", Func, 0},
    -		{"IsolationLevel", Type, 8},
    -		{"NamedValue", Type, 8},
    -		{"NamedValue.Name", Field, 8},
    -		{"NamedValue.Ordinal", Field, 8},
    -		{"NamedValue.Value", Field, 8},
    -		{"NamedValueChecker", Type, 9},
    -		{"NotNull", Type, 0},
    -		{"NotNull.Converter", Field, 0},
    -		{"Null", Type, 0},
    -		{"Null.Converter", Field, 0},
    -		{"Pinger", Type, 8},
    -		{"Queryer", Type, 1},
    -		{"QueryerContext", Type, 8},
    -		{"Result", Type, 0},
    -		{"ResultNoRows", Var, 0},
    -		{"Rows", Type, 0},
    -		{"RowsAffected", Type, 0},
    -		{"RowsColumnTypeDatabaseTypeName", Type, 8},
    -		{"RowsColumnTypeLength", Type, 8},
    -		{"RowsColumnTypeNullable", Type, 8},
    -		{"RowsColumnTypePrecisionScale", Type, 8},
    -		{"RowsColumnTypeScanType", Type, 8},
    -		{"RowsNextResultSet", Type, 8},
    -		{"SessionResetter", Type, 10},
    -		{"Stmt", Type, 0},
    -		{"StmtExecContext", Type, 8},
    -		{"StmtQueryContext", Type, 8},
    -		{"String", Var, 0},
    -		{"Tx", Type, 0},
    -		{"TxOptions", Type, 8},
    -		{"TxOptions.Isolation", Field, 8},
    -		{"TxOptions.ReadOnly", Field, 8},
    -		{"Validator", Type, 15},
    -		{"Value", Type, 0},
    -		{"ValueConverter", Type, 0},
    -		{"Valuer", Type, 0},
    +		{"(NotNull).ConvertValue", Method, 0, ""},
    +		{"(Null).ConvertValue", Method, 0, ""},
    +		{"(RowsAffected).LastInsertId", Method, 0, ""},
    +		{"(RowsAffected).RowsAffected", Method, 0, ""},
    +		{"Bool", Var, 0, ""},
    +		{"ColumnConverter", Type, 0, ""},
    +		{"Conn", Type, 0, ""},
    +		{"ConnBeginTx", Type, 8, ""},
    +		{"ConnPrepareContext", Type, 8, ""},
    +		{"Connector", Type, 10, ""},
    +		{"DefaultParameterConverter", Var, 0, ""},
    +		{"Driver", Type, 0, ""},
    +		{"DriverContext", Type, 10, ""},
    +		{"ErrBadConn", Var, 0, ""},
    +		{"ErrRemoveArgument", Var, 9, ""},
    +		{"ErrSkip", Var, 0, ""},
    +		{"Execer", Type, 0, ""},
    +		{"ExecerContext", Type, 8, ""},
    +		{"Int32", Var, 0, ""},
    +		{"IsScanValue", Func, 0, "func(v any) bool"},
    +		{"IsValue", Func, 0, "func(v any) bool"},
    +		{"IsolationLevel", Type, 8, ""},
    +		{"NamedValue", Type, 8, ""},
    +		{"NamedValue.Name", Field, 8, ""},
    +		{"NamedValue.Ordinal", Field, 8, ""},
    +		{"NamedValue.Value", Field, 8, ""},
    +		{"NamedValueChecker", Type, 9, ""},
    +		{"NotNull", Type, 0, ""},
    +		{"NotNull.Converter", Field, 0, ""},
    +		{"Null", Type, 0, ""},
    +		{"Null.Converter", Field, 0, ""},
    +		{"Pinger", Type, 8, ""},
    +		{"Queryer", Type, 1, ""},
    +		{"QueryerContext", Type, 8, ""},
    +		{"Result", Type, 0, ""},
    +		{"ResultNoRows", Var, 0, ""},
    +		{"Rows", Type, 0, ""},
    +		{"RowsAffected", Type, 0, ""},
    +		{"RowsColumnTypeDatabaseTypeName", Type, 8, ""},
    +		{"RowsColumnTypeLength", Type, 8, ""},
    +		{"RowsColumnTypeNullable", Type, 8, ""},
    +		{"RowsColumnTypePrecisionScale", Type, 8, ""},
    +		{"RowsColumnTypeScanType", Type, 8, ""},
    +		{"RowsNextResultSet", Type, 8, ""},
    +		{"SessionResetter", Type, 10, ""},
    +		{"Stmt", Type, 0, ""},
    +		{"StmtExecContext", Type, 8, ""},
    +		{"StmtQueryContext", Type, 8, ""},
    +		{"String", Var, 0, ""},
    +		{"Tx", Type, 0, ""},
    +		{"TxOptions", Type, 8, ""},
    +		{"TxOptions.Isolation", Field, 8, ""},
    +		{"TxOptions.ReadOnly", Field, 8, ""},
    +		{"Validator", Type, 15, ""},
    +		{"Value", Type, 0, ""},
    +		{"ValueConverter", Type, 0, ""},
    +		{"Valuer", Type, 0, ""},
     	},
     	"debug/buildinfo": {
    -		{"BuildInfo", Type, 18},
    -		{"Read", Func, 18},
    -		{"ReadFile", Func, 18},
    +		{"BuildInfo", Type, 18, ""},
    +		{"Read", Func, 18, "func(r io.ReaderAt) (*BuildInfo, error)"},
    +		{"ReadFile", Func, 18, "func(name string) (info *BuildInfo, err error)"},
     	},
     	"debug/dwarf": {
    -		{"(*AddrType).Basic", Method, 0},
    -		{"(*AddrType).Common", Method, 0},
    -		{"(*AddrType).Size", Method, 0},
    -		{"(*AddrType).String", Method, 0},
    -		{"(*ArrayType).Common", Method, 0},
    -		{"(*ArrayType).Size", Method, 0},
    -		{"(*ArrayType).String", Method, 0},
    -		{"(*BasicType).Basic", Method, 0},
    -		{"(*BasicType).Common", Method, 0},
    -		{"(*BasicType).Size", Method, 0},
    -		{"(*BasicType).String", Method, 0},
    -		{"(*BoolType).Basic", Method, 0},
    -		{"(*BoolType).Common", Method, 0},
    -		{"(*BoolType).Size", Method, 0},
    -		{"(*BoolType).String", Method, 0},
    -		{"(*CharType).Basic", Method, 0},
    -		{"(*CharType).Common", Method, 0},
    -		{"(*CharType).Size", Method, 0},
    -		{"(*CharType).String", Method, 0},
    -		{"(*CommonType).Common", Method, 0},
    -		{"(*CommonType).Size", Method, 0},
    -		{"(*ComplexType).Basic", Method, 0},
    -		{"(*ComplexType).Common", Method, 0},
    -		{"(*ComplexType).Size", Method, 0},
    -		{"(*ComplexType).String", Method, 0},
    -		{"(*Data).AddSection", Method, 14},
    -		{"(*Data).AddTypes", Method, 3},
    -		{"(*Data).LineReader", Method, 5},
    -		{"(*Data).Ranges", Method, 7},
    -		{"(*Data).Reader", Method, 0},
    -		{"(*Data).Type", Method, 0},
    -		{"(*DotDotDotType).Common", Method, 0},
    -		{"(*DotDotDotType).Size", Method, 0},
    -		{"(*DotDotDotType).String", Method, 0},
    -		{"(*Entry).AttrField", Method, 5},
    -		{"(*Entry).Val", Method, 0},
    -		{"(*EnumType).Common", Method, 0},
    -		{"(*EnumType).Size", Method, 0},
    -		{"(*EnumType).String", Method, 0},
    -		{"(*FloatType).Basic", Method, 0},
    -		{"(*FloatType).Common", Method, 0},
    -		{"(*FloatType).Size", Method, 0},
    -		{"(*FloatType).String", Method, 0},
    -		{"(*FuncType).Common", Method, 0},
    -		{"(*FuncType).Size", Method, 0},
    -		{"(*FuncType).String", Method, 0},
    -		{"(*IntType).Basic", Method, 0},
    -		{"(*IntType).Common", Method, 0},
    -		{"(*IntType).Size", Method, 0},
    -		{"(*IntType).String", Method, 0},
    -		{"(*LineReader).Files", Method, 14},
    -		{"(*LineReader).Next", Method, 5},
    -		{"(*LineReader).Reset", Method, 5},
    -		{"(*LineReader).Seek", Method, 5},
    -		{"(*LineReader).SeekPC", Method, 5},
    -		{"(*LineReader).Tell", Method, 5},
    -		{"(*PtrType).Common", Method, 0},
    -		{"(*PtrType).Size", Method, 0},
    -		{"(*PtrType).String", Method, 0},
    -		{"(*QualType).Common", Method, 0},
    -		{"(*QualType).Size", Method, 0},
    -		{"(*QualType).String", Method, 0},
    -		{"(*Reader).AddressSize", Method, 5},
    -		{"(*Reader).ByteOrder", Method, 14},
    -		{"(*Reader).Next", Method, 0},
    -		{"(*Reader).Seek", Method, 0},
    -		{"(*Reader).SeekPC", Method, 7},
    -		{"(*Reader).SkipChildren", Method, 0},
    -		{"(*StructType).Common", Method, 0},
    -		{"(*StructType).Defn", Method, 0},
    -		{"(*StructType).Size", Method, 0},
    -		{"(*StructType).String", Method, 0},
    -		{"(*TypedefType).Common", Method, 0},
    -		{"(*TypedefType).Size", Method, 0},
    -		{"(*TypedefType).String", Method, 0},
    -		{"(*UcharType).Basic", Method, 0},
    -		{"(*UcharType).Common", Method, 0},
    -		{"(*UcharType).Size", Method, 0},
    -		{"(*UcharType).String", Method, 0},
    -		{"(*UintType).Basic", Method, 0},
    -		{"(*UintType).Common", Method, 0},
    -		{"(*UintType).Size", Method, 0},
    -		{"(*UintType).String", Method, 0},
    -		{"(*UnspecifiedType).Basic", Method, 4},
    -		{"(*UnspecifiedType).Common", Method, 4},
    -		{"(*UnspecifiedType).Size", Method, 4},
    -		{"(*UnspecifiedType).String", Method, 4},
    -		{"(*UnsupportedType).Common", Method, 13},
    -		{"(*UnsupportedType).Size", Method, 13},
    -		{"(*UnsupportedType).String", Method, 13},
    -		{"(*VoidType).Common", Method, 0},
    -		{"(*VoidType).Size", Method, 0},
    -		{"(*VoidType).String", Method, 0},
    -		{"(Attr).GoString", Method, 0},
    -		{"(Attr).String", Method, 0},
    -		{"(Class).GoString", Method, 5},
    -		{"(Class).String", Method, 5},
    -		{"(DecodeError).Error", Method, 0},
    -		{"(Tag).GoString", Method, 0},
    -		{"(Tag).String", Method, 0},
    -		{"AddrType", Type, 0},
    -		{"AddrType.BasicType", Field, 0},
    -		{"ArrayType", Type, 0},
    -		{"ArrayType.CommonType", Field, 0},
    -		{"ArrayType.Count", Field, 0},
    -		{"ArrayType.StrideBitSize", Field, 0},
    -		{"ArrayType.Type", Field, 0},
    -		{"Attr", Type, 0},
    -		{"AttrAbstractOrigin", Const, 0},
    -		{"AttrAccessibility", Const, 0},
    -		{"AttrAddrBase", Const, 14},
    -		{"AttrAddrClass", Const, 0},
    -		{"AttrAlignment", Const, 14},
    -		{"AttrAllocated", Const, 0},
    -		{"AttrArtificial", Const, 0},
    -		{"AttrAssociated", Const, 0},
    -		{"AttrBaseTypes", Const, 0},
    -		{"AttrBinaryScale", Const, 14},
    -		{"AttrBitOffset", Const, 0},
    -		{"AttrBitSize", Const, 0},
    -		{"AttrByteSize", Const, 0},
    -		{"AttrCallAllCalls", Const, 14},
    -		{"AttrCallAllSourceCalls", Const, 14},
    -		{"AttrCallAllTailCalls", Const, 14},
    -		{"AttrCallColumn", Const, 0},
    -		{"AttrCallDataLocation", Const, 14},
    -		{"AttrCallDataValue", Const, 14},
    -		{"AttrCallFile", Const, 0},
    -		{"AttrCallLine", Const, 0},
    -		{"AttrCallOrigin", Const, 14},
    -		{"AttrCallPC", Const, 14},
    -		{"AttrCallParameter", Const, 14},
    -		{"AttrCallReturnPC", Const, 14},
    -		{"AttrCallTailCall", Const, 14},
    -		{"AttrCallTarget", Const, 14},
    -		{"AttrCallTargetClobbered", Const, 14},
    -		{"AttrCallValue", Const, 14},
    -		{"AttrCalling", Const, 0},
    -		{"AttrCommonRef", Const, 0},
    -		{"AttrCompDir", Const, 0},
    -		{"AttrConstExpr", Const, 14},
    -		{"AttrConstValue", Const, 0},
    -		{"AttrContainingType", Const, 0},
    -		{"AttrCount", Const, 0},
    -		{"AttrDataBitOffset", Const, 14},
    -		{"AttrDataLocation", Const, 0},
    -		{"AttrDataMemberLoc", Const, 0},
    -		{"AttrDecimalScale", Const, 14},
    -		{"AttrDecimalSign", Const, 14},
    -		{"AttrDeclColumn", Const, 0},
    -		{"AttrDeclFile", Const, 0},
    -		{"AttrDeclLine", Const, 0},
    -		{"AttrDeclaration", Const, 0},
    -		{"AttrDefaultValue", Const, 0},
    -		{"AttrDefaulted", Const, 14},
    -		{"AttrDeleted", Const, 14},
    -		{"AttrDescription", Const, 0},
    -		{"AttrDigitCount", Const, 14},
    -		{"AttrDiscr", Const, 0},
    -		{"AttrDiscrList", Const, 0},
    -		{"AttrDiscrValue", Const, 0},
    -		{"AttrDwoName", Const, 14},
    -		{"AttrElemental", Const, 14},
    -		{"AttrEncoding", Const, 0},
    -		{"AttrEndianity", Const, 14},
    -		{"AttrEntrypc", Const, 0},
    -		{"AttrEnumClass", Const, 14},
    -		{"AttrExplicit", Const, 14},
    -		{"AttrExportSymbols", Const, 14},
    -		{"AttrExtension", Const, 0},
    -		{"AttrExternal", Const, 0},
    -		{"AttrFrameBase", Const, 0},
    -		{"AttrFriend", Const, 0},
    -		{"AttrHighpc", Const, 0},
    -		{"AttrIdentifierCase", Const, 0},
    -		{"AttrImport", Const, 0},
    -		{"AttrInline", Const, 0},
    -		{"AttrIsOptional", Const, 0},
    -		{"AttrLanguage", Const, 0},
    -		{"AttrLinkageName", Const, 14},
    -		{"AttrLocation", Const, 0},
    -		{"AttrLoclistsBase", Const, 14},
    -		{"AttrLowerBound", Const, 0},
    -		{"AttrLowpc", Const, 0},
    -		{"AttrMacroInfo", Const, 0},
    -		{"AttrMacros", Const, 14},
    -		{"AttrMainSubprogram", Const, 14},
    -		{"AttrMutable", Const, 14},
    -		{"AttrName", Const, 0},
    -		{"AttrNamelistItem", Const, 0},
    -		{"AttrNoreturn", Const, 14},
    -		{"AttrObjectPointer", Const, 14},
    -		{"AttrOrdering", Const, 0},
    -		{"AttrPictureString", Const, 14},
    -		{"AttrPriority", Const, 0},
    -		{"AttrProducer", Const, 0},
    -		{"AttrPrototyped", Const, 0},
    -		{"AttrPure", Const, 14},
    -		{"AttrRanges", Const, 0},
    -		{"AttrRank", Const, 14},
    -		{"AttrRecursive", Const, 14},
    -		{"AttrReference", Const, 14},
    -		{"AttrReturnAddr", Const, 0},
    -		{"AttrRnglistsBase", Const, 14},
    -		{"AttrRvalueReference", Const, 14},
    -		{"AttrSegment", Const, 0},
    -		{"AttrSibling", Const, 0},
    -		{"AttrSignature", Const, 14},
    -		{"AttrSmall", Const, 14},
    -		{"AttrSpecification", Const, 0},
    -		{"AttrStartScope", Const, 0},
    -		{"AttrStaticLink", Const, 0},
    -		{"AttrStmtList", Const, 0},
    -		{"AttrStrOffsetsBase", Const, 14},
    -		{"AttrStride", Const, 0},
    -		{"AttrStrideSize", Const, 0},
    -		{"AttrStringLength", Const, 0},
    -		{"AttrStringLengthBitSize", Const, 14},
    -		{"AttrStringLengthByteSize", Const, 14},
    -		{"AttrThreadsScaled", Const, 14},
    -		{"AttrTrampoline", Const, 0},
    -		{"AttrType", Const, 0},
    -		{"AttrUpperBound", Const, 0},
    -		{"AttrUseLocation", Const, 0},
    -		{"AttrUseUTF8", Const, 0},
    -		{"AttrVarParam", Const, 0},
    -		{"AttrVirtuality", Const, 0},
    -		{"AttrVisibility", Const, 0},
    -		{"AttrVtableElemLoc", Const, 0},
    -		{"BasicType", Type, 0},
    -		{"BasicType.BitOffset", Field, 0},
    -		{"BasicType.BitSize", Field, 0},
    -		{"BasicType.CommonType", Field, 0},
    -		{"BasicType.DataBitOffset", Field, 18},
    -		{"BoolType", Type, 0},
    -		{"BoolType.BasicType", Field, 0},
    -		{"CharType", Type, 0},
    -		{"CharType.BasicType", Field, 0},
    -		{"Class", Type, 5},
    -		{"ClassAddrPtr", Const, 14},
    -		{"ClassAddress", Const, 5},
    -		{"ClassBlock", Const, 5},
    -		{"ClassConstant", Const, 5},
    -		{"ClassExprLoc", Const, 5},
    -		{"ClassFlag", Const, 5},
    -		{"ClassLinePtr", Const, 5},
    -		{"ClassLocList", Const, 14},
    -		{"ClassLocListPtr", Const, 5},
    -		{"ClassMacPtr", Const, 5},
    -		{"ClassRangeListPtr", Const, 5},
    -		{"ClassReference", Const, 5},
    -		{"ClassReferenceAlt", Const, 5},
    -		{"ClassReferenceSig", Const, 5},
    -		{"ClassRngList", Const, 14},
    -		{"ClassRngListsPtr", Const, 14},
    -		{"ClassStrOffsetsPtr", Const, 14},
    -		{"ClassString", Const, 5},
    -		{"ClassStringAlt", Const, 5},
    -		{"ClassUnknown", Const, 6},
    -		{"CommonType", Type, 0},
    -		{"CommonType.ByteSize", Field, 0},
    -		{"CommonType.Name", Field, 0},
    -		{"ComplexType", Type, 0},
    -		{"ComplexType.BasicType", Field, 0},
    -		{"Data", Type, 0},
    -		{"DecodeError", Type, 0},
    -		{"DecodeError.Err", Field, 0},
    -		{"DecodeError.Name", Field, 0},
    -		{"DecodeError.Offset", Field, 0},
    -		{"DotDotDotType", Type, 0},
    -		{"DotDotDotType.CommonType", Field, 0},
    -		{"Entry", Type, 0},
    -		{"Entry.Children", Field, 0},
    -		{"Entry.Field", Field, 0},
    -		{"Entry.Offset", Field, 0},
    -		{"Entry.Tag", Field, 0},
    -		{"EnumType", Type, 0},
    -		{"EnumType.CommonType", Field, 0},
    -		{"EnumType.EnumName", Field, 0},
    -		{"EnumType.Val", Field, 0},
    -		{"EnumValue", Type, 0},
    -		{"EnumValue.Name", Field, 0},
    -		{"EnumValue.Val", Field, 0},
    -		{"ErrUnknownPC", Var, 5},
    -		{"Field", Type, 0},
    -		{"Field.Attr", Field, 0},
    -		{"Field.Class", Field, 5},
    -		{"Field.Val", Field, 0},
    -		{"FloatType", Type, 0},
    -		{"FloatType.BasicType", Field, 0},
    -		{"FuncType", Type, 0},
    -		{"FuncType.CommonType", Field, 0},
    -		{"FuncType.ParamType", Field, 0},
    -		{"FuncType.ReturnType", Field, 0},
    -		{"IntType", Type, 0},
    -		{"IntType.BasicType", Field, 0},
    -		{"LineEntry", Type, 5},
    -		{"LineEntry.Address", Field, 5},
    -		{"LineEntry.BasicBlock", Field, 5},
    -		{"LineEntry.Column", Field, 5},
    -		{"LineEntry.Discriminator", Field, 5},
    -		{"LineEntry.EndSequence", Field, 5},
    -		{"LineEntry.EpilogueBegin", Field, 5},
    -		{"LineEntry.File", Field, 5},
    -		{"LineEntry.ISA", Field, 5},
    -		{"LineEntry.IsStmt", Field, 5},
    -		{"LineEntry.Line", Field, 5},
    -		{"LineEntry.OpIndex", Field, 5},
    -		{"LineEntry.PrologueEnd", Field, 5},
    -		{"LineFile", Type, 5},
    -		{"LineFile.Length", Field, 5},
    -		{"LineFile.Mtime", Field, 5},
    -		{"LineFile.Name", Field, 5},
    -		{"LineReader", Type, 5},
    -		{"LineReaderPos", Type, 5},
    -		{"New", Func, 0},
    -		{"Offset", Type, 0},
    -		{"PtrType", Type, 0},
    -		{"PtrType.CommonType", Field, 0},
    -		{"PtrType.Type", Field, 0},
    -		{"QualType", Type, 0},
    -		{"QualType.CommonType", Field, 0},
    -		{"QualType.Qual", Field, 0},
    -		{"QualType.Type", Field, 0},
    -		{"Reader", Type, 0},
    -		{"StructField", Type, 0},
    -		{"StructField.BitOffset", Field, 0},
    -		{"StructField.BitSize", Field, 0},
    -		{"StructField.ByteOffset", Field, 0},
    -		{"StructField.ByteSize", Field, 0},
    -		{"StructField.DataBitOffset", Field, 18},
    -		{"StructField.Name", Field, 0},
    -		{"StructField.Type", Field, 0},
    -		{"StructType", Type, 0},
    -		{"StructType.CommonType", Field, 0},
    -		{"StructType.Field", Field, 0},
    -		{"StructType.Incomplete", Field, 0},
    -		{"StructType.Kind", Field, 0},
    -		{"StructType.StructName", Field, 0},
    -		{"Tag", Type, 0},
    -		{"TagAccessDeclaration", Const, 0},
    -		{"TagArrayType", Const, 0},
    -		{"TagAtomicType", Const, 14},
    -		{"TagBaseType", Const, 0},
    -		{"TagCallSite", Const, 14},
    -		{"TagCallSiteParameter", Const, 14},
    -		{"TagCatchDwarfBlock", Const, 0},
    -		{"TagClassType", Const, 0},
    -		{"TagCoarrayType", Const, 14},
    -		{"TagCommonDwarfBlock", Const, 0},
    -		{"TagCommonInclusion", Const, 0},
    -		{"TagCompileUnit", Const, 0},
    -		{"TagCondition", Const, 3},
    -		{"TagConstType", Const, 0},
    -		{"TagConstant", Const, 0},
    -		{"TagDwarfProcedure", Const, 0},
    -		{"TagDynamicType", Const, 14},
    -		{"TagEntryPoint", Const, 0},
    -		{"TagEnumerationType", Const, 0},
    -		{"TagEnumerator", Const, 0},
    -		{"TagFileType", Const, 0},
    -		{"TagFormalParameter", Const, 0},
    -		{"TagFriend", Const, 0},
    -		{"TagGenericSubrange", Const, 14},
    -		{"TagImmutableType", Const, 14},
    -		{"TagImportedDeclaration", Const, 0},
    -		{"TagImportedModule", Const, 0},
    -		{"TagImportedUnit", Const, 0},
    -		{"TagInheritance", Const, 0},
    -		{"TagInlinedSubroutine", Const, 0},
    -		{"TagInterfaceType", Const, 0},
    -		{"TagLabel", Const, 0},
    -		{"TagLexDwarfBlock", Const, 0},
    -		{"TagMember", Const, 0},
    -		{"TagModule", Const, 0},
    -		{"TagMutableType", Const, 0},
    -		{"TagNamelist", Const, 0},
    -		{"TagNamelistItem", Const, 0},
    -		{"TagNamespace", Const, 0},
    -		{"TagPackedType", Const, 0},
    -		{"TagPartialUnit", Const, 0},
    -		{"TagPointerType", Const, 0},
    -		{"TagPtrToMemberType", Const, 0},
    -		{"TagReferenceType", Const, 0},
    -		{"TagRestrictType", Const, 0},
    -		{"TagRvalueReferenceType", Const, 3},
    -		{"TagSetType", Const, 0},
    -		{"TagSharedType", Const, 3},
    -		{"TagSkeletonUnit", Const, 14},
    -		{"TagStringType", Const, 0},
    -		{"TagStructType", Const, 0},
    -		{"TagSubprogram", Const, 0},
    -		{"TagSubrangeType", Const, 0},
    -		{"TagSubroutineType", Const, 0},
    -		{"TagTemplateAlias", Const, 3},
    -		{"TagTemplateTypeParameter", Const, 0},
    -		{"TagTemplateValueParameter", Const, 0},
    -		{"TagThrownType", Const, 0},
    -		{"TagTryDwarfBlock", Const, 0},
    -		{"TagTypeUnit", Const, 3},
    -		{"TagTypedef", Const, 0},
    -		{"TagUnionType", Const, 0},
    -		{"TagUnspecifiedParameters", Const, 0},
    -		{"TagUnspecifiedType", Const, 0},
    -		{"TagVariable", Const, 0},
    -		{"TagVariant", Const, 0},
    -		{"TagVariantPart", Const, 0},
    -		{"TagVolatileType", Const, 0},
    -		{"TagWithStmt", Const, 0},
    -		{"Type", Type, 0},
    -		{"TypedefType", Type, 0},
    -		{"TypedefType.CommonType", Field, 0},
    -		{"TypedefType.Type", Field, 0},
    -		{"UcharType", Type, 0},
    -		{"UcharType.BasicType", Field, 0},
    -		{"UintType", Type, 0},
    -		{"UintType.BasicType", Field, 0},
    -		{"UnspecifiedType", Type, 4},
    -		{"UnspecifiedType.BasicType", Field, 4},
    -		{"UnsupportedType", Type, 13},
    -		{"UnsupportedType.CommonType", Field, 13},
    -		{"UnsupportedType.Tag", Field, 13},
    -		{"VoidType", Type, 0},
    -		{"VoidType.CommonType", Field, 0},
    +		{"(*AddrType).Basic", Method, 0, ""},
    +		{"(*AddrType).Common", Method, 0, ""},
    +		{"(*AddrType).Size", Method, 0, ""},
    +		{"(*AddrType).String", Method, 0, ""},
    +		{"(*ArrayType).Common", Method, 0, ""},
    +		{"(*ArrayType).Size", Method, 0, ""},
    +		{"(*ArrayType).String", Method, 0, ""},
    +		{"(*BasicType).Basic", Method, 0, ""},
    +		{"(*BasicType).Common", Method, 0, ""},
    +		{"(*BasicType).Size", Method, 0, ""},
    +		{"(*BasicType).String", Method, 0, ""},
    +		{"(*BoolType).Basic", Method, 0, ""},
    +		{"(*BoolType).Common", Method, 0, ""},
    +		{"(*BoolType).Size", Method, 0, ""},
    +		{"(*BoolType).String", Method, 0, ""},
    +		{"(*CharType).Basic", Method, 0, ""},
    +		{"(*CharType).Common", Method, 0, ""},
    +		{"(*CharType).Size", Method, 0, ""},
    +		{"(*CharType).String", Method, 0, ""},
    +		{"(*CommonType).Common", Method, 0, ""},
    +		{"(*CommonType).Size", Method, 0, ""},
    +		{"(*ComplexType).Basic", Method, 0, ""},
    +		{"(*ComplexType).Common", Method, 0, ""},
    +		{"(*ComplexType).Size", Method, 0, ""},
    +		{"(*ComplexType).String", Method, 0, ""},
    +		{"(*Data).AddSection", Method, 14, ""},
    +		{"(*Data).AddTypes", Method, 3, ""},
    +		{"(*Data).LineReader", Method, 5, ""},
    +		{"(*Data).Ranges", Method, 7, ""},
    +		{"(*Data).Reader", Method, 0, ""},
    +		{"(*Data).Type", Method, 0, ""},
    +		{"(*DotDotDotType).Common", Method, 0, ""},
    +		{"(*DotDotDotType).Size", Method, 0, ""},
    +		{"(*DotDotDotType).String", Method, 0, ""},
    +		{"(*Entry).AttrField", Method, 5, ""},
    +		{"(*Entry).Val", Method, 0, ""},
    +		{"(*EnumType).Common", Method, 0, ""},
    +		{"(*EnumType).Size", Method, 0, ""},
    +		{"(*EnumType).String", Method, 0, ""},
    +		{"(*FloatType).Basic", Method, 0, ""},
    +		{"(*FloatType).Common", Method, 0, ""},
    +		{"(*FloatType).Size", Method, 0, ""},
    +		{"(*FloatType).String", Method, 0, ""},
    +		{"(*FuncType).Common", Method, 0, ""},
    +		{"(*FuncType).Size", Method, 0, ""},
    +		{"(*FuncType).String", Method, 0, ""},
    +		{"(*IntType).Basic", Method, 0, ""},
    +		{"(*IntType).Common", Method, 0, ""},
    +		{"(*IntType).Size", Method, 0, ""},
    +		{"(*IntType).String", Method, 0, ""},
    +		{"(*LineReader).Files", Method, 14, ""},
    +		{"(*LineReader).Next", Method, 5, ""},
    +		{"(*LineReader).Reset", Method, 5, ""},
    +		{"(*LineReader).Seek", Method, 5, ""},
    +		{"(*LineReader).SeekPC", Method, 5, ""},
    +		{"(*LineReader).Tell", Method, 5, ""},
    +		{"(*PtrType).Common", Method, 0, ""},
    +		{"(*PtrType).Size", Method, 0, ""},
    +		{"(*PtrType).String", Method, 0, ""},
    +		{"(*QualType).Common", Method, 0, ""},
    +		{"(*QualType).Size", Method, 0, ""},
    +		{"(*QualType).String", Method, 0, ""},
    +		{"(*Reader).AddressSize", Method, 5, ""},
    +		{"(*Reader).ByteOrder", Method, 14, ""},
    +		{"(*Reader).Next", Method, 0, ""},
    +		{"(*Reader).Seek", Method, 0, ""},
    +		{"(*Reader).SeekPC", Method, 7, ""},
    +		{"(*Reader).SkipChildren", Method, 0, ""},
    +		{"(*StructType).Common", Method, 0, ""},
    +		{"(*StructType).Defn", Method, 0, ""},
    +		{"(*StructType).Size", Method, 0, ""},
    +		{"(*StructType).String", Method, 0, ""},
    +		{"(*TypedefType).Common", Method, 0, ""},
    +		{"(*TypedefType).Size", Method, 0, ""},
    +		{"(*TypedefType).String", Method, 0, ""},
    +		{"(*UcharType).Basic", Method, 0, ""},
    +		{"(*UcharType).Common", Method, 0, ""},
    +		{"(*UcharType).Size", Method, 0, ""},
    +		{"(*UcharType).String", Method, 0, ""},
    +		{"(*UintType).Basic", Method, 0, ""},
    +		{"(*UintType).Common", Method, 0, ""},
    +		{"(*UintType).Size", Method, 0, ""},
    +		{"(*UintType).String", Method, 0, ""},
    +		{"(*UnspecifiedType).Basic", Method, 4, ""},
    +		{"(*UnspecifiedType).Common", Method, 4, ""},
    +		{"(*UnspecifiedType).Size", Method, 4, ""},
    +		{"(*UnspecifiedType).String", Method, 4, ""},
    +		{"(*UnsupportedType).Common", Method, 13, ""},
    +		{"(*UnsupportedType).Size", Method, 13, ""},
    +		{"(*UnsupportedType).String", Method, 13, ""},
    +		{"(*VoidType).Common", Method, 0, ""},
    +		{"(*VoidType).Size", Method, 0, ""},
    +		{"(*VoidType).String", Method, 0, ""},
    +		{"(Attr).GoString", Method, 0, ""},
    +		{"(Attr).String", Method, 0, ""},
    +		{"(Class).GoString", Method, 5, ""},
    +		{"(Class).String", Method, 5, ""},
    +		{"(DecodeError).Error", Method, 0, ""},
    +		{"(Tag).GoString", Method, 0, ""},
    +		{"(Tag).String", Method, 0, ""},
    +		{"AddrType", Type, 0, ""},
    +		{"AddrType.BasicType", Field, 0, ""},
    +		{"ArrayType", Type, 0, ""},
    +		{"ArrayType.CommonType", Field, 0, ""},
    +		{"ArrayType.Count", Field, 0, ""},
    +		{"ArrayType.StrideBitSize", Field, 0, ""},
    +		{"ArrayType.Type", Field, 0, ""},
    +		{"Attr", Type, 0, ""},
    +		{"AttrAbstractOrigin", Const, 0, ""},
    +		{"AttrAccessibility", Const, 0, ""},
    +		{"AttrAddrBase", Const, 14, ""},
    +		{"AttrAddrClass", Const, 0, ""},
    +		{"AttrAlignment", Const, 14, ""},
    +		{"AttrAllocated", Const, 0, ""},
    +		{"AttrArtificial", Const, 0, ""},
    +		{"AttrAssociated", Const, 0, ""},
    +		{"AttrBaseTypes", Const, 0, ""},
    +		{"AttrBinaryScale", Const, 14, ""},
    +		{"AttrBitOffset", Const, 0, ""},
    +		{"AttrBitSize", Const, 0, ""},
    +		{"AttrByteSize", Const, 0, ""},
    +		{"AttrCallAllCalls", Const, 14, ""},
    +		{"AttrCallAllSourceCalls", Const, 14, ""},
    +		{"AttrCallAllTailCalls", Const, 14, ""},
    +		{"AttrCallColumn", Const, 0, ""},
    +		{"AttrCallDataLocation", Const, 14, ""},
    +		{"AttrCallDataValue", Const, 14, ""},
    +		{"AttrCallFile", Const, 0, ""},
    +		{"AttrCallLine", Const, 0, ""},
    +		{"AttrCallOrigin", Const, 14, ""},
    +		{"AttrCallPC", Const, 14, ""},
    +		{"AttrCallParameter", Const, 14, ""},
    +		{"AttrCallReturnPC", Const, 14, ""},
    +		{"AttrCallTailCall", Const, 14, ""},
    +		{"AttrCallTarget", Const, 14, ""},
    +		{"AttrCallTargetClobbered", Const, 14, ""},
    +		{"AttrCallValue", Const, 14, ""},
    +		{"AttrCalling", Const, 0, ""},
    +		{"AttrCommonRef", Const, 0, ""},
    +		{"AttrCompDir", Const, 0, ""},
    +		{"AttrConstExpr", Const, 14, ""},
    +		{"AttrConstValue", Const, 0, ""},
    +		{"AttrContainingType", Const, 0, ""},
    +		{"AttrCount", Const, 0, ""},
    +		{"AttrDataBitOffset", Const, 14, ""},
    +		{"AttrDataLocation", Const, 0, ""},
    +		{"AttrDataMemberLoc", Const, 0, ""},
    +		{"AttrDecimalScale", Const, 14, ""},
    +		{"AttrDecimalSign", Const, 14, ""},
    +		{"AttrDeclColumn", Const, 0, ""},
    +		{"AttrDeclFile", Const, 0, ""},
    +		{"AttrDeclLine", Const, 0, ""},
    +		{"AttrDeclaration", Const, 0, ""},
    +		{"AttrDefaultValue", Const, 0, ""},
    +		{"AttrDefaulted", Const, 14, ""},
    +		{"AttrDeleted", Const, 14, ""},
    +		{"AttrDescription", Const, 0, ""},
    +		{"AttrDigitCount", Const, 14, ""},
    +		{"AttrDiscr", Const, 0, ""},
    +		{"AttrDiscrList", Const, 0, ""},
    +		{"AttrDiscrValue", Const, 0, ""},
    +		{"AttrDwoName", Const, 14, ""},
    +		{"AttrElemental", Const, 14, ""},
    +		{"AttrEncoding", Const, 0, ""},
    +		{"AttrEndianity", Const, 14, ""},
    +		{"AttrEntrypc", Const, 0, ""},
    +		{"AttrEnumClass", Const, 14, ""},
    +		{"AttrExplicit", Const, 14, ""},
    +		{"AttrExportSymbols", Const, 14, ""},
    +		{"AttrExtension", Const, 0, ""},
    +		{"AttrExternal", Const, 0, ""},
    +		{"AttrFrameBase", Const, 0, ""},
    +		{"AttrFriend", Const, 0, ""},
    +		{"AttrHighpc", Const, 0, ""},
    +		{"AttrIdentifierCase", Const, 0, ""},
    +		{"AttrImport", Const, 0, ""},
    +		{"AttrInline", Const, 0, ""},
    +		{"AttrIsOptional", Const, 0, ""},
    +		{"AttrLanguage", Const, 0, ""},
    +		{"AttrLinkageName", Const, 14, ""},
    +		{"AttrLocation", Const, 0, ""},
    +		{"AttrLoclistsBase", Const, 14, ""},
    +		{"AttrLowerBound", Const, 0, ""},
    +		{"AttrLowpc", Const, 0, ""},
    +		{"AttrMacroInfo", Const, 0, ""},
    +		{"AttrMacros", Const, 14, ""},
    +		{"AttrMainSubprogram", Const, 14, ""},
    +		{"AttrMutable", Const, 14, ""},
    +		{"AttrName", Const, 0, ""},
    +		{"AttrNamelistItem", Const, 0, ""},
    +		{"AttrNoreturn", Const, 14, ""},
    +		{"AttrObjectPointer", Const, 14, ""},
    +		{"AttrOrdering", Const, 0, ""},
    +		{"AttrPictureString", Const, 14, ""},
    +		{"AttrPriority", Const, 0, ""},
    +		{"AttrProducer", Const, 0, ""},
    +		{"AttrPrototyped", Const, 0, ""},
    +		{"AttrPure", Const, 14, ""},
    +		{"AttrRanges", Const, 0, ""},
    +		{"AttrRank", Const, 14, ""},
    +		{"AttrRecursive", Const, 14, ""},
    +		{"AttrReference", Const, 14, ""},
    +		{"AttrReturnAddr", Const, 0, ""},
    +		{"AttrRnglistsBase", Const, 14, ""},
    +		{"AttrRvalueReference", Const, 14, ""},
    +		{"AttrSegment", Const, 0, ""},
    +		{"AttrSibling", Const, 0, ""},
    +		{"AttrSignature", Const, 14, ""},
    +		{"AttrSmall", Const, 14, ""},
    +		{"AttrSpecification", Const, 0, ""},
    +		{"AttrStartScope", Const, 0, ""},
    +		{"AttrStaticLink", Const, 0, ""},
    +		{"AttrStmtList", Const, 0, ""},
    +		{"AttrStrOffsetsBase", Const, 14, ""},
    +		{"AttrStride", Const, 0, ""},
    +		{"AttrStrideSize", Const, 0, ""},
    +		{"AttrStringLength", Const, 0, ""},
    +		{"AttrStringLengthBitSize", Const, 14, ""},
    +		{"AttrStringLengthByteSize", Const, 14, ""},
    +		{"AttrThreadsScaled", Const, 14, ""},
    +		{"AttrTrampoline", Const, 0, ""},
    +		{"AttrType", Const, 0, ""},
    +		{"AttrUpperBound", Const, 0, ""},
    +		{"AttrUseLocation", Const, 0, ""},
    +		{"AttrUseUTF8", Const, 0, ""},
    +		{"AttrVarParam", Const, 0, ""},
    +		{"AttrVirtuality", Const, 0, ""},
    +		{"AttrVisibility", Const, 0, ""},
    +		{"AttrVtableElemLoc", Const, 0, ""},
    +		{"BasicType", Type, 0, ""},
    +		{"BasicType.BitOffset", Field, 0, ""},
    +		{"BasicType.BitSize", Field, 0, ""},
    +		{"BasicType.CommonType", Field, 0, ""},
    +		{"BasicType.DataBitOffset", Field, 18, ""},
    +		{"BoolType", Type, 0, ""},
    +		{"BoolType.BasicType", Field, 0, ""},
    +		{"CharType", Type, 0, ""},
    +		{"CharType.BasicType", Field, 0, ""},
    +		{"Class", Type, 5, ""},
    +		{"ClassAddrPtr", Const, 14, ""},
    +		{"ClassAddress", Const, 5, ""},
    +		{"ClassBlock", Const, 5, ""},
    +		{"ClassConstant", Const, 5, ""},
    +		{"ClassExprLoc", Const, 5, ""},
    +		{"ClassFlag", Const, 5, ""},
    +		{"ClassLinePtr", Const, 5, ""},
    +		{"ClassLocList", Const, 14, ""},
    +		{"ClassLocListPtr", Const, 5, ""},
    +		{"ClassMacPtr", Const, 5, ""},
    +		{"ClassRangeListPtr", Const, 5, ""},
    +		{"ClassReference", Const, 5, ""},
    +		{"ClassReferenceAlt", Const, 5, ""},
    +		{"ClassReferenceSig", Const, 5, ""},
    +		{"ClassRngList", Const, 14, ""},
    +		{"ClassRngListsPtr", Const, 14, ""},
    +		{"ClassStrOffsetsPtr", Const, 14, ""},
    +		{"ClassString", Const, 5, ""},
    +		{"ClassStringAlt", Const, 5, ""},
    +		{"ClassUnknown", Const, 6, ""},
    +		{"CommonType", Type, 0, ""},
    +		{"CommonType.ByteSize", Field, 0, ""},
    +		{"CommonType.Name", Field, 0, ""},
    +		{"ComplexType", Type, 0, ""},
    +		{"ComplexType.BasicType", Field, 0, ""},
    +		{"Data", Type, 0, ""},
    +		{"DecodeError", Type, 0, ""},
    +		{"DecodeError.Err", Field, 0, ""},
    +		{"DecodeError.Name", Field, 0, ""},
    +		{"DecodeError.Offset", Field, 0, ""},
    +		{"DotDotDotType", Type, 0, ""},
    +		{"DotDotDotType.CommonType", Field, 0, ""},
    +		{"Entry", Type, 0, ""},
    +		{"Entry.Children", Field, 0, ""},
    +		{"Entry.Field", Field, 0, ""},
    +		{"Entry.Offset", Field, 0, ""},
    +		{"Entry.Tag", Field, 0, ""},
    +		{"EnumType", Type, 0, ""},
    +		{"EnumType.CommonType", Field, 0, ""},
    +		{"EnumType.EnumName", Field, 0, ""},
    +		{"EnumType.Val", Field, 0, ""},
    +		{"EnumValue", Type, 0, ""},
    +		{"EnumValue.Name", Field, 0, ""},
    +		{"EnumValue.Val", Field, 0, ""},
    +		{"ErrUnknownPC", Var, 5, ""},
    +		{"Field", Type, 0, ""},
    +		{"Field.Attr", Field, 0, ""},
    +		{"Field.Class", Field, 5, ""},
    +		{"Field.Val", Field, 0, ""},
    +		{"FloatType", Type, 0, ""},
    +		{"FloatType.BasicType", Field, 0, ""},
    +		{"FuncType", Type, 0, ""},
    +		{"FuncType.CommonType", Field, 0, ""},
    +		{"FuncType.ParamType", Field, 0, ""},
    +		{"FuncType.ReturnType", Field, 0, ""},
    +		{"IntType", Type, 0, ""},
    +		{"IntType.BasicType", Field, 0, ""},
    +		{"LineEntry", Type, 5, ""},
    +		{"LineEntry.Address", Field, 5, ""},
    +		{"LineEntry.BasicBlock", Field, 5, ""},
    +		{"LineEntry.Column", Field, 5, ""},
    +		{"LineEntry.Discriminator", Field, 5, ""},
    +		{"LineEntry.EndSequence", Field, 5, ""},
    +		{"LineEntry.EpilogueBegin", Field, 5, ""},
    +		{"LineEntry.File", Field, 5, ""},
    +		{"LineEntry.ISA", Field, 5, ""},
    +		{"LineEntry.IsStmt", Field, 5, ""},
    +		{"LineEntry.Line", Field, 5, ""},
    +		{"LineEntry.OpIndex", Field, 5, ""},
    +		{"LineEntry.PrologueEnd", Field, 5, ""},
    +		{"LineFile", Type, 5, ""},
    +		{"LineFile.Length", Field, 5, ""},
    +		{"LineFile.Mtime", Field, 5, ""},
    +		{"LineFile.Name", Field, 5, ""},
    +		{"LineReader", Type, 5, ""},
    +		{"LineReaderPos", Type, 5, ""},
    +		{"New", Func, 0, "func(abbrev []byte, aranges []byte, frame []byte, info []byte, line []byte, pubnames []byte, ranges []byte, str []byte) (*Data, error)"},
    +		{"Offset", Type, 0, ""},
    +		{"PtrType", Type, 0, ""},
    +		{"PtrType.CommonType", Field, 0, ""},
    +		{"PtrType.Type", Field, 0, ""},
    +		{"QualType", Type, 0, ""},
    +		{"QualType.CommonType", Field, 0, ""},
    +		{"QualType.Qual", Field, 0, ""},
    +		{"QualType.Type", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"StructField", Type, 0, ""},
    +		{"StructField.BitOffset", Field, 0, ""},
    +		{"StructField.BitSize", Field, 0, ""},
    +		{"StructField.ByteOffset", Field, 0, ""},
    +		{"StructField.ByteSize", Field, 0, ""},
    +		{"StructField.DataBitOffset", Field, 18, ""},
    +		{"StructField.Name", Field, 0, ""},
    +		{"StructField.Type", Field, 0, ""},
    +		{"StructType", Type, 0, ""},
    +		{"StructType.CommonType", Field, 0, ""},
    +		{"StructType.Field", Field, 0, ""},
    +		{"StructType.Incomplete", Field, 0, ""},
    +		{"StructType.Kind", Field, 0, ""},
    +		{"StructType.StructName", Field, 0, ""},
    +		{"Tag", Type, 0, ""},
    +		{"TagAccessDeclaration", Const, 0, ""},
    +		{"TagArrayType", Const, 0, ""},
    +		{"TagAtomicType", Const, 14, ""},
    +		{"TagBaseType", Const, 0, ""},
    +		{"TagCallSite", Const, 14, ""},
    +		{"TagCallSiteParameter", Const, 14, ""},
    +		{"TagCatchDwarfBlock", Const, 0, ""},
    +		{"TagClassType", Const, 0, ""},
    +		{"TagCoarrayType", Const, 14, ""},
    +		{"TagCommonDwarfBlock", Const, 0, ""},
    +		{"TagCommonInclusion", Const, 0, ""},
    +		{"TagCompileUnit", Const, 0, ""},
    +		{"TagCondition", Const, 3, ""},
    +		{"TagConstType", Const, 0, ""},
    +		{"TagConstant", Const, 0, ""},
    +		{"TagDwarfProcedure", Const, 0, ""},
    +		{"TagDynamicType", Const, 14, ""},
    +		{"TagEntryPoint", Const, 0, ""},
    +		{"TagEnumerationType", Const, 0, ""},
    +		{"TagEnumerator", Const, 0, ""},
    +		{"TagFileType", Const, 0, ""},
    +		{"TagFormalParameter", Const, 0, ""},
    +		{"TagFriend", Const, 0, ""},
    +		{"TagGenericSubrange", Const, 14, ""},
    +		{"TagImmutableType", Const, 14, ""},
    +		{"TagImportedDeclaration", Const, 0, ""},
    +		{"TagImportedModule", Const, 0, ""},
    +		{"TagImportedUnit", Const, 0, ""},
    +		{"TagInheritance", Const, 0, ""},
    +		{"TagInlinedSubroutine", Const, 0, ""},
    +		{"TagInterfaceType", Const, 0, ""},
    +		{"TagLabel", Const, 0, ""},
    +		{"TagLexDwarfBlock", Const, 0, ""},
    +		{"TagMember", Const, 0, ""},
    +		{"TagModule", Const, 0, ""},
    +		{"TagMutableType", Const, 0, ""},
    +		{"TagNamelist", Const, 0, ""},
    +		{"TagNamelistItem", Const, 0, ""},
    +		{"TagNamespace", Const, 0, ""},
    +		{"TagPackedType", Const, 0, ""},
    +		{"TagPartialUnit", Const, 0, ""},
    +		{"TagPointerType", Const, 0, ""},
    +		{"TagPtrToMemberType", Const, 0, ""},
    +		{"TagReferenceType", Const, 0, ""},
    +		{"TagRestrictType", Const, 0, ""},
    +		{"TagRvalueReferenceType", Const, 3, ""},
    +		{"TagSetType", Const, 0, ""},
    +		{"TagSharedType", Const, 3, ""},
    +		{"TagSkeletonUnit", Const, 14, ""},
    +		{"TagStringType", Const, 0, ""},
    +		{"TagStructType", Const, 0, ""},
    +		{"TagSubprogram", Const, 0, ""},
    +		{"TagSubrangeType", Const, 0, ""},
    +		{"TagSubroutineType", Const, 0, ""},
    +		{"TagTemplateAlias", Const, 3, ""},
    +		{"TagTemplateTypeParameter", Const, 0, ""},
    +		{"TagTemplateValueParameter", Const, 0, ""},
    +		{"TagThrownType", Const, 0, ""},
    +		{"TagTryDwarfBlock", Const, 0, ""},
    +		{"TagTypeUnit", Const, 3, ""},
    +		{"TagTypedef", Const, 0, ""},
    +		{"TagUnionType", Const, 0, ""},
    +		{"TagUnspecifiedParameters", Const, 0, ""},
    +		{"TagUnspecifiedType", Const, 0, ""},
    +		{"TagVariable", Const, 0, ""},
    +		{"TagVariant", Const, 0, ""},
    +		{"TagVariantPart", Const, 0, ""},
    +		{"TagVolatileType", Const, 0, ""},
    +		{"TagWithStmt", Const, 0, ""},
    +		{"Type", Type, 0, ""},
    +		{"TypedefType", Type, 0, ""},
    +		{"TypedefType.CommonType", Field, 0, ""},
    +		{"TypedefType.Type", Field, 0, ""},
    +		{"UcharType", Type, 0, ""},
    +		{"UcharType.BasicType", Field, 0, ""},
    +		{"UintType", Type, 0, ""},
    +		{"UintType.BasicType", Field, 0, ""},
    +		{"UnspecifiedType", Type, 4, ""},
    +		{"UnspecifiedType.BasicType", Field, 4, ""},
    +		{"UnsupportedType", Type, 13, ""},
    +		{"UnsupportedType.CommonType", Field, 13, ""},
    +		{"UnsupportedType.Tag", Field, 13, ""},
    +		{"VoidType", Type, 0, ""},
    +		{"VoidType.CommonType", Field, 0, ""},
     	},
     	"debug/elf": {
    -		{"(*File).Close", Method, 0},
    -		{"(*File).DWARF", Method, 0},
    -		{"(*File).DynString", Method, 1},
    -		{"(*File).DynValue", Method, 21},
    -		{"(*File).DynamicSymbols", Method, 4},
    -		{"(*File).DynamicVersionNeeds", Method, 24},
    -		{"(*File).DynamicVersions", Method, 24},
    -		{"(*File).ImportedLibraries", Method, 0},
    -		{"(*File).ImportedSymbols", Method, 0},
    -		{"(*File).Section", Method, 0},
    -		{"(*File).SectionByType", Method, 0},
    -		{"(*File).Symbols", Method, 0},
    -		{"(*FormatError).Error", Method, 0},
    -		{"(*Prog).Open", Method, 0},
    -		{"(*Section).Data", Method, 0},
    -		{"(*Section).Open", Method, 0},
    -		{"(Class).GoString", Method, 0},
    -		{"(Class).String", Method, 0},
    -		{"(CompressionType).GoString", Method, 6},
    -		{"(CompressionType).String", Method, 6},
    -		{"(Data).GoString", Method, 0},
    -		{"(Data).String", Method, 0},
    -		{"(DynFlag).GoString", Method, 0},
    -		{"(DynFlag).String", Method, 0},
    -		{"(DynFlag1).GoString", Method, 21},
    -		{"(DynFlag1).String", Method, 21},
    -		{"(DynTag).GoString", Method, 0},
    -		{"(DynTag).String", Method, 0},
    -		{"(Machine).GoString", Method, 0},
    -		{"(Machine).String", Method, 0},
    -		{"(NType).GoString", Method, 0},
    -		{"(NType).String", Method, 0},
    -		{"(OSABI).GoString", Method, 0},
    -		{"(OSABI).String", Method, 0},
    -		{"(Prog).ReadAt", Method, 0},
    -		{"(ProgFlag).GoString", Method, 0},
    -		{"(ProgFlag).String", Method, 0},
    -		{"(ProgType).GoString", Method, 0},
    -		{"(ProgType).String", Method, 0},
    -		{"(R_386).GoString", Method, 0},
    -		{"(R_386).String", Method, 0},
    -		{"(R_390).GoString", Method, 7},
    -		{"(R_390).String", Method, 7},
    -		{"(R_AARCH64).GoString", Method, 4},
    -		{"(R_AARCH64).String", Method, 4},
    -		{"(R_ALPHA).GoString", Method, 0},
    -		{"(R_ALPHA).String", Method, 0},
    -		{"(R_ARM).GoString", Method, 0},
    -		{"(R_ARM).String", Method, 0},
    -		{"(R_LARCH).GoString", Method, 19},
    -		{"(R_LARCH).String", Method, 19},
    -		{"(R_MIPS).GoString", Method, 6},
    -		{"(R_MIPS).String", Method, 6},
    -		{"(R_PPC).GoString", Method, 0},
    -		{"(R_PPC).String", Method, 0},
    -		{"(R_PPC64).GoString", Method, 5},
    -		{"(R_PPC64).String", Method, 5},
    -		{"(R_RISCV).GoString", Method, 11},
    -		{"(R_RISCV).String", Method, 11},
    -		{"(R_SPARC).GoString", Method, 0},
    -		{"(R_SPARC).String", Method, 0},
    -		{"(R_X86_64).GoString", Method, 0},
    -		{"(R_X86_64).String", Method, 0},
    -		{"(Section).ReadAt", Method, 0},
    -		{"(SectionFlag).GoString", Method, 0},
    -		{"(SectionFlag).String", Method, 0},
    -		{"(SectionIndex).GoString", Method, 0},
    -		{"(SectionIndex).String", Method, 0},
    -		{"(SectionType).GoString", Method, 0},
    -		{"(SectionType).String", Method, 0},
    -		{"(SymBind).GoString", Method, 0},
    -		{"(SymBind).String", Method, 0},
    -		{"(SymType).GoString", Method, 0},
    -		{"(SymType).String", Method, 0},
    -		{"(SymVis).GoString", Method, 0},
    -		{"(SymVis).String", Method, 0},
    -		{"(Type).GoString", Method, 0},
    -		{"(Type).String", Method, 0},
    -		{"(Version).GoString", Method, 0},
    -		{"(Version).String", Method, 0},
    -		{"(VersionIndex).Index", Method, 24},
    -		{"(VersionIndex).IsHidden", Method, 24},
    -		{"ARM_MAGIC_TRAMP_NUMBER", Const, 0},
    -		{"COMPRESS_HIOS", Const, 6},
    -		{"COMPRESS_HIPROC", Const, 6},
    -		{"COMPRESS_LOOS", Const, 6},
    -		{"COMPRESS_LOPROC", Const, 6},
    -		{"COMPRESS_ZLIB", Const, 6},
    -		{"COMPRESS_ZSTD", Const, 21},
    -		{"Chdr32", Type, 6},
    -		{"Chdr32.Addralign", Field, 6},
    -		{"Chdr32.Size", Field, 6},
    -		{"Chdr32.Type", Field, 6},
    -		{"Chdr64", Type, 6},
    -		{"Chdr64.Addralign", Field, 6},
    -		{"Chdr64.Size", Field, 6},
    -		{"Chdr64.Type", Field, 6},
    -		{"Class", Type, 0},
    -		{"CompressionType", Type, 6},
    -		{"DF_1_CONFALT", Const, 21},
    -		{"DF_1_DIRECT", Const, 21},
    -		{"DF_1_DISPRELDNE", Const, 21},
    -		{"DF_1_DISPRELPND", Const, 21},
    -		{"DF_1_EDITED", Const, 21},
    -		{"DF_1_ENDFILTEE", Const, 21},
    -		{"DF_1_GLOBAL", Const, 21},
    -		{"DF_1_GLOBAUDIT", Const, 21},
    -		{"DF_1_GROUP", Const, 21},
    -		{"DF_1_IGNMULDEF", Const, 21},
    -		{"DF_1_INITFIRST", Const, 21},
    -		{"DF_1_INTERPOSE", Const, 21},
    -		{"DF_1_KMOD", Const, 21},
    -		{"DF_1_LOADFLTR", Const, 21},
    -		{"DF_1_NOCOMMON", Const, 21},
    -		{"DF_1_NODEFLIB", Const, 21},
    -		{"DF_1_NODELETE", Const, 21},
    -		{"DF_1_NODIRECT", Const, 21},
    -		{"DF_1_NODUMP", Const, 21},
    -		{"DF_1_NOHDR", Const, 21},
    -		{"DF_1_NOKSYMS", Const, 21},
    -		{"DF_1_NOOPEN", Const, 21},
    -		{"DF_1_NORELOC", Const, 21},
    -		{"DF_1_NOW", Const, 21},
    -		{"DF_1_ORIGIN", Const, 21},
    -		{"DF_1_PIE", Const, 21},
    -		{"DF_1_SINGLETON", Const, 21},
    -		{"DF_1_STUB", Const, 21},
    -		{"DF_1_SYMINTPOSE", Const, 21},
    -		{"DF_1_TRANS", Const, 21},
    -		{"DF_1_WEAKFILTER", Const, 21},
    -		{"DF_BIND_NOW", Const, 0},
    -		{"DF_ORIGIN", Const, 0},
    -		{"DF_STATIC_TLS", Const, 0},
    -		{"DF_SYMBOLIC", Const, 0},
    -		{"DF_TEXTREL", Const, 0},
    -		{"DT_ADDRRNGHI", Const, 16},
    -		{"DT_ADDRRNGLO", Const, 16},
    -		{"DT_AUDIT", Const, 16},
    -		{"DT_AUXILIARY", Const, 16},
    -		{"DT_BIND_NOW", Const, 0},
    -		{"DT_CHECKSUM", Const, 16},
    -		{"DT_CONFIG", Const, 16},
    -		{"DT_DEBUG", Const, 0},
    -		{"DT_DEPAUDIT", Const, 16},
    -		{"DT_ENCODING", Const, 0},
    -		{"DT_FEATURE", Const, 16},
    -		{"DT_FILTER", Const, 16},
    -		{"DT_FINI", Const, 0},
    -		{"DT_FINI_ARRAY", Const, 0},
    -		{"DT_FINI_ARRAYSZ", Const, 0},
    -		{"DT_FLAGS", Const, 0},
    -		{"DT_FLAGS_1", Const, 16},
    -		{"DT_GNU_CONFLICT", Const, 16},
    -		{"DT_GNU_CONFLICTSZ", Const, 16},
    -		{"DT_GNU_HASH", Const, 16},
    -		{"DT_GNU_LIBLIST", Const, 16},
    -		{"DT_GNU_LIBLISTSZ", Const, 16},
    -		{"DT_GNU_PRELINKED", Const, 16},
    -		{"DT_HASH", Const, 0},
    -		{"DT_HIOS", Const, 0},
    -		{"DT_HIPROC", Const, 0},
    -		{"DT_INIT", Const, 0},
    -		{"DT_INIT_ARRAY", Const, 0},
    -		{"DT_INIT_ARRAYSZ", Const, 0},
    -		{"DT_JMPREL", Const, 0},
    -		{"DT_LOOS", Const, 0},
    -		{"DT_LOPROC", Const, 0},
    -		{"DT_MIPS_AUX_DYNAMIC", Const, 16},
    -		{"DT_MIPS_BASE_ADDRESS", Const, 16},
    -		{"DT_MIPS_COMPACT_SIZE", Const, 16},
    -		{"DT_MIPS_CONFLICT", Const, 16},
    -		{"DT_MIPS_CONFLICTNO", Const, 16},
    -		{"DT_MIPS_CXX_FLAGS", Const, 16},
    -		{"DT_MIPS_DELTA_CLASS", Const, 16},
    -		{"DT_MIPS_DELTA_CLASSSYM", Const, 16},
    -		{"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16},
    -		{"DT_MIPS_DELTA_CLASS_NO", Const, 16},
    -		{"DT_MIPS_DELTA_INSTANCE", Const, 16},
    -		{"DT_MIPS_DELTA_INSTANCE_NO", Const, 16},
    -		{"DT_MIPS_DELTA_RELOC", Const, 16},
    -		{"DT_MIPS_DELTA_RELOC_NO", Const, 16},
    -		{"DT_MIPS_DELTA_SYM", Const, 16},
    -		{"DT_MIPS_DELTA_SYM_NO", Const, 16},
    -		{"DT_MIPS_DYNSTR_ALIGN", Const, 16},
    -		{"DT_MIPS_FLAGS", Const, 16},
    -		{"DT_MIPS_GOTSYM", Const, 16},
    -		{"DT_MIPS_GP_VALUE", Const, 16},
    -		{"DT_MIPS_HIDDEN_GOTIDX", Const, 16},
    -		{"DT_MIPS_HIPAGENO", Const, 16},
    -		{"DT_MIPS_ICHECKSUM", Const, 16},
    -		{"DT_MIPS_INTERFACE", Const, 16},
    -		{"DT_MIPS_INTERFACE_SIZE", Const, 16},
    -		{"DT_MIPS_IVERSION", Const, 16},
    -		{"DT_MIPS_LIBLIST", Const, 16},
    -		{"DT_MIPS_LIBLISTNO", Const, 16},
    -		{"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16},
    -		{"DT_MIPS_LOCAL_GOTIDX", Const, 16},
    -		{"DT_MIPS_LOCAL_GOTNO", Const, 16},
    -		{"DT_MIPS_MSYM", Const, 16},
    -		{"DT_MIPS_OPTIONS", Const, 16},
    -		{"DT_MIPS_PERF_SUFFIX", Const, 16},
    -		{"DT_MIPS_PIXIE_INIT", Const, 16},
    -		{"DT_MIPS_PLTGOT", Const, 16},
    -		{"DT_MIPS_PROTECTED_GOTIDX", Const, 16},
    -		{"DT_MIPS_RLD_MAP", Const, 16},
    -		{"DT_MIPS_RLD_MAP_REL", Const, 16},
    -		{"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16},
    -		{"DT_MIPS_RLD_VERSION", Const, 16},
    -		{"DT_MIPS_RWPLT", Const, 16},
    -		{"DT_MIPS_SYMBOL_LIB", Const, 16},
    -		{"DT_MIPS_SYMTABNO", Const, 16},
    -		{"DT_MIPS_TIME_STAMP", Const, 16},
    -		{"DT_MIPS_UNREFEXTNO", Const, 16},
    -		{"DT_MOVEENT", Const, 16},
    -		{"DT_MOVESZ", Const, 16},
    -		{"DT_MOVETAB", Const, 16},
    -		{"DT_NEEDED", Const, 0},
    -		{"DT_NULL", Const, 0},
    -		{"DT_PLTGOT", Const, 0},
    -		{"DT_PLTPAD", Const, 16},
    -		{"DT_PLTPADSZ", Const, 16},
    -		{"DT_PLTREL", Const, 0},
    -		{"DT_PLTRELSZ", Const, 0},
    -		{"DT_POSFLAG_1", Const, 16},
    -		{"DT_PPC64_GLINK", Const, 16},
    -		{"DT_PPC64_OPD", Const, 16},
    -		{"DT_PPC64_OPDSZ", Const, 16},
    -		{"DT_PPC64_OPT", Const, 16},
    -		{"DT_PPC_GOT", Const, 16},
    -		{"DT_PPC_OPT", Const, 16},
    -		{"DT_PREINIT_ARRAY", Const, 0},
    -		{"DT_PREINIT_ARRAYSZ", Const, 0},
    -		{"DT_REL", Const, 0},
    -		{"DT_RELA", Const, 0},
    -		{"DT_RELACOUNT", Const, 16},
    -		{"DT_RELAENT", Const, 0},
    -		{"DT_RELASZ", Const, 0},
    -		{"DT_RELCOUNT", Const, 16},
    -		{"DT_RELENT", Const, 0},
    -		{"DT_RELSZ", Const, 0},
    -		{"DT_RPATH", Const, 0},
    -		{"DT_RUNPATH", Const, 0},
    -		{"DT_SONAME", Const, 0},
    -		{"DT_SPARC_REGISTER", Const, 16},
    -		{"DT_STRSZ", Const, 0},
    -		{"DT_STRTAB", Const, 0},
    -		{"DT_SYMBOLIC", Const, 0},
    -		{"DT_SYMENT", Const, 0},
    -		{"DT_SYMINENT", Const, 16},
    -		{"DT_SYMINFO", Const, 16},
    -		{"DT_SYMINSZ", Const, 16},
    -		{"DT_SYMTAB", Const, 0},
    -		{"DT_SYMTAB_SHNDX", Const, 16},
    -		{"DT_TEXTREL", Const, 0},
    -		{"DT_TLSDESC_GOT", Const, 16},
    -		{"DT_TLSDESC_PLT", Const, 16},
    -		{"DT_USED", Const, 16},
    -		{"DT_VALRNGHI", Const, 16},
    -		{"DT_VALRNGLO", Const, 16},
    -		{"DT_VERDEF", Const, 16},
    -		{"DT_VERDEFNUM", Const, 16},
    -		{"DT_VERNEED", Const, 0},
    -		{"DT_VERNEEDNUM", Const, 0},
    -		{"DT_VERSYM", Const, 0},
    -		{"Data", Type, 0},
    -		{"Dyn32", Type, 0},
    -		{"Dyn32.Tag", Field, 0},
    -		{"Dyn32.Val", Field, 0},
    -		{"Dyn64", Type, 0},
    -		{"Dyn64.Tag", Field, 0},
    -		{"Dyn64.Val", Field, 0},
    -		{"DynFlag", Type, 0},
    -		{"DynFlag1", Type, 21},
    -		{"DynTag", Type, 0},
    -		{"DynamicVersion", Type, 24},
    -		{"DynamicVersion.Deps", Field, 24},
    -		{"DynamicVersion.Flags", Field, 24},
    -		{"DynamicVersion.Index", Field, 24},
    -		{"DynamicVersion.Name", Field, 24},
    -		{"DynamicVersionDep", Type, 24},
    -		{"DynamicVersionDep.Dep", Field, 24},
    -		{"DynamicVersionDep.Flags", Field, 24},
    -		{"DynamicVersionDep.Index", Field, 24},
    -		{"DynamicVersionFlag", Type, 24},
    -		{"DynamicVersionNeed", Type, 24},
    -		{"DynamicVersionNeed.Name", Field, 24},
    -		{"DynamicVersionNeed.Needs", Field, 24},
    -		{"EI_ABIVERSION", Const, 0},
    -		{"EI_CLASS", Const, 0},
    -		{"EI_DATA", Const, 0},
    -		{"EI_NIDENT", Const, 0},
    -		{"EI_OSABI", Const, 0},
    -		{"EI_PAD", Const, 0},
    -		{"EI_VERSION", Const, 0},
    -		{"ELFCLASS32", Const, 0},
    -		{"ELFCLASS64", Const, 0},
    -		{"ELFCLASSNONE", Const, 0},
    -		{"ELFDATA2LSB", Const, 0},
    -		{"ELFDATA2MSB", Const, 0},
    -		{"ELFDATANONE", Const, 0},
    -		{"ELFMAG", Const, 0},
    -		{"ELFOSABI_86OPEN", Const, 0},
    -		{"ELFOSABI_AIX", Const, 0},
    -		{"ELFOSABI_ARM", Const, 0},
    -		{"ELFOSABI_AROS", Const, 11},
    -		{"ELFOSABI_CLOUDABI", Const, 11},
    -		{"ELFOSABI_FENIXOS", Const, 11},
    -		{"ELFOSABI_FREEBSD", Const, 0},
    -		{"ELFOSABI_HPUX", Const, 0},
    -		{"ELFOSABI_HURD", Const, 0},
    -		{"ELFOSABI_IRIX", Const, 0},
    -		{"ELFOSABI_LINUX", Const, 0},
    -		{"ELFOSABI_MODESTO", Const, 0},
    -		{"ELFOSABI_NETBSD", Const, 0},
    -		{"ELFOSABI_NONE", Const, 0},
    -		{"ELFOSABI_NSK", Const, 0},
    -		{"ELFOSABI_OPENBSD", Const, 0},
    -		{"ELFOSABI_OPENVMS", Const, 0},
    -		{"ELFOSABI_SOLARIS", Const, 0},
    -		{"ELFOSABI_STANDALONE", Const, 0},
    -		{"ELFOSABI_TRU64", Const, 0},
    -		{"EM_386", Const, 0},
    -		{"EM_486", Const, 0},
    -		{"EM_56800EX", Const, 11},
    -		{"EM_68HC05", Const, 11},
    -		{"EM_68HC08", Const, 11},
    -		{"EM_68HC11", Const, 11},
    -		{"EM_68HC12", Const, 0},
    -		{"EM_68HC16", Const, 11},
    -		{"EM_68K", Const, 0},
    -		{"EM_78KOR", Const, 11},
    -		{"EM_8051", Const, 11},
    -		{"EM_860", Const, 0},
    -		{"EM_88K", Const, 0},
    -		{"EM_960", Const, 0},
    -		{"EM_AARCH64", Const, 4},
    -		{"EM_ALPHA", Const, 0},
    -		{"EM_ALPHA_STD", Const, 0},
    -		{"EM_ALTERA_NIOS2", Const, 11},
    -		{"EM_AMDGPU", Const, 11},
    -		{"EM_ARC", Const, 0},
    -		{"EM_ARCA", Const, 11},
    -		{"EM_ARC_COMPACT", Const, 11},
    -		{"EM_ARC_COMPACT2", Const, 11},
    -		{"EM_ARM", Const, 0},
    -		{"EM_AVR", Const, 11},
    -		{"EM_AVR32", Const, 11},
    -		{"EM_BA1", Const, 11},
    -		{"EM_BA2", Const, 11},
    -		{"EM_BLACKFIN", Const, 11},
    -		{"EM_BPF", Const, 11},
    -		{"EM_C166", Const, 11},
    -		{"EM_CDP", Const, 11},
    -		{"EM_CE", Const, 11},
    -		{"EM_CLOUDSHIELD", Const, 11},
    -		{"EM_COGE", Const, 11},
    -		{"EM_COLDFIRE", Const, 0},
    -		{"EM_COOL", Const, 11},
    -		{"EM_COREA_1ST", Const, 11},
    -		{"EM_COREA_2ND", Const, 11},
    -		{"EM_CR", Const, 11},
    -		{"EM_CR16", Const, 11},
    -		{"EM_CRAYNV2", Const, 11},
    -		{"EM_CRIS", Const, 11},
    -		{"EM_CRX", Const, 11},
    -		{"EM_CSR_KALIMBA", Const, 11},
    -		{"EM_CUDA", Const, 11},
    -		{"EM_CYPRESS_M8C", Const, 11},
    -		{"EM_D10V", Const, 11},
    -		{"EM_D30V", Const, 11},
    -		{"EM_DSP24", Const, 11},
    -		{"EM_DSPIC30F", Const, 11},
    -		{"EM_DXP", Const, 11},
    -		{"EM_ECOG1", Const, 11},
    -		{"EM_ECOG16", Const, 11},
    -		{"EM_ECOG1X", Const, 11},
    -		{"EM_ECOG2", Const, 11},
    -		{"EM_ETPU", Const, 11},
    -		{"EM_EXCESS", Const, 11},
    -		{"EM_F2MC16", Const, 11},
    -		{"EM_FIREPATH", Const, 11},
    -		{"EM_FR20", Const, 0},
    -		{"EM_FR30", Const, 11},
    -		{"EM_FT32", Const, 11},
    -		{"EM_FX66", Const, 11},
    -		{"EM_H8S", Const, 0},
    -		{"EM_H8_300", Const, 0},
    -		{"EM_H8_300H", Const, 0},
    -		{"EM_H8_500", Const, 0},
    -		{"EM_HUANY", Const, 11},
    -		{"EM_IA_64", Const, 0},
    -		{"EM_INTEL205", Const, 11},
    -		{"EM_INTEL206", Const, 11},
    -		{"EM_INTEL207", Const, 11},
    -		{"EM_INTEL208", Const, 11},
    -		{"EM_INTEL209", Const, 11},
    -		{"EM_IP2K", Const, 11},
    -		{"EM_JAVELIN", Const, 11},
    -		{"EM_K10M", Const, 11},
    -		{"EM_KM32", Const, 11},
    -		{"EM_KMX16", Const, 11},
    -		{"EM_KMX32", Const, 11},
    -		{"EM_KMX8", Const, 11},
    -		{"EM_KVARC", Const, 11},
    -		{"EM_L10M", Const, 11},
    -		{"EM_LANAI", Const, 11},
    -		{"EM_LATTICEMICO32", Const, 11},
    -		{"EM_LOONGARCH", Const, 19},
    -		{"EM_M16C", Const, 11},
    -		{"EM_M32", Const, 0},
    -		{"EM_M32C", Const, 11},
    -		{"EM_M32R", Const, 11},
    -		{"EM_MANIK", Const, 11},
    -		{"EM_MAX", Const, 11},
    -		{"EM_MAXQ30", Const, 11},
    -		{"EM_MCHP_PIC", Const, 11},
    -		{"EM_MCST_ELBRUS", Const, 11},
    -		{"EM_ME16", Const, 0},
    -		{"EM_METAG", Const, 11},
    -		{"EM_MICROBLAZE", Const, 11},
    -		{"EM_MIPS", Const, 0},
    -		{"EM_MIPS_RS3_LE", Const, 0},
    -		{"EM_MIPS_RS4_BE", Const, 0},
    -		{"EM_MIPS_X", Const, 0},
    -		{"EM_MMA", Const, 0},
    -		{"EM_MMDSP_PLUS", Const, 11},
    -		{"EM_MMIX", Const, 11},
    -		{"EM_MN10200", Const, 11},
    -		{"EM_MN10300", Const, 11},
    -		{"EM_MOXIE", Const, 11},
    -		{"EM_MSP430", Const, 11},
    -		{"EM_NCPU", Const, 0},
    -		{"EM_NDR1", Const, 0},
    -		{"EM_NDS32", Const, 11},
    -		{"EM_NONE", Const, 0},
    -		{"EM_NORC", Const, 11},
    -		{"EM_NS32K", Const, 11},
    -		{"EM_OPEN8", Const, 11},
    -		{"EM_OPENRISC", Const, 11},
    -		{"EM_PARISC", Const, 0},
    -		{"EM_PCP", Const, 0},
    -		{"EM_PDP10", Const, 11},
    -		{"EM_PDP11", Const, 11},
    -		{"EM_PDSP", Const, 11},
    -		{"EM_PJ", Const, 11},
    -		{"EM_PPC", Const, 0},
    -		{"EM_PPC64", Const, 0},
    -		{"EM_PRISM", Const, 11},
    -		{"EM_QDSP6", Const, 11},
    -		{"EM_R32C", Const, 11},
    -		{"EM_RCE", Const, 0},
    -		{"EM_RH32", Const, 0},
    -		{"EM_RISCV", Const, 11},
    -		{"EM_RL78", Const, 11},
    -		{"EM_RS08", Const, 11},
    -		{"EM_RX", Const, 11},
    -		{"EM_S370", Const, 0},
    -		{"EM_S390", Const, 0},
    -		{"EM_SCORE7", Const, 11},
    -		{"EM_SEP", Const, 11},
    -		{"EM_SE_C17", Const, 11},
    -		{"EM_SE_C33", Const, 11},
    -		{"EM_SH", Const, 0},
    -		{"EM_SHARC", Const, 11},
    -		{"EM_SLE9X", Const, 11},
    -		{"EM_SNP1K", Const, 11},
    -		{"EM_SPARC", Const, 0},
    -		{"EM_SPARC32PLUS", Const, 0},
    -		{"EM_SPARCV9", Const, 0},
    -		{"EM_ST100", Const, 0},
    -		{"EM_ST19", Const, 11},
    -		{"EM_ST200", Const, 11},
    -		{"EM_ST7", Const, 11},
    -		{"EM_ST9PLUS", Const, 11},
    -		{"EM_STARCORE", Const, 0},
    -		{"EM_STM8", Const, 11},
    -		{"EM_STXP7X", Const, 11},
    -		{"EM_SVX", Const, 11},
    -		{"EM_TILE64", Const, 11},
    -		{"EM_TILEGX", Const, 11},
    -		{"EM_TILEPRO", Const, 11},
    -		{"EM_TINYJ", Const, 0},
    -		{"EM_TI_ARP32", Const, 11},
    -		{"EM_TI_C2000", Const, 11},
    -		{"EM_TI_C5500", Const, 11},
    -		{"EM_TI_C6000", Const, 11},
    -		{"EM_TI_PRU", Const, 11},
    -		{"EM_TMM_GPP", Const, 11},
    -		{"EM_TPC", Const, 11},
    -		{"EM_TRICORE", Const, 0},
    -		{"EM_TRIMEDIA", Const, 11},
    -		{"EM_TSK3000", Const, 11},
    -		{"EM_UNICORE", Const, 11},
    -		{"EM_V800", Const, 0},
    -		{"EM_V850", Const, 11},
    -		{"EM_VAX", Const, 11},
    -		{"EM_VIDEOCORE", Const, 11},
    -		{"EM_VIDEOCORE3", Const, 11},
    -		{"EM_VIDEOCORE5", Const, 11},
    -		{"EM_VISIUM", Const, 11},
    -		{"EM_VPP500", Const, 0},
    -		{"EM_X86_64", Const, 0},
    -		{"EM_XCORE", Const, 11},
    -		{"EM_XGATE", Const, 11},
    -		{"EM_XIMO16", Const, 11},
    -		{"EM_XTENSA", Const, 11},
    -		{"EM_Z80", Const, 11},
    -		{"EM_ZSP", Const, 11},
    -		{"ET_CORE", Const, 0},
    -		{"ET_DYN", Const, 0},
    -		{"ET_EXEC", Const, 0},
    -		{"ET_HIOS", Const, 0},
    -		{"ET_HIPROC", Const, 0},
    -		{"ET_LOOS", Const, 0},
    -		{"ET_LOPROC", Const, 0},
    -		{"ET_NONE", Const, 0},
    -		{"ET_REL", Const, 0},
    -		{"EV_CURRENT", Const, 0},
    -		{"EV_NONE", Const, 0},
    -		{"ErrNoSymbols", Var, 4},
    -		{"File", Type, 0},
    -		{"File.FileHeader", Field, 0},
    -		{"File.Progs", Field, 0},
    -		{"File.Sections", Field, 0},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.ABIVersion", Field, 0},
    -		{"FileHeader.ByteOrder", Field, 0},
    -		{"FileHeader.Class", Field, 0},
    -		{"FileHeader.Data", Field, 0},
    -		{"FileHeader.Entry", Field, 1},
    -		{"FileHeader.Machine", Field, 0},
    -		{"FileHeader.OSABI", Field, 0},
    -		{"FileHeader.Type", Field, 0},
    -		{"FileHeader.Version", Field, 0},
    -		{"FormatError", Type, 0},
    -		{"Header32", Type, 0},
    -		{"Header32.Ehsize", Field, 0},
    -		{"Header32.Entry", Field, 0},
    -		{"Header32.Flags", Field, 0},
    -		{"Header32.Ident", Field, 0},
    -		{"Header32.Machine", Field, 0},
    -		{"Header32.Phentsize", Field, 0},
    -		{"Header32.Phnum", Field, 0},
    -		{"Header32.Phoff", Field, 0},
    -		{"Header32.Shentsize", Field, 0},
    -		{"Header32.Shnum", Field, 0},
    -		{"Header32.Shoff", Field, 0},
    -		{"Header32.Shstrndx", Field, 0},
    -		{"Header32.Type", Field, 0},
    -		{"Header32.Version", Field, 0},
    -		{"Header64", Type, 0},
    -		{"Header64.Ehsize", Field, 0},
    -		{"Header64.Entry", Field, 0},
    -		{"Header64.Flags", Field, 0},
    -		{"Header64.Ident", Field, 0},
    -		{"Header64.Machine", Field, 0},
    -		{"Header64.Phentsize", Field, 0},
    -		{"Header64.Phnum", Field, 0},
    -		{"Header64.Phoff", Field, 0},
    -		{"Header64.Shentsize", Field, 0},
    -		{"Header64.Shnum", Field, 0},
    -		{"Header64.Shoff", Field, 0},
    -		{"Header64.Shstrndx", Field, 0},
    -		{"Header64.Type", Field, 0},
    -		{"Header64.Version", Field, 0},
    -		{"ImportedSymbol", Type, 0},
    -		{"ImportedSymbol.Library", Field, 0},
    -		{"ImportedSymbol.Name", Field, 0},
    -		{"ImportedSymbol.Version", Field, 0},
    -		{"Machine", Type, 0},
    -		{"NT_FPREGSET", Const, 0},
    -		{"NT_PRPSINFO", Const, 0},
    -		{"NT_PRSTATUS", Const, 0},
    -		{"NType", Type, 0},
    -		{"NewFile", Func, 0},
    -		{"OSABI", Type, 0},
    -		{"Open", Func, 0},
    -		{"PF_MASKOS", Const, 0},
    -		{"PF_MASKPROC", Const, 0},
    -		{"PF_R", Const, 0},
    -		{"PF_W", Const, 0},
    -		{"PF_X", Const, 0},
    -		{"PT_AARCH64_ARCHEXT", Const, 16},
    -		{"PT_AARCH64_UNWIND", Const, 16},
    -		{"PT_ARM_ARCHEXT", Const, 16},
    -		{"PT_ARM_EXIDX", Const, 16},
    -		{"PT_DYNAMIC", Const, 0},
    -		{"PT_GNU_EH_FRAME", Const, 16},
    -		{"PT_GNU_MBIND_HI", Const, 16},
    -		{"PT_GNU_MBIND_LO", Const, 16},
    -		{"PT_GNU_PROPERTY", Const, 16},
    -		{"PT_GNU_RELRO", Const, 16},
    -		{"PT_GNU_STACK", Const, 16},
    -		{"PT_HIOS", Const, 0},
    -		{"PT_HIPROC", Const, 0},
    -		{"PT_INTERP", Const, 0},
    -		{"PT_LOAD", Const, 0},
    -		{"PT_LOOS", Const, 0},
    -		{"PT_LOPROC", Const, 0},
    -		{"PT_MIPS_ABIFLAGS", Const, 16},
    -		{"PT_MIPS_OPTIONS", Const, 16},
    -		{"PT_MIPS_REGINFO", Const, 16},
    -		{"PT_MIPS_RTPROC", Const, 16},
    -		{"PT_NOTE", Const, 0},
    -		{"PT_NULL", Const, 0},
    -		{"PT_OPENBSD_BOOTDATA", Const, 16},
    -		{"PT_OPENBSD_NOBTCFI", Const, 23},
    -		{"PT_OPENBSD_RANDOMIZE", Const, 16},
    -		{"PT_OPENBSD_WXNEEDED", Const, 16},
    -		{"PT_PAX_FLAGS", Const, 16},
    -		{"PT_PHDR", Const, 0},
    -		{"PT_S390_PGSTE", Const, 16},
    -		{"PT_SHLIB", Const, 0},
    -		{"PT_SUNWSTACK", Const, 16},
    -		{"PT_SUNW_EH_FRAME", Const, 16},
    -		{"PT_TLS", Const, 0},
    -		{"Prog", Type, 0},
    -		{"Prog.ProgHeader", Field, 0},
    -		{"Prog.ReaderAt", Field, 0},
    -		{"Prog32", Type, 0},
    -		{"Prog32.Align", Field, 0},
    -		{"Prog32.Filesz", Field, 0},
    -		{"Prog32.Flags", Field, 0},
    -		{"Prog32.Memsz", Field, 0},
    -		{"Prog32.Off", Field, 0},
    -		{"Prog32.Paddr", Field, 0},
    -		{"Prog32.Type", Field, 0},
    -		{"Prog32.Vaddr", Field, 0},
    -		{"Prog64", Type, 0},
    -		{"Prog64.Align", Field, 0},
    -		{"Prog64.Filesz", Field, 0},
    -		{"Prog64.Flags", Field, 0},
    -		{"Prog64.Memsz", Field, 0},
    -		{"Prog64.Off", Field, 0},
    -		{"Prog64.Paddr", Field, 0},
    -		{"Prog64.Type", Field, 0},
    -		{"Prog64.Vaddr", Field, 0},
    -		{"ProgFlag", Type, 0},
    -		{"ProgHeader", Type, 0},
    -		{"ProgHeader.Align", Field, 0},
    -		{"ProgHeader.Filesz", Field, 0},
    -		{"ProgHeader.Flags", Field, 0},
    -		{"ProgHeader.Memsz", Field, 0},
    -		{"ProgHeader.Off", Field, 0},
    -		{"ProgHeader.Paddr", Field, 0},
    -		{"ProgHeader.Type", Field, 0},
    -		{"ProgHeader.Vaddr", Field, 0},
    -		{"ProgType", Type, 0},
    -		{"R_386", Type, 0},
    -		{"R_386_16", Const, 10},
    -		{"R_386_32", Const, 0},
    -		{"R_386_32PLT", Const, 10},
    -		{"R_386_8", Const, 10},
    -		{"R_386_COPY", Const, 0},
    -		{"R_386_GLOB_DAT", Const, 0},
    -		{"R_386_GOT32", Const, 0},
    -		{"R_386_GOT32X", Const, 10},
    -		{"R_386_GOTOFF", Const, 0},
    -		{"R_386_GOTPC", Const, 0},
    -		{"R_386_IRELATIVE", Const, 10},
    -		{"R_386_JMP_SLOT", Const, 0},
    -		{"R_386_NONE", Const, 0},
    -		{"R_386_PC16", Const, 10},
    -		{"R_386_PC32", Const, 0},
    -		{"R_386_PC8", Const, 10},
    -		{"R_386_PLT32", Const, 0},
    -		{"R_386_RELATIVE", Const, 0},
    -		{"R_386_SIZE32", Const, 10},
    -		{"R_386_TLS_DESC", Const, 10},
    -		{"R_386_TLS_DESC_CALL", Const, 10},
    -		{"R_386_TLS_DTPMOD32", Const, 0},
    -		{"R_386_TLS_DTPOFF32", Const, 0},
    -		{"R_386_TLS_GD", Const, 0},
    -		{"R_386_TLS_GD_32", Const, 0},
    -		{"R_386_TLS_GD_CALL", Const, 0},
    -		{"R_386_TLS_GD_POP", Const, 0},
    -		{"R_386_TLS_GD_PUSH", Const, 0},
    -		{"R_386_TLS_GOTDESC", Const, 10},
    -		{"R_386_TLS_GOTIE", Const, 0},
    -		{"R_386_TLS_IE", Const, 0},
    -		{"R_386_TLS_IE_32", Const, 0},
    -		{"R_386_TLS_LDM", Const, 0},
    -		{"R_386_TLS_LDM_32", Const, 0},
    -		{"R_386_TLS_LDM_CALL", Const, 0},
    -		{"R_386_TLS_LDM_POP", Const, 0},
    -		{"R_386_TLS_LDM_PUSH", Const, 0},
    -		{"R_386_TLS_LDO_32", Const, 0},
    -		{"R_386_TLS_LE", Const, 0},
    -		{"R_386_TLS_LE_32", Const, 0},
    -		{"R_386_TLS_TPOFF", Const, 0},
    -		{"R_386_TLS_TPOFF32", Const, 0},
    -		{"R_390", Type, 7},
    -		{"R_390_12", Const, 7},
    -		{"R_390_16", Const, 7},
    -		{"R_390_20", Const, 7},
    -		{"R_390_32", Const, 7},
    -		{"R_390_64", Const, 7},
    -		{"R_390_8", Const, 7},
    -		{"R_390_COPY", Const, 7},
    -		{"R_390_GLOB_DAT", Const, 7},
    -		{"R_390_GOT12", Const, 7},
    -		{"R_390_GOT16", Const, 7},
    -		{"R_390_GOT20", Const, 7},
    -		{"R_390_GOT32", Const, 7},
    -		{"R_390_GOT64", Const, 7},
    -		{"R_390_GOTENT", Const, 7},
    -		{"R_390_GOTOFF", Const, 7},
    -		{"R_390_GOTOFF16", Const, 7},
    -		{"R_390_GOTOFF64", Const, 7},
    -		{"R_390_GOTPC", Const, 7},
    -		{"R_390_GOTPCDBL", Const, 7},
    -		{"R_390_GOTPLT12", Const, 7},
    -		{"R_390_GOTPLT16", Const, 7},
    -		{"R_390_GOTPLT20", Const, 7},
    -		{"R_390_GOTPLT32", Const, 7},
    -		{"R_390_GOTPLT64", Const, 7},
    -		{"R_390_GOTPLTENT", Const, 7},
    -		{"R_390_GOTPLTOFF16", Const, 7},
    -		{"R_390_GOTPLTOFF32", Const, 7},
    -		{"R_390_GOTPLTOFF64", Const, 7},
    -		{"R_390_JMP_SLOT", Const, 7},
    -		{"R_390_NONE", Const, 7},
    -		{"R_390_PC16", Const, 7},
    -		{"R_390_PC16DBL", Const, 7},
    -		{"R_390_PC32", Const, 7},
    -		{"R_390_PC32DBL", Const, 7},
    -		{"R_390_PC64", Const, 7},
    -		{"R_390_PLT16DBL", Const, 7},
    -		{"R_390_PLT32", Const, 7},
    -		{"R_390_PLT32DBL", Const, 7},
    -		{"R_390_PLT64", Const, 7},
    -		{"R_390_RELATIVE", Const, 7},
    -		{"R_390_TLS_DTPMOD", Const, 7},
    -		{"R_390_TLS_DTPOFF", Const, 7},
    -		{"R_390_TLS_GD32", Const, 7},
    -		{"R_390_TLS_GD64", Const, 7},
    -		{"R_390_TLS_GDCALL", Const, 7},
    -		{"R_390_TLS_GOTIE12", Const, 7},
    -		{"R_390_TLS_GOTIE20", Const, 7},
    -		{"R_390_TLS_GOTIE32", Const, 7},
    -		{"R_390_TLS_GOTIE64", Const, 7},
    -		{"R_390_TLS_IE32", Const, 7},
    -		{"R_390_TLS_IE64", Const, 7},
    -		{"R_390_TLS_IEENT", Const, 7},
    -		{"R_390_TLS_LDCALL", Const, 7},
    -		{"R_390_TLS_LDM32", Const, 7},
    -		{"R_390_TLS_LDM64", Const, 7},
    -		{"R_390_TLS_LDO32", Const, 7},
    -		{"R_390_TLS_LDO64", Const, 7},
    -		{"R_390_TLS_LE32", Const, 7},
    -		{"R_390_TLS_LE64", Const, 7},
    -		{"R_390_TLS_LOAD", Const, 7},
    -		{"R_390_TLS_TPOFF", Const, 7},
    -		{"R_AARCH64", Type, 4},
    -		{"R_AARCH64_ABS16", Const, 4},
    -		{"R_AARCH64_ABS32", Const, 4},
    -		{"R_AARCH64_ABS64", Const, 4},
    -		{"R_AARCH64_ADD_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_ADR_GOT_PAGE", Const, 4},
    -		{"R_AARCH64_ADR_PREL_LO21", Const, 4},
    -		{"R_AARCH64_ADR_PREL_PG_HI21", Const, 4},
    -		{"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4},
    -		{"R_AARCH64_CALL26", Const, 4},
    -		{"R_AARCH64_CONDBR19", Const, 4},
    -		{"R_AARCH64_COPY", Const, 4},
    -		{"R_AARCH64_GLOB_DAT", Const, 4},
    -		{"R_AARCH64_GOT_LD_PREL19", Const, 4},
    -		{"R_AARCH64_IRELATIVE", Const, 4},
    -		{"R_AARCH64_JUMP26", Const, 4},
    -		{"R_AARCH64_JUMP_SLOT", Const, 4},
    -		{"R_AARCH64_LD64_GOTOFF_LO15", Const, 10},
    -		{"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10},
    -		{"R_AARCH64_LD64_GOT_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_LD_PREL_LO19", Const, 4},
    -		{"R_AARCH64_MOVW_SABS_G0", Const, 4},
    -		{"R_AARCH64_MOVW_SABS_G1", Const, 4},
    -		{"R_AARCH64_MOVW_SABS_G2", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G0", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G0_NC", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G1", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G1_NC", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G2", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G2_NC", Const, 4},
    -		{"R_AARCH64_MOVW_UABS_G3", Const, 4},
    -		{"R_AARCH64_NONE", Const, 4},
    -		{"R_AARCH64_NULL", Const, 4},
    -		{"R_AARCH64_P32_ABS16", Const, 4},
    -		{"R_AARCH64_P32_ABS32", Const, 4},
    -		{"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4},
    -		{"R_AARCH64_P32_ADR_PREL_LO21", Const, 4},
    -		{"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4},
    -		{"R_AARCH64_P32_CALL26", Const, 4},
    -		{"R_AARCH64_P32_CONDBR19", Const, 4},
    -		{"R_AARCH64_P32_COPY", Const, 4},
    -		{"R_AARCH64_P32_GLOB_DAT", Const, 4},
    -		{"R_AARCH64_P32_GOT_LD_PREL19", Const, 4},
    -		{"R_AARCH64_P32_IRELATIVE", Const, 4},
    -		{"R_AARCH64_P32_JUMP26", Const, 4},
    -		{"R_AARCH64_P32_JUMP_SLOT", Const, 4},
    -		{"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_LD_PREL_LO19", Const, 4},
    -		{"R_AARCH64_P32_MOVW_SABS_G0", Const, 4},
    -		{"R_AARCH64_P32_MOVW_UABS_G0", Const, 4},
    -		{"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4},
    -		{"R_AARCH64_P32_MOVW_UABS_G1", Const, 4},
    -		{"R_AARCH64_P32_PREL16", Const, 4},
    -		{"R_AARCH64_P32_PREL32", Const, 4},
    -		{"R_AARCH64_P32_RELATIVE", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_CALL", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4},
    -		{"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4},
    -		{"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4},
    -		{"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4},
    -		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4},
    -		{"R_AARCH64_P32_TLS_DTPMOD", Const, 4},
    -		{"R_AARCH64_P32_TLS_DTPREL", Const, 4},
    -		{"R_AARCH64_P32_TLS_TPREL", Const, 4},
    -		{"R_AARCH64_P32_TSTBR14", Const, 4},
    -		{"R_AARCH64_PREL16", Const, 4},
    -		{"R_AARCH64_PREL32", Const, 4},
    -		{"R_AARCH64_PREL64", Const, 4},
    -		{"R_AARCH64_RELATIVE", Const, 4},
    -		{"R_AARCH64_TLSDESC", Const, 4},
    -		{"R_AARCH64_TLSDESC_ADD", Const, 4},
    -		{"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4},
    -		{"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4},
    -		{"R_AARCH64_TLSDESC_CALL", Const, 4},
    -		{"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSDESC_LDR", Const, 4},
    -		{"R_AARCH64_TLSDESC_LD_PREL19", Const, 4},
    -		{"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4},
    -		{"R_AARCH64_TLSDESC_OFF_G1", Const, 4},
    -		{"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4},
    -		{"R_AARCH64_TLSGD_ADR_PREL21", Const, 10},
    -		{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10},
    -		{"R_AARCH64_TLSGD_MOVW_G1", Const, 10},
    -		{"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4},
    -		{"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4},
    -		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4},
    -		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4},
    -		{"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10},
    -		{"R_AARCH64_TLSLD_ADR_PREL21", Const, 10},
    -		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10},
    -		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10},
    -		{"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4},
    -		{"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4},
    -		{"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4},
    -		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10},
    -		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4},
    -		{"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4},
    -		{"R_AARCH64_TLS_DTPMOD64", Const, 4},
    -		{"R_AARCH64_TLS_DTPREL64", Const, 4},
    -		{"R_AARCH64_TLS_TPREL64", Const, 4},
    -		{"R_AARCH64_TSTBR14", Const, 4},
    -		{"R_ALPHA", Type, 0},
    -		{"R_ALPHA_BRADDR", Const, 0},
    -		{"R_ALPHA_COPY", Const, 0},
    -		{"R_ALPHA_GLOB_DAT", Const, 0},
    -		{"R_ALPHA_GPDISP", Const, 0},
    -		{"R_ALPHA_GPREL32", Const, 0},
    -		{"R_ALPHA_GPRELHIGH", Const, 0},
    -		{"R_ALPHA_GPRELLOW", Const, 0},
    -		{"R_ALPHA_GPVALUE", Const, 0},
    -		{"R_ALPHA_HINT", Const, 0},
    -		{"R_ALPHA_IMMED_BR_HI32", Const, 0},
    -		{"R_ALPHA_IMMED_GP_16", Const, 0},
    -		{"R_ALPHA_IMMED_GP_HI32", Const, 0},
    -		{"R_ALPHA_IMMED_LO32", Const, 0},
    -		{"R_ALPHA_IMMED_SCN_HI32", Const, 0},
    -		{"R_ALPHA_JMP_SLOT", Const, 0},
    -		{"R_ALPHA_LITERAL", Const, 0},
    -		{"R_ALPHA_LITUSE", Const, 0},
    -		{"R_ALPHA_NONE", Const, 0},
    -		{"R_ALPHA_OP_PRSHIFT", Const, 0},
    -		{"R_ALPHA_OP_PSUB", Const, 0},
    -		{"R_ALPHA_OP_PUSH", Const, 0},
    -		{"R_ALPHA_OP_STORE", Const, 0},
    -		{"R_ALPHA_REFLONG", Const, 0},
    -		{"R_ALPHA_REFQUAD", Const, 0},
    -		{"R_ALPHA_RELATIVE", Const, 0},
    -		{"R_ALPHA_SREL16", Const, 0},
    -		{"R_ALPHA_SREL32", Const, 0},
    -		{"R_ALPHA_SREL64", Const, 0},
    -		{"R_ARM", Type, 0},
    -		{"R_ARM_ABS12", Const, 0},
    -		{"R_ARM_ABS16", Const, 0},
    -		{"R_ARM_ABS32", Const, 0},
    -		{"R_ARM_ABS32_NOI", Const, 10},
    -		{"R_ARM_ABS8", Const, 0},
    -		{"R_ARM_ALU_PCREL_15_8", Const, 10},
    -		{"R_ARM_ALU_PCREL_23_15", Const, 10},
    -		{"R_ARM_ALU_PCREL_7_0", Const, 10},
    -		{"R_ARM_ALU_PC_G0", Const, 10},
    -		{"R_ARM_ALU_PC_G0_NC", Const, 10},
    -		{"R_ARM_ALU_PC_G1", Const, 10},
    -		{"R_ARM_ALU_PC_G1_NC", Const, 10},
    -		{"R_ARM_ALU_PC_G2", Const, 10},
    -		{"R_ARM_ALU_SBREL_19_12_NC", Const, 10},
    -		{"R_ARM_ALU_SBREL_27_20_CK", Const, 10},
    -		{"R_ARM_ALU_SB_G0", Const, 10},
    -		{"R_ARM_ALU_SB_G0_NC", Const, 10},
    -		{"R_ARM_ALU_SB_G1", Const, 10},
    -		{"R_ARM_ALU_SB_G1_NC", Const, 10},
    -		{"R_ARM_ALU_SB_G2", Const, 10},
    -		{"R_ARM_AMP_VCALL9", Const, 0},
    -		{"R_ARM_BASE_ABS", Const, 10},
    -		{"R_ARM_CALL", Const, 10},
    -		{"R_ARM_COPY", Const, 0},
    -		{"R_ARM_GLOB_DAT", Const, 0},
    -		{"R_ARM_GNU_VTENTRY", Const, 0},
    -		{"R_ARM_GNU_VTINHERIT", Const, 0},
    -		{"R_ARM_GOT32", Const, 0},
    -		{"R_ARM_GOTOFF", Const, 0},
    -		{"R_ARM_GOTOFF12", Const, 10},
    -		{"R_ARM_GOTPC", Const, 0},
    -		{"R_ARM_GOTRELAX", Const, 10},
    -		{"R_ARM_GOT_ABS", Const, 10},
    -		{"R_ARM_GOT_BREL12", Const, 10},
    -		{"R_ARM_GOT_PREL", Const, 10},
    -		{"R_ARM_IRELATIVE", Const, 10},
    -		{"R_ARM_JUMP24", Const, 10},
    -		{"R_ARM_JUMP_SLOT", Const, 0},
    -		{"R_ARM_LDC_PC_G0", Const, 10},
    -		{"R_ARM_LDC_PC_G1", Const, 10},
    -		{"R_ARM_LDC_PC_G2", Const, 10},
    -		{"R_ARM_LDC_SB_G0", Const, 10},
    -		{"R_ARM_LDC_SB_G1", Const, 10},
    -		{"R_ARM_LDC_SB_G2", Const, 10},
    -		{"R_ARM_LDRS_PC_G0", Const, 10},
    -		{"R_ARM_LDRS_PC_G1", Const, 10},
    -		{"R_ARM_LDRS_PC_G2", Const, 10},
    -		{"R_ARM_LDRS_SB_G0", Const, 10},
    -		{"R_ARM_LDRS_SB_G1", Const, 10},
    -		{"R_ARM_LDRS_SB_G2", Const, 10},
    -		{"R_ARM_LDR_PC_G1", Const, 10},
    -		{"R_ARM_LDR_PC_G2", Const, 10},
    -		{"R_ARM_LDR_SBREL_11_10_NC", Const, 10},
    -		{"R_ARM_LDR_SB_G0", Const, 10},
    -		{"R_ARM_LDR_SB_G1", Const, 10},
    -		{"R_ARM_LDR_SB_G2", Const, 10},
    -		{"R_ARM_ME_TOO", Const, 10},
    -		{"R_ARM_MOVT_ABS", Const, 10},
    -		{"R_ARM_MOVT_BREL", Const, 10},
    -		{"R_ARM_MOVT_PREL", Const, 10},
    -		{"R_ARM_MOVW_ABS_NC", Const, 10},
    -		{"R_ARM_MOVW_BREL", Const, 10},
    -		{"R_ARM_MOVW_BREL_NC", Const, 10},
    -		{"R_ARM_MOVW_PREL_NC", Const, 10},
    -		{"R_ARM_NONE", Const, 0},
    -		{"R_ARM_PC13", Const, 0},
    -		{"R_ARM_PC24", Const, 0},
    -		{"R_ARM_PLT32", Const, 0},
    -		{"R_ARM_PLT32_ABS", Const, 10},
    -		{"R_ARM_PREL31", Const, 10},
    -		{"R_ARM_PRIVATE_0", Const, 10},
    -		{"R_ARM_PRIVATE_1", Const, 10},
    -		{"R_ARM_PRIVATE_10", Const, 10},
    -		{"R_ARM_PRIVATE_11", Const, 10},
    -		{"R_ARM_PRIVATE_12", Const, 10},
    -		{"R_ARM_PRIVATE_13", Const, 10},
    -		{"R_ARM_PRIVATE_14", Const, 10},
    -		{"R_ARM_PRIVATE_15", Const, 10},
    -		{"R_ARM_PRIVATE_2", Const, 10},
    -		{"R_ARM_PRIVATE_3", Const, 10},
    -		{"R_ARM_PRIVATE_4", Const, 10},
    -		{"R_ARM_PRIVATE_5", Const, 10},
    -		{"R_ARM_PRIVATE_6", Const, 10},
    -		{"R_ARM_PRIVATE_7", Const, 10},
    -		{"R_ARM_PRIVATE_8", Const, 10},
    -		{"R_ARM_PRIVATE_9", Const, 10},
    -		{"R_ARM_RABS32", Const, 0},
    -		{"R_ARM_RBASE", Const, 0},
    -		{"R_ARM_REL32", Const, 0},
    -		{"R_ARM_REL32_NOI", Const, 10},
    -		{"R_ARM_RELATIVE", Const, 0},
    -		{"R_ARM_RPC24", Const, 0},
    -		{"R_ARM_RREL32", Const, 0},
    -		{"R_ARM_RSBREL32", Const, 0},
    -		{"R_ARM_RXPC25", Const, 10},
    -		{"R_ARM_SBREL31", Const, 10},
    -		{"R_ARM_SBREL32", Const, 0},
    -		{"R_ARM_SWI24", Const, 0},
    -		{"R_ARM_TARGET1", Const, 10},
    -		{"R_ARM_TARGET2", Const, 10},
    -		{"R_ARM_THM_ABS5", Const, 0},
    -		{"R_ARM_THM_ALU_ABS_G0_NC", Const, 10},
    -		{"R_ARM_THM_ALU_ABS_G1_NC", Const, 10},
    -		{"R_ARM_THM_ALU_ABS_G2_NC", Const, 10},
    -		{"R_ARM_THM_ALU_ABS_G3", Const, 10},
    -		{"R_ARM_THM_ALU_PREL_11_0", Const, 10},
    -		{"R_ARM_THM_GOT_BREL12", Const, 10},
    -		{"R_ARM_THM_JUMP11", Const, 10},
    -		{"R_ARM_THM_JUMP19", Const, 10},
    -		{"R_ARM_THM_JUMP24", Const, 10},
    -		{"R_ARM_THM_JUMP6", Const, 10},
    -		{"R_ARM_THM_JUMP8", Const, 10},
    -		{"R_ARM_THM_MOVT_ABS", Const, 10},
    -		{"R_ARM_THM_MOVT_BREL", Const, 10},
    -		{"R_ARM_THM_MOVT_PREL", Const, 10},
    -		{"R_ARM_THM_MOVW_ABS_NC", Const, 10},
    -		{"R_ARM_THM_MOVW_BREL", Const, 10},
    -		{"R_ARM_THM_MOVW_BREL_NC", Const, 10},
    -		{"R_ARM_THM_MOVW_PREL_NC", Const, 10},
    -		{"R_ARM_THM_PC12", Const, 10},
    -		{"R_ARM_THM_PC22", Const, 0},
    -		{"R_ARM_THM_PC8", Const, 0},
    -		{"R_ARM_THM_RPC22", Const, 0},
    -		{"R_ARM_THM_SWI8", Const, 0},
    -		{"R_ARM_THM_TLS_CALL", Const, 10},
    -		{"R_ARM_THM_TLS_DESCSEQ16", Const, 10},
    -		{"R_ARM_THM_TLS_DESCSEQ32", Const, 10},
    -		{"R_ARM_THM_XPC22", Const, 0},
    -		{"R_ARM_TLS_CALL", Const, 10},
    -		{"R_ARM_TLS_DESCSEQ", Const, 10},
    -		{"R_ARM_TLS_DTPMOD32", Const, 10},
    -		{"R_ARM_TLS_DTPOFF32", Const, 10},
    -		{"R_ARM_TLS_GD32", Const, 10},
    -		{"R_ARM_TLS_GOTDESC", Const, 10},
    -		{"R_ARM_TLS_IE12GP", Const, 10},
    -		{"R_ARM_TLS_IE32", Const, 10},
    -		{"R_ARM_TLS_LDM32", Const, 10},
    -		{"R_ARM_TLS_LDO12", Const, 10},
    -		{"R_ARM_TLS_LDO32", Const, 10},
    -		{"R_ARM_TLS_LE12", Const, 10},
    -		{"R_ARM_TLS_LE32", Const, 10},
    -		{"R_ARM_TLS_TPOFF32", Const, 10},
    -		{"R_ARM_V4BX", Const, 10},
    -		{"R_ARM_XPC25", Const, 0},
    -		{"R_INFO", Func, 0},
    -		{"R_INFO32", Func, 0},
    -		{"R_LARCH", Type, 19},
    -		{"R_LARCH_32", Const, 19},
    -		{"R_LARCH_32_PCREL", Const, 20},
    -		{"R_LARCH_64", Const, 19},
    -		{"R_LARCH_64_PCREL", Const, 22},
    -		{"R_LARCH_ABS64_HI12", Const, 20},
    -		{"R_LARCH_ABS64_LO20", Const, 20},
    -		{"R_LARCH_ABS_HI20", Const, 20},
    -		{"R_LARCH_ABS_LO12", Const, 20},
    -		{"R_LARCH_ADD16", Const, 19},
    -		{"R_LARCH_ADD24", Const, 19},
    -		{"R_LARCH_ADD32", Const, 19},
    -		{"R_LARCH_ADD6", Const, 22},
    -		{"R_LARCH_ADD64", Const, 19},
    -		{"R_LARCH_ADD8", Const, 19},
    -		{"R_LARCH_ADD_ULEB128", Const, 22},
    -		{"R_LARCH_ALIGN", Const, 22},
    -		{"R_LARCH_B16", Const, 20},
    -		{"R_LARCH_B21", Const, 20},
    -		{"R_LARCH_B26", Const, 20},
    -		{"R_LARCH_CFA", Const, 22},
    -		{"R_LARCH_COPY", Const, 19},
    -		{"R_LARCH_DELETE", Const, 22},
    -		{"R_LARCH_GNU_VTENTRY", Const, 20},
    -		{"R_LARCH_GNU_VTINHERIT", Const, 20},
    -		{"R_LARCH_GOT64_HI12", Const, 20},
    -		{"R_LARCH_GOT64_LO20", Const, 20},
    -		{"R_LARCH_GOT64_PC_HI12", Const, 20},
    -		{"R_LARCH_GOT64_PC_LO20", Const, 20},
    -		{"R_LARCH_GOT_HI20", Const, 20},
    -		{"R_LARCH_GOT_LO12", Const, 20},
    -		{"R_LARCH_GOT_PC_HI20", Const, 20},
    -		{"R_LARCH_GOT_PC_LO12", Const, 20},
    -		{"R_LARCH_IRELATIVE", Const, 19},
    -		{"R_LARCH_JUMP_SLOT", Const, 19},
    -		{"R_LARCH_MARK_LA", Const, 19},
    -		{"R_LARCH_MARK_PCREL", Const, 19},
    -		{"R_LARCH_NONE", Const, 19},
    -		{"R_LARCH_PCALA64_HI12", Const, 20},
    -		{"R_LARCH_PCALA64_LO20", Const, 20},
    -		{"R_LARCH_PCALA_HI20", Const, 20},
    -		{"R_LARCH_PCALA_LO12", Const, 20},
    -		{"R_LARCH_PCREL20_S2", Const, 22},
    -		{"R_LARCH_RELATIVE", Const, 19},
    -		{"R_LARCH_RELAX", Const, 20},
    -		{"R_LARCH_SOP_ADD", Const, 19},
    -		{"R_LARCH_SOP_AND", Const, 19},
    -		{"R_LARCH_SOP_ASSERT", Const, 19},
    -		{"R_LARCH_SOP_IF_ELSE", Const, 19},
    -		{"R_LARCH_SOP_NOT", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_10_12", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_10_16", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_10_5", Const, 19},
    -		{"R_LARCH_SOP_POP_32_S_5_20", Const, 19},
    -		{"R_LARCH_SOP_POP_32_U", Const, 19},
    -		{"R_LARCH_SOP_POP_32_U_10_12", Const, 19},
    -		{"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19},
    -		{"R_LARCH_SOP_PUSH_DUP", Const, 19},
    -		{"R_LARCH_SOP_PUSH_GPREL", Const, 19},
    -		{"R_LARCH_SOP_PUSH_PCREL", Const, 19},
    -		{"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19},
    -		{"R_LARCH_SOP_PUSH_TLS_GD", Const, 19},
    -		{"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19},
    -		{"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19},
    -		{"R_LARCH_SOP_SL", Const, 19},
    -		{"R_LARCH_SOP_SR", Const, 19},
    -		{"R_LARCH_SOP_SUB", Const, 19},
    -		{"R_LARCH_SUB16", Const, 19},
    -		{"R_LARCH_SUB24", Const, 19},
    -		{"R_LARCH_SUB32", Const, 19},
    -		{"R_LARCH_SUB6", Const, 22},
    -		{"R_LARCH_SUB64", Const, 19},
    -		{"R_LARCH_SUB8", Const, 19},
    -		{"R_LARCH_SUB_ULEB128", Const, 22},
    -		{"R_LARCH_TLS_DTPMOD32", Const, 19},
    -		{"R_LARCH_TLS_DTPMOD64", Const, 19},
    -		{"R_LARCH_TLS_DTPREL32", Const, 19},
    -		{"R_LARCH_TLS_DTPREL64", Const, 19},
    -		{"R_LARCH_TLS_GD_HI20", Const, 20},
    -		{"R_LARCH_TLS_GD_PC_HI20", Const, 20},
    -		{"R_LARCH_TLS_IE64_HI12", Const, 20},
    -		{"R_LARCH_TLS_IE64_LO20", Const, 20},
    -		{"R_LARCH_TLS_IE64_PC_HI12", Const, 20},
    -		{"R_LARCH_TLS_IE64_PC_LO20", Const, 20},
    -		{"R_LARCH_TLS_IE_HI20", Const, 20},
    -		{"R_LARCH_TLS_IE_LO12", Const, 20},
    -		{"R_LARCH_TLS_IE_PC_HI20", Const, 20},
    -		{"R_LARCH_TLS_IE_PC_LO12", Const, 20},
    -		{"R_LARCH_TLS_LD_HI20", Const, 20},
    -		{"R_LARCH_TLS_LD_PC_HI20", Const, 20},
    -		{"R_LARCH_TLS_LE64_HI12", Const, 20},
    -		{"R_LARCH_TLS_LE64_LO20", Const, 20},
    -		{"R_LARCH_TLS_LE_HI20", Const, 20},
    -		{"R_LARCH_TLS_LE_LO12", Const, 20},
    -		{"R_LARCH_TLS_TPREL32", Const, 19},
    -		{"R_LARCH_TLS_TPREL64", Const, 19},
    -		{"R_MIPS", Type, 6},
    -		{"R_MIPS_16", Const, 6},
    -		{"R_MIPS_26", Const, 6},
    -		{"R_MIPS_32", Const, 6},
    -		{"R_MIPS_64", Const, 6},
    -		{"R_MIPS_ADD_IMMEDIATE", Const, 6},
    -		{"R_MIPS_CALL16", Const, 6},
    -		{"R_MIPS_CALL_HI16", Const, 6},
    -		{"R_MIPS_CALL_LO16", Const, 6},
    -		{"R_MIPS_DELETE", Const, 6},
    -		{"R_MIPS_GOT16", Const, 6},
    -		{"R_MIPS_GOT_DISP", Const, 6},
    -		{"R_MIPS_GOT_HI16", Const, 6},
    -		{"R_MIPS_GOT_LO16", Const, 6},
    -		{"R_MIPS_GOT_OFST", Const, 6},
    -		{"R_MIPS_GOT_PAGE", Const, 6},
    -		{"R_MIPS_GPREL16", Const, 6},
    -		{"R_MIPS_GPREL32", Const, 6},
    -		{"R_MIPS_HI16", Const, 6},
    -		{"R_MIPS_HIGHER", Const, 6},
    -		{"R_MIPS_HIGHEST", Const, 6},
    -		{"R_MIPS_INSERT_A", Const, 6},
    -		{"R_MIPS_INSERT_B", Const, 6},
    -		{"R_MIPS_JALR", Const, 6},
    -		{"R_MIPS_LITERAL", Const, 6},
    -		{"R_MIPS_LO16", Const, 6},
    -		{"R_MIPS_NONE", Const, 6},
    -		{"R_MIPS_PC16", Const, 6},
    -		{"R_MIPS_PC32", Const, 22},
    -		{"R_MIPS_PJUMP", Const, 6},
    -		{"R_MIPS_REL16", Const, 6},
    -		{"R_MIPS_REL32", Const, 6},
    -		{"R_MIPS_RELGOT", Const, 6},
    -		{"R_MIPS_SCN_DISP", Const, 6},
    -		{"R_MIPS_SHIFT5", Const, 6},
    -		{"R_MIPS_SHIFT6", Const, 6},
    -		{"R_MIPS_SUB", Const, 6},
    -		{"R_MIPS_TLS_DTPMOD32", Const, 6},
    -		{"R_MIPS_TLS_DTPMOD64", Const, 6},
    -		{"R_MIPS_TLS_DTPREL32", Const, 6},
    -		{"R_MIPS_TLS_DTPREL64", Const, 6},
    -		{"R_MIPS_TLS_DTPREL_HI16", Const, 6},
    -		{"R_MIPS_TLS_DTPREL_LO16", Const, 6},
    -		{"R_MIPS_TLS_GD", Const, 6},
    -		{"R_MIPS_TLS_GOTTPREL", Const, 6},
    -		{"R_MIPS_TLS_LDM", Const, 6},
    -		{"R_MIPS_TLS_TPREL32", Const, 6},
    -		{"R_MIPS_TLS_TPREL64", Const, 6},
    -		{"R_MIPS_TLS_TPREL_HI16", Const, 6},
    -		{"R_MIPS_TLS_TPREL_LO16", Const, 6},
    -		{"R_PPC", Type, 0},
    -		{"R_PPC64", Type, 5},
    -		{"R_PPC64_ADDR14", Const, 5},
    -		{"R_PPC64_ADDR14_BRNTAKEN", Const, 5},
    -		{"R_PPC64_ADDR14_BRTAKEN", Const, 5},
    -		{"R_PPC64_ADDR16", Const, 5},
    -		{"R_PPC64_ADDR16_DS", Const, 5},
    -		{"R_PPC64_ADDR16_HA", Const, 5},
    -		{"R_PPC64_ADDR16_HI", Const, 5},
    -		{"R_PPC64_ADDR16_HIGH", Const, 10},
    -		{"R_PPC64_ADDR16_HIGHA", Const, 10},
    -		{"R_PPC64_ADDR16_HIGHER", Const, 5},
    -		{"R_PPC64_ADDR16_HIGHER34", Const, 20},
    -		{"R_PPC64_ADDR16_HIGHERA", Const, 5},
    -		{"R_PPC64_ADDR16_HIGHERA34", Const, 20},
    -		{"R_PPC64_ADDR16_HIGHEST", Const, 5},
    -		{"R_PPC64_ADDR16_HIGHEST34", Const, 20},
    -		{"R_PPC64_ADDR16_HIGHESTA", Const, 5},
    -		{"R_PPC64_ADDR16_HIGHESTA34", Const, 20},
    -		{"R_PPC64_ADDR16_LO", Const, 5},
    -		{"R_PPC64_ADDR16_LO_DS", Const, 5},
    -		{"R_PPC64_ADDR24", Const, 5},
    -		{"R_PPC64_ADDR32", Const, 5},
    -		{"R_PPC64_ADDR64", Const, 5},
    -		{"R_PPC64_ADDR64_LOCAL", Const, 10},
    -		{"R_PPC64_COPY", Const, 20},
    -		{"R_PPC64_D28", Const, 20},
    -		{"R_PPC64_D34", Const, 20},
    -		{"R_PPC64_D34_HA30", Const, 20},
    -		{"R_PPC64_D34_HI30", Const, 20},
    -		{"R_PPC64_D34_LO", Const, 20},
    -		{"R_PPC64_DTPMOD64", Const, 5},
    -		{"R_PPC64_DTPREL16", Const, 5},
    -		{"R_PPC64_DTPREL16_DS", Const, 5},
    -		{"R_PPC64_DTPREL16_HA", Const, 5},
    -		{"R_PPC64_DTPREL16_HI", Const, 5},
    -		{"R_PPC64_DTPREL16_HIGH", Const, 10},
    -		{"R_PPC64_DTPREL16_HIGHA", Const, 10},
    -		{"R_PPC64_DTPREL16_HIGHER", Const, 5},
    -		{"R_PPC64_DTPREL16_HIGHERA", Const, 5},
    -		{"R_PPC64_DTPREL16_HIGHEST", Const, 5},
    -		{"R_PPC64_DTPREL16_HIGHESTA", Const, 5},
    -		{"R_PPC64_DTPREL16_LO", Const, 5},
    -		{"R_PPC64_DTPREL16_LO_DS", Const, 5},
    -		{"R_PPC64_DTPREL34", Const, 20},
    -		{"R_PPC64_DTPREL64", Const, 5},
    -		{"R_PPC64_ENTRY", Const, 10},
    -		{"R_PPC64_GLOB_DAT", Const, 20},
    -		{"R_PPC64_GNU_VTENTRY", Const, 20},
    -		{"R_PPC64_GNU_VTINHERIT", Const, 20},
    -		{"R_PPC64_GOT16", Const, 5},
    -		{"R_PPC64_GOT16_DS", Const, 5},
    -		{"R_PPC64_GOT16_HA", Const, 5},
    -		{"R_PPC64_GOT16_HI", Const, 5},
    -		{"R_PPC64_GOT16_LO", Const, 5},
    -		{"R_PPC64_GOT16_LO_DS", Const, 5},
    -		{"R_PPC64_GOT_DTPREL16_DS", Const, 5},
    -		{"R_PPC64_GOT_DTPREL16_HA", Const, 5},
    -		{"R_PPC64_GOT_DTPREL16_HI", Const, 5},
    -		{"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5},
    -		{"R_PPC64_GOT_DTPREL_PCREL34", Const, 20},
    -		{"R_PPC64_GOT_PCREL34", Const, 20},
    -		{"R_PPC64_GOT_TLSGD16", Const, 5},
    -		{"R_PPC64_GOT_TLSGD16_HA", Const, 5},
    -		{"R_PPC64_GOT_TLSGD16_HI", Const, 5},
    -		{"R_PPC64_GOT_TLSGD16_LO", Const, 5},
    -		{"R_PPC64_GOT_TLSGD_PCREL34", Const, 20},
    -		{"R_PPC64_GOT_TLSLD16", Const, 5},
    -		{"R_PPC64_GOT_TLSLD16_HA", Const, 5},
    -		{"R_PPC64_GOT_TLSLD16_HI", Const, 5},
    -		{"R_PPC64_GOT_TLSLD16_LO", Const, 5},
    -		{"R_PPC64_GOT_TLSLD_PCREL34", Const, 20},
    -		{"R_PPC64_GOT_TPREL16_DS", Const, 5},
    -		{"R_PPC64_GOT_TPREL16_HA", Const, 5},
    -		{"R_PPC64_GOT_TPREL16_HI", Const, 5},
    -		{"R_PPC64_GOT_TPREL16_LO_DS", Const, 5},
    -		{"R_PPC64_GOT_TPREL_PCREL34", Const, 20},
    -		{"R_PPC64_IRELATIVE", Const, 10},
    -		{"R_PPC64_JMP_IREL", Const, 10},
    -		{"R_PPC64_JMP_SLOT", Const, 5},
    -		{"R_PPC64_NONE", Const, 5},
    -		{"R_PPC64_PCREL28", Const, 20},
    -		{"R_PPC64_PCREL34", Const, 20},
    -		{"R_PPC64_PCREL_OPT", Const, 20},
    -		{"R_PPC64_PLT16_HA", Const, 20},
    -		{"R_PPC64_PLT16_HI", Const, 20},
    -		{"R_PPC64_PLT16_LO", Const, 20},
    -		{"R_PPC64_PLT16_LO_DS", Const, 10},
    -		{"R_PPC64_PLT32", Const, 20},
    -		{"R_PPC64_PLT64", Const, 20},
    -		{"R_PPC64_PLTCALL", Const, 20},
    -		{"R_PPC64_PLTCALL_NOTOC", Const, 20},
    -		{"R_PPC64_PLTGOT16", Const, 10},
    -		{"R_PPC64_PLTGOT16_DS", Const, 10},
    -		{"R_PPC64_PLTGOT16_HA", Const, 10},
    -		{"R_PPC64_PLTGOT16_HI", Const, 10},
    -		{"R_PPC64_PLTGOT16_LO", Const, 10},
    -		{"R_PPC64_PLTGOT_LO_DS", Const, 10},
    -		{"R_PPC64_PLTREL32", Const, 20},
    -		{"R_PPC64_PLTREL64", Const, 20},
    -		{"R_PPC64_PLTSEQ", Const, 20},
    -		{"R_PPC64_PLTSEQ_NOTOC", Const, 20},
    -		{"R_PPC64_PLT_PCREL34", Const, 20},
    -		{"R_PPC64_PLT_PCREL34_NOTOC", Const, 20},
    -		{"R_PPC64_REL14", Const, 5},
    -		{"R_PPC64_REL14_BRNTAKEN", Const, 5},
    -		{"R_PPC64_REL14_BRTAKEN", Const, 5},
    -		{"R_PPC64_REL16", Const, 5},
    -		{"R_PPC64_REL16DX_HA", Const, 10},
    -		{"R_PPC64_REL16_HA", Const, 5},
    -		{"R_PPC64_REL16_HI", Const, 5},
    -		{"R_PPC64_REL16_HIGH", Const, 20},
    -		{"R_PPC64_REL16_HIGHA", Const, 20},
    -		{"R_PPC64_REL16_HIGHER", Const, 20},
    -		{"R_PPC64_REL16_HIGHER34", Const, 20},
    -		{"R_PPC64_REL16_HIGHERA", Const, 20},
    -		{"R_PPC64_REL16_HIGHERA34", Const, 20},
    -		{"R_PPC64_REL16_HIGHEST", Const, 20},
    -		{"R_PPC64_REL16_HIGHEST34", Const, 20},
    -		{"R_PPC64_REL16_HIGHESTA", Const, 20},
    -		{"R_PPC64_REL16_HIGHESTA34", Const, 20},
    -		{"R_PPC64_REL16_LO", Const, 5},
    -		{"R_PPC64_REL24", Const, 5},
    -		{"R_PPC64_REL24_NOTOC", Const, 10},
    -		{"R_PPC64_REL24_P9NOTOC", Const, 21},
    -		{"R_PPC64_REL30", Const, 20},
    -		{"R_PPC64_REL32", Const, 5},
    -		{"R_PPC64_REL64", Const, 5},
    -		{"R_PPC64_RELATIVE", Const, 18},
    -		{"R_PPC64_SECTOFF", Const, 20},
    -		{"R_PPC64_SECTOFF_DS", Const, 10},
    -		{"R_PPC64_SECTOFF_HA", Const, 20},
    -		{"R_PPC64_SECTOFF_HI", Const, 20},
    -		{"R_PPC64_SECTOFF_LO", Const, 20},
    -		{"R_PPC64_SECTOFF_LO_DS", Const, 10},
    -		{"R_PPC64_TLS", Const, 5},
    -		{"R_PPC64_TLSGD", Const, 5},
    -		{"R_PPC64_TLSLD", Const, 5},
    -		{"R_PPC64_TOC", Const, 5},
    -		{"R_PPC64_TOC16", Const, 5},
    -		{"R_PPC64_TOC16_DS", Const, 5},
    -		{"R_PPC64_TOC16_HA", Const, 5},
    -		{"R_PPC64_TOC16_HI", Const, 5},
    -		{"R_PPC64_TOC16_LO", Const, 5},
    -		{"R_PPC64_TOC16_LO_DS", Const, 5},
    -		{"R_PPC64_TOCSAVE", Const, 10},
    -		{"R_PPC64_TPREL16", Const, 5},
    -		{"R_PPC64_TPREL16_DS", Const, 5},
    -		{"R_PPC64_TPREL16_HA", Const, 5},
    -		{"R_PPC64_TPREL16_HI", Const, 5},
    -		{"R_PPC64_TPREL16_HIGH", Const, 10},
    -		{"R_PPC64_TPREL16_HIGHA", Const, 10},
    -		{"R_PPC64_TPREL16_HIGHER", Const, 5},
    -		{"R_PPC64_TPREL16_HIGHERA", Const, 5},
    -		{"R_PPC64_TPREL16_HIGHEST", Const, 5},
    -		{"R_PPC64_TPREL16_HIGHESTA", Const, 5},
    -		{"R_PPC64_TPREL16_LO", Const, 5},
    -		{"R_PPC64_TPREL16_LO_DS", Const, 5},
    -		{"R_PPC64_TPREL34", Const, 20},
    -		{"R_PPC64_TPREL64", Const, 5},
    -		{"R_PPC64_UADDR16", Const, 20},
    -		{"R_PPC64_UADDR32", Const, 20},
    -		{"R_PPC64_UADDR64", Const, 20},
    -		{"R_PPC_ADDR14", Const, 0},
    -		{"R_PPC_ADDR14_BRNTAKEN", Const, 0},
    -		{"R_PPC_ADDR14_BRTAKEN", Const, 0},
    -		{"R_PPC_ADDR16", Const, 0},
    -		{"R_PPC_ADDR16_HA", Const, 0},
    -		{"R_PPC_ADDR16_HI", Const, 0},
    -		{"R_PPC_ADDR16_LO", Const, 0},
    -		{"R_PPC_ADDR24", Const, 0},
    -		{"R_PPC_ADDR32", Const, 0},
    -		{"R_PPC_COPY", Const, 0},
    -		{"R_PPC_DTPMOD32", Const, 0},
    -		{"R_PPC_DTPREL16", Const, 0},
    -		{"R_PPC_DTPREL16_HA", Const, 0},
    -		{"R_PPC_DTPREL16_HI", Const, 0},
    -		{"R_PPC_DTPREL16_LO", Const, 0},
    -		{"R_PPC_DTPREL32", Const, 0},
    -		{"R_PPC_EMB_BIT_FLD", Const, 0},
    -		{"R_PPC_EMB_MRKREF", Const, 0},
    -		{"R_PPC_EMB_NADDR16", Const, 0},
    -		{"R_PPC_EMB_NADDR16_HA", Const, 0},
    -		{"R_PPC_EMB_NADDR16_HI", Const, 0},
    -		{"R_PPC_EMB_NADDR16_LO", Const, 0},
    -		{"R_PPC_EMB_NADDR32", Const, 0},
    -		{"R_PPC_EMB_RELSDA", Const, 0},
    -		{"R_PPC_EMB_RELSEC16", Const, 0},
    -		{"R_PPC_EMB_RELST_HA", Const, 0},
    -		{"R_PPC_EMB_RELST_HI", Const, 0},
    -		{"R_PPC_EMB_RELST_LO", Const, 0},
    -		{"R_PPC_EMB_SDA21", Const, 0},
    -		{"R_PPC_EMB_SDA2I16", Const, 0},
    -		{"R_PPC_EMB_SDA2REL", Const, 0},
    -		{"R_PPC_EMB_SDAI16", Const, 0},
    -		{"R_PPC_GLOB_DAT", Const, 0},
    -		{"R_PPC_GOT16", Const, 0},
    -		{"R_PPC_GOT16_HA", Const, 0},
    -		{"R_PPC_GOT16_HI", Const, 0},
    -		{"R_PPC_GOT16_LO", Const, 0},
    -		{"R_PPC_GOT_TLSGD16", Const, 0},
    -		{"R_PPC_GOT_TLSGD16_HA", Const, 0},
    -		{"R_PPC_GOT_TLSGD16_HI", Const, 0},
    -		{"R_PPC_GOT_TLSGD16_LO", Const, 0},
    -		{"R_PPC_GOT_TLSLD16", Const, 0},
    -		{"R_PPC_GOT_TLSLD16_HA", Const, 0},
    -		{"R_PPC_GOT_TLSLD16_HI", Const, 0},
    -		{"R_PPC_GOT_TLSLD16_LO", Const, 0},
    -		{"R_PPC_GOT_TPREL16", Const, 0},
    -		{"R_PPC_GOT_TPREL16_HA", Const, 0},
    -		{"R_PPC_GOT_TPREL16_HI", Const, 0},
    -		{"R_PPC_GOT_TPREL16_LO", Const, 0},
    -		{"R_PPC_JMP_SLOT", Const, 0},
    -		{"R_PPC_LOCAL24PC", Const, 0},
    -		{"R_PPC_NONE", Const, 0},
    -		{"R_PPC_PLT16_HA", Const, 0},
    -		{"R_PPC_PLT16_HI", Const, 0},
    -		{"R_PPC_PLT16_LO", Const, 0},
    -		{"R_PPC_PLT32", Const, 0},
    -		{"R_PPC_PLTREL24", Const, 0},
    -		{"R_PPC_PLTREL32", Const, 0},
    -		{"R_PPC_REL14", Const, 0},
    -		{"R_PPC_REL14_BRNTAKEN", Const, 0},
    -		{"R_PPC_REL14_BRTAKEN", Const, 0},
    -		{"R_PPC_REL24", Const, 0},
    -		{"R_PPC_REL32", Const, 0},
    -		{"R_PPC_RELATIVE", Const, 0},
    -		{"R_PPC_SDAREL16", Const, 0},
    -		{"R_PPC_SECTOFF", Const, 0},
    -		{"R_PPC_SECTOFF_HA", Const, 0},
    -		{"R_PPC_SECTOFF_HI", Const, 0},
    -		{"R_PPC_SECTOFF_LO", Const, 0},
    -		{"R_PPC_TLS", Const, 0},
    -		{"R_PPC_TPREL16", Const, 0},
    -		{"R_PPC_TPREL16_HA", Const, 0},
    -		{"R_PPC_TPREL16_HI", Const, 0},
    -		{"R_PPC_TPREL16_LO", Const, 0},
    -		{"R_PPC_TPREL32", Const, 0},
    -		{"R_PPC_UADDR16", Const, 0},
    -		{"R_PPC_UADDR32", Const, 0},
    -		{"R_RISCV", Type, 11},
    -		{"R_RISCV_32", Const, 11},
    -		{"R_RISCV_32_PCREL", Const, 12},
    -		{"R_RISCV_64", Const, 11},
    -		{"R_RISCV_ADD16", Const, 11},
    -		{"R_RISCV_ADD32", Const, 11},
    -		{"R_RISCV_ADD64", Const, 11},
    -		{"R_RISCV_ADD8", Const, 11},
    -		{"R_RISCV_ALIGN", Const, 11},
    -		{"R_RISCV_BRANCH", Const, 11},
    -		{"R_RISCV_CALL", Const, 11},
    -		{"R_RISCV_CALL_PLT", Const, 11},
    -		{"R_RISCV_COPY", Const, 11},
    -		{"R_RISCV_GNU_VTENTRY", Const, 11},
    -		{"R_RISCV_GNU_VTINHERIT", Const, 11},
    -		{"R_RISCV_GOT_HI20", Const, 11},
    -		{"R_RISCV_GPREL_I", Const, 11},
    -		{"R_RISCV_GPREL_S", Const, 11},
    -		{"R_RISCV_HI20", Const, 11},
    -		{"R_RISCV_JAL", Const, 11},
    -		{"R_RISCV_JUMP_SLOT", Const, 11},
    -		{"R_RISCV_LO12_I", Const, 11},
    -		{"R_RISCV_LO12_S", Const, 11},
    -		{"R_RISCV_NONE", Const, 11},
    -		{"R_RISCV_PCREL_HI20", Const, 11},
    -		{"R_RISCV_PCREL_LO12_I", Const, 11},
    -		{"R_RISCV_PCREL_LO12_S", Const, 11},
    -		{"R_RISCV_RELATIVE", Const, 11},
    -		{"R_RISCV_RELAX", Const, 11},
    -		{"R_RISCV_RVC_BRANCH", Const, 11},
    -		{"R_RISCV_RVC_JUMP", Const, 11},
    -		{"R_RISCV_RVC_LUI", Const, 11},
    -		{"R_RISCV_SET16", Const, 11},
    -		{"R_RISCV_SET32", Const, 11},
    -		{"R_RISCV_SET6", Const, 11},
    -		{"R_RISCV_SET8", Const, 11},
    -		{"R_RISCV_SUB16", Const, 11},
    -		{"R_RISCV_SUB32", Const, 11},
    -		{"R_RISCV_SUB6", Const, 11},
    -		{"R_RISCV_SUB64", Const, 11},
    -		{"R_RISCV_SUB8", Const, 11},
    -		{"R_RISCV_TLS_DTPMOD32", Const, 11},
    -		{"R_RISCV_TLS_DTPMOD64", Const, 11},
    -		{"R_RISCV_TLS_DTPREL32", Const, 11},
    -		{"R_RISCV_TLS_DTPREL64", Const, 11},
    -		{"R_RISCV_TLS_GD_HI20", Const, 11},
    -		{"R_RISCV_TLS_GOT_HI20", Const, 11},
    -		{"R_RISCV_TLS_TPREL32", Const, 11},
    -		{"R_RISCV_TLS_TPREL64", Const, 11},
    -		{"R_RISCV_TPREL_ADD", Const, 11},
    -		{"R_RISCV_TPREL_HI20", Const, 11},
    -		{"R_RISCV_TPREL_I", Const, 11},
    -		{"R_RISCV_TPREL_LO12_I", Const, 11},
    -		{"R_RISCV_TPREL_LO12_S", Const, 11},
    -		{"R_RISCV_TPREL_S", Const, 11},
    -		{"R_SPARC", Type, 0},
    -		{"R_SPARC_10", Const, 0},
    -		{"R_SPARC_11", Const, 0},
    -		{"R_SPARC_13", Const, 0},
    -		{"R_SPARC_16", Const, 0},
    -		{"R_SPARC_22", Const, 0},
    -		{"R_SPARC_32", Const, 0},
    -		{"R_SPARC_5", Const, 0},
    -		{"R_SPARC_6", Const, 0},
    -		{"R_SPARC_64", Const, 0},
    -		{"R_SPARC_7", Const, 0},
    -		{"R_SPARC_8", Const, 0},
    -		{"R_SPARC_COPY", Const, 0},
    -		{"R_SPARC_DISP16", Const, 0},
    -		{"R_SPARC_DISP32", Const, 0},
    -		{"R_SPARC_DISP64", Const, 0},
    -		{"R_SPARC_DISP8", Const, 0},
    -		{"R_SPARC_GLOB_DAT", Const, 0},
    -		{"R_SPARC_GLOB_JMP", Const, 0},
    -		{"R_SPARC_GOT10", Const, 0},
    -		{"R_SPARC_GOT13", Const, 0},
    -		{"R_SPARC_GOT22", Const, 0},
    -		{"R_SPARC_H44", Const, 0},
    -		{"R_SPARC_HH22", Const, 0},
    -		{"R_SPARC_HI22", Const, 0},
    -		{"R_SPARC_HIPLT22", Const, 0},
    -		{"R_SPARC_HIX22", Const, 0},
    -		{"R_SPARC_HM10", Const, 0},
    -		{"R_SPARC_JMP_SLOT", Const, 0},
    -		{"R_SPARC_L44", Const, 0},
    -		{"R_SPARC_LM22", Const, 0},
    -		{"R_SPARC_LO10", Const, 0},
    -		{"R_SPARC_LOPLT10", Const, 0},
    -		{"R_SPARC_LOX10", Const, 0},
    -		{"R_SPARC_M44", Const, 0},
    -		{"R_SPARC_NONE", Const, 0},
    -		{"R_SPARC_OLO10", Const, 0},
    -		{"R_SPARC_PC10", Const, 0},
    -		{"R_SPARC_PC22", Const, 0},
    -		{"R_SPARC_PCPLT10", Const, 0},
    -		{"R_SPARC_PCPLT22", Const, 0},
    -		{"R_SPARC_PCPLT32", Const, 0},
    -		{"R_SPARC_PC_HH22", Const, 0},
    -		{"R_SPARC_PC_HM10", Const, 0},
    -		{"R_SPARC_PC_LM22", Const, 0},
    -		{"R_SPARC_PLT32", Const, 0},
    -		{"R_SPARC_PLT64", Const, 0},
    -		{"R_SPARC_REGISTER", Const, 0},
    -		{"R_SPARC_RELATIVE", Const, 0},
    -		{"R_SPARC_UA16", Const, 0},
    -		{"R_SPARC_UA32", Const, 0},
    -		{"R_SPARC_UA64", Const, 0},
    -		{"R_SPARC_WDISP16", Const, 0},
    -		{"R_SPARC_WDISP19", Const, 0},
    -		{"R_SPARC_WDISP22", Const, 0},
    -		{"R_SPARC_WDISP30", Const, 0},
    -		{"R_SPARC_WPLT30", Const, 0},
    -		{"R_SYM32", Func, 0},
    -		{"R_SYM64", Func, 0},
    -		{"R_TYPE32", Func, 0},
    -		{"R_TYPE64", Func, 0},
    -		{"R_X86_64", Type, 0},
    -		{"R_X86_64_16", Const, 0},
    -		{"R_X86_64_32", Const, 0},
    -		{"R_X86_64_32S", Const, 0},
    -		{"R_X86_64_64", Const, 0},
    -		{"R_X86_64_8", Const, 0},
    -		{"R_X86_64_COPY", Const, 0},
    -		{"R_X86_64_DTPMOD64", Const, 0},
    -		{"R_X86_64_DTPOFF32", Const, 0},
    -		{"R_X86_64_DTPOFF64", Const, 0},
    -		{"R_X86_64_GLOB_DAT", Const, 0},
    -		{"R_X86_64_GOT32", Const, 0},
    -		{"R_X86_64_GOT64", Const, 10},
    -		{"R_X86_64_GOTOFF64", Const, 10},
    -		{"R_X86_64_GOTPC32", Const, 10},
    -		{"R_X86_64_GOTPC32_TLSDESC", Const, 10},
    -		{"R_X86_64_GOTPC64", Const, 10},
    -		{"R_X86_64_GOTPCREL", Const, 0},
    -		{"R_X86_64_GOTPCREL64", Const, 10},
    -		{"R_X86_64_GOTPCRELX", Const, 10},
    -		{"R_X86_64_GOTPLT64", Const, 10},
    -		{"R_X86_64_GOTTPOFF", Const, 0},
    -		{"R_X86_64_IRELATIVE", Const, 10},
    -		{"R_X86_64_JMP_SLOT", Const, 0},
    -		{"R_X86_64_NONE", Const, 0},
    -		{"R_X86_64_PC16", Const, 0},
    -		{"R_X86_64_PC32", Const, 0},
    -		{"R_X86_64_PC32_BND", Const, 10},
    -		{"R_X86_64_PC64", Const, 10},
    -		{"R_X86_64_PC8", Const, 0},
    -		{"R_X86_64_PLT32", Const, 0},
    -		{"R_X86_64_PLT32_BND", Const, 10},
    -		{"R_X86_64_PLTOFF64", Const, 10},
    -		{"R_X86_64_RELATIVE", Const, 0},
    -		{"R_X86_64_RELATIVE64", Const, 10},
    -		{"R_X86_64_REX_GOTPCRELX", Const, 10},
    -		{"R_X86_64_SIZE32", Const, 10},
    -		{"R_X86_64_SIZE64", Const, 10},
    -		{"R_X86_64_TLSDESC", Const, 10},
    -		{"R_X86_64_TLSDESC_CALL", Const, 10},
    -		{"R_X86_64_TLSGD", Const, 0},
    -		{"R_X86_64_TLSLD", Const, 0},
    -		{"R_X86_64_TPOFF32", Const, 0},
    -		{"R_X86_64_TPOFF64", Const, 0},
    -		{"Rel32", Type, 0},
    -		{"Rel32.Info", Field, 0},
    -		{"Rel32.Off", Field, 0},
    -		{"Rel64", Type, 0},
    -		{"Rel64.Info", Field, 0},
    -		{"Rel64.Off", Field, 0},
    -		{"Rela32", Type, 0},
    -		{"Rela32.Addend", Field, 0},
    -		{"Rela32.Info", Field, 0},
    -		{"Rela32.Off", Field, 0},
    -		{"Rela64", Type, 0},
    -		{"Rela64.Addend", Field, 0},
    -		{"Rela64.Info", Field, 0},
    -		{"Rela64.Off", Field, 0},
    -		{"SHF_ALLOC", Const, 0},
    -		{"SHF_COMPRESSED", Const, 6},
    -		{"SHF_EXECINSTR", Const, 0},
    -		{"SHF_GROUP", Const, 0},
    -		{"SHF_INFO_LINK", Const, 0},
    -		{"SHF_LINK_ORDER", Const, 0},
    -		{"SHF_MASKOS", Const, 0},
    -		{"SHF_MASKPROC", Const, 0},
    -		{"SHF_MERGE", Const, 0},
    -		{"SHF_OS_NONCONFORMING", Const, 0},
    -		{"SHF_STRINGS", Const, 0},
    -		{"SHF_TLS", Const, 0},
    -		{"SHF_WRITE", Const, 0},
    -		{"SHN_ABS", Const, 0},
    -		{"SHN_COMMON", Const, 0},
    -		{"SHN_HIOS", Const, 0},
    -		{"SHN_HIPROC", Const, 0},
    -		{"SHN_HIRESERVE", Const, 0},
    -		{"SHN_LOOS", Const, 0},
    -		{"SHN_LOPROC", Const, 0},
    -		{"SHN_LORESERVE", Const, 0},
    -		{"SHN_UNDEF", Const, 0},
    -		{"SHN_XINDEX", Const, 0},
    -		{"SHT_DYNAMIC", Const, 0},
    -		{"SHT_DYNSYM", Const, 0},
    -		{"SHT_FINI_ARRAY", Const, 0},
    -		{"SHT_GNU_ATTRIBUTES", Const, 0},
    -		{"SHT_GNU_HASH", Const, 0},
    -		{"SHT_GNU_LIBLIST", Const, 0},
    -		{"SHT_GNU_VERDEF", Const, 0},
    -		{"SHT_GNU_VERNEED", Const, 0},
    -		{"SHT_GNU_VERSYM", Const, 0},
    -		{"SHT_GROUP", Const, 0},
    -		{"SHT_HASH", Const, 0},
    -		{"SHT_HIOS", Const, 0},
    -		{"SHT_HIPROC", Const, 0},
    -		{"SHT_HIUSER", Const, 0},
    -		{"SHT_INIT_ARRAY", Const, 0},
    -		{"SHT_LOOS", Const, 0},
    -		{"SHT_LOPROC", Const, 0},
    -		{"SHT_LOUSER", Const, 0},
    -		{"SHT_MIPS_ABIFLAGS", Const, 17},
    -		{"SHT_NOBITS", Const, 0},
    -		{"SHT_NOTE", Const, 0},
    -		{"SHT_NULL", Const, 0},
    -		{"SHT_PREINIT_ARRAY", Const, 0},
    -		{"SHT_PROGBITS", Const, 0},
    -		{"SHT_REL", Const, 0},
    -		{"SHT_RELA", Const, 0},
    -		{"SHT_SHLIB", Const, 0},
    -		{"SHT_STRTAB", Const, 0},
    -		{"SHT_SYMTAB", Const, 0},
    -		{"SHT_SYMTAB_SHNDX", Const, 0},
    -		{"STB_GLOBAL", Const, 0},
    -		{"STB_HIOS", Const, 0},
    -		{"STB_HIPROC", Const, 0},
    -		{"STB_LOCAL", Const, 0},
    -		{"STB_LOOS", Const, 0},
    -		{"STB_LOPROC", Const, 0},
    -		{"STB_WEAK", Const, 0},
    -		{"STT_COMMON", Const, 0},
    -		{"STT_FILE", Const, 0},
    -		{"STT_FUNC", Const, 0},
    -		{"STT_GNU_IFUNC", Const, 23},
    -		{"STT_HIOS", Const, 0},
    -		{"STT_HIPROC", Const, 0},
    -		{"STT_LOOS", Const, 0},
    -		{"STT_LOPROC", Const, 0},
    -		{"STT_NOTYPE", Const, 0},
    -		{"STT_OBJECT", Const, 0},
    -		{"STT_RELC", Const, 23},
    -		{"STT_SECTION", Const, 0},
    -		{"STT_SRELC", Const, 23},
    -		{"STT_TLS", Const, 0},
    -		{"STV_DEFAULT", Const, 0},
    -		{"STV_HIDDEN", Const, 0},
    -		{"STV_INTERNAL", Const, 0},
    -		{"STV_PROTECTED", Const, 0},
    -		{"ST_BIND", Func, 0},
    -		{"ST_INFO", Func, 0},
    -		{"ST_TYPE", Func, 0},
    -		{"ST_VISIBILITY", Func, 0},
    -		{"Section", Type, 0},
    -		{"Section.ReaderAt", Field, 0},
    -		{"Section.SectionHeader", Field, 0},
    -		{"Section32", Type, 0},
    -		{"Section32.Addr", Field, 0},
    -		{"Section32.Addralign", Field, 0},
    -		{"Section32.Entsize", Field, 0},
    -		{"Section32.Flags", Field, 0},
    -		{"Section32.Info", Field, 0},
    -		{"Section32.Link", Field, 0},
    -		{"Section32.Name", Field, 0},
    -		{"Section32.Off", Field, 0},
    -		{"Section32.Size", Field, 0},
    -		{"Section32.Type", Field, 0},
    -		{"Section64", Type, 0},
    -		{"Section64.Addr", Field, 0},
    -		{"Section64.Addralign", Field, 0},
    -		{"Section64.Entsize", Field, 0},
    -		{"Section64.Flags", Field, 0},
    -		{"Section64.Info", Field, 0},
    -		{"Section64.Link", Field, 0},
    -		{"Section64.Name", Field, 0},
    -		{"Section64.Off", Field, 0},
    -		{"Section64.Size", Field, 0},
    -		{"Section64.Type", Field, 0},
    -		{"SectionFlag", Type, 0},
    -		{"SectionHeader", Type, 0},
    -		{"SectionHeader.Addr", Field, 0},
    -		{"SectionHeader.Addralign", Field, 0},
    -		{"SectionHeader.Entsize", Field, 0},
    -		{"SectionHeader.FileSize", Field, 6},
    -		{"SectionHeader.Flags", Field, 0},
    -		{"SectionHeader.Info", Field, 0},
    -		{"SectionHeader.Link", Field, 0},
    -		{"SectionHeader.Name", Field, 0},
    -		{"SectionHeader.Offset", Field, 0},
    -		{"SectionHeader.Size", Field, 0},
    -		{"SectionHeader.Type", Field, 0},
    -		{"SectionIndex", Type, 0},
    -		{"SectionType", Type, 0},
    -		{"Sym32", Type, 0},
    -		{"Sym32.Info", Field, 0},
    -		{"Sym32.Name", Field, 0},
    -		{"Sym32.Other", Field, 0},
    -		{"Sym32.Shndx", Field, 0},
    -		{"Sym32.Size", Field, 0},
    -		{"Sym32.Value", Field, 0},
    -		{"Sym32Size", Const, 0},
    -		{"Sym64", Type, 0},
    -		{"Sym64.Info", Field, 0},
    -		{"Sym64.Name", Field, 0},
    -		{"Sym64.Other", Field, 0},
    -		{"Sym64.Shndx", Field, 0},
    -		{"Sym64.Size", Field, 0},
    -		{"Sym64.Value", Field, 0},
    -		{"Sym64Size", Const, 0},
    -		{"SymBind", Type, 0},
    -		{"SymType", Type, 0},
    -		{"SymVis", Type, 0},
    -		{"Symbol", Type, 0},
    -		{"Symbol.HasVersion", Field, 24},
    -		{"Symbol.Info", Field, 0},
    -		{"Symbol.Library", Field, 13},
    -		{"Symbol.Name", Field, 0},
    -		{"Symbol.Other", Field, 0},
    -		{"Symbol.Section", Field, 0},
    -		{"Symbol.Size", Field, 0},
    -		{"Symbol.Value", Field, 0},
    -		{"Symbol.Version", Field, 13},
    -		{"Symbol.VersionIndex", Field, 24},
    -		{"Type", Type, 0},
    -		{"VER_FLG_BASE", Const, 24},
    -		{"VER_FLG_INFO", Const, 24},
    -		{"VER_FLG_WEAK", Const, 24},
    -		{"Version", Type, 0},
    -		{"VersionIndex", Type, 24},
    +		{"(*File).Close", Method, 0, ""},
    +		{"(*File).DWARF", Method, 0, ""},
    +		{"(*File).DynString", Method, 1, ""},
    +		{"(*File).DynValue", Method, 21, ""},
    +		{"(*File).DynamicSymbols", Method, 4, ""},
    +		{"(*File).DynamicVersionNeeds", Method, 24, ""},
    +		{"(*File).DynamicVersions", Method, 24, ""},
    +		{"(*File).ImportedLibraries", Method, 0, ""},
    +		{"(*File).ImportedSymbols", Method, 0, ""},
    +		{"(*File).Section", Method, 0, ""},
    +		{"(*File).SectionByType", Method, 0, ""},
    +		{"(*File).Symbols", Method, 0, ""},
    +		{"(*FormatError).Error", Method, 0, ""},
    +		{"(*Prog).Open", Method, 0, ""},
    +		{"(*Section).Data", Method, 0, ""},
    +		{"(*Section).Open", Method, 0, ""},
    +		{"(Class).GoString", Method, 0, ""},
    +		{"(Class).String", Method, 0, ""},
    +		{"(CompressionType).GoString", Method, 6, ""},
    +		{"(CompressionType).String", Method, 6, ""},
    +		{"(Data).GoString", Method, 0, ""},
    +		{"(Data).String", Method, 0, ""},
    +		{"(DynFlag).GoString", Method, 0, ""},
    +		{"(DynFlag).String", Method, 0, ""},
    +		{"(DynFlag1).GoString", Method, 21, ""},
    +		{"(DynFlag1).String", Method, 21, ""},
    +		{"(DynTag).GoString", Method, 0, ""},
    +		{"(DynTag).String", Method, 0, ""},
    +		{"(Machine).GoString", Method, 0, ""},
    +		{"(Machine).String", Method, 0, ""},
    +		{"(NType).GoString", Method, 0, ""},
    +		{"(NType).String", Method, 0, ""},
    +		{"(OSABI).GoString", Method, 0, ""},
    +		{"(OSABI).String", Method, 0, ""},
    +		{"(Prog).ReadAt", Method, 0, ""},
    +		{"(ProgFlag).GoString", Method, 0, ""},
    +		{"(ProgFlag).String", Method, 0, ""},
    +		{"(ProgType).GoString", Method, 0, ""},
    +		{"(ProgType).String", Method, 0, ""},
    +		{"(R_386).GoString", Method, 0, ""},
    +		{"(R_386).String", Method, 0, ""},
    +		{"(R_390).GoString", Method, 7, ""},
    +		{"(R_390).String", Method, 7, ""},
    +		{"(R_AARCH64).GoString", Method, 4, ""},
    +		{"(R_AARCH64).String", Method, 4, ""},
    +		{"(R_ALPHA).GoString", Method, 0, ""},
    +		{"(R_ALPHA).String", Method, 0, ""},
    +		{"(R_ARM).GoString", Method, 0, ""},
    +		{"(R_ARM).String", Method, 0, ""},
    +		{"(R_LARCH).GoString", Method, 19, ""},
    +		{"(R_LARCH).String", Method, 19, ""},
    +		{"(R_MIPS).GoString", Method, 6, ""},
    +		{"(R_MIPS).String", Method, 6, ""},
    +		{"(R_PPC).GoString", Method, 0, ""},
    +		{"(R_PPC).String", Method, 0, ""},
    +		{"(R_PPC64).GoString", Method, 5, ""},
    +		{"(R_PPC64).String", Method, 5, ""},
    +		{"(R_RISCV).GoString", Method, 11, ""},
    +		{"(R_RISCV).String", Method, 11, ""},
    +		{"(R_SPARC).GoString", Method, 0, ""},
    +		{"(R_SPARC).String", Method, 0, ""},
    +		{"(R_X86_64).GoString", Method, 0, ""},
    +		{"(R_X86_64).String", Method, 0, ""},
    +		{"(Section).ReadAt", Method, 0, ""},
    +		{"(SectionFlag).GoString", Method, 0, ""},
    +		{"(SectionFlag).String", Method, 0, ""},
    +		{"(SectionIndex).GoString", Method, 0, ""},
    +		{"(SectionIndex).String", Method, 0, ""},
    +		{"(SectionType).GoString", Method, 0, ""},
    +		{"(SectionType).String", Method, 0, ""},
    +		{"(SymBind).GoString", Method, 0, ""},
    +		{"(SymBind).String", Method, 0, ""},
    +		{"(SymType).GoString", Method, 0, ""},
    +		{"(SymType).String", Method, 0, ""},
    +		{"(SymVis).GoString", Method, 0, ""},
    +		{"(SymVis).String", Method, 0, ""},
    +		{"(Type).GoString", Method, 0, ""},
    +		{"(Type).String", Method, 0, ""},
    +		{"(Version).GoString", Method, 0, ""},
    +		{"(Version).String", Method, 0, ""},
    +		{"(VersionIndex).Index", Method, 24, ""},
    +		{"(VersionIndex).IsHidden", Method, 24, ""},
    +		{"ARM_MAGIC_TRAMP_NUMBER", Const, 0, ""},
    +		{"COMPRESS_HIOS", Const, 6, ""},
    +		{"COMPRESS_HIPROC", Const, 6, ""},
    +		{"COMPRESS_LOOS", Const, 6, ""},
    +		{"COMPRESS_LOPROC", Const, 6, ""},
    +		{"COMPRESS_ZLIB", Const, 6, ""},
    +		{"COMPRESS_ZSTD", Const, 21, ""},
    +		{"Chdr32", Type, 6, ""},
    +		{"Chdr32.Addralign", Field, 6, ""},
    +		{"Chdr32.Size", Field, 6, ""},
    +		{"Chdr32.Type", Field, 6, ""},
    +		{"Chdr64", Type, 6, ""},
    +		{"Chdr64.Addralign", Field, 6, ""},
    +		{"Chdr64.Size", Field, 6, ""},
    +		{"Chdr64.Type", Field, 6, ""},
    +		{"Class", Type, 0, ""},
    +		{"CompressionType", Type, 6, ""},
    +		{"DF_1_CONFALT", Const, 21, ""},
    +		{"DF_1_DIRECT", Const, 21, ""},
    +		{"DF_1_DISPRELDNE", Const, 21, ""},
    +		{"DF_1_DISPRELPND", Const, 21, ""},
    +		{"DF_1_EDITED", Const, 21, ""},
    +		{"DF_1_ENDFILTEE", Const, 21, ""},
    +		{"DF_1_GLOBAL", Const, 21, ""},
    +		{"DF_1_GLOBAUDIT", Const, 21, ""},
    +		{"DF_1_GROUP", Const, 21, ""},
    +		{"DF_1_IGNMULDEF", Const, 21, ""},
    +		{"DF_1_INITFIRST", Const, 21, ""},
    +		{"DF_1_INTERPOSE", Const, 21, ""},
    +		{"DF_1_KMOD", Const, 21, ""},
    +		{"DF_1_LOADFLTR", Const, 21, ""},
    +		{"DF_1_NOCOMMON", Const, 21, ""},
    +		{"DF_1_NODEFLIB", Const, 21, ""},
    +		{"DF_1_NODELETE", Const, 21, ""},
    +		{"DF_1_NODIRECT", Const, 21, ""},
    +		{"DF_1_NODUMP", Const, 21, ""},
    +		{"DF_1_NOHDR", Const, 21, ""},
    +		{"DF_1_NOKSYMS", Const, 21, ""},
    +		{"DF_1_NOOPEN", Const, 21, ""},
    +		{"DF_1_NORELOC", Const, 21, ""},
    +		{"DF_1_NOW", Const, 21, ""},
    +		{"DF_1_ORIGIN", Const, 21, ""},
    +		{"DF_1_PIE", Const, 21, ""},
    +		{"DF_1_SINGLETON", Const, 21, ""},
    +		{"DF_1_STUB", Const, 21, ""},
    +		{"DF_1_SYMINTPOSE", Const, 21, ""},
    +		{"DF_1_TRANS", Const, 21, ""},
    +		{"DF_1_WEAKFILTER", Const, 21, ""},
    +		{"DF_BIND_NOW", Const, 0, ""},
    +		{"DF_ORIGIN", Const, 0, ""},
    +		{"DF_STATIC_TLS", Const, 0, ""},
    +		{"DF_SYMBOLIC", Const, 0, ""},
    +		{"DF_TEXTREL", Const, 0, ""},
    +		{"DT_ADDRRNGHI", Const, 16, ""},
    +		{"DT_ADDRRNGLO", Const, 16, ""},
    +		{"DT_AUDIT", Const, 16, ""},
    +		{"DT_AUXILIARY", Const, 16, ""},
    +		{"DT_BIND_NOW", Const, 0, ""},
    +		{"DT_CHECKSUM", Const, 16, ""},
    +		{"DT_CONFIG", Const, 16, ""},
    +		{"DT_DEBUG", Const, 0, ""},
    +		{"DT_DEPAUDIT", Const, 16, ""},
    +		{"DT_ENCODING", Const, 0, ""},
    +		{"DT_FEATURE", Const, 16, ""},
    +		{"DT_FILTER", Const, 16, ""},
    +		{"DT_FINI", Const, 0, ""},
    +		{"DT_FINI_ARRAY", Const, 0, ""},
    +		{"DT_FINI_ARRAYSZ", Const, 0, ""},
    +		{"DT_FLAGS", Const, 0, ""},
    +		{"DT_FLAGS_1", Const, 16, ""},
    +		{"DT_GNU_CONFLICT", Const, 16, ""},
    +		{"DT_GNU_CONFLICTSZ", Const, 16, ""},
    +		{"DT_GNU_HASH", Const, 16, ""},
    +		{"DT_GNU_LIBLIST", Const, 16, ""},
    +		{"DT_GNU_LIBLISTSZ", Const, 16, ""},
    +		{"DT_GNU_PRELINKED", Const, 16, ""},
    +		{"DT_HASH", Const, 0, ""},
    +		{"DT_HIOS", Const, 0, ""},
    +		{"DT_HIPROC", Const, 0, ""},
    +		{"DT_INIT", Const, 0, ""},
    +		{"DT_INIT_ARRAY", Const, 0, ""},
    +		{"DT_INIT_ARRAYSZ", Const, 0, ""},
    +		{"DT_JMPREL", Const, 0, ""},
    +		{"DT_LOOS", Const, 0, ""},
    +		{"DT_LOPROC", Const, 0, ""},
    +		{"DT_MIPS_AUX_DYNAMIC", Const, 16, ""},
    +		{"DT_MIPS_BASE_ADDRESS", Const, 16, ""},
    +		{"DT_MIPS_COMPACT_SIZE", Const, 16, ""},
    +		{"DT_MIPS_CONFLICT", Const, 16, ""},
    +		{"DT_MIPS_CONFLICTNO", Const, 16, ""},
    +		{"DT_MIPS_CXX_FLAGS", Const, 16, ""},
    +		{"DT_MIPS_DELTA_CLASS", Const, 16, ""},
    +		{"DT_MIPS_DELTA_CLASSSYM", Const, 16, ""},
    +		{"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16, ""},
    +		{"DT_MIPS_DELTA_CLASS_NO", Const, 16, ""},
    +		{"DT_MIPS_DELTA_INSTANCE", Const, 16, ""},
    +		{"DT_MIPS_DELTA_INSTANCE_NO", Const, 16, ""},
    +		{"DT_MIPS_DELTA_RELOC", Const, 16, ""},
    +		{"DT_MIPS_DELTA_RELOC_NO", Const, 16, ""},
    +		{"DT_MIPS_DELTA_SYM", Const, 16, ""},
    +		{"DT_MIPS_DELTA_SYM_NO", Const, 16, ""},
    +		{"DT_MIPS_DYNSTR_ALIGN", Const, 16, ""},
    +		{"DT_MIPS_FLAGS", Const, 16, ""},
    +		{"DT_MIPS_GOTSYM", Const, 16, ""},
    +		{"DT_MIPS_GP_VALUE", Const, 16, ""},
    +		{"DT_MIPS_HIDDEN_GOTIDX", Const, 16, ""},
    +		{"DT_MIPS_HIPAGENO", Const, 16, ""},
    +		{"DT_MIPS_ICHECKSUM", Const, 16, ""},
    +		{"DT_MIPS_INTERFACE", Const, 16, ""},
    +		{"DT_MIPS_INTERFACE_SIZE", Const, 16, ""},
    +		{"DT_MIPS_IVERSION", Const, 16, ""},
    +		{"DT_MIPS_LIBLIST", Const, 16, ""},
    +		{"DT_MIPS_LIBLISTNO", Const, 16, ""},
    +		{"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16, ""},
    +		{"DT_MIPS_LOCAL_GOTIDX", Const, 16, ""},
    +		{"DT_MIPS_LOCAL_GOTNO", Const, 16, ""},
    +		{"DT_MIPS_MSYM", Const, 16, ""},
    +		{"DT_MIPS_OPTIONS", Const, 16, ""},
    +		{"DT_MIPS_PERF_SUFFIX", Const, 16, ""},
    +		{"DT_MIPS_PIXIE_INIT", Const, 16, ""},
    +		{"DT_MIPS_PLTGOT", Const, 16, ""},
    +		{"DT_MIPS_PROTECTED_GOTIDX", Const, 16, ""},
    +		{"DT_MIPS_RLD_MAP", Const, 16, ""},
    +		{"DT_MIPS_RLD_MAP_REL", Const, 16, ""},
    +		{"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16, ""},
    +		{"DT_MIPS_RLD_VERSION", Const, 16, ""},
    +		{"DT_MIPS_RWPLT", Const, 16, ""},
    +		{"DT_MIPS_SYMBOL_LIB", Const, 16, ""},
    +		{"DT_MIPS_SYMTABNO", Const, 16, ""},
    +		{"DT_MIPS_TIME_STAMP", Const, 16, ""},
    +		{"DT_MIPS_UNREFEXTNO", Const, 16, ""},
    +		{"DT_MOVEENT", Const, 16, ""},
    +		{"DT_MOVESZ", Const, 16, ""},
    +		{"DT_MOVETAB", Const, 16, ""},
    +		{"DT_NEEDED", Const, 0, ""},
    +		{"DT_NULL", Const, 0, ""},
    +		{"DT_PLTGOT", Const, 0, ""},
    +		{"DT_PLTPAD", Const, 16, ""},
    +		{"DT_PLTPADSZ", Const, 16, ""},
    +		{"DT_PLTREL", Const, 0, ""},
    +		{"DT_PLTRELSZ", Const, 0, ""},
    +		{"DT_POSFLAG_1", Const, 16, ""},
    +		{"DT_PPC64_GLINK", Const, 16, ""},
    +		{"DT_PPC64_OPD", Const, 16, ""},
    +		{"DT_PPC64_OPDSZ", Const, 16, ""},
    +		{"DT_PPC64_OPT", Const, 16, ""},
    +		{"DT_PPC_GOT", Const, 16, ""},
    +		{"DT_PPC_OPT", Const, 16, ""},
    +		{"DT_PREINIT_ARRAY", Const, 0, ""},
    +		{"DT_PREINIT_ARRAYSZ", Const, 0, ""},
    +		{"DT_REL", Const, 0, ""},
    +		{"DT_RELA", Const, 0, ""},
    +		{"DT_RELACOUNT", Const, 16, ""},
    +		{"DT_RELAENT", Const, 0, ""},
    +		{"DT_RELASZ", Const, 0, ""},
    +		{"DT_RELCOUNT", Const, 16, ""},
    +		{"DT_RELENT", Const, 0, ""},
    +		{"DT_RELSZ", Const, 0, ""},
    +		{"DT_RPATH", Const, 0, ""},
    +		{"DT_RUNPATH", Const, 0, ""},
    +		{"DT_SONAME", Const, 0, ""},
    +		{"DT_SPARC_REGISTER", Const, 16, ""},
    +		{"DT_STRSZ", Const, 0, ""},
    +		{"DT_STRTAB", Const, 0, ""},
    +		{"DT_SYMBOLIC", Const, 0, ""},
    +		{"DT_SYMENT", Const, 0, ""},
    +		{"DT_SYMINENT", Const, 16, ""},
    +		{"DT_SYMINFO", Const, 16, ""},
    +		{"DT_SYMINSZ", Const, 16, ""},
    +		{"DT_SYMTAB", Const, 0, ""},
    +		{"DT_SYMTAB_SHNDX", Const, 16, ""},
    +		{"DT_TEXTREL", Const, 0, ""},
    +		{"DT_TLSDESC_GOT", Const, 16, ""},
    +		{"DT_TLSDESC_PLT", Const, 16, ""},
    +		{"DT_USED", Const, 16, ""},
    +		{"DT_VALRNGHI", Const, 16, ""},
    +		{"DT_VALRNGLO", Const, 16, ""},
    +		{"DT_VERDEF", Const, 16, ""},
    +		{"DT_VERDEFNUM", Const, 16, ""},
    +		{"DT_VERNEED", Const, 0, ""},
    +		{"DT_VERNEEDNUM", Const, 0, ""},
    +		{"DT_VERSYM", Const, 0, ""},
    +		{"Data", Type, 0, ""},
    +		{"Dyn32", Type, 0, ""},
    +		{"Dyn32.Tag", Field, 0, ""},
    +		{"Dyn32.Val", Field, 0, ""},
    +		{"Dyn64", Type, 0, ""},
    +		{"Dyn64.Tag", Field, 0, ""},
    +		{"Dyn64.Val", Field, 0, ""},
    +		{"DynFlag", Type, 0, ""},
    +		{"DynFlag1", Type, 21, ""},
    +		{"DynTag", Type, 0, ""},
    +		{"DynamicVersion", Type, 24, ""},
    +		{"DynamicVersion.Deps", Field, 24, ""},
    +		{"DynamicVersion.Flags", Field, 24, ""},
    +		{"DynamicVersion.Index", Field, 24, ""},
    +		{"DynamicVersion.Name", Field, 24, ""},
    +		{"DynamicVersionDep", Type, 24, ""},
    +		{"DynamicVersionDep.Dep", Field, 24, ""},
    +		{"DynamicVersionDep.Flags", Field, 24, ""},
    +		{"DynamicVersionDep.Index", Field, 24, ""},
    +		{"DynamicVersionFlag", Type, 24, ""},
    +		{"DynamicVersionNeed", Type, 24, ""},
    +		{"DynamicVersionNeed.Name", Field, 24, ""},
    +		{"DynamicVersionNeed.Needs", Field, 24, ""},
    +		{"EI_ABIVERSION", Const, 0, ""},
    +		{"EI_CLASS", Const, 0, ""},
    +		{"EI_DATA", Const, 0, ""},
    +		{"EI_NIDENT", Const, 0, ""},
    +		{"EI_OSABI", Const, 0, ""},
    +		{"EI_PAD", Const, 0, ""},
    +		{"EI_VERSION", Const, 0, ""},
    +		{"ELFCLASS32", Const, 0, ""},
    +		{"ELFCLASS64", Const, 0, ""},
    +		{"ELFCLASSNONE", Const, 0, ""},
    +		{"ELFDATA2LSB", Const, 0, ""},
    +		{"ELFDATA2MSB", Const, 0, ""},
    +		{"ELFDATANONE", Const, 0, ""},
    +		{"ELFMAG", Const, 0, ""},
    +		{"ELFOSABI_86OPEN", Const, 0, ""},
    +		{"ELFOSABI_AIX", Const, 0, ""},
    +		{"ELFOSABI_ARM", Const, 0, ""},
    +		{"ELFOSABI_AROS", Const, 11, ""},
    +		{"ELFOSABI_CLOUDABI", Const, 11, ""},
    +		{"ELFOSABI_FENIXOS", Const, 11, ""},
    +		{"ELFOSABI_FREEBSD", Const, 0, ""},
    +		{"ELFOSABI_HPUX", Const, 0, ""},
    +		{"ELFOSABI_HURD", Const, 0, ""},
    +		{"ELFOSABI_IRIX", Const, 0, ""},
    +		{"ELFOSABI_LINUX", Const, 0, ""},
    +		{"ELFOSABI_MODESTO", Const, 0, ""},
    +		{"ELFOSABI_NETBSD", Const, 0, ""},
    +		{"ELFOSABI_NONE", Const, 0, ""},
    +		{"ELFOSABI_NSK", Const, 0, ""},
    +		{"ELFOSABI_OPENBSD", Const, 0, ""},
    +		{"ELFOSABI_OPENVMS", Const, 0, ""},
    +		{"ELFOSABI_SOLARIS", Const, 0, ""},
    +		{"ELFOSABI_STANDALONE", Const, 0, ""},
    +		{"ELFOSABI_TRU64", Const, 0, ""},
    +		{"EM_386", Const, 0, ""},
    +		{"EM_486", Const, 0, ""},
    +		{"EM_56800EX", Const, 11, ""},
    +		{"EM_68HC05", Const, 11, ""},
    +		{"EM_68HC08", Const, 11, ""},
    +		{"EM_68HC11", Const, 11, ""},
    +		{"EM_68HC12", Const, 0, ""},
    +		{"EM_68HC16", Const, 11, ""},
    +		{"EM_68K", Const, 0, ""},
    +		{"EM_78KOR", Const, 11, ""},
    +		{"EM_8051", Const, 11, ""},
    +		{"EM_860", Const, 0, ""},
    +		{"EM_88K", Const, 0, ""},
    +		{"EM_960", Const, 0, ""},
    +		{"EM_AARCH64", Const, 4, ""},
    +		{"EM_ALPHA", Const, 0, ""},
    +		{"EM_ALPHA_STD", Const, 0, ""},
    +		{"EM_ALTERA_NIOS2", Const, 11, ""},
    +		{"EM_AMDGPU", Const, 11, ""},
    +		{"EM_ARC", Const, 0, ""},
    +		{"EM_ARCA", Const, 11, ""},
    +		{"EM_ARC_COMPACT", Const, 11, ""},
    +		{"EM_ARC_COMPACT2", Const, 11, ""},
    +		{"EM_ARM", Const, 0, ""},
    +		{"EM_AVR", Const, 11, ""},
    +		{"EM_AVR32", Const, 11, ""},
    +		{"EM_BA1", Const, 11, ""},
    +		{"EM_BA2", Const, 11, ""},
    +		{"EM_BLACKFIN", Const, 11, ""},
    +		{"EM_BPF", Const, 11, ""},
    +		{"EM_C166", Const, 11, ""},
    +		{"EM_CDP", Const, 11, ""},
    +		{"EM_CE", Const, 11, ""},
    +		{"EM_CLOUDSHIELD", Const, 11, ""},
    +		{"EM_COGE", Const, 11, ""},
    +		{"EM_COLDFIRE", Const, 0, ""},
    +		{"EM_COOL", Const, 11, ""},
    +		{"EM_COREA_1ST", Const, 11, ""},
    +		{"EM_COREA_2ND", Const, 11, ""},
    +		{"EM_CR", Const, 11, ""},
    +		{"EM_CR16", Const, 11, ""},
    +		{"EM_CRAYNV2", Const, 11, ""},
    +		{"EM_CRIS", Const, 11, ""},
    +		{"EM_CRX", Const, 11, ""},
    +		{"EM_CSR_KALIMBA", Const, 11, ""},
    +		{"EM_CUDA", Const, 11, ""},
    +		{"EM_CYPRESS_M8C", Const, 11, ""},
    +		{"EM_D10V", Const, 11, ""},
    +		{"EM_D30V", Const, 11, ""},
    +		{"EM_DSP24", Const, 11, ""},
    +		{"EM_DSPIC30F", Const, 11, ""},
    +		{"EM_DXP", Const, 11, ""},
    +		{"EM_ECOG1", Const, 11, ""},
    +		{"EM_ECOG16", Const, 11, ""},
    +		{"EM_ECOG1X", Const, 11, ""},
    +		{"EM_ECOG2", Const, 11, ""},
    +		{"EM_ETPU", Const, 11, ""},
    +		{"EM_EXCESS", Const, 11, ""},
    +		{"EM_F2MC16", Const, 11, ""},
    +		{"EM_FIREPATH", Const, 11, ""},
    +		{"EM_FR20", Const, 0, ""},
    +		{"EM_FR30", Const, 11, ""},
    +		{"EM_FT32", Const, 11, ""},
    +		{"EM_FX66", Const, 11, ""},
    +		{"EM_H8S", Const, 0, ""},
    +		{"EM_H8_300", Const, 0, ""},
    +		{"EM_H8_300H", Const, 0, ""},
    +		{"EM_H8_500", Const, 0, ""},
    +		{"EM_HUANY", Const, 11, ""},
    +		{"EM_IA_64", Const, 0, ""},
    +		{"EM_INTEL205", Const, 11, ""},
    +		{"EM_INTEL206", Const, 11, ""},
    +		{"EM_INTEL207", Const, 11, ""},
    +		{"EM_INTEL208", Const, 11, ""},
    +		{"EM_INTEL209", Const, 11, ""},
    +		{"EM_IP2K", Const, 11, ""},
    +		{"EM_JAVELIN", Const, 11, ""},
    +		{"EM_K10M", Const, 11, ""},
    +		{"EM_KM32", Const, 11, ""},
    +		{"EM_KMX16", Const, 11, ""},
    +		{"EM_KMX32", Const, 11, ""},
    +		{"EM_KMX8", Const, 11, ""},
    +		{"EM_KVARC", Const, 11, ""},
    +		{"EM_L10M", Const, 11, ""},
    +		{"EM_LANAI", Const, 11, ""},
    +		{"EM_LATTICEMICO32", Const, 11, ""},
    +		{"EM_LOONGARCH", Const, 19, ""},
    +		{"EM_M16C", Const, 11, ""},
    +		{"EM_M32", Const, 0, ""},
    +		{"EM_M32C", Const, 11, ""},
    +		{"EM_M32R", Const, 11, ""},
    +		{"EM_MANIK", Const, 11, ""},
    +		{"EM_MAX", Const, 11, ""},
    +		{"EM_MAXQ30", Const, 11, ""},
    +		{"EM_MCHP_PIC", Const, 11, ""},
    +		{"EM_MCST_ELBRUS", Const, 11, ""},
    +		{"EM_ME16", Const, 0, ""},
    +		{"EM_METAG", Const, 11, ""},
    +		{"EM_MICROBLAZE", Const, 11, ""},
    +		{"EM_MIPS", Const, 0, ""},
    +		{"EM_MIPS_RS3_LE", Const, 0, ""},
    +		{"EM_MIPS_RS4_BE", Const, 0, ""},
    +		{"EM_MIPS_X", Const, 0, ""},
    +		{"EM_MMA", Const, 0, ""},
    +		{"EM_MMDSP_PLUS", Const, 11, ""},
    +		{"EM_MMIX", Const, 11, ""},
    +		{"EM_MN10200", Const, 11, ""},
    +		{"EM_MN10300", Const, 11, ""},
    +		{"EM_MOXIE", Const, 11, ""},
    +		{"EM_MSP430", Const, 11, ""},
    +		{"EM_NCPU", Const, 0, ""},
    +		{"EM_NDR1", Const, 0, ""},
    +		{"EM_NDS32", Const, 11, ""},
    +		{"EM_NONE", Const, 0, ""},
    +		{"EM_NORC", Const, 11, ""},
    +		{"EM_NS32K", Const, 11, ""},
    +		{"EM_OPEN8", Const, 11, ""},
    +		{"EM_OPENRISC", Const, 11, ""},
    +		{"EM_PARISC", Const, 0, ""},
    +		{"EM_PCP", Const, 0, ""},
    +		{"EM_PDP10", Const, 11, ""},
    +		{"EM_PDP11", Const, 11, ""},
    +		{"EM_PDSP", Const, 11, ""},
    +		{"EM_PJ", Const, 11, ""},
    +		{"EM_PPC", Const, 0, ""},
    +		{"EM_PPC64", Const, 0, ""},
    +		{"EM_PRISM", Const, 11, ""},
    +		{"EM_QDSP6", Const, 11, ""},
    +		{"EM_R32C", Const, 11, ""},
    +		{"EM_RCE", Const, 0, ""},
    +		{"EM_RH32", Const, 0, ""},
    +		{"EM_RISCV", Const, 11, ""},
    +		{"EM_RL78", Const, 11, ""},
    +		{"EM_RS08", Const, 11, ""},
    +		{"EM_RX", Const, 11, ""},
    +		{"EM_S370", Const, 0, ""},
    +		{"EM_S390", Const, 0, ""},
    +		{"EM_SCORE7", Const, 11, ""},
    +		{"EM_SEP", Const, 11, ""},
    +		{"EM_SE_C17", Const, 11, ""},
    +		{"EM_SE_C33", Const, 11, ""},
    +		{"EM_SH", Const, 0, ""},
    +		{"EM_SHARC", Const, 11, ""},
    +		{"EM_SLE9X", Const, 11, ""},
    +		{"EM_SNP1K", Const, 11, ""},
    +		{"EM_SPARC", Const, 0, ""},
    +		{"EM_SPARC32PLUS", Const, 0, ""},
    +		{"EM_SPARCV9", Const, 0, ""},
    +		{"EM_ST100", Const, 0, ""},
    +		{"EM_ST19", Const, 11, ""},
    +		{"EM_ST200", Const, 11, ""},
    +		{"EM_ST7", Const, 11, ""},
    +		{"EM_ST9PLUS", Const, 11, ""},
    +		{"EM_STARCORE", Const, 0, ""},
    +		{"EM_STM8", Const, 11, ""},
    +		{"EM_STXP7X", Const, 11, ""},
    +		{"EM_SVX", Const, 11, ""},
    +		{"EM_TILE64", Const, 11, ""},
    +		{"EM_TILEGX", Const, 11, ""},
    +		{"EM_TILEPRO", Const, 11, ""},
    +		{"EM_TINYJ", Const, 0, ""},
    +		{"EM_TI_ARP32", Const, 11, ""},
    +		{"EM_TI_C2000", Const, 11, ""},
    +		{"EM_TI_C5500", Const, 11, ""},
    +		{"EM_TI_C6000", Const, 11, ""},
    +		{"EM_TI_PRU", Const, 11, ""},
    +		{"EM_TMM_GPP", Const, 11, ""},
    +		{"EM_TPC", Const, 11, ""},
    +		{"EM_TRICORE", Const, 0, ""},
    +		{"EM_TRIMEDIA", Const, 11, ""},
    +		{"EM_TSK3000", Const, 11, ""},
    +		{"EM_UNICORE", Const, 11, ""},
    +		{"EM_V800", Const, 0, ""},
    +		{"EM_V850", Const, 11, ""},
    +		{"EM_VAX", Const, 11, ""},
    +		{"EM_VIDEOCORE", Const, 11, ""},
    +		{"EM_VIDEOCORE3", Const, 11, ""},
    +		{"EM_VIDEOCORE5", Const, 11, ""},
    +		{"EM_VISIUM", Const, 11, ""},
    +		{"EM_VPP500", Const, 0, ""},
    +		{"EM_X86_64", Const, 0, ""},
    +		{"EM_XCORE", Const, 11, ""},
    +		{"EM_XGATE", Const, 11, ""},
    +		{"EM_XIMO16", Const, 11, ""},
    +		{"EM_XTENSA", Const, 11, ""},
    +		{"EM_Z80", Const, 11, ""},
    +		{"EM_ZSP", Const, 11, ""},
    +		{"ET_CORE", Const, 0, ""},
    +		{"ET_DYN", Const, 0, ""},
    +		{"ET_EXEC", Const, 0, ""},
    +		{"ET_HIOS", Const, 0, ""},
    +		{"ET_HIPROC", Const, 0, ""},
    +		{"ET_LOOS", Const, 0, ""},
    +		{"ET_LOPROC", Const, 0, ""},
    +		{"ET_NONE", Const, 0, ""},
    +		{"ET_REL", Const, 0, ""},
    +		{"EV_CURRENT", Const, 0, ""},
    +		{"EV_NONE", Const, 0, ""},
    +		{"ErrNoSymbols", Var, 4, ""},
    +		{"File", Type, 0, ""},
    +		{"File.FileHeader", Field, 0, ""},
    +		{"File.Progs", Field, 0, ""},
    +		{"File.Sections", Field, 0, ""},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.ABIVersion", Field, 0, ""},
    +		{"FileHeader.ByteOrder", Field, 0, ""},
    +		{"FileHeader.Class", Field, 0, ""},
    +		{"FileHeader.Data", Field, 0, ""},
    +		{"FileHeader.Entry", Field, 1, ""},
    +		{"FileHeader.Machine", Field, 0, ""},
    +		{"FileHeader.OSABI", Field, 0, ""},
    +		{"FileHeader.Type", Field, 0, ""},
    +		{"FileHeader.Version", Field, 0, ""},
    +		{"FormatError", Type, 0, ""},
    +		{"Header32", Type, 0, ""},
    +		{"Header32.Ehsize", Field, 0, ""},
    +		{"Header32.Entry", Field, 0, ""},
    +		{"Header32.Flags", Field, 0, ""},
    +		{"Header32.Ident", Field, 0, ""},
    +		{"Header32.Machine", Field, 0, ""},
    +		{"Header32.Phentsize", Field, 0, ""},
    +		{"Header32.Phnum", Field, 0, ""},
    +		{"Header32.Phoff", Field, 0, ""},
    +		{"Header32.Shentsize", Field, 0, ""},
    +		{"Header32.Shnum", Field, 0, ""},
    +		{"Header32.Shoff", Field, 0, ""},
    +		{"Header32.Shstrndx", Field, 0, ""},
    +		{"Header32.Type", Field, 0, ""},
    +		{"Header32.Version", Field, 0, ""},
    +		{"Header64", Type, 0, ""},
    +		{"Header64.Ehsize", Field, 0, ""},
    +		{"Header64.Entry", Field, 0, ""},
    +		{"Header64.Flags", Field, 0, ""},
    +		{"Header64.Ident", Field, 0, ""},
    +		{"Header64.Machine", Field, 0, ""},
    +		{"Header64.Phentsize", Field, 0, ""},
    +		{"Header64.Phnum", Field, 0, ""},
    +		{"Header64.Phoff", Field, 0, ""},
    +		{"Header64.Shentsize", Field, 0, ""},
    +		{"Header64.Shnum", Field, 0, ""},
    +		{"Header64.Shoff", Field, 0, ""},
    +		{"Header64.Shstrndx", Field, 0, ""},
    +		{"Header64.Type", Field, 0, ""},
    +		{"Header64.Version", Field, 0, ""},
    +		{"ImportedSymbol", Type, 0, ""},
    +		{"ImportedSymbol.Library", Field, 0, ""},
    +		{"ImportedSymbol.Name", Field, 0, ""},
    +		{"ImportedSymbol.Version", Field, 0, ""},
    +		{"Machine", Type, 0, ""},
    +		{"NT_FPREGSET", Const, 0, ""},
    +		{"NT_PRPSINFO", Const, 0, ""},
    +		{"NT_PRSTATUS", Const, 0, ""},
    +		{"NType", Type, 0, ""},
    +		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
    +		{"OSABI", Type, 0, ""},
    +		{"Open", Func, 0, "func(name string) (*File, error)"},
    +		{"PF_MASKOS", Const, 0, ""},
    +		{"PF_MASKPROC", Const, 0, ""},
    +		{"PF_R", Const, 0, ""},
    +		{"PF_W", Const, 0, ""},
    +		{"PF_X", Const, 0, ""},
    +		{"PT_AARCH64_ARCHEXT", Const, 16, ""},
    +		{"PT_AARCH64_UNWIND", Const, 16, ""},
    +		{"PT_ARM_ARCHEXT", Const, 16, ""},
    +		{"PT_ARM_EXIDX", Const, 16, ""},
    +		{"PT_DYNAMIC", Const, 0, ""},
    +		{"PT_GNU_EH_FRAME", Const, 16, ""},
    +		{"PT_GNU_MBIND_HI", Const, 16, ""},
    +		{"PT_GNU_MBIND_LO", Const, 16, ""},
    +		{"PT_GNU_PROPERTY", Const, 16, ""},
    +		{"PT_GNU_RELRO", Const, 16, ""},
    +		{"PT_GNU_STACK", Const, 16, ""},
    +		{"PT_HIOS", Const, 0, ""},
    +		{"PT_HIPROC", Const, 0, ""},
    +		{"PT_INTERP", Const, 0, ""},
    +		{"PT_LOAD", Const, 0, ""},
    +		{"PT_LOOS", Const, 0, ""},
    +		{"PT_LOPROC", Const, 0, ""},
    +		{"PT_MIPS_ABIFLAGS", Const, 16, ""},
    +		{"PT_MIPS_OPTIONS", Const, 16, ""},
    +		{"PT_MIPS_REGINFO", Const, 16, ""},
    +		{"PT_MIPS_RTPROC", Const, 16, ""},
    +		{"PT_NOTE", Const, 0, ""},
    +		{"PT_NULL", Const, 0, ""},
    +		{"PT_OPENBSD_BOOTDATA", Const, 16, ""},
    +		{"PT_OPENBSD_NOBTCFI", Const, 23, ""},
    +		{"PT_OPENBSD_RANDOMIZE", Const, 16, ""},
    +		{"PT_OPENBSD_WXNEEDED", Const, 16, ""},
    +		{"PT_PAX_FLAGS", Const, 16, ""},
    +		{"PT_PHDR", Const, 0, ""},
    +		{"PT_RISCV_ATTRIBUTES", Const, 25, ""},
    +		{"PT_S390_PGSTE", Const, 16, ""},
    +		{"PT_SHLIB", Const, 0, ""},
    +		{"PT_SUNWSTACK", Const, 16, ""},
    +		{"PT_SUNW_EH_FRAME", Const, 16, ""},
    +		{"PT_TLS", Const, 0, ""},
    +		{"Prog", Type, 0, ""},
    +		{"Prog.ProgHeader", Field, 0, ""},
    +		{"Prog.ReaderAt", Field, 0, ""},
    +		{"Prog32", Type, 0, ""},
    +		{"Prog32.Align", Field, 0, ""},
    +		{"Prog32.Filesz", Field, 0, ""},
    +		{"Prog32.Flags", Field, 0, ""},
    +		{"Prog32.Memsz", Field, 0, ""},
    +		{"Prog32.Off", Field, 0, ""},
    +		{"Prog32.Paddr", Field, 0, ""},
    +		{"Prog32.Type", Field, 0, ""},
    +		{"Prog32.Vaddr", Field, 0, ""},
    +		{"Prog64", Type, 0, ""},
    +		{"Prog64.Align", Field, 0, ""},
    +		{"Prog64.Filesz", Field, 0, ""},
    +		{"Prog64.Flags", Field, 0, ""},
    +		{"Prog64.Memsz", Field, 0, ""},
    +		{"Prog64.Off", Field, 0, ""},
    +		{"Prog64.Paddr", Field, 0, ""},
    +		{"Prog64.Type", Field, 0, ""},
    +		{"Prog64.Vaddr", Field, 0, ""},
    +		{"ProgFlag", Type, 0, ""},
    +		{"ProgHeader", Type, 0, ""},
    +		{"ProgHeader.Align", Field, 0, ""},
    +		{"ProgHeader.Filesz", Field, 0, ""},
    +		{"ProgHeader.Flags", Field, 0, ""},
    +		{"ProgHeader.Memsz", Field, 0, ""},
    +		{"ProgHeader.Off", Field, 0, ""},
    +		{"ProgHeader.Paddr", Field, 0, ""},
    +		{"ProgHeader.Type", Field, 0, ""},
    +		{"ProgHeader.Vaddr", Field, 0, ""},
    +		{"ProgType", Type, 0, ""},
    +		{"R_386", Type, 0, ""},
    +		{"R_386_16", Const, 10, ""},
    +		{"R_386_32", Const, 0, ""},
    +		{"R_386_32PLT", Const, 10, ""},
    +		{"R_386_8", Const, 10, ""},
    +		{"R_386_COPY", Const, 0, ""},
    +		{"R_386_GLOB_DAT", Const, 0, ""},
    +		{"R_386_GOT32", Const, 0, ""},
    +		{"R_386_GOT32X", Const, 10, ""},
    +		{"R_386_GOTOFF", Const, 0, ""},
    +		{"R_386_GOTPC", Const, 0, ""},
    +		{"R_386_IRELATIVE", Const, 10, ""},
    +		{"R_386_JMP_SLOT", Const, 0, ""},
    +		{"R_386_NONE", Const, 0, ""},
    +		{"R_386_PC16", Const, 10, ""},
    +		{"R_386_PC32", Const, 0, ""},
    +		{"R_386_PC8", Const, 10, ""},
    +		{"R_386_PLT32", Const, 0, ""},
    +		{"R_386_RELATIVE", Const, 0, ""},
    +		{"R_386_SIZE32", Const, 10, ""},
    +		{"R_386_TLS_DESC", Const, 10, ""},
    +		{"R_386_TLS_DESC_CALL", Const, 10, ""},
    +		{"R_386_TLS_DTPMOD32", Const, 0, ""},
    +		{"R_386_TLS_DTPOFF32", Const, 0, ""},
    +		{"R_386_TLS_GD", Const, 0, ""},
    +		{"R_386_TLS_GD_32", Const, 0, ""},
    +		{"R_386_TLS_GD_CALL", Const, 0, ""},
    +		{"R_386_TLS_GD_POP", Const, 0, ""},
    +		{"R_386_TLS_GD_PUSH", Const, 0, ""},
    +		{"R_386_TLS_GOTDESC", Const, 10, ""},
    +		{"R_386_TLS_GOTIE", Const, 0, ""},
    +		{"R_386_TLS_IE", Const, 0, ""},
    +		{"R_386_TLS_IE_32", Const, 0, ""},
    +		{"R_386_TLS_LDM", Const, 0, ""},
    +		{"R_386_TLS_LDM_32", Const, 0, ""},
    +		{"R_386_TLS_LDM_CALL", Const, 0, ""},
    +		{"R_386_TLS_LDM_POP", Const, 0, ""},
    +		{"R_386_TLS_LDM_PUSH", Const, 0, ""},
    +		{"R_386_TLS_LDO_32", Const, 0, ""},
    +		{"R_386_TLS_LE", Const, 0, ""},
    +		{"R_386_TLS_LE_32", Const, 0, ""},
    +		{"R_386_TLS_TPOFF", Const, 0, ""},
    +		{"R_386_TLS_TPOFF32", Const, 0, ""},
    +		{"R_390", Type, 7, ""},
    +		{"R_390_12", Const, 7, ""},
    +		{"R_390_16", Const, 7, ""},
    +		{"R_390_20", Const, 7, ""},
    +		{"R_390_32", Const, 7, ""},
    +		{"R_390_64", Const, 7, ""},
    +		{"R_390_8", Const, 7, ""},
    +		{"R_390_COPY", Const, 7, ""},
    +		{"R_390_GLOB_DAT", Const, 7, ""},
    +		{"R_390_GOT12", Const, 7, ""},
    +		{"R_390_GOT16", Const, 7, ""},
    +		{"R_390_GOT20", Const, 7, ""},
    +		{"R_390_GOT32", Const, 7, ""},
    +		{"R_390_GOT64", Const, 7, ""},
    +		{"R_390_GOTENT", Const, 7, ""},
    +		{"R_390_GOTOFF", Const, 7, ""},
    +		{"R_390_GOTOFF16", Const, 7, ""},
    +		{"R_390_GOTOFF64", Const, 7, ""},
    +		{"R_390_GOTPC", Const, 7, ""},
    +		{"R_390_GOTPCDBL", Const, 7, ""},
    +		{"R_390_GOTPLT12", Const, 7, ""},
    +		{"R_390_GOTPLT16", Const, 7, ""},
    +		{"R_390_GOTPLT20", Const, 7, ""},
    +		{"R_390_GOTPLT32", Const, 7, ""},
    +		{"R_390_GOTPLT64", Const, 7, ""},
    +		{"R_390_GOTPLTENT", Const, 7, ""},
    +		{"R_390_GOTPLTOFF16", Const, 7, ""},
    +		{"R_390_GOTPLTOFF32", Const, 7, ""},
    +		{"R_390_GOTPLTOFF64", Const, 7, ""},
    +		{"R_390_JMP_SLOT", Const, 7, ""},
    +		{"R_390_NONE", Const, 7, ""},
    +		{"R_390_PC16", Const, 7, ""},
    +		{"R_390_PC16DBL", Const, 7, ""},
    +		{"R_390_PC32", Const, 7, ""},
    +		{"R_390_PC32DBL", Const, 7, ""},
    +		{"R_390_PC64", Const, 7, ""},
    +		{"R_390_PLT16DBL", Const, 7, ""},
    +		{"R_390_PLT32", Const, 7, ""},
    +		{"R_390_PLT32DBL", Const, 7, ""},
    +		{"R_390_PLT64", Const, 7, ""},
    +		{"R_390_RELATIVE", Const, 7, ""},
    +		{"R_390_TLS_DTPMOD", Const, 7, ""},
    +		{"R_390_TLS_DTPOFF", Const, 7, ""},
    +		{"R_390_TLS_GD32", Const, 7, ""},
    +		{"R_390_TLS_GD64", Const, 7, ""},
    +		{"R_390_TLS_GDCALL", Const, 7, ""},
    +		{"R_390_TLS_GOTIE12", Const, 7, ""},
    +		{"R_390_TLS_GOTIE20", Const, 7, ""},
    +		{"R_390_TLS_GOTIE32", Const, 7, ""},
    +		{"R_390_TLS_GOTIE64", Const, 7, ""},
    +		{"R_390_TLS_IE32", Const, 7, ""},
    +		{"R_390_TLS_IE64", Const, 7, ""},
    +		{"R_390_TLS_IEENT", Const, 7, ""},
    +		{"R_390_TLS_LDCALL", Const, 7, ""},
    +		{"R_390_TLS_LDM32", Const, 7, ""},
    +		{"R_390_TLS_LDM64", Const, 7, ""},
    +		{"R_390_TLS_LDO32", Const, 7, ""},
    +		{"R_390_TLS_LDO64", Const, 7, ""},
    +		{"R_390_TLS_LE32", Const, 7, ""},
    +		{"R_390_TLS_LE64", Const, 7, ""},
    +		{"R_390_TLS_LOAD", Const, 7, ""},
    +		{"R_390_TLS_TPOFF", Const, 7, ""},
    +		{"R_AARCH64", Type, 4, ""},
    +		{"R_AARCH64_ABS16", Const, 4, ""},
    +		{"R_AARCH64_ABS32", Const, 4, ""},
    +		{"R_AARCH64_ABS64", Const, 4, ""},
    +		{"R_AARCH64_ADD_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_ADR_GOT_PAGE", Const, 4, ""},
    +		{"R_AARCH64_ADR_PREL_LO21", Const, 4, ""},
    +		{"R_AARCH64_ADR_PREL_PG_HI21", Const, 4, ""},
    +		{"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4, ""},
    +		{"R_AARCH64_CALL26", Const, 4, ""},
    +		{"R_AARCH64_CONDBR19", Const, 4, ""},
    +		{"R_AARCH64_COPY", Const, 4, ""},
    +		{"R_AARCH64_GLOB_DAT", Const, 4, ""},
    +		{"R_AARCH64_GOT_LD_PREL19", Const, 4, ""},
    +		{"R_AARCH64_IRELATIVE", Const, 4, ""},
    +		{"R_AARCH64_JUMP26", Const, 4, ""},
    +		{"R_AARCH64_JUMP_SLOT", Const, 4, ""},
    +		{"R_AARCH64_LD64_GOTOFF_LO15", Const, 10, ""},
    +		{"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10, ""},
    +		{"R_AARCH64_LD64_GOT_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_LD_PREL_LO19", Const, 4, ""},
    +		{"R_AARCH64_MOVW_SABS_G0", Const, 4, ""},
    +		{"R_AARCH64_MOVW_SABS_G1", Const, 4, ""},
    +		{"R_AARCH64_MOVW_SABS_G2", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G0", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G1", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G1_NC", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G2", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G2_NC", Const, 4, ""},
    +		{"R_AARCH64_MOVW_UABS_G3", Const, 4, ""},
    +		{"R_AARCH64_NONE", Const, 4, ""},
    +		{"R_AARCH64_NULL", Const, 4, ""},
    +		{"R_AARCH64_P32_ABS16", Const, 4, ""},
    +		{"R_AARCH64_P32_ABS32", Const, 4, ""},
    +		{"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4, ""},
    +		{"R_AARCH64_P32_ADR_PREL_LO21", Const, 4, ""},
    +		{"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4, ""},
    +		{"R_AARCH64_P32_CALL26", Const, 4, ""},
    +		{"R_AARCH64_P32_CONDBR19", Const, 4, ""},
    +		{"R_AARCH64_P32_COPY", Const, 4, ""},
    +		{"R_AARCH64_P32_GLOB_DAT", Const, 4, ""},
    +		{"R_AARCH64_P32_GOT_LD_PREL19", Const, 4, ""},
    +		{"R_AARCH64_P32_IRELATIVE", Const, 4, ""},
    +		{"R_AARCH64_P32_JUMP26", Const, 4, ""},
    +		{"R_AARCH64_P32_JUMP_SLOT", Const, 4, ""},
    +		{"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_LD_PREL_LO19", Const, 4, ""},
    +		{"R_AARCH64_P32_MOVW_SABS_G0", Const, 4, ""},
    +		{"R_AARCH64_P32_MOVW_UABS_G0", Const, 4, ""},
    +		{"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_MOVW_UABS_G1", Const, 4, ""},
    +		{"R_AARCH64_P32_PREL16", Const, 4, ""},
    +		{"R_AARCH64_P32_PREL32", Const, 4, ""},
    +		{"R_AARCH64_P32_RELATIVE", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_CALL", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
    +		{"R_AARCH64_P32_TLS_DTPMOD", Const, 4, ""},
    +		{"R_AARCH64_P32_TLS_DTPREL", Const, 4, ""},
    +		{"R_AARCH64_P32_TLS_TPREL", Const, 4, ""},
    +		{"R_AARCH64_P32_TSTBR14", Const, 4, ""},
    +		{"R_AARCH64_PREL16", Const, 4, ""},
    +		{"R_AARCH64_PREL32", Const, 4, ""},
    +		{"R_AARCH64_PREL64", Const, 4, ""},
    +		{"R_AARCH64_RELATIVE", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_ADD", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_CALL", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_LDR", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_LD_PREL19", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSDESC_OFF_G1", Const, 4, ""},
    +		{"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_TLSGD_ADR_PREL21", Const, 10, ""},
    +		{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10, ""},
    +		{"R_AARCH64_TLSGD_MOVW_G1", Const, 10, ""},
    +		{"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
    +		{"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
    +		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4, ""},
    +		{"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10, ""},
    +		{"R_AARCH64_TLSLD_ADR_PREL21", Const, 10, ""},
    +		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10, ""},
    +		{"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10, ""},
    +		{"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10, ""},
    +		{"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4, ""},
    +		{"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4, ""},
    +		{"R_AARCH64_TLS_DTPMOD64", Const, 4, ""},
    +		{"R_AARCH64_TLS_DTPREL64", Const, 4, ""},
    +		{"R_AARCH64_TLS_TPREL64", Const, 4, ""},
    +		{"R_AARCH64_TSTBR14", Const, 4, ""},
    +		{"R_ALPHA", Type, 0, ""},
    +		{"R_ALPHA_BRADDR", Const, 0, ""},
    +		{"R_ALPHA_COPY", Const, 0, ""},
    +		{"R_ALPHA_GLOB_DAT", Const, 0, ""},
    +		{"R_ALPHA_GPDISP", Const, 0, ""},
    +		{"R_ALPHA_GPREL32", Const, 0, ""},
    +		{"R_ALPHA_GPRELHIGH", Const, 0, ""},
    +		{"R_ALPHA_GPRELLOW", Const, 0, ""},
    +		{"R_ALPHA_GPVALUE", Const, 0, ""},
    +		{"R_ALPHA_HINT", Const, 0, ""},
    +		{"R_ALPHA_IMMED_BR_HI32", Const, 0, ""},
    +		{"R_ALPHA_IMMED_GP_16", Const, 0, ""},
    +		{"R_ALPHA_IMMED_GP_HI32", Const, 0, ""},
    +		{"R_ALPHA_IMMED_LO32", Const, 0, ""},
    +		{"R_ALPHA_IMMED_SCN_HI32", Const, 0, ""},
    +		{"R_ALPHA_JMP_SLOT", Const, 0, ""},
    +		{"R_ALPHA_LITERAL", Const, 0, ""},
    +		{"R_ALPHA_LITUSE", Const, 0, ""},
    +		{"R_ALPHA_NONE", Const, 0, ""},
    +		{"R_ALPHA_OP_PRSHIFT", Const, 0, ""},
    +		{"R_ALPHA_OP_PSUB", Const, 0, ""},
    +		{"R_ALPHA_OP_PUSH", Const, 0, ""},
    +		{"R_ALPHA_OP_STORE", Const, 0, ""},
    +		{"R_ALPHA_REFLONG", Const, 0, ""},
    +		{"R_ALPHA_REFQUAD", Const, 0, ""},
    +		{"R_ALPHA_RELATIVE", Const, 0, ""},
    +		{"R_ALPHA_SREL16", Const, 0, ""},
    +		{"R_ALPHA_SREL32", Const, 0, ""},
    +		{"R_ALPHA_SREL64", Const, 0, ""},
    +		{"R_ARM", Type, 0, ""},
    +		{"R_ARM_ABS12", Const, 0, ""},
    +		{"R_ARM_ABS16", Const, 0, ""},
    +		{"R_ARM_ABS32", Const, 0, ""},
    +		{"R_ARM_ABS32_NOI", Const, 10, ""},
    +		{"R_ARM_ABS8", Const, 0, ""},
    +		{"R_ARM_ALU_PCREL_15_8", Const, 10, ""},
    +		{"R_ARM_ALU_PCREL_23_15", Const, 10, ""},
    +		{"R_ARM_ALU_PCREL_7_0", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G0", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G0_NC", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G1", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G1_NC", Const, 10, ""},
    +		{"R_ARM_ALU_PC_G2", Const, 10, ""},
    +		{"R_ARM_ALU_SBREL_19_12_NC", Const, 10, ""},
    +		{"R_ARM_ALU_SBREL_27_20_CK", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G0", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G0_NC", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G1", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G1_NC", Const, 10, ""},
    +		{"R_ARM_ALU_SB_G2", Const, 10, ""},
    +		{"R_ARM_AMP_VCALL9", Const, 0, ""},
    +		{"R_ARM_BASE_ABS", Const, 10, ""},
    +		{"R_ARM_CALL", Const, 10, ""},
    +		{"R_ARM_COPY", Const, 0, ""},
    +		{"R_ARM_GLOB_DAT", Const, 0, ""},
    +		{"R_ARM_GNU_VTENTRY", Const, 0, ""},
    +		{"R_ARM_GNU_VTINHERIT", Const, 0, ""},
    +		{"R_ARM_GOT32", Const, 0, ""},
    +		{"R_ARM_GOTOFF", Const, 0, ""},
    +		{"R_ARM_GOTOFF12", Const, 10, ""},
    +		{"R_ARM_GOTPC", Const, 0, ""},
    +		{"R_ARM_GOTRELAX", Const, 10, ""},
    +		{"R_ARM_GOT_ABS", Const, 10, ""},
    +		{"R_ARM_GOT_BREL12", Const, 10, ""},
    +		{"R_ARM_GOT_PREL", Const, 10, ""},
    +		{"R_ARM_IRELATIVE", Const, 10, ""},
    +		{"R_ARM_JUMP24", Const, 10, ""},
    +		{"R_ARM_JUMP_SLOT", Const, 0, ""},
    +		{"R_ARM_LDC_PC_G0", Const, 10, ""},
    +		{"R_ARM_LDC_PC_G1", Const, 10, ""},
    +		{"R_ARM_LDC_PC_G2", Const, 10, ""},
    +		{"R_ARM_LDC_SB_G0", Const, 10, ""},
    +		{"R_ARM_LDC_SB_G1", Const, 10, ""},
    +		{"R_ARM_LDC_SB_G2", Const, 10, ""},
    +		{"R_ARM_LDRS_PC_G0", Const, 10, ""},
    +		{"R_ARM_LDRS_PC_G1", Const, 10, ""},
    +		{"R_ARM_LDRS_PC_G2", Const, 10, ""},
    +		{"R_ARM_LDRS_SB_G0", Const, 10, ""},
    +		{"R_ARM_LDRS_SB_G1", Const, 10, ""},
    +		{"R_ARM_LDRS_SB_G2", Const, 10, ""},
    +		{"R_ARM_LDR_PC_G1", Const, 10, ""},
    +		{"R_ARM_LDR_PC_G2", Const, 10, ""},
    +		{"R_ARM_LDR_SBREL_11_10_NC", Const, 10, ""},
    +		{"R_ARM_LDR_SB_G0", Const, 10, ""},
    +		{"R_ARM_LDR_SB_G1", Const, 10, ""},
    +		{"R_ARM_LDR_SB_G2", Const, 10, ""},
    +		{"R_ARM_ME_TOO", Const, 10, ""},
    +		{"R_ARM_MOVT_ABS", Const, 10, ""},
    +		{"R_ARM_MOVT_BREL", Const, 10, ""},
    +		{"R_ARM_MOVT_PREL", Const, 10, ""},
    +		{"R_ARM_MOVW_ABS_NC", Const, 10, ""},
    +		{"R_ARM_MOVW_BREL", Const, 10, ""},
    +		{"R_ARM_MOVW_BREL_NC", Const, 10, ""},
    +		{"R_ARM_MOVW_PREL_NC", Const, 10, ""},
    +		{"R_ARM_NONE", Const, 0, ""},
    +		{"R_ARM_PC13", Const, 0, ""},
    +		{"R_ARM_PC24", Const, 0, ""},
    +		{"R_ARM_PLT32", Const, 0, ""},
    +		{"R_ARM_PLT32_ABS", Const, 10, ""},
    +		{"R_ARM_PREL31", Const, 10, ""},
    +		{"R_ARM_PRIVATE_0", Const, 10, ""},
    +		{"R_ARM_PRIVATE_1", Const, 10, ""},
    +		{"R_ARM_PRIVATE_10", Const, 10, ""},
    +		{"R_ARM_PRIVATE_11", Const, 10, ""},
    +		{"R_ARM_PRIVATE_12", Const, 10, ""},
    +		{"R_ARM_PRIVATE_13", Const, 10, ""},
    +		{"R_ARM_PRIVATE_14", Const, 10, ""},
    +		{"R_ARM_PRIVATE_15", Const, 10, ""},
    +		{"R_ARM_PRIVATE_2", Const, 10, ""},
    +		{"R_ARM_PRIVATE_3", Const, 10, ""},
    +		{"R_ARM_PRIVATE_4", Const, 10, ""},
    +		{"R_ARM_PRIVATE_5", Const, 10, ""},
    +		{"R_ARM_PRIVATE_6", Const, 10, ""},
    +		{"R_ARM_PRIVATE_7", Const, 10, ""},
    +		{"R_ARM_PRIVATE_8", Const, 10, ""},
    +		{"R_ARM_PRIVATE_9", Const, 10, ""},
    +		{"R_ARM_RABS32", Const, 0, ""},
    +		{"R_ARM_RBASE", Const, 0, ""},
    +		{"R_ARM_REL32", Const, 0, ""},
    +		{"R_ARM_REL32_NOI", Const, 10, ""},
    +		{"R_ARM_RELATIVE", Const, 0, ""},
    +		{"R_ARM_RPC24", Const, 0, ""},
    +		{"R_ARM_RREL32", Const, 0, ""},
    +		{"R_ARM_RSBREL32", Const, 0, ""},
    +		{"R_ARM_RXPC25", Const, 10, ""},
    +		{"R_ARM_SBREL31", Const, 10, ""},
    +		{"R_ARM_SBREL32", Const, 0, ""},
    +		{"R_ARM_SWI24", Const, 0, ""},
    +		{"R_ARM_TARGET1", Const, 10, ""},
    +		{"R_ARM_TARGET2", Const, 10, ""},
    +		{"R_ARM_THM_ABS5", Const, 0, ""},
    +		{"R_ARM_THM_ALU_ABS_G0_NC", Const, 10, ""},
    +		{"R_ARM_THM_ALU_ABS_G1_NC", Const, 10, ""},
    +		{"R_ARM_THM_ALU_ABS_G2_NC", Const, 10, ""},
    +		{"R_ARM_THM_ALU_ABS_G3", Const, 10, ""},
    +		{"R_ARM_THM_ALU_PREL_11_0", Const, 10, ""},
    +		{"R_ARM_THM_GOT_BREL12", Const, 10, ""},
    +		{"R_ARM_THM_JUMP11", Const, 10, ""},
    +		{"R_ARM_THM_JUMP19", Const, 10, ""},
    +		{"R_ARM_THM_JUMP24", Const, 10, ""},
    +		{"R_ARM_THM_JUMP6", Const, 10, ""},
    +		{"R_ARM_THM_JUMP8", Const, 10, ""},
    +		{"R_ARM_THM_MOVT_ABS", Const, 10, ""},
    +		{"R_ARM_THM_MOVT_BREL", Const, 10, ""},
    +		{"R_ARM_THM_MOVT_PREL", Const, 10, ""},
    +		{"R_ARM_THM_MOVW_ABS_NC", Const, 10, ""},
    +		{"R_ARM_THM_MOVW_BREL", Const, 10, ""},
    +		{"R_ARM_THM_MOVW_BREL_NC", Const, 10, ""},
    +		{"R_ARM_THM_MOVW_PREL_NC", Const, 10, ""},
    +		{"R_ARM_THM_PC12", Const, 10, ""},
    +		{"R_ARM_THM_PC22", Const, 0, ""},
    +		{"R_ARM_THM_PC8", Const, 0, ""},
    +		{"R_ARM_THM_RPC22", Const, 0, ""},
    +		{"R_ARM_THM_SWI8", Const, 0, ""},
    +		{"R_ARM_THM_TLS_CALL", Const, 10, ""},
    +		{"R_ARM_THM_TLS_DESCSEQ16", Const, 10, ""},
    +		{"R_ARM_THM_TLS_DESCSEQ32", Const, 10, ""},
    +		{"R_ARM_THM_XPC22", Const, 0, ""},
    +		{"R_ARM_TLS_CALL", Const, 10, ""},
    +		{"R_ARM_TLS_DESCSEQ", Const, 10, ""},
    +		{"R_ARM_TLS_DTPMOD32", Const, 10, ""},
    +		{"R_ARM_TLS_DTPOFF32", Const, 10, ""},
    +		{"R_ARM_TLS_GD32", Const, 10, ""},
    +		{"R_ARM_TLS_GOTDESC", Const, 10, ""},
    +		{"R_ARM_TLS_IE12GP", Const, 10, ""},
    +		{"R_ARM_TLS_IE32", Const, 10, ""},
    +		{"R_ARM_TLS_LDM32", Const, 10, ""},
    +		{"R_ARM_TLS_LDO12", Const, 10, ""},
    +		{"R_ARM_TLS_LDO32", Const, 10, ""},
    +		{"R_ARM_TLS_LE12", Const, 10, ""},
    +		{"R_ARM_TLS_LE32", Const, 10, ""},
    +		{"R_ARM_TLS_TPOFF32", Const, 10, ""},
    +		{"R_ARM_V4BX", Const, 10, ""},
    +		{"R_ARM_XPC25", Const, 0, ""},
    +		{"R_INFO", Func, 0, "func(sym uint32, typ uint32) uint64"},
    +		{"R_INFO32", Func, 0, "func(sym uint32, typ uint32) uint32"},
    +		{"R_LARCH", Type, 19, ""},
    +		{"R_LARCH_32", Const, 19, ""},
    +		{"R_LARCH_32_PCREL", Const, 20, ""},
    +		{"R_LARCH_64", Const, 19, ""},
    +		{"R_LARCH_64_PCREL", Const, 22, ""},
    +		{"R_LARCH_ABS64_HI12", Const, 20, ""},
    +		{"R_LARCH_ABS64_LO20", Const, 20, ""},
    +		{"R_LARCH_ABS_HI20", Const, 20, ""},
    +		{"R_LARCH_ABS_LO12", Const, 20, ""},
    +		{"R_LARCH_ADD16", Const, 19, ""},
    +		{"R_LARCH_ADD24", Const, 19, ""},
    +		{"R_LARCH_ADD32", Const, 19, ""},
    +		{"R_LARCH_ADD6", Const, 22, ""},
    +		{"R_LARCH_ADD64", Const, 19, ""},
    +		{"R_LARCH_ADD8", Const, 19, ""},
    +		{"R_LARCH_ADD_ULEB128", Const, 22, ""},
    +		{"R_LARCH_ALIGN", Const, 22, ""},
    +		{"R_LARCH_B16", Const, 20, ""},
    +		{"R_LARCH_B21", Const, 20, ""},
    +		{"R_LARCH_B26", Const, 20, ""},
    +		{"R_LARCH_CFA", Const, 22, ""},
    +		{"R_LARCH_COPY", Const, 19, ""},
    +		{"R_LARCH_DELETE", Const, 22, ""},
    +		{"R_LARCH_GNU_VTENTRY", Const, 20, ""},
    +		{"R_LARCH_GNU_VTINHERIT", Const, 20, ""},
    +		{"R_LARCH_GOT64_HI12", Const, 20, ""},
    +		{"R_LARCH_GOT64_LO20", Const, 20, ""},
    +		{"R_LARCH_GOT64_PC_HI12", Const, 20, ""},
    +		{"R_LARCH_GOT64_PC_LO20", Const, 20, ""},
    +		{"R_LARCH_GOT_HI20", Const, 20, ""},
    +		{"R_LARCH_GOT_LO12", Const, 20, ""},
    +		{"R_LARCH_GOT_PC_HI20", Const, 20, ""},
    +		{"R_LARCH_GOT_PC_LO12", Const, 20, ""},
    +		{"R_LARCH_IRELATIVE", Const, 19, ""},
    +		{"R_LARCH_JUMP_SLOT", Const, 19, ""},
    +		{"R_LARCH_MARK_LA", Const, 19, ""},
    +		{"R_LARCH_MARK_PCREL", Const, 19, ""},
    +		{"R_LARCH_NONE", Const, 19, ""},
    +		{"R_LARCH_PCALA64_HI12", Const, 20, ""},
    +		{"R_LARCH_PCALA64_LO20", Const, 20, ""},
    +		{"R_LARCH_PCALA_HI20", Const, 20, ""},
    +		{"R_LARCH_PCALA_LO12", Const, 20, ""},
    +		{"R_LARCH_PCREL20_S2", Const, 22, ""},
    +		{"R_LARCH_RELATIVE", Const, 19, ""},
    +		{"R_LARCH_RELAX", Const, 20, ""},
    +		{"R_LARCH_SOP_ADD", Const, 19, ""},
    +		{"R_LARCH_SOP_AND", Const, 19, ""},
    +		{"R_LARCH_SOP_ASSERT", Const, 19, ""},
    +		{"R_LARCH_SOP_IF_ELSE", Const, 19, ""},
    +		{"R_LARCH_SOP_NOT", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_10_12", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_10_16", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_10_5", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_S_5_20", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_U", Const, 19, ""},
    +		{"R_LARCH_SOP_POP_32_U_10_12", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_DUP", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_GPREL", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_PCREL", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_TLS_GD", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19, ""},
    +		{"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19, ""},
    +		{"R_LARCH_SOP_SL", Const, 19, ""},
    +		{"R_LARCH_SOP_SR", Const, 19, ""},
    +		{"R_LARCH_SOP_SUB", Const, 19, ""},
    +		{"R_LARCH_SUB16", Const, 19, ""},
    +		{"R_LARCH_SUB24", Const, 19, ""},
    +		{"R_LARCH_SUB32", Const, 19, ""},
    +		{"R_LARCH_SUB6", Const, 22, ""},
    +		{"R_LARCH_SUB64", Const, 19, ""},
    +		{"R_LARCH_SUB8", Const, 19, ""},
    +		{"R_LARCH_SUB_ULEB128", Const, 22, ""},
    +		{"R_LARCH_TLS_DTPMOD32", Const, 19, ""},
    +		{"R_LARCH_TLS_DTPMOD64", Const, 19, ""},
    +		{"R_LARCH_TLS_DTPREL32", Const, 19, ""},
    +		{"R_LARCH_TLS_DTPREL64", Const, 19, ""},
    +		{"R_LARCH_TLS_GD_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE64_HI12", Const, 20, ""},
    +		{"R_LARCH_TLS_IE64_LO20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE64_PC_HI12", Const, 20, ""},
    +		{"R_LARCH_TLS_IE64_PC_LO20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE_LO12", Const, 20, ""},
    +		{"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""},
    +		{"R_LARCH_TLS_LD_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_LE64_HI12", Const, 20, ""},
    +		{"R_LARCH_TLS_LE64_LO20", Const, 20, ""},
    +		{"R_LARCH_TLS_LE_HI20", Const, 20, ""},
    +		{"R_LARCH_TLS_LE_LO12", Const, 20, ""},
    +		{"R_LARCH_TLS_TPREL32", Const, 19, ""},
    +		{"R_LARCH_TLS_TPREL64", Const, 19, ""},
    +		{"R_MIPS", Type, 6, ""},
    +		{"R_MIPS_16", Const, 6, ""},
    +		{"R_MIPS_26", Const, 6, ""},
    +		{"R_MIPS_32", Const, 6, ""},
    +		{"R_MIPS_64", Const, 6, ""},
    +		{"R_MIPS_ADD_IMMEDIATE", Const, 6, ""},
    +		{"R_MIPS_CALL16", Const, 6, ""},
    +		{"R_MIPS_CALL_HI16", Const, 6, ""},
    +		{"R_MIPS_CALL_LO16", Const, 6, ""},
    +		{"R_MIPS_DELETE", Const, 6, ""},
    +		{"R_MIPS_GOT16", Const, 6, ""},
    +		{"R_MIPS_GOT_DISP", Const, 6, ""},
    +		{"R_MIPS_GOT_HI16", Const, 6, ""},
    +		{"R_MIPS_GOT_LO16", Const, 6, ""},
    +		{"R_MIPS_GOT_OFST", Const, 6, ""},
    +		{"R_MIPS_GOT_PAGE", Const, 6, ""},
    +		{"R_MIPS_GPREL16", Const, 6, ""},
    +		{"R_MIPS_GPREL32", Const, 6, ""},
    +		{"R_MIPS_HI16", Const, 6, ""},
    +		{"R_MIPS_HIGHER", Const, 6, ""},
    +		{"R_MIPS_HIGHEST", Const, 6, ""},
    +		{"R_MIPS_INSERT_A", Const, 6, ""},
    +		{"R_MIPS_INSERT_B", Const, 6, ""},
    +		{"R_MIPS_JALR", Const, 6, ""},
    +		{"R_MIPS_LITERAL", Const, 6, ""},
    +		{"R_MIPS_LO16", Const, 6, ""},
    +		{"R_MIPS_NONE", Const, 6, ""},
    +		{"R_MIPS_PC16", Const, 6, ""},
    +		{"R_MIPS_PC32", Const, 22, ""},
    +		{"R_MIPS_PJUMP", Const, 6, ""},
    +		{"R_MIPS_REL16", Const, 6, ""},
    +		{"R_MIPS_REL32", Const, 6, ""},
    +		{"R_MIPS_RELGOT", Const, 6, ""},
    +		{"R_MIPS_SCN_DISP", Const, 6, ""},
    +		{"R_MIPS_SHIFT5", Const, 6, ""},
    +		{"R_MIPS_SHIFT6", Const, 6, ""},
    +		{"R_MIPS_SUB", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPMOD32", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPMOD64", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPREL32", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPREL64", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPREL_HI16", Const, 6, ""},
    +		{"R_MIPS_TLS_DTPREL_LO16", Const, 6, ""},
    +		{"R_MIPS_TLS_GD", Const, 6, ""},
    +		{"R_MIPS_TLS_GOTTPREL", Const, 6, ""},
    +		{"R_MIPS_TLS_LDM", Const, 6, ""},
    +		{"R_MIPS_TLS_TPREL32", Const, 6, ""},
    +		{"R_MIPS_TLS_TPREL64", Const, 6, ""},
    +		{"R_MIPS_TLS_TPREL_HI16", Const, 6, ""},
    +		{"R_MIPS_TLS_TPREL_LO16", Const, 6, ""},
    +		{"R_PPC", Type, 0, ""},
    +		{"R_PPC64", Type, 5, ""},
    +		{"R_PPC64_ADDR14", Const, 5, ""},
    +		{"R_PPC64_ADDR14_BRNTAKEN", Const, 5, ""},
    +		{"R_PPC64_ADDR14_BRTAKEN", Const, 5, ""},
    +		{"R_PPC64_ADDR16", Const, 5, ""},
    +		{"R_PPC64_ADDR16_DS", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HA", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HI", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGH", Const, 10, ""},
    +		{"R_PPC64_ADDR16_HIGHA", Const, 10, ""},
    +		{"R_PPC64_ADDR16_HIGHER", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGHER34", Const, 20, ""},
    +		{"R_PPC64_ADDR16_HIGHERA", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGHERA34", Const, 20, ""},
    +		{"R_PPC64_ADDR16_HIGHEST", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGHEST34", Const, 20, ""},
    +		{"R_PPC64_ADDR16_HIGHESTA", Const, 5, ""},
    +		{"R_PPC64_ADDR16_HIGHESTA34", Const, 20, ""},
    +		{"R_PPC64_ADDR16_LO", Const, 5, ""},
    +		{"R_PPC64_ADDR16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_ADDR24", Const, 5, ""},
    +		{"R_PPC64_ADDR32", Const, 5, ""},
    +		{"R_PPC64_ADDR64", Const, 5, ""},
    +		{"R_PPC64_ADDR64_LOCAL", Const, 10, ""},
    +		{"R_PPC64_COPY", Const, 20, ""},
    +		{"R_PPC64_D28", Const, 20, ""},
    +		{"R_PPC64_D34", Const, 20, ""},
    +		{"R_PPC64_D34_HA30", Const, 20, ""},
    +		{"R_PPC64_D34_HI30", Const, 20, ""},
    +		{"R_PPC64_D34_LO", Const, 20, ""},
    +		{"R_PPC64_DTPMOD64", Const, 5, ""},
    +		{"R_PPC64_DTPREL16", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_DS", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HA", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HI", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HIGH", Const, 10, ""},
    +		{"R_PPC64_DTPREL16_HIGHA", Const, 10, ""},
    +		{"R_PPC64_DTPREL16_HIGHER", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HIGHERA", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HIGHEST", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_HIGHESTA", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_LO", Const, 5, ""},
    +		{"R_PPC64_DTPREL16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_DTPREL34", Const, 20, ""},
    +		{"R_PPC64_DTPREL64", Const, 5, ""},
    +		{"R_PPC64_ENTRY", Const, 10, ""},
    +		{"R_PPC64_GLOB_DAT", Const, 20, ""},
    +		{"R_PPC64_GNU_VTENTRY", Const, 20, ""},
    +		{"R_PPC64_GNU_VTINHERIT", Const, 20, ""},
    +		{"R_PPC64_GOT16", Const, 5, ""},
    +		{"R_PPC64_GOT16_DS", Const, 5, ""},
    +		{"R_PPC64_GOT16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT16_LO", Const, 5, ""},
    +		{"R_PPC64_GOT16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL16_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_DTPREL_PCREL34", Const, 20, ""},
    +		{"R_PPC64_GOT_PCREL34", Const, 20, ""},
    +		{"R_PPC64_GOT_TLSGD16", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSGD16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSGD16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSGD16_LO", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSGD_PCREL34", Const, 20, ""},
    +		{"R_PPC64_GOT_TLSLD16", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSLD16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSLD16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSLD16_LO", Const, 5, ""},
    +		{"R_PPC64_GOT_TLSLD_PCREL34", Const, 20, ""},
    +		{"R_PPC64_GOT_TPREL16_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_TPREL16_HA", Const, 5, ""},
    +		{"R_PPC64_GOT_TPREL16_HI", Const, 5, ""},
    +		{"R_PPC64_GOT_TPREL16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_GOT_TPREL_PCREL34", Const, 20, ""},
    +		{"R_PPC64_IRELATIVE", Const, 10, ""},
    +		{"R_PPC64_JMP_IREL", Const, 10, ""},
    +		{"R_PPC64_JMP_SLOT", Const, 5, ""},
    +		{"R_PPC64_NONE", Const, 5, ""},
    +		{"R_PPC64_PCREL28", Const, 20, ""},
    +		{"R_PPC64_PCREL34", Const, 20, ""},
    +		{"R_PPC64_PCREL_OPT", Const, 20, ""},
    +		{"R_PPC64_PLT16_HA", Const, 20, ""},
    +		{"R_PPC64_PLT16_HI", Const, 20, ""},
    +		{"R_PPC64_PLT16_LO", Const, 20, ""},
    +		{"R_PPC64_PLT16_LO_DS", Const, 10, ""},
    +		{"R_PPC64_PLT32", Const, 20, ""},
    +		{"R_PPC64_PLT64", Const, 20, ""},
    +		{"R_PPC64_PLTCALL", Const, 20, ""},
    +		{"R_PPC64_PLTCALL_NOTOC", Const, 20, ""},
    +		{"R_PPC64_PLTGOT16", Const, 10, ""},
    +		{"R_PPC64_PLTGOT16_DS", Const, 10, ""},
    +		{"R_PPC64_PLTGOT16_HA", Const, 10, ""},
    +		{"R_PPC64_PLTGOT16_HI", Const, 10, ""},
    +		{"R_PPC64_PLTGOT16_LO", Const, 10, ""},
    +		{"R_PPC64_PLTGOT_LO_DS", Const, 10, ""},
    +		{"R_PPC64_PLTREL32", Const, 20, ""},
    +		{"R_PPC64_PLTREL64", Const, 20, ""},
    +		{"R_PPC64_PLTSEQ", Const, 20, ""},
    +		{"R_PPC64_PLTSEQ_NOTOC", Const, 20, ""},
    +		{"R_PPC64_PLT_PCREL34", Const, 20, ""},
    +		{"R_PPC64_PLT_PCREL34_NOTOC", Const, 20, ""},
    +		{"R_PPC64_REL14", Const, 5, ""},
    +		{"R_PPC64_REL14_BRNTAKEN", Const, 5, ""},
    +		{"R_PPC64_REL14_BRTAKEN", Const, 5, ""},
    +		{"R_PPC64_REL16", Const, 5, ""},
    +		{"R_PPC64_REL16DX_HA", Const, 10, ""},
    +		{"R_PPC64_REL16_HA", Const, 5, ""},
    +		{"R_PPC64_REL16_HI", Const, 5, ""},
    +		{"R_PPC64_REL16_HIGH", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHA", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHER", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHER34", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHERA", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHERA34", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHEST", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHEST34", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHESTA", Const, 20, ""},
    +		{"R_PPC64_REL16_HIGHESTA34", Const, 20, ""},
    +		{"R_PPC64_REL16_LO", Const, 5, ""},
    +		{"R_PPC64_REL24", Const, 5, ""},
    +		{"R_PPC64_REL24_NOTOC", Const, 10, ""},
    +		{"R_PPC64_REL24_P9NOTOC", Const, 21, ""},
    +		{"R_PPC64_REL30", Const, 20, ""},
    +		{"R_PPC64_REL32", Const, 5, ""},
    +		{"R_PPC64_REL64", Const, 5, ""},
    +		{"R_PPC64_RELATIVE", Const, 18, ""},
    +		{"R_PPC64_SECTOFF", Const, 20, ""},
    +		{"R_PPC64_SECTOFF_DS", Const, 10, ""},
    +		{"R_PPC64_SECTOFF_HA", Const, 20, ""},
    +		{"R_PPC64_SECTOFF_HI", Const, 20, ""},
    +		{"R_PPC64_SECTOFF_LO", Const, 20, ""},
    +		{"R_PPC64_SECTOFF_LO_DS", Const, 10, ""},
    +		{"R_PPC64_TLS", Const, 5, ""},
    +		{"R_PPC64_TLSGD", Const, 5, ""},
    +		{"R_PPC64_TLSLD", Const, 5, ""},
    +		{"R_PPC64_TOC", Const, 5, ""},
    +		{"R_PPC64_TOC16", Const, 5, ""},
    +		{"R_PPC64_TOC16_DS", Const, 5, ""},
    +		{"R_PPC64_TOC16_HA", Const, 5, ""},
    +		{"R_PPC64_TOC16_HI", Const, 5, ""},
    +		{"R_PPC64_TOC16_LO", Const, 5, ""},
    +		{"R_PPC64_TOC16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_TOCSAVE", Const, 10, ""},
    +		{"R_PPC64_TPREL16", Const, 5, ""},
    +		{"R_PPC64_TPREL16_DS", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HA", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HI", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HIGH", Const, 10, ""},
    +		{"R_PPC64_TPREL16_HIGHA", Const, 10, ""},
    +		{"R_PPC64_TPREL16_HIGHER", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HIGHERA", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HIGHEST", Const, 5, ""},
    +		{"R_PPC64_TPREL16_HIGHESTA", Const, 5, ""},
    +		{"R_PPC64_TPREL16_LO", Const, 5, ""},
    +		{"R_PPC64_TPREL16_LO_DS", Const, 5, ""},
    +		{"R_PPC64_TPREL34", Const, 20, ""},
    +		{"R_PPC64_TPREL64", Const, 5, ""},
    +		{"R_PPC64_UADDR16", Const, 20, ""},
    +		{"R_PPC64_UADDR32", Const, 20, ""},
    +		{"R_PPC64_UADDR64", Const, 20, ""},
    +		{"R_PPC_ADDR14", Const, 0, ""},
    +		{"R_PPC_ADDR14_BRNTAKEN", Const, 0, ""},
    +		{"R_PPC_ADDR14_BRTAKEN", Const, 0, ""},
    +		{"R_PPC_ADDR16", Const, 0, ""},
    +		{"R_PPC_ADDR16_HA", Const, 0, ""},
    +		{"R_PPC_ADDR16_HI", Const, 0, ""},
    +		{"R_PPC_ADDR16_LO", Const, 0, ""},
    +		{"R_PPC_ADDR24", Const, 0, ""},
    +		{"R_PPC_ADDR32", Const, 0, ""},
    +		{"R_PPC_COPY", Const, 0, ""},
    +		{"R_PPC_DTPMOD32", Const, 0, ""},
    +		{"R_PPC_DTPREL16", Const, 0, ""},
    +		{"R_PPC_DTPREL16_HA", Const, 0, ""},
    +		{"R_PPC_DTPREL16_HI", Const, 0, ""},
    +		{"R_PPC_DTPREL16_LO", Const, 0, ""},
    +		{"R_PPC_DTPREL32", Const, 0, ""},
    +		{"R_PPC_EMB_BIT_FLD", Const, 0, ""},
    +		{"R_PPC_EMB_MRKREF", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR16", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR16_HA", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR16_HI", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR16_LO", Const, 0, ""},
    +		{"R_PPC_EMB_NADDR32", Const, 0, ""},
    +		{"R_PPC_EMB_RELSDA", Const, 0, ""},
    +		{"R_PPC_EMB_RELSEC16", Const, 0, ""},
    +		{"R_PPC_EMB_RELST_HA", Const, 0, ""},
    +		{"R_PPC_EMB_RELST_HI", Const, 0, ""},
    +		{"R_PPC_EMB_RELST_LO", Const, 0, ""},
    +		{"R_PPC_EMB_SDA21", Const, 0, ""},
    +		{"R_PPC_EMB_SDA2I16", Const, 0, ""},
    +		{"R_PPC_EMB_SDA2REL", Const, 0, ""},
    +		{"R_PPC_EMB_SDAI16", Const, 0, ""},
    +		{"R_PPC_GLOB_DAT", Const, 0, ""},
    +		{"R_PPC_GOT16", Const, 0, ""},
    +		{"R_PPC_GOT16_HA", Const, 0, ""},
    +		{"R_PPC_GOT16_HI", Const, 0, ""},
    +		{"R_PPC_GOT16_LO", Const, 0, ""},
    +		{"R_PPC_GOT_TLSGD16", Const, 0, ""},
    +		{"R_PPC_GOT_TLSGD16_HA", Const, 0, ""},
    +		{"R_PPC_GOT_TLSGD16_HI", Const, 0, ""},
    +		{"R_PPC_GOT_TLSGD16_LO", Const, 0, ""},
    +		{"R_PPC_GOT_TLSLD16", Const, 0, ""},
    +		{"R_PPC_GOT_TLSLD16_HA", Const, 0, ""},
    +		{"R_PPC_GOT_TLSLD16_HI", Const, 0, ""},
    +		{"R_PPC_GOT_TLSLD16_LO", Const, 0, ""},
    +		{"R_PPC_GOT_TPREL16", Const, 0, ""},
    +		{"R_PPC_GOT_TPREL16_HA", Const, 0, ""},
    +		{"R_PPC_GOT_TPREL16_HI", Const, 0, ""},
    +		{"R_PPC_GOT_TPREL16_LO", Const, 0, ""},
    +		{"R_PPC_JMP_SLOT", Const, 0, ""},
    +		{"R_PPC_LOCAL24PC", Const, 0, ""},
    +		{"R_PPC_NONE", Const, 0, ""},
    +		{"R_PPC_PLT16_HA", Const, 0, ""},
    +		{"R_PPC_PLT16_HI", Const, 0, ""},
    +		{"R_PPC_PLT16_LO", Const, 0, ""},
    +		{"R_PPC_PLT32", Const, 0, ""},
    +		{"R_PPC_PLTREL24", Const, 0, ""},
    +		{"R_PPC_PLTREL32", Const, 0, ""},
    +		{"R_PPC_REL14", Const, 0, ""},
    +		{"R_PPC_REL14_BRNTAKEN", Const, 0, ""},
    +		{"R_PPC_REL14_BRTAKEN", Const, 0, ""},
    +		{"R_PPC_REL24", Const, 0, ""},
    +		{"R_PPC_REL32", Const, 0, ""},
    +		{"R_PPC_RELATIVE", Const, 0, ""},
    +		{"R_PPC_SDAREL16", Const, 0, ""},
    +		{"R_PPC_SECTOFF", Const, 0, ""},
    +		{"R_PPC_SECTOFF_HA", Const, 0, ""},
    +		{"R_PPC_SECTOFF_HI", Const, 0, ""},
    +		{"R_PPC_SECTOFF_LO", Const, 0, ""},
    +		{"R_PPC_TLS", Const, 0, ""},
    +		{"R_PPC_TPREL16", Const, 0, ""},
    +		{"R_PPC_TPREL16_HA", Const, 0, ""},
    +		{"R_PPC_TPREL16_HI", Const, 0, ""},
    +		{"R_PPC_TPREL16_LO", Const, 0, ""},
    +		{"R_PPC_TPREL32", Const, 0, ""},
    +		{"R_PPC_UADDR16", Const, 0, ""},
    +		{"R_PPC_UADDR32", Const, 0, ""},
    +		{"R_RISCV", Type, 11, ""},
    +		{"R_RISCV_32", Const, 11, ""},
    +		{"R_RISCV_32_PCREL", Const, 12, ""},
    +		{"R_RISCV_64", Const, 11, ""},
    +		{"R_RISCV_ADD16", Const, 11, ""},
    +		{"R_RISCV_ADD32", Const, 11, ""},
    +		{"R_RISCV_ADD64", Const, 11, ""},
    +		{"R_RISCV_ADD8", Const, 11, ""},
    +		{"R_RISCV_ALIGN", Const, 11, ""},
    +		{"R_RISCV_BRANCH", Const, 11, ""},
    +		{"R_RISCV_CALL", Const, 11, ""},
    +		{"R_RISCV_CALL_PLT", Const, 11, ""},
    +		{"R_RISCV_COPY", Const, 11, ""},
    +		{"R_RISCV_GNU_VTENTRY", Const, 11, ""},
    +		{"R_RISCV_GNU_VTINHERIT", Const, 11, ""},
    +		{"R_RISCV_GOT_HI20", Const, 11, ""},
    +		{"R_RISCV_GPREL_I", Const, 11, ""},
    +		{"R_RISCV_GPREL_S", Const, 11, ""},
    +		{"R_RISCV_HI20", Const, 11, ""},
    +		{"R_RISCV_JAL", Const, 11, ""},
    +		{"R_RISCV_JUMP_SLOT", Const, 11, ""},
    +		{"R_RISCV_LO12_I", Const, 11, ""},
    +		{"R_RISCV_LO12_S", Const, 11, ""},
    +		{"R_RISCV_NONE", Const, 11, ""},
    +		{"R_RISCV_PCREL_HI20", Const, 11, ""},
    +		{"R_RISCV_PCREL_LO12_I", Const, 11, ""},
    +		{"R_RISCV_PCREL_LO12_S", Const, 11, ""},
    +		{"R_RISCV_RELATIVE", Const, 11, ""},
    +		{"R_RISCV_RELAX", Const, 11, ""},
    +		{"R_RISCV_RVC_BRANCH", Const, 11, ""},
    +		{"R_RISCV_RVC_JUMP", Const, 11, ""},
    +		{"R_RISCV_RVC_LUI", Const, 11, ""},
    +		{"R_RISCV_SET16", Const, 11, ""},
    +		{"R_RISCV_SET32", Const, 11, ""},
    +		{"R_RISCV_SET6", Const, 11, ""},
    +		{"R_RISCV_SET8", Const, 11, ""},
    +		{"R_RISCV_SUB16", Const, 11, ""},
    +		{"R_RISCV_SUB32", Const, 11, ""},
    +		{"R_RISCV_SUB6", Const, 11, ""},
    +		{"R_RISCV_SUB64", Const, 11, ""},
    +		{"R_RISCV_SUB8", Const, 11, ""},
    +		{"R_RISCV_TLS_DTPMOD32", Const, 11, ""},
    +		{"R_RISCV_TLS_DTPMOD64", Const, 11, ""},
    +		{"R_RISCV_TLS_DTPREL32", Const, 11, ""},
    +		{"R_RISCV_TLS_DTPREL64", Const, 11, ""},
    +		{"R_RISCV_TLS_GD_HI20", Const, 11, ""},
    +		{"R_RISCV_TLS_GOT_HI20", Const, 11, ""},
    +		{"R_RISCV_TLS_TPREL32", Const, 11, ""},
    +		{"R_RISCV_TLS_TPREL64", Const, 11, ""},
    +		{"R_RISCV_TPREL_ADD", Const, 11, ""},
    +		{"R_RISCV_TPREL_HI20", Const, 11, ""},
    +		{"R_RISCV_TPREL_I", Const, 11, ""},
    +		{"R_RISCV_TPREL_LO12_I", Const, 11, ""},
    +		{"R_RISCV_TPREL_LO12_S", Const, 11, ""},
    +		{"R_RISCV_TPREL_S", Const, 11, ""},
    +		{"R_SPARC", Type, 0, ""},
    +		{"R_SPARC_10", Const, 0, ""},
    +		{"R_SPARC_11", Const, 0, ""},
    +		{"R_SPARC_13", Const, 0, ""},
    +		{"R_SPARC_16", Const, 0, ""},
    +		{"R_SPARC_22", Const, 0, ""},
    +		{"R_SPARC_32", Const, 0, ""},
    +		{"R_SPARC_5", Const, 0, ""},
    +		{"R_SPARC_6", Const, 0, ""},
    +		{"R_SPARC_64", Const, 0, ""},
    +		{"R_SPARC_7", Const, 0, ""},
    +		{"R_SPARC_8", Const, 0, ""},
    +		{"R_SPARC_COPY", Const, 0, ""},
    +		{"R_SPARC_DISP16", Const, 0, ""},
    +		{"R_SPARC_DISP32", Const, 0, ""},
    +		{"R_SPARC_DISP64", Const, 0, ""},
    +		{"R_SPARC_DISP8", Const, 0, ""},
    +		{"R_SPARC_GLOB_DAT", Const, 0, ""},
    +		{"R_SPARC_GLOB_JMP", Const, 0, ""},
    +		{"R_SPARC_GOT10", Const, 0, ""},
    +		{"R_SPARC_GOT13", Const, 0, ""},
    +		{"R_SPARC_GOT22", Const, 0, ""},
    +		{"R_SPARC_H44", Const, 0, ""},
    +		{"R_SPARC_HH22", Const, 0, ""},
    +		{"R_SPARC_HI22", Const, 0, ""},
    +		{"R_SPARC_HIPLT22", Const, 0, ""},
    +		{"R_SPARC_HIX22", Const, 0, ""},
    +		{"R_SPARC_HM10", Const, 0, ""},
    +		{"R_SPARC_JMP_SLOT", Const, 0, ""},
    +		{"R_SPARC_L44", Const, 0, ""},
    +		{"R_SPARC_LM22", Const, 0, ""},
    +		{"R_SPARC_LO10", Const, 0, ""},
    +		{"R_SPARC_LOPLT10", Const, 0, ""},
    +		{"R_SPARC_LOX10", Const, 0, ""},
    +		{"R_SPARC_M44", Const, 0, ""},
    +		{"R_SPARC_NONE", Const, 0, ""},
    +		{"R_SPARC_OLO10", Const, 0, ""},
    +		{"R_SPARC_PC10", Const, 0, ""},
    +		{"R_SPARC_PC22", Const, 0, ""},
    +		{"R_SPARC_PCPLT10", Const, 0, ""},
    +		{"R_SPARC_PCPLT22", Const, 0, ""},
    +		{"R_SPARC_PCPLT32", Const, 0, ""},
    +		{"R_SPARC_PC_HH22", Const, 0, ""},
    +		{"R_SPARC_PC_HM10", Const, 0, ""},
    +		{"R_SPARC_PC_LM22", Const, 0, ""},
    +		{"R_SPARC_PLT32", Const, 0, ""},
    +		{"R_SPARC_PLT64", Const, 0, ""},
    +		{"R_SPARC_REGISTER", Const, 0, ""},
    +		{"R_SPARC_RELATIVE", Const, 0, ""},
    +		{"R_SPARC_UA16", Const, 0, ""},
    +		{"R_SPARC_UA32", Const, 0, ""},
    +		{"R_SPARC_UA64", Const, 0, ""},
    +		{"R_SPARC_WDISP16", Const, 0, ""},
    +		{"R_SPARC_WDISP19", Const, 0, ""},
    +		{"R_SPARC_WDISP22", Const, 0, ""},
    +		{"R_SPARC_WDISP30", Const, 0, ""},
    +		{"R_SPARC_WPLT30", Const, 0, ""},
    +		{"R_SYM32", Func, 0, "func(info uint32) uint32"},
    +		{"R_SYM64", Func, 0, "func(info uint64) uint32"},
    +		{"R_TYPE32", Func, 0, "func(info uint32) uint32"},
    +		{"R_TYPE64", Func, 0, "func(info uint64) uint32"},
    +		{"R_X86_64", Type, 0, ""},
    +		{"R_X86_64_16", Const, 0, ""},
    +		{"R_X86_64_32", Const, 0, ""},
    +		{"R_X86_64_32S", Const, 0, ""},
    +		{"R_X86_64_64", Const, 0, ""},
    +		{"R_X86_64_8", Const, 0, ""},
    +		{"R_X86_64_COPY", Const, 0, ""},
    +		{"R_X86_64_DTPMOD64", Const, 0, ""},
    +		{"R_X86_64_DTPOFF32", Const, 0, ""},
    +		{"R_X86_64_DTPOFF64", Const, 0, ""},
    +		{"R_X86_64_GLOB_DAT", Const, 0, ""},
    +		{"R_X86_64_GOT32", Const, 0, ""},
    +		{"R_X86_64_GOT64", Const, 10, ""},
    +		{"R_X86_64_GOTOFF64", Const, 10, ""},
    +		{"R_X86_64_GOTPC32", Const, 10, ""},
    +		{"R_X86_64_GOTPC32_TLSDESC", Const, 10, ""},
    +		{"R_X86_64_GOTPC64", Const, 10, ""},
    +		{"R_X86_64_GOTPCREL", Const, 0, ""},
    +		{"R_X86_64_GOTPCREL64", Const, 10, ""},
    +		{"R_X86_64_GOTPCRELX", Const, 10, ""},
    +		{"R_X86_64_GOTPLT64", Const, 10, ""},
    +		{"R_X86_64_GOTTPOFF", Const, 0, ""},
    +		{"R_X86_64_IRELATIVE", Const, 10, ""},
    +		{"R_X86_64_JMP_SLOT", Const, 0, ""},
    +		{"R_X86_64_NONE", Const, 0, ""},
    +		{"R_X86_64_PC16", Const, 0, ""},
    +		{"R_X86_64_PC32", Const, 0, ""},
    +		{"R_X86_64_PC32_BND", Const, 10, ""},
    +		{"R_X86_64_PC64", Const, 10, ""},
    +		{"R_X86_64_PC8", Const, 0, ""},
    +		{"R_X86_64_PLT32", Const, 0, ""},
    +		{"R_X86_64_PLT32_BND", Const, 10, ""},
    +		{"R_X86_64_PLTOFF64", Const, 10, ""},
    +		{"R_X86_64_RELATIVE", Const, 0, ""},
    +		{"R_X86_64_RELATIVE64", Const, 10, ""},
    +		{"R_X86_64_REX_GOTPCRELX", Const, 10, ""},
    +		{"R_X86_64_SIZE32", Const, 10, ""},
    +		{"R_X86_64_SIZE64", Const, 10, ""},
    +		{"R_X86_64_TLSDESC", Const, 10, ""},
    +		{"R_X86_64_TLSDESC_CALL", Const, 10, ""},
    +		{"R_X86_64_TLSGD", Const, 0, ""},
    +		{"R_X86_64_TLSLD", Const, 0, ""},
    +		{"R_X86_64_TPOFF32", Const, 0, ""},
    +		{"R_X86_64_TPOFF64", Const, 0, ""},
    +		{"Rel32", Type, 0, ""},
    +		{"Rel32.Info", Field, 0, ""},
    +		{"Rel32.Off", Field, 0, ""},
    +		{"Rel64", Type, 0, ""},
    +		{"Rel64.Info", Field, 0, ""},
    +		{"Rel64.Off", Field, 0, ""},
    +		{"Rela32", Type, 0, ""},
    +		{"Rela32.Addend", Field, 0, ""},
    +		{"Rela32.Info", Field, 0, ""},
    +		{"Rela32.Off", Field, 0, ""},
    +		{"Rela64", Type, 0, ""},
    +		{"Rela64.Addend", Field, 0, ""},
    +		{"Rela64.Info", Field, 0, ""},
    +		{"Rela64.Off", Field, 0, ""},
    +		{"SHF_ALLOC", Const, 0, ""},
    +		{"SHF_COMPRESSED", Const, 6, ""},
    +		{"SHF_EXECINSTR", Const, 0, ""},
    +		{"SHF_GROUP", Const, 0, ""},
    +		{"SHF_INFO_LINK", Const, 0, ""},
    +		{"SHF_LINK_ORDER", Const, 0, ""},
    +		{"SHF_MASKOS", Const, 0, ""},
    +		{"SHF_MASKPROC", Const, 0, ""},
    +		{"SHF_MERGE", Const, 0, ""},
    +		{"SHF_OS_NONCONFORMING", Const, 0, ""},
    +		{"SHF_STRINGS", Const, 0, ""},
    +		{"SHF_TLS", Const, 0, ""},
    +		{"SHF_WRITE", Const, 0, ""},
    +		{"SHN_ABS", Const, 0, ""},
    +		{"SHN_COMMON", Const, 0, ""},
    +		{"SHN_HIOS", Const, 0, ""},
    +		{"SHN_HIPROC", Const, 0, ""},
    +		{"SHN_HIRESERVE", Const, 0, ""},
    +		{"SHN_LOOS", Const, 0, ""},
    +		{"SHN_LOPROC", Const, 0, ""},
    +		{"SHN_LORESERVE", Const, 0, ""},
    +		{"SHN_UNDEF", Const, 0, ""},
    +		{"SHN_XINDEX", Const, 0, ""},
    +		{"SHT_DYNAMIC", Const, 0, ""},
    +		{"SHT_DYNSYM", Const, 0, ""},
    +		{"SHT_FINI_ARRAY", Const, 0, ""},
    +		{"SHT_GNU_ATTRIBUTES", Const, 0, ""},
    +		{"SHT_GNU_HASH", Const, 0, ""},
    +		{"SHT_GNU_LIBLIST", Const, 0, ""},
    +		{"SHT_GNU_VERDEF", Const, 0, ""},
    +		{"SHT_GNU_VERNEED", Const, 0, ""},
    +		{"SHT_GNU_VERSYM", Const, 0, ""},
    +		{"SHT_GROUP", Const, 0, ""},
    +		{"SHT_HASH", Const, 0, ""},
    +		{"SHT_HIOS", Const, 0, ""},
    +		{"SHT_HIPROC", Const, 0, ""},
    +		{"SHT_HIUSER", Const, 0, ""},
    +		{"SHT_INIT_ARRAY", Const, 0, ""},
    +		{"SHT_LOOS", Const, 0, ""},
    +		{"SHT_LOPROC", Const, 0, ""},
    +		{"SHT_LOUSER", Const, 0, ""},
    +		{"SHT_MIPS_ABIFLAGS", Const, 17, ""},
    +		{"SHT_NOBITS", Const, 0, ""},
    +		{"SHT_NOTE", Const, 0, ""},
    +		{"SHT_NULL", Const, 0, ""},
    +		{"SHT_PREINIT_ARRAY", Const, 0, ""},
    +		{"SHT_PROGBITS", Const, 0, ""},
    +		{"SHT_REL", Const, 0, ""},
    +		{"SHT_RELA", Const, 0, ""},
    +		{"SHT_RISCV_ATTRIBUTES", Const, 25, ""},
    +		{"SHT_SHLIB", Const, 0, ""},
    +		{"SHT_STRTAB", Const, 0, ""},
    +		{"SHT_SYMTAB", Const, 0, ""},
    +		{"SHT_SYMTAB_SHNDX", Const, 0, ""},
    +		{"STB_GLOBAL", Const, 0, ""},
    +		{"STB_HIOS", Const, 0, ""},
    +		{"STB_HIPROC", Const, 0, ""},
    +		{"STB_LOCAL", Const, 0, ""},
    +		{"STB_LOOS", Const, 0, ""},
    +		{"STB_LOPROC", Const, 0, ""},
    +		{"STB_WEAK", Const, 0, ""},
    +		{"STT_COMMON", Const, 0, ""},
    +		{"STT_FILE", Const, 0, ""},
    +		{"STT_FUNC", Const, 0, ""},
    +		{"STT_GNU_IFUNC", Const, 23, ""},
    +		{"STT_HIOS", Const, 0, ""},
    +		{"STT_HIPROC", Const, 0, ""},
    +		{"STT_LOOS", Const, 0, ""},
    +		{"STT_LOPROC", Const, 0, ""},
    +		{"STT_NOTYPE", Const, 0, ""},
    +		{"STT_OBJECT", Const, 0, ""},
    +		{"STT_RELC", Const, 23, ""},
    +		{"STT_SECTION", Const, 0, ""},
    +		{"STT_SRELC", Const, 23, ""},
    +		{"STT_TLS", Const, 0, ""},
    +		{"STV_DEFAULT", Const, 0, ""},
    +		{"STV_HIDDEN", Const, 0, ""},
    +		{"STV_INTERNAL", Const, 0, ""},
    +		{"STV_PROTECTED", Const, 0, ""},
    +		{"ST_BIND", Func, 0, "func(info uint8) SymBind"},
    +		{"ST_INFO", Func, 0, "func(bind SymBind, typ SymType) uint8"},
    +		{"ST_TYPE", Func, 0, "func(info uint8) SymType"},
    +		{"ST_VISIBILITY", Func, 0, "func(other uint8) SymVis"},
    +		{"Section", Type, 0, ""},
    +		{"Section.ReaderAt", Field, 0, ""},
    +		{"Section.SectionHeader", Field, 0, ""},
    +		{"Section32", Type, 0, ""},
    +		{"Section32.Addr", Field, 0, ""},
    +		{"Section32.Addralign", Field, 0, ""},
    +		{"Section32.Entsize", Field, 0, ""},
    +		{"Section32.Flags", Field, 0, ""},
    +		{"Section32.Info", Field, 0, ""},
    +		{"Section32.Link", Field, 0, ""},
    +		{"Section32.Name", Field, 0, ""},
    +		{"Section32.Off", Field, 0, ""},
    +		{"Section32.Size", Field, 0, ""},
    +		{"Section32.Type", Field, 0, ""},
    +		{"Section64", Type, 0, ""},
    +		{"Section64.Addr", Field, 0, ""},
    +		{"Section64.Addralign", Field, 0, ""},
    +		{"Section64.Entsize", Field, 0, ""},
    +		{"Section64.Flags", Field, 0, ""},
    +		{"Section64.Info", Field, 0, ""},
    +		{"Section64.Link", Field, 0, ""},
    +		{"Section64.Name", Field, 0, ""},
    +		{"Section64.Off", Field, 0, ""},
    +		{"Section64.Size", Field, 0, ""},
    +		{"Section64.Type", Field, 0, ""},
    +		{"SectionFlag", Type, 0, ""},
    +		{"SectionHeader", Type, 0, ""},
    +		{"SectionHeader.Addr", Field, 0, ""},
    +		{"SectionHeader.Addralign", Field, 0, ""},
    +		{"SectionHeader.Entsize", Field, 0, ""},
    +		{"SectionHeader.FileSize", Field, 6, ""},
    +		{"SectionHeader.Flags", Field, 0, ""},
    +		{"SectionHeader.Info", Field, 0, ""},
    +		{"SectionHeader.Link", Field, 0, ""},
    +		{"SectionHeader.Name", Field, 0, ""},
    +		{"SectionHeader.Offset", Field, 0, ""},
    +		{"SectionHeader.Size", Field, 0, ""},
    +		{"SectionHeader.Type", Field, 0, ""},
    +		{"SectionIndex", Type, 0, ""},
    +		{"SectionType", Type, 0, ""},
    +		{"Sym32", Type, 0, ""},
    +		{"Sym32.Info", Field, 0, ""},
    +		{"Sym32.Name", Field, 0, ""},
    +		{"Sym32.Other", Field, 0, ""},
    +		{"Sym32.Shndx", Field, 0, ""},
    +		{"Sym32.Size", Field, 0, ""},
    +		{"Sym32.Value", Field, 0, ""},
    +		{"Sym32Size", Const, 0, ""},
    +		{"Sym64", Type, 0, ""},
    +		{"Sym64.Info", Field, 0, ""},
    +		{"Sym64.Name", Field, 0, ""},
    +		{"Sym64.Other", Field, 0, ""},
    +		{"Sym64.Shndx", Field, 0, ""},
    +		{"Sym64.Size", Field, 0, ""},
    +		{"Sym64.Value", Field, 0, ""},
    +		{"Sym64Size", Const, 0, ""},
    +		{"SymBind", Type, 0, ""},
    +		{"SymType", Type, 0, ""},
    +		{"SymVis", Type, 0, ""},
    +		{"Symbol", Type, 0, ""},
    +		{"Symbol.HasVersion", Field, 24, ""},
    +		{"Symbol.Info", Field, 0, ""},
    +		{"Symbol.Library", Field, 13, ""},
    +		{"Symbol.Name", Field, 0, ""},
    +		{"Symbol.Other", Field, 0, ""},
    +		{"Symbol.Section", Field, 0, ""},
    +		{"Symbol.Size", Field, 0, ""},
    +		{"Symbol.Value", Field, 0, ""},
    +		{"Symbol.Version", Field, 13, ""},
    +		{"Symbol.VersionIndex", Field, 24, ""},
    +		{"Type", Type, 0, ""},
    +		{"VER_FLG_BASE", Const, 24, ""},
    +		{"VER_FLG_INFO", Const, 24, ""},
    +		{"VER_FLG_WEAK", Const, 24, ""},
    +		{"Version", Type, 0, ""},
    +		{"VersionIndex", Type, 24, ""},
     	},
     	"debug/gosym": {
    -		{"(*DecodingError).Error", Method, 0},
    -		{"(*LineTable).LineToPC", Method, 0},
    -		{"(*LineTable).PCToLine", Method, 0},
    -		{"(*Sym).BaseName", Method, 0},
    -		{"(*Sym).PackageName", Method, 0},
    -		{"(*Sym).ReceiverName", Method, 0},
    -		{"(*Sym).Static", Method, 0},
    -		{"(*Table).LineToPC", Method, 0},
    -		{"(*Table).LookupFunc", Method, 0},
    -		{"(*Table).LookupSym", Method, 0},
    -		{"(*Table).PCToFunc", Method, 0},
    -		{"(*Table).PCToLine", Method, 0},
    -		{"(*Table).SymByAddr", Method, 0},
    -		{"(*UnknownLineError).Error", Method, 0},
    -		{"(Func).BaseName", Method, 0},
    -		{"(Func).PackageName", Method, 0},
    -		{"(Func).ReceiverName", Method, 0},
    -		{"(Func).Static", Method, 0},
    -		{"(UnknownFileError).Error", Method, 0},
    -		{"DecodingError", Type, 0},
    -		{"Func", Type, 0},
    -		{"Func.End", Field, 0},
    -		{"Func.Entry", Field, 0},
    -		{"Func.FrameSize", Field, 0},
    -		{"Func.LineTable", Field, 0},
    -		{"Func.Locals", Field, 0},
    -		{"Func.Obj", Field, 0},
    -		{"Func.Params", Field, 0},
    -		{"Func.Sym", Field, 0},
    -		{"LineTable", Type, 0},
    -		{"LineTable.Data", Field, 0},
    -		{"LineTable.Line", Field, 0},
    -		{"LineTable.PC", Field, 0},
    -		{"NewLineTable", Func, 0},
    -		{"NewTable", Func, 0},
    -		{"Obj", Type, 0},
    -		{"Obj.Funcs", Field, 0},
    -		{"Obj.Paths", Field, 0},
    -		{"Sym", Type, 0},
    -		{"Sym.Func", Field, 0},
    -		{"Sym.GoType", Field, 0},
    -		{"Sym.Name", Field, 0},
    -		{"Sym.Type", Field, 0},
    -		{"Sym.Value", Field, 0},
    -		{"Table", Type, 0},
    -		{"Table.Files", Field, 0},
    -		{"Table.Funcs", Field, 0},
    -		{"Table.Objs", Field, 0},
    -		{"Table.Syms", Field, 0},
    -		{"UnknownFileError", Type, 0},
    -		{"UnknownLineError", Type, 0},
    -		{"UnknownLineError.File", Field, 0},
    -		{"UnknownLineError.Line", Field, 0},
    +		{"(*DecodingError).Error", Method, 0, ""},
    +		{"(*LineTable).LineToPC", Method, 0, ""},
    +		{"(*LineTable).PCToLine", Method, 0, ""},
    +		{"(*Sym).BaseName", Method, 0, ""},
    +		{"(*Sym).PackageName", Method, 0, ""},
    +		{"(*Sym).ReceiverName", Method, 0, ""},
    +		{"(*Sym).Static", Method, 0, ""},
    +		{"(*Table).LineToPC", Method, 0, ""},
    +		{"(*Table).LookupFunc", Method, 0, ""},
    +		{"(*Table).LookupSym", Method, 0, ""},
    +		{"(*Table).PCToFunc", Method, 0, ""},
    +		{"(*Table).PCToLine", Method, 0, ""},
    +		{"(*Table).SymByAddr", Method, 0, ""},
    +		{"(*UnknownLineError).Error", Method, 0, ""},
    +		{"(Func).BaseName", Method, 0, ""},
    +		{"(Func).PackageName", Method, 0, ""},
    +		{"(Func).ReceiverName", Method, 0, ""},
    +		{"(Func).Static", Method, 0, ""},
    +		{"(UnknownFileError).Error", Method, 0, ""},
    +		{"DecodingError", Type, 0, ""},
    +		{"Func", Type, 0, ""},
    +		{"Func.End", Field, 0, ""},
    +		{"Func.Entry", Field, 0, ""},
    +		{"Func.FrameSize", Field, 0, ""},
    +		{"Func.LineTable", Field, 0, ""},
    +		{"Func.Locals", Field, 0, ""},
    +		{"Func.Obj", Field, 0, ""},
    +		{"Func.Params", Field, 0, ""},
    +		{"Func.Sym", Field, 0, ""},
    +		{"LineTable", Type, 0, ""},
    +		{"LineTable.Data", Field, 0, ""},
    +		{"LineTable.Line", Field, 0, ""},
    +		{"LineTable.PC", Field, 0, ""},
    +		{"NewLineTable", Func, 0, "func(data []byte, text uint64) *LineTable"},
    +		{"NewTable", Func, 0, "func(symtab []byte, pcln *LineTable) (*Table, error)"},
    +		{"Obj", Type, 0, ""},
    +		{"Obj.Funcs", Field, 0, ""},
    +		{"Obj.Paths", Field, 0, ""},
    +		{"Sym", Type, 0, ""},
    +		{"Sym.Func", Field, 0, ""},
    +		{"Sym.GoType", Field, 0, ""},
    +		{"Sym.Name", Field, 0, ""},
    +		{"Sym.Type", Field, 0, ""},
    +		{"Sym.Value", Field, 0, ""},
    +		{"Table", Type, 0, ""},
    +		{"Table.Files", Field, 0, ""},
    +		{"Table.Funcs", Field, 0, ""},
    +		{"Table.Objs", Field, 0, ""},
    +		{"Table.Syms", Field, 0, ""},
    +		{"UnknownFileError", Type, 0, ""},
    +		{"UnknownLineError", Type, 0, ""},
    +		{"UnknownLineError.File", Field, 0, ""},
    +		{"UnknownLineError.Line", Field, 0, ""},
     	},
     	"debug/macho": {
    -		{"(*FatFile).Close", Method, 3},
    -		{"(*File).Close", Method, 0},
    -		{"(*File).DWARF", Method, 0},
    -		{"(*File).ImportedLibraries", Method, 0},
    -		{"(*File).ImportedSymbols", Method, 0},
    -		{"(*File).Section", Method, 0},
    -		{"(*File).Segment", Method, 0},
    -		{"(*FormatError).Error", Method, 0},
    -		{"(*Section).Data", Method, 0},
    -		{"(*Section).Open", Method, 0},
    -		{"(*Segment).Data", Method, 0},
    -		{"(*Segment).Open", Method, 0},
    -		{"(Cpu).GoString", Method, 0},
    -		{"(Cpu).String", Method, 0},
    -		{"(Dylib).Raw", Method, 0},
    -		{"(Dysymtab).Raw", Method, 0},
    -		{"(FatArch).Close", Method, 3},
    -		{"(FatArch).DWARF", Method, 3},
    -		{"(FatArch).ImportedLibraries", Method, 3},
    -		{"(FatArch).ImportedSymbols", Method, 3},
    -		{"(FatArch).Section", Method, 3},
    -		{"(FatArch).Segment", Method, 3},
    -		{"(LoadBytes).Raw", Method, 0},
    -		{"(LoadCmd).GoString", Method, 0},
    -		{"(LoadCmd).String", Method, 0},
    -		{"(RelocTypeARM).GoString", Method, 10},
    -		{"(RelocTypeARM).String", Method, 10},
    -		{"(RelocTypeARM64).GoString", Method, 10},
    -		{"(RelocTypeARM64).String", Method, 10},
    -		{"(RelocTypeGeneric).GoString", Method, 10},
    -		{"(RelocTypeGeneric).String", Method, 10},
    -		{"(RelocTypeX86_64).GoString", Method, 10},
    -		{"(RelocTypeX86_64).String", Method, 10},
    -		{"(Rpath).Raw", Method, 10},
    -		{"(Section).ReadAt", Method, 0},
    -		{"(Segment).Raw", Method, 0},
    -		{"(Segment).ReadAt", Method, 0},
    -		{"(Symtab).Raw", Method, 0},
    -		{"(Type).GoString", Method, 10},
    -		{"(Type).String", Method, 10},
    -		{"ARM64_RELOC_ADDEND", Const, 10},
    -		{"ARM64_RELOC_BRANCH26", Const, 10},
    -		{"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10},
    -		{"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10},
    -		{"ARM64_RELOC_PAGE21", Const, 10},
    -		{"ARM64_RELOC_PAGEOFF12", Const, 10},
    -		{"ARM64_RELOC_POINTER_TO_GOT", Const, 10},
    -		{"ARM64_RELOC_SUBTRACTOR", Const, 10},
    -		{"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10},
    -		{"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10},
    -		{"ARM64_RELOC_UNSIGNED", Const, 10},
    -		{"ARM_RELOC_BR24", Const, 10},
    -		{"ARM_RELOC_HALF", Const, 10},
    -		{"ARM_RELOC_HALF_SECTDIFF", Const, 10},
    -		{"ARM_RELOC_LOCAL_SECTDIFF", Const, 10},
    -		{"ARM_RELOC_PAIR", Const, 10},
    -		{"ARM_RELOC_PB_LA_PTR", Const, 10},
    -		{"ARM_RELOC_SECTDIFF", Const, 10},
    -		{"ARM_RELOC_VANILLA", Const, 10},
    -		{"ARM_THUMB_32BIT_BRANCH", Const, 10},
    -		{"ARM_THUMB_RELOC_BR22", Const, 10},
    -		{"Cpu", Type, 0},
    -		{"Cpu386", Const, 0},
    -		{"CpuAmd64", Const, 0},
    -		{"CpuArm", Const, 3},
    -		{"CpuArm64", Const, 11},
    -		{"CpuPpc", Const, 3},
    -		{"CpuPpc64", Const, 3},
    -		{"Dylib", Type, 0},
    -		{"Dylib.CompatVersion", Field, 0},
    -		{"Dylib.CurrentVersion", Field, 0},
    -		{"Dylib.LoadBytes", Field, 0},
    -		{"Dylib.Name", Field, 0},
    -		{"Dylib.Time", Field, 0},
    -		{"DylibCmd", Type, 0},
    -		{"DylibCmd.Cmd", Field, 0},
    -		{"DylibCmd.CompatVersion", Field, 0},
    -		{"DylibCmd.CurrentVersion", Field, 0},
    -		{"DylibCmd.Len", Field, 0},
    -		{"DylibCmd.Name", Field, 0},
    -		{"DylibCmd.Time", Field, 0},
    -		{"Dysymtab", Type, 0},
    -		{"Dysymtab.DysymtabCmd", Field, 0},
    -		{"Dysymtab.IndirectSyms", Field, 0},
    -		{"Dysymtab.LoadBytes", Field, 0},
    -		{"DysymtabCmd", Type, 0},
    -		{"DysymtabCmd.Cmd", Field, 0},
    -		{"DysymtabCmd.Extrefsymoff", Field, 0},
    -		{"DysymtabCmd.Extreloff", Field, 0},
    -		{"DysymtabCmd.Iextdefsym", Field, 0},
    -		{"DysymtabCmd.Ilocalsym", Field, 0},
    -		{"DysymtabCmd.Indirectsymoff", Field, 0},
    -		{"DysymtabCmd.Iundefsym", Field, 0},
    -		{"DysymtabCmd.Len", Field, 0},
    -		{"DysymtabCmd.Locreloff", Field, 0},
    -		{"DysymtabCmd.Modtaboff", Field, 0},
    -		{"DysymtabCmd.Nextdefsym", Field, 0},
    -		{"DysymtabCmd.Nextrefsyms", Field, 0},
    -		{"DysymtabCmd.Nextrel", Field, 0},
    -		{"DysymtabCmd.Nindirectsyms", Field, 0},
    -		{"DysymtabCmd.Nlocalsym", Field, 0},
    -		{"DysymtabCmd.Nlocrel", Field, 0},
    -		{"DysymtabCmd.Nmodtab", Field, 0},
    -		{"DysymtabCmd.Ntoc", Field, 0},
    -		{"DysymtabCmd.Nundefsym", Field, 0},
    -		{"DysymtabCmd.Tocoffset", Field, 0},
    -		{"ErrNotFat", Var, 3},
    -		{"FatArch", Type, 3},
    -		{"FatArch.FatArchHeader", Field, 3},
    -		{"FatArch.File", Field, 3},
    -		{"FatArchHeader", Type, 3},
    -		{"FatArchHeader.Align", Field, 3},
    -		{"FatArchHeader.Cpu", Field, 3},
    -		{"FatArchHeader.Offset", Field, 3},
    -		{"FatArchHeader.Size", Field, 3},
    -		{"FatArchHeader.SubCpu", Field, 3},
    -		{"FatFile", Type, 3},
    -		{"FatFile.Arches", Field, 3},
    -		{"FatFile.Magic", Field, 3},
    -		{"File", Type, 0},
    -		{"File.ByteOrder", Field, 0},
    -		{"File.Dysymtab", Field, 0},
    -		{"File.FileHeader", Field, 0},
    -		{"File.Loads", Field, 0},
    -		{"File.Sections", Field, 0},
    -		{"File.Symtab", Field, 0},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.Cmdsz", Field, 0},
    -		{"FileHeader.Cpu", Field, 0},
    -		{"FileHeader.Flags", Field, 0},
    -		{"FileHeader.Magic", Field, 0},
    -		{"FileHeader.Ncmd", Field, 0},
    -		{"FileHeader.SubCpu", Field, 0},
    -		{"FileHeader.Type", Field, 0},
    -		{"FlagAllModsBound", Const, 10},
    -		{"FlagAllowStackExecution", Const, 10},
    -		{"FlagAppExtensionSafe", Const, 10},
    -		{"FlagBindAtLoad", Const, 10},
    -		{"FlagBindsToWeak", Const, 10},
    -		{"FlagCanonical", Const, 10},
    -		{"FlagDeadStrippableDylib", Const, 10},
    -		{"FlagDyldLink", Const, 10},
    -		{"FlagForceFlat", Const, 10},
    -		{"FlagHasTLVDescriptors", Const, 10},
    -		{"FlagIncrLink", Const, 10},
    -		{"FlagLazyInit", Const, 10},
    -		{"FlagNoFixPrebinding", Const, 10},
    -		{"FlagNoHeapExecution", Const, 10},
    -		{"FlagNoMultiDefs", Const, 10},
    -		{"FlagNoReexportedDylibs", Const, 10},
    -		{"FlagNoUndefs", Const, 10},
    -		{"FlagPIE", Const, 10},
    -		{"FlagPrebindable", Const, 10},
    -		{"FlagPrebound", Const, 10},
    -		{"FlagRootSafe", Const, 10},
    -		{"FlagSetuidSafe", Const, 10},
    -		{"FlagSplitSegs", Const, 10},
    -		{"FlagSubsectionsViaSymbols", Const, 10},
    -		{"FlagTwoLevel", Const, 10},
    -		{"FlagWeakDefines", Const, 10},
    -		{"FormatError", Type, 0},
    -		{"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10},
    -		{"GENERIC_RELOC_PAIR", Const, 10},
    -		{"GENERIC_RELOC_PB_LA_PTR", Const, 10},
    -		{"GENERIC_RELOC_SECTDIFF", Const, 10},
    -		{"GENERIC_RELOC_TLV", Const, 10},
    -		{"GENERIC_RELOC_VANILLA", Const, 10},
    -		{"Load", Type, 0},
    -		{"LoadBytes", Type, 0},
    -		{"LoadCmd", Type, 0},
    -		{"LoadCmdDylib", Const, 0},
    -		{"LoadCmdDylinker", Const, 0},
    -		{"LoadCmdDysymtab", Const, 0},
    -		{"LoadCmdRpath", Const, 10},
    -		{"LoadCmdSegment", Const, 0},
    -		{"LoadCmdSegment64", Const, 0},
    -		{"LoadCmdSymtab", Const, 0},
    -		{"LoadCmdThread", Const, 0},
    -		{"LoadCmdUnixThread", Const, 0},
    -		{"Magic32", Const, 0},
    -		{"Magic64", Const, 0},
    -		{"MagicFat", Const, 3},
    -		{"NewFatFile", Func, 3},
    -		{"NewFile", Func, 0},
    -		{"Nlist32", Type, 0},
    -		{"Nlist32.Desc", Field, 0},
    -		{"Nlist32.Name", Field, 0},
    -		{"Nlist32.Sect", Field, 0},
    -		{"Nlist32.Type", Field, 0},
    -		{"Nlist32.Value", Field, 0},
    -		{"Nlist64", Type, 0},
    -		{"Nlist64.Desc", Field, 0},
    -		{"Nlist64.Name", Field, 0},
    -		{"Nlist64.Sect", Field, 0},
    -		{"Nlist64.Type", Field, 0},
    -		{"Nlist64.Value", Field, 0},
    -		{"Open", Func, 0},
    -		{"OpenFat", Func, 3},
    -		{"Regs386", Type, 0},
    -		{"Regs386.AX", Field, 0},
    -		{"Regs386.BP", Field, 0},
    -		{"Regs386.BX", Field, 0},
    -		{"Regs386.CS", Field, 0},
    -		{"Regs386.CX", Field, 0},
    -		{"Regs386.DI", Field, 0},
    -		{"Regs386.DS", Field, 0},
    -		{"Regs386.DX", Field, 0},
    -		{"Regs386.ES", Field, 0},
    -		{"Regs386.FLAGS", Field, 0},
    -		{"Regs386.FS", Field, 0},
    -		{"Regs386.GS", Field, 0},
    -		{"Regs386.IP", Field, 0},
    -		{"Regs386.SI", Field, 0},
    -		{"Regs386.SP", Field, 0},
    -		{"Regs386.SS", Field, 0},
    -		{"RegsAMD64", Type, 0},
    -		{"RegsAMD64.AX", Field, 0},
    -		{"RegsAMD64.BP", Field, 0},
    -		{"RegsAMD64.BX", Field, 0},
    -		{"RegsAMD64.CS", Field, 0},
    -		{"RegsAMD64.CX", Field, 0},
    -		{"RegsAMD64.DI", Field, 0},
    -		{"RegsAMD64.DX", Field, 0},
    -		{"RegsAMD64.FLAGS", Field, 0},
    -		{"RegsAMD64.FS", Field, 0},
    -		{"RegsAMD64.GS", Field, 0},
    -		{"RegsAMD64.IP", Field, 0},
    -		{"RegsAMD64.R10", Field, 0},
    -		{"RegsAMD64.R11", Field, 0},
    -		{"RegsAMD64.R12", Field, 0},
    -		{"RegsAMD64.R13", Field, 0},
    -		{"RegsAMD64.R14", Field, 0},
    -		{"RegsAMD64.R15", Field, 0},
    -		{"RegsAMD64.R8", Field, 0},
    -		{"RegsAMD64.R9", Field, 0},
    -		{"RegsAMD64.SI", Field, 0},
    -		{"RegsAMD64.SP", Field, 0},
    -		{"Reloc", Type, 10},
    -		{"Reloc.Addr", Field, 10},
    -		{"Reloc.Extern", Field, 10},
    -		{"Reloc.Len", Field, 10},
    -		{"Reloc.Pcrel", Field, 10},
    -		{"Reloc.Scattered", Field, 10},
    -		{"Reloc.Type", Field, 10},
    -		{"Reloc.Value", Field, 10},
    -		{"RelocTypeARM", Type, 10},
    -		{"RelocTypeARM64", Type, 10},
    -		{"RelocTypeGeneric", Type, 10},
    -		{"RelocTypeX86_64", Type, 10},
    -		{"Rpath", Type, 10},
    -		{"Rpath.LoadBytes", Field, 10},
    -		{"Rpath.Path", Field, 10},
    -		{"RpathCmd", Type, 10},
    -		{"RpathCmd.Cmd", Field, 10},
    -		{"RpathCmd.Len", Field, 10},
    -		{"RpathCmd.Path", Field, 10},
    -		{"Section", Type, 0},
    -		{"Section.ReaderAt", Field, 0},
    -		{"Section.Relocs", Field, 10},
    -		{"Section.SectionHeader", Field, 0},
    -		{"Section32", Type, 0},
    -		{"Section32.Addr", Field, 0},
    -		{"Section32.Align", Field, 0},
    -		{"Section32.Flags", Field, 0},
    -		{"Section32.Name", Field, 0},
    -		{"Section32.Nreloc", Field, 0},
    -		{"Section32.Offset", Field, 0},
    -		{"Section32.Reloff", Field, 0},
    -		{"Section32.Reserve1", Field, 0},
    -		{"Section32.Reserve2", Field, 0},
    -		{"Section32.Seg", Field, 0},
    -		{"Section32.Size", Field, 0},
    -		{"Section64", Type, 0},
    -		{"Section64.Addr", Field, 0},
    -		{"Section64.Align", Field, 0},
    -		{"Section64.Flags", Field, 0},
    -		{"Section64.Name", Field, 0},
    -		{"Section64.Nreloc", Field, 0},
    -		{"Section64.Offset", Field, 0},
    -		{"Section64.Reloff", Field, 0},
    -		{"Section64.Reserve1", Field, 0},
    -		{"Section64.Reserve2", Field, 0},
    -		{"Section64.Reserve3", Field, 0},
    -		{"Section64.Seg", Field, 0},
    -		{"Section64.Size", Field, 0},
    -		{"SectionHeader", Type, 0},
    -		{"SectionHeader.Addr", Field, 0},
    -		{"SectionHeader.Align", Field, 0},
    -		{"SectionHeader.Flags", Field, 0},
    -		{"SectionHeader.Name", Field, 0},
    -		{"SectionHeader.Nreloc", Field, 0},
    -		{"SectionHeader.Offset", Field, 0},
    -		{"SectionHeader.Reloff", Field, 0},
    -		{"SectionHeader.Seg", Field, 0},
    -		{"SectionHeader.Size", Field, 0},
    -		{"Segment", Type, 0},
    -		{"Segment.LoadBytes", Field, 0},
    -		{"Segment.ReaderAt", Field, 0},
    -		{"Segment.SegmentHeader", Field, 0},
    -		{"Segment32", Type, 0},
    -		{"Segment32.Addr", Field, 0},
    -		{"Segment32.Cmd", Field, 0},
    -		{"Segment32.Filesz", Field, 0},
    -		{"Segment32.Flag", Field, 0},
    -		{"Segment32.Len", Field, 0},
    -		{"Segment32.Maxprot", Field, 0},
    -		{"Segment32.Memsz", Field, 0},
    -		{"Segment32.Name", Field, 0},
    -		{"Segment32.Nsect", Field, 0},
    -		{"Segment32.Offset", Field, 0},
    -		{"Segment32.Prot", Field, 0},
    -		{"Segment64", Type, 0},
    -		{"Segment64.Addr", Field, 0},
    -		{"Segment64.Cmd", Field, 0},
    -		{"Segment64.Filesz", Field, 0},
    -		{"Segment64.Flag", Field, 0},
    -		{"Segment64.Len", Field, 0},
    -		{"Segment64.Maxprot", Field, 0},
    -		{"Segment64.Memsz", Field, 0},
    -		{"Segment64.Name", Field, 0},
    -		{"Segment64.Nsect", Field, 0},
    -		{"Segment64.Offset", Field, 0},
    -		{"Segment64.Prot", Field, 0},
    -		{"SegmentHeader", Type, 0},
    -		{"SegmentHeader.Addr", Field, 0},
    -		{"SegmentHeader.Cmd", Field, 0},
    -		{"SegmentHeader.Filesz", Field, 0},
    -		{"SegmentHeader.Flag", Field, 0},
    -		{"SegmentHeader.Len", Field, 0},
    -		{"SegmentHeader.Maxprot", Field, 0},
    -		{"SegmentHeader.Memsz", Field, 0},
    -		{"SegmentHeader.Name", Field, 0},
    -		{"SegmentHeader.Nsect", Field, 0},
    -		{"SegmentHeader.Offset", Field, 0},
    -		{"SegmentHeader.Prot", Field, 0},
    -		{"Symbol", Type, 0},
    -		{"Symbol.Desc", Field, 0},
    -		{"Symbol.Name", Field, 0},
    -		{"Symbol.Sect", Field, 0},
    -		{"Symbol.Type", Field, 0},
    -		{"Symbol.Value", Field, 0},
    -		{"Symtab", Type, 0},
    -		{"Symtab.LoadBytes", Field, 0},
    -		{"Symtab.Syms", Field, 0},
    -		{"Symtab.SymtabCmd", Field, 0},
    -		{"SymtabCmd", Type, 0},
    -		{"SymtabCmd.Cmd", Field, 0},
    -		{"SymtabCmd.Len", Field, 0},
    -		{"SymtabCmd.Nsyms", Field, 0},
    -		{"SymtabCmd.Stroff", Field, 0},
    -		{"SymtabCmd.Strsize", Field, 0},
    -		{"SymtabCmd.Symoff", Field, 0},
    -		{"Thread", Type, 0},
    -		{"Thread.Cmd", Field, 0},
    -		{"Thread.Data", Field, 0},
    -		{"Thread.Len", Field, 0},
    -		{"Thread.Type", Field, 0},
    -		{"Type", Type, 0},
    -		{"TypeBundle", Const, 3},
    -		{"TypeDylib", Const, 3},
    -		{"TypeExec", Const, 0},
    -		{"TypeObj", Const, 0},
    -		{"X86_64_RELOC_BRANCH", Const, 10},
    -		{"X86_64_RELOC_GOT", Const, 10},
    -		{"X86_64_RELOC_GOT_LOAD", Const, 10},
    -		{"X86_64_RELOC_SIGNED", Const, 10},
    -		{"X86_64_RELOC_SIGNED_1", Const, 10},
    -		{"X86_64_RELOC_SIGNED_2", Const, 10},
    -		{"X86_64_RELOC_SIGNED_4", Const, 10},
    -		{"X86_64_RELOC_SUBTRACTOR", Const, 10},
    -		{"X86_64_RELOC_TLV", Const, 10},
    -		{"X86_64_RELOC_UNSIGNED", Const, 10},
    +		{"(*FatFile).Close", Method, 3, ""},
    +		{"(*File).Close", Method, 0, ""},
    +		{"(*File).DWARF", Method, 0, ""},
    +		{"(*File).ImportedLibraries", Method, 0, ""},
    +		{"(*File).ImportedSymbols", Method, 0, ""},
    +		{"(*File).Section", Method, 0, ""},
    +		{"(*File).Segment", Method, 0, ""},
    +		{"(*FormatError).Error", Method, 0, ""},
    +		{"(*Section).Data", Method, 0, ""},
    +		{"(*Section).Open", Method, 0, ""},
    +		{"(*Segment).Data", Method, 0, ""},
    +		{"(*Segment).Open", Method, 0, ""},
    +		{"(Cpu).GoString", Method, 0, ""},
    +		{"(Cpu).String", Method, 0, ""},
    +		{"(Dylib).Raw", Method, 0, ""},
    +		{"(Dysymtab).Raw", Method, 0, ""},
    +		{"(FatArch).Close", Method, 3, ""},
    +		{"(FatArch).DWARF", Method, 3, ""},
    +		{"(FatArch).ImportedLibraries", Method, 3, ""},
    +		{"(FatArch).ImportedSymbols", Method, 3, ""},
    +		{"(FatArch).Section", Method, 3, ""},
    +		{"(FatArch).Segment", Method, 3, ""},
    +		{"(LoadBytes).Raw", Method, 0, ""},
    +		{"(LoadCmd).GoString", Method, 0, ""},
    +		{"(LoadCmd).String", Method, 0, ""},
    +		{"(RelocTypeARM).GoString", Method, 10, ""},
    +		{"(RelocTypeARM).String", Method, 10, ""},
    +		{"(RelocTypeARM64).GoString", Method, 10, ""},
    +		{"(RelocTypeARM64).String", Method, 10, ""},
    +		{"(RelocTypeGeneric).GoString", Method, 10, ""},
    +		{"(RelocTypeGeneric).String", Method, 10, ""},
    +		{"(RelocTypeX86_64).GoString", Method, 10, ""},
    +		{"(RelocTypeX86_64).String", Method, 10, ""},
    +		{"(Rpath).Raw", Method, 10, ""},
    +		{"(Section).ReadAt", Method, 0, ""},
    +		{"(Segment).Raw", Method, 0, ""},
    +		{"(Segment).ReadAt", Method, 0, ""},
    +		{"(Symtab).Raw", Method, 0, ""},
    +		{"(Type).GoString", Method, 10, ""},
    +		{"(Type).String", Method, 10, ""},
    +		{"ARM64_RELOC_ADDEND", Const, 10, ""},
    +		{"ARM64_RELOC_BRANCH26", Const, 10, ""},
    +		{"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10, ""},
    +		{"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10, ""},
    +		{"ARM64_RELOC_PAGE21", Const, 10, ""},
    +		{"ARM64_RELOC_PAGEOFF12", Const, 10, ""},
    +		{"ARM64_RELOC_POINTER_TO_GOT", Const, 10, ""},
    +		{"ARM64_RELOC_SUBTRACTOR", Const, 10, ""},
    +		{"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10, ""},
    +		{"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10, ""},
    +		{"ARM64_RELOC_UNSIGNED", Const, 10, ""},
    +		{"ARM_RELOC_BR24", Const, 10, ""},
    +		{"ARM_RELOC_HALF", Const, 10, ""},
    +		{"ARM_RELOC_HALF_SECTDIFF", Const, 10, ""},
    +		{"ARM_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
    +		{"ARM_RELOC_PAIR", Const, 10, ""},
    +		{"ARM_RELOC_PB_LA_PTR", Const, 10, ""},
    +		{"ARM_RELOC_SECTDIFF", Const, 10, ""},
    +		{"ARM_RELOC_VANILLA", Const, 10, ""},
    +		{"ARM_THUMB_32BIT_BRANCH", Const, 10, ""},
    +		{"ARM_THUMB_RELOC_BR22", Const, 10, ""},
    +		{"Cpu", Type, 0, ""},
    +		{"Cpu386", Const, 0, ""},
    +		{"CpuAmd64", Const, 0, ""},
    +		{"CpuArm", Const, 3, ""},
    +		{"CpuArm64", Const, 11, ""},
    +		{"CpuPpc", Const, 3, ""},
    +		{"CpuPpc64", Const, 3, ""},
    +		{"Dylib", Type, 0, ""},
    +		{"Dylib.CompatVersion", Field, 0, ""},
    +		{"Dylib.CurrentVersion", Field, 0, ""},
    +		{"Dylib.LoadBytes", Field, 0, ""},
    +		{"Dylib.Name", Field, 0, ""},
    +		{"Dylib.Time", Field, 0, ""},
    +		{"DylibCmd", Type, 0, ""},
    +		{"DylibCmd.Cmd", Field, 0, ""},
    +		{"DylibCmd.CompatVersion", Field, 0, ""},
    +		{"DylibCmd.CurrentVersion", Field, 0, ""},
    +		{"DylibCmd.Len", Field, 0, ""},
    +		{"DylibCmd.Name", Field, 0, ""},
    +		{"DylibCmd.Time", Field, 0, ""},
    +		{"Dysymtab", Type, 0, ""},
    +		{"Dysymtab.DysymtabCmd", Field, 0, ""},
    +		{"Dysymtab.IndirectSyms", Field, 0, ""},
    +		{"Dysymtab.LoadBytes", Field, 0, ""},
    +		{"DysymtabCmd", Type, 0, ""},
    +		{"DysymtabCmd.Cmd", Field, 0, ""},
    +		{"DysymtabCmd.Extrefsymoff", Field, 0, ""},
    +		{"DysymtabCmd.Extreloff", Field, 0, ""},
    +		{"DysymtabCmd.Iextdefsym", Field, 0, ""},
    +		{"DysymtabCmd.Ilocalsym", Field, 0, ""},
    +		{"DysymtabCmd.Indirectsymoff", Field, 0, ""},
    +		{"DysymtabCmd.Iundefsym", Field, 0, ""},
    +		{"DysymtabCmd.Len", Field, 0, ""},
    +		{"DysymtabCmd.Locreloff", Field, 0, ""},
    +		{"DysymtabCmd.Modtaboff", Field, 0, ""},
    +		{"DysymtabCmd.Nextdefsym", Field, 0, ""},
    +		{"DysymtabCmd.Nextrefsyms", Field, 0, ""},
    +		{"DysymtabCmd.Nextrel", Field, 0, ""},
    +		{"DysymtabCmd.Nindirectsyms", Field, 0, ""},
    +		{"DysymtabCmd.Nlocalsym", Field, 0, ""},
    +		{"DysymtabCmd.Nlocrel", Field, 0, ""},
    +		{"DysymtabCmd.Nmodtab", Field, 0, ""},
    +		{"DysymtabCmd.Ntoc", Field, 0, ""},
    +		{"DysymtabCmd.Nundefsym", Field, 0, ""},
    +		{"DysymtabCmd.Tocoffset", Field, 0, ""},
    +		{"ErrNotFat", Var, 3, ""},
    +		{"FatArch", Type, 3, ""},
    +		{"FatArch.FatArchHeader", Field, 3, ""},
    +		{"FatArch.File", Field, 3, ""},
    +		{"FatArchHeader", Type, 3, ""},
    +		{"FatArchHeader.Align", Field, 3, ""},
    +		{"FatArchHeader.Cpu", Field, 3, ""},
    +		{"FatArchHeader.Offset", Field, 3, ""},
    +		{"FatArchHeader.Size", Field, 3, ""},
    +		{"FatArchHeader.SubCpu", Field, 3, ""},
    +		{"FatFile", Type, 3, ""},
    +		{"FatFile.Arches", Field, 3, ""},
    +		{"FatFile.Magic", Field, 3, ""},
    +		{"File", Type, 0, ""},
    +		{"File.ByteOrder", Field, 0, ""},
    +		{"File.Dysymtab", Field, 0, ""},
    +		{"File.FileHeader", Field, 0, ""},
    +		{"File.Loads", Field, 0, ""},
    +		{"File.Sections", Field, 0, ""},
    +		{"File.Symtab", Field, 0, ""},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.Cmdsz", Field, 0, ""},
    +		{"FileHeader.Cpu", Field, 0, ""},
    +		{"FileHeader.Flags", Field, 0, ""},
    +		{"FileHeader.Magic", Field, 0, ""},
    +		{"FileHeader.Ncmd", Field, 0, ""},
    +		{"FileHeader.SubCpu", Field, 0, ""},
    +		{"FileHeader.Type", Field, 0, ""},
    +		{"FlagAllModsBound", Const, 10, ""},
    +		{"FlagAllowStackExecution", Const, 10, ""},
    +		{"FlagAppExtensionSafe", Const, 10, ""},
    +		{"FlagBindAtLoad", Const, 10, ""},
    +		{"FlagBindsToWeak", Const, 10, ""},
    +		{"FlagCanonical", Const, 10, ""},
    +		{"FlagDeadStrippableDylib", Const, 10, ""},
    +		{"FlagDyldLink", Const, 10, ""},
    +		{"FlagForceFlat", Const, 10, ""},
    +		{"FlagHasTLVDescriptors", Const, 10, ""},
    +		{"FlagIncrLink", Const, 10, ""},
    +		{"FlagLazyInit", Const, 10, ""},
    +		{"FlagNoFixPrebinding", Const, 10, ""},
    +		{"FlagNoHeapExecution", Const, 10, ""},
    +		{"FlagNoMultiDefs", Const, 10, ""},
    +		{"FlagNoReexportedDylibs", Const, 10, ""},
    +		{"FlagNoUndefs", Const, 10, ""},
    +		{"FlagPIE", Const, 10, ""},
    +		{"FlagPrebindable", Const, 10, ""},
    +		{"FlagPrebound", Const, 10, ""},
    +		{"FlagRootSafe", Const, 10, ""},
    +		{"FlagSetuidSafe", Const, 10, ""},
    +		{"FlagSplitSegs", Const, 10, ""},
    +		{"FlagSubsectionsViaSymbols", Const, 10, ""},
    +		{"FlagTwoLevel", Const, 10, ""},
    +		{"FlagWeakDefines", Const, 10, ""},
    +		{"FormatError", Type, 0, ""},
    +		{"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
    +		{"GENERIC_RELOC_PAIR", Const, 10, ""},
    +		{"GENERIC_RELOC_PB_LA_PTR", Const, 10, ""},
    +		{"GENERIC_RELOC_SECTDIFF", Const, 10, ""},
    +		{"GENERIC_RELOC_TLV", Const, 10, ""},
    +		{"GENERIC_RELOC_VANILLA", Const, 10, ""},
    +		{"Load", Type, 0, ""},
    +		{"LoadBytes", Type, 0, ""},
    +		{"LoadCmd", Type, 0, ""},
    +		{"LoadCmdDylib", Const, 0, ""},
    +		{"LoadCmdDylinker", Const, 0, ""},
    +		{"LoadCmdDysymtab", Const, 0, ""},
    +		{"LoadCmdRpath", Const, 10, ""},
    +		{"LoadCmdSegment", Const, 0, ""},
    +		{"LoadCmdSegment64", Const, 0, ""},
    +		{"LoadCmdSymtab", Const, 0, ""},
    +		{"LoadCmdThread", Const, 0, ""},
    +		{"LoadCmdUnixThread", Const, 0, ""},
    +		{"Magic32", Const, 0, ""},
    +		{"Magic64", Const, 0, ""},
    +		{"MagicFat", Const, 3, ""},
    +		{"NewFatFile", Func, 3, "func(r io.ReaderAt) (*FatFile, error)"},
    +		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
    +		{"Nlist32", Type, 0, ""},
    +		{"Nlist32.Desc", Field, 0, ""},
    +		{"Nlist32.Name", Field, 0, ""},
    +		{"Nlist32.Sect", Field, 0, ""},
    +		{"Nlist32.Type", Field, 0, ""},
    +		{"Nlist32.Value", Field, 0, ""},
    +		{"Nlist64", Type, 0, ""},
    +		{"Nlist64.Desc", Field, 0, ""},
    +		{"Nlist64.Name", Field, 0, ""},
    +		{"Nlist64.Sect", Field, 0, ""},
    +		{"Nlist64.Type", Field, 0, ""},
    +		{"Nlist64.Value", Field, 0, ""},
    +		{"Open", Func, 0, "func(name string) (*File, error)"},
    +		{"OpenFat", Func, 3, "func(name string) (*FatFile, error)"},
    +		{"Regs386", Type, 0, ""},
    +		{"Regs386.AX", Field, 0, ""},
    +		{"Regs386.BP", Field, 0, ""},
    +		{"Regs386.BX", Field, 0, ""},
    +		{"Regs386.CS", Field, 0, ""},
    +		{"Regs386.CX", Field, 0, ""},
    +		{"Regs386.DI", Field, 0, ""},
    +		{"Regs386.DS", Field, 0, ""},
    +		{"Regs386.DX", Field, 0, ""},
    +		{"Regs386.ES", Field, 0, ""},
    +		{"Regs386.FLAGS", Field, 0, ""},
    +		{"Regs386.FS", Field, 0, ""},
    +		{"Regs386.GS", Field, 0, ""},
    +		{"Regs386.IP", Field, 0, ""},
    +		{"Regs386.SI", Field, 0, ""},
    +		{"Regs386.SP", Field, 0, ""},
    +		{"Regs386.SS", Field, 0, ""},
    +		{"RegsAMD64", Type, 0, ""},
    +		{"RegsAMD64.AX", Field, 0, ""},
    +		{"RegsAMD64.BP", Field, 0, ""},
    +		{"RegsAMD64.BX", Field, 0, ""},
    +		{"RegsAMD64.CS", Field, 0, ""},
    +		{"RegsAMD64.CX", Field, 0, ""},
    +		{"RegsAMD64.DI", Field, 0, ""},
    +		{"RegsAMD64.DX", Field, 0, ""},
    +		{"RegsAMD64.FLAGS", Field, 0, ""},
    +		{"RegsAMD64.FS", Field, 0, ""},
    +		{"RegsAMD64.GS", Field, 0, ""},
    +		{"RegsAMD64.IP", Field, 0, ""},
    +		{"RegsAMD64.R10", Field, 0, ""},
    +		{"RegsAMD64.R11", Field, 0, ""},
    +		{"RegsAMD64.R12", Field, 0, ""},
    +		{"RegsAMD64.R13", Field, 0, ""},
    +		{"RegsAMD64.R14", Field, 0, ""},
    +		{"RegsAMD64.R15", Field, 0, ""},
    +		{"RegsAMD64.R8", Field, 0, ""},
    +		{"RegsAMD64.R9", Field, 0, ""},
    +		{"RegsAMD64.SI", Field, 0, ""},
    +		{"RegsAMD64.SP", Field, 0, ""},
    +		{"Reloc", Type, 10, ""},
    +		{"Reloc.Addr", Field, 10, ""},
    +		{"Reloc.Extern", Field, 10, ""},
    +		{"Reloc.Len", Field, 10, ""},
    +		{"Reloc.Pcrel", Field, 10, ""},
    +		{"Reloc.Scattered", Field, 10, ""},
    +		{"Reloc.Type", Field, 10, ""},
    +		{"Reloc.Value", Field, 10, ""},
    +		{"RelocTypeARM", Type, 10, ""},
    +		{"RelocTypeARM64", Type, 10, ""},
    +		{"RelocTypeGeneric", Type, 10, ""},
    +		{"RelocTypeX86_64", Type, 10, ""},
    +		{"Rpath", Type, 10, ""},
    +		{"Rpath.LoadBytes", Field, 10, ""},
    +		{"Rpath.Path", Field, 10, ""},
    +		{"RpathCmd", Type, 10, ""},
    +		{"RpathCmd.Cmd", Field, 10, ""},
    +		{"RpathCmd.Len", Field, 10, ""},
    +		{"RpathCmd.Path", Field, 10, ""},
    +		{"Section", Type, 0, ""},
    +		{"Section.ReaderAt", Field, 0, ""},
    +		{"Section.Relocs", Field, 10, ""},
    +		{"Section.SectionHeader", Field, 0, ""},
    +		{"Section32", Type, 0, ""},
    +		{"Section32.Addr", Field, 0, ""},
    +		{"Section32.Align", Field, 0, ""},
    +		{"Section32.Flags", Field, 0, ""},
    +		{"Section32.Name", Field, 0, ""},
    +		{"Section32.Nreloc", Field, 0, ""},
    +		{"Section32.Offset", Field, 0, ""},
    +		{"Section32.Reloff", Field, 0, ""},
    +		{"Section32.Reserve1", Field, 0, ""},
    +		{"Section32.Reserve2", Field, 0, ""},
    +		{"Section32.Seg", Field, 0, ""},
    +		{"Section32.Size", Field, 0, ""},
    +		{"Section64", Type, 0, ""},
    +		{"Section64.Addr", Field, 0, ""},
    +		{"Section64.Align", Field, 0, ""},
    +		{"Section64.Flags", Field, 0, ""},
    +		{"Section64.Name", Field, 0, ""},
    +		{"Section64.Nreloc", Field, 0, ""},
    +		{"Section64.Offset", Field, 0, ""},
    +		{"Section64.Reloff", Field, 0, ""},
    +		{"Section64.Reserve1", Field, 0, ""},
    +		{"Section64.Reserve2", Field, 0, ""},
    +		{"Section64.Reserve3", Field, 0, ""},
    +		{"Section64.Seg", Field, 0, ""},
    +		{"Section64.Size", Field, 0, ""},
    +		{"SectionHeader", Type, 0, ""},
    +		{"SectionHeader.Addr", Field, 0, ""},
    +		{"SectionHeader.Align", Field, 0, ""},
    +		{"SectionHeader.Flags", Field, 0, ""},
    +		{"SectionHeader.Name", Field, 0, ""},
    +		{"SectionHeader.Nreloc", Field, 0, ""},
    +		{"SectionHeader.Offset", Field, 0, ""},
    +		{"SectionHeader.Reloff", Field, 0, ""},
    +		{"SectionHeader.Seg", Field, 0, ""},
    +		{"SectionHeader.Size", Field, 0, ""},
    +		{"Segment", Type, 0, ""},
    +		{"Segment.LoadBytes", Field, 0, ""},
    +		{"Segment.ReaderAt", Field, 0, ""},
    +		{"Segment.SegmentHeader", Field, 0, ""},
    +		{"Segment32", Type, 0, ""},
    +		{"Segment32.Addr", Field, 0, ""},
    +		{"Segment32.Cmd", Field, 0, ""},
    +		{"Segment32.Filesz", Field, 0, ""},
    +		{"Segment32.Flag", Field, 0, ""},
    +		{"Segment32.Len", Field, 0, ""},
    +		{"Segment32.Maxprot", Field, 0, ""},
    +		{"Segment32.Memsz", Field, 0, ""},
    +		{"Segment32.Name", Field, 0, ""},
    +		{"Segment32.Nsect", Field, 0, ""},
    +		{"Segment32.Offset", Field, 0, ""},
    +		{"Segment32.Prot", Field, 0, ""},
    +		{"Segment64", Type, 0, ""},
    +		{"Segment64.Addr", Field, 0, ""},
    +		{"Segment64.Cmd", Field, 0, ""},
    +		{"Segment64.Filesz", Field, 0, ""},
    +		{"Segment64.Flag", Field, 0, ""},
    +		{"Segment64.Len", Field, 0, ""},
    +		{"Segment64.Maxprot", Field, 0, ""},
    +		{"Segment64.Memsz", Field, 0, ""},
    +		{"Segment64.Name", Field, 0, ""},
    +		{"Segment64.Nsect", Field, 0, ""},
    +		{"Segment64.Offset", Field, 0, ""},
    +		{"Segment64.Prot", Field, 0, ""},
    +		{"SegmentHeader", Type, 0, ""},
    +		{"SegmentHeader.Addr", Field, 0, ""},
    +		{"SegmentHeader.Cmd", Field, 0, ""},
    +		{"SegmentHeader.Filesz", Field, 0, ""},
    +		{"SegmentHeader.Flag", Field, 0, ""},
    +		{"SegmentHeader.Len", Field, 0, ""},
    +		{"SegmentHeader.Maxprot", Field, 0, ""},
    +		{"SegmentHeader.Memsz", Field, 0, ""},
    +		{"SegmentHeader.Name", Field, 0, ""},
    +		{"SegmentHeader.Nsect", Field, 0, ""},
    +		{"SegmentHeader.Offset", Field, 0, ""},
    +		{"SegmentHeader.Prot", Field, 0, ""},
    +		{"Symbol", Type, 0, ""},
    +		{"Symbol.Desc", Field, 0, ""},
    +		{"Symbol.Name", Field, 0, ""},
    +		{"Symbol.Sect", Field, 0, ""},
    +		{"Symbol.Type", Field, 0, ""},
    +		{"Symbol.Value", Field, 0, ""},
    +		{"Symtab", Type, 0, ""},
    +		{"Symtab.LoadBytes", Field, 0, ""},
    +		{"Symtab.Syms", Field, 0, ""},
    +		{"Symtab.SymtabCmd", Field, 0, ""},
    +		{"SymtabCmd", Type, 0, ""},
    +		{"SymtabCmd.Cmd", Field, 0, ""},
    +		{"SymtabCmd.Len", Field, 0, ""},
    +		{"SymtabCmd.Nsyms", Field, 0, ""},
    +		{"SymtabCmd.Stroff", Field, 0, ""},
    +		{"SymtabCmd.Strsize", Field, 0, ""},
    +		{"SymtabCmd.Symoff", Field, 0, ""},
    +		{"Thread", Type, 0, ""},
    +		{"Thread.Cmd", Field, 0, ""},
    +		{"Thread.Data", Field, 0, ""},
    +		{"Thread.Len", Field, 0, ""},
    +		{"Thread.Type", Field, 0, ""},
    +		{"Type", Type, 0, ""},
    +		{"TypeBundle", Const, 3, ""},
    +		{"TypeDylib", Const, 3, ""},
    +		{"TypeExec", Const, 0, ""},
    +		{"TypeObj", Const, 0, ""},
    +		{"X86_64_RELOC_BRANCH", Const, 10, ""},
    +		{"X86_64_RELOC_GOT", Const, 10, ""},
    +		{"X86_64_RELOC_GOT_LOAD", Const, 10, ""},
    +		{"X86_64_RELOC_SIGNED", Const, 10, ""},
    +		{"X86_64_RELOC_SIGNED_1", Const, 10, ""},
    +		{"X86_64_RELOC_SIGNED_2", Const, 10, ""},
    +		{"X86_64_RELOC_SIGNED_4", Const, 10, ""},
    +		{"X86_64_RELOC_SUBTRACTOR", Const, 10, ""},
    +		{"X86_64_RELOC_TLV", Const, 10, ""},
    +		{"X86_64_RELOC_UNSIGNED", Const, 10, ""},
     	},
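
The debug/macho entries above include the reader entry points (Open, NewFile, OpenFat, with the signatures recorded in the manifest). A minimal sketch of walking a Mach-O binary's sections with that API; the input path "a.out" is a hypothetical example:

	package main

	import (
		"debug/macho"
		"fmt"
		"log"
	)

	func main() {
		f, err := macho.Open("a.out") // hypothetical input path
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		fmt.Println("cpu:", f.Cpu, "type:", f.Type)
		for _, s := range f.Sections {
			// SectionHeader carries the segment, name, address and size.
			fmt.Printf("%s/%s addr=%#x size=%d\n", s.Seg, s.Name, s.Addr, s.Size)
		}
	}
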
     	"debug/pe": {
    -		{"(*COFFSymbol).FullName", Method, 8},
    -		{"(*File).COFFSymbolReadSectionDefAux", Method, 19},
    -		{"(*File).Close", Method, 0},
    -		{"(*File).DWARF", Method, 0},
    -		{"(*File).ImportedLibraries", Method, 0},
    -		{"(*File).ImportedSymbols", Method, 0},
    -		{"(*File).Section", Method, 0},
    -		{"(*FormatError).Error", Method, 0},
    -		{"(*Section).Data", Method, 0},
    -		{"(*Section).Open", Method, 0},
    -		{"(Section).ReadAt", Method, 0},
    -		{"(StringTable).String", Method, 8},
    -		{"COFFSymbol", Type, 1},
    -		{"COFFSymbol.Name", Field, 1},
    -		{"COFFSymbol.NumberOfAuxSymbols", Field, 1},
    -		{"COFFSymbol.SectionNumber", Field, 1},
    -		{"COFFSymbol.StorageClass", Field, 1},
    -		{"COFFSymbol.Type", Field, 1},
    -		{"COFFSymbol.Value", Field, 1},
    -		{"COFFSymbolAuxFormat5", Type, 19},
    -		{"COFFSymbolAuxFormat5.Checksum", Field, 19},
    -		{"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19},
    -		{"COFFSymbolAuxFormat5.NumRelocs", Field, 19},
    -		{"COFFSymbolAuxFormat5.SecNum", Field, 19},
    -		{"COFFSymbolAuxFormat5.Selection", Field, 19},
    -		{"COFFSymbolAuxFormat5.Size", Field, 19},
    -		{"COFFSymbolSize", Const, 1},
    -		{"DataDirectory", Type, 3},
    -		{"DataDirectory.Size", Field, 3},
    -		{"DataDirectory.VirtualAddress", Field, 3},
    -		{"File", Type, 0},
    -		{"File.COFFSymbols", Field, 8},
    -		{"File.FileHeader", Field, 0},
    -		{"File.OptionalHeader", Field, 3},
    -		{"File.Sections", Field, 0},
    -		{"File.StringTable", Field, 8},
    -		{"File.Symbols", Field, 1},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.Characteristics", Field, 0},
    -		{"FileHeader.Machine", Field, 0},
    -		{"FileHeader.NumberOfSections", Field, 0},
    -		{"FileHeader.NumberOfSymbols", Field, 0},
    -		{"FileHeader.PointerToSymbolTable", Field, 0},
    -		{"FileHeader.SizeOfOptionalHeader", Field, 0},
    -		{"FileHeader.TimeDateStamp", Field, 0},
    -		{"FormatError", Type, 0},
    -		{"IMAGE_COMDAT_SELECT_ANY", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_LARGEST", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19},
    -		{"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19},
    -		{"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11},
    -		{"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11},
    -		{"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15},
    -		{"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15},
    -		{"IMAGE_FILE_32BIT_MACHINE", Const, 15},
    -		{"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15},
    -		{"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15},
    -		{"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15},
    -		{"IMAGE_FILE_DEBUG_STRIPPED", Const, 15},
    -		{"IMAGE_FILE_DLL", Const, 15},
    -		{"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15},
    -		{"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15},
    -		{"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15},
    -		{"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15},
    -		{"IMAGE_FILE_MACHINE_AM33", Const, 0},
    -		{"IMAGE_FILE_MACHINE_AMD64", Const, 0},
    -		{"IMAGE_FILE_MACHINE_ARM", Const, 0},
    -		{"IMAGE_FILE_MACHINE_ARM64", Const, 11},
    -		{"IMAGE_FILE_MACHINE_ARMNT", Const, 12},
    -		{"IMAGE_FILE_MACHINE_EBC", Const, 0},
    -		{"IMAGE_FILE_MACHINE_I386", Const, 0},
    -		{"IMAGE_FILE_MACHINE_IA64", Const, 0},
    -		{"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19},
    -		{"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19},
    -		{"IMAGE_FILE_MACHINE_M32R", Const, 0},
    -		{"IMAGE_FILE_MACHINE_MIPS16", Const, 0},
    -		{"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0},
    -		{"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0},
    -		{"IMAGE_FILE_MACHINE_POWERPC", Const, 0},
    -		{"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0},
    -		{"IMAGE_FILE_MACHINE_R4000", Const, 0},
    -		{"IMAGE_FILE_MACHINE_RISCV128", Const, 20},
    -		{"IMAGE_FILE_MACHINE_RISCV32", Const, 20},
    -		{"IMAGE_FILE_MACHINE_RISCV64", Const, 20},
    -		{"IMAGE_FILE_MACHINE_SH3", Const, 0},
    -		{"IMAGE_FILE_MACHINE_SH3DSP", Const, 0},
    -		{"IMAGE_FILE_MACHINE_SH4", Const, 0},
    -		{"IMAGE_FILE_MACHINE_SH5", Const, 0},
    -		{"IMAGE_FILE_MACHINE_THUMB", Const, 0},
    -		{"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0},
    -		{"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0},
    -		{"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15},
    -		{"IMAGE_FILE_RELOCS_STRIPPED", Const, 15},
    -		{"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15},
    -		{"IMAGE_FILE_SYSTEM", Const, 15},
    -		{"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15},
    -		{"IMAGE_SCN_CNT_CODE", Const, 19},
    -		{"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19},
    -		{"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19},
    -		{"IMAGE_SCN_LNK_COMDAT", Const, 19},
    -		{"IMAGE_SCN_MEM_DISCARDABLE", Const, 19},
    -		{"IMAGE_SCN_MEM_EXECUTE", Const, 19},
    -		{"IMAGE_SCN_MEM_READ", Const, 19},
    -		{"IMAGE_SCN_MEM_WRITE", Const, 19},
    -		{"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15},
    -		{"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15},
    -		{"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15},
    -		{"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15},
    -		{"IMAGE_SUBSYSTEM_NATIVE", Const, 15},
    -		{"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15},
    -		{"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15},
    -		{"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15},
    -		{"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15},
    -		{"IMAGE_SUBSYSTEM_XBOX", Const, 15},
    -		{"ImportDirectory", Type, 0},
    -		{"ImportDirectory.FirstThunk", Field, 0},
    -		{"ImportDirectory.ForwarderChain", Field, 0},
    -		{"ImportDirectory.Name", Field, 0},
    -		{"ImportDirectory.OriginalFirstThunk", Field, 0},
    -		{"ImportDirectory.TimeDateStamp", Field, 0},
    -		{"NewFile", Func, 0},
    -		{"Open", Func, 0},
    -		{"OptionalHeader32", Type, 3},
    -		{"OptionalHeader32.AddressOfEntryPoint", Field, 3},
    -		{"OptionalHeader32.BaseOfCode", Field, 3},
    -		{"OptionalHeader32.BaseOfData", Field, 3},
    -		{"OptionalHeader32.CheckSum", Field, 3},
    -		{"OptionalHeader32.DataDirectory", Field, 3},
    -		{"OptionalHeader32.DllCharacteristics", Field, 3},
    -		{"OptionalHeader32.FileAlignment", Field, 3},
    -		{"OptionalHeader32.ImageBase", Field, 3},
    -		{"OptionalHeader32.LoaderFlags", Field, 3},
    -		{"OptionalHeader32.Magic", Field, 3},
    -		{"OptionalHeader32.MajorImageVersion", Field, 3},
    -		{"OptionalHeader32.MajorLinkerVersion", Field, 3},
    -		{"OptionalHeader32.MajorOperatingSystemVersion", Field, 3},
    -		{"OptionalHeader32.MajorSubsystemVersion", Field, 3},
    -		{"OptionalHeader32.MinorImageVersion", Field, 3},
    -		{"OptionalHeader32.MinorLinkerVersion", Field, 3},
    -		{"OptionalHeader32.MinorOperatingSystemVersion", Field, 3},
    -		{"OptionalHeader32.MinorSubsystemVersion", Field, 3},
    -		{"OptionalHeader32.NumberOfRvaAndSizes", Field, 3},
    -		{"OptionalHeader32.SectionAlignment", Field, 3},
    -		{"OptionalHeader32.SizeOfCode", Field, 3},
    -		{"OptionalHeader32.SizeOfHeaders", Field, 3},
    -		{"OptionalHeader32.SizeOfHeapCommit", Field, 3},
    -		{"OptionalHeader32.SizeOfHeapReserve", Field, 3},
    -		{"OptionalHeader32.SizeOfImage", Field, 3},
    -		{"OptionalHeader32.SizeOfInitializedData", Field, 3},
    -		{"OptionalHeader32.SizeOfStackCommit", Field, 3},
    -		{"OptionalHeader32.SizeOfStackReserve", Field, 3},
    -		{"OptionalHeader32.SizeOfUninitializedData", Field, 3},
    -		{"OptionalHeader32.Subsystem", Field, 3},
    -		{"OptionalHeader32.Win32VersionValue", Field, 3},
    -		{"OptionalHeader64", Type, 3},
    -		{"OptionalHeader64.AddressOfEntryPoint", Field, 3},
    -		{"OptionalHeader64.BaseOfCode", Field, 3},
    -		{"OptionalHeader64.CheckSum", Field, 3},
    -		{"OptionalHeader64.DataDirectory", Field, 3},
    -		{"OptionalHeader64.DllCharacteristics", Field, 3},
    -		{"OptionalHeader64.FileAlignment", Field, 3},
    -		{"OptionalHeader64.ImageBase", Field, 3},
    -		{"OptionalHeader64.LoaderFlags", Field, 3},
    -		{"OptionalHeader64.Magic", Field, 3},
    -		{"OptionalHeader64.MajorImageVersion", Field, 3},
    -		{"OptionalHeader64.MajorLinkerVersion", Field, 3},
    -		{"OptionalHeader64.MajorOperatingSystemVersion", Field, 3},
    -		{"OptionalHeader64.MajorSubsystemVersion", Field, 3},
    -		{"OptionalHeader64.MinorImageVersion", Field, 3},
    -		{"OptionalHeader64.MinorLinkerVersion", Field, 3},
    -		{"OptionalHeader64.MinorOperatingSystemVersion", Field, 3},
    -		{"OptionalHeader64.MinorSubsystemVersion", Field, 3},
    -		{"OptionalHeader64.NumberOfRvaAndSizes", Field, 3},
    -		{"OptionalHeader64.SectionAlignment", Field, 3},
    -		{"OptionalHeader64.SizeOfCode", Field, 3},
    -		{"OptionalHeader64.SizeOfHeaders", Field, 3},
    -		{"OptionalHeader64.SizeOfHeapCommit", Field, 3},
    -		{"OptionalHeader64.SizeOfHeapReserve", Field, 3},
    -		{"OptionalHeader64.SizeOfImage", Field, 3},
    -		{"OptionalHeader64.SizeOfInitializedData", Field, 3},
    -		{"OptionalHeader64.SizeOfStackCommit", Field, 3},
    -		{"OptionalHeader64.SizeOfStackReserve", Field, 3},
    -		{"OptionalHeader64.SizeOfUninitializedData", Field, 3},
    -		{"OptionalHeader64.Subsystem", Field, 3},
    -		{"OptionalHeader64.Win32VersionValue", Field, 3},
    -		{"Reloc", Type, 8},
    -		{"Reloc.SymbolTableIndex", Field, 8},
    -		{"Reloc.Type", Field, 8},
    -		{"Reloc.VirtualAddress", Field, 8},
    -		{"Section", Type, 0},
    -		{"Section.ReaderAt", Field, 0},
    -		{"Section.Relocs", Field, 8},
    -		{"Section.SectionHeader", Field, 0},
    -		{"SectionHeader", Type, 0},
    -		{"SectionHeader.Characteristics", Field, 0},
    -		{"SectionHeader.Name", Field, 0},
    -		{"SectionHeader.NumberOfLineNumbers", Field, 0},
    -		{"SectionHeader.NumberOfRelocations", Field, 0},
    -		{"SectionHeader.Offset", Field, 0},
    -		{"SectionHeader.PointerToLineNumbers", Field, 0},
    -		{"SectionHeader.PointerToRelocations", Field, 0},
    -		{"SectionHeader.Size", Field, 0},
    -		{"SectionHeader.VirtualAddress", Field, 0},
    -		{"SectionHeader.VirtualSize", Field, 0},
    -		{"SectionHeader32", Type, 0},
    -		{"SectionHeader32.Characteristics", Field, 0},
    -		{"SectionHeader32.Name", Field, 0},
    -		{"SectionHeader32.NumberOfLineNumbers", Field, 0},
    -		{"SectionHeader32.NumberOfRelocations", Field, 0},
    -		{"SectionHeader32.PointerToLineNumbers", Field, 0},
    -		{"SectionHeader32.PointerToRawData", Field, 0},
    -		{"SectionHeader32.PointerToRelocations", Field, 0},
    -		{"SectionHeader32.SizeOfRawData", Field, 0},
    -		{"SectionHeader32.VirtualAddress", Field, 0},
    -		{"SectionHeader32.VirtualSize", Field, 0},
    -		{"StringTable", Type, 8},
    -		{"Symbol", Type, 1},
    -		{"Symbol.Name", Field, 1},
    -		{"Symbol.SectionNumber", Field, 1},
    -		{"Symbol.StorageClass", Field, 1},
    -		{"Symbol.Type", Field, 1},
    -		{"Symbol.Value", Field, 1},
    +		{"(*COFFSymbol).FullName", Method, 8, ""},
    +		{"(*File).COFFSymbolReadSectionDefAux", Method, 19, ""},
    +		{"(*File).Close", Method, 0, ""},
    +		{"(*File).DWARF", Method, 0, ""},
    +		{"(*File).ImportedLibraries", Method, 0, ""},
    +		{"(*File).ImportedSymbols", Method, 0, ""},
    +		{"(*File).Section", Method, 0, ""},
    +		{"(*FormatError).Error", Method, 0, ""},
    +		{"(*Section).Data", Method, 0, ""},
    +		{"(*Section).Open", Method, 0, ""},
    +		{"(Section).ReadAt", Method, 0, ""},
    +		{"(StringTable).String", Method, 8, ""},
    +		{"COFFSymbol", Type, 1, ""},
    +		{"COFFSymbol.Name", Field, 1, ""},
    +		{"COFFSymbol.NumberOfAuxSymbols", Field, 1, ""},
    +		{"COFFSymbol.SectionNumber", Field, 1, ""},
    +		{"COFFSymbol.StorageClass", Field, 1, ""},
    +		{"COFFSymbol.Type", Field, 1, ""},
    +		{"COFFSymbol.Value", Field, 1, ""},
    +		{"COFFSymbolAuxFormat5", Type, 19, ""},
    +		{"COFFSymbolAuxFormat5.Checksum", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.NumRelocs", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.SecNum", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.Selection", Field, 19, ""},
    +		{"COFFSymbolAuxFormat5.Size", Field, 19, ""},
    +		{"COFFSymbolSize", Const, 1, ""},
    +		{"DataDirectory", Type, 3, ""},
    +		{"DataDirectory.Size", Field, 3, ""},
    +		{"DataDirectory.VirtualAddress", Field, 3, ""},
    +		{"File", Type, 0, ""},
    +		{"File.COFFSymbols", Field, 8, ""},
    +		{"File.FileHeader", Field, 0, ""},
    +		{"File.OptionalHeader", Field, 3, ""},
    +		{"File.Sections", Field, 0, ""},
    +		{"File.StringTable", Field, 8, ""},
    +		{"File.Symbols", Field, 1, ""},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.Characteristics", Field, 0, ""},
    +		{"FileHeader.Machine", Field, 0, ""},
    +		{"FileHeader.NumberOfSections", Field, 0, ""},
    +		{"FileHeader.NumberOfSymbols", Field, 0, ""},
    +		{"FileHeader.PointerToSymbolTable", Field, 0, ""},
    +		{"FileHeader.SizeOfOptionalHeader", Field, 0, ""},
    +		{"FileHeader.TimeDateStamp", Field, 0, ""},
    +		{"FormatError", Type, 0, ""},
    +		{"IMAGE_COMDAT_SELECT_ANY", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_LARGEST", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19, ""},
    +		{"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11, ""},
    +		{"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15, ""},
    +		{"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15, ""},
    +		{"IMAGE_FILE_32BIT_MACHINE", Const, 15, ""},
    +		{"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15, ""},
    +		{"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15, ""},
    +		{"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15, ""},
    +		{"IMAGE_FILE_DEBUG_STRIPPED", Const, 15, ""},
    +		{"IMAGE_FILE_DLL", Const, 15, ""},
    +		{"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15, ""},
    +		{"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15, ""},
    +		{"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15, ""},
    +		{"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15, ""},
    +		{"IMAGE_FILE_MACHINE_AM33", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_AMD64", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_ARM", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_ARM64", Const, 11, ""},
    +		{"IMAGE_FILE_MACHINE_ARMNT", Const, 12, ""},
    +		{"IMAGE_FILE_MACHINE_EBC", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_I386", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_IA64", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19, ""},
    +		{"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19, ""},
    +		{"IMAGE_FILE_MACHINE_M32R", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_MIPS16", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_POWERPC", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_R4000", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_RISCV128", Const, 20, ""},
    +		{"IMAGE_FILE_MACHINE_RISCV32", Const, 20, ""},
    +		{"IMAGE_FILE_MACHINE_RISCV64", Const, 20, ""},
    +		{"IMAGE_FILE_MACHINE_SH3", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_SH3DSP", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_SH4", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_SH5", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_THUMB", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0, ""},
    +		{"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0, ""},
    +		{"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15, ""},
    +		{"IMAGE_FILE_RELOCS_STRIPPED", Const, 15, ""},
    +		{"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15, ""},
    +		{"IMAGE_FILE_SYSTEM", Const, 15, ""},
    +		{"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15, ""},
    +		{"IMAGE_SCN_CNT_CODE", Const, 19, ""},
    +		{"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19, ""},
    +		{"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19, ""},
    +		{"IMAGE_SCN_LNK_COMDAT", Const, 19, ""},
    +		{"IMAGE_SCN_MEM_DISCARDABLE", Const, 19, ""},
    +		{"IMAGE_SCN_MEM_EXECUTE", Const, 19, ""},
    +		{"IMAGE_SCN_MEM_READ", Const, 19, ""},
    +		{"IMAGE_SCN_MEM_WRITE", Const, 19, ""},
    +		{"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_NATIVE", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15, ""},
    +		{"IMAGE_SUBSYSTEM_XBOX", Const, 15, ""},
    +		{"ImportDirectory", Type, 0, ""},
    +		{"ImportDirectory.FirstThunk", Field, 0, ""},
    +		{"ImportDirectory.ForwarderChain", Field, 0, ""},
    +		{"ImportDirectory.Name", Field, 0, ""},
    +		{"ImportDirectory.OriginalFirstThunk", Field, 0, ""},
    +		{"ImportDirectory.TimeDateStamp", Field, 0, ""},
    +		{"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
    +		{"Open", Func, 0, "func(name string) (*File, error)"},
    +		{"OptionalHeader32", Type, 3, ""},
    +		{"OptionalHeader32.AddressOfEntryPoint", Field, 3, ""},
    +		{"OptionalHeader32.BaseOfCode", Field, 3, ""},
    +		{"OptionalHeader32.BaseOfData", Field, 3, ""},
    +		{"OptionalHeader32.CheckSum", Field, 3, ""},
    +		{"OptionalHeader32.DataDirectory", Field, 3, ""},
    +		{"OptionalHeader32.DllCharacteristics", Field, 3, ""},
    +		{"OptionalHeader32.FileAlignment", Field, 3, ""},
    +		{"OptionalHeader32.ImageBase", Field, 3, ""},
    +		{"OptionalHeader32.LoaderFlags", Field, 3, ""},
    +		{"OptionalHeader32.Magic", Field, 3, ""},
    +		{"OptionalHeader32.MajorImageVersion", Field, 3, ""},
    +		{"OptionalHeader32.MajorLinkerVersion", Field, 3, ""},
    +		{"OptionalHeader32.MajorOperatingSystemVersion", Field, 3, ""},
    +		{"OptionalHeader32.MajorSubsystemVersion", Field, 3, ""},
    +		{"OptionalHeader32.MinorImageVersion", Field, 3, ""},
    +		{"OptionalHeader32.MinorLinkerVersion", Field, 3, ""},
    +		{"OptionalHeader32.MinorOperatingSystemVersion", Field, 3, ""},
    +		{"OptionalHeader32.MinorSubsystemVersion", Field, 3, ""},
    +		{"OptionalHeader32.NumberOfRvaAndSizes", Field, 3, ""},
    +		{"OptionalHeader32.SectionAlignment", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfCode", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfHeaders", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfHeapCommit", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfHeapReserve", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfImage", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfInitializedData", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfStackCommit", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfStackReserve", Field, 3, ""},
    +		{"OptionalHeader32.SizeOfUninitializedData", Field, 3, ""},
    +		{"OptionalHeader32.Subsystem", Field, 3, ""},
    +		{"OptionalHeader32.Win32VersionValue", Field, 3, ""},
    +		{"OptionalHeader64", Type, 3, ""},
    +		{"OptionalHeader64.AddressOfEntryPoint", Field, 3, ""},
    +		{"OptionalHeader64.BaseOfCode", Field, 3, ""},
    +		{"OptionalHeader64.CheckSum", Field, 3, ""},
    +		{"OptionalHeader64.DataDirectory", Field, 3, ""},
    +		{"OptionalHeader64.DllCharacteristics", Field, 3, ""},
    +		{"OptionalHeader64.FileAlignment", Field, 3, ""},
    +		{"OptionalHeader64.ImageBase", Field, 3, ""},
    +		{"OptionalHeader64.LoaderFlags", Field, 3, ""},
    +		{"OptionalHeader64.Magic", Field, 3, ""},
    +		{"OptionalHeader64.MajorImageVersion", Field, 3, ""},
    +		{"OptionalHeader64.MajorLinkerVersion", Field, 3, ""},
    +		{"OptionalHeader64.MajorOperatingSystemVersion", Field, 3, ""},
    +		{"OptionalHeader64.MajorSubsystemVersion", Field, 3, ""},
    +		{"OptionalHeader64.MinorImageVersion", Field, 3, ""},
    +		{"OptionalHeader64.MinorLinkerVersion", Field, 3, ""},
    +		{"OptionalHeader64.MinorOperatingSystemVersion", Field, 3, ""},
    +		{"OptionalHeader64.MinorSubsystemVersion", Field, 3, ""},
    +		{"OptionalHeader64.NumberOfRvaAndSizes", Field, 3, ""},
    +		{"OptionalHeader64.SectionAlignment", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfCode", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfHeaders", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfHeapCommit", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfHeapReserve", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfImage", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfInitializedData", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfStackCommit", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfStackReserve", Field, 3, ""},
    +		{"OptionalHeader64.SizeOfUninitializedData", Field, 3, ""},
    +		{"OptionalHeader64.Subsystem", Field, 3, ""},
    +		{"OptionalHeader64.Win32VersionValue", Field, 3, ""},
    +		{"Reloc", Type, 8, ""},
    +		{"Reloc.SymbolTableIndex", Field, 8, ""},
    +		{"Reloc.Type", Field, 8, ""},
    +		{"Reloc.VirtualAddress", Field, 8, ""},
    +		{"Section", Type, 0, ""},
    +		{"Section.ReaderAt", Field, 0, ""},
    +		{"Section.Relocs", Field, 8, ""},
    +		{"Section.SectionHeader", Field, 0, ""},
    +		{"SectionHeader", Type, 0, ""},
    +		{"SectionHeader.Characteristics", Field, 0, ""},
    +		{"SectionHeader.Name", Field, 0, ""},
    +		{"SectionHeader.NumberOfLineNumbers", Field, 0, ""},
    +		{"SectionHeader.NumberOfRelocations", Field, 0, ""},
    +		{"SectionHeader.Offset", Field, 0, ""},
    +		{"SectionHeader.PointerToLineNumbers", Field, 0, ""},
    +		{"SectionHeader.PointerToRelocations", Field, 0, ""},
    +		{"SectionHeader.Size", Field, 0, ""},
    +		{"SectionHeader.VirtualAddress", Field, 0, ""},
    +		{"SectionHeader.VirtualSize", Field, 0, ""},
    +		{"SectionHeader32", Type, 0, ""},
    +		{"SectionHeader32.Characteristics", Field, 0, ""},
    +		{"SectionHeader32.Name", Field, 0, ""},
    +		{"SectionHeader32.NumberOfLineNumbers", Field, 0, ""},
    +		{"SectionHeader32.NumberOfRelocations", Field, 0, ""},
    +		{"SectionHeader32.PointerToLineNumbers", Field, 0, ""},
    +		{"SectionHeader32.PointerToRawData", Field, 0, ""},
    +		{"SectionHeader32.PointerToRelocations", Field, 0, ""},
    +		{"SectionHeader32.SizeOfRawData", Field, 0, ""},
    +		{"SectionHeader32.VirtualAddress", Field, 0, ""},
    +		{"SectionHeader32.VirtualSize", Field, 0, ""},
    +		{"StringTable", Type, 8, ""},
    +		{"Symbol", Type, 1, ""},
    +		{"Symbol.Name", Field, 1, ""},
    +		{"Symbol.SectionNumber", Field, 1, ""},
    +		{"Symbol.StorageClass", Field, 1, ""},
    +		{"Symbol.Type", Field, 1, ""},
    +		{"Symbol.Value", Field, 1, ""},
     	},
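
For debug/pe the manifest records Open as func(name string) (*File, error); a minimal sketch that lists section headers, with "app.exe" as a hypothetical input:

	package main

	import (
		"debug/pe"
		"fmt"
		"log"
	)

	func main() {
		f, err := pe.Open("app.exe") // hypothetical input path
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		fmt.Printf("machine=%#x sections=%d\n", f.Machine, f.NumberOfSections)
		for _, s := range f.Sections {
			fmt.Printf("%-8s va=%#x vsize=%d\n", s.Name, s.VirtualAddress, s.VirtualSize)
		}
	}
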
     	"debug/plan9obj": {
    -		{"(*File).Close", Method, 3},
    -		{"(*File).Section", Method, 3},
    -		{"(*File).Symbols", Method, 3},
    -		{"(*Section).Data", Method, 3},
    -		{"(*Section).Open", Method, 3},
    -		{"(Section).ReadAt", Method, 3},
    -		{"ErrNoSymbols", Var, 18},
    -		{"File", Type, 3},
    -		{"File.FileHeader", Field, 3},
    -		{"File.Sections", Field, 3},
    -		{"FileHeader", Type, 3},
    -		{"FileHeader.Bss", Field, 3},
    -		{"FileHeader.Entry", Field, 3},
    -		{"FileHeader.HdrSize", Field, 4},
    -		{"FileHeader.LoadAddress", Field, 4},
    -		{"FileHeader.Magic", Field, 3},
    -		{"FileHeader.PtrSize", Field, 3},
    -		{"Magic386", Const, 3},
    -		{"Magic64", Const, 3},
    -		{"MagicAMD64", Const, 3},
    -		{"MagicARM", Const, 3},
    -		{"NewFile", Func, 3},
    -		{"Open", Func, 3},
    -		{"Section", Type, 3},
    -		{"Section.ReaderAt", Field, 3},
    -		{"Section.SectionHeader", Field, 3},
    -		{"SectionHeader", Type, 3},
    -		{"SectionHeader.Name", Field, 3},
    -		{"SectionHeader.Offset", Field, 3},
    -		{"SectionHeader.Size", Field, 3},
    -		{"Sym", Type, 3},
    -		{"Sym.Name", Field, 3},
    -		{"Sym.Type", Field, 3},
    -		{"Sym.Value", Field, 3},
    +		{"(*File).Close", Method, 3, ""},
    +		{"(*File).Section", Method, 3, ""},
    +		{"(*File).Symbols", Method, 3, ""},
    +		{"(*Section).Data", Method, 3, ""},
    +		{"(*Section).Open", Method, 3, ""},
    +		{"(Section).ReadAt", Method, 3, ""},
    +		{"ErrNoSymbols", Var, 18, ""},
    +		{"File", Type, 3, ""},
    +		{"File.FileHeader", Field, 3, ""},
    +		{"File.Sections", Field, 3, ""},
    +		{"FileHeader", Type, 3, ""},
    +		{"FileHeader.Bss", Field, 3, ""},
    +		{"FileHeader.Entry", Field, 3, ""},
    +		{"FileHeader.HdrSize", Field, 4, ""},
    +		{"FileHeader.LoadAddress", Field, 4, ""},
    +		{"FileHeader.Magic", Field, 3, ""},
    +		{"FileHeader.PtrSize", Field, 3, ""},
    +		{"Magic386", Const, 3, ""},
    +		{"Magic64", Const, 3, ""},
    +		{"MagicAMD64", Const, 3, ""},
    +		{"MagicARM", Const, 3, ""},
    +		{"NewFile", Func, 3, "func(r io.ReaderAt) (*File, error)"},
    +		{"Open", Func, 3, "func(name string) (*File, error)"},
    +		{"Section", Type, 3, ""},
    +		{"Section.ReaderAt", Field, 3, ""},
    +		{"Section.SectionHeader", Field, 3, ""},
    +		{"SectionHeader", Type, 3, ""},
    +		{"SectionHeader.Name", Field, 3, ""},
    +		{"SectionHeader.Offset", Field, 3, ""},
    +		{"SectionHeader.Size", Field, 3, ""},
    +		{"Sym", Type, 3, ""},
    +		{"Sym.Name", Field, 3, ""},
    +		{"Sym.Type", Field, 3, ""},
    +		{"Sym.Value", Field, 3, ""},
     	},
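
debug/plan9obj follows the same shape (Open is again func(name string) (*File, error)); a sketch that dumps the symbol table, with "8.out" as a hypothetical Plan 9 binary:

	package main

	import (
		"debug/plan9obj"
		"fmt"
		"log"
	)

	func main() {
		f, err := plan9obj.Open("8.out") // hypothetical input path
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		syms, err := f.Symbols() // ErrNoSymbols when the binary has no symbol table
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range syms {
			fmt.Printf("%c %#x %s\n", s.Type, s.Value, s.Name)
		}
	}
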
     	"embed": {
    -		{"(FS).Open", Method, 16},
    -		{"(FS).ReadDir", Method, 16},
    -		{"(FS).ReadFile", Method, 16},
    -		{"FS", Type, 16},
    +		{"(FS).Open", Method, 16, ""},
    +		{"(FS).ReadDir", Method, 16, ""},
    +		{"(FS).ReadFile", Method, 16, ""},
    +		{"FS", Type, 16, ""},
     	},
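
The embed.FS methods listed here are what a //go:embed directive hands back; a minimal sketch, assuming a hypothetical static/ directory next to the source file:

	package main

	import (
		"embed"
		"fmt"
		"log"
	)

	//go:embed static
	var static embed.FS // hypothetical embedded directory

	func main() {
		data, err := static.ReadFile("static/index.html") // hypothetical file
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%d bytes embedded\n", len(data))
	}
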
     	"encoding": {
    -		{"BinaryAppender", Type, 24},
    -		{"BinaryMarshaler", Type, 2},
    -		{"BinaryUnmarshaler", Type, 2},
    -		{"TextAppender", Type, 24},
    -		{"TextMarshaler", Type, 2},
    -		{"TextUnmarshaler", Type, 2},
    +		{"BinaryAppender", Type, 24, ""},
    +		{"BinaryMarshaler", Type, 2, ""},
    +		{"BinaryUnmarshaler", Type, 2, ""},
    +		{"TextAppender", Type, 24, ""},
    +		{"TextMarshaler", Type, 2, ""},
    +		{"TextUnmarshaler", Type, 2, ""},
     	},
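
The encoding package is nothing but these marshaling interfaces; a sketch of satisfying TextMarshaler and TextUnmarshaler on a hypothetical Celsius type:

	package main

	import (
		"encoding"
		"fmt"
		"strconv"
		"strings"
	)

	type Celsius float64 // hypothetical example type

	func (c Celsius) MarshalText() ([]byte, error) {
		return []byte(strconv.FormatFloat(float64(c), 'f', 1, 64) + "C"), nil
	}

	func (c *Celsius) UnmarshalText(text []byte) error {
		v, err := strconv.ParseFloat(strings.TrimSuffix(string(text), "C"), 64)
		*c = Celsius(v)
		return err
	}

	// Compile-time checks that the interfaces above are satisfied.
	var _ encoding.TextMarshaler = Celsius(0)
	var _ encoding.TextUnmarshaler = (*Celsius)(nil)

	func main() {
		b, _ := Celsius(21.5).MarshalText()
		fmt.Println(string(b)) // 21.5C
	}
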
     	"encoding/ascii85": {
    -		{"(CorruptInputError).Error", Method, 0},
    -		{"CorruptInputError", Type, 0},
    -		{"Decode", Func, 0},
    -		{"Encode", Func, 0},
    -		{"MaxEncodedLen", Func, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    +		{"(CorruptInputError).Error", Method, 0, ""},
    +		{"CorruptInputError", Type, 0, ""},
    +		{"Decode", Func, 0, "func(dst []byte, src []byte, flush bool) (ndst int, nsrc int, err error)"},
    +		{"Encode", Func, 0, "func(dst []byte, src []byte) int"},
    +		{"MaxEncodedLen", Func, 0, "func(n int) int"},
    +		{"NewDecoder", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"NewEncoder", Func, 0, "func(w io.Writer) io.WriteCloser"},
     	},
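
The signatures recorded above for encoding/ascii85 (Encode, Decode with its flush flag, MaxEncodedLen) compose as in this minimal round-trip sketch over a throwaway payload:

	package main

	import (
		"encoding/ascii85"
		"fmt"
		"log"
	)

	func main() {
		src := []byte("hello, world")
		dst := make([]byte, ascii85.MaxEncodedLen(len(src)))
		n := ascii85.Encode(dst, src)
		fmt.Printf("encoded: %s\n", dst[:n])

		out := make([]byte, len(src))
		ndst, _, err := ascii85.Decode(out, dst[:n], true) // flush=true: input is complete
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("decoded: %s\n", out[:ndst])
	}
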
     	"encoding/asn1": {
    -		{"(BitString).At", Method, 0},
    -		{"(BitString).RightAlign", Method, 0},
    -		{"(ObjectIdentifier).Equal", Method, 0},
    -		{"(ObjectIdentifier).String", Method, 3},
    -		{"(StructuralError).Error", Method, 0},
    -		{"(SyntaxError).Error", Method, 0},
    -		{"BitString", Type, 0},
    -		{"BitString.BitLength", Field, 0},
    -		{"BitString.Bytes", Field, 0},
    -		{"ClassApplication", Const, 6},
    -		{"ClassContextSpecific", Const, 6},
    -		{"ClassPrivate", Const, 6},
    -		{"ClassUniversal", Const, 6},
    -		{"Enumerated", Type, 0},
    -		{"Flag", Type, 0},
    -		{"Marshal", Func, 0},
    -		{"MarshalWithParams", Func, 10},
    -		{"NullBytes", Var, 9},
    -		{"NullRawValue", Var, 9},
    -		{"ObjectIdentifier", Type, 0},
    -		{"RawContent", Type, 0},
    -		{"RawValue", Type, 0},
    -		{"RawValue.Bytes", Field, 0},
    -		{"RawValue.Class", Field, 0},
    -		{"RawValue.FullBytes", Field, 0},
    -		{"RawValue.IsCompound", Field, 0},
    -		{"RawValue.Tag", Field, 0},
    -		{"StructuralError", Type, 0},
    -		{"StructuralError.Msg", Field, 0},
    -		{"SyntaxError", Type, 0},
    -		{"SyntaxError.Msg", Field, 0},
    -		{"TagBMPString", Const, 14},
    -		{"TagBitString", Const, 6},
    -		{"TagBoolean", Const, 6},
    -		{"TagEnum", Const, 6},
    -		{"TagGeneralString", Const, 6},
    -		{"TagGeneralizedTime", Const, 6},
    -		{"TagIA5String", Const, 6},
    -		{"TagInteger", Const, 6},
    -		{"TagNull", Const, 9},
    -		{"TagNumericString", Const, 10},
    -		{"TagOID", Const, 6},
    -		{"TagOctetString", Const, 6},
    -		{"TagPrintableString", Const, 6},
    -		{"TagSequence", Const, 6},
    -		{"TagSet", Const, 6},
    -		{"TagT61String", Const, 6},
    -		{"TagUTCTime", Const, 6},
    -		{"TagUTF8String", Const, 6},
    -		{"Unmarshal", Func, 0},
    -		{"UnmarshalWithParams", Func, 0},
    +		{"(BitString).At", Method, 0, ""},
    +		{"(BitString).RightAlign", Method, 0, ""},
    +		{"(ObjectIdentifier).Equal", Method, 0, ""},
    +		{"(ObjectIdentifier).String", Method, 3, ""},
    +		{"(StructuralError).Error", Method, 0, ""},
    +		{"(SyntaxError).Error", Method, 0, ""},
    +		{"BitString", Type, 0, ""},
    +		{"BitString.BitLength", Field, 0, ""},
    +		{"BitString.Bytes", Field, 0, ""},
    +		{"ClassApplication", Const, 6, ""},
    +		{"ClassContextSpecific", Const, 6, ""},
    +		{"ClassPrivate", Const, 6, ""},
    +		{"ClassUniversal", Const, 6, ""},
    +		{"Enumerated", Type, 0, ""},
    +		{"Flag", Type, 0, ""},
    +		{"Marshal", Func, 0, "func(val any) ([]byte, error)"},
    +		{"MarshalWithParams", Func, 10, "func(val any, params string) ([]byte, error)"},
    +		{"NullBytes", Var, 9, ""},
    +		{"NullRawValue", Var, 9, ""},
    +		{"ObjectIdentifier", Type, 0, ""},
    +		{"RawContent", Type, 0, ""},
    +		{"RawValue", Type, 0, ""},
    +		{"RawValue.Bytes", Field, 0, ""},
    +		{"RawValue.Class", Field, 0, ""},
    +		{"RawValue.FullBytes", Field, 0, ""},
    +		{"RawValue.IsCompound", Field, 0, ""},
    +		{"RawValue.Tag", Field, 0, ""},
    +		{"StructuralError", Type, 0, ""},
    +		{"StructuralError.Msg", Field, 0, ""},
    +		{"SyntaxError", Type, 0, ""},
    +		{"SyntaxError.Msg", Field, 0, ""},
    +		{"TagBMPString", Const, 14, ""},
    +		{"TagBitString", Const, 6, ""},
    +		{"TagBoolean", Const, 6, ""},
    +		{"TagEnum", Const, 6, ""},
    +		{"TagGeneralString", Const, 6, ""},
    +		{"TagGeneralizedTime", Const, 6, ""},
    +		{"TagIA5String", Const, 6, ""},
    +		{"TagInteger", Const, 6, ""},
    +		{"TagNull", Const, 9, ""},
    +		{"TagNumericString", Const, 10, ""},
    +		{"TagOID", Const, 6, ""},
    +		{"TagOctetString", Const, 6, ""},
    +		{"TagPrintableString", Const, 6, ""},
    +		{"TagSequence", Const, 6, ""},
    +		{"TagSet", Const, 6, ""},
    +		{"TagT61String", Const, 6, ""},
    +		{"TagUTCTime", Const, 6, ""},
    +		{"TagUTF8String", Const, 6, ""},
    +		{"Unmarshal", Func, 0, "func(b []byte, val any) (rest []byte, err error)"},
    +		{"UnmarshalWithParams", Func, 0, "func(b []byte, val any, params string) (rest []byte, err error)"},
     	},
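
encoding/asn1's Marshal/Unmarshal pair (signatures above) round-trips structs as DER; a sketch with a hypothetical point struct:

	package main

	import (
		"encoding/asn1"
		"fmt"
		"log"
	)

	type point struct { // hypothetical example struct; fields must be exported
		X, Y int
	}

	func main() {
		der, err := asn1.Marshal(point{X: 3, Y: 4})
		if err != nil {
			log.Fatal(err)
		}
		var p point
		rest, err := asn1.Unmarshal(der, &p)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%+v, %d trailing bytes\n", p, len(rest))
	}
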
     	"encoding/base32": {
    -		{"(*Encoding).AppendDecode", Method, 22},
    -		{"(*Encoding).AppendEncode", Method, 22},
    -		{"(*Encoding).Decode", Method, 0},
    -		{"(*Encoding).DecodeString", Method, 0},
    -		{"(*Encoding).DecodedLen", Method, 0},
    -		{"(*Encoding).Encode", Method, 0},
    -		{"(*Encoding).EncodeToString", Method, 0},
    -		{"(*Encoding).EncodedLen", Method, 0},
    -		{"(CorruptInputError).Error", Method, 0},
    -		{"(Encoding).WithPadding", Method, 9},
    -		{"CorruptInputError", Type, 0},
    -		{"Encoding", Type, 0},
    -		{"HexEncoding", Var, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"NewEncoding", Func, 0},
    -		{"NoPadding", Const, 9},
    -		{"StdEncoding", Var, 0},
    -		{"StdPadding", Const, 9},
    +		{"(*Encoding).AppendDecode", Method, 22, ""},
    +		{"(*Encoding).AppendEncode", Method, 22, ""},
    +		{"(*Encoding).Decode", Method, 0, ""},
    +		{"(*Encoding).DecodeString", Method, 0, ""},
    +		{"(*Encoding).DecodedLen", Method, 0, ""},
    +		{"(*Encoding).Encode", Method, 0, ""},
    +		{"(*Encoding).EncodeToString", Method, 0, ""},
    +		{"(*Encoding).EncodedLen", Method, 0, ""},
    +		{"(CorruptInputError).Error", Method, 0, ""},
    +		{"(Encoding).WithPadding", Method, 9, ""},
    +		{"CorruptInputError", Type, 0, ""},
    +		{"Encoding", Type, 0, ""},
    +		{"HexEncoding", Var, 0, ""},
    +		{"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
    +		{"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
    +		{"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
    +		{"NoPadding", Const, 9, ""},
    +		{"StdEncoding", Var, 0, ""},
    +		{"StdPadding", Const, 9, ""},
     	},
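
For encoding/base32 the manifest pairs the stock StdEncoding/HexEncoding variables with NewEncoding and (Encoding).WithPadding; a sketch, where the 32-character alphabet is a hypothetical choice:

	package main

	import (
		"encoding/base32"
		"fmt"
		"log"
	)

	func main() {
		s := base32.StdEncoding.EncodeToString([]byte("data"))
		b, err := base32.StdEncoding.DecodeString(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(s, string(b))

		// Custom 32-character alphabet (hypothetical), unpadded.
		enc := base32.NewEncoding("0123456789abcdefghijklmnopqrstuv").
			WithPadding(base32.NoPadding)
		fmt.Println(enc.EncodeToString([]byte("data")))
	}
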
     	"encoding/base64": {
    -		{"(*Encoding).AppendDecode", Method, 22},
    -		{"(*Encoding).AppendEncode", Method, 22},
    -		{"(*Encoding).Decode", Method, 0},
    -		{"(*Encoding).DecodeString", Method, 0},
    -		{"(*Encoding).DecodedLen", Method, 0},
    -		{"(*Encoding).Encode", Method, 0},
    -		{"(*Encoding).EncodeToString", Method, 0},
    -		{"(*Encoding).EncodedLen", Method, 0},
    -		{"(CorruptInputError).Error", Method, 0},
    -		{"(Encoding).Strict", Method, 8},
    -		{"(Encoding).WithPadding", Method, 5},
    -		{"CorruptInputError", Type, 0},
    -		{"Encoding", Type, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"NewEncoding", Func, 0},
    -		{"NoPadding", Const, 5},
    -		{"RawStdEncoding", Var, 5},
    -		{"RawURLEncoding", Var, 5},
    -		{"StdEncoding", Var, 0},
    -		{"StdPadding", Const, 5},
    -		{"URLEncoding", Var, 0},
    +		{"(*Encoding).AppendDecode", Method, 22, ""},
    +		{"(*Encoding).AppendEncode", Method, 22, ""},
    +		{"(*Encoding).Decode", Method, 0, ""},
    +		{"(*Encoding).DecodeString", Method, 0, ""},
    +		{"(*Encoding).DecodedLen", Method, 0, ""},
    +		{"(*Encoding).Encode", Method, 0, ""},
    +		{"(*Encoding).EncodeToString", Method, 0, ""},
    +		{"(*Encoding).EncodedLen", Method, 0, ""},
    +		{"(CorruptInputError).Error", Method, 0, ""},
    +		{"(Encoding).Strict", Method, 8, ""},
    +		{"(Encoding).WithPadding", Method, 5, ""},
    +		{"CorruptInputError", Type, 0, ""},
    +		{"Encoding", Type, 0, ""},
    +		{"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
    +		{"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
    +		{"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
    +		{"NoPadding", Const, 5, ""},
    +		{"RawStdEncoding", Var, 5, ""},
    +		{"RawURLEncoding", Var, 5, ""},
    +		{"StdEncoding", Var, 0, ""},
    +		{"StdPadding", Const, 5, ""},
    +		{"URLEncoding", Var, 0, ""},
     	},
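
encoding/base64 mirrors base32 but adds the Raw* (unpadded) variants and (Encoding).Strict; a minimal sketch using RawURLEncoding on a hypothetical payload:

	package main

	import (
		"encoding/base64"
		"fmt"
		"log"
	)

	func main() {
		token := []byte{0xfb, 0xff, 0x00} // hypothetical payload
		s := base64.RawURLEncoding.EncodeToString(token)
		fmt.Println(s) // URL-safe alphabet, no '=' padding

		b, err := base64.RawURLEncoding.DecodeString(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("% x\n", b)
	}
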
     	"encoding/binary": {
    -		{"Append", Func, 23},
    -		{"AppendByteOrder", Type, 19},
    -		{"AppendUvarint", Func, 19},
    -		{"AppendVarint", Func, 19},
    -		{"BigEndian", Var, 0},
    -		{"ByteOrder", Type, 0},
    -		{"Decode", Func, 23},
    -		{"Encode", Func, 23},
    -		{"LittleEndian", Var, 0},
    -		{"MaxVarintLen16", Const, 0},
    -		{"MaxVarintLen32", Const, 0},
    -		{"MaxVarintLen64", Const, 0},
    -		{"NativeEndian", Var, 21},
    -		{"PutUvarint", Func, 0},
    -		{"PutVarint", Func, 0},
    -		{"Read", Func, 0},
    -		{"ReadUvarint", Func, 0},
    -		{"ReadVarint", Func, 0},
    -		{"Size", Func, 0},
    -		{"Uvarint", Func, 0},
    -		{"Varint", Func, 0},
    -		{"Write", Func, 0},
    +		{"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"},
    +		{"AppendByteOrder", Type, 19, ""},
    +		{"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"},
    +		{"AppendVarint", Func, 19, "func(buf []byte, x int64) []byte"},
    +		{"BigEndian", Var, 0, ""},
    +		{"ByteOrder", Type, 0, ""},
    +		{"Decode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
    +		{"Encode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
    +		{"LittleEndian", Var, 0, ""},
    +		{"MaxVarintLen16", Const, 0, ""},
    +		{"MaxVarintLen32", Const, 0, ""},
    +		{"MaxVarintLen64", Const, 0, ""},
    +		{"NativeEndian", Var, 21, ""},
    +		{"PutUvarint", Func, 0, "func(buf []byte, x uint64) int"},
    +		{"PutVarint", Func, 0, "func(buf []byte, x int64) int"},
    +		{"Read", Func, 0, "func(r io.Reader, order ByteOrder, data any) error"},
    +		{"ReadUvarint", Func, 0, "func(r io.ByteReader) (uint64, error)"},
    +		{"ReadVarint", Func, 0, "func(r io.ByteReader) (int64, error)"},
    +		{"Size", Func, 0, "func(v any) int"},
    +		{"Uvarint", Func, 0, "func(buf []byte) (uint64, int)"},
    +		{"Varint", Func, 0, "func(buf []byte) (int64, int)"},
    +		{"Write", Func, 0, "func(w io.Writer, order ByteOrder, data any) error"},
     	},
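
The encoding/binary signatures above cover both fixed-width I/O (Read, Write, Encode, Decode) and varints; a round-trip sketch of each, over hypothetical values:

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
		"log"
	)

	func main() {
		// Fixed-width: write then read back a big-endian uint32.
		var buf bytes.Buffer
		if err := binary.Write(&buf, binary.BigEndian, uint32(0xCAFEBABE)); err != nil {
			log.Fatal(err)
		}
		var v uint32
		if err := binary.Read(&buf, binary.BigEndian, &v); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%#x\n", v)

		// Varint: append then decode.
		p := binary.AppendUvarint(nil, 300)
		x, n := binary.Uvarint(p)
		fmt.Println(x, "in", n, "bytes")
	}
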
     	"encoding/csv": {
    -		{"(*ParseError).Error", Method, 0},
    -		{"(*ParseError).Unwrap", Method, 13},
    -		{"(*Reader).FieldPos", Method, 17},
    -		{"(*Reader).InputOffset", Method, 19},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).ReadAll", Method, 0},
    -		{"(*Writer).Error", Method, 1},
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Write", Method, 0},
    -		{"(*Writer).WriteAll", Method, 0},
    -		{"ErrBareQuote", Var, 0},
    -		{"ErrFieldCount", Var, 0},
    -		{"ErrQuote", Var, 0},
    -		{"ErrTrailingComma", Var, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"ParseError", Type, 0},
    -		{"ParseError.Column", Field, 0},
    -		{"ParseError.Err", Field, 0},
    -		{"ParseError.Line", Field, 0},
    -		{"ParseError.StartLine", Field, 10},
    -		{"Reader", Type, 0},
    -		{"Reader.Comma", Field, 0},
    -		{"Reader.Comment", Field, 0},
    -		{"Reader.FieldsPerRecord", Field, 0},
    -		{"Reader.LazyQuotes", Field, 0},
    -		{"Reader.ReuseRecord", Field, 9},
    -		{"Reader.TrailingComma", Field, 0},
    -		{"Reader.TrimLeadingSpace", Field, 0},
    -		{"Writer", Type, 0},
    -		{"Writer.Comma", Field, 0},
    -		{"Writer.UseCRLF", Field, 0},
    +		{"(*ParseError).Error", Method, 0, ""},
    +		{"(*ParseError).Unwrap", Method, 13, ""},
    +		{"(*Reader).FieldPos", Method, 17, ""},
    +		{"(*Reader).InputOffset", Method, 19, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).ReadAll", Method, 0, ""},
    +		{"(*Writer).Error", Method, 1, ""},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"(*Writer).WriteAll", Method, 0, ""},
    +		{"ErrBareQuote", Var, 0, ""},
    +		{"ErrFieldCount", Var, 0, ""},
    +		{"ErrQuote", Var, 0, ""},
    +		{"ErrTrailingComma", Var, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader) *Reader"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"ParseError", Type, 0, ""},
    +		{"ParseError.Column", Field, 0, ""},
    +		{"ParseError.Err", Field, 0, ""},
    +		{"ParseError.Line", Field, 0, ""},
    +		{"ParseError.StartLine", Field, 10, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Reader.Comma", Field, 0, ""},
    +		{"Reader.Comment", Field, 0, ""},
    +		{"Reader.FieldsPerRecord", Field, 0, ""},
    +		{"Reader.LazyQuotes", Field, 0, ""},
    +		{"Reader.ReuseRecord", Field, 9, ""},
    +		{"Reader.TrailingComma", Field, 0, ""},
    +		{"Reader.TrimLeadingSpace", Field, 0, ""},
    +		{"Writer", Type, 0, ""},
    +		{"Writer.Comma", Field, 0, ""},
    +		{"Writer.UseCRLF", Field, 0, ""},
     	},
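
encoding/csv is the usual Reader/Writer pair; a sketch that parses a hypothetical inline record set and writes it back out (WriteAll flushes and surfaces any (*Writer).Error for us):

	package main

	import (
		"encoding/csv"
		"fmt"
		"log"
		"os"
		"strings"
	)

	func main() {
		r := csv.NewReader(strings.NewReader("a,b\nc,d\n")) // hypothetical input
		records, err := r.ReadAll()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(records)

		w := csv.NewWriter(os.Stdout)
		if err := w.WriteAll(records); err != nil { // WriteAll flushes internally
			log.Fatal(err)
		}
	}
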
     	"encoding/gob": {
    -		{"(*Decoder).Decode", Method, 0},
    -		{"(*Decoder).DecodeValue", Method, 0},
    -		{"(*Encoder).Encode", Method, 0},
    -		{"(*Encoder).EncodeValue", Method, 0},
    -		{"CommonType", Type, 0},
    -		{"CommonType.Id", Field, 0},
    -		{"CommonType.Name", Field, 0},
    -		{"Decoder", Type, 0},
    -		{"Encoder", Type, 0},
    -		{"GobDecoder", Type, 0},
    -		{"GobEncoder", Type, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"Register", Func, 0},
    -		{"RegisterName", Func, 0},
    +		{"(*Decoder).Decode", Method, 0, ""},
    +		{"(*Decoder).DecodeValue", Method, 0, ""},
    +		{"(*Encoder).Encode", Method, 0, ""},
    +		{"(*Encoder).EncodeValue", Method, 0, ""},
    +		{"CommonType", Type, 0, ""},
    +		{"CommonType.Id", Field, 0, ""},
    +		{"CommonType.Name", Field, 0, ""},
    +		{"Decoder", Type, 0, ""},
    +		{"Encoder", Type, 0, ""},
    +		{"GobDecoder", Type, 0, ""},
    +		{"GobEncoder", Type, 0, ""},
    +		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
    +		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
    +		{"Register", Func, 0, "func(value any)"},
    +		{"RegisterName", Func, 0, "func(name string, value any)"},
     	},
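
encoding/gob streams values over an io.Writer/io.Reader pair; a minimal in-memory round trip of a hypothetical map through bytes.Buffer:

	package main

	import (
		"bytes"
		"encoding/gob"
		"fmt"
		"log"
	)

	func main() {
		var buf bytes.Buffer
		if err := gob.NewEncoder(&buf).Encode(map[string]int{"a": 1}); err != nil {
			log.Fatal(err)
		}
		var m map[string]int
		if err := gob.NewDecoder(&buf).Decode(&m); err != nil {
			log.Fatal(err)
		}
		fmt.Println(m)
	}
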
     	"encoding/hex": {
    -		{"(InvalidByteError).Error", Method, 0},
    -		{"AppendDecode", Func, 22},
    -		{"AppendEncode", Func, 22},
    -		{"Decode", Func, 0},
    -		{"DecodeString", Func, 0},
    -		{"DecodedLen", Func, 0},
    -		{"Dump", Func, 0},
    -		{"Dumper", Func, 0},
    -		{"Encode", Func, 0},
    -		{"EncodeToString", Func, 0},
    -		{"EncodedLen", Func, 0},
    -		{"ErrLength", Var, 0},
    -		{"InvalidByteError", Type, 0},
    -		{"NewDecoder", Func, 10},
    -		{"NewEncoder", Func, 10},
    +		{"(InvalidByteError).Error", Method, 0, ""},
    +		{"AppendDecode", Func, 22, "func(dst []byte, src []byte) ([]byte, error)"},
    +		{"AppendEncode", Func, 22, "func(dst []byte, src []byte) []byte"},
    +		{"Decode", Func, 0, "func(dst []byte, src []byte) (int, error)"},
    +		{"DecodeString", Func, 0, "func(s string) ([]byte, error)"},
    +		{"DecodedLen", Func, 0, "func(x int) int"},
    +		{"Dump", Func, 0, "func(data []byte) string"},
    +		{"Dumper", Func, 0, "func(w io.Writer) io.WriteCloser"},
    +		{"Encode", Func, 0, "func(dst []byte, src []byte) int"},
    +		{"EncodeToString", Func, 0, "func(src []byte) string"},
    +		{"EncodedLen", Func, 0, "func(n int) int"},
    +		{"ErrLength", Var, 0, ""},
    +		{"InvalidByteError", Type, 0, ""},
    +		{"NewDecoder", Func, 10, "func(r io.Reader) io.Reader"},
    +		{"NewEncoder", Func, 10, "func(w io.Writer) io.Writer"},
     	},
     	"encoding/json": {
    -		{"(*Decoder).Buffered", Method, 1},
    -		{"(*Decoder).Decode", Method, 0},
    -		{"(*Decoder).DisallowUnknownFields", Method, 10},
    -		{"(*Decoder).InputOffset", Method, 14},
    -		{"(*Decoder).More", Method, 5},
    -		{"(*Decoder).Token", Method, 5},
    -		{"(*Decoder).UseNumber", Method, 1},
    -		{"(*Encoder).Encode", Method, 0},
    -		{"(*Encoder).SetEscapeHTML", Method, 7},
    -		{"(*Encoder).SetIndent", Method, 7},
    -		{"(*InvalidUTF8Error).Error", Method, 0},
    -		{"(*InvalidUnmarshalError).Error", Method, 0},
    -		{"(*MarshalerError).Error", Method, 0},
    -		{"(*MarshalerError).Unwrap", Method, 13},
    -		{"(*RawMessage).MarshalJSON", Method, 0},
    -		{"(*RawMessage).UnmarshalJSON", Method, 0},
    -		{"(*SyntaxError).Error", Method, 0},
    -		{"(*UnmarshalFieldError).Error", Method, 0},
    -		{"(*UnmarshalTypeError).Error", Method, 0},
    -		{"(*UnsupportedTypeError).Error", Method, 0},
    -		{"(*UnsupportedValueError).Error", Method, 0},
    -		{"(Delim).String", Method, 5},
    -		{"(Number).Float64", Method, 1},
    -		{"(Number).Int64", Method, 1},
    -		{"(Number).String", Method, 1},
    -		{"(RawMessage).MarshalJSON", Method, 8},
    -		{"Compact", Func, 0},
    -		{"Decoder", Type, 0},
    -		{"Delim", Type, 5},
    -		{"Encoder", Type, 0},
    -		{"HTMLEscape", Func, 0},
    -		{"Indent", Func, 0},
    -		{"InvalidUTF8Error", Type, 0},
    -		{"InvalidUTF8Error.S", Field, 0},
    -		{"InvalidUnmarshalError", Type, 0},
    -		{"InvalidUnmarshalError.Type", Field, 0},
    -		{"Marshal", Func, 0},
    -		{"MarshalIndent", Func, 0},
    -		{"Marshaler", Type, 0},
    -		{"MarshalerError", Type, 0},
    -		{"MarshalerError.Err", Field, 0},
    -		{"MarshalerError.Type", Field, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"Number", Type, 1},
    -		{"RawMessage", Type, 0},
    -		{"SyntaxError", Type, 0},
    -		{"SyntaxError.Offset", Field, 0},
    -		{"Token", Type, 5},
    -		{"Unmarshal", Func, 0},
    -		{"UnmarshalFieldError", Type, 0},
    -		{"UnmarshalFieldError.Field", Field, 0},
    -		{"UnmarshalFieldError.Key", Field, 0},
    -		{"UnmarshalFieldError.Type", Field, 0},
    -		{"UnmarshalTypeError", Type, 0},
    -		{"UnmarshalTypeError.Field", Field, 8},
    -		{"UnmarshalTypeError.Offset", Field, 5},
    -		{"UnmarshalTypeError.Struct", Field, 8},
    -		{"UnmarshalTypeError.Type", Field, 0},
    -		{"UnmarshalTypeError.Value", Field, 0},
    -		{"Unmarshaler", Type, 0},
    -		{"UnsupportedTypeError", Type, 0},
    -		{"UnsupportedTypeError.Type", Field, 0},
    -		{"UnsupportedValueError", Type, 0},
    -		{"UnsupportedValueError.Str", Field, 0},
    -		{"UnsupportedValueError.Value", Field, 0},
    -		{"Valid", Func, 9},
    +		{"(*Decoder).Buffered", Method, 1, ""},
    +		{"(*Decoder).Decode", Method, 0, ""},
    +		{"(*Decoder).DisallowUnknownFields", Method, 10, ""},
    +		{"(*Decoder).InputOffset", Method, 14, ""},
    +		{"(*Decoder).More", Method, 5, ""},
    +		{"(*Decoder).Token", Method, 5, ""},
    +		{"(*Decoder).UseNumber", Method, 1, ""},
    +		{"(*Encoder).Encode", Method, 0, ""},
    +		{"(*Encoder).SetEscapeHTML", Method, 7, ""},
    +		{"(*Encoder).SetIndent", Method, 7, ""},
    +		{"(*InvalidUTF8Error).Error", Method, 0, ""},
    +		{"(*InvalidUnmarshalError).Error", Method, 0, ""},
    +		{"(*MarshalerError).Error", Method, 0, ""},
    +		{"(*MarshalerError).Unwrap", Method, 13, ""},
    +		{"(*RawMessage).MarshalJSON", Method, 0, ""},
    +		{"(*RawMessage).UnmarshalJSON", Method, 0, ""},
    +		{"(*SyntaxError).Error", Method, 0, ""},
    +		{"(*UnmarshalFieldError).Error", Method, 0, ""},
    +		{"(*UnmarshalTypeError).Error", Method, 0, ""},
    +		{"(*UnsupportedTypeError).Error", Method, 0, ""},
    +		{"(*UnsupportedValueError).Error", Method, 0, ""},
    +		{"(Delim).String", Method, 5, ""},
    +		{"(Number).Float64", Method, 1, ""},
    +		{"(Number).Int64", Method, 1, ""},
    +		{"(Number).String", Method, 1, ""},
    +		{"(RawMessage).MarshalJSON", Method, 8, ""},
    +		{"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"},
    +		{"Decoder", Type, 0, ""},
    +		{"Delim", Type, 5, ""},
    +		{"Encoder", Type, 0, ""},
    +		{"HTMLEscape", Func, 0, "func(dst *bytes.Buffer, src []byte)"},
    +		{"Indent", Func, 0, "func(dst *bytes.Buffer, src []byte, prefix string, indent string) error"},
    +		{"InvalidUTF8Error", Type, 0, ""},
    +		{"InvalidUTF8Error.S", Field, 0, ""},
    +		{"InvalidUnmarshalError", Type, 0, ""},
    +		{"InvalidUnmarshalError.Type", Field, 0, ""},
    +		{"Marshal", Func, 0, "func(v any) ([]byte, error)"},
    +		{"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
    +		{"Marshaler", Type, 0, ""},
    +		{"MarshalerError", Type, 0, ""},
    +		{"MarshalerError.Err", Field, 0, ""},
    +		{"MarshalerError.Type", Field, 0, ""},
    +		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
    +		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
    +		{"Number", Type, 1, ""},
    +		{"RawMessage", Type, 0, ""},
    +		{"SyntaxError", Type, 0, ""},
    +		{"SyntaxError.Offset", Field, 0, ""},
    +		{"Token", Type, 5, ""},
    +		{"Unmarshal", Func, 0, "func(data []byte, v any) error"},
    +		{"UnmarshalFieldError", Type, 0, ""},
    +		{"UnmarshalFieldError.Field", Field, 0, ""},
    +		{"UnmarshalFieldError.Key", Field, 0, ""},
    +		{"UnmarshalFieldError.Type", Field, 0, ""},
    +		{"UnmarshalTypeError", Type, 0, ""},
    +		{"UnmarshalTypeError.Field", Field, 8, ""},
    +		{"UnmarshalTypeError.Offset", Field, 5, ""},
    +		{"UnmarshalTypeError.Struct", Field, 8, ""},
    +		{"UnmarshalTypeError.Type", Field, 0, ""},
    +		{"UnmarshalTypeError.Value", Field, 0, ""},
    +		{"Unmarshaler", Type, 0, ""},
    +		{"UnsupportedTypeError", Type, 0, ""},
    +		{"UnsupportedTypeError.Type", Field, 0, ""},
    +		{"UnsupportedValueError", Type, 0, ""},
    +		{"UnsupportedValueError.Str", Field, 0, ""},
    +		{"UnsupportedValueError.Value", Field, 0, ""},
    +		{"Valid", Func, 9, "func(data []byte) bool"},
     	},
     	"encoding/pem": {
    -		{"Block", Type, 0},
    -		{"Block.Bytes", Field, 0},
    -		{"Block.Headers", Field, 0},
    -		{"Block.Type", Field, 0},
    -		{"Decode", Func, 0},
    -		{"Encode", Func, 0},
    -		{"EncodeToMemory", Func, 0},
    +		{"Block", Type, 0, ""},
    +		{"Block.Bytes", Field, 0, ""},
    +		{"Block.Headers", Field, 0, ""},
    +		{"Block.Type", Field, 0, ""},
    +		{"Decode", Func, 0, "func(data []byte) (p *Block, rest []byte)"},
    +		{"Encode", Func, 0, "func(out io.Writer, b *Block) error"},
    +		{"EncodeToMemory", Func, 0, "func(b *Block) []byte"},
     	},
     	"encoding/xml": {
    -		{"(*Decoder).Decode", Method, 0},
    -		{"(*Decoder).DecodeElement", Method, 0},
    -		{"(*Decoder).InputOffset", Method, 4},
    -		{"(*Decoder).InputPos", Method, 19},
    -		{"(*Decoder).RawToken", Method, 0},
    -		{"(*Decoder).Skip", Method, 0},
    -		{"(*Decoder).Token", Method, 0},
    -		{"(*Encoder).Close", Method, 20},
    -		{"(*Encoder).Encode", Method, 0},
    -		{"(*Encoder).EncodeElement", Method, 2},
    -		{"(*Encoder).EncodeToken", Method, 2},
    -		{"(*Encoder).Flush", Method, 2},
    -		{"(*Encoder).Indent", Method, 1},
    -		{"(*SyntaxError).Error", Method, 0},
    -		{"(*TagPathError).Error", Method, 0},
    -		{"(*UnsupportedTypeError).Error", Method, 0},
    -		{"(CharData).Copy", Method, 0},
    -		{"(Comment).Copy", Method, 0},
    -		{"(Directive).Copy", Method, 0},
    -		{"(ProcInst).Copy", Method, 0},
    -		{"(StartElement).Copy", Method, 0},
    -		{"(StartElement).End", Method, 2},
    -		{"(UnmarshalError).Error", Method, 0},
    -		{"Attr", Type, 0},
    -		{"Attr.Name", Field, 0},
    -		{"Attr.Value", Field, 0},
    -		{"CharData", Type, 0},
    -		{"Comment", Type, 0},
    -		{"CopyToken", Func, 0},
    -		{"Decoder", Type, 0},
    -		{"Decoder.AutoClose", Field, 0},
    -		{"Decoder.CharsetReader", Field, 0},
    -		{"Decoder.DefaultSpace", Field, 1},
    -		{"Decoder.Entity", Field, 0},
    -		{"Decoder.Strict", Field, 0},
    -		{"Directive", Type, 0},
    -		{"Encoder", Type, 0},
    -		{"EndElement", Type, 0},
    -		{"EndElement.Name", Field, 0},
    -		{"Escape", Func, 0},
    -		{"EscapeText", Func, 1},
    -		{"HTMLAutoClose", Var, 0},
    -		{"HTMLEntity", Var, 0},
    -		{"Header", Const, 0},
    -		{"Marshal", Func, 0},
    -		{"MarshalIndent", Func, 0},
    -		{"Marshaler", Type, 2},
    -		{"MarshalerAttr", Type, 2},
    -		{"Name", Type, 0},
    -		{"Name.Local", Field, 0},
    -		{"Name.Space", Field, 0},
    -		{"NewDecoder", Func, 0},
    -		{"NewEncoder", Func, 0},
    -		{"NewTokenDecoder", Func, 10},
    -		{"ProcInst", Type, 0},
    -		{"ProcInst.Inst", Field, 0},
    -		{"ProcInst.Target", Field, 0},
    -		{"StartElement", Type, 0},
    -		{"StartElement.Attr", Field, 0},
    -		{"StartElement.Name", Field, 0},
    -		{"SyntaxError", Type, 0},
    -		{"SyntaxError.Line", Field, 0},
    -		{"SyntaxError.Msg", Field, 0},
    -		{"TagPathError", Type, 0},
    -		{"TagPathError.Field1", Field, 0},
    -		{"TagPathError.Field2", Field, 0},
    -		{"TagPathError.Struct", Field, 0},
    -		{"TagPathError.Tag1", Field, 0},
    -		{"TagPathError.Tag2", Field, 0},
    -		{"Token", Type, 0},
    -		{"TokenReader", Type, 10},
    -		{"Unmarshal", Func, 0},
    -		{"UnmarshalError", Type, 0},
    -		{"Unmarshaler", Type, 2},
    -		{"UnmarshalerAttr", Type, 2},
    -		{"UnsupportedTypeError", Type, 0},
    -		{"UnsupportedTypeError.Type", Field, 0},
    +		{"(*Decoder).Decode", Method, 0, ""},
    +		{"(*Decoder).DecodeElement", Method, 0, ""},
    +		{"(*Decoder).InputOffset", Method, 4, ""},
    +		{"(*Decoder).InputPos", Method, 19, ""},
    +		{"(*Decoder).RawToken", Method, 0, ""},
    +		{"(*Decoder).Skip", Method, 0, ""},
    +		{"(*Decoder).Token", Method, 0, ""},
    +		{"(*Encoder).Close", Method, 20, ""},
    +		{"(*Encoder).Encode", Method, 0, ""},
    +		{"(*Encoder).EncodeElement", Method, 2, ""},
    +		{"(*Encoder).EncodeToken", Method, 2, ""},
    +		{"(*Encoder).Flush", Method, 2, ""},
    +		{"(*Encoder).Indent", Method, 1, ""},
    +		{"(*SyntaxError).Error", Method, 0, ""},
    +		{"(*TagPathError).Error", Method, 0, ""},
    +		{"(*UnsupportedTypeError).Error", Method, 0, ""},
    +		{"(CharData).Copy", Method, 0, ""},
    +		{"(Comment).Copy", Method, 0, ""},
    +		{"(Directive).Copy", Method, 0, ""},
    +		{"(ProcInst).Copy", Method, 0, ""},
    +		{"(StartElement).Copy", Method, 0, ""},
    +		{"(StartElement).End", Method, 2, ""},
    +		{"(UnmarshalError).Error", Method, 0, ""},
    +		{"Attr", Type, 0, ""},
    +		{"Attr.Name", Field, 0, ""},
    +		{"Attr.Value", Field, 0, ""},
    +		{"CharData", Type, 0, ""},
    +		{"Comment", Type, 0, ""},
    +		{"CopyToken", Func, 0, "func(t Token) Token"},
    +		{"Decoder", Type, 0, ""},
    +		{"Decoder.AutoClose", Field, 0, ""},
    +		{"Decoder.CharsetReader", Field, 0, ""},
    +		{"Decoder.DefaultSpace", Field, 1, ""},
    +		{"Decoder.Entity", Field, 0, ""},
    +		{"Decoder.Strict", Field, 0, ""},
    +		{"Directive", Type, 0, ""},
    +		{"Encoder", Type, 0, ""},
    +		{"EndElement", Type, 0, ""},
    +		{"EndElement.Name", Field, 0, ""},
    +		{"Escape", Func, 0, "func(w io.Writer, s []byte)"},
    +		{"EscapeText", Func, 1, "func(w io.Writer, s []byte) error"},
    +		{"HTMLAutoClose", Var, 0, ""},
    +		{"HTMLEntity", Var, 0, ""},
    +		{"Header", Const, 0, ""},
    +		{"Marshal", Func, 0, "func(v any) ([]byte, error)"},
    +		{"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
    +		{"Marshaler", Type, 2, ""},
    +		{"MarshalerAttr", Type, 2, ""},
    +		{"Name", Type, 0, ""},
    +		{"Name.Local", Field, 0, ""},
    +		{"Name.Space", Field, 0, ""},
    +		{"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
    +		{"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
    +		{"NewTokenDecoder", Func, 10, "func(t TokenReader) *Decoder"},
    +		{"ProcInst", Type, 0, ""},
    +		{"ProcInst.Inst", Field, 0, ""},
    +		{"ProcInst.Target", Field, 0, ""},
    +		{"StartElement", Type, 0, ""},
    +		{"StartElement.Attr", Field, 0, ""},
    +		{"StartElement.Name", Field, 0, ""},
    +		{"SyntaxError", Type, 0, ""},
    +		{"SyntaxError.Line", Field, 0, ""},
    +		{"SyntaxError.Msg", Field, 0, ""},
    +		{"TagPathError", Type, 0, ""},
    +		{"TagPathError.Field1", Field, 0, ""},
    +		{"TagPathError.Field2", Field, 0, ""},
    +		{"TagPathError.Struct", Field, 0, ""},
    +		{"TagPathError.Tag1", Field, 0, ""},
    +		{"TagPathError.Tag2", Field, 0, ""},
    +		{"Token", Type, 0, ""},
    +		{"TokenReader", Type, 10, ""},
    +		{"Unmarshal", Func, 0, "func(data []byte, v any) error"},
    +		{"UnmarshalError", Type, 0, ""},
    +		{"Unmarshaler", Type, 2, ""},
    +		{"UnmarshalerAttr", Type, 2, ""},
    +		{"UnsupportedTypeError", Type, 0, ""},
    +		{"UnsupportedTypeError.Type", Field, 0, ""},
     	},
     	"errors": {
    -		{"As", Func, 13},
    -		{"ErrUnsupported", Var, 21},
    -		{"Is", Func, 13},
    -		{"Join", Func, 20},
    -		{"New", Func, 0},
    -		{"Unwrap", Func, 13},
    +		{"As", Func, 13, "func(err error, target any) bool"},
    +		{"ErrUnsupported", Var, 21, ""},
    +		{"Is", Func, 13, "func(err error, target error) bool"},
    +		{"Join", Func, 20, "func(errs ...error) error"},
    +		{"New", Func, 0, "func(text string) error"},
    +		{"Unwrap", Func, 13, "func(err error) error"},
     	},
     	"expvar": {
    -		{"(*Float).Add", Method, 0},
    -		{"(*Float).Set", Method, 0},
    -		{"(*Float).String", Method, 0},
    -		{"(*Float).Value", Method, 8},
    -		{"(*Int).Add", Method, 0},
    -		{"(*Int).Set", Method, 0},
    -		{"(*Int).String", Method, 0},
    -		{"(*Int).Value", Method, 8},
    -		{"(*Map).Add", Method, 0},
    -		{"(*Map).AddFloat", Method, 0},
    -		{"(*Map).Delete", Method, 12},
    -		{"(*Map).Do", Method, 0},
    -		{"(*Map).Get", Method, 0},
    -		{"(*Map).Init", Method, 0},
    -		{"(*Map).Set", Method, 0},
    -		{"(*Map).String", Method, 0},
    -		{"(*String).Set", Method, 0},
    -		{"(*String).String", Method, 0},
    -		{"(*String).Value", Method, 8},
    -		{"(Func).String", Method, 0},
    -		{"(Func).Value", Method, 8},
    -		{"Do", Func, 0},
    -		{"Float", Type, 0},
    -		{"Func", Type, 0},
    -		{"Get", Func, 0},
    -		{"Handler", Func, 8},
    -		{"Int", Type, 0},
    -		{"KeyValue", Type, 0},
    -		{"KeyValue.Key", Field, 0},
    -		{"KeyValue.Value", Field, 0},
    -		{"Map", Type, 0},
    -		{"NewFloat", Func, 0},
    -		{"NewInt", Func, 0},
    -		{"NewMap", Func, 0},
    -		{"NewString", Func, 0},
    -		{"Publish", Func, 0},
    -		{"String", Type, 0},
    -		{"Var", Type, 0},
    +		{"(*Float).Add", Method, 0, ""},
    +		{"(*Float).Set", Method, 0, ""},
    +		{"(*Float).String", Method, 0, ""},
    +		{"(*Float).Value", Method, 8, ""},
    +		{"(*Int).Add", Method, 0, ""},
    +		{"(*Int).Set", Method, 0, ""},
    +		{"(*Int).String", Method, 0, ""},
    +		{"(*Int).Value", Method, 8, ""},
    +		{"(*Map).Add", Method, 0, ""},
    +		{"(*Map).AddFloat", Method, 0, ""},
    +		{"(*Map).Delete", Method, 12, ""},
    +		{"(*Map).Do", Method, 0, ""},
    +		{"(*Map).Get", Method, 0, ""},
    +		{"(*Map).Init", Method, 0, ""},
    +		{"(*Map).Set", Method, 0, ""},
    +		{"(*Map).String", Method, 0, ""},
    +		{"(*String).Set", Method, 0, ""},
    +		{"(*String).String", Method, 0, ""},
    +		{"(*String).Value", Method, 8, ""},
    +		{"(Func).String", Method, 0, ""},
    +		{"(Func).Value", Method, 8, ""},
    +		{"Do", Func, 0, "func(f func(KeyValue))"},
    +		{"Float", Type, 0, ""},
    +		{"Func", Type, 0, ""},
    +		{"Get", Func, 0, "func(name string) Var"},
    +		{"Handler", Func, 8, "func() http.Handler"},
    +		{"Int", Type, 0, ""},
    +		{"KeyValue", Type, 0, ""},
    +		{"KeyValue.Key", Field, 0, ""},
    +		{"KeyValue.Value", Field, 0, ""},
    +		{"Map", Type, 0, ""},
    +		{"NewFloat", Func, 0, "func(name string) *Float"},
    +		{"NewInt", Func, 0, "func(name string) *Int"},
    +		{"NewMap", Func, 0, "func(name string) *Map"},
    +		{"NewString", Func, 0, "func(name string) *String"},
    +		{"Publish", Func, 0, "func(name string, v Var)"},
    +		{"String", Type, 0, ""},
    +		{"Var", Type, 0, ""},
     	},
     	"flag": {
    -		{"(*FlagSet).Arg", Method, 0},
    -		{"(*FlagSet).Args", Method, 0},
    -		{"(*FlagSet).Bool", Method, 0},
    -		{"(*FlagSet).BoolFunc", Method, 21},
    -		{"(*FlagSet).BoolVar", Method, 0},
    -		{"(*FlagSet).Duration", Method, 0},
    -		{"(*FlagSet).DurationVar", Method, 0},
    -		{"(*FlagSet).ErrorHandling", Method, 10},
    -		{"(*FlagSet).Float64", Method, 0},
    -		{"(*FlagSet).Float64Var", Method, 0},
    -		{"(*FlagSet).Func", Method, 16},
    -		{"(*FlagSet).Init", Method, 0},
    -		{"(*FlagSet).Int", Method, 0},
    -		{"(*FlagSet).Int64", Method, 0},
    -		{"(*FlagSet).Int64Var", Method, 0},
    -		{"(*FlagSet).IntVar", Method, 0},
    -		{"(*FlagSet).Lookup", Method, 0},
    -		{"(*FlagSet).NArg", Method, 0},
    -		{"(*FlagSet).NFlag", Method, 0},
    -		{"(*FlagSet).Name", Method, 10},
    -		{"(*FlagSet).Output", Method, 10},
    -		{"(*FlagSet).Parse", Method, 0},
    -		{"(*FlagSet).Parsed", Method, 0},
    -		{"(*FlagSet).PrintDefaults", Method, 0},
    -		{"(*FlagSet).Set", Method, 0},
    -		{"(*FlagSet).SetOutput", Method, 0},
    -		{"(*FlagSet).String", Method, 0},
    -		{"(*FlagSet).StringVar", Method, 0},
    -		{"(*FlagSet).TextVar", Method, 19},
    -		{"(*FlagSet).Uint", Method, 0},
    -		{"(*FlagSet).Uint64", Method, 0},
    -		{"(*FlagSet).Uint64Var", Method, 0},
    -		{"(*FlagSet).UintVar", Method, 0},
    -		{"(*FlagSet).Var", Method, 0},
    -		{"(*FlagSet).Visit", Method, 0},
    -		{"(*FlagSet).VisitAll", Method, 0},
    -		{"Arg", Func, 0},
    -		{"Args", Func, 0},
    -		{"Bool", Func, 0},
    -		{"BoolFunc", Func, 21},
    -		{"BoolVar", Func, 0},
    -		{"CommandLine", Var, 2},
    -		{"ContinueOnError", Const, 0},
    -		{"Duration", Func, 0},
    -		{"DurationVar", Func, 0},
    -		{"ErrHelp", Var, 0},
    -		{"ErrorHandling", Type, 0},
    -		{"ExitOnError", Const, 0},
    -		{"Flag", Type, 0},
    -		{"Flag.DefValue", Field, 0},
    -		{"Flag.Name", Field, 0},
    -		{"Flag.Usage", Field, 0},
    -		{"Flag.Value", Field, 0},
    -		{"FlagSet", Type, 0},
    -		{"FlagSet.Usage", Field, 0},
    -		{"Float64", Func, 0},
    -		{"Float64Var", Func, 0},
    -		{"Func", Func, 16},
    -		{"Getter", Type, 2},
    -		{"Int", Func, 0},
    -		{"Int64", Func, 0},
    -		{"Int64Var", Func, 0},
    -		{"IntVar", Func, 0},
    -		{"Lookup", Func, 0},
    -		{"NArg", Func, 0},
    -		{"NFlag", Func, 0},
    -		{"NewFlagSet", Func, 0},
    -		{"PanicOnError", Const, 0},
    -		{"Parse", Func, 0},
    -		{"Parsed", Func, 0},
    -		{"PrintDefaults", Func, 0},
    -		{"Set", Func, 0},
    -		{"String", Func, 0},
    -		{"StringVar", Func, 0},
    -		{"TextVar", Func, 19},
    -		{"Uint", Func, 0},
    -		{"Uint64", Func, 0},
    -		{"Uint64Var", Func, 0},
    -		{"UintVar", Func, 0},
    -		{"UnquoteUsage", Func, 5},
    -		{"Usage", Var, 0},
    -		{"Value", Type, 0},
    -		{"Var", Func, 0},
    -		{"Visit", Func, 0},
    -		{"VisitAll", Func, 0},
    +		{"(*FlagSet).Arg", Method, 0, ""},
    +		{"(*FlagSet).Args", Method, 0, ""},
    +		{"(*FlagSet).Bool", Method, 0, ""},
    +		{"(*FlagSet).BoolFunc", Method, 21, ""},
    +		{"(*FlagSet).BoolVar", Method, 0, ""},
    +		{"(*FlagSet).Duration", Method, 0, ""},
    +		{"(*FlagSet).DurationVar", Method, 0, ""},
    +		{"(*FlagSet).ErrorHandling", Method, 10, ""},
    +		{"(*FlagSet).Float64", Method, 0, ""},
    +		{"(*FlagSet).Float64Var", Method, 0, ""},
    +		{"(*FlagSet).Func", Method, 16, ""},
    +		{"(*FlagSet).Init", Method, 0, ""},
    +		{"(*FlagSet).Int", Method, 0, ""},
    +		{"(*FlagSet).Int64", Method, 0, ""},
    +		{"(*FlagSet).Int64Var", Method, 0, ""},
    +		{"(*FlagSet).IntVar", Method, 0, ""},
    +		{"(*FlagSet).Lookup", Method, 0, ""},
    +		{"(*FlagSet).NArg", Method, 0, ""},
    +		{"(*FlagSet).NFlag", Method, 0, ""},
    +		{"(*FlagSet).Name", Method, 10, ""},
    +		{"(*FlagSet).Output", Method, 10, ""},
    +		{"(*FlagSet).Parse", Method, 0, ""},
    +		{"(*FlagSet).Parsed", Method, 0, ""},
    +		{"(*FlagSet).PrintDefaults", Method, 0, ""},
    +		{"(*FlagSet).Set", Method, 0, ""},
    +		{"(*FlagSet).SetOutput", Method, 0, ""},
    +		{"(*FlagSet).String", Method, 0, ""},
    +		{"(*FlagSet).StringVar", Method, 0, ""},
    +		{"(*FlagSet).TextVar", Method, 19, ""},
    +		{"(*FlagSet).Uint", Method, 0, ""},
    +		{"(*FlagSet).Uint64", Method, 0, ""},
    +		{"(*FlagSet).Uint64Var", Method, 0, ""},
    +		{"(*FlagSet).UintVar", Method, 0, ""},
    +		{"(*FlagSet).Var", Method, 0, ""},
    +		{"(*FlagSet).Visit", Method, 0, ""},
    +		{"(*FlagSet).VisitAll", Method, 0, ""},
    +		{"Arg", Func, 0, "func(i int) string"},
    +		{"Args", Func, 0, "func() []string"},
    +		{"Bool", Func, 0, "func(name string, value bool, usage string) *bool"},
    +		{"BoolFunc", Func, 21, "func(name string, usage string, fn func(string) error)"},
    +		{"BoolVar", Func, 0, "func(p *bool, name string, value bool, usage string)"},
    +		{"CommandLine", Var, 2, ""},
    +		{"ContinueOnError", Const, 0, ""},
    +		{"Duration", Func, 0, "func(name string, value time.Duration, usage string) *time.Duration"},
    +		{"DurationVar", Func, 0, "func(p *time.Duration, name string, value time.Duration, usage string)"},
    +		{"ErrHelp", Var, 0, ""},
    +		{"ErrorHandling", Type, 0, ""},
    +		{"ExitOnError", Const, 0, ""},
    +		{"Flag", Type, 0, ""},
    +		{"Flag.DefValue", Field, 0, ""},
    +		{"Flag.Name", Field, 0, ""},
    +		{"Flag.Usage", Field, 0, ""},
    +		{"Flag.Value", Field, 0, ""},
    +		{"FlagSet", Type, 0, ""},
    +		{"FlagSet.Usage", Field, 0, ""},
    +		{"Float64", Func, 0, "func(name string, value float64, usage string) *float64"},
    +		{"Float64Var", Func, 0, "func(p *float64, name string, value float64, usage string)"},
    +		{"Func", Func, 16, "func(name string, usage string, fn func(string) error)"},
    +		{"Getter", Type, 2, ""},
    +		{"Int", Func, 0, "func(name string, value int, usage string) *int"},
    +		{"Int64", Func, 0, "func(name string, value int64, usage string) *int64"},
    +		{"Int64Var", Func, 0, "func(p *int64, name string, value int64, usage string)"},
    +		{"IntVar", Func, 0, "func(p *int, name string, value int, usage string)"},
    +		{"Lookup", Func, 0, "func(name string) *Flag"},
    +		{"NArg", Func, 0, "func() int"},
    +		{"NFlag", Func, 0, "func() int"},
    +		{"NewFlagSet", Func, 0, "func(name string, errorHandling ErrorHandling) *FlagSet"},
    +		{"PanicOnError", Const, 0, ""},
    +		{"Parse", Func, 0, "func()"},
    +		{"Parsed", Func, 0, "func() bool"},
    +		{"PrintDefaults", Func, 0, "func()"},
    +		{"Set", Func, 0, "func(name string, value string) error"},
    +		{"String", Func, 0, "func(name string, value string, usage string) *string"},
    +		{"StringVar", Func, 0, "func(p *string, name string, value string, usage string)"},
    +		{"TextVar", Func, 19, "func(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string)"},
    +		{"Uint", Func, 0, "func(name string, value uint, usage string) *uint"},
    +		{"Uint64", Func, 0, "func(name string, value uint64, usage string) *uint64"},
    +		{"Uint64Var", Func, 0, "func(p *uint64, name string, value uint64, usage string)"},
    +		{"UintVar", Func, 0, "func(p *uint, name string, value uint, usage string)"},
    +		{"UnquoteUsage", Func, 5, "func(flag *Flag) (name string, usage string)"},
    +		{"Usage", Var, 0, ""},
    +		{"Value", Type, 0, ""},
    +		{"Var", Func, 0, "func(value Value, name string, usage string)"},
    +		{"Visit", Func, 0, "func(fn func(*Flag))"},
    +		{"VisitAll", Func, 0, "func(fn func(*Flag))"},
     	},
     	"fmt": {
    -		{"Append", Func, 19},
    -		{"Appendf", Func, 19},
    -		{"Appendln", Func, 19},
    -		{"Errorf", Func, 0},
    -		{"FormatString", Func, 20},
    -		{"Formatter", Type, 0},
    -		{"Fprint", Func, 0},
    -		{"Fprintf", Func, 0},
    -		{"Fprintln", Func, 0},
    -		{"Fscan", Func, 0},
    -		{"Fscanf", Func, 0},
    -		{"Fscanln", Func, 0},
    -		{"GoStringer", Type, 0},
    -		{"Print", Func, 0},
    -		{"Printf", Func, 0},
    -		{"Println", Func, 0},
    -		{"Scan", Func, 0},
    -		{"ScanState", Type, 0},
    -		{"Scanf", Func, 0},
    -		{"Scanln", Func, 0},
    -		{"Scanner", Type, 0},
    -		{"Sprint", Func, 0},
    -		{"Sprintf", Func, 0},
    -		{"Sprintln", Func, 0},
    -		{"Sscan", Func, 0},
    -		{"Sscanf", Func, 0},
    -		{"Sscanln", Func, 0},
    -		{"State", Type, 0},
    -		{"Stringer", Type, 0},
    +		{"Append", Func, 19, "func(b []byte, a ...any) []byte"},
    +		{"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"},
    +		{"Appendln", Func, 19, "func(b []byte, a ...any) []byte"},
    +		{"Errorf", Func, 0, "func(format string, a ...any) error"},
    +		{"FormatString", Func, 20, "func(state State, verb rune) string"},
    +		{"Formatter", Type, 0, ""},
    +		{"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
    +		{"Fprintf", Func, 0, "func(w io.Writer, format string, a ...any) (n int, err error)"},
    +		{"Fprintln", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
    +		{"Fscan", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
    +		{"Fscanf", Func, 0, "func(r io.Reader, format string, a ...any) (n int, err error)"},
    +		{"Fscanln", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
    +		{"GoStringer", Type, 0, ""},
    +		{"Print", Func, 0, "func(a ...any) (n int, err error)"},
    +		{"Printf", Func, 0, "func(format string, a ...any) (n int, err error)"},
    +		{"Println", Func, 0, "func(a ...any) (n int, err error)"},
    +		{"Scan", Func, 0, "func(a ...any) (n int, err error)"},
    +		{"ScanState", Type, 0, ""},
    +		{"Scanf", Func, 0, "func(format string, a ...any) (n int, err error)"},
    +		{"Scanln", Func, 0, "func(a ...any) (n int, err error)"},
    +		{"Scanner", Type, 0, ""},
    +		{"Sprint", Func, 0, "func(a ...any) string"},
    +		{"Sprintf", Func, 0, "func(format string, a ...any) string"},
    +		{"Sprintln", Func, 0, "func(a ...any) string"},
    +		{"Sscan", Func, 0, "func(str string, a ...any) (n int, err error)"},
    +		{"Sscanf", Func, 0, "func(str string, format string, a ...any) (n int, err error)"},
    +		{"Sscanln", Func, 0, "func(str string, a ...any) (n int, err error)"},
    +		{"State", Type, 0, ""},
    +		{"Stringer", Type, 0, ""},
     	},
     	"go/ast": {
    -		{"(*ArrayType).End", Method, 0},
    -		{"(*ArrayType).Pos", Method, 0},
    -		{"(*AssignStmt).End", Method, 0},
    -		{"(*AssignStmt).Pos", Method, 0},
    -		{"(*BadDecl).End", Method, 0},
    -		{"(*BadDecl).Pos", Method, 0},
    -		{"(*BadExpr).End", Method, 0},
    -		{"(*BadExpr).Pos", Method, 0},
    -		{"(*BadStmt).End", Method, 0},
    -		{"(*BadStmt).Pos", Method, 0},
    -		{"(*BasicLit).End", Method, 0},
    -		{"(*BasicLit).Pos", Method, 0},
    -		{"(*BinaryExpr).End", Method, 0},
    -		{"(*BinaryExpr).Pos", Method, 0},
    -		{"(*BlockStmt).End", Method, 0},
    -		{"(*BlockStmt).Pos", Method, 0},
    -		{"(*BranchStmt).End", Method, 0},
    -		{"(*BranchStmt).Pos", Method, 0},
    -		{"(*CallExpr).End", Method, 0},
    -		{"(*CallExpr).Pos", Method, 0},
    -		{"(*CaseClause).End", Method, 0},
    -		{"(*CaseClause).Pos", Method, 0},
    -		{"(*ChanType).End", Method, 0},
    -		{"(*ChanType).Pos", Method, 0},
    -		{"(*CommClause).End", Method, 0},
    -		{"(*CommClause).Pos", Method, 0},
    -		{"(*Comment).End", Method, 0},
    -		{"(*Comment).Pos", Method, 0},
    -		{"(*CommentGroup).End", Method, 0},
    -		{"(*CommentGroup).Pos", Method, 0},
    -		{"(*CommentGroup).Text", Method, 0},
    -		{"(*CompositeLit).End", Method, 0},
    -		{"(*CompositeLit).Pos", Method, 0},
    -		{"(*DeclStmt).End", Method, 0},
    -		{"(*DeclStmt).Pos", Method, 0},
    -		{"(*DeferStmt).End", Method, 0},
    -		{"(*DeferStmt).Pos", Method, 0},
    -		{"(*Ellipsis).End", Method, 0},
    -		{"(*Ellipsis).Pos", Method, 0},
    -		{"(*EmptyStmt).End", Method, 0},
    -		{"(*EmptyStmt).Pos", Method, 0},
    -		{"(*ExprStmt).End", Method, 0},
    -		{"(*ExprStmt).Pos", Method, 0},
    -		{"(*Field).End", Method, 0},
    -		{"(*Field).Pos", Method, 0},
    -		{"(*FieldList).End", Method, 0},
    -		{"(*FieldList).NumFields", Method, 0},
    -		{"(*FieldList).Pos", Method, 0},
    -		{"(*File).End", Method, 0},
    -		{"(*File).Pos", Method, 0},
    -		{"(*ForStmt).End", Method, 0},
    -		{"(*ForStmt).Pos", Method, 0},
    -		{"(*FuncDecl).End", Method, 0},
    -		{"(*FuncDecl).Pos", Method, 0},
    -		{"(*FuncLit).End", Method, 0},
    -		{"(*FuncLit).Pos", Method, 0},
    -		{"(*FuncType).End", Method, 0},
    -		{"(*FuncType).Pos", Method, 0},
    -		{"(*GenDecl).End", Method, 0},
    -		{"(*GenDecl).Pos", Method, 0},
    -		{"(*GoStmt).End", Method, 0},
    -		{"(*GoStmt).Pos", Method, 0},
    -		{"(*Ident).End", Method, 0},
    -		{"(*Ident).IsExported", Method, 0},
    -		{"(*Ident).Pos", Method, 0},
    -		{"(*Ident).String", Method, 0},
    -		{"(*IfStmt).End", Method, 0},
    -		{"(*IfStmt).Pos", Method, 0},
    -		{"(*ImportSpec).End", Method, 0},
    -		{"(*ImportSpec).Pos", Method, 0},
    -		{"(*IncDecStmt).End", Method, 0},
    -		{"(*IncDecStmt).Pos", Method, 0},
    -		{"(*IndexExpr).End", Method, 0},
    -		{"(*IndexExpr).Pos", Method, 0},
    -		{"(*IndexListExpr).End", Method, 18},
    -		{"(*IndexListExpr).Pos", Method, 18},
    -		{"(*InterfaceType).End", Method, 0},
    -		{"(*InterfaceType).Pos", Method, 0},
    -		{"(*KeyValueExpr).End", Method, 0},
    -		{"(*KeyValueExpr).Pos", Method, 0},
    -		{"(*LabeledStmt).End", Method, 0},
    -		{"(*LabeledStmt).Pos", Method, 0},
    -		{"(*MapType).End", Method, 0},
    -		{"(*MapType).Pos", Method, 0},
    -		{"(*Object).Pos", Method, 0},
    -		{"(*Package).End", Method, 0},
    -		{"(*Package).Pos", Method, 0},
    -		{"(*ParenExpr).End", Method, 0},
    -		{"(*ParenExpr).Pos", Method, 0},
    -		{"(*RangeStmt).End", Method, 0},
    -		{"(*RangeStmt).Pos", Method, 0},
    -		{"(*ReturnStmt).End", Method, 0},
    -		{"(*ReturnStmt).Pos", Method, 0},
    -		{"(*Scope).Insert", Method, 0},
    -		{"(*Scope).Lookup", Method, 0},
    -		{"(*Scope).String", Method, 0},
    -		{"(*SelectStmt).End", Method, 0},
    -		{"(*SelectStmt).Pos", Method, 0},
    -		{"(*SelectorExpr).End", Method, 0},
    -		{"(*SelectorExpr).Pos", Method, 0},
    -		{"(*SendStmt).End", Method, 0},
    -		{"(*SendStmt).Pos", Method, 0},
    -		{"(*SliceExpr).End", Method, 0},
    -		{"(*SliceExpr).Pos", Method, 0},
    -		{"(*StarExpr).End", Method, 0},
    -		{"(*StarExpr).Pos", Method, 0},
    -		{"(*StructType).End", Method, 0},
    -		{"(*StructType).Pos", Method, 0},
    -		{"(*SwitchStmt).End", Method, 0},
    -		{"(*SwitchStmt).Pos", Method, 0},
    -		{"(*TypeAssertExpr).End", Method, 0},
    -		{"(*TypeAssertExpr).Pos", Method, 0},
    -		{"(*TypeSpec).End", Method, 0},
    -		{"(*TypeSpec).Pos", Method, 0},
    -		{"(*TypeSwitchStmt).End", Method, 0},
    -		{"(*TypeSwitchStmt).Pos", Method, 0},
    -		{"(*UnaryExpr).End", Method, 0},
    -		{"(*UnaryExpr).Pos", Method, 0},
    -		{"(*ValueSpec).End", Method, 0},
    -		{"(*ValueSpec).Pos", Method, 0},
    -		{"(CommentMap).Comments", Method, 1},
    -		{"(CommentMap).Filter", Method, 1},
    -		{"(CommentMap).String", Method, 1},
    -		{"(CommentMap).Update", Method, 1},
    -		{"(ObjKind).String", Method, 0},
    -		{"ArrayType", Type, 0},
    -		{"ArrayType.Elt", Field, 0},
    -		{"ArrayType.Lbrack", Field, 0},
    -		{"ArrayType.Len", Field, 0},
    -		{"AssignStmt", Type, 0},
    -		{"AssignStmt.Lhs", Field, 0},
    -		{"AssignStmt.Rhs", Field, 0},
    -		{"AssignStmt.Tok", Field, 0},
    -		{"AssignStmt.TokPos", Field, 0},
    -		{"Bad", Const, 0},
    -		{"BadDecl", Type, 0},
    -		{"BadDecl.From", Field, 0},
    -		{"BadDecl.To", Field, 0},
    -		{"BadExpr", Type, 0},
    -		{"BadExpr.From", Field, 0},
    -		{"BadExpr.To", Field, 0},
    -		{"BadStmt", Type, 0},
    -		{"BadStmt.From", Field, 0},
    -		{"BadStmt.To", Field, 0},
    -		{"BasicLit", Type, 0},
    -		{"BasicLit.Kind", Field, 0},
    -		{"BasicLit.Value", Field, 0},
    -		{"BasicLit.ValuePos", Field, 0},
    -		{"BinaryExpr", Type, 0},
    -		{"BinaryExpr.Op", Field, 0},
    -		{"BinaryExpr.OpPos", Field, 0},
    -		{"BinaryExpr.X", Field, 0},
    -		{"BinaryExpr.Y", Field, 0},
    -		{"BlockStmt", Type, 0},
    -		{"BlockStmt.Lbrace", Field, 0},
    -		{"BlockStmt.List", Field, 0},
    -		{"BlockStmt.Rbrace", Field, 0},
    -		{"BranchStmt", Type, 0},
    -		{"BranchStmt.Label", Field, 0},
    -		{"BranchStmt.Tok", Field, 0},
    -		{"BranchStmt.TokPos", Field, 0},
    -		{"CallExpr", Type, 0},
    -		{"CallExpr.Args", Field, 0},
    -		{"CallExpr.Ellipsis", Field, 0},
    -		{"CallExpr.Fun", Field, 0},
    -		{"CallExpr.Lparen", Field, 0},
    -		{"CallExpr.Rparen", Field, 0},
    -		{"CaseClause", Type, 0},
    -		{"CaseClause.Body", Field, 0},
    -		{"CaseClause.Case", Field, 0},
    -		{"CaseClause.Colon", Field, 0},
    -		{"CaseClause.List", Field, 0},
    -		{"ChanDir", Type, 0},
    -		{"ChanType", Type, 0},
    -		{"ChanType.Arrow", Field, 1},
    -		{"ChanType.Begin", Field, 0},
    -		{"ChanType.Dir", Field, 0},
    -		{"ChanType.Value", Field, 0},
    -		{"CommClause", Type, 0},
    -		{"CommClause.Body", Field, 0},
    -		{"CommClause.Case", Field, 0},
    -		{"CommClause.Colon", Field, 0},
    -		{"CommClause.Comm", Field, 0},
    -		{"Comment", Type, 0},
    -		{"Comment.Slash", Field, 0},
    -		{"Comment.Text", Field, 0},
    -		{"CommentGroup", Type, 0},
    -		{"CommentGroup.List", Field, 0},
    -		{"CommentMap", Type, 1},
    -		{"CompositeLit", Type, 0},
    -		{"CompositeLit.Elts", Field, 0},
    -		{"CompositeLit.Incomplete", Field, 11},
    -		{"CompositeLit.Lbrace", Field, 0},
    -		{"CompositeLit.Rbrace", Field, 0},
    -		{"CompositeLit.Type", Field, 0},
    -		{"Con", Const, 0},
    -		{"Decl", Type, 0},
    -		{"DeclStmt", Type, 0},
    -		{"DeclStmt.Decl", Field, 0},
    -		{"DeferStmt", Type, 0},
    -		{"DeferStmt.Call", Field, 0},
    -		{"DeferStmt.Defer", Field, 0},
    -		{"Ellipsis", Type, 0},
    -		{"Ellipsis.Ellipsis", Field, 0},
    -		{"Ellipsis.Elt", Field, 0},
    -		{"EmptyStmt", Type, 0},
    -		{"EmptyStmt.Implicit", Field, 5},
    -		{"EmptyStmt.Semicolon", Field, 0},
    -		{"Expr", Type, 0},
    -		{"ExprStmt", Type, 0},
    -		{"ExprStmt.X", Field, 0},
    -		{"Field", Type, 0},
    -		{"Field.Comment", Field, 0},
    -		{"Field.Doc", Field, 0},
    -		{"Field.Names", Field, 0},
    -		{"Field.Tag", Field, 0},
    -		{"Field.Type", Field, 0},
    -		{"FieldFilter", Type, 0},
    -		{"FieldList", Type, 0},
    -		{"FieldList.Closing", Field, 0},
    -		{"FieldList.List", Field, 0},
    -		{"FieldList.Opening", Field, 0},
    -		{"File", Type, 0},
    -		{"File.Comments", Field, 0},
    -		{"File.Decls", Field, 0},
    -		{"File.Doc", Field, 0},
    -		{"File.FileEnd", Field, 20},
    -		{"File.FileStart", Field, 20},
    -		{"File.GoVersion", Field, 21},
    -		{"File.Imports", Field, 0},
    -		{"File.Name", Field, 0},
    -		{"File.Package", Field, 0},
    -		{"File.Scope", Field, 0},
    -		{"File.Unresolved", Field, 0},
    -		{"FileExports", Func, 0},
    -		{"Filter", Type, 0},
    -		{"FilterDecl", Func, 0},
    -		{"FilterFile", Func, 0},
    -		{"FilterFuncDuplicates", Const, 0},
    -		{"FilterImportDuplicates", Const, 0},
    -		{"FilterPackage", Func, 0},
    -		{"FilterUnassociatedComments", Const, 0},
    -		{"ForStmt", Type, 0},
    -		{"ForStmt.Body", Field, 0},
    -		{"ForStmt.Cond", Field, 0},
    -		{"ForStmt.For", Field, 0},
    -		{"ForStmt.Init", Field, 0},
    -		{"ForStmt.Post", Field, 0},
    -		{"Fprint", Func, 0},
    -		{"Fun", Const, 0},
    -		{"FuncDecl", Type, 0},
    -		{"FuncDecl.Body", Field, 0},
    -		{"FuncDecl.Doc", Field, 0},
    -		{"FuncDecl.Name", Field, 0},
    -		{"FuncDecl.Recv", Field, 0},
    -		{"FuncDecl.Type", Field, 0},
    -		{"FuncLit", Type, 0},
    -		{"FuncLit.Body", Field, 0},
    -		{"FuncLit.Type", Field, 0},
    -		{"FuncType", Type, 0},
    -		{"FuncType.Func", Field, 0},
    -		{"FuncType.Params", Field, 0},
    -		{"FuncType.Results", Field, 0},
    -		{"FuncType.TypeParams", Field, 18},
    -		{"GenDecl", Type, 0},
    -		{"GenDecl.Doc", Field, 0},
    -		{"GenDecl.Lparen", Field, 0},
    -		{"GenDecl.Rparen", Field, 0},
    -		{"GenDecl.Specs", Field, 0},
    -		{"GenDecl.Tok", Field, 0},
    -		{"GenDecl.TokPos", Field, 0},
    -		{"GoStmt", Type, 0},
    -		{"GoStmt.Call", Field, 0},
    -		{"GoStmt.Go", Field, 0},
    -		{"Ident", Type, 0},
    -		{"Ident.Name", Field, 0},
    -		{"Ident.NamePos", Field, 0},
    -		{"Ident.Obj", Field, 0},
    -		{"IfStmt", Type, 0},
    -		{"IfStmt.Body", Field, 0},
    -		{"IfStmt.Cond", Field, 0},
    -		{"IfStmt.Else", Field, 0},
    -		{"IfStmt.If", Field, 0},
    -		{"IfStmt.Init", Field, 0},
    -		{"ImportSpec", Type, 0},
    -		{"ImportSpec.Comment", Field, 0},
    -		{"ImportSpec.Doc", Field, 0},
    -		{"ImportSpec.EndPos", Field, 0},
    -		{"ImportSpec.Name", Field, 0},
    -		{"ImportSpec.Path", Field, 0},
    -		{"Importer", Type, 0},
    -		{"IncDecStmt", Type, 0},
    -		{"IncDecStmt.Tok", Field, 0},
    -		{"IncDecStmt.TokPos", Field, 0},
    -		{"IncDecStmt.X", Field, 0},
    -		{"IndexExpr", Type, 0},
    -		{"IndexExpr.Index", Field, 0},
    -		{"IndexExpr.Lbrack", Field, 0},
    -		{"IndexExpr.Rbrack", Field, 0},
    -		{"IndexExpr.X", Field, 0},
    -		{"IndexListExpr", Type, 18},
    -		{"IndexListExpr.Indices", Field, 18},
    -		{"IndexListExpr.Lbrack", Field, 18},
    -		{"IndexListExpr.Rbrack", Field, 18},
    -		{"IndexListExpr.X", Field, 18},
    -		{"Inspect", Func, 0},
    -		{"InterfaceType", Type, 0},
    -		{"InterfaceType.Incomplete", Field, 0},
    -		{"InterfaceType.Interface", Field, 0},
    -		{"InterfaceType.Methods", Field, 0},
    -		{"IsExported", Func, 0},
    -		{"IsGenerated", Func, 21},
    -		{"KeyValueExpr", Type, 0},
    -		{"KeyValueExpr.Colon", Field, 0},
    -		{"KeyValueExpr.Key", Field, 0},
    -		{"KeyValueExpr.Value", Field, 0},
    -		{"LabeledStmt", Type, 0},
    -		{"LabeledStmt.Colon", Field, 0},
    -		{"LabeledStmt.Label", Field, 0},
    -		{"LabeledStmt.Stmt", Field, 0},
    -		{"Lbl", Const, 0},
    -		{"MapType", Type, 0},
    -		{"MapType.Key", Field, 0},
    -		{"MapType.Map", Field, 0},
    -		{"MapType.Value", Field, 0},
    -		{"MergeMode", Type, 0},
    -		{"MergePackageFiles", Func, 0},
    -		{"NewCommentMap", Func, 1},
    -		{"NewIdent", Func, 0},
    -		{"NewObj", Func, 0},
    -		{"NewPackage", Func, 0},
    -		{"NewScope", Func, 0},
    -		{"Node", Type, 0},
    -		{"NotNilFilter", Func, 0},
    -		{"ObjKind", Type, 0},
    -		{"Object", Type, 0},
    -		{"Object.Data", Field, 0},
    -		{"Object.Decl", Field, 0},
    -		{"Object.Kind", Field, 0},
    -		{"Object.Name", Field, 0},
    -		{"Object.Type", Field, 0},
    -		{"Package", Type, 0},
    -		{"Package.Files", Field, 0},
    -		{"Package.Imports", Field, 0},
    -		{"Package.Name", Field, 0},
    -		{"Package.Scope", Field, 0},
    -		{"PackageExports", Func, 0},
    -		{"ParenExpr", Type, 0},
    -		{"ParenExpr.Lparen", Field, 0},
    -		{"ParenExpr.Rparen", Field, 0},
    -		{"ParenExpr.X", Field, 0},
    -		{"Pkg", Const, 0},
    -		{"Preorder", Func, 23},
    -		{"Print", Func, 0},
    -		{"RECV", Const, 0},
    -		{"RangeStmt", Type, 0},
    -		{"RangeStmt.Body", Field, 0},
    -		{"RangeStmt.For", Field, 0},
    -		{"RangeStmt.Key", Field, 0},
    -		{"RangeStmt.Range", Field, 20},
    -		{"RangeStmt.Tok", Field, 0},
    -		{"RangeStmt.TokPos", Field, 0},
    -		{"RangeStmt.Value", Field, 0},
    -		{"RangeStmt.X", Field, 0},
    -		{"ReturnStmt", Type, 0},
    -		{"ReturnStmt.Results", Field, 0},
    -		{"ReturnStmt.Return", Field, 0},
    -		{"SEND", Const, 0},
    -		{"Scope", Type, 0},
    -		{"Scope.Objects", Field, 0},
    -		{"Scope.Outer", Field, 0},
    -		{"SelectStmt", Type, 0},
    -		{"SelectStmt.Body", Field, 0},
    -		{"SelectStmt.Select", Field, 0},
    -		{"SelectorExpr", Type, 0},
    -		{"SelectorExpr.Sel", Field, 0},
    -		{"SelectorExpr.X", Field, 0},
    -		{"SendStmt", Type, 0},
    -		{"SendStmt.Arrow", Field, 0},
    -		{"SendStmt.Chan", Field, 0},
    -		{"SendStmt.Value", Field, 0},
    -		{"SliceExpr", Type, 0},
    -		{"SliceExpr.High", Field, 0},
    -		{"SliceExpr.Lbrack", Field, 0},
    -		{"SliceExpr.Low", Field, 0},
    -		{"SliceExpr.Max", Field, 2},
    -		{"SliceExpr.Rbrack", Field, 0},
    -		{"SliceExpr.Slice3", Field, 2},
    -		{"SliceExpr.X", Field, 0},
    -		{"SortImports", Func, 0},
    -		{"Spec", Type, 0},
    -		{"StarExpr", Type, 0},
    -		{"StarExpr.Star", Field, 0},
    -		{"StarExpr.X", Field, 0},
    -		{"Stmt", Type, 0},
    -		{"StructType", Type, 0},
    -		{"StructType.Fields", Field, 0},
    -		{"StructType.Incomplete", Field, 0},
    -		{"StructType.Struct", Field, 0},
    -		{"SwitchStmt", Type, 0},
    -		{"SwitchStmt.Body", Field, 0},
    -		{"SwitchStmt.Init", Field, 0},
    -		{"SwitchStmt.Switch", Field, 0},
    -		{"SwitchStmt.Tag", Field, 0},
    -		{"Typ", Const, 0},
    -		{"TypeAssertExpr", Type, 0},
    -		{"TypeAssertExpr.Lparen", Field, 2},
    -		{"TypeAssertExpr.Rparen", Field, 2},
    -		{"TypeAssertExpr.Type", Field, 0},
    -		{"TypeAssertExpr.X", Field, 0},
    -		{"TypeSpec", Type, 0},
    -		{"TypeSpec.Assign", Field, 9},
    -		{"TypeSpec.Comment", Field, 0},
    -		{"TypeSpec.Doc", Field, 0},
    -		{"TypeSpec.Name", Field, 0},
    -		{"TypeSpec.Type", Field, 0},
    -		{"TypeSpec.TypeParams", Field, 18},
    -		{"TypeSwitchStmt", Type, 0},
    -		{"TypeSwitchStmt.Assign", Field, 0},
    -		{"TypeSwitchStmt.Body", Field, 0},
    -		{"TypeSwitchStmt.Init", Field, 0},
    -		{"TypeSwitchStmt.Switch", Field, 0},
    -		{"UnaryExpr", Type, 0},
    -		{"UnaryExpr.Op", Field, 0},
    -		{"UnaryExpr.OpPos", Field, 0},
    -		{"UnaryExpr.X", Field, 0},
    -		{"Unparen", Func, 22},
    -		{"ValueSpec", Type, 0},
    -		{"ValueSpec.Comment", Field, 0},
    -		{"ValueSpec.Doc", Field, 0},
    -		{"ValueSpec.Names", Field, 0},
    -		{"ValueSpec.Type", Field, 0},
    -		{"ValueSpec.Values", Field, 0},
    -		{"Var", Const, 0},
    -		{"Visitor", Type, 0},
    -		{"Walk", Func, 0},
    +		{"(*ArrayType).End", Method, 0, ""},
    +		{"(*ArrayType).Pos", Method, 0, ""},
    +		{"(*AssignStmt).End", Method, 0, ""},
    +		{"(*AssignStmt).Pos", Method, 0, ""},
    +		{"(*BadDecl).End", Method, 0, ""},
    +		{"(*BadDecl).Pos", Method, 0, ""},
    +		{"(*BadExpr).End", Method, 0, ""},
    +		{"(*BadExpr).Pos", Method, 0, ""},
    +		{"(*BadStmt).End", Method, 0, ""},
    +		{"(*BadStmt).Pos", Method, 0, ""},
    +		{"(*BasicLit).End", Method, 0, ""},
    +		{"(*BasicLit).Pos", Method, 0, ""},
    +		{"(*BinaryExpr).End", Method, 0, ""},
    +		{"(*BinaryExpr).Pos", Method, 0, ""},
    +		{"(*BlockStmt).End", Method, 0, ""},
    +		{"(*BlockStmt).Pos", Method, 0, ""},
    +		{"(*BranchStmt).End", Method, 0, ""},
    +		{"(*BranchStmt).Pos", Method, 0, ""},
    +		{"(*CallExpr).End", Method, 0, ""},
    +		{"(*CallExpr).Pos", Method, 0, ""},
    +		{"(*CaseClause).End", Method, 0, ""},
    +		{"(*CaseClause).Pos", Method, 0, ""},
    +		{"(*ChanType).End", Method, 0, ""},
    +		{"(*ChanType).Pos", Method, 0, ""},
    +		{"(*CommClause).End", Method, 0, ""},
    +		{"(*CommClause).Pos", Method, 0, ""},
    +		{"(*Comment).End", Method, 0, ""},
    +		{"(*Comment).Pos", Method, 0, ""},
    +		{"(*CommentGroup).End", Method, 0, ""},
    +		{"(*CommentGroup).Pos", Method, 0, ""},
    +		{"(*CommentGroup).Text", Method, 0, ""},
    +		{"(*CompositeLit).End", Method, 0, ""},
    +		{"(*CompositeLit).Pos", Method, 0, ""},
    +		{"(*DeclStmt).End", Method, 0, ""},
    +		{"(*DeclStmt).Pos", Method, 0, ""},
    +		{"(*DeferStmt).End", Method, 0, ""},
    +		{"(*DeferStmt).Pos", Method, 0, ""},
    +		{"(*Ellipsis).End", Method, 0, ""},
    +		{"(*Ellipsis).Pos", Method, 0, ""},
    +		{"(*EmptyStmt).End", Method, 0, ""},
    +		{"(*EmptyStmt).Pos", Method, 0, ""},
    +		{"(*ExprStmt).End", Method, 0, ""},
    +		{"(*ExprStmt).Pos", Method, 0, ""},
    +		{"(*Field).End", Method, 0, ""},
    +		{"(*Field).Pos", Method, 0, ""},
    +		{"(*FieldList).End", Method, 0, ""},
    +		{"(*FieldList).NumFields", Method, 0, ""},
    +		{"(*FieldList).Pos", Method, 0, ""},
    +		{"(*File).End", Method, 0, ""},
    +		{"(*File).Pos", Method, 0, ""},
    +		{"(*ForStmt).End", Method, 0, ""},
    +		{"(*ForStmt).Pos", Method, 0, ""},
    +		{"(*FuncDecl).End", Method, 0, ""},
    +		{"(*FuncDecl).Pos", Method, 0, ""},
    +		{"(*FuncLit).End", Method, 0, ""},
    +		{"(*FuncLit).Pos", Method, 0, ""},
    +		{"(*FuncType).End", Method, 0, ""},
    +		{"(*FuncType).Pos", Method, 0, ""},
    +		{"(*GenDecl).End", Method, 0, ""},
    +		{"(*GenDecl).Pos", Method, 0, ""},
    +		{"(*GoStmt).End", Method, 0, ""},
    +		{"(*GoStmt).Pos", Method, 0, ""},
    +		{"(*Ident).End", Method, 0, ""},
    +		{"(*Ident).IsExported", Method, 0, ""},
    +		{"(*Ident).Pos", Method, 0, ""},
    +		{"(*Ident).String", Method, 0, ""},
    +		{"(*IfStmt).End", Method, 0, ""},
    +		{"(*IfStmt).Pos", Method, 0, ""},
    +		{"(*ImportSpec).End", Method, 0, ""},
    +		{"(*ImportSpec).Pos", Method, 0, ""},
    +		{"(*IncDecStmt).End", Method, 0, ""},
    +		{"(*IncDecStmt).Pos", Method, 0, ""},
    +		{"(*IndexExpr).End", Method, 0, ""},
    +		{"(*IndexExpr).Pos", Method, 0, ""},
    +		{"(*IndexListExpr).End", Method, 18, ""},
    +		{"(*IndexListExpr).Pos", Method, 18, ""},
    +		{"(*InterfaceType).End", Method, 0, ""},
    +		{"(*InterfaceType).Pos", Method, 0, ""},
    +		{"(*KeyValueExpr).End", Method, 0, ""},
    +		{"(*KeyValueExpr).Pos", Method, 0, ""},
    +		{"(*LabeledStmt).End", Method, 0, ""},
    +		{"(*LabeledStmt).Pos", Method, 0, ""},
    +		{"(*MapType).End", Method, 0, ""},
    +		{"(*MapType).Pos", Method, 0, ""},
    +		{"(*Object).Pos", Method, 0, ""},
    +		{"(*Package).End", Method, 0, ""},
    +		{"(*Package).Pos", Method, 0, ""},
    +		{"(*ParenExpr).End", Method, 0, ""},
    +		{"(*ParenExpr).Pos", Method, 0, ""},
    +		{"(*RangeStmt).End", Method, 0, ""},
    +		{"(*RangeStmt).Pos", Method, 0, ""},
    +		{"(*ReturnStmt).End", Method, 0, ""},
    +		{"(*ReturnStmt).Pos", Method, 0, ""},
    +		{"(*Scope).Insert", Method, 0, ""},
    +		{"(*Scope).Lookup", Method, 0, ""},
    +		{"(*Scope).String", Method, 0, ""},
    +		{"(*SelectStmt).End", Method, 0, ""},
    +		{"(*SelectStmt).Pos", Method, 0, ""},
    +		{"(*SelectorExpr).End", Method, 0, ""},
    +		{"(*SelectorExpr).Pos", Method, 0, ""},
    +		{"(*SendStmt).End", Method, 0, ""},
    +		{"(*SendStmt).Pos", Method, 0, ""},
    +		{"(*SliceExpr).End", Method, 0, ""},
    +		{"(*SliceExpr).Pos", Method, 0, ""},
    +		{"(*StarExpr).End", Method, 0, ""},
    +		{"(*StarExpr).Pos", Method, 0, ""},
    +		{"(*StructType).End", Method, 0, ""},
    +		{"(*StructType).Pos", Method, 0, ""},
    +		{"(*SwitchStmt).End", Method, 0, ""},
    +		{"(*SwitchStmt).Pos", Method, 0, ""},
    +		{"(*TypeAssertExpr).End", Method, 0, ""},
    +		{"(*TypeAssertExpr).Pos", Method, 0, ""},
    +		{"(*TypeSpec).End", Method, 0, ""},
    +		{"(*TypeSpec).Pos", Method, 0, ""},
    +		{"(*TypeSwitchStmt).End", Method, 0, ""},
    +		{"(*TypeSwitchStmt).Pos", Method, 0, ""},
    +		{"(*UnaryExpr).End", Method, 0, ""},
    +		{"(*UnaryExpr).Pos", Method, 0, ""},
    +		{"(*ValueSpec).End", Method, 0, ""},
    +		{"(*ValueSpec).Pos", Method, 0, ""},
    +		{"(CommentMap).Comments", Method, 1, ""},
    +		{"(CommentMap).Filter", Method, 1, ""},
    +		{"(CommentMap).String", Method, 1, ""},
    +		{"(CommentMap).Update", Method, 1, ""},
    +		{"(ObjKind).String", Method, 0, ""},
    +		{"ArrayType", Type, 0, ""},
    +		{"ArrayType.Elt", Field, 0, ""},
    +		{"ArrayType.Lbrack", Field, 0, ""},
    +		{"ArrayType.Len", Field, 0, ""},
    +		{"AssignStmt", Type, 0, ""},
    +		{"AssignStmt.Lhs", Field, 0, ""},
    +		{"AssignStmt.Rhs", Field, 0, ""},
    +		{"AssignStmt.Tok", Field, 0, ""},
    +		{"AssignStmt.TokPos", Field, 0, ""},
    +		{"Bad", Const, 0, ""},
    +		{"BadDecl", Type, 0, ""},
    +		{"BadDecl.From", Field, 0, ""},
    +		{"BadDecl.To", Field, 0, ""},
    +		{"BadExpr", Type, 0, ""},
    +		{"BadExpr.From", Field, 0, ""},
    +		{"BadExpr.To", Field, 0, ""},
    +		{"BadStmt", Type, 0, ""},
    +		{"BadStmt.From", Field, 0, ""},
    +		{"BadStmt.To", Field, 0, ""},
    +		{"BasicLit", Type, 0, ""},
    +		{"BasicLit.Kind", Field, 0, ""},
    +		{"BasicLit.Value", Field, 0, ""},
    +		{"BasicLit.ValuePos", Field, 0, ""},
    +		{"BinaryExpr", Type, 0, ""},
    +		{"BinaryExpr.Op", Field, 0, ""},
    +		{"BinaryExpr.OpPos", Field, 0, ""},
    +		{"BinaryExpr.X", Field, 0, ""},
    +		{"BinaryExpr.Y", Field, 0, ""},
    +		{"BlockStmt", Type, 0, ""},
    +		{"BlockStmt.Lbrace", Field, 0, ""},
    +		{"BlockStmt.List", Field, 0, ""},
    +		{"BlockStmt.Rbrace", Field, 0, ""},
    +		{"BranchStmt", Type, 0, ""},
    +		{"BranchStmt.Label", Field, 0, ""},
    +		{"BranchStmt.Tok", Field, 0, ""},
    +		{"BranchStmt.TokPos", Field, 0, ""},
    +		{"CallExpr", Type, 0, ""},
    +		{"CallExpr.Args", Field, 0, ""},
    +		{"CallExpr.Ellipsis", Field, 0, ""},
    +		{"CallExpr.Fun", Field, 0, ""},
    +		{"CallExpr.Lparen", Field, 0, ""},
    +		{"CallExpr.Rparen", Field, 0, ""},
    +		{"CaseClause", Type, 0, ""},
    +		{"CaseClause.Body", Field, 0, ""},
    +		{"CaseClause.Case", Field, 0, ""},
    +		{"CaseClause.Colon", Field, 0, ""},
    +		{"CaseClause.List", Field, 0, ""},
    +		{"ChanDir", Type, 0, ""},
    +		{"ChanType", Type, 0, ""},
    +		{"ChanType.Arrow", Field, 1, ""},
    +		{"ChanType.Begin", Field, 0, ""},
    +		{"ChanType.Dir", Field, 0, ""},
    +		{"ChanType.Value", Field, 0, ""},
    +		{"CommClause", Type, 0, ""},
    +		{"CommClause.Body", Field, 0, ""},
    +		{"CommClause.Case", Field, 0, ""},
    +		{"CommClause.Colon", Field, 0, ""},
    +		{"CommClause.Comm", Field, 0, ""},
    +		{"Comment", Type, 0, ""},
    +		{"Comment.Slash", Field, 0, ""},
    +		{"Comment.Text", Field, 0, ""},
    +		{"CommentGroup", Type, 0, ""},
    +		{"CommentGroup.List", Field, 0, ""},
    +		{"CommentMap", Type, 1, ""},
    +		{"CompositeLit", Type, 0, ""},
    +		{"CompositeLit.Elts", Field, 0, ""},
    +		{"CompositeLit.Incomplete", Field, 11, ""},
    +		{"CompositeLit.Lbrace", Field, 0, ""},
    +		{"CompositeLit.Rbrace", Field, 0, ""},
    +		{"CompositeLit.Type", Field, 0, ""},
    +		{"Con", Const, 0, ""},
    +		{"Decl", Type, 0, ""},
    +		{"DeclStmt", Type, 0, ""},
    +		{"DeclStmt.Decl", Field, 0, ""},
    +		{"DeferStmt", Type, 0, ""},
    +		{"DeferStmt.Call", Field, 0, ""},
    +		{"DeferStmt.Defer", Field, 0, ""},
    +		{"Ellipsis", Type, 0, ""},
    +		{"Ellipsis.Ellipsis", Field, 0, ""},
    +		{"Ellipsis.Elt", Field, 0, ""},
    +		{"EmptyStmt", Type, 0, ""},
    +		{"EmptyStmt.Implicit", Field, 5, ""},
    +		{"EmptyStmt.Semicolon", Field, 0, ""},
    +		{"Expr", Type, 0, ""},
    +		{"ExprStmt", Type, 0, ""},
    +		{"ExprStmt.X", Field, 0, ""},
    +		{"Field", Type, 0, ""},
    +		{"Field.Comment", Field, 0, ""},
    +		{"Field.Doc", Field, 0, ""},
    +		{"Field.Names", Field, 0, ""},
    +		{"Field.Tag", Field, 0, ""},
    +		{"Field.Type", Field, 0, ""},
    +		{"FieldFilter", Type, 0, ""},
    +		{"FieldList", Type, 0, ""},
    +		{"FieldList.Closing", Field, 0, ""},
    +		{"FieldList.List", Field, 0, ""},
    +		{"FieldList.Opening", Field, 0, ""},
    +		{"File", Type, 0, ""},
    +		{"File.Comments", Field, 0, ""},
    +		{"File.Decls", Field, 0, ""},
    +		{"File.Doc", Field, 0, ""},
    +		{"File.FileEnd", Field, 20, ""},
    +		{"File.FileStart", Field, 20, ""},
    +		{"File.GoVersion", Field, 21, ""},
    +		{"File.Imports", Field, 0, ""},
    +		{"File.Name", Field, 0, ""},
    +		{"File.Package", Field, 0, ""},
    +		{"File.Scope", Field, 0, ""},
    +		{"File.Unresolved", Field, 0, ""},
    +		{"FileExports", Func, 0, "func(src *File) bool"},
    +		{"Filter", Type, 0, ""},
    +		{"FilterDecl", Func, 0, "func(decl Decl, f Filter) bool"},
    +		{"FilterFile", Func, 0, "func(src *File, f Filter) bool"},
    +		{"FilterFuncDuplicates", Const, 0, ""},
    +		{"FilterImportDuplicates", Const, 0, ""},
    +		{"FilterPackage", Func, 0, "func(pkg *Package, f Filter) bool"},
    +		{"FilterUnassociatedComments", Const, 0, ""},
    +		{"ForStmt", Type, 0, ""},
    +		{"ForStmt.Body", Field, 0, ""},
    +		{"ForStmt.Cond", Field, 0, ""},
    +		{"ForStmt.For", Field, 0, ""},
    +		{"ForStmt.Init", Field, 0, ""},
    +		{"ForStmt.Post", Field, 0, ""},
    +		{"Fprint", Func, 0, "func(w io.Writer, fset *token.FileSet, x any, f FieldFilter) error"},
    +		{"Fun", Const, 0, ""},
    +		{"FuncDecl", Type, 0, ""},
    +		{"FuncDecl.Body", Field, 0, ""},
    +		{"FuncDecl.Doc", Field, 0, ""},
    +		{"FuncDecl.Name", Field, 0, ""},
    +		{"FuncDecl.Recv", Field, 0, ""},
    +		{"FuncDecl.Type", Field, 0, ""},
    +		{"FuncLit", Type, 0, ""},
    +		{"FuncLit.Body", Field, 0, ""},
    +		{"FuncLit.Type", Field, 0, ""},
    +		{"FuncType", Type, 0, ""},
    +		{"FuncType.Func", Field, 0, ""},
    +		{"FuncType.Params", Field, 0, ""},
    +		{"FuncType.Results", Field, 0, ""},
    +		{"FuncType.TypeParams", Field, 18, ""},
    +		{"GenDecl", Type, 0, ""},
    +		{"GenDecl.Doc", Field, 0, ""},
    +		{"GenDecl.Lparen", Field, 0, ""},
    +		{"GenDecl.Rparen", Field, 0, ""},
    +		{"GenDecl.Specs", Field, 0, ""},
    +		{"GenDecl.Tok", Field, 0, ""},
    +		{"GenDecl.TokPos", Field, 0, ""},
    +		{"GoStmt", Type, 0, ""},
    +		{"GoStmt.Call", Field, 0, ""},
    +		{"GoStmt.Go", Field, 0, ""},
    +		{"Ident", Type, 0, ""},
    +		{"Ident.Name", Field, 0, ""},
    +		{"Ident.NamePos", Field, 0, ""},
    +		{"Ident.Obj", Field, 0, ""},
    +		{"IfStmt", Type, 0, ""},
    +		{"IfStmt.Body", Field, 0, ""},
    +		{"IfStmt.Cond", Field, 0, ""},
    +		{"IfStmt.Else", Field, 0, ""},
    +		{"IfStmt.If", Field, 0, ""},
    +		{"IfStmt.Init", Field, 0, ""},
    +		{"ImportSpec", Type, 0, ""},
    +		{"ImportSpec.Comment", Field, 0, ""},
    +		{"ImportSpec.Doc", Field, 0, ""},
    +		{"ImportSpec.EndPos", Field, 0, ""},
    +		{"ImportSpec.Name", Field, 0, ""},
    +		{"ImportSpec.Path", Field, 0, ""},
    +		{"Importer", Type, 0, ""},
    +		{"IncDecStmt", Type, 0, ""},
    +		{"IncDecStmt.Tok", Field, 0, ""},
    +		{"IncDecStmt.TokPos", Field, 0, ""},
    +		{"IncDecStmt.X", Field, 0, ""},
    +		{"IndexExpr", Type, 0, ""},
    +		{"IndexExpr.Index", Field, 0, ""},
    +		{"IndexExpr.Lbrack", Field, 0, ""},
    +		{"IndexExpr.Rbrack", Field, 0, ""},
    +		{"IndexExpr.X", Field, 0, ""},
    +		{"IndexListExpr", Type, 18, ""},
    +		{"IndexListExpr.Indices", Field, 18, ""},
    +		{"IndexListExpr.Lbrack", Field, 18, ""},
    +		{"IndexListExpr.Rbrack", Field, 18, ""},
    +		{"IndexListExpr.X", Field, 18, ""},
    +		{"Inspect", Func, 0, "func(node Node, f func(Node) bool)"},
    +		{"InterfaceType", Type, 0, ""},
    +		{"InterfaceType.Incomplete", Field, 0, ""},
    +		{"InterfaceType.Interface", Field, 0, ""},
    +		{"InterfaceType.Methods", Field, 0, ""},
    +		{"IsExported", Func, 0, "func(name string) bool"},
    +		{"IsGenerated", Func, 21, "func(file *File) bool"},
    +		{"KeyValueExpr", Type, 0, ""},
    +		{"KeyValueExpr.Colon", Field, 0, ""},
    +		{"KeyValueExpr.Key", Field, 0, ""},
    +		{"KeyValueExpr.Value", Field, 0, ""},
    +		{"LabeledStmt", Type, 0, ""},
    +		{"LabeledStmt.Colon", Field, 0, ""},
    +		{"LabeledStmt.Label", Field, 0, ""},
    +		{"LabeledStmt.Stmt", Field, 0, ""},
    +		{"Lbl", Const, 0, ""},
    +		{"MapType", Type, 0, ""},
    +		{"MapType.Key", Field, 0, ""},
    +		{"MapType.Map", Field, 0, ""},
    +		{"MapType.Value", Field, 0, ""},
    +		{"MergeMode", Type, 0, ""},
    +		{"MergePackageFiles", Func, 0, "func(pkg *Package, mode MergeMode) *File"},
    +		{"NewCommentMap", Func, 1, "func(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap"},
    +		{"NewIdent", Func, 0, "func(name string) *Ident"},
    +		{"NewObj", Func, 0, "func(kind ObjKind, name string) *Object"},
    +		{"NewPackage", Func, 0, "func(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error)"},
    +		{"NewScope", Func, 0, "func(outer *Scope) *Scope"},
    +		{"Node", Type, 0, ""},
    +		{"NotNilFilter", Func, 0, "func(_ string, v reflect.Value) bool"},
    +		{"ObjKind", Type, 0, ""},
    +		{"Object", Type, 0, ""},
    +		{"Object.Data", Field, 0, ""},
    +		{"Object.Decl", Field, 0, ""},
    +		{"Object.Kind", Field, 0, ""},
    +		{"Object.Name", Field, 0, ""},
    +		{"Object.Type", Field, 0, ""},
    +		{"Package", Type, 0, ""},
    +		{"Package.Files", Field, 0, ""},
    +		{"Package.Imports", Field, 0, ""},
    +		{"Package.Name", Field, 0, ""},
    +		{"Package.Scope", Field, 0, ""},
    +		{"PackageExports", Func, 0, "func(pkg *Package) bool"},
    +		{"ParenExpr", Type, 0, ""},
    +		{"ParenExpr.Lparen", Field, 0, ""},
    +		{"ParenExpr.Rparen", Field, 0, ""},
    +		{"ParenExpr.X", Field, 0, ""},
    +		{"Pkg", Const, 0, ""},
    +		{"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"},
    +		{"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"},
    +		{"Print", Func, 0, "func(fset *token.FileSet, x any) error"},
    +		{"RECV", Const, 0, ""},
    +		{"RangeStmt", Type, 0, ""},
    +		{"RangeStmt.Body", Field, 0, ""},
    +		{"RangeStmt.For", Field, 0, ""},
    +		{"RangeStmt.Key", Field, 0, ""},
    +		{"RangeStmt.Range", Field, 20, ""},
    +		{"RangeStmt.Tok", Field, 0, ""},
    +		{"RangeStmt.TokPos", Field, 0, ""},
    +		{"RangeStmt.Value", Field, 0, ""},
    +		{"RangeStmt.X", Field, 0, ""},
    +		{"ReturnStmt", Type, 0, ""},
    +		{"ReturnStmt.Results", Field, 0, ""},
    +		{"ReturnStmt.Return", Field, 0, ""},
    +		{"SEND", Const, 0, ""},
    +		{"Scope", Type, 0, ""},
    +		{"Scope.Objects", Field, 0, ""},
    +		{"Scope.Outer", Field, 0, ""},
    +		{"SelectStmt", Type, 0, ""},
    +		{"SelectStmt.Body", Field, 0, ""},
    +		{"SelectStmt.Select", Field, 0, ""},
    +		{"SelectorExpr", Type, 0, ""},
    +		{"SelectorExpr.Sel", Field, 0, ""},
    +		{"SelectorExpr.X", Field, 0, ""},
    +		{"SendStmt", Type, 0, ""},
    +		{"SendStmt.Arrow", Field, 0, ""},
    +		{"SendStmt.Chan", Field, 0, ""},
    +		{"SendStmt.Value", Field, 0, ""},
    +		{"SliceExpr", Type, 0, ""},
    +		{"SliceExpr.High", Field, 0, ""},
    +		{"SliceExpr.Lbrack", Field, 0, ""},
    +		{"SliceExpr.Low", Field, 0, ""},
    +		{"SliceExpr.Max", Field, 2, ""},
    +		{"SliceExpr.Rbrack", Field, 0, ""},
    +		{"SliceExpr.Slice3", Field, 2, ""},
    +		{"SliceExpr.X", Field, 0, ""},
    +		{"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"},
    +		{"Spec", Type, 0, ""},
    +		{"StarExpr", Type, 0, ""},
    +		{"StarExpr.Star", Field, 0, ""},
    +		{"StarExpr.X", Field, 0, ""},
    +		{"Stmt", Type, 0, ""},
    +		{"StructType", Type, 0, ""},
    +		{"StructType.Fields", Field, 0, ""},
    +		{"StructType.Incomplete", Field, 0, ""},
    +		{"StructType.Struct", Field, 0, ""},
    +		{"SwitchStmt", Type, 0, ""},
    +		{"SwitchStmt.Body", Field, 0, ""},
    +		{"SwitchStmt.Init", Field, 0, ""},
    +		{"SwitchStmt.Switch", Field, 0, ""},
    +		{"SwitchStmt.Tag", Field, 0, ""},
    +		{"Typ", Const, 0, ""},
    +		{"TypeAssertExpr", Type, 0, ""},
    +		{"TypeAssertExpr.Lparen", Field, 2, ""},
    +		{"TypeAssertExpr.Rparen", Field, 2, ""},
    +		{"TypeAssertExpr.Type", Field, 0, ""},
    +		{"TypeAssertExpr.X", Field, 0, ""},
    +		{"TypeSpec", Type, 0, ""},
    +		{"TypeSpec.Assign", Field, 9, ""},
    +		{"TypeSpec.Comment", Field, 0, ""},
    +		{"TypeSpec.Doc", Field, 0, ""},
    +		{"TypeSpec.Name", Field, 0, ""},
    +		{"TypeSpec.Type", Field, 0, ""},
    +		{"TypeSpec.TypeParams", Field, 18, ""},
    +		{"TypeSwitchStmt", Type, 0, ""},
    +		{"TypeSwitchStmt.Assign", Field, 0, ""},
    +		{"TypeSwitchStmt.Body", Field, 0, ""},
    +		{"TypeSwitchStmt.Init", Field, 0, ""},
    +		{"TypeSwitchStmt.Switch", Field, 0, ""},
    +		{"UnaryExpr", Type, 0, ""},
    +		{"UnaryExpr.Op", Field, 0, ""},
    +		{"UnaryExpr.OpPos", Field, 0, ""},
    +		{"UnaryExpr.X", Field, 0, ""},
    +		{"Unparen", Func, 22, "func(e Expr) Expr"},
    +		{"ValueSpec", Type, 0, ""},
    +		{"ValueSpec.Comment", Field, 0, ""},
    +		{"ValueSpec.Doc", Field, 0, ""},
    +		{"ValueSpec.Names", Field, 0, ""},
    +		{"ValueSpec.Type", Field, 0, ""},
    +		{"ValueSpec.Values", Field, 0, ""},
    +		{"Var", Const, 0, ""},
    +		{"Visitor", Type, 0, ""},
    +		{"Walk", Func, 0, "func(v Visitor, node Node)"},
     	},
     	"go/build": {
    -		{"(*Context).Import", Method, 0},
    -		{"(*Context).ImportDir", Method, 0},
    -		{"(*Context).MatchFile", Method, 2},
    -		{"(*Context).SrcDirs", Method, 0},
    -		{"(*MultiplePackageError).Error", Method, 4},
    -		{"(*NoGoError).Error", Method, 0},
    -		{"(*Package).IsCommand", Method, 0},
    -		{"AllowBinary", Const, 0},
    -		{"ArchChar", Func, 0},
    -		{"Context", Type, 0},
    -		{"Context.BuildTags", Field, 0},
    -		{"Context.CgoEnabled", Field, 0},
    -		{"Context.Compiler", Field, 0},
    -		{"Context.Dir", Field, 14},
    -		{"Context.GOARCH", Field, 0},
    -		{"Context.GOOS", Field, 0},
    -		{"Context.GOPATH", Field, 0},
    -		{"Context.GOROOT", Field, 0},
    -		{"Context.HasSubdir", Field, 0},
    -		{"Context.InstallSuffix", Field, 1},
    -		{"Context.IsAbsPath", Field, 0},
    -		{"Context.IsDir", Field, 0},
    -		{"Context.JoinPath", Field, 0},
    -		{"Context.OpenFile", Field, 0},
    -		{"Context.ReadDir", Field, 0},
    -		{"Context.ReleaseTags", Field, 1},
    -		{"Context.SplitPathList", Field, 0},
    -		{"Context.ToolTags", Field, 17},
    -		{"Context.UseAllFiles", Field, 0},
    -		{"Default", Var, 0},
    -		{"Directive", Type, 21},
    -		{"Directive.Pos", Field, 21},
    -		{"Directive.Text", Field, 21},
    -		{"FindOnly", Const, 0},
    -		{"IgnoreVendor", Const, 6},
    -		{"Import", Func, 0},
    -		{"ImportComment", Const, 4},
    -		{"ImportDir", Func, 0},
    -		{"ImportMode", Type, 0},
    -		{"IsLocalImport", Func, 0},
    -		{"MultiplePackageError", Type, 4},
    -		{"MultiplePackageError.Dir", Field, 4},
    -		{"MultiplePackageError.Files", Field, 4},
    -		{"MultiplePackageError.Packages", Field, 4},
    -		{"NoGoError", Type, 0},
    -		{"NoGoError.Dir", Field, 0},
    -		{"Package", Type, 0},
    -		{"Package.AllTags", Field, 2},
    -		{"Package.BinDir", Field, 0},
    -		{"Package.BinaryOnly", Field, 7},
    -		{"Package.CFiles", Field, 0},
    -		{"Package.CXXFiles", Field, 2},
    -		{"Package.CgoCFLAGS", Field, 0},
    -		{"Package.CgoCPPFLAGS", Field, 2},
    -		{"Package.CgoCXXFLAGS", Field, 2},
    -		{"Package.CgoFFLAGS", Field, 7},
    -		{"Package.CgoFiles", Field, 0},
    -		{"Package.CgoLDFLAGS", Field, 0},
    -		{"Package.CgoPkgConfig", Field, 0},
    -		{"Package.ConflictDir", Field, 2},
    -		{"Package.Dir", Field, 0},
    -		{"Package.Directives", Field, 21},
    -		{"Package.Doc", Field, 0},
    -		{"Package.EmbedPatternPos", Field, 16},
    -		{"Package.EmbedPatterns", Field, 16},
    -		{"Package.FFiles", Field, 7},
    -		{"Package.GoFiles", Field, 0},
    -		{"Package.Goroot", Field, 0},
    -		{"Package.HFiles", Field, 0},
    -		{"Package.IgnoredGoFiles", Field, 1},
    -		{"Package.IgnoredOtherFiles", Field, 16},
    -		{"Package.ImportComment", Field, 4},
    -		{"Package.ImportPath", Field, 0},
    -		{"Package.ImportPos", Field, 0},
    -		{"Package.Imports", Field, 0},
    -		{"Package.InvalidGoFiles", Field, 6},
    -		{"Package.MFiles", Field, 3},
    -		{"Package.Name", Field, 0},
    -		{"Package.PkgObj", Field, 0},
    -		{"Package.PkgRoot", Field, 0},
    -		{"Package.PkgTargetRoot", Field, 5},
    -		{"Package.Root", Field, 0},
    -		{"Package.SFiles", Field, 0},
    -		{"Package.SrcRoot", Field, 0},
    -		{"Package.SwigCXXFiles", Field, 1},
    -		{"Package.SwigFiles", Field, 1},
    -		{"Package.SysoFiles", Field, 0},
    -		{"Package.TestDirectives", Field, 21},
    -		{"Package.TestEmbedPatternPos", Field, 16},
    -		{"Package.TestEmbedPatterns", Field, 16},
    -		{"Package.TestGoFiles", Field, 0},
    -		{"Package.TestImportPos", Field, 0},
    -		{"Package.TestImports", Field, 0},
    -		{"Package.XTestDirectives", Field, 21},
    -		{"Package.XTestEmbedPatternPos", Field, 16},
    -		{"Package.XTestEmbedPatterns", Field, 16},
    -		{"Package.XTestGoFiles", Field, 0},
    -		{"Package.XTestImportPos", Field, 0},
    -		{"Package.XTestImports", Field, 0},
    -		{"ToolDir", Var, 0},
    +		{"(*Context).Import", Method, 0, ""},
    +		{"(*Context).ImportDir", Method, 0, ""},
    +		{"(*Context).MatchFile", Method, 2, ""},
    +		{"(*Context).SrcDirs", Method, 0, ""},
    +		{"(*MultiplePackageError).Error", Method, 4, ""},
    +		{"(*NoGoError).Error", Method, 0, ""},
    +		{"(*Package).IsCommand", Method, 0, ""},
    +		{"AllowBinary", Const, 0, ""},
    +		{"ArchChar", Func, 0, "func(goarch string) (string, error)"},
    +		{"Context", Type, 0, ""},
    +		{"Context.BuildTags", Field, 0, ""},
    +		{"Context.CgoEnabled", Field, 0, ""},
    +		{"Context.Compiler", Field, 0, ""},
    +		{"Context.Dir", Field, 14, ""},
    +		{"Context.GOARCH", Field, 0, ""},
    +		{"Context.GOOS", Field, 0, ""},
    +		{"Context.GOPATH", Field, 0, ""},
    +		{"Context.GOROOT", Field, 0, ""},
    +		{"Context.HasSubdir", Field, 0, ""},
    +		{"Context.InstallSuffix", Field, 1, ""},
    +		{"Context.IsAbsPath", Field, 0, ""},
    +		{"Context.IsDir", Field, 0, ""},
    +		{"Context.JoinPath", Field, 0, ""},
    +		{"Context.OpenFile", Field, 0, ""},
    +		{"Context.ReadDir", Field, 0, ""},
    +		{"Context.ReleaseTags", Field, 1, ""},
    +		{"Context.SplitPathList", Field, 0, ""},
    +		{"Context.ToolTags", Field, 17, ""},
    +		{"Context.UseAllFiles", Field, 0, ""},
    +		{"Default", Var, 0, ""},
    +		{"Directive", Type, 21, ""},
    +		{"Directive.Pos", Field, 21, ""},
    +		{"Directive.Text", Field, 21, ""},
    +		{"FindOnly", Const, 0, ""},
    +		{"IgnoreVendor", Const, 6, ""},
    +		{"Import", Func, 0, "func(path string, srcDir string, mode ImportMode) (*Package, error)"},
    +		{"ImportComment", Const, 4, ""},
    +		{"ImportDir", Func, 0, "func(dir string, mode ImportMode) (*Package, error)"},
    +		{"ImportMode", Type, 0, ""},
    +		{"IsLocalImport", Func, 0, "func(path string) bool"},
    +		{"MultiplePackageError", Type, 4, ""},
    +		{"MultiplePackageError.Dir", Field, 4, ""},
    +		{"MultiplePackageError.Files", Field, 4, ""},
    +		{"MultiplePackageError.Packages", Field, 4, ""},
    +		{"NoGoError", Type, 0, ""},
    +		{"NoGoError.Dir", Field, 0, ""},
    +		{"Package", Type, 0, ""},
    +		{"Package.AllTags", Field, 2, ""},
    +		{"Package.BinDir", Field, 0, ""},
    +		{"Package.BinaryOnly", Field, 7, ""},
    +		{"Package.CFiles", Field, 0, ""},
    +		{"Package.CXXFiles", Field, 2, ""},
    +		{"Package.CgoCFLAGS", Field, 0, ""},
    +		{"Package.CgoCPPFLAGS", Field, 2, ""},
    +		{"Package.CgoCXXFLAGS", Field, 2, ""},
    +		{"Package.CgoFFLAGS", Field, 7, ""},
    +		{"Package.CgoFiles", Field, 0, ""},
    +		{"Package.CgoLDFLAGS", Field, 0, ""},
    +		{"Package.CgoPkgConfig", Field, 0, ""},
    +		{"Package.ConflictDir", Field, 2, ""},
    +		{"Package.Dir", Field, 0, ""},
    +		{"Package.Directives", Field, 21, ""},
    +		{"Package.Doc", Field, 0, ""},
    +		{"Package.EmbedPatternPos", Field, 16, ""},
    +		{"Package.EmbedPatterns", Field, 16, ""},
    +		{"Package.FFiles", Field, 7, ""},
    +		{"Package.GoFiles", Field, 0, ""},
    +		{"Package.Goroot", Field, 0, ""},
    +		{"Package.HFiles", Field, 0, ""},
    +		{"Package.IgnoredGoFiles", Field, 1, ""},
    +		{"Package.IgnoredOtherFiles", Field, 16, ""},
    +		{"Package.ImportComment", Field, 4, ""},
    +		{"Package.ImportPath", Field, 0, ""},
    +		{"Package.ImportPos", Field, 0, ""},
    +		{"Package.Imports", Field, 0, ""},
    +		{"Package.InvalidGoFiles", Field, 6, ""},
    +		{"Package.MFiles", Field, 3, ""},
    +		{"Package.Name", Field, 0, ""},
    +		{"Package.PkgObj", Field, 0, ""},
    +		{"Package.PkgRoot", Field, 0, ""},
    +		{"Package.PkgTargetRoot", Field, 5, ""},
    +		{"Package.Root", Field, 0, ""},
    +		{"Package.SFiles", Field, 0, ""},
    +		{"Package.SrcRoot", Field, 0, ""},
    +		{"Package.SwigCXXFiles", Field, 1, ""},
    +		{"Package.SwigFiles", Field, 1, ""},
    +		{"Package.SysoFiles", Field, 0, ""},
    +		{"Package.TestDirectives", Field, 21, ""},
    +		{"Package.TestEmbedPatternPos", Field, 16, ""},
    +		{"Package.TestEmbedPatterns", Field, 16, ""},
    +		{"Package.TestGoFiles", Field, 0, ""},
    +		{"Package.TestImportPos", Field, 0, ""},
    +		{"Package.TestImports", Field, 0, ""},
    +		{"Package.XTestDirectives", Field, 21, ""},
    +		{"Package.XTestEmbedPatternPos", Field, 16, ""},
    +		{"Package.XTestEmbedPatterns", Field, 16, ""},
    +		{"Package.XTestGoFiles", Field, 0, ""},
    +		{"Package.XTestImportPos", Field, 0, ""},
    +		{"Package.XTestImports", Field, 0, ""},
    +		{"ToolDir", Var, 0, ""},
     	},
     	"go/build/constraint": {
    -		{"(*AndExpr).Eval", Method, 16},
    -		{"(*AndExpr).String", Method, 16},
    -		{"(*NotExpr).Eval", Method, 16},
    -		{"(*NotExpr).String", Method, 16},
    -		{"(*OrExpr).Eval", Method, 16},
    -		{"(*OrExpr).String", Method, 16},
    -		{"(*SyntaxError).Error", Method, 16},
    -		{"(*TagExpr).Eval", Method, 16},
    -		{"(*TagExpr).String", Method, 16},
    -		{"AndExpr", Type, 16},
    -		{"AndExpr.X", Field, 16},
    -		{"AndExpr.Y", Field, 16},
    -		{"Expr", Type, 16},
    -		{"GoVersion", Func, 21},
    -		{"IsGoBuild", Func, 16},
    -		{"IsPlusBuild", Func, 16},
    -		{"NotExpr", Type, 16},
    -		{"NotExpr.X", Field, 16},
    -		{"OrExpr", Type, 16},
    -		{"OrExpr.X", Field, 16},
    -		{"OrExpr.Y", Field, 16},
    -		{"Parse", Func, 16},
    -		{"PlusBuildLines", Func, 16},
    -		{"SyntaxError", Type, 16},
    -		{"SyntaxError.Err", Field, 16},
    -		{"SyntaxError.Offset", Field, 16},
    -		{"TagExpr", Type, 16},
    -		{"TagExpr.Tag", Field, 16},
    +		{"(*AndExpr).Eval", Method, 16, ""},
    +		{"(*AndExpr).String", Method, 16, ""},
    +		{"(*NotExpr).Eval", Method, 16, ""},
    +		{"(*NotExpr).String", Method, 16, ""},
    +		{"(*OrExpr).Eval", Method, 16, ""},
    +		{"(*OrExpr).String", Method, 16, ""},
    +		{"(*SyntaxError).Error", Method, 16, ""},
    +		{"(*TagExpr).Eval", Method, 16, ""},
    +		{"(*TagExpr).String", Method, 16, ""},
    +		{"AndExpr", Type, 16, ""},
    +		{"AndExpr.X", Field, 16, ""},
    +		{"AndExpr.Y", Field, 16, ""},
    +		{"Expr", Type, 16, ""},
    +		{"GoVersion", Func, 21, "func(x Expr) string"},
    +		{"IsGoBuild", Func, 16, "func(line string) bool"},
    +		{"IsPlusBuild", Func, 16, "func(line string) bool"},
    +		{"NotExpr", Type, 16, ""},
    +		{"NotExpr.X", Field, 16, ""},
    +		{"OrExpr", Type, 16, ""},
    +		{"OrExpr.X", Field, 16, ""},
    +		{"OrExpr.Y", Field, 16, ""},
    +		{"Parse", Func, 16, "func(line string) (Expr, error)"},
    +		{"PlusBuildLines", Func, 16, "func(x Expr) ([]string, error)"},
    +		{"SyntaxError", Type, 16, ""},
    +		{"SyntaxError.Err", Field, 16, ""},
    +		{"SyntaxError.Offset", Field, 16, ""},
    +		{"TagExpr", Type, 16, ""},
    +		{"TagExpr.Tag", Field, 16, ""},
     	},
     	"go/constant": {
    -		{"(Kind).String", Method, 18},
    -		{"BinaryOp", Func, 5},
    -		{"BitLen", Func, 5},
    -		{"Bool", Const, 5},
    -		{"BoolVal", Func, 5},
    -		{"Bytes", Func, 5},
    -		{"Compare", Func, 5},
    -		{"Complex", Const, 5},
    -		{"Denom", Func, 5},
    -		{"Float", Const, 5},
    -		{"Float32Val", Func, 5},
    -		{"Float64Val", Func, 5},
    -		{"Imag", Func, 5},
    -		{"Int", Const, 5},
    -		{"Int64Val", Func, 5},
    -		{"Kind", Type, 5},
    -		{"Make", Func, 13},
    -		{"MakeBool", Func, 5},
    -		{"MakeFloat64", Func, 5},
    -		{"MakeFromBytes", Func, 5},
    -		{"MakeFromLiteral", Func, 5},
    -		{"MakeImag", Func, 5},
    -		{"MakeInt64", Func, 5},
    -		{"MakeString", Func, 5},
    -		{"MakeUint64", Func, 5},
    -		{"MakeUnknown", Func, 5},
    -		{"Num", Func, 5},
    -		{"Real", Func, 5},
    -		{"Shift", Func, 5},
    -		{"Sign", Func, 5},
    -		{"String", Const, 5},
    -		{"StringVal", Func, 5},
    -		{"ToComplex", Func, 6},
    -		{"ToFloat", Func, 6},
    -		{"ToInt", Func, 6},
    -		{"Uint64Val", Func, 5},
    -		{"UnaryOp", Func, 5},
    -		{"Unknown", Const, 5},
    -		{"Val", Func, 13},
    -		{"Value", Type, 5},
    +		{"(Kind).String", Method, 18, ""},
    +		{"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"},
    +		{"BitLen", Func, 5, "func(x Value) int"},
    +		{"Bool", Const, 5, ""},
    +		{"BoolVal", Func, 5, "func(x Value) bool"},
    +		{"Bytes", Func, 5, "func(x Value) []byte"},
    +		{"Compare", Func, 5, "func(x_ Value, op token.Token, y_ Value) bool"},
    +		{"Complex", Const, 5, ""},
    +		{"Denom", Func, 5, "func(x Value) Value"},
    +		{"Float", Const, 5, ""},
    +		{"Float32Val", Func, 5, "func(x Value) (float32, bool)"},
    +		{"Float64Val", Func, 5, "func(x Value) (float64, bool)"},
    +		{"Imag", Func, 5, "func(x Value) Value"},
    +		{"Int", Const, 5, ""},
    +		{"Int64Val", Func, 5, "func(x Value) (int64, bool)"},
    +		{"Kind", Type, 5, ""},
    +		{"Make", Func, 13, "func(x any) Value"},
    +		{"MakeBool", Func, 5, "func(b bool) Value"},
    +		{"MakeFloat64", Func, 5, "func(x float64) Value"},
    +		{"MakeFromBytes", Func, 5, "func(bytes []byte) Value"},
    +		{"MakeFromLiteral", Func, 5, "func(lit string, tok token.Token, zero uint) Value"},
    +		{"MakeImag", Func, 5, "func(x Value) Value"},
    +		{"MakeInt64", Func, 5, "func(x int64) Value"},
    +		{"MakeString", Func, 5, "func(s string) Value"},
    +		{"MakeUint64", Func, 5, "func(x uint64) Value"},
    +		{"MakeUnknown", Func, 5, "func() Value"},
    +		{"Num", Func, 5, "func(x Value) Value"},
    +		{"Real", Func, 5, "func(x Value) Value"},
    +		{"Shift", Func, 5, "func(x Value, op token.Token, s uint) Value"},
    +		{"Sign", Func, 5, "func(x Value) int"},
    +		{"String", Const, 5, ""},
    +		{"StringVal", Func, 5, "func(x Value) string"},
    +		{"ToComplex", Func, 6, "func(x Value) Value"},
    +		{"ToFloat", Func, 6, "func(x Value) Value"},
    +		{"ToInt", Func, 6, "func(x Value) Value"},
    +		{"Uint64Val", Func, 5, "func(x Value) (uint64, bool)"},
    +		{"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"},
    +		{"Unknown", Const, 5, ""},
    +		{"Val", Func, 13, "func(x Value) any"},
    +		{"Value", Type, 5, ""},
     	},
     	"go/doc": {
    -		{"(*Package).Filter", Method, 0},
    -		{"(*Package).HTML", Method, 19},
    -		{"(*Package).Markdown", Method, 19},
    -		{"(*Package).Parser", Method, 19},
    -		{"(*Package).Printer", Method, 19},
    -		{"(*Package).Synopsis", Method, 19},
    -		{"(*Package).Text", Method, 19},
    -		{"AllDecls", Const, 0},
    -		{"AllMethods", Const, 0},
    -		{"Example", Type, 0},
    -		{"Example.Code", Field, 0},
    -		{"Example.Comments", Field, 0},
    -		{"Example.Doc", Field, 0},
    -		{"Example.EmptyOutput", Field, 1},
    -		{"Example.Name", Field, 0},
    -		{"Example.Order", Field, 1},
    -		{"Example.Output", Field, 0},
    -		{"Example.Play", Field, 1},
    -		{"Example.Suffix", Field, 14},
    -		{"Example.Unordered", Field, 7},
    -		{"Examples", Func, 0},
    -		{"Filter", Type, 0},
    -		{"Func", Type, 0},
    -		{"Func.Decl", Field, 0},
    -		{"Func.Doc", Field, 0},
    -		{"Func.Examples", Field, 14},
    -		{"Func.Level", Field, 0},
    -		{"Func.Name", Field, 0},
    -		{"Func.Orig", Field, 0},
    -		{"Func.Recv", Field, 0},
    -		{"IllegalPrefixes", Var, 1},
    -		{"IsPredeclared", Func, 8},
    -		{"Mode", Type, 0},
    -		{"New", Func, 0},
    -		{"NewFromFiles", Func, 14},
    -		{"Note", Type, 1},
    -		{"Note.Body", Field, 1},
    -		{"Note.End", Field, 1},
    -		{"Note.Pos", Field, 1},
    -		{"Note.UID", Field, 1},
    -		{"Package", Type, 0},
    -		{"Package.Bugs", Field, 0},
    -		{"Package.Consts", Field, 0},
    -		{"Package.Doc", Field, 0},
    -		{"Package.Examples", Field, 14},
    -		{"Package.Filenames", Field, 0},
    -		{"Package.Funcs", Field, 0},
    -		{"Package.ImportPath", Field, 0},
    -		{"Package.Imports", Field, 0},
    -		{"Package.Name", Field, 0},
    -		{"Package.Notes", Field, 1},
    -		{"Package.Types", Field, 0},
    -		{"Package.Vars", Field, 0},
    -		{"PreserveAST", Const, 12},
    -		{"Synopsis", Func, 0},
    -		{"ToHTML", Func, 0},
    -		{"ToText", Func, 0},
    -		{"Type", Type, 0},
    -		{"Type.Consts", Field, 0},
    -		{"Type.Decl", Field, 0},
    -		{"Type.Doc", Field, 0},
    -		{"Type.Examples", Field, 14},
    -		{"Type.Funcs", Field, 0},
    -		{"Type.Methods", Field, 0},
    -		{"Type.Name", Field, 0},
    -		{"Type.Vars", Field, 0},
    -		{"Value", Type, 0},
    -		{"Value.Decl", Field, 0},
    -		{"Value.Doc", Field, 0},
    -		{"Value.Names", Field, 0},
    +		{"(*Package).Filter", Method, 0, ""},
    +		{"(*Package).HTML", Method, 19, ""},
    +		{"(*Package).Markdown", Method, 19, ""},
    +		{"(*Package).Parser", Method, 19, ""},
    +		{"(*Package).Printer", Method, 19, ""},
    +		{"(*Package).Synopsis", Method, 19, ""},
    +		{"(*Package).Text", Method, 19, ""},
    +		{"AllDecls", Const, 0, ""},
    +		{"AllMethods", Const, 0, ""},
    +		{"Example", Type, 0, ""},
    +		{"Example.Code", Field, 0, ""},
    +		{"Example.Comments", Field, 0, ""},
    +		{"Example.Doc", Field, 0, ""},
    +		{"Example.EmptyOutput", Field, 1, ""},
    +		{"Example.Name", Field, 0, ""},
    +		{"Example.Order", Field, 1, ""},
    +		{"Example.Output", Field, 0, ""},
    +		{"Example.Play", Field, 1, ""},
    +		{"Example.Suffix", Field, 14, ""},
    +		{"Example.Unordered", Field, 7, ""},
    +		{"Examples", Func, 0, "func(testFiles ...*ast.File) []*Example"},
    +		{"Filter", Type, 0, ""},
    +		{"Func", Type, 0, ""},
    +		{"Func.Decl", Field, 0, ""},
    +		{"Func.Doc", Field, 0, ""},
    +		{"Func.Examples", Field, 14, ""},
    +		{"Func.Level", Field, 0, ""},
    +		{"Func.Name", Field, 0, ""},
    +		{"Func.Orig", Field, 0, ""},
    +		{"Func.Recv", Field, 0, ""},
    +		{"IllegalPrefixes", Var, 1, ""},
    +		{"IsPredeclared", Func, 8, "func(s string) bool"},
    +		{"Mode", Type, 0, ""},
    +		{"New", Func, 0, "func(pkg *ast.Package, importPath string, mode Mode) *Package"},
    +		{"NewFromFiles", Func, 14, "func(fset *token.FileSet, files []*ast.File, importPath string, opts ...any) (*Package, error)"},
    +		{"Note", Type, 1, ""},
    +		{"Note.Body", Field, 1, ""},
    +		{"Note.End", Field, 1, ""},
    +		{"Note.Pos", Field, 1, ""},
    +		{"Note.UID", Field, 1, ""},
    +		{"Package", Type, 0, ""},
    +		{"Package.Bugs", Field, 0, ""},
    +		{"Package.Consts", Field, 0, ""},
    +		{"Package.Doc", Field, 0, ""},
    +		{"Package.Examples", Field, 14, ""},
    +		{"Package.Filenames", Field, 0, ""},
    +		{"Package.Funcs", Field, 0, ""},
    +		{"Package.ImportPath", Field, 0, ""},
    +		{"Package.Imports", Field, 0, ""},
    +		{"Package.Name", Field, 0, ""},
    +		{"Package.Notes", Field, 1, ""},
    +		{"Package.Types", Field, 0, ""},
    +		{"Package.Vars", Field, 0, ""},
    +		{"PreserveAST", Const, 12, ""},
    +		{"Synopsis", Func, 0, "func(text string) string"},
    +		{"ToHTML", Func, 0, "func(w io.Writer, text string, words map[string]string)"},
    +		{"ToText", Func, 0, "func(w io.Writer, text string, prefix string, codePrefix string, width int)"},
    +		{"Type", Type, 0, ""},
    +		{"Type.Consts", Field, 0, ""},
    +		{"Type.Decl", Field, 0, ""},
    +		{"Type.Doc", Field, 0, ""},
    +		{"Type.Examples", Field, 14, ""},
    +		{"Type.Funcs", Field, 0, ""},
    +		{"Type.Methods", Field, 0, ""},
    +		{"Type.Name", Field, 0, ""},
    +		{"Type.Vars", Field, 0, ""},
    +		{"Value", Type, 0, ""},
    +		{"Value.Decl", Field, 0, ""},
    +		{"Value.Doc", Field, 0, ""},
    +		{"Value.Names", Field, 0, ""},
     	},
     	"go/doc/comment": {
    -		{"(*DocLink).DefaultURL", Method, 19},
    -		{"(*Heading).DefaultID", Method, 19},
    -		{"(*List).BlankBefore", Method, 19},
    -		{"(*List).BlankBetween", Method, 19},
    -		{"(*Parser).Parse", Method, 19},
    -		{"(*Printer).Comment", Method, 19},
    -		{"(*Printer).HTML", Method, 19},
    -		{"(*Printer).Markdown", Method, 19},
    -		{"(*Printer).Text", Method, 19},
    -		{"Block", Type, 19},
    -		{"Code", Type, 19},
    -		{"Code.Text", Field, 19},
    -		{"DefaultLookupPackage", Func, 19},
    -		{"Doc", Type, 19},
    -		{"Doc.Content", Field, 19},
    -		{"Doc.Links", Field, 19},
    -		{"DocLink", Type, 19},
    -		{"DocLink.ImportPath", Field, 19},
    -		{"DocLink.Name", Field, 19},
    -		{"DocLink.Recv", Field, 19},
    -		{"DocLink.Text", Field, 19},
    -		{"Heading", Type, 19},
    -		{"Heading.Text", Field, 19},
    -		{"Italic", Type, 19},
    -		{"Link", Type, 19},
    -		{"Link.Auto", Field, 19},
    -		{"Link.Text", Field, 19},
    -		{"Link.URL", Field, 19},
    -		{"LinkDef", Type, 19},
    -		{"LinkDef.Text", Field, 19},
    -		{"LinkDef.URL", Field, 19},
    -		{"LinkDef.Used", Field, 19},
    -		{"List", Type, 19},
    -		{"List.ForceBlankBefore", Field, 19},
    -		{"List.ForceBlankBetween", Field, 19},
    -		{"List.Items", Field, 19},
    -		{"ListItem", Type, 19},
    -		{"ListItem.Content", Field, 19},
    -		{"ListItem.Number", Field, 19},
    -		{"Paragraph", Type, 19},
    -		{"Paragraph.Text", Field, 19},
    -		{"Parser", Type, 19},
    -		{"Parser.LookupPackage", Field, 19},
    -		{"Parser.LookupSym", Field, 19},
    -		{"Parser.Words", Field, 19},
    -		{"Plain", Type, 19},
    -		{"Printer", Type, 19},
    -		{"Printer.DocLinkBaseURL", Field, 19},
    -		{"Printer.DocLinkURL", Field, 19},
    -		{"Printer.HeadingID", Field, 19},
    -		{"Printer.HeadingLevel", Field, 19},
    -		{"Printer.TextCodePrefix", Field, 19},
    -		{"Printer.TextPrefix", Field, 19},
    -		{"Printer.TextWidth", Field, 19},
    -		{"Text", Type, 19},
    +		{"(*DocLink).DefaultURL", Method, 19, ""},
    +		{"(*Heading).DefaultID", Method, 19, ""},
    +		{"(*List).BlankBefore", Method, 19, ""},
    +		{"(*List).BlankBetween", Method, 19, ""},
    +		{"(*Parser).Parse", Method, 19, ""},
    +		{"(*Printer).Comment", Method, 19, ""},
    +		{"(*Printer).HTML", Method, 19, ""},
    +		{"(*Printer).Markdown", Method, 19, ""},
    +		{"(*Printer).Text", Method, 19, ""},
    +		{"Block", Type, 19, ""},
    +		{"Code", Type, 19, ""},
    +		{"Code.Text", Field, 19, ""},
    +		{"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"},
    +		{"Doc", Type, 19, ""},
    +		{"Doc.Content", Field, 19, ""},
    +		{"Doc.Links", Field, 19, ""},
    +		{"DocLink", Type, 19, ""},
    +		{"DocLink.ImportPath", Field, 19, ""},
    +		{"DocLink.Name", Field, 19, ""},
    +		{"DocLink.Recv", Field, 19, ""},
    +		{"DocLink.Text", Field, 19, ""},
    +		{"Heading", Type, 19, ""},
    +		{"Heading.Text", Field, 19, ""},
    +		{"Italic", Type, 19, ""},
    +		{"Link", Type, 19, ""},
    +		{"Link.Auto", Field, 19, ""},
    +		{"Link.Text", Field, 19, ""},
    +		{"Link.URL", Field, 19, ""},
    +		{"LinkDef", Type, 19, ""},
    +		{"LinkDef.Text", Field, 19, ""},
    +		{"LinkDef.URL", Field, 19, ""},
    +		{"LinkDef.Used", Field, 19, ""},
    +		{"List", Type, 19, ""},
    +		{"List.ForceBlankBefore", Field, 19, ""},
    +		{"List.ForceBlankBetween", Field, 19, ""},
    +		{"List.Items", Field, 19, ""},
    +		{"ListItem", Type, 19, ""},
    +		{"ListItem.Content", Field, 19, ""},
    +		{"ListItem.Number", Field, 19, ""},
    +		{"Paragraph", Type, 19, ""},
    +		{"Paragraph.Text", Field, 19, ""},
    +		{"Parser", Type, 19, ""},
    +		{"Parser.LookupPackage", Field, 19, ""},
    +		{"Parser.LookupSym", Field, 19, ""},
    +		{"Parser.Words", Field, 19, ""},
    +		{"Plain", Type, 19, ""},
    +		{"Printer", Type, 19, ""},
    +		{"Printer.DocLinkBaseURL", Field, 19, ""},
    +		{"Printer.DocLinkURL", Field, 19, ""},
    +		{"Printer.HeadingID", Field, 19, ""},
    +		{"Printer.HeadingLevel", Field, 19, ""},
    +		{"Printer.TextCodePrefix", Field, 19, ""},
    +		{"Printer.TextPrefix", Field, 19, ""},
    +		{"Printer.TextWidth", Field, 19, ""},
    +		{"Text", Type, 19, ""},
     	},
     	"go/format": {
    -		{"Node", Func, 1},
    -		{"Source", Func, 1},
    +		{"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"},
    +		{"Source", Func, 1, "func(src []byte) ([]byte, error)"},
     	},
     	"go/importer": {
    -		{"Default", Func, 5},
    -		{"For", Func, 5},
    -		{"ForCompiler", Func, 12},
    -		{"Lookup", Type, 5},
    +		{"Default", Func, 5, "func() types.Importer"},
    +		{"For", Func, 5, "func(compiler string, lookup Lookup) types.Importer"},
    +		{"ForCompiler", Func, 12, "func(fset *token.FileSet, compiler string, lookup Lookup) types.Importer"},
    +		{"Lookup", Type, 5, ""},
     	},
     	"go/parser": {
    -		{"AllErrors", Const, 1},
    -		{"DeclarationErrors", Const, 0},
    -		{"ImportsOnly", Const, 0},
    -		{"Mode", Type, 0},
    -		{"PackageClauseOnly", Const, 0},
    -		{"ParseComments", Const, 0},
    -		{"ParseDir", Func, 0},
    -		{"ParseExpr", Func, 0},
    -		{"ParseExprFrom", Func, 5},
    -		{"ParseFile", Func, 0},
    -		{"SkipObjectResolution", Const, 17},
    -		{"SpuriousErrors", Const, 0},
    -		{"Trace", Const, 0},
    +		{"AllErrors", Const, 1, ""},
    +		{"DeclarationErrors", Const, 0, ""},
    +		{"ImportsOnly", Const, 0, ""},
    +		{"Mode", Type, 0, ""},
    +		{"PackageClauseOnly", Const, 0, ""},
    +		{"ParseComments", Const, 0, ""},
    +		{"ParseDir", Func, 0, "func(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error)"},
    +		{"ParseExpr", Func, 0, "func(x string) (ast.Expr, error)"},
    +		{"ParseExprFrom", Func, 5, "func(fset *token.FileSet, filename string, src any, mode Mode) (expr ast.Expr, err error)"},
    +		{"ParseFile", Func, 0, "func(fset *token.FileSet, filename string, src any, mode Mode) (f *ast.File, err error)"},
    +		{"SkipObjectResolution", Const, 17, ""},
    +		{"SpuriousErrors", Const, 0, ""},
    +		{"Trace", Const, 0, ""},
     	},
     	"go/printer": {
    -		{"(*Config).Fprint", Method, 0},
    -		{"CommentedNode", Type, 0},
    -		{"CommentedNode.Comments", Field, 0},
    -		{"CommentedNode.Node", Field, 0},
    -		{"Config", Type, 0},
    -		{"Config.Indent", Field, 1},
    -		{"Config.Mode", Field, 0},
    -		{"Config.Tabwidth", Field, 0},
    -		{"Fprint", Func, 0},
    -		{"Mode", Type, 0},
    -		{"RawFormat", Const, 0},
    -		{"SourcePos", Const, 0},
    -		{"TabIndent", Const, 0},
    -		{"UseSpaces", Const, 0},
    +		{"(*Config).Fprint", Method, 0, ""},
    +		{"CommentedNode", Type, 0, ""},
    +		{"CommentedNode.Comments", Field, 0, ""},
    +		{"CommentedNode.Node", Field, 0, ""},
    +		{"Config", Type, 0, ""},
    +		{"Config.Indent", Field, 1, ""},
    +		{"Config.Mode", Field, 0, ""},
    +		{"Config.Tabwidth", Field, 0, ""},
    +		{"Fprint", Func, 0, "func(output io.Writer, fset *token.FileSet, node any) error"},
    +		{"Mode", Type, 0, ""},
    +		{"RawFormat", Const, 0, ""},
    +		{"SourcePos", Const, 0, ""},
    +		{"TabIndent", Const, 0, ""},
    +		{"UseSpaces", Const, 0, ""},
     	},
     	"go/scanner": {
    -		{"(*ErrorList).Add", Method, 0},
    -		{"(*ErrorList).RemoveMultiples", Method, 0},
    -		{"(*ErrorList).Reset", Method, 0},
    -		{"(*Scanner).Init", Method, 0},
    -		{"(*Scanner).Scan", Method, 0},
    -		{"(Error).Error", Method, 0},
    -		{"(ErrorList).Err", Method, 0},
    -		{"(ErrorList).Error", Method, 0},
    -		{"(ErrorList).Len", Method, 0},
    -		{"(ErrorList).Less", Method, 0},
    -		{"(ErrorList).Sort", Method, 0},
    -		{"(ErrorList).Swap", Method, 0},
    -		{"Error", Type, 0},
    -		{"Error.Msg", Field, 0},
    -		{"Error.Pos", Field, 0},
    -		{"ErrorHandler", Type, 0},
    -		{"ErrorList", Type, 0},
    -		{"Mode", Type, 0},
    -		{"PrintError", Func, 0},
    -		{"ScanComments", Const, 0},
    -		{"Scanner", Type, 0},
    -		{"Scanner.ErrorCount", Field, 0},
    +		{"(*ErrorList).Add", Method, 0, ""},
    +		{"(*ErrorList).RemoveMultiples", Method, 0, ""},
    +		{"(*ErrorList).Reset", Method, 0, ""},
    +		{"(*Scanner).Init", Method, 0, ""},
    +		{"(*Scanner).Scan", Method, 0, ""},
    +		{"(Error).Error", Method, 0, ""},
    +		{"(ErrorList).Err", Method, 0, ""},
    +		{"(ErrorList).Error", Method, 0, ""},
    +		{"(ErrorList).Len", Method, 0, ""},
    +		{"(ErrorList).Less", Method, 0, ""},
    +		{"(ErrorList).Sort", Method, 0, ""},
    +		{"(ErrorList).Swap", Method, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Msg", Field, 0, ""},
    +		{"Error.Pos", Field, 0, ""},
    +		{"ErrorHandler", Type, 0, ""},
    +		{"ErrorList", Type, 0, ""},
    +		{"Mode", Type, 0, ""},
    +		{"PrintError", Func, 0, "func(w io.Writer, err error)"},
    +		{"ScanComments", Const, 0, ""},
    +		{"Scanner", Type, 0, ""},
    +		{"Scanner.ErrorCount", Field, 0, ""},
     	},
     	"go/token": {
    -		{"(*File).AddLine", Method, 0},
    -		{"(*File).AddLineColumnInfo", Method, 11},
    -		{"(*File).AddLineInfo", Method, 0},
    -		{"(*File).Base", Method, 0},
    -		{"(*File).Line", Method, 0},
    -		{"(*File).LineCount", Method, 0},
    -		{"(*File).LineStart", Method, 12},
    -		{"(*File).Lines", Method, 21},
    -		{"(*File).MergeLine", Method, 2},
    -		{"(*File).Name", Method, 0},
    -		{"(*File).Offset", Method, 0},
    -		{"(*File).Pos", Method, 0},
    -		{"(*File).Position", Method, 0},
    -		{"(*File).PositionFor", Method, 4},
    -		{"(*File).SetLines", Method, 0},
    -		{"(*File).SetLinesForContent", Method, 0},
    -		{"(*File).Size", Method, 0},
    -		{"(*FileSet).AddFile", Method, 0},
    -		{"(*FileSet).Base", Method, 0},
    -		{"(*FileSet).File", Method, 0},
    -		{"(*FileSet).Iterate", Method, 0},
    -		{"(*FileSet).Position", Method, 0},
    -		{"(*FileSet).PositionFor", Method, 4},
    -		{"(*FileSet).Read", Method, 0},
    -		{"(*FileSet).RemoveFile", Method, 20},
    -		{"(*FileSet).Write", Method, 0},
    -		{"(*Position).IsValid", Method, 0},
    -		{"(Pos).IsValid", Method, 0},
    -		{"(Position).String", Method, 0},
    -		{"(Token).IsKeyword", Method, 0},
    -		{"(Token).IsLiteral", Method, 0},
    -		{"(Token).IsOperator", Method, 0},
    -		{"(Token).Precedence", Method, 0},
    -		{"(Token).String", Method, 0},
    -		{"ADD", Const, 0},
    -		{"ADD_ASSIGN", Const, 0},
    -		{"AND", Const, 0},
    -		{"AND_ASSIGN", Const, 0},
    -		{"AND_NOT", Const, 0},
    -		{"AND_NOT_ASSIGN", Const, 0},
    -		{"ARROW", Const, 0},
    -		{"ASSIGN", Const, 0},
    -		{"BREAK", Const, 0},
    -		{"CASE", Const, 0},
    -		{"CHAN", Const, 0},
    -		{"CHAR", Const, 0},
    -		{"COLON", Const, 0},
    -		{"COMMA", Const, 0},
    -		{"COMMENT", Const, 0},
    -		{"CONST", Const, 0},
    -		{"CONTINUE", Const, 0},
    -		{"DEC", Const, 0},
    -		{"DEFAULT", Const, 0},
    -		{"DEFER", Const, 0},
    -		{"DEFINE", Const, 0},
    -		{"ELLIPSIS", Const, 0},
    -		{"ELSE", Const, 0},
    -		{"EOF", Const, 0},
    -		{"EQL", Const, 0},
    -		{"FALLTHROUGH", Const, 0},
    -		{"FLOAT", Const, 0},
    -		{"FOR", Const, 0},
    -		{"FUNC", Const, 0},
    -		{"File", Type, 0},
    -		{"FileSet", Type, 0},
    -		{"GEQ", Const, 0},
    -		{"GO", Const, 0},
    -		{"GOTO", Const, 0},
    -		{"GTR", Const, 0},
    -		{"HighestPrec", Const, 0},
    -		{"IDENT", Const, 0},
    -		{"IF", Const, 0},
    -		{"ILLEGAL", Const, 0},
    -		{"IMAG", Const, 0},
    -		{"IMPORT", Const, 0},
    -		{"INC", Const, 0},
    -		{"INT", Const, 0},
    -		{"INTERFACE", Const, 0},
    -		{"IsExported", Func, 13},
    -		{"IsIdentifier", Func, 13},
    -		{"IsKeyword", Func, 13},
    -		{"LAND", Const, 0},
    -		{"LBRACE", Const, 0},
    -		{"LBRACK", Const, 0},
    -		{"LEQ", Const, 0},
    -		{"LOR", Const, 0},
    -		{"LPAREN", Const, 0},
    -		{"LSS", Const, 0},
    -		{"Lookup", Func, 0},
    -		{"LowestPrec", Const, 0},
    -		{"MAP", Const, 0},
    -		{"MUL", Const, 0},
    -		{"MUL_ASSIGN", Const, 0},
    -		{"NEQ", Const, 0},
    -		{"NOT", Const, 0},
    -		{"NewFileSet", Func, 0},
    -		{"NoPos", Const, 0},
    -		{"OR", Const, 0},
    -		{"OR_ASSIGN", Const, 0},
    -		{"PACKAGE", Const, 0},
    -		{"PERIOD", Const, 0},
    -		{"Pos", Type, 0},
    -		{"Position", Type, 0},
    -		{"Position.Column", Field, 0},
    -		{"Position.Filename", Field, 0},
    -		{"Position.Line", Field, 0},
    -		{"Position.Offset", Field, 0},
    -		{"QUO", Const, 0},
    -		{"QUO_ASSIGN", Const, 0},
    -		{"RANGE", Const, 0},
    -		{"RBRACE", Const, 0},
    -		{"RBRACK", Const, 0},
    -		{"REM", Const, 0},
    -		{"REM_ASSIGN", Const, 0},
    -		{"RETURN", Const, 0},
    -		{"RPAREN", Const, 0},
    -		{"SELECT", Const, 0},
    -		{"SEMICOLON", Const, 0},
    -		{"SHL", Const, 0},
    -		{"SHL_ASSIGN", Const, 0},
    -		{"SHR", Const, 0},
    -		{"SHR_ASSIGN", Const, 0},
    -		{"STRING", Const, 0},
    -		{"STRUCT", Const, 0},
    -		{"SUB", Const, 0},
    -		{"SUB_ASSIGN", Const, 0},
    -		{"SWITCH", Const, 0},
    -		{"TILDE", Const, 18},
    -		{"TYPE", Const, 0},
    -		{"Token", Type, 0},
    -		{"UnaryPrec", Const, 0},
    -		{"VAR", Const, 0},
    -		{"XOR", Const, 0},
    -		{"XOR_ASSIGN", Const, 0},
    +		{"(*File).AddLine", Method, 0, ""},
    +		{"(*File).AddLineColumnInfo", Method, 11, ""},
    +		{"(*File).AddLineInfo", Method, 0, ""},
    +		{"(*File).Base", Method, 0, ""},
    +		{"(*File).Line", Method, 0, ""},
    +		{"(*File).LineCount", Method, 0, ""},
    +		{"(*File).LineStart", Method, 12, ""},
    +		{"(*File).Lines", Method, 21, ""},
    +		{"(*File).MergeLine", Method, 2, ""},
    +		{"(*File).Name", Method, 0, ""},
    +		{"(*File).Offset", Method, 0, ""},
    +		{"(*File).Pos", Method, 0, ""},
    +		{"(*File).Position", Method, 0, ""},
    +		{"(*File).PositionFor", Method, 4, ""},
    +		{"(*File).SetLines", Method, 0, ""},
    +		{"(*File).SetLinesForContent", Method, 0, ""},
    +		{"(*File).Size", Method, 0, ""},
    +		{"(*FileSet).AddExistingFiles", Method, 25, ""},
    +		{"(*FileSet).AddFile", Method, 0, ""},
    +		{"(*FileSet).Base", Method, 0, ""},
    +		{"(*FileSet).File", Method, 0, ""},
    +		{"(*FileSet).Iterate", Method, 0, ""},
    +		{"(*FileSet).Position", Method, 0, ""},
    +		{"(*FileSet).PositionFor", Method, 4, ""},
    +		{"(*FileSet).Read", Method, 0, ""},
    +		{"(*FileSet).RemoveFile", Method, 20, ""},
    +		{"(*FileSet).Write", Method, 0, ""},
    +		{"(*Position).IsValid", Method, 0, ""},
    +		{"(Pos).IsValid", Method, 0, ""},
    +		{"(Position).String", Method, 0, ""},
    +		{"(Token).IsKeyword", Method, 0, ""},
    +		{"(Token).IsLiteral", Method, 0, ""},
    +		{"(Token).IsOperator", Method, 0, ""},
    +		{"(Token).Precedence", Method, 0, ""},
    +		{"(Token).String", Method, 0, ""},
    +		{"ADD", Const, 0, ""},
    +		{"ADD_ASSIGN", Const, 0, ""},
    +		{"AND", Const, 0, ""},
    +		{"AND_ASSIGN", Const, 0, ""},
    +		{"AND_NOT", Const, 0, ""},
    +		{"AND_NOT_ASSIGN", Const, 0, ""},
    +		{"ARROW", Const, 0, ""},
    +		{"ASSIGN", Const, 0, ""},
    +		{"BREAK", Const, 0, ""},
    +		{"CASE", Const, 0, ""},
    +		{"CHAN", Const, 0, ""},
    +		{"CHAR", Const, 0, ""},
    +		{"COLON", Const, 0, ""},
    +		{"COMMA", Const, 0, ""},
    +		{"COMMENT", Const, 0, ""},
    +		{"CONST", Const, 0, ""},
    +		{"CONTINUE", Const, 0, ""},
    +		{"DEC", Const, 0, ""},
    +		{"DEFAULT", Const, 0, ""},
    +		{"DEFER", Const, 0, ""},
    +		{"DEFINE", Const, 0, ""},
    +		{"ELLIPSIS", Const, 0, ""},
    +		{"ELSE", Const, 0, ""},
    +		{"EOF", Const, 0, ""},
    +		{"EQL", Const, 0, ""},
    +		{"FALLTHROUGH", Const, 0, ""},
    +		{"FLOAT", Const, 0, ""},
    +		{"FOR", Const, 0, ""},
    +		{"FUNC", Const, 0, ""},
    +		{"File", Type, 0, ""},
    +		{"FileSet", Type, 0, ""},
    +		{"GEQ", Const, 0, ""},
    +		{"GO", Const, 0, ""},
    +		{"GOTO", Const, 0, ""},
    +		{"GTR", Const, 0, ""},
    +		{"HighestPrec", Const, 0, ""},
    +		{"IDENT", Const, 0, ""},
    +		{"IF", Const, 0, ""},
    +		{"ILLEGAL", Const, 0, ""},
    +		{"IMAG", Const, 0, ""},
    +		{"IMPORT", Const, 0, ""},
    +		{"INC", Const, 0, ""},
    +		{"INT", Const, 0, ""},
    +		{"INTERFACE", Const, 0, ""},
    +		{"IsExported", Func, 13, "func(name string) bool"},
    +		{"IsIdentifier", Func, 13, "func(name string) bool"},
    +		{"IsKeyword", Func, 13, "func(name string) bool"},
    +		{"LAND", Const, 0, ""},
    +		{"LBRACE", Const, 0, ""},
    +		{"LBRACK", Const, 0, ""},
    +		{"LEQ", Const, 0, ""},
    +		{"LOR", Const, 0, ""},
    +		{"LPAREN", Const, 0, ""},
    +		{"LSS", Const, 0, ""},
    +		{"Lookup", Func, 0, "func(ident string) Token"},
    +		{"LowestPrec", Const, 0, ""},
    +		{"MAP", Const, 0, ""},
    +		{"MUL", Const, 0, ""},
    +		{"MUL_ASSIGN", Const, 0, ""},
    +		{"NEQ", Const, 0, ""},
    +		{"NOT", Const, 0, ""},
    +		{"NewFileSet", Func, 0, "func() *FileSet"},
    +		{"NoPos", Const, 0, ""},
    +		{"OR", Const, 0, ""},
    +		{"OR_ASSIGN", Const, 0, ""},
    +		{"PACKAGE", Const, 0, ""},
    +		{"PERIOD", Const, 0, ""},
    +		{"Pos", Type, 0, ""},
    +		{"Position", Type, 0, ""},
    +		{"Position.Column", Field, 0, ""},
    +		{"Position.Filename", Field, 0, ""},
    +		{"Position.Line", Field, 0, ""},
    +		{"Position.Offset", Field, 0, ""},
    +		{"QUO", Const, 0, ""},
    +		{"QUO_ASSIGN", Const, 0, ""},
    +		{"RANGE", Const, 0, ""},
    +		{"RBRACE", Const, 0, ""},
    +		{"RBRACK", Const, 0, ""},
    +		{"REM", Const, 0, ""},
    +		{"REM_ASSIGN", Const, 0, ""},
    +		{"RETURN", Const, 0, ""},
    +		{"RPAREN", Const, 0, ""},
    +		{"SELECT", Const, 0, ""},
    +		{"SEMICOLON", Const, 0, ""},
    +		{"SHL", Const, 0, ""},
    +		{"SHL_ASSIGN", Const, 0, ""},
    +		{"SHR", Const, 0, ""},
    +		{"SHR_ASSIGN", Const, 0, ""},
    +		{"STRING", Const, 0, ""},
    +		{"STRUCT", Const, 0, ""},
    +		{"SUB", Const, 0, ""},
    +		{"SUB_ASSIGN", Const, 0, ""},
    +		{"SWITCH", Const, 0, ""},
    +		{"TILDE", Const, 18, ""},
    +		{"TYPE", Const, 0, ""},
    +		{"Token", Type, 0, ""},
    +		{"UnaryPrec", Const, 0, ""},
    +		{"VAR", Const, 0, ""},
    +		{"XOR", Const, 0, ""},
    +		{"XOR_ASSIGN", Const, 0, ""},
     	},
     	"go/types": {
    -		{"(*Alias).Obj", Method, 22},
    -		{"(*Alias).Origin", Method, 23},
    -		{"(*Alias).Rhs", Method, 23},
    -		{"(*Alias).SetTypeParams", Method, 23},
    -		{"(*Alias).String", Method, 22},
    -		{"(*Alias).TypeArgs", Method, 23},
    -		{"(*Alias).TypeParams", Method, 23},
    -		{"(*Alias).Underlying", Method, 22},
    -		{"(*ArgumentError).Error", Method, 18},
    -		{"(*ArgumentError).Unwrap", Method, 18},
    -		{"(*Array).Elem", Method, 5},
    -		{"(*Array).Len", Method, 5},
    -		{"(*Array).String", Method, 5},
    -		{"(*Array).Underlying", Method, 5},
    -		{"(*Basic).Info", Method, 5},
    -		{"(*Basic).Kind", Method, 5},
    -		{"(*Basic).Name", Method, 5},
    -		{"(*Basic).String", Method, 5},
    -		{"(*Basic).Underlying", Method, 5},
    -		{"(*Builtin).Exported", Method, 5},
    -		{"(*Builtin).Id", Method, 5},
    -		{"(*Builtin).Name", Method, 5},
    -		{"(*Builtin).Parent", Method, 5},
    -		{"(*Builtin).Pkg", Method, 5},
    -		{"(*Builtin).Pos", Method, 5},
    -		{"(*Builtin).String", Method, 5},
    -		{"(*Builtin).Type", Method, 5},
    -		{"(*Chan).Dir", Method, 5},
    -		{"(*Chan).Elem", Method, 5},
    -		{"(*Chan).String", Method, 5},
    -		{"(*Chan).Underlying", Method, 5},
    -		{"(*Checker).Files", Method, 5},
    -		{"(*Config).Check", Method, 5},
    -		{"(*Const).Exported", Method, 5},
    -		{"(*Const).Id", Method, 5},
    -		{"(*Const).Name", Method, 5},
    -		{"(*Const).Parent", Method, 5},
    -		{"(*Const).Pkg", Method, 5},
    -		{"(*Const).Pos", Method, 5},
    -		{"(*Const).String", Method, 5},
    -		{"(*Const).Type", Method, 5},
    -		{"(*Const).Val", Method, 5},
    -		{"(*Func).Exported", Method, 5},
    -		{"(*Func).FullName", Method, 5},
    -		{"(*Func).Id", Method, 5},
    -		{"(*Func).Name", Method, 5},
    -		{"(*Func).Origin", Method, 19},
    -		{"(*Func).Parent", Method, 5},
    -		{"(*Func).Pkg", Method, 5},
    -		{"(*Func).Pos", Method, 5},
    -		{"(*Func).Scope", Method, 5},
    -		{"(*Func).Signature", Method, 23},
    -		{"(*Func).String", Method, 5},
    -		{"(*Func).Type", Method, 5},
    -		{"(*Info).ObjectOf", Method, 5},
    -		{"(*Info).PkgNameOf", Method, 22},
    -		{"(*Info).TypeOf", Method, 5},
    -		{"(*Initializer).String", Method, 5},
    -		{"(*Interface).Complete", Method, 5},
    -		{"(*Interface).Embedded", Method, 5},
    -		{"(*Interface).EmbeddedType", Method, 11},
    -		{"(*Interface).EmbeddedTypes", Method, 24},
    -		{"(*Interface).Empty", Method, 5},
    -		{"(*Interface).ExplicitMethod", Method, 5},
    -		{"(*Interface).ExplicitMethods", Method, 24},
    -		{"(*Interface).IsComparable", Method, 18},
    -		{"(*Interface).IsImplicit", Method, 18},
    -		{"(*Interface).IsMethodSet", Method, 18},
    -		{"(*Interface).MarkImplicit", Method, 18},
    -		{"(*Interface).Method", Method, 5},
    -		{"(*Interface).Methods", Method, 24},
    -		{"(*Interface).NumEmbeddeds", Method, 5},
    -		{"(*Interface).NumExplicitMethods", Method, 5},
    -		{"(*Interface).NumMethods", Method, 5},
    -		{"(*Interface).String", Method, 5},
    -		{"(*Interface).Underlying", Method, 5},
    -		{"(*Label).Exported", Method, 5},
    -		{"(*Label).Id", Method, 5},
    -		{"(*Label).Name", Method, 5},
    -		{"(*Label).Parent", Method, 5},
    -		{"(*Label).Pkg", Method, 5},
    -		{"(*Label).Pos", Method, 5},
    -		{"(*Label).String", Method, 5},
    -		{"(*Label).Type", Method, 5},
    -		{"(*Map).Elem", Method, 5},
    -		{"(*Map).Key", Method, 5},
    -		{"(*Map).String", Method, 5},
    -		{"(*Map).Underlying", Method, 5},
    -		{"(*MethodSet).At", Method, 5},
    -		{"(*MethodSet).Len", Method, 5},
    -		{"(*MethodSet).Lookup", Method, 5},
    -		{"(*MethodSet).Methods", Method, 24},
    -		{"(*MethodSet).String", Method, 5},
    -		{"(*Named).AddMethod", Method, 5},
    -		{"(*Named).Method", Method, 5},
    -		{"(*Named).Methods", Method, 24},
    -		{"(*Named).NumMethods", Method, 5},
    -		{"(*Named).Obj", Method, 5},
    -		{"(*Named).Origin", Method, 18},
    -		{"(*Named).SetTypeParams", Method, 18},
    -		{"(*Named).SetUnderlying", Method, 5},
    -		{"(*Named).String", Method, 5},
    -		{"(*Named).TypeArgs", Method, 18},
    -		{"(*Named).TypeParams", Method, 18},
    -		{"(*Named).Underlying", Method, 5},
    -		{"(*Nil).Exported", Method, 5},
    -		{"(*Nil).Id", Method, 5},
    -		{"(*Nil).Name", Method, 5},
    -		{"(*Nil).Parent", Method, 5},
    -		{"(*Nil).Pkg", Method, 5},
    -		{"(*Nil).Pos", Method, 5},
    -		{"(*Nil).String", Method, 5},
    -		{"(*Nil).Type", Method, 5},
    -		{"(*Package).Complete", Method, 5},
    -		{"(*Package).GoVersion", Method, 21},
    -		{"(*Package).Imports", Method, 5},
    -		{"(*Package).MarkComplete", Method, 5},
    -		{"(*Package).Name", Method, 5},
    -		{"(*Package).Path", Method, 5},
    -		{"(*Package).Scope", Method, 5},
    -		{"(*Package).SetImports", Method, 5},
    -		{"(*Package).SetName", Method, 6},
    -		{"(*Package).String", Method, 5},
    -		{"(*PkgName).Exported", Method, 5},
    -		{"(*PkgName).Id", Method, 5},
    -		{"(*PkgName).Imported", Method, 5},
    -		{"(*PkgName).Name", Method, 5},
    -		{"(*PkgName).Parent", Method, 5},
    -		{"(*PkgName).Pkg", Method, 5},
    -		{"(*PkgName).Pos", Method, 5},
    -		{"(*PkgName).String", Method, 5},
    -		{"(*PkgName).Type", Method, 5},
    -		{"(*Pointer).Elem", Method, 5},
    -		{"(*Pointer).String", Method, 5},
    -		{"(*Pointer).Underlying", Method, 5},
    -		{"(*Scope).Child", Method, 5},
    -		{"(*Scope).Children", Method, 24},
    -		{"(*Scope).Contains", Method, 5},
    -		{"(*Scope).End", Method, 5},
    -		{"(*Scope).Innermost", Method, 5},
    -		{"(*Scope).Insert", Method, 5},
    -		{"(*Scope).Len", Method, 5},
    -		{"(*Scope).Lookup", Method, 5},
    -		{"(*Scope).LookupParent", Method, 5},
    -		{"(*Scope).Names", Method, 5},
    -		{"(*Scope).NumChildren", Method, 5},
    -		{"(*Scope).Parent", Method, 5},
    -		{"(*Scope).Pos", Method, 5},
    -		{"(*Scope).String", Method, 5},
    -		{"(*Scope).WriteTo", Method, 5},
    -		{"(*Selection).Index", Method, 5},
    -		{"(*Selection).Indirect", Method, 5},
    -		{"(*Selection).Kind", Method, 5},
    -		{"(*Selection).Obj", Method, 5},
    -		{"(*Selection).Recv", Method, 5},
    -		{"(*Selection).String", Method, 5},
    -		{"(*Selection).Type", Method, 5},
    -		{"(*Signature).Params", Method, 5},
    -		{"(*Signature).Recv", Method, 5},
    -		{"(*Signature).RecvTypeParams", Method, 18},
    -		{"(*Signature).Results", Method, 5},
    -		{"(*Signature).String", Method, 5},
    -		{"(*Signature).TypeParams", Method, 18},
    -		{"(*Signature).Underlying", Method, 5},
    -		{"(*Signature).Variadic", Method, 5},
    -		{"(*Slice).Elem", Method, 5},
    -		{"(*Slice).String", Method, 5},
    -		{"(*Slice).Underlying", Method, 5},
    -		{"(*StdSizes).Alignof", Method, 5},
    -		{"(*StdSizes).Offsetsof", Method, 5},
    -		{"(*StdSizes).Sizeof", Method, 5},
    -		{"(*Struct).Field", Method, 5},
    -		{"(*Struct).Fields", Method, 24},
    -		{"(*Struct).NumFields", Method, 5},
    -		{"(*Struct).String", Method, 5},
    -		{"(*Struct).Tag", Method, 5},
    -		{"(*Struct).Underlying", Method, 5},
    -		{"(*Term).String", Method, 18},
    -		{"(*Term).Tilde", Method, 18},
    -		{"(*Term).Type", Method, 18},
    -		{"(*Tuple).At", Method, 5},
    -		{"(*Tuple).Len", Method, 5},
    -		{"(*Tuple).String", Method, 5},
    -		{"(*Tuple).Underlying", Method, 5},
    -		{"(*Tuple).Variables", Method, 24},
    -		{"(*TypeList).At", Method, 18},
    -		{"(*TypeList).Len", Method, 18},
    -		{"(*TypeList).Types", Method, 24},
    -		{"(*TypeName).Exported", Method, 5},
    -		{"(*TypeName).Id", Method, 5},
    -		{"(*TypeName).IsAlias", Method, 9},
    -		{"(*TypeName).Name", Method, 5},
    -		{"(*TypeName).Parent", Method, 5},
    -		{"(*TypeName).Pkg", Method, 5},
    -		{"(*TypeName).Pos", Method, 5},
    -		{"(*TypeName).String", Method, 5},
    -		{"(*TypeName).Type", Method, 5},
    -		{"(*TypeParam).Constraint", Method, 18},
    -		{"(*TypeParam).Index", Method, 18},
    -		{"(*TypeParam).Obj", Method, 18},
    -		{"(*TypeParam).SetConstraint", Method, 18},
    -		{"(*TypeParam).String", Method, 18},
    -		{"(*TypeParam).Underlying", Method, 18},
    -		{"(*TypeParamList).At", Method, 18},
    -		{"(*TypeParamList).Len", Method, 18},
    -		{"(*TypeParamList).TypeParams", Method, 24},
    -		{"(*Union).Len", Method, 18},
    -		{"(*Union).String", Method, 18},
    -		{"(*Union).Term", Method, 18},
    -		{"(*Union).Terms", Method, 24},
    -		{"(*Union).Underlying", Method, 18},
    -		{"(*Var).Anonymous", Method, 5},
    -		{"(*Var).Embedded", Method, 11},
    -		{"(*Var).Exported", Method, 5},
    -		{"(*Var).Id", Method, 5},
    -		{"(*Var).IsField", Method, 5},
    -		{"(*Var).Name", Method, 5},
    -		{"(*Var).Origin", Method, 19},
    -		{"(*Var).Parent", Method, 5},
    -		{"(*Var).Pkg", Method, 5},
    -		{"(*Var).Pos", Method, 5},
    -		{"(*Var).String", Method, 5},
    -		{"(*Var).Type", Method, 5},
    -		{"(Checker).ObjectOf", Method, 5},
    -		{"(Checker).PkgNameOf", Method, 22},
    -		{"(Checker).TypeOf", Method, 5},
    -		{"(Error).Error", Method, 5},
    -		{"(TypeAndValue).Addressable", Method, 5},
    -		{"(TypeAndValue).Assignable", Method, 5},
    -		{"(TypeAndValue).HasOk", Method, 5},
    -		{"(TypeAndValue).IsBuiltin", Method, 5},
    -		{"(TypeAndValue).IsNil", Method, 5},
    -		{"(TypeAndValue).IsType", Method, 5},
    -		{"(TypeAndValue).IsValue", Method, 5},
    -		{"(TypeAndValue).IsVoid", Method, 5},
    -		{"Alias", Type, 22},
    -		{"ArgumentError", Type, 18},
    -		{"ArgumentError.Err", Field, 18},
    -		{"ArgumentError.Index", Field, 18},
    -		{"Array", Type, 5},
    -		{"AssertableTo", Func, 5},
    -		{"AssignableTo", Func, 5},
    -		{"Basic", Type, 5},
    -		{"BasicInfo", Type, 5},
    -		{"BasicKind", Type, 5},
    -		{"Bool", Const, 5},
    -		{"Builtin", Type, 5},
    -		{"Byte", Const, 5},
    -		{"Chan", Type, 5},
    -		{"ChanDir", Type, 5},
    -		{"CheckExpr", Func, 13},
    -		{"Checker", Type, 5},
    -		{"Checker.Info", Field, 5},
    -		{"Comparable", Func, 5},
    -		{"Complex128", Const, 5},
    -		{"Complex64", Const, 5},
    -		{"Config", Type, 5},
    -		{"Config.Context", Field, 18},
    -		{"Config.DisableUnusedImportCheck", Field, 5},
    -		{"Config.Error", Field, 5},
    -		{"Config.FakeImportC", Field, 5},
    -		{"Config.GoVersion", Field, 18},
    -		{"Config.IgnoreFuncBodies", Field, 5},
    -		{"Config.Importer", Field, 5},
    -		{"Config.Sizes", Field, 5},
    -		{"Const", Type, 5},
    -		{"Context", Type, 18},
    -		{"ConvertibleTo", Func, 5},
    -		{"DefPredeclaredTestFuncs", Func, 5},
    -		{"Default", Func, 8},
    -		{"Error", Type, 5},
    -		{"Error.Fset", Field, 5},
    -		{"Error.Msg", Field, 5},
    -		{"Error.Pos", Field, 5},
    -		{"Error.Soft", Field, 5},
    -		{"Eval", Func, 5},
    -		{"ExprString", Func, 5},
    -		{"FieldVal", Const, 5},
    -		{"Float32", Const, 5},
    -		{"Float64", Const, 5},
    -		{"Func", Type, 5},
    -		{"Id", Func, 5},
    -		{"Identical", Func, 5},
    -		{"IdenticalIgnoreTags", Func, 8},
    -		{"Implements", Func, 5},
    -		{"ImportMode", Type, 6},
    -		{"Importer", Type, 5},
    -		{"ImporterFrom", Type, 6},
    -		{"Info", Type, 5},
    -		{"Info.Defs", Field, 5},
    -		{"Info.FileVersions", Field, 22},
    -		{"Info.Implicits", Field, 5},
    -		{"Info.InitOrder", Field, 5},
    -		{"Info.Instances", Field, 18},
    -		{"Info.Scopes", Field, 5},
    -		{"Info.Selections", Field, 5},
    -		{"Info.Types", Field, 5},
    -		{"Info.Uses", Field, 5},
    -		{"Initializer", Type, 5},
    -		{"Initializer.Lhs", Field, 5},
    -		{"Initializer.Rhs", Field, 5},
    -		{"Instance", Type, 18},
    -		{"Instance.Type", Field, 18},
    -		{"Instance.TypeArgs", Field, 18},
    -		{"Instantiate", Func, 18},
    -		{"Int", Const, 5},
    -		{"Int16", Const, 5},
    -		{"Int32", Const, 5},
    -		{"Int64", Const, 5},
    -		{"Int8", Const, 5},
    -		{"Interface", Type, 5},
    -		{"Invalid", Const, 5},
    -		{"IsBoolean", Const, 5},
    -		{"IsComplex", Const, 5},
    -		{"IsConstType", Const, 5},
    -		{"IsFloat", Const, 5},
    -		{"IsInteger", Const, 5},
    -		{"IsInterface", Func, 5},
    -		{"IsNumeric", Const, 5},
    -		{"IsOrdered", Const, 5},
    -		{"IsString", Const, 5},
    -		{"IsUnsigned", Const, 5},
    -		{"IsUntyped", Const, 5},
    -		{"Label", Type, 5},
    -		{"LookupFieldOrMethod", Func, 5},
    -		{"Map", Type, 5},
    -		{"MethodExpr", Const, 5},
    -		{"MethodSet", Type, 5},
    -		{"MethodVal", Const, 5},
    -		{"MissingMethod", Func, 5},
    -		{"Named", Type, 5},
    -		{"NewAlias", Func, 22},
    -		{"NewArray", Func, 5},
    -		{"NewChan", Func, 5},
    -		{"NewChecker", Func, 5},
    -		{"NewConst", Func, 5},
    -		{"NewContext", Func, 18},
    -		{"NewField", Func, 5},
    -		{"NewFunc", Func, 5},
    -		{"NewInterface", Func, 5},
    -		{"NewInterfaceType", Func, 11},
    -		{"NewLabel", Func, 5},
    -		{"NewMap", Func, 5},
    -		{"NewMethodSet", Func, 5},
    -		{"NewNamed", Func, 5},
    -		{"NewPackage", Func, 5},
    -		{"NewParam", Func, 5},
    -		{"NewPkgName", Func, 5},
    -		{"NewPointer", Func, 5},
    -		{"NewScope", Func, 5},
    -		{"NewSignature", Func, 5},
    -		{"NewSignatureType", Func, 18},
    -		{"NewSlice", Func, 5},
    -		{"NewStruct", Func, 5},
    -		{"NewTerm", Func, 18},
    -		{"NewTuple", Func, 5},
    -		{"NewTypeName", Func, 5},
    -		{"NewTypeParam", Func, 18},
    -		{"NewUnion", Func, 18},
    -		{"NewVar", Func, 5},
    -		{"Nil", Type, 5},
    -		{"Object", Type, 5},
    -		{"ObjectString", Func, 5},
    -		{"Package", Type, 5},
    -		{"PkgName", Type, 5},
    -		{"Pointer", Type, 5},
    -		{"Qualifier", Type, 5},
    -		{"RecvOnly", Const, 5},
    -		{"RelativeTo", Func, 5},
    -		{"Rune", Const, 5},
    -		{"Satisfies", Func, 20},
    -		{"Scope", Type, 5},
    -		{"Selection", Type, 5},
    -		{"SelectionKind", Type, 5},
    -		{"SelectionString", Func, 5},
    -		{"SendOnly", Const, 5},
    -		{"SendRecv", Const, 5},
    -		{"Signature", Type, 5},
    -		{"Sizes", Type, 5},
    -		{"SizesFor", Func, 9},
    -		{"Slice", Type, 5},
    -		{"StdSizes", Type, 5},
    -		{"StdSizes.MaxAlign", Field, 5},
    -		{"StdSizes.WordSize", Field, 5},
    -		{"String", Const, 5},
    -		{"Struct", Type, 5},
    -		{"Term", Type, 18},
    -		{"Tuple", Type, 5},
    -		{"Typ", Var, 5},
    -		{"Type", Type, 5},
    -		{"TypeAndValue", Type, 5},
    -		{"TypeAndValue.Type", Field, 5},
    -		{"TypeAndValue.Value", Field, 5},
    -		{"TypeList", Type, 18},
    -		{"TypeName", Type, 5},
    -		{"TypeParam", Type, 18},
    -		{"TypeParamList", Type, 18},
    -		{"TypeString", Func, 5},
    -		{"Uint", Const, 5},
    -		{"Uint16", Const, 5},
    -		{"Uint32", Const, 5},
    -		{"Uint64", Const, 5},
    -		{"Uint8", Const, 5},
    -		{"Uintptr", Const, 5},
    -		{"Unalias", Func, 22},
    -		{"Union", Type, 18},
    -		{"Universe", Var, 5},
    -		{"Unsafe", Var, 5},
    -		{"UnsafePointer", Const, 5},
    -		{"UntypedBool", Const, 5},
    -		{"UntypedComplex", Const, 5},
    -		{"UntypedFloat", Const, 5},
    -		{"UntypedInt", Const, 5},
    -		{"UntypedNil", Const, 5},
    -		{"UntypedRune", Const, 5},
    -		{"UntypedString", Const, 5},
    -		{"Var", Type, 5},
    -		{"WriteExpr", Func, 5},
    -		{"WriteSignature", Func, 5},
    -		{"WriteType", Func, 5},
    +		{"(*Alias).Obj", Method, 22, ""},
    +		{"(*Alias).Origin", Method, 23, ""},
    +		{"(*Alias).Rhs", Method, 23, ""},
    +		{"(*Alias).SetTypeParams", Method, 23, ""},
    +		{"(*Alias).String", Method, 22, ""},
    +		{"(*Alias).TypeArgs", Method, 23, ""},
    +		{"(*Alias).TypeParams", Method, 23, ""},
    +		{"(*Alias).Underlying", Method, 22, ""},
    +		{"(*ArgumentError).Error", Method, 18, ""},
    +		{"(*ArgumentError).Unwrap", Method, 18, ""},
    +		{"(*Array).Elem", Method, 5, ""},
    +		{"(*Array).Len", Method, 5, ""},
    +		{"(*Array).String", Method, 5, ""},
    +		{"(*Array).Underlying", Method, 5, ""},
    +		{"(*Basic).Info", Method, 5, ""},
    +		{"(*Basic).Kind", Method, 5, ""},
    +		{"(*Basic).Name", Method, 5, ""},
    +		{"(*Basic).String", Method, 5, ""},
    +		{"(*Basic).Underlying", Method, 5, ""},
    +		{"(*Builtin).Exported", Method, 5, ""},
    +		{"(*Builtin).Id", Method, 5, ""},
    +		{"(*Builtin).Name", Method, 5, ""},
    +		{"(*Builtin).Parent", Method, 5, ""},
    +		{"(*Builtin).Pkg", Method, 5, ""},
    +		{"(*Builtin).Pos", Method, 5, ""},
    +		{"(*Builtin).String", Method, 5, ""},
    +		{"(*Builtin).Type", Method, 5, ""},
    +		{"(*Chan).Dir", Method, 5, ""},
    +		{"(*Chan).Elem", Method, 5, ""},
    +		{"(*Chan).String", Method, 5, ""},
    +		{"(*Chan).Underlying", Method, 5, ""},
    +		{"(*Checker).Files", Method, 5, ""},
    +		{"(*Config).Check", Method, 5, ""},
    +		{"(*Const).Exported", Method, 5, ""},
    +		{"(*Const).Id", Method, 5, ""},
    +		{"(*Const).Name", Method, 5, ""},
    +		{"(*Const).Parent", Method, 5, ""},
    +		{"(*Const).Pkg", Method, 5, ""},
    +		{"(*Const).Pos", Method, 5, ""},
    +		{"(*Const).String", Method, 5, ""},
    +		{"(*Const).Type", Method, 5, ""},
    +		{"(*Const).Val", Method, 5, ""},
    +		{"(*Func).Exported", Method, 5, ""},
    +		{"(*Func).FullName", Method, 5, ""},
    +		{"(*Func).Id", Method, 5, ""},
    +		{"(*Func).Name", Method, 5, ""},
    +		{"(*Func).Origin", Method, 19, ""},
    +		{"(*Func).Parent", Method, 5, ""},
    +		{"(*Func).Pkg", Method, 5, ""},
    +		{"(*Func).Pos", Method, 5, ""},
    +		{"(*Func).Scope", Method, 5, ""},
    +		{"(*Func).Signature", Method, 23, ""},
    +		{"(*Func).String", Method, 5, ""},
    +		{"(*Func).Type", Method, 5, ""},
    +		{"(*Info).ObjectOf", Method, 5, ""},
    +		{"(*Info).PkgNameOf", Method, 22, ""},
    +		{"(*Info).TypeOf", Method, 5, ""},
    +		{"(*Initializer).String", Method, 5, ""},
    +		{"(*Interface).Complete", Method, 5, ""},
    +		{"(*Interface).Embedded", Method, 5, ""},
    +		{"(*Interface).EmbeddedType", Method, 11, ""},
    +		{"(*Interface).EmbeddedTypes", Method, 24, ""},
    +		{"(*Interface).Empty", Method, 5, ""},
    +		{"(*Interface).ExplicitMethod", Method, 5, ""},
    +		{"(*Interface).ExplicitMethods", Method, 24, ""},
    +		{"(*Interface).IsComparable", Method, 18, ""},
    +		{"(*Interface).IsImplicit", Method, 18, ""},
    +		{"(*Interface).IsMethodSet", Method, 18, ""},
    +		{"(*Interface).MarkImplicit", Method, 18, ""},
    +		{"(*Interface).Method", Method, 5, ""},
    +		{"(*Interface).Methods", Method, 24, ""},
    +		{"(*Interface).NumEmbeddeds", Method, 5, ""},
    +		{"(*Interface).NumExplicitMethods", Method, 5, ""},
    +		{"(*Interface).NumMethods", Method, 5, ""},
    +		{"(*Interface).String", Method, 5, ""},
    +		{"(*Interface).Underlying", Method, 5, ""},
    +		{"(*Label).Exported", Method, 5, ""},
    +		{"(*Label).Id", Method, 5, ""},
    +		{"(*Label).Name", Method, 5, ""},
    +		{"(*Label).Parent", Method, 5, ""},
    +		{"(*Label).Pkg", Method, 5, ""},
    +		{"(*Label).Pos", Method, 5, ""},
    +		{"(*Label).String", Method, 5, ""},
    +		{"(*Label).Type", Method, 5, ""},
    +		{"(*Map).Elem", Method, 5, ""},
    +		{"(*Map).Key", Method, 5, ""},
    +		{"(*Map).String", Method, 5, ""},
    +		{"(*Map).Underlying", Method, 5, ""},
    +		{"(*MethodSet).At", Method, 5, ""},
    +		{"(*MethodSet).Len", Method, 5, ""},
    +		{"(*MethodSet).Lookup", Method, 5, ""},
    +		{"(*MethodSet).Methods", Method, 24, ""},
    +		{"(*MethodSet).String", Method, 5, ""},
    +		{"(*Named).AddMethod", Method, 5, ""},
    +		{"(*Named).Method", Method, 5, ""},
    +		{"(*Named).Methods", Method, 24, ""},
    +		{"(*Named).NumMethods", Method, 5, ""},
    +		{"(*Named).Obj", Method, 5, ""},
    +		{"(*Named).Origin", Method, 18, ""},
    +		{"(*Named).SetTypeParams", Method, 18, ""},
    +		{"(*Named).SetUnderlying", Method, 5, ""},
    +		{"(*Named).String", Method, 5, ""},
    +		{"(*Named).TypeArgs", Method, 18, ""},
    +		{"(*Named).TypeParams", Method, 18, ""},
    +		{"(*Named).Underlying", Method, 5, ""},
    +		{"(*Nil).Exported", Method, 5, ""},
    +		{"(*Nil).Id", Method, 5, ""},
    +		{"(*Nil).Name", Method, 5, ""},
    +		{"(*Nil).Parent", Method, 5, ""},
    +		{"(*Nil).Pkg", Method, 5, ""},
    +		{"(*Nil).Pos", Method, 5, ""},
    +		{"(*Nil).String", Method, 5, ""},
    +		{"(*Nil).Type", Method, 5, ""},
    +		{"(*Package).Complete", Method, 5, ""},
    +		{"(*Package).GoVersion", Method, 21, ""},
    +		{"(*Package).Imports", Method, 5, ""},
    +		{"(*Package).MarkComplete", Method, 5, ""},
    +		{"(*Package).Name", Method, 5, ""},
    +		{"(*Package).Path", Method, 5, ""},
    +		{"(*Package).Scope", Method, 5, ""},
    +		{"(*Package).SetImports", Method, 5, ""},
    +		{"(*Package).SetName", Method, 6, ""},
    +		{"(*Package).String", Method, 5, ""},
    +		{"(*PkgName).Exported", Method, 5, ""},
    +		{"(*PkgName).Id", Method, 5, ""},
    +		{"(*PkgName).Imported", Method, 5, ""},
    +		{"(*PkgName).Name", Method, 5, ""},
    +		{"(*PkgName).Parent", Method, 5, ""},
    +		{"(*PkgName).Pkg", Method, 5, ""},
    +		{"(*PkgName).Pos", Method, 5, ""},
    +		{"(*PkgName).String", Method, 5, ""},
    +		{"(*PkgName).Type", Method, 5, ""},
    +		{"(*Pointer).Elem", Method, 5, ""},
    +		{"(*Pointer).String", Method, 5, ""},
    +		{"(*Pointer).Underlying", Method, 5, ""},
    +		{"(*Scope).Child", Method, 5, ""},
    +		{"(*Scope).Children", Method, 24, ""},
    +		{"(*Scope).Contains", Method, 5, ""},
    +		{"(*Scope).End", Method, 5, ""},
    +		{"(*Scope).Innermost", Method, 5, ""},
    +		{"(*Scope).Insert", Method, 5, ""},
    +		{"(*Scope).Len", Method, 5, ""},
    +		{"(*Scope).Lookup", Method, 5, ""},
    +		{"(*Scope).LookupParent", Method, 5, ""},
    +		{"(*Scope).Names", Method, 5, ""},
    +		{"(*Scope).NumChildren", Method, 5, ""},
    +		{"(*Scope).Parent", Method, 5, ""},
    +		{"(*Scope).Pos", Method, 5, ""},
    +		{"(*Scope).String", Method, 5, ""},
    +		{"(*Scope).WriteTo", Method, 5, ""},
    +		{"(*Selection).Index", Method, 5, ""},
    +		{"(*Selection).Indirect", Method, 5, ""},
    +		{"(*Selection).Kind", Method, 5, ""},
    +		{"(*Selection).Obj", Method, 5, ""},
    +		{"(*Selection).Recv", Method, 5, ""},
    +		{"(*Selection).String", Method, 5, ""},
    +		{"(*Selection).Type", Method, 5, ""},
    +		{"(*Signature).Params", Method, 5, ""},
    +		{"(*Signature).Recv", Method, 5, ""},
    +		{"(*Signature).RecvTypeParams", Method, 18, ""},
    +		{"(*Signature).Results", Method, 5, ""},
    +		{"(*Signature).String", Method, 5, ""},
    +		{"(*Signature).TypeParams", Method, 18, ""},
    +		{"(*Signature).Underlying", Method, 5, ""},
    +		{"(*Signature).Variadic", Method, 5, ""},
    +		{"(*Slice).Elem", Method, 5, ""},
    +		{"(*Slice).String", Method, 5, ""},
    +		{"(*Slice).Underlying", Method, 5, ""},
    +		{"(*StdSizes).Alignof", Method, 5, ""},
    +		{"(*StdSizes).Offsetsof", Method, 5, ""},
    +		{"(*StdSizes).Sizeof", Method, 5, ""},
    +		{"(*Struct).Field", Method, 5, ""},
    +		{"(*Struct).Fields", Method, 24, ""},
    +		{"(*Struct).NumFields", Method, 5, ""},
    +		{"(*Struct).String", Method, 5, ""},
    +		{"(*Struct).Tag", Method, 5, ""},
    +		{"(*Struct).Underlying", Method, 5, ""},
    +		{"(*Term).String", Method, 18, ""},
    +		{"(*Term).Tilde", Method, 18, ""},
    +		{"(*Term).Type", Method, 18, ""},
    +		{"(*Tuple).At", Method, 5, ""},
    +		{"(*Tuple).Len", Method, 5, ""},
    +		{"(*Tuple).String", Method, 5, ""},
    +		{"(*Tuple).Underlying", Method, 5, ""},
    +		{"(*Tuple).Variables", Method, 24, ""},
    +		{"(*TypeList).At", Method, 18, ""},
    +		{"(*TypeList).Len", Method, 18, ""},
    +		{"(*TypeList).Types", Method, 24, ""},
    +		{"(*TypeName).Exported", Method, 5, ""},
    +		{"(*TypeName).Id", Method, 5, ""},
    +		{"(*TypeName).IsAlias", Method, 9, ""},
    +		{"(*TypeName).Name", Method, 5, ""},
    +		{"(*TypeName).Parent", Method, 5, ""},
    +		{"(*TypeName).Pkg", Method, 5, ""},
    +		{"(*TypeName).Pos", Method, 5, ""},
    +		{"(*TypeName).String", Method, 5, ""},
    +		{"(*TypeName).Type", Method, 5, ""},
    +		{"(*TypeParam).Constraint", Method, 18, ""},
    +		{"(*TypeParam).Index", Method, 18, ""},
    +		{"(*TypeParam).Obj", Method, 18, ""},
    +		{"(*TypeParam).SetConstraint", Method, 18, ""},
    +		{"(*TypeParam).String", Method, 18, ""},
    +		{"(*TypeParam).Underlying", Method, 18, ""},
    +		{"(*TypeParamList).At", Method, 18, ""},
    +		{"(*TypeParamList).Len", Method, 18, ""},
    +		{"(*TypeParamList).TypeParams", Method, 24, ""},
    +		{"(*Union).Len", Method, 18, ""},
    +		{"(*Union).String", Method, 18, ""},
    +		{"(*Union).Term", Method, 18, ""},
    +		{"(*Union).Terms", Method, 24, ""},
    +		{"(*Union).Underlying", Method, 18, ""},
    +		{"(*Var).Anonymous", Method, 5, ""},
    +		{"(*Var).Embedded", Method, 11, ""},
    +		{"(*Var).Exported", Method, 5, ""},
    +		{"(*Var).Id", Method, 5, ""},
    +		{"(*Var).IsField", Method, 5, ""},
    +		{"(*Var).Kind", Method, 25, ""},
    +		{"(*Var).Name", Method, 5, ""},
    +		{"(*Var).Origin", Method, 19, ""},
    +		{"(*Var).Parent", Method, 5, ""},
    +		{"(*Var).Pkg", Method, 5, ""},
    +		{"(*Var).Pos", Method, 5, ""},
    +		{"(*Var).SetKind", Method, 25, ""},
    +		{"(*Var).String", Method, 5, ""},
    +		{"(*Var).Type", Method, 5, ""},
    +		{"(Checker).ObjectOf", Method, 5, ""},
    +		{"(Checker).PkgNameOf", Method, 22, ""},
    +		{"(Checker).TypeOf", Method, 5, ""},
    +		{"(Error).Error", Method, 5, ""},
    +		{"(TypeAndValue).Addressable", Method, 5, ""},
    +		{"(TypeAndValue).Assignable", Method, 5, ""},
    +		{"(TypeAndValue).HasOk", Method, 5, ""},
    +		{"(TypeAndValue).IsBuiltin", Method, 5, ""},
    +		{"(TypeAndValue).IsNil", Method, 5, ""},
    +		{"(TypeAndValue).IsType", Method, 5, ""},
    +		{"(TypeAndValue).IsValue", Method, 5, ""},
    +		{"(TypeAndValue).IsVoid", Method, 5, ""},
    +		{"(VarKind).String", Method, 25, ""},
    +		{"Alias", Type, 22, ""},
    +		{"ArgumentError", Type, 18, ""},
    +		{"ArgumentError.Err", Field, 18, ""},
    +		{"ArgumentError.Index", Field, 18, ""},
    +		{"Array", Type, 5, ""},
    +		{"AssertableTo", Func, 5, "func(V *Interface, T Type) bool"},
    +		{"AssignableTo", Func, 5, "func(V Type, T Type) bool"},
    +		{"Basic", Type, 5, ""},
    +		{"BasicInfo", Type, 5, ""},
    +		{"BasicKind", Type, 5, ""},
    +		{"Bool", Const, 5, ""},
    +		{"Builtin", Type, 5, ""},
    +		{"Byte", Const, 5, ""},
    +		{"Chan", Type, 5, ""},
    +		{"ChanDir", Type, 5, ""},
    +		{"CheckExpr", Func, 13, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr ast.Expr, info *Info) (err error)"},
    +		{"Checker", Type, 5, ""},
    +		{"Checker.Info", Field, 5, ""},
    +		{"Comparable", Func, 5, "func(T Type) bool"},
    +		{"Complex128", Const, 5, ""},
    +		{"Complex64", Const, 5, ""},
    +		{"Config", Type, 5, ""},
    +		{"Config.Context", Field, 18, ""},
    +		{"Config.DisableUnusedImportCheck", Field, 5, ""},
    +		{"Config.Error", Field, 5, ""},
    +		{"Config.FakeImportC", Field, 5, ""},
    +		{"Config.GoVersion", Field, 18, ""},
    +		{"Config.IgnoreFuncBodies", Field, 5, ""},
    +		{"Config.Importer", Field, 5, ""},
    +		{"Config.Sizes", Field, 5, ""},
    +		{"Const", Type, 5, ""},
    +		{"Context", Type, 18, ""},
    +		{"ConvertibleTo", Func, 5, "func(V Type, T Type) bool"},
    +		{"DefPredeclaredTestFuncs", Func, 5, "func()"},
    +		{"Default", Func, 8, "func(t Type) Type"},
    +		{"Error", Type, 5, ""},
    +		{"Error.Fset", Field, 5, ""},
    +		{"Error.Msg", Field, 5, ""},
    +		{"Error.Pos", Field, 5, ""},
    +		{"Error.Soft", Field, 5, ""},
    +		{"Eval", Func, 5, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr string) (_ TypeAndValue, err error)"},
    +		{"ExprString", Func, 5, "func(x ast.Expr) string"},
    +		{"FieldVal", Const, 5, ""},
    +		{"FieldVar", Const, 25, ""},
    +		{"Float32", Const, 5, ""},
    +		{"Float64", Const, 5, ""},
    +		{"Func", Type, 5, ""},
    +		{"Id", Func, 5, "func(pkg *Package, name string) string"},
    +		{"Identical", Func, 5, "func(x Type, y Type) bool"},
    +		{"IdenticalIgnoreTags", Func, 8, "func(x Type, y Type) bool"},
    +		{"Implements", Func, 5, "func(V Type, T *Interface) bool"},
    +		{"ImportMode", Type, 6, ""},
    +		{"Importer", Type, 5, ""},
    +		{"ImporterFrom", Type, 6, ""},
    +		{"Info", Type, 5, ""},
    +		{"Info.Defs", Field, 5, ""},
    +		{"Info.FileVersions", Field, 22, ""},
    +		{"Info.Implicits", Field, 5, ""},
    +		{"Info.InitOrder", Field, 5, ""},
    +		{"Info.Instances", Field, 18, ""},
    +		{"Info.Scopes", Field, 5, ""},
    +		{"Info.Selections", Field, 5, ""},
    +		{"Info.Types", Field, 5, ""},
    +		{"Info.Uses", Field, 5, ""},
    +		{"Initializer", Type, 5, ""},
    +		{"Initializer.Lhs", Field, 5, ""},
    +		{"Initializer.Rhs", Field, 5, ""},
    +		{"Instance", Type, 18, ""},
    +		{"Instance.Type", Field, 18, ""},
    +		{"Instance.TypeArgs", Field, 18, ""},
    +		{"Instantiate", Func, 18, "func(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error)"},
    +		{"Int", Const, 5, ""},
    +		{"Int16", Const, 5, ""},
    +		{"Int32", Const, 5, ""},
    +		{"Int64", Const, 5, ""},
    +		{"Int8", Const, 5, ""},
    +		{"Interface", Type, 5, ""},
    +		{"Invalid", Const, 5, ""},
    +		{"IsBoolean", Const, 5, ""},
    +		{"IsComplex", Const, 5, ""},
    +		{"IsConstType", Const, 5, ""},
    +		{"IsFloat", Const, 5, ""},
    +		{"IsInteger", Const, 5, ""},
    +		{"IsInterface", Func, 5, "func(t Type) bool"},
    +		{"IsNumeric", Const, 5, ""},
    +		{"IsOrdered", Const, 5, ""},
    +		{"IsString", Const, 5, ""},
    +		{"IsUnsigned", Const, 5, ""},
    +		{"IsUntyped", Const, 5, ""},
    +		{"Label", Type, 5, ""},
    +		{"LocalVar", Const, 25, ""},
    +		{"LookupFieldOrMethod", Func, 5, "func(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool)"},
    +		{"LookupSelection", Func, 25, "func(T Type, addressable bool, pkg *Package, name string) (Selection, bool)"},
    +		{"Map", Type, 5, ""},
    +		{"MethodExpr", Const, 5, ""},
    +		{"MethodSet", Type, 5, ""},
    +		{"MethodVal", Const, 5, ""},
    +		{"MissingMethod", Func, 5, "func(V Type, T *Interface, static bool) (method *Func, wrongType bool)"},
    +		{"Named", Type, 5, ""},
    +		{"NewAlias", Func, 22, "func(obj *TypeName, rhs Type) *Alias"},
    +		{"NewArray", Func, 5, "func(elem Type, len int64) *Array"},
    +		{"NewChan", Func, 5, "func(dir ChanDir, elem Type) *Chan"},
    +		{"NewChecker", Func, 5, "func(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker"},
    +		{"NewConst", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const"},
    +		{"NewContext", Func, 18, "func() *Context"},
    +		{"NewField", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, embedded bool) *Var"},
    +		{"NewFunc", Func, 5, "func(pos token.Pos, pkg *Package, name string, sig *Signature) *Func"},
    +		{"NewInterface", Func, 5, "func(methods []*Func, embeddeds []*Named) *Interface"},
    +		{"NewInterfaceType", Func, 11, "func(methods []*Func, embeddeds []Type) *Interface"},
    +		{"NewLabel", Func, 5, "func(pos token.Pos, pkg *Package, name string) *Label"},
    +		{"NewMap", Func, 5, "func(key Type, elem Type) *Map"},
    +		{"NewMethodSet", Func, 5, "func(T Type) *MethodSet"},
    +		{"NewNamed", Func, 5, "func(obj *TypeName, underlying Type, methods []*Func) *Named"},
    +		{"NewPackage", Func, 5, "func(path string, name string) *Package"},
    +		{"NewParam", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
    +		{"NewPkgName", Func, 5, "func(pos token.Pos, pkg *Package, name string, imported *Package) *PkgName"},
    +		{"NewPointer", Func, 5, "func(elem Type) *Pointer"},
    +		{"NewScope", Func, 5, "func(parent *Scope, pos token.Pos, end token.Pos, comment string) *Scope"},
    +		{"NewSignature", Func, 5, "func(recv *Var, params *Tuple, results *Tuple, variadic bool) *Signature"},
    +		{"NewSignatureType", Func, 18, "func(recv *Var, recvTypeParams []*TypeParam, typeParams []*TypeParam, params *Tuple, results *Tuple, variadic bool) *Signature"},
    +		{"NewSlice", Func, 5, "func(elem Type) *Slice"},
    +		{"NewStruct", Func, 5, "func(fields []*Var, tags []string) *Struct"},
    +		{"NewTerm", Func, 18, "func(tilde bool, typ Type) *Term"},
    +		{"NewTuple", Func, 5, "func(x ...*Var) *Tuple"},
    +		{"NewTypeName", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *TypeName"},
    +		{"NewTypeParam", Func, 18, "func(obj *TypeName, constraint Type) *TypeParam"},
    +		{"NewUnion", Func, 18, "func(terms []*Term) *Union"},
    +		{"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
    +		{"Nil", Type, 5, ""},
    +		{"Object", Type, 5, ""},
    +		{"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"},
    +		{"Package", Type, 5, ""},
    +		{"PackageVar", Const, 25, ""},
    +		{"ParamVar", Const, 25, ""},
    +		{"PkgName", Type, 5, ""},
    +		{"Pointer", Type, 5, ""},
    +		{"Qualifier", Type, 5, ""},
    +		{"RecvOnly", Const, 5, ""},
    +		{"RecvVar", Const, 25, ""},
    +		{"RelativeTo", Func, 5, "func(pkg *Package) Qualifier"},
    +		{"ResultVar", Const, 25, ""},
    +		{"Rune", Const, 5, ""},
    +		{"Satisfies", Func, 20, "func(V Type, T *Interface) bool"},
    +		{"Scope", Type, 5, ""},
    +		{"Selection", Type, 5, ""},
    +		{"SelectionKind", Type, 5, ""},
    +		{"SelectionString", Func, 5, "func(s *Selection, qf Qualifier) string"},
    +		{"SendOnly", Const, 5, ""},
    +		{"SendRecv", Const, 5, ""},
    +		{"Signature", Type, 5, ""},
    +		{"Sizes", Type, 5, ""},
    +		{"SizesFor", Func, 9, "func(compiler string, arch string) Sizes"},
    +		{"Slice", Type, 5, ""},
    +		{"StdSizes", Type, 5, ""},
    +		{"StdSizes.MaxAlign", Field, 5, ""},
    +		{"StdSizes.WordSize", Field, 5, ""},
    +		{"String", Const, 5, ""},
    +		{"Struct", Type, 5, ""},
    +		{"Term", Type, 18, ""},
    +		{"Tuple", Type, 5, ""},
    +		{"Typ", Var, 5, ""},
    +		{"Type", Type, 5, ""},
    +		{"TypeAndValue", Type, 5, ""},
    +		{"TypeAndValue.Type", Field, 5, ""},
    +		{"TypeAndValue.Value", Field, 5, ""},
    +		{"TypeList", Type, 18, ""},
    +		{"TypeName", Type, 5, ""},
    +		{"TypeParam", Type, 18, ""},
    +		{"TypeParamList", Type, 18, ""},
    +		{"TypeString", Func, 5, "func(typ Type, qf Qualifier) string"},
    +		{"Uint", Const, 5, ""},
    +		{"Uint16", Const, 5, ""},
    +		{"Uint32", Const, 5, ""},
    +		{"Uint64", Const, 5, ""},
    +		{"Uint8", Const, 5, ""},
    +		{"Uintptr", Const, 5, ""},
    +		{"Unalias", Func, 22, "func(t Type) Type"},
    +		{"Union", Type, 18, ""},
    +		{"Universe", Var, 5, ""},
    +		{"Unsafe", Var, 5, ""},
    +		{"UnsafePointer", Const, 5, ""},
    +		{"UntypedBool", Const, 5, ""},
    +		{"UntypedComplex", Const, 5, ""},
    +		{"UntypedFloat", Const, 5, ""},
    +		{"UntypedInt", Const, 5, ""},
    +		{"UntypedNil", Const, 5, ""},
    +		{"UntypedRune", Const, 5, ""},
    +		{"UntypedString", Const, 5, ""},
    +		{"Var", Type, 5, ""},
    +		{"VarKind", Type, 25, ""},
    +		{"WriteExpr", Func, 5, "func(buf *bytes.Buffer, x ast.Expr)"},
    +		{"WriteSignature", Func, 5, "func(buf *bytes.Buffer, sig *Signature, qf Qualifier)"},
    +		{"WriteType", Func, 5, "func(buf *bytes.Buffer, typ Type, qf Qualifier)"},
     	},
     	"go/version": {
    -		{"Compare", Func, 22},
    -		{"IsValid", Func, 22},
    -		{"Lang", Func, 22},
    +		{"Compare", Func, 22, "func(x string, y string) int"},
    +		{"IsValid", Func, 22, "func(x string) bool"},
    +		{"Lang", Func, 22, "func(x string) string"},
     	},
     	"hash": {
    -		{"Hash", Type, 0},
    -		{"Hash32", Type, 0},
    -		{"Hash64", Type, 0},
    +		{"Cloner", Type, 25, ""},
    +		{"Hash", Type, 0, ""},
    +		{"Hash32", Type, 0, ""},
    +		{"Hash64", Type, 0, ""},
    +		{"XOF", Type, 25, ""},
     	},
     	"hash/adler32": {
    -		{"Checksum", Func, 0},
    -		{"New", Func, 0},
    -		{"Size", Const, 0},
    +		{"Checksum", Func, 0, "func(data []byte) uint32"},
    +		{"New", Func, 0, "func() hash.Hash32"},
    +		{"Size", Const, 0, ""},
     	},
     	"hash/crc32": {
    -		{"Castagnoli", Const, 0},
    -		{"Checksum", Func, 0},
    -		{"ChecksumIEEE", Func, 0},
    -		{"IEEE", Const, 0},
    -		{"IEEETable", Var, 0},
    -		{"Koopman", Const, 0},
    -		{"MakeTable", Func, 0},
    -		{"New", Func, 0},
    -		{"NewIEEE", Func, 0},
    -		{"Size", Const, 0},
    -		{"Table", Type, 0},
    -		{"Update", Func, 0},
    +		{"Castagnoli", Const, 0, ""},
    +		{"Checksum", Func, 0, "func(data []byte, tab *Table) uint32"},
    +		{"ChecksumIEEE", Func, 0, "func(data []byte) uint32"},
    +		{"IEEE", Const, 0, ""},
    +		{"IEEETable", Var, 0, ""},
    +		{"Koopman", Const, 0, ""},
    +		{"MakeTable", Func, 0, "func(poly uint32) *Table"},
    +		{"New", Func, 0, "func(tab *Table) hash.Hash32"},
    +		{"NewIEEE", Func, 0, "func() hash.Hash32"},
    +		{"Size", Const, 0, ""},
    +		{"Table", Type, 0, ""},
    +		{"Update", Func, 0, "func(crc uint32, tab *Table, p []byte) uint32"},
     	},
     	"hash/crc64": {
    -		{"Checksum", Func, 0},
    -		{"ECMA", Const, 0},
    -		{"ISO", Const, 0},
    -		{"MakeTable", Func, 0},
    -		{"New", Func, 0},
    -		{"Size", Const, 0},
    -		{"Table", Type, 0},
    -		{"Update", Func, 0},
    +		{"Checksum", Func, 0, "func(data []byte, tab *Table) uint64"},
    +		{"ECMA", Const, 0, ""},
    +		{"ISO", Const, 0, ""},
    +		{"MakeTable", Func, 0, "func(poly uint64) *Table"},
    +		{"New", Func, 0, "func(tab *Table) hash.Hash64"},
    +		{"Size", Const, 0, ""},
    +		{"Table", Type, 0, ""},
    +		{"Update", Func, 0, "func(crc uint64, tab *Table, p []byte) uint64"},
     	},
     	"hash/fnv": {
    -		{"New128", Func, 9},
    -		{"New128a", Func, 9},
    -		{"New32", Func, 0},
    -		{"New32a", Func, 0},
    -		{"New64", Func, 0},
    -		{"New64a", Func, 0},
    +		{"New128", Func, 9, "func() hash.Hash"},
    +		{"New128a", Func, 9, "func() hash.Hash"},
    +		{"New32", Func, 0, "func() hash.Hash32"},
    +		{"New32a", Func, 0, "func() hash.Hash32"},
    +		{"New64", Func, 0, "func() hash.Hash64"},
    +		{"New64a", Func, 0, "func() hash.Hash64"},
     	},
     	"hash/maphash": {
    -		{"(*Hash).BlockSize", Method, 14},
    -		{"(*Hash).Reset", Method, 14},
    -		{"(*Hash).Seed", Method, 14},
    -		{"(*Hash).SetSeed", Method, 14},
    -		{"(*Hash).Size", Method, 14},
    -		{"(*Hash).Sum", Method, 14},
    -		{"(*Hash).Sum64", Method, 14},
    -		{"(*Hash).Write", Method, 14},
    -		{"(*Hash).WriteByte", Method, 14},
    -		{"(*Hash).WriteString", Method, 14},
    -		{"Bytes", Func, 19},
    -		{"Comparable", Func, 24},
    -		{"Hash", Type, 14},
    -		{"MakeSeed", Func, 14},
    -		{"Seed", Type, 14},
    -		{"String", Func, 19},
    -		{"WriteComparable", Func, 24},
    +		{"(*Hash).BlockSize", Method, 14, ""},
    +		{"(*Hash).Clone", Method, 25, ""},
    +		{"(*Hash).Reset", Method, 14, ""},
    +		{"(*Hash).Seed", Method, 14, ""},
    +		{"(*Hash).SetSeed", Method, 14, ""},
    +		{"(*Hash).Size", Method, 14, ""},
    +		{"(*Hash).Sum", Method, 14, ""},
    +		{"(*Hash).Sum64", Method, 14, ""},
    +		{"(*Hash).Write", Method, 14, ""},
    +		{"(*Hash).WriteByte", Method, 14, ""},
    +		{"(*Hash).WriteString", Method, 14, ""},
    +		{"Bytes", Func, 19, "func(seed Seed, b []byte) uint64"},
    +		{"Comparable", Func, 24, "func[T comparable](seed Seed, v T) uint64"},
    +		{"Hash", Type, 14, ""},
    +		{"MakeSeed", Func, 14, "func() Seed"},
    +		{"Seed", Type, 14, ""},
    +		{"String", Func, 19, "func(seed Seed, s string) uint64"},
    +		{"WriteComparable", Func, 24, "func[T comparable](h *Hash, x T)"},
     	},
     	"html": {
    -		{"EscapeString", Func, 0},
    -		{"UnescapeString", Func, 0},
    +		{"EscapeString", Func, 0, "func(s string) string"},
    +		{"UnescapeString", Func, 0, "func(s string) string"},
     	},
     	"html/template": {
    -		{"(*Error).Error", Method, 0},
    -		{"(*Template).AddParseTree", Method, 0},
    -		{"(*Template).Clone", Method, 0},
    -		{"(*Template).DefinedTemplates", Method, 6},
    -		{"(*Template).Delims", Method, 0},
    -		{"(*Template).Execute", Method, 0},
    -		{"(*Template).ExecuteTemplate", Method, 0},
    -		{"(*Template).Funcs", Method, 0},
    -		{"(*Template).Lookup", Method, 0},
    -		{"(*Template).Name", Method, 0},
    -		{"(*Template).New", Method, 0},
    -		{"(*Template).Option", Method, 5},
    -		{"(*Template).Parse", Method, 0},
    -		{"(*Template).ParseFS", Method, 16},
    -		{"(*Template).ParseFiles", Method, 0},
    -		{"(*Template).ParseGlob", Method, 0},
    -		{"(*Template).Templates", Method, 0},
    -		{"CSS", Type, 0},
    -		{"ErrAmbigContext", Const, 0},
    -		{"ErrBadHTML", Const, 0},
    -		{"ErrBranchEnd", Const, 0},
    -		{"ErrEndContext", Const, 0},
    -		{"ErrJSTemplate", Const, 21},
    -		{"ErrNoSuchTemplate", Const, 0},
    -		{"ErrOutputContext", Const, 0},
    -		{"ErrPartialCharset", Const, 0},
    -		{"ErrPartialEscape", Const, 0},
    -		{"ErrPredefinedEscaper", Const, 9},
    -		{"ErrRangeLoopReentry", Const, 0},
    -		{"ErrSlashAmbig", Const, 0},
    -		{"Error", Type, 0},
    -		{"Error.Description", Field, 0},
    -		{"Error.ErrorCode", Field, 0},
    -		{"Error.Line", Field, 0},
    -		{"Error.Name", Field, 0},
    -		{"Error.Node", Field, 4},
    -		{"ErrorCode", Type, 0},
    -		{"FuncMap", Type, 0},
    -		{"HTML", Type, 0},
    -		{"HTMLAttr", Type, 0},
    -		{"HTMLEscape", Func, 0},
    -		{"HTMLEscapeString", Func, 0},
    -		{"HTMLEscaper", Func, 0},
    -		{"IsTrue", Func, 6},
    -		{"JS", Type, 0},
    -		{"JSEscape", Func, 0},
    -		{"JSEscapeString", Func, 0},
    -		{"JSEscaper", Func, 0},
    -		{"JSStr", Type, 0},
    -		{"Must", Func, 0},
    -		{"New", Func, 0},
    -		{"OK", Const, 0},
    -		{"ParseFS", Func, 16},
    -		{"ParseFiles", Func, 0},
    -		{"ParseGlob", Func, 0},
    -		{"Srcset", Type, 10},
    -		{"Template", Type, 0},
    -		{"Template.Tree", Field, 2},
    -		{"URL", Type, 0},
    -		{"URLQueryEscaper", Func, 0},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Template).AddParseTree", Method, 0, ""},
    +		{"(*Template).Clone", Method, 0, ""},
    +		{"(*Template).DefinedTemplates", Method, 6, ""},
    +		{"(*Template).Delims", Method, 0, ""},
    +		{"(*Template).Execute", Method, 0, ""},
    +		{"(*Template).ExecuteTemplate", Method, 0, ""},
    +		{"(*Template).Funcs", Method, 0, ""},
    +		{"(*Template).Lookup", Method, 0, ""},
    +		{"(*Template).Name", Method, 0, ""},
    +		{"(*Template).New", Method, 0, ""},
    +		{"(*Template).Option", Method, 5, ""},
    +		{"(*Template).Parse", Method, 0, ""},
    +		{"(*Template).ParseFS", Method, 16, ""},
    +		{"(*Template).ParseFiles", Method, 0, ""},
    +		{"(*Template).ParseGlob", Method, 0, ""},
    +		{"(*Template).Templates", Method, 0, ""},
    +		{"CSS", Type, 0, ""},
    +		{"ErrAmbigContext", Const, 0, ""},
    +		{"ErrBadHTML", Const, 0, ""},
    +		{"ErrBranchEnd", Const, 0, ""},
    +		{"ErrEndContext", Const, 0, ""},
    +		{"ErrJSTemplate", Const, 21, ""},
    +		{"ErrNoSuchTemplate", Const, 0, ""},
    +		{"ErrOutputContext", Const, 0, ""},
    +		{"ErrPartialCharset", Const, 0, ""},
    +		{"ErrPartialEscape", Const, 0, ""},
    +		{"ErrPredefinedEscaper", Const, 9, ""},
    +		{"ErrRangeLoopReentry", Const, 0, ""},
    +		{"ErrSlashAmbig", Const, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Description", Field, 0, ""},
    +		{"Error.ErrorCode", Field, 0, ""},
    +		{"Error.Line", Field, 0, ""},
    +		{"Error.Name", Field, 0, ""},
    +		{"Error.Node", Field, 4, ""},
    +		{"ErrorCode", Type, 0, ""},
    +		{"FuncMap", Type, 0, ""},
    +		{"HTML", Type, 0, ""},
    +		{"HTMLAttr", Type, 0, ""},
    +		{"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
    +		{"HTMLEscapeString", Func, 0, "func(s string) string"},
    +		{"HTMLEscaper", Func, 0, "func(args ...any) string"},
    +		{"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
    +		{"JS", Type, 0, ""},
    +		{"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
    +		{"JSEscapeString", Func, 0, "func(s string) string"},
    +		{"JSEscaper", Func, 0, "func(args ...any) string"},
    +		{"JSStr", Type, 0, ""},
    +		{"Must", Func, 0, "func(t *Template, err error) *Template"},
    +		{"New", Func, 0, "func(name string) *Template"},
    +		{"OK", Const, 0, ""},
    +		{"ParseFS", Func, 16, "func(fs fs.FS, patterns ...string) (*Template, error)"},
    +		{"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
    +		{"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
    +		{"Srcset", Type, 10, ""},
    +		{"Template", Type, 0, ""},
    +		{"Template.Tree", Field, 2, ""},
    +		{"URL", Type, 0, ""},
    +		{"URLQueryEscaper", Func, 0, "func(args ...any) string"},
     	},
     	"image": {
    -		{"(*Alpha).AlphaAt", Method, 4},
    -		{"(*Alpha).At", Method, 0},
    -		{"(*Alpha).Bounds", Method, 0},
    -		{"(*Alpha).ColorModel", Method, 0},
    -		{"(*Alpha).Opaque", Method, 0},
    -		{"(*Alpha).PixOffset", Method, 0},
    -		{"(*Alpha).RGBA64At", Method, 17},
    -		{"(*Alpha).Set", Method, 0},
    -		{"(*Alpha).SetAlpha", Method, 0},
    -		{"(*Alpha).SetRGBA64", Method, 17},
    -		{"(*Alpha).SubImage", Method, 0},
    -		{"(*Alpha16).Alpha16At", Method, 4},
    -		{"(*Alpha16).At", Method, 0},
    -		{"(*Alpha16).Bounds", Method, 0},
    -		{"(*Alpha16).ColorModel", Method, 0},
    -		{"(*Alpha16).Opaque", Method, 0},
    -		{"(*Alpha16).PixOffset", Method, 0},
    -		{"(*Alpha16).RGBA64At", Method, 17},
    -		{"(*Alpha16).Set", Method, 0},
    -		{"(*Alpha16).SetAlpha16", Method, 0},
    -		{"(*Alpha16).SetRGBA64", Method, 17},
    -		{"(*Alpha16).SubImage", Method, 0},
    -		{"(*CMYK).At", Method, 5},
    -		{"(*CMYK).Bounds", Method, 5},
    -		{"(*CMYK).CMYKAt", Method, 5},
    -		{"(*CMYK).ColorModel", Method, 5},
    -		{"(*CMYK).Opaque", Method, 5},
    -		{"(*CMYK).PixOffset", Method, 5},
    -		{"(*CMYK).RGBA64At", Method, 17},
    -		{"(*CMYK).Set", Method, 5},
    -		{"(*CMYK).SetCMYK", Method, 5},
    -		{"(*CMYK).SetRGBA64", Method, 17},
    -		{"(*CMYK).SubImage", Method, 5},
    -		{"(*Gray).At", Method, 0},
    -		{"(*Gray).Bounds", Method, 0},
    -		{"(*Gray).ColorModel", Method, 0},
    -		{"(*Gray).GrayAt", Method, 4},
    -		{"(*Gray).Opaque", Method, 0},
    -		{"(*Gray).PixOffset", Method, 0},
    -		{"(*Gray).RGBA64At", Method, 17},
    -		{"(*Gray).Set", Method, 0},
    -		{"(*Gray).SetGray", Method, 0},
    -		{"(*Gray).SetRGBA64", Method, 17},
    -		{"(*Gray).SubImage", Method, 0},
    -		{"(*Gray16).At", Method, 0},
    -		{"(*Gray16).Bounds", Method, 0},
    -		{"(*Gray16).ColorModel", Method, 0},
    -		{"(*Gray16).Gray16At", Method, 4},
    -		{"(*Gray16).Opaque", Method, 0},
    -		{"(*Gray16).PixOffset", Method, 0},
    -		{"(*Gray16).RGBA64At", Method, 17},
    -		{"(*Gray16).Set", Method, 0},
    -		{"(*Gray16).SetGray16", Method, 0},
    -		{"(*Gray16).SetRGBA64", Method, 17},
    -		{"(*Gray16).SubImage", Method, 0},
    -		{"(*NRGBA).At", Method, 0},
    -		{"(*NRGBA).Bounds", Method, 0},
    -		{"(*NRGBA).ColorModel", Method, 0},
    -		{"(*NRGBA).NRGBAAt", Method, 4},
    -		{"(*NRGBA).Opaque", Method, 0},
    -		{"(*NRGBA).PixOffset", Method, 0},
    -		{"(*NRGBA).RGBA64At", Method, 17},
    -		{"(*NRGBA).Set", Method, 0},
    -		{"(*NRGBA).SetNRGBA", Method, 0},
    -		{"(*NRGBA).SetRGBA64", Method, 17},
    -		{"(*NRGBA).SubImage", Method, 0},
    -		{"(*NRGBA64).At", Method, 0},
    -		{"(*NRGBA64).Bounds", Method, 0},
    -		{"(*NRGBA64).ColorModel", Method, 0},
    -		{"(*NRGBA64).NRGBA64At", Method, 4},
    -		{"(*NRGBA64).Opaque", Method, 0},
    -		{"(*NRGBA64).PixOffset", Method, 0},
    -		{"(*NRGBA64).RGBA64At", Method, 17},
    -		{"(*NRGBA64).Set", Method, 0},
    -		{"(*NRGBA64).SetNRGBA64", Method, 0},
    -		{"(*NRGBA64).SetRGBA64", Method, 17},
    -		{"(*NRGBA64).SubImage", Method, 0},
    -		{"(*NYCbCrA).AOffset", Method, 6},
    -		{"(*NYCbCrA).At", Method, 6},
    -		{"(*NYCbCrA).Bounds", Method, 6},
    -		{"(*NYCbCrA).COffset", Method, 6},
    -		{"(*NYCbCrA).ColorModel", Method, 6},
    -		{"(*NYCbCrA).NYCbCrAAt", Method, 6},
    -		{"(*NYCbCrA).Opaque", Method, 6},
    -		{"(*NYCbCrA).RGBA64At", Method, 17},
    -		{"(*NYCbCrA).SubImage", Method, 6},
    -		{"(*NYCbCrA).YCbCrAt", Method, 6},
    -		{"(*NYCbCrA).YOffset", Method, 6},
    -		{"(*Paletted).At", Method, 0},
    -		{"(*Paletted).Bounds", Method, 0},
    -		{"(*Paletted).ColorIndexAt", Method, 0},
    -		{"(*Paletted).ColorModel", Method, 0},
    -		{"(*Paletted).Opaque", Method, 0},
    -		{"(*Paletted).PixOffset", Method, 0},
    -		{"(*Paletted).RGBA64At", Method, 17},
    -		{"(*Paletted).Set", Method, 0},
    -		{"(*Paletted).SetColorIndex", Method, 0},
    -		{"(*Paletted).SetRGBA64", Method, 17},
    -		{"(*Paletted).SubImage", Method, 0},
    -		{"(*RGBA).At", Method, 0},
    -		{"(*RGBA).Bounds", Method, 0},
    -		{"(*RGBA).ColorModel", Method, 0},
    -		{"(*RGBA).Opaque", Method, 0},
    -		{"(*RGBA).PixOffset", Method, 0},
    -		{"(*RGBA).RGBA64At", Method, 17},
    -		{"(*RGBA).RGBAAt", Method, 4},
    -		{"(*RGBA).Set", Method, 0},
    -		{"(*RGBA).SetRGBA", Method, 0},
    -		{"(*RGBA).SetRGBA64", Method, 17},
    -		{"(*RGBA).SubImage", Method, 0},
    -		{"(*RGBA64).At", Method, 0},
    -		{"(*RGBA64).Bounds", Method, 0},
    -		{"(*RGBA64).ColorModel", Method, 0},
    -		{"(*RGBA64).Opaque", Method, 0},
    -		{"(*RGBA64).PixOffset", Method, 0},
    -		{"(*RGBA64).RGBA64At", Method, 4},
    -		{"(*RGBA64).Set", Method, 0},
    -		{"(*RGBA64).SetRGBA64", Method, 0},
    -		{"(*RGBA64).SubImage", Method, 0},
    -		{"(*Uniform).At", Method, 0},
    -		{"(*Uniform).Bounds", Method, 0},
    -		{"(*Uniform).ColorModel", Method, 0},
    -		{"(*Uniform).Convert", Method, 0},
    -		{"(*Uniform).Opaque", Method, 0},
    -		{"(*Uniform).RGBA", Method, 0},
    -		{"(*Uniform).RGBA64At", Method, 17},
    -		{"(*YCbCr).At", Method, 0},
    -		{"(*YCbCr).Bounds", Method, 0},
    -		{"(*YCbCr).COffset", Method, 0},
    -		{"(*YCbCr).ColorModel", Method, 0},
    -		{"(*YCbCr).Opaque", Method, 0},
    -		{"(*YCbCr).RGBA64At", Method, 17},
    -		{"(*YCbCr).SubImage", Method, 0},
    -		{"(*YCbCr).YCbCrAt", Method, 4},
    -		{"(*YCbCr).YOffset", Method, 0},
    -		{"(Point).Add", Method, 0},
    -		{"(Point).Div", Method, 0},
    -		{"(Point).Eq", Method, 0},
    -		{"(Point).In", Method, 0},
    -		{"(Point).Mod", Method, 0},
    -		{"(Point).Mul", Method, 0},
    -		{"(Point).String", Method, 0},
    -		{"(Point).Sub", Method, 0},
    -		{"(Rectangle).Add", Method, 0},
    -		{"(Rectangle).At", Method, 5},
    -		{"(Rectangle).Bounds", Method, 5},
    -		{"(Rectangle).Canon", Method, 0},
    -		{"(Rectangle).ColorModel", Method, 5},
    -		{"(Rectangle).Dx", Method, 0},
    -		{"(Rectangle).Dy", Method, 0},
    -		{"(Rectangle).Empty", Method, 0},
    -		{"(Rectangle).Eq", Method, 0},
    -		{"(Rectangle).In", Method, 0},
    -		{"(Rectangle).Inset", Method, 0},
    -		{"(Rectangle).Intersect", Method, 0},
    -		{"(Rectangle).Overlaps", Method, 0},
    -		{"(Rectangle).RGBA64At", Method, 17},
    -		{"(Rectangle).Size", Method, 0},
    -		{"(Rectangle).String", Method, 0},
    -		{"(Rectangle).Sub", Method, 0},
    -		{"(Rectangle).Union", Method, 0},
    -		{"(YCbCrSubsampleRatio).String", Method, 0},
    -		{"Alpha", Type, 0},
    -		{"Alpha.Pix", Field, 0},
    -		{"Alpha.Rect", Field, 0},
    -		{"Alpha.Stride", Field, 0},
    -		{"Alpha16", Type, 0},
    -		{"Alpha16.Pix", Field, 0},
    -		{"Alpha16.Rect", Field, 0},
    -		{"Alpha16.Stride", Field, 0},
    -		{"Black", Var, 0},
    -		{"CMYK", Type, 5},
    -		{"CMYK.Pix", Field, 5},
    -		{"CMYK.Rect", Field, 5},
    -		{"CMYK.Stride", Field, 5},
    -		{"Config", Type, 0},
    -		{"Config.ColorModel", Field, 0},
    -		{"Config.Height", Field, 0},
    -		{"Config.Width", Field, 0},
    -		{"Decode", Func, 0},
    -		{"DecodeConfig", Func, 0},
    -		{"ErrFormat", Var, 0},
    -		{"Gray", Type, 0},
    -		{"Gray.Pix", Field, 0},
    -		{"Gray.Rect", Field, 0},
    -		{"Gray.Stride", Field, 0},
    -		{"Gray16", Type, 0},
    -		{"Gray16.Pix", Field, 0},
    -		{"Gray16.Rect", Field, 0},
    -		{"Gray16.Stride", Field, 0},
    -		{"Image", Type, 0},
    -		{"NRGBA", Type, 0},
    -		{"NRGBA.Pix", Field, 0},
    -		{"NRGBA.Rect", Field, 0},
    -		{"NRGBA.Stride", Field, 0},
    -		{"NRGBA64", Type, 0},
    -		{"NRGBA64.Pix", Field, 0},
    -		{"NRGBA64.Rect", Field, 0},
    -		{"NRGBA64.Stride", Field, 0},
    -		{"NYCbCrA", Type, 6},
    -		{"NYCbCrA.A", Field, 6},
    -		{"NYCbCrA.AStride", Field, 6},
    -		{"NYCbCrA.YCbCr", Field, 6},
    -		{"NewAlpha", Func, 0},
    -		{"NewAlpha16", Func, 0},
    -		{"NewCMYK", Func, 5},
    -		{"NewGray", Func, 0},
    -		{"NewGray16", Func, 0},
    -		{"NewNRGBA", Func, 0},
    -		{"NewNRGBA64", Func, 0},
    -		{"NewNYCbCrA", Func, 6},
    -		{"NewPaletted", Func, 0},
    -		{"NewRGBA", Func, 0},
    -		{"NewRGBA64", Func, 0},
    -		{"NewUniform", Func, 0},
    -		{"NewYCbCr", Func, 0},
    -		{"Opaque", Var, 0},
    -		{"Paletted", Type, 0},
    -		{"Paletted.Palette", Field, 0},
    -		{"Paletted.Pix", Field, 0},
    -		{"Paletted.Rect", Field, 0},
    -		{"Paletted.Stride", Field, 0},
    -		{"PalettedImage", Type, 0},
    -		{"Point", Type, 0},
    -		{"Point.X", Field, 0},
    -		{"Point.Y", Field, 0},
    -		{"Pt", Func, 0},
    -		{"RGBA", Type, 0},
    -		{"RGBA.Pix", Field, 0},
    -		{"RGBA.Rect", Field, 0},
    -		{"RGBA.Stride", Field, 0},
    -		{"RGBA64", Type, 0},
    -		{"RGBA64.Pix", Field, 0},
    -		{"RGBA64.Rect", Field, 0},
    -		{"RGBA64.Stride", Field, 0},
    -		{"RGBA64Image", Type, 17},
    -		{"Rect", Func, 0},
    -		{"Rectangle", Type, 0},
    -		{"Rectangle.Max", Field, 0},
    -		{"Rectangle.Min", Field, 0},
    -		{"RegisterFormat", Func, 0},
    -		{"Transparent", Var, 0},
    -		{"Uniform", Type, 0},
    -		{"Uniform.C", Field, 0},
    -		{"White", Var, 0},
    -		{"YCbCr", Type, 0},
    -		{"YCbCr.CStride", Field, 0},
    -		{"YCbCr.Cb", Field, 0},
    -		{"YCbCr.Cr", Field, 0},
    -		{"YCbCr.Rect", Field, 0},
    -		{"YCbCr.SubsampleRatio", Field, 0},
    -		{"YCbCr.Y", Field, 0},
    -		{"YCbCr.YStride", Field, 0},
    -		{"YCbCrSubsampleRatio", Type, 0},
    -		{"YCbCrSubsampleRatio410", Const, 5},
    -		{"YCbCrSubsampleRatio411", Const, 5},
    -		{"YCbCrSubsampleRatio420", Const, 0},
    -		{"YCbCrSubsampleRatio422", Const, 0},
    -		{"YCbCrSubsampleRatio440", Const, 1},
    -		{"YCbCrSubsampleRatio444", Const, 0},
    -		{"ZP", Var, 0},
    -		{"ZR", Var, 0},
    +		{"(*Alpha).AlphaAt", Method, 4, ""},
    +		{"(*Alpha).At", Method, 0, ""},
    +		{"(*Alpha).Bounds", Method, 0, ""},
    +		{"(*Alpha).ColorModel", Method, 0, ""},
    +		{"(*Alpha).Opaque", Method, 0, ""},
    +		{"(*Alpha).PixOffset", Method, 0, ""},
    +		{"(*Alpha).RGBA64At", Method, 17, ""},
    +		{"(*Alpha).Set", Method, 0, ""},
    +		{"(*Alpha).SetAlpha", Method, 0, ""},
    +		{"(*Alpha).SetRGBA64", Method, 17, ""},
    +		{"(*Alpha).SubImage", Method, 0, ""},
    +		{"(*Alpha16).Alpha16At", Method, 4, ""},
    +		{"(*Alpha16).At", Method, 0, ""},
    +		{"(*Alpha16).Bounds", Method, 0, ""},
    +		{"(*Alpha16).ColorModel", Method, 0, ""},
    +		{"(*Alpha16).Opaque", Method, 0, ""},
    +		{"(*Alpha16).PixOffset", Method, 0, ""},
    +		{"(*Alpha16).RGBA64At", Method, 17, ""},
    +		{"(*Alpha16).Set", Method, 0, ""},
    +		{"(*Alpha16).SetAlpha16", Method, 0, ""},
    +		{"(*Alpha16).SetRGBA64", Method, 17, ""},
    +		{"(*Alpha16).SubImage", Method, 0, ""},
    +		{"(*CMYK).At", Method, 5, ""},
    +		{"(*CMYK).Bounds", Method, 5, ""},
    +		{"(*CMYK).CMYKAt", Method, 5, ""},
    +		{"(*CMYK).ColorModel", Method, 5, ""},
    +		{"(*CMYK).Opaque", Method, 5, ""},
    +		{"(*CMYK).PixOffset", Method, 5, ""},
    +		{"(*CMYK).RGBA64At", Method, 17, ""},
    +		{"(*CMYK).Set", Method, 5, ""},
    +		{"(*CMYK).SetCMYK", Method, 5, ""},
    +		{"(*CMYK).SetRGBA64", Method, 17, ""},
    +		{"(*CMYK).SubImage", Method, 5, ""},
    +		{"(*Gray).At", Method, 0, ""},
    +		{"(*Gray).Bounds", Method, 0, ""},
    +		{"(*Gray).ColorModel", Method, 0, ""},
    +		{"(*Gray).GrayAt", Method, 4, ""},
    +		{"(*Gray).Opaque", Method, 0, ""},
    +		{"(*Gray).PixOffset", Method, 0, ""},
    +		{"(*Gray).RGBA64At", Method, 17, ""},
    +		{"(*Gray).Set", Method, 0, ""},
    +		{"(*Gray).SetGray", Method, 0, ""},
    +		{"(*Gray).SetRGBA64", Method, 17, ""},
    +		{"(*Gray).SubImage", Method, 0, ""},
    +		{"(*Gray16).At", Method, 0, ""},
    +		{"(*Gray16).Bounds", Method, 0, ""},
    +		{"(*Gray16).ColorModel", Method, 0, ""},
    +		{"(*Gray16).Gray16At", Method, 4, ""},
    +		{"(*Gray16).Opaque", Method, 0, ""},
    +		{"(*Gray16).PixOffset", Method, 0, ""},
    +		{"(*Gray16).RGBA64At", Method, 17, ""},
    +		{"(*Gray16).Set", Method, 0, ""},
    +		{"(*Gray16).SetGray16", Method, 0, ""},
    +		{"(*Gray16).SetRGBA64", Method, 17, ""},
    +		{"(*Gray16).SubImage", Method, 0, ""},
    +		{"(*NRGBA).At", Method, 0, ""},
    +		{"(*NRGBA).Bounds", Method, 0, ""},
    +		{"(*NRGBA).ColorModel", Method, 0, ""},
    +		{"(*NRGBA).NRGBAAt", Method, 4, ""},
    +		{"(*NRGBA).Opaque", Method, 0, ""},
    +		{"(*NRGBA).PixOffset", Method, 0, ""},
    +		{"(*NRGBA).RGBA64At", Method, 17, ""},
    +		{"(*NRGBA).Set", Method, 0, ""},
    +		{"(*NRGBA).SetNRGBA", Method, 0, ""},
    +		{"(*NRGBA).SetRGBA64", Method, 17, ""},
    +		{"(*NRGBA).SubImage", Method, 0, ""},
    +		{"(*NRGBA64).At", Method, 0, ""},
    +		{"(*NRGBA64).Bounds", Method, 0, ""},
    +		{"(*NRGBA64).ColorModel", Method, 0, ""},
    +		{"(*NRGBA64).NRGBA64At", Method, 4, ""},
    +		{"(*NRGBA64).Opaque", Method, 0, ""},
    +		{"(*NRGBA64).PixOffset", Method, 0, ""},
    +		{"(*NRGBA64).RGBA64At", Method, 17, ""},
    +		{"(*NRGBA64).Set", Method, 0, ""},
    +		{"(*NRGBA64).SetNRGBA64", Method, 0, ""},
    +		{"(*NRGBA64).SetRGBA64", Method, 17, ""},
    +		{"(*NRGBA64).SubImage", Method, 0, ""},
    +		{"(*NYCbCrA).AOffset", Method, 6, ""},
    +		{"(*NYCbCrA).At", Method, 6, ""},
    +		{"(*NYCbCrA).Bounds", Method, 6, ""},
    +		{"(*NYCbCrA).COffset", Method, 6, ""},
    +		{"(*NYCbCrA).ColorModel", Method, 6, ""},
    +		{"(*NYCbCrA).NYCbCrAAt", Method, 6, ""},
    +		{"(*NYCbCrA).Opaque", Method, 6, ""},
    +		{"(*NYCbCrA).RGBA64At", Method, 17, ""},
    +		{"(*NYCbCrA).SubImage", Method, 6, ""},
    +		{"(*NYCbCrA).YCbCrAt", Method, 6, ""},
    +		{"(*NYCbCrA).YOffset", Method, 6, ""},
    +		{"(*Paletted).At", Method, 0, ""},
    +		{"(*Paletted).Bounds", Method, 0, ""},
    +		{"(*Paletted).ColorIndexAt", Method, 0, ""},
    +		{"(*Paletted).ColorModel", Method, 0, ""},
    +		{"(*Paletted).Opaque", Method, 0, ""},
    +		{"(*Paletted).PixOffset", Method, 0, ""},
    +		{"(*Paletted).RGBA64At", Method, 17, ""},
    +		{"(*Paletted).Set", Method, 0, ""},
    +		{"(*Paletted).SetColorIndex", Method, 0, ""},
    +		{"(*Paletted).SetRGBA64", Method, 17, ""},
    +		{"(*Paletted).SubImage", Method, 0, ""},
    +		{"(*RGBA).At", Method, 0, ""},
    +		{"(*RGBA).Bounds", Method, 0, ""},
    +		{"(*RGBA).ColorModel", Method, 0, ""},
    +		{"(*RGBA).Opaque", Method, 0, ""},
    +		{"(*RGBA).PixOffset", Method, 0, ""},
    +		{"(*RGBA).RGBA64At", Method, 17, ""},
    +		{"(*RGBA).RGBAAt", Method, 4, ""},
    +		{"(*RGBA).Set", Method, 0, ""},
    +		{"(*RGBA).SetRGBA", Method, 0, ""},
    +		{"(*RGBA).SetRGBA64", Method, 17, ""},
    +		{"(*RGBA).SubImage", Method, 0, ""},
    +		{"(*RGBA64).At", Method, 0, ""},
    +		{"(*RGBA64).Bounds", Method, 0, ""},
    +		{"(*RGBA64).ColorModel", Method, 0, ""},
    +		{"(*RGBA64).Opaque", Method, 0, ""},
    +		{"(*RGBA64).PixOffset", Method, 0, ""},
    +		{"(*RGBA64).RGBA64At", Method, 4, ""},
    +		{"(*RGBA64).Set", Method, 0, ""},
    +		{"(*RGBA64).SetRGBA64", Method, 0, ""},
    +		{"(*RGBA64).SubImage", Method, 0, ""},
    +		{"(*Uniform).At", Method, 0, ""},
    +		{"(*Uniform).Bounds", Method, 0, ""},
    +		{"(*Uniform).ColorModel", Method, 0, ""},
    +		{"(*Uniform).Convert", Method, 0, ""},
    +		{"(*Uniform).Opaque", Method, 0, ""},
    +		{"(*Uniform).RGBA", Method, 0, ""},
    +		{"(*Uniform).RGBA64At", Method, 17, ""},
    +		{"(*YCbCr).At", Method, 0, ""},
    +		{"(*YCbCr).Bounds", Method, 0, ""},
    +		{"(*YCbCr).COffset", Method, 0, ""},
    +		{"(*YCbCr).ColorModel", Method, 0, ""},
    +		{"(*YCbCr).Opaque", Method, 0, ""},
    +		{"(*YCbCr).RGBA64At", Method, 17, ""},
    +		{"(*YCbCr).SubImage", Method, 0, ""},
    +		{"(*YCbCr).YCbCrAt", Method, 4, ""},
    +		{"(*YCbCr).YOffset", Method, 0, ""},
    +		{"(Point).Add", Method, 0, ""},
    +		{"(Point).Div", Method, 0, ""},
    +		{"(Point).Eq", Method, 0, ""},
    +		{"(Point).In", Method, 0, ""},
    +		{"(Point).Mod", Method, 0, ""},
    +		{"(Point).Mul", Method, 0, ""},
    +		{"(Point).String", Method, 0, ""},
    +		{"(Point).Sub", Method, 0, ""},
    +		{"(Rectangle).Add", Method, 0, ""},
    +		{"(Rectangle).At", Method, 5, ""},
    +		{"(Rectangle).Bounds", Method, 5, ""},
    +		{"(Rectangle).Canon", Method, 0, ""},
    +		{"(Rectangle).ColorModel", Method, 5, ""},
    +		{"(Rectangle).Dx", Method, 0, ""},
    +		{"(Rectangle).Dy", Method, 0, ""},
    +		{"(Rectangle).Empty", Method, 0, ""},
    +		{"(Rectangle).Eq", Method, 0, ""},
    +		{"(Rectangle).In", Method, 0, ""},
    +		{"(Rectangle).Inset", Method, 0, ""},
    +		{"(Rectangle).Intersect", Method, 0, ""},
    +		{"(Rectangle).Overlaps", Method, 0, ""},
    +		{"(Rectangle).RGBA64At", Method, 17, ""},
    +		{"(Rectangle).Size", Method, 0, ""},
    +		{"(Rectangle).String", Method, 0, ""},
    +		{"(Rectangle).Sub", Method, 0, ""},
    +		{"(Rectangle).Union", Method, 0, ""},
    +		{"(YCbCrSubsampleRatio).String", Method, 0, ""},
    +		{"Alpha", Type, 0, ""},
    +		{"Alpha.Pix", Field, 0, ""},
    +		{"Alpha.Rect", Field, 0, ""},
    +		{"Alpha.Stride", Field, 0, ""},
    +		{"Alpha16", Type, 0, ""},
    +		{"Alpha16.Pix", Field, 0, ""},
    +		{"Alpha16.Rect", Field, 0, ""},
    +		{"Alpha16.Stride", Field, 0, ""},
    +		{"Black", Var, 0, ""},
    +		{"CMYK", Type, 5, ""},
    +		{"CMYK.Pix", Field, 5, ""},
    +		{"CMYK.Rect", Field, 5, ""},
    +		{"CMYK.Stride", Field, 5, ""},
    +		{"Config", Type, 0, ""},
    +		{"Config.ColorModel", Field, 0, ""},
    +		{"Config.Height", Field, 0, ""},
    +		{"Config.Width", Field, 0, ""},
    +		{"Decode", Func, 0, "func(r io.Reader) (Image, string, error)"},
    +		{"DecodeConfig", Func, 0, "func(r io.Reader) (Config, string, error)"},
    +		{"ErrFormat", Var, 0, ""},
    +		{"Gray", Type, 0, ""},
    +		{"Gray.Pix", Field, 0, ""},
    +		{"Gray.Rect", Field, 0, ""},
    +		{"Gray.Stride", Field, 0, ""},
    +		{"Gray16", Type, 0, ""},
    +		{"Gray16.Pix", Field, 0, ""},
    +		{"Gray16.Rect", Field, 0, ""},
    +		{"Gray16.Stride", Field, 0, ""},
    +		{"Image", Type, 0, ""},
    +		{"NRGBA", Type, 0, ""},
    +		{"NRGBA.Pix", Field, 0, ""},
    +		{"NRGBA.Rect", Field, 0, ""},
    +		{"NRGBA.Stride", Field, 0, ""},
    +		{"NRGBA64", Type, 0, ""},
    +		{"NRGBA64.Pix", Field, 0, ""},
    +		{"NRGBA64.Rect", Field, 0, ""},
    +		{"NRGBA64.Stride", Field, 0, ""},
    +		{"NYCbCrA", Type, 6, ""},
    +		{"NYCbCrA.A", Field, 6, ""},
    +		{"NYCbCrA.AStride", Field, 6, ""},
    +		{"NYCbCrA.YCbCr", Field, 6, ""},
    +		{"NewAlpha", Func, 0, "func(r Rectangle) *Alpha"},
    +		{"NewAlpha16", Func, 0, "func(r Rectangle) *Alpha16"},
    +		{"NewCMYK", Func, 5, "func(r Rectangle) *CMYK"},
    +		{"NewGray", Func, 0, "func(r Rectangle) *Gray"},
    +		{"NewGray16", Func, 0, "func(r Rectangle) *Gray16"},
    +		{"NewNRGBA", Func, 0, "func(r Rectangle) *NRGBA"},
    +		{"NewNRGBA64", Func, 0, "func(r Rectangle) *NRGBA64"},
    +		{"NewNYCbCrA", Func, 6, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA"},
    +		{"NewPaletted", Func, 0, "func(r Rectangle, p color.Palette) *Paletted"},
    +		{"NewRGBA", Func, 0, "func(r Rectangle) *RGBA"},
    +		{"NewRGBA64", Func, 0, "func(r Rectangle) *RGBA64"},
    +		{"NewUniform", Func, 0, "func(c color.Color) *Uniform"},
    +		{"NewYCbCr", Func, 0, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr"},
    +		{"Opaque", Var, 0, ""},
    +		{"Paletted", Type, 0, ""},
    +		{"Paletted.Palette", Field, 0, ""},
    +		{"Paletted.Pix", Field, 0, ""},
    +		{"Paletted.Rect", Field, 0, ""},
    +		{"Paletted.Stride", Field, 0, ""},
    +		{"PalettedImage", Type, 0, ""},
    +		{"Point", Type, 0, ""},
    +		{"Point.X", Field, 0, ""},
    +		{"Point.Y", Field, 0, ""},
    +		{"Pt", Func, 0, "func(X int, Y int) Point"},
    +		{"RGBA", Type, 0, ""},
    +		{"RGBA.Pix", Field, 0, ""},
    +		{"RGBA.Rect", Field, 0, ""},
    +		{"RGBA.Stride", Field, 0, ""},
    +		{"RGBA64", Type, 0, ""},
    +		{"RGBA64.Pix", Field, 0, ""},
    +		{"RGBA64.Rect", Field, 0, ""},
    +		{"RGBA64.Stride", Field, 0, ""},
    +		{"RGBA64Image", Type, 17, ""},
    +		{"Rect", Func, 0, "func(x0 int, y0 int, x1 int, y1 int) Rectangle"},
    +		{"Rectangle", Type, 0, ""},
    +		{"Rectangle.Max", Field, 0, ""},
    +		{"Rectangle.Min", Field, 0, ""},
    +		{"RegisterFormat", Func, 0, "func(name string, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error))"},
    +		{"Transparent", Var, 0, ""},
    +		{"Uniform", Type, 0, ""},
    +		{"Uniform.C", Field, 0, ""},
    +		{"White", Var, 0, ""},
    +		{"YCbCr", Type, 0, ""},
    +		{"YCbCr.CStride", Field, 0, ""},
    +		{"YCbCr.Cb", Field, 0, ""},
    +		{"YCbCr.Cr", Field, 0, ""},
    +		{"YCbCr.Rect", Field, 0, ""},
    +		{"YCbCr.SubsampleRatio", Field, 0, ""},
    +		{"YCbCr.Y", Field, 0, ""},
    +		{"YCbCr.YStride", Field, 0, ""},
    +		{"YCbCrSubsampleRatio", Type, 0, ""},
    +		{"YCbCrSubsampleRatio410", Const, 5, ""},
    +		{"YCbCrSubsampleRatio411", Const, 5, ""},
    +		{"YCbCrSubsampleRatio420", Const, 0, ""},
    +		{"YCbCrSubsampleRatio422", Const, 0, ""},
    +		{"YCbCrSubsampleRatio440", Const, 1, ""},
    +		{"YCbCrSubsampleRatio444", Const, 0, ""},
    +		{"ZP", Var, 0, ""},
    +		{"ZR", Var, 0, ""},
     	},
     	"image/color": {
    -		{"(Alpha).RGBA", Method, 0},
    -		{"(Alpha16).RGBA", Method, 0},
    -		{"(CMYK).RGBA", Method, 5},
    -		{"(Gray).RGBA", Method, 0},
    -		{"(Gray16).RGBA", Method, 0},
    -		{"(NRGBA).RGBA", Method, 0},
    -		{"(NRGBA64).RGBA", Method, 0},
    -		{"(NYCbCrA).RGBA", Method, 6},
    -		{"(Palette).Convert", Method, 0},
    -		{"(Palette).Index", Method, 0},
    -		{"(RGBA).RGBA", Method, 0},
    -		{"(RGBA64).RGBA", Method, 0},
    -		{"(YCbCr).RGBA", Method, 0},
    -		{"Alpha", Type, 0},
    -		{"Alpha.A", Field, 0},
    -		{"Alpha16", Type, 0},
    -		{"Alpha16.A", Field, 0},
    -		{"Alpha16Model", Var, 0},
    -		{"AlphaModel", Var, 0},
    -		{"Black", Var, 0},
    -		{"CMYK", Type, 5},
    -		{"CMYK.C", Field, 5},
    -		{"CMYK.K", Field, 5},
    -		{"CMYK.M", Field, 5},
    -		{"CMYK.Y", Field, 5},
    -		{"CMYKModel", Var, 5},
    -		{"CMYKToRGB", Func, 5},
    -		{"Color", Type, 0},
    -		{"Gray", Type, 0},
    -		{"Gray.Y", Field, 0},
    -		{"Gray16", Type, 0},
    -		{"Gray16.Y", Field, 0},
    -		{"Gray16Model", Var, 0},
    -		{"GrayModel", Var, 0},
    -		{"Model", Type, 0},
    -		{"ModelFunc", Func, 0},
    -		{"NRGBA", Type, 0},
    -		{"NRGBA.A", Field, 0},
    -		{"NRGBA.B", Field, 0},
    -		{"NRGBA.G", Field, 0},
    -		{"NRGBA.R", Field, 0},
    -		{"NRGBA64", Type, 0},
    -		{"NRGBA64.A", Field, 0},
    -		{"NRGBA64.B", Field, 0},
    -		{"NRGBA64.G", Field, 0},
    -		{"NRGBA64.R", Field, 0},
    -		{"NRGBA64Model", Var, 0},
    -		{"NRGBAModel", Var, 0},
    -		{"NYCbCrA", Type, 6},
    -		{"NYCbCrA.A", Field, 6},
    -		{"NYCbCrA.YCbCr", Field, 6},
    -		{"NYCbCrAModel", Var, 6},
    -		{"Opaque", Var, 0},
    -		{"Palette", Type, 0},
    -		{"RGBA", Type, 0},
    -		{"RGBA.A", Field, 0},
    -		{"RGBA.B", Field, 0},
    -		{"RGBA.G", Field, 0},
    -		{"RGBA.R", Field, 0},
    -		{"RGBA64", Type, 0},
    -		{"RGBA64.A", Field, 0},
    -		{"RGBA64.B", Field, 0},
    -		{"RGBA64.G", Field, 0},
    -		{"RGBA64.R", Field, 0},
    -		{"RGBA64Model", Var, 0},
    -		{"RGBAModel", Var, 0},
    -		{"RGBToCMYK", Func, 5},
    -		{"RGBToYCbCr", Func, 0},
    -		{"Transparent", Var, 0},
    -		{"White", Var, 0},
    -		{"YCbCr", Type, 0},
    -		{"YCbCr.Cb", Field, 0},
    -		{"YCbCr.Cr", Field, 0},
    -		{"YCbCr.Y", Field, 0},
    -		{"YCbCrModel", Var, 0},
    -		{"YCbCrToRGB", Func, 0},
    +		{"(Alpha).RGBA", Method, 0, ""},
    +		{"(Alpha16).RGBA", Method, 0, ""},
    +		{"(CMYK).RGBA", Method, 5, ""},
    +		{"(Gray).RGBA", Method, 0, ""},
    +		{"(Gray16).RGBA", Method, 0, ""},
    +		{"(NRGBA).RGBA", Method, 0, ""},
    +		{"(NRGBA64).RGBA", Method, 0, ""},
    +		{"(NYCbCrA).RGBA", Method, 6, ""},
    +		{"(Palette).Convert", Method, 0, ""},
    +		{"(Palette).Index", Method, 0, ""},
    +		{"(RGBA).RGBA", Method, 0, ""},
    +		{"(RGBA64).RGBA", Method, 0, ""},
    +		{"(YCbCr).RGBA", Method, 0, ""},
    +		{"Alpha", Type, 0, ""},
    +		{"Alpha.A", Field, 0, ""},
    +		{"Alpha16", Type, 0, ""},
    +		{"Alpha16.A", Field, 0, ""},
    +		{"Alpha16Model", Var, 0, ""},
    +		{"AlphaModel", Var, 0, ""},
    +		{"Black", Var, 0, ""},
    +		{"CMYK", Type, 5, ""},
    +		{"CMYK.C", Field, 5, ""},
    +		{"CMYK.K", Field, 5, ""},
    +		{"CMYK.M", Field, 5, ""},
    +		{"CMYK.Y", Field, 5, ""},
    +		{"CMYKModel", Var, 5, ""},
    +		{"CMYKToRGB", Func, 5, "func(c uint8, m uint8, y uint8, k uint8) (uint8, uint8, uint8)"},
    +		{"Color", Type, 0, ""},
    +		{"Gray", Type, 0, ""},
    +		{"Gray.Y", Field, 0, ""},
    +		{"Gray16", Type, 0, ""},
    +		{"Gray16.Y", Field, 0, ""},
    +		{"Gray16Model", Var, 0, ""},
    +		{"GrayModel", Var, 0, ""},
    +		{"Model", Type, 0, ""},
    +		{"ModelFunc", Func, 0, "func(f func(Color) Color) Model"},
    +		{"NRGBA", Type, 0, ""},
    +		{"NRGBA.A", Field, 0, ""},
    +		{"NRGBA.B", Field, 0, ""},
    +		{"NRGBA.G", Field, 0, ""},
    +		{"NRGBA.R", Field, 0, ""},
    +		{"NRGBA64", Type, 0, ""},
    +		{"NRGBA64.A", Field, 0, ""},
    +		{"NRGBA64.B", Field, 0, ""},
    +		{"NRGBA64.G", Field, 0, ""},
    +		{"NRGBA64.R", Field, 0, ""},
    +		{"NRGBA64Model", Var, 0, ""},
    +		{"NRGBAModel", Var, 0, ""},
    +		{"NYCbCrA", Type, 6, ""},
    +		{"NYCbCrA.A", Field, 6, ""},
    +		{"NYCbCrA.YCbCr", Field, 6, ""},
    +		{"NYCbCrAModel", Var, 6, ""},
    +		{"Opaque", Var, 0, ""},
    +		{"Palette", Type, 0, ""},
    +		{"RGBA", Type, 0, ""},
    +		{"RGBA.A", Field, 0, ""},
    +		{"RGBA.B", Field, 0, ""},
    +		{"RGBA.G", Field, 0, ""},
    +		{"RGBA.R", Field, 0, ""},
    +		{"RGBA64", Type, 0, ""},
    +		{"RGBA64.A", Field, 0, ""},
    +		{"RGBA64.B", Field, 0, ""},
    +		{"RGBA64.G", Field, 0, ""},
    +		{"RGBA64.R", Field, 0, ""},
    +		{"RGBA64Model", Var, 0, ""},
    +		{"RGBAModel", Var, 0, ""},
    +		{"RGBToCMYK", Func, 5, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8, uint8)"},
    +		{"RGBToYCbCr", Func, 0, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8)"},
    +		{"Transparent", Var, 0, ""},
    +		{"White", Var, 0, ""},
    +		{"YCbCr", Type, 0, ""},
    +		{"YCbCr.Cb", Field, 0, ""},
    +		{"YCbCr.Cr", Field, 0, ""},
    +		{"YCbCr.Y", Field, 0, ""},
    +		{"YCbCrModel", Var, 0, ""},
    +		{"YCbCrToRGB", Func, 0, "func(y uint8, cb uint8, cr uint8) (uint8, uint8, uint8)"},
     	},
     	"image/color/palette": {
    -		{"Plan9", Var, 2},
    -		{"WebSafe", Var, 2},
    +		{"Plan9", Var, 2, ""},
    +		{"WebSafe", Var, 2, ""},
     	},
     	"image/draw": {
    -		{"(Op).Draw", Method, 2},
    -		{"Draw", Func, 0},
    -		{"DrawMask", Func, 0},
    -		{"Drawer", Type, 2},
    -		{"FloydSteinberg", Var, 2},
    -		{"Image", Type, 0},
    -		{"Op", Type, 0},
    -		{"Over", Const, 0},
    -		{"Quantizer", Type, 2},
    -		{"RGBA64Image", Type, 17},
    -		{"Src", Const, 0},
    +		{"(Op).Draw", Method, 2, ""},
    +		{"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"},
    +		{"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"},
    +		{"Drawer", Type, 2, ""},
    +		{"FloydSteinberg", Var, 2, ""},
    +		{"Image", Type, 0, ""},
    +		{"Op", Type, 0, ""},
    +		{"Over", Const, 0, ""},
    +		{"Quantizer", Type, 2, ""},
    +		{"RGBA64Image", Type, 17, ""},
    +		{"Src", Const, 0, ""},
     	},
     	"image/gif": {
    -		{"Decode", Func, 0},
    -		{"DecodeAll", Func, 0},
    -		{"DecodeConfig", Func, 0},
    -		{"DisposalBackground", Const, 5},
    -		{"DisposalNone", Const, 5},
    -		{"DisposalPrevious", Const, 5},
    -		{"Encode", Func, 2},
    -		{"EncodeAll", Func, 2},
    -		{"GIF", Type, 0},
    -		{"GIF.BackgroundIndex", Field, 5},
    -		{"GIF.Config", Field, 5},
    -		{"GIF.Delay", Field, 0},
    -		{"GIF.Disposal", Field, 5},
    -		{"GIF.Image", Field, 0},
    -		{"GIF.LoopCount", Field, 0},
    -		{"Options", Type, 2},
    -		{"Options.Drawer", Field, 2},
    -		{"Options.NumColors", Field, 2},
    -		{"Options.Quantizer", Field, 2},
    +		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
    +		{"DecodeAll", Func, 0, "func(r io.Reader) (*GIF, error)"},
    +		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
    +		{"DisposalBackground", Const, 5, ""},
    +		{"DisposalNone", Const, 5, ""},
    +		{"DisposalPrevious", Const, 5, ""},
    +		{"Encode", Func, 2, "func(w io.Writer, m image.Image, o *Options) error"},
    +		{"EncodeAll", Func, 2, "func(w io.Writer, g *GIF) error"},
    +		{"GIF", Type, 0, ""},
    +		{"GIF.BackgroundIndex", Field, 5, ""},
    +		{"GIF.Config", Field, 5, ""},
    +		{"GIF.Delay", Field, 0, ""},
    +		{"GIF.Disposal", Field, 5, ""},
    +		{"GIF.Image", Field, 0, ""},
    +		{"GIF.LoopCount", Field, 0, ""},
    +		{"Options", Type, 2, ""},
    +		{"Options.Drawer", Field, 2, ""},
    +		{"Options.NumColors", Field, 2, ""},
    +		{"Options.Quantizer", Field, 2, ""},
     	},
     	"image/jpeg": {
    -		{"(FormatError).Error", Method, 0},
    -		{"(UnsupportedError).Error", Method, 0},
    -		{"Decode", Func, 0},
    -		{"DecodeConfig", Func, 0},
    -		{"DefaultQuality", Const, 0},
    -		{"Encode", Func, 0},
    -		{"FormatError", Type, 0},
    -		{"Options", Type, 0},
    -		{"Options.Quality", Field, 0},
    -		{"Reader", Type, 0},
    -		{"UnsupportedError", Type, 0},
    +		{"(FormatError).Error", Method, 0, ""},
    +		{"(UnsupportedError).Error", Method, 0, ""},
    +		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
    +		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
    +		{"DefaultQuality", Const, 0, ""},
    +		{"Encode", Func, 0, "func(w io.Writer, m image.Image, o *Options) error"},
    +		{"FormatError", Type, 0, ""},
    +		{"Options", Type, 0, ""},
    +		{"Options.Quality", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"UnsupportedError", Type, 0, ""},
     	},
     	"image/png": {
    -		{"(*Encoder).Encode", Method, 4},
    -		{"(FormatError).Error", Method, 0},
    -		{"(UnsupportedError).Error", Method, 0},
    -		{"BestCompression", Const, 4},
    -		{"BestSpeed", Const, 4},
    -		{"CompressionLevel", Type, 4},
    -		{"Decode", Func, 0},
    -		{"DecodeConfig", Func, 0},
    -		{"DefaultCompression", Const, 4},
    -		{"Encode", Func, 0},
    -		{"Encoder", Type, 4},
    -		{"Encoder.BufferPool", Field, 9},
    -		{"Encoder.CompressionLevel", Field, 4},
    -		{"EncoderBuffer", Type, 9},
    -		{"EncoderBufferPool", Type, 9},
    -		{"FormatError", Type, 0},
    -		{"NoCompression", Const, 4},
    -		{"UnsupportedError", Type, 0},
    +		{"(*Encoder).Encode", Method, 4, ""},
    +		{"(FormatError).Error", Method, 0, ""},
    +		{"(UnsupportedError).Error", Method, 0, ""},
    +		{"BestCompression", Const, 4, ""},
    +		{"BestSpeed", Const, 4, ""},
    +		{"CompressionLevel", Type, 4, ""},
    +		{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
    +		{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
    +		{"DefaultCompression", Const, 4, ""},
    +		{"Encode", Func, 0, "func(w io.Writer, m image.Image) error"},
    +		{"Encoder", Type, 4, ""},
    +		{"Encoder.BufferPool", Field, 9, ""},
    +		{"Encoder.CompressionLevel", Field, 4, ""},
    +		{"EncoderBuffer", Type, 9, ""},
    +		{"EncoderBufferPool", Type, 9, ""},
    +		{"FormatError", Type, 0, ""},
    +		{"NoCompression", Const, 4, ""},
    +		{"UnsupportedError", Type, 0, ""},
     	},
     	"index/suffixarray": {
    -		{"(*Index).Bytes", Method, 0},
    -		{"(*Index).FindAllIndex", Method, 0},
    -		{"(*Index).Lookup", Method, 0},
    -		{"(*Index).Read", Method, 0},
    -		{"(*Index).Write", Method, 0},
    -		{"Index", Type, 0},
    -		{"New", Func, 0},
    +		{"(*Index).Bytes", Method, 0, ""},
    +		{"(*Index).FindAllIndex", Method, 0, ""},
    +		{"(*Index).Lookup", Method, 0, ""},
    +		{"(*Index).Read", Method, 0, ""},
    +		{"(*Index).Write", Method, 0, ""},
    +		{"Index", Type, 0, ""},
    +		{"New", Func, 0, "func(data []byte) *Index"},
     	},
     	"io": {
    -		{"(*LimitedReader).Read", Method, 0},
    -		{"(*OffsetWriter).Seek", Method, 20},
    -		{"(*OffsetWriter).Write", Method, 20},
    -		{"(*OffsetWriter).WriteAt", Method, 20},
    -		{"(*PipeReader).Close", Method, 0},
    -		{"(*PipeReader).CloseWithError", Method, 0},
    -		{"(*PipeReader).Read", Method, 0},
    -		{"(*PipeWriter).Close", Method, 0},
    -		{"(*PipeWriter).CloseWithError", Method, 0},
    -		{"(*PipeWriter).Write", Method, 0},
    -		{"(*SectionReader).Outer", Method, 22},
    -		{"(*SectionReader).Read", Method, 0},
    -		{"(*SectionReader).ReadAt", Method, 0},
    -		{"(*SectionReader).Seek", Method, 0},
    -		{"(*SectionReader).Size", Method, 0},
    -		{"ByteReader", Type, 0},
    -		{"ByteScanner", Type, 0},
    -		{"ByteWriter", Type, 1},
    -		{"Closer", Type, 0},
    -		{"Copy", Func, 0},
    -		{"CopyBuffer", Func, 5},
    -		{"CopyN", Func, 0},
    -		{"Discard", Var, 16},
    -		{"EOF", Var, 0},
    -		{"ErrClosedPipe", Var, 0},
    -		{"ErrNoProgress", Var, 1},
    -		{"ErrShortBuffer", Var, 0},
    -		{"ErrShortWrite", Var, 0},
    -		{"ErrUnexpectedEOF", Var, 0},
    -		{"LimitReader", Func, 0},
    -		{"LimitedReader", Type, 0},
    -		{"LimitedReader.N", Field, 0},
    -		{"LimitedReader.R", Field, 0},
    -		{"MultiReader", Func, 0},
    -		{"MultiWriter", Func, 0},
    -		{"NewOffsetWriter", Func, 20},
    -		{"NewSectionReader", Func, 0},
    -		{"NopCloser", Func, 16},
    -		{"OffsetWriter", Type, 20},
    -		{"Pipe", Func, 0},
    -		{"PipeReader", Type, 0},
    -		{"PipeWriter", Type, 0},
    -		{"ReadAll", Func, 16},
    -		{"ReadAtLeast", Func, 0},
    -		{"ReadCloser", Type, 0},
    -		{"ReadFull", Func, 0},
    -		{"ReadSeekCloser", Type, 16},
    -		{"ReadSeeker", Type, 0},
    -		{"ReadWriteCloser", Type, 0},
    -		{"ReadWriteSeeker", Type, 0},
    -		{"ReadWriter", Type, 0},
    -		{"Reader", Type, 0},
    -		{"ReaderAt", Type, 0},
    -		{"ReaderFrom", Type, 0},
    -		{"RuneReader", Type, 0},
    -		{"RuneScanner", Type, 0},
    -		{"SectionReader", Type, 0},
    -		{"SeekCurrent", Const, 7},
    -		{"SeekEnd", Const, 7},
    -		{"SeekStart", Const, 7},
    -		{"Seeker", Type, 0},
    -		{"StringWriter", Type, 12},
    -		{"TeeReader", Func, 0},
    -		{"WriteCloser", Type, 0},
    -		{"WriteSeeker", Type, 0},
    -		{"WriteString", Func, 0},
    -		{"Writer", Type, 0},
    -		{"WriterAt", Type, 0},
    -		{"WriterTo", Type, 0},
    +		{"(*LimitedReader).Read", Method, 0, ""},
    +		{"(*OffsetWriter).Seek", Method, 20, ""},
    +		{"(*OffsetWriter).Write", Method, 20, ""},
    +		{"(*OffsetWriter).WriteAt", Method, 20, ""},
    +		{"(*PipeReader).Close", Method, 0, ""},
    +		{"(*PipeReader).CloseWithError", Method, 0, ""},
    +		{"(*PipeReader).Read", Method, 0, ""},
    +		{"(*PipeWriter).Close", Method, 0, ""},
    +		{"(*PipeWriter).CloseWithError", Method, 0, ""},
    +		{"(*PipeWriter).Write", Method, 0, ""},
    +		{"(*SectionReader).Outer", Method, 22, ""},
    +		{"(*SectionReader).Read", Method, 0, ""},
    +		{"(*SectionReader).ReadAt", Method, 0, ""},
    +		{"(*SectionReader).Seek", Method, 0, ""},
    +		{"(*SectionReader).Size", Method, 0, ""},
    +		{"ByteReader", Type, 0, ""},
    +		{"ByteScanner", Type, 0, ""},
    +		{"ByteWriter", Type, 1, ""},
    +		{"Closer", Type, 0, ""},
    +		{"Copy", Func, 0, "func(dst Writer, src Reader) (written int64, err error)"},
    +		{"CopyBuffer", Func, 5, "func(dst Writer, src Reader, buf []byte) (written int64, err error)"},
    +		{"CopyN", Func, 0, "func(dst Writer, src Reader, n int64) (written int64, err error)"},
    +		{"Discard", Var, 16, ""},
    +		{"EOF", Var, 0, ""},
    +		{"ErrClosedPipe", Var, 0, ""},
    +		{"ErrNoProgress", Var, 1, ""},
    +		{"ErrShortBuffer", Var, 0, ""},
    +		{"ErrShortWrite", Var, 0, ""},
    +		{"ErrUnexpectedEOF", Var, 0, ""},
    +		{"LimitReader", Func, 0, "func(r Reader, n int64) Reader"},
    +		{"LimitedReader", Type, 0, ""},
    +		{"LimitedReader.N", Field, 0, ""},
    +		{"LimitedReader.R", Field, 0, ""},
    +		{"MultiReader", Func, 0, "func(readers ...Reader) Reader"},
    +		{"MultiWriter", Func, 0, "func(writers ...Writer) Writer"},
    +		{"NewOffsetWriter", Func, 20, "func(w WriterAt, off int64) *OffsetWriter"},
    +		{"NewSectionReader", Func, 0, "func(r ReaderAt, off int64, n int64) *SectionReader"},
    +		{"NopCloser", Func, 16, "func(r Reader) ReadCloser"},
    +		{"OffsetWriter", Type, 20, ""},
    +		{"Pipe", Func, 0, "func() (*PipeReader, *PipeWriter)"},
    +		{"PipeReader", Type, 0, ""},
    +		{"PipeWriter", Type, 0, ""},
    +		{"ReadAll", Func, 16, "func(r Reader) ([]byte, error)"},
    +		{"ReadAtLeast", Func, 0, "func(r Reader, buf []byte, min int) (n int, err error)"},
    +		{"ReadCloser", Type, 0, ""},
    +		{"ReadFull", Func, 0, "func(r Reader, buf []byte) (n int, err error)"},
    +		{"ReadSeekCloser", Type, 16, ""},
    +		{"ReadSeeker", Type, 0, ""},
    +		{"ReadWriteCloser", Type, 0, ""},
    +		{"ReadWriteSeeker", Type, 0, ""},
    +		{"ReadWriter", Type, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"ReaderAt", Type, 0, ""},
    +		{"ReaderFrom", Type, 0, ""},
    +		{"RuneReader", Type, 0, ""},
    +		{"RuneScanner", Type, 0, ""},
    +		{"SectionReader", Type, 0, ""},
    +		{"SeekCurrent", Const, 7, ""},
    +		{"SeekEnd", Const, 7, ""},
    +		{"SeekStart", Const, 7, ""},
    +		{"Seeker", Type, 0, ""},
    +		{"StringWriter", Type, 12, ""},
    +		{"TeeReader", Func, 0, "func(r Reader, w Writer) Reader"},
    +		{"WriteCloser", Type, 0, ""},
    +		{"WriteSeeker", Type, 0, ""},
    +		{"WriteString", Func, 0, "func(w Writer, s string) (n int, err error)"},
    +		{"Writer", Type, 0, ""},
    +		{"WriterAt", Type, 0, ""},
    +		{"WriterTo", Type, 0, ""},
     	},
     	"io/fs": {
    -		{"(*PathError).Error", Method, 16},
    -		{"(*PathError).Timeout", Method, 16},
    -		{"(*PathError).Unwrap", Method, 16},
    -		{"(FileMode).IsDir", Method, 16},
    -		{"(FileMode).IsRegular", Method, 16},
    -		{"(FileMode).Perm", Method, 16},
    -		{"(FileMode).String", Method, 16},
    -		{"(FileMode).Type", Method, 16},
    -		{"DirEntry", Type, 16},
    -		{"ErrClosed", Var, 16},
    -		{"ErrExist", Var, 16},
    -		{"ErrInvalid", Var, 16},
    -		{"ErrNotExist", Var, 16},
    -		{"ErrPermission", Var, 16},
    -		{"FS", Type, 16},
    -		{"File", Type, 16},
    -		{"FileInfo", Type, 16},
    -		{"FileInfoToDirEntry", Func, 17},
    -		{"FileMode", Type, 16},
    -		{"FormatDirEntry", Func, 21},
    -		{"FormatFileInfo", Func, 21},
    -		{"Glob", Func, 16},
    -		{"GlobFS", Type, 16},
    -		{"Lstat", Func, 25},
    -		{"ModeAppend", Const, 16},
    -		{"ModeCharDevice", Const, 16},
    -		{"ModeDevice", Const, 16},
    -		{"ModeDir", Const, 16},
    -		{"ModeExclusive", Const, 16},
    -		{"ModeIrregular", Const, 16},
    -		{"ModeNamedPipe", Const, 16},
    -		{"ModePerm", Const, 16},
    -		{"ModeSetgid", Const, 16},
    -		{"ModeSetuid", Const, 16},
    -		{"ModeSocket", Const, 16},
    -		{"ModeSticky", Const, 16},
    -		{"ModeSymlink", Const, 16},
    -		{"ModeTemporary", Const, 16},
    -		{"ModeType", Const, 16},
    -		{"PathError", Type, 16},
    -		{"PathError.Err", Field, 16},
    -		{"PathError.Op", Field, 16},
    -		{"PathError.Path", Field, 16},
    -		{"ReadDir", Func, 16},
    -		{"ReadDirFS", Type, 16},
    -		{"ReadDirFile", Type, 16},
    -		{"ReadFile", Func, 16},
    -		{"ReadFileFS", Type, 16},
    -		{"ReadLink", Func, 25},
    -		{"ReadLinkFS", Type, 25},
    -		{"SkipAll", Var, 20},
    -		{"SkipDir", Var, 16},
    -		{"Stat", Func, 16},
    -		{"StatFS", Type, 16},
    -		{"Sub", Func, 16},
    -		{"SubFS", Type, 16},
    -		{"ValidPath", Func, 16},
    -		{"WalkDir", Func, 16},
    -		{"WalkDirFunc", Type, 16},
    +		{"(*PathError).Error", Method, 16, ""},
    +		{"(*PathError).Timeout", Method, 16, ""},
    +		{"(*PathError).Unwrap", Method, 16, ""},
    +		{"(FileMode).IsDir", Method, 16, ""},
    +		{"(FileMode).IsRegular", Method, 16, ""},
    +		{"(FileMode).Perm", Method, 16, ""},
    +		{"(FileMode).String", Method, 16, ""},
    +		{"(FileMode).Type", Method, 16, ""},
    +		{"DirEntry", Type, 16, ""},
    +		{"ErrClosed", Var, 16, ""},
    +		{"ErrExist", Var, 16, ""},
    +		{"ErrInvalid", Var, 16, ""},
    +		{"ErrNotExist", Var, 16, ""},
    +		{"ErrPermission", Var, 16, ""},
    +		{"FS", Type, 16, ""},
    +		{"File", Type, 16, ""},
    +		{"FileInfo", Type, 16, ""},
    +		{"FileInfoToDirEntry", Func, 17, "func(info FileInfo) DirEntry"},
    +		{"FileMode", Type, 16, ""},
    +		{"FormatDirEntry", Func, 21, "func(dir DirEntry) string"},
    +		{"FormatFileInfo", Func, 21, "func(info FileInfo) string"},
    +		{"Glob", Func, 16, "func(fsys FS, pattern string) (matches []string, err error)"},
    +		{"GlobFS", Type, 16, ""},
    +		{"Lstat", Func, 25, "func(fsys FS, name string) (FileInfo, error)"},
    +		{"ModeAppend", Const, 16, ""},
    +		{"ModeCharDevice", Const, 16, ""},
    +		{"ModeDevice", Const, 16, ""},
    +		{"ModeDir", Const, 16, ""},
    +		{"ModeExclusive", Const, 16, ""},
    +		{"ModeIrregular", Const, 16, ""},
    +		{"ModeNamedPipe", Const, 16, ""},
    +		{"ModePerm", Const, 16, ""},
    +		{"ModeSetgid", Const, 16, ""},
    +		{"ModeSetuid", Const, 16, ""},
    +		{"ModeSocket", Const, 16, ""},
    +		{"ModeSticky", Const, 16, ""},
    +		{"ModeSymlink", Const, 16, ""},
    +		{"ModeTemporary", Const, 16, ""},
    +		{"ModeType", Const, 16, ""},
    +		{"PathError", Type, 16, ""},
    +		{"PathError.Err", Field, 16, ""},
    +		{"PathError.Op", Field, 16, ""},
    +		{"PathError.Path", Field, 16, ""},
    +		{"ReadDir", Func, 16, "func(fsys FS, name string) ([]DirEntry, error)"},
    +		{"ReadDirFS", Type, 16, ""},
    +		{"ReadDirFile", Type, 16, ""},
    +		{"ReadFile", Func, 16, "func(fsys FS, name string) ([]byte, error)"},
    +		{"ReadFileFS", Type, 16, ""},
    +		{"ReadLink", Func, 25, "func(fsys FS, name string) (string, error)"},
    +		{"ReadLinkFS", Type, 25, ""},
    +		{"SkipAll", Var, 20, ""},
    +		{"SkipDir", Var, 16, ""},
    +		{"Stat", Func, 16, "func(fsys FS, name string) (FileInfo, error)"},
    +		{"StatFS", Type, 16, ""},
    +		{"Sub", Func, 16, "func(fsys FS, dir string) (FS, error)"},
    +		{"SubFS", Type, 16, ""},
    +		{"ValidPath", Func, 16, "func(name string) bool"},
    +		{"WalkDir", Func, 16, "func(fsys FS, root string, fn WalkDirFunc) error"},
    +		{"WalkDirFunc", Type, 16, ""},
     	},
     	"io/ioutil": {
    -		{"Discard", Var, 0},
    -		{"NopCloser", Func, 0},
    -		{"ReadAll", Func, 0},
    -		{"ReadDir", Func, 0},
    -		{"ReadFile", Func, 0},
    -		{"TempDir", Func, 0},
    -		{"TempFile", Func, 0},
    -		{"WriteFile", Func, 0},
    +		{"Discard", Var, 0, ""},
    +		{"NopCloser", Func, 0, "func(r io.Reader) io.ReadCloser"},
    +		{"ReadAll", Func, 0, "func(r io.Reader) ([]byte, error)"},
    +		{"ReadDir", Func, 0, "func(dirname string) ([]fs.FileInfo, error)"},
    +		{"ReadFile", Func, 0, "func(filename string) ([]byte, error)"},
    +		{"TempDir", Func, 0, "func(dir string, pattern string) (name string, err error)"},
    +		{"TempFile", Func, 0, "func(dir string, pattern string) (f *os.File, err error)"},
    +		{"WriteFile", Func, 0, "func(filename string, data []byte, perm fs.FileMode) error"},
     	},
     	"iter": {
    -		{"Pull", Func, 23},
    -		{"Pull2", Func, 23},
    -		{"Seq", Type, 23},
    -		{"Seq2", Type, 23},
    +		{"Pull", Func, 23, "func[V any](seq Seq[V]) (next func() (V, bool), stop func())"},
    +		{"Pull2", Func, 23, "func[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func())"},
    +		{"Seq", Type, 23, ""},
    +		{"Seq2", Type, 23, ""},
     	},
     	"log": {
    -		{"(*Logger).Fatal", Method, 0},
    -		{"(*Logger).Fatalf", Method, 0},
    -		{"(*Logger).Fatalln", Method, 0},
    -		{"(*Logger).Flags", Method, 0},
    -		{"(*Logger).Output", Method, 0},
    -		{"(*Logger).Panic", Method, 0},
    -		{"(*Logger).Panicf", Method, 0},
    -		{"(*Logger).Panicln", Method, 0},
    -		{"(*Logger).Prefix", Method, 0},
    -		{"(*Logger).Print", Method, 0},
    -		{"(*Logger).Printf", Method, 0},
    -		{"(*Logger).Println", Method, 0},
    -		{"(*Logger).SetFlags", Method, 0},
    -		{"(*Logger).SetOutput", Method, 5},
    -		{"(*Logger).SetPrefix", Method, 0},
    -		{"(*Logger).Writer", Method, 12},
    -		{"Default", Func, 16},
    -		{"Fatal", Func, 0},
    -		{"Fatalf", Func, 0},
    -		{"Fatalln", Func, 0},
    -		{"Flags", Func, 0},
    -		{"LUTC", Const, 5},
    -		{"Ldate", Const, 0},
    -		{"Llongfile", Const, 0},
    -		{"Lmicroseconds", Const, 0},
    -		{"Lmsgprefix", Const, 14},
    -		{"Logger", Type, 0},
    -		{"Lshortfile", Const, 0},
    -		{"LstdFlags", Const, 0},
    -		{"Ltime", Const, 0},
    -		{"New", Func, 0},
    -		{"Output", Func, 5},
    -		{"Panic", Func, 0},
    -		{"Panicf", Func, 0},
    -		{"Panicln", Func, 0},
    -		{"Prefix", Func, 0},
    -		{"Print", Func, 0},
    -		{"Printf", Func, 0},
    -		{"Println", Func, 0},
    -		{"SetFlags", Func, 0},
    -		{"SetOutput", Func, 0},
    -		{"SetPrefix", Func, 0},
    -		{"Writer", Func, 13},
    +		{"(*Logger).Fatal", Method, 0, ""},
    +		{"(*Logger).Fatalf", Method, 0, ""},
    +		{"(*Logger).Fatalln", Method, 0, ""},
    +		{"(*Logger).Flags", Method, 0, ""},
    +		{"(*Logger).Output", Method, 0, ""},
    +		{"(*Logger).Panic", Method, 0, ""},
    +		{"(*Logger).Panicf", Method, 0, ""},
    +		{"(*Logger).Panicln", Method, 0, ""},
    +		{"(*Logger).Prefix", Method, 0, ""},
    +		{"(*Logger).Print", Method, 0, ""},
    +		{"(*Logger).Printf", Method, 0, ""},
    +		{"(*Logger).Println", Method, 0, ""},
    +		{"(*Logger).SetFlags", Method, 0, ""},
    +		{"(*Logger).SetOutput", Method, 5, ""},
    +		{"(*Logger).SetPrefix", Method, 0, ""},
    +		{"(*Logger).Writer", Method, 12, ""},
    +		{"Default", Func, 16, "func() *Logger"},
    +		{"Fatal", Func, 0, "func(v ...any)"},
    +		{"Fatalf", Func, 0, "func(format string, v ...any)"},
    +		{"Fatalln", Func, 0, "func(v ...any)"},
    +		{"Flags", Func, 0, "func() int"},
    +		{"LUTC", Const, 5, ""},
    +		{"Ldate", Const, 0, ""},
    +		{"Llongfile", Const, 0, ""},
    +		{"Lmicroseconds", Const, 0, ""},
    +		{"Lmsgprefix", Const, 14, ""},
    +		{"Logger", Type, 0, ""},
    +		{"Lshortfile", Const, 0, ""},
    +		{"LstdFlags", Const, 0, ""},
    +		{"Ltime", Const, 0, ""},
    +		{"New", Func, 0, "func(out io.Writer, prefix string, flag int) *Logger"},
    +		{"Output", Func, 5, "func(calldepth int, s string) error"},
    +		{"Panic", Func, 0, "func(v ...any)"},
    +		{"Panicf", Func, 0, "func(format string, v ...any)"},
    +		{"Panicln", Func, 0, "func(v ...any)"},
    +		{"Prefix", Func, 0, "func() string"},
    +		{"Print", Func, 0, "func(v ...any)"},
    +		{"Printf", Func, 0, "func(format string, v ...any)"},
    +		{"Println", Func, 0, "func(v ...any)"},
    +		{"SetFlags", Func, 0, "func(flag int)"},
    +		{"SetOutput", Func, 0, "func(w io.Writer)"},
    +		{"SetPrefix", Func, 0, "func(prefix string)"},
    +		{"Writer", Func, 13, "func() io.Writer"},
     	},
     	"log/slog": {
    -		{"(*JSONHandler).Enabled", Method, 21},
    -		{"(*JSONHandler).Handle", Method, 21},
    -		{"(*JSONHandler).WithAttrs", Method, 21},
    -		{"(*JSONHandler).WithGroup", Method, 21},
    -		{"(*Level).UnmarshalJSON", Method, 21},
    -		{"(*Level).UnmarshalText", Method, 21},
    -		{"(*LevelVar).AppendText", Method, 24},
    -		{"(*LevelVar).Level", Method, 21},
    -		{"(*LevelVar).MarshalText", Method, 21},
    -		{"(*LevelVar).Set", Method, 21},
    -		{"(*LevelVar).String", Method, 21},
    -		{"(*LevelVar).UnmarshalText", Method, 21},
    -		{"(*Logger).Debug", Method, 21},
    -		{"(*Logger).DebugContext", Method, 21},
    -		{"(*Logger).Enabled", Method, 21},
    -		{"(*Logger).Error", Method, 21},
    -		{"(*Logger).ErrorContext", Method, 21},
    -		{"(*Logger).Handler", Method, 21},
    -		{"(*Logger).Info", Method, 21},
    -		{"(*Logger).InfoContext", Method, 21},
    -		{"(*Logger).Log", Method, 21},
    -		{"(*Logger).LogAttrs", Method, 21},
    -		{"(*Logger).Warn", Method, 21},
    -		{"(*Logger).WarnContext", Method, 21},
    -		{"(*Logger).With", Method, 21},
    -		{"(*Logger).WithGroup", Method, 21},
    -		{"(*Record).Add", Method, 21},
    -		{"(*Record).AddAttrs", Method, 21},
    -		{"(*TextHandler).Enabled", Method, 21},
    -		{"(*TextHandler).Handle", Method, 21},
    -		{"(*TextHandler).WithAttrs", Method, 21},
    -		{"(*TextHandler).WithGroup", Method, 21},
    -		{"(Attr).Equal", Method, 21},
    -		{"(Attr).String", Method, 21},
    -		{"(Kind).String", Method, 21},
    -		{"(Level).AppendText", Method, 24},
    -		{"(Level).Level", Method, 21},
    -		{"(Level).MarshalJSON", Method, 21},
    -		{"(Level).MarshalText", Method, 21},
    -		{"(Level).String", Method, 21},
    -		{"(Record).Attrs", Method, 21},
    -		{"(Record).Clone", Method, 21},
    -		{"(Record).NumAttrs", Method, 21},
    -		{"(Value).Any", Method, 21},
    -		{"(Value).Bool", Method, 21},
    -		{"(Value).Duration", Method, 21},
    -		{"(Value).Equal", Method, 21},
    -		{"(Value).Float64", Method, 21},
    -		{"(Value).Group", Method, 21},
    -		{"(Value).Int64", Method, 21},
    -		{"(Value).Kind", Method, 21},
    -		{"(Value).LogValuer", Method, 21},
    -		{"(Value).Resolve", Method, 21},
    -		{"(Value).String", Method, 21},
    -		{"(Value).Time", Method, 21},
    -		{"(Value).Uint64", Method, 21},
    -		{"Any", Func, 21},
    -		{"AnyValue", Func, 21},
    -		{"Attr", Type, 21},
    -		{"Attr.Key", Field, 21},
    -		{"Attr.Value", Field, 21},
    -		{"Bool", Func, 21},
    -		{"BoolValue", Func, 21},
    -		{"Debug", Func, 21},
    -		{"DebugContext", Func, 21},
    -		{"Default", Func, 21},
    -		{"DiscardHandler", Var, 24},
    -		{"Duration", Func, 21},
    -		{"DurationValue", Func, 21},
    -		{"Error", Func, 21},
    -		{"ErrorContext", Func, 21},
    -		{"Float64", Func, 21},
    -		{"Float64Value", Func, 21},
    -		{"Group", Func, 21},
    -		{"GroupValue", Func, 21},
    -		{"Handler", Type, 21},
    -		{"HandlerOptions", Type, 21},
    -		{"HandlerOptions.AddSource", Field, 21},
    -		{"HandlerOptions.Level", Field, 21},
    -		{"HandlerOptions.ReplaceAttr", Field, 21},
    -		{"Info", Func, 21},
    -		{"InfoContext", Func, 21},
    -		{"Int", Func, 21},
    -		{"Int64", Func, 21},
    -		{"Int64Value", Func, 21},
    -		{"IntValue", Func, 21},
    -		{"JSONHandler", Type, 21},
    -		{"Kind", Type, 21},
    -		{"KindAny", Const, 21},
    -		{"KindBool", Const, 21},
    -		{"KindDuration", Const, 21},
    -		{"KindFloat64", Const, 21},
    -		{"KindGroup", Const, 21},
    -		{"KindInt64", Const, 21},
    -		{"KindLogValuer", Const, 21},
    -		{"KindString", Const, 21},
    -		{"KindTime", Const, 21},
    -		{"KindUint64", Const, 21},
    -		{"Level", Type, 21},
    -		{"LevelDebug", Const, 21},
    -		{"LevelError", Const, 21},
    -		{"LevelInfo", Const, 21},
    -		{"LevelKey", Const, 21},
    -		{"LevelVar", Type, 21},
    -		{"LevelWarn", Const, 21},
    -		{"Leveler", Type, 21},
    -		{"Log", Func, 21},
    -		{"LogAttrs", Func, 21},
    -		{"LogValuer", Type, 21},
    -		{"Logger", Type, 21},
    -		{"MessageKey", Const, 21},
    -		{"New", Func, 21},
    -		{"NewJSONHandler", Func, 21},
    -		{"NewLogLogger", Func, 21},
    -		{"NewRecord", Func, 21},
    -		{"NewTextHandler", Func, 21},
    -		{"Record", Type, 21},
    -		{"Record.Level", Field, 21},
    -		{"Record.Message", Field, 21},
    -		{"Record.PC", Field, 21},
    -		{"Record.Time", Field, 21},
    -		{"SetDefault", Func, 21},
    -		{"SetLogLoggerLevel", Func, 22},
    -		{"Source", Type, 21},
    -		{"Source.File", Field, 21},
    -		{"Source.Function", Field, 21},
    -		{"Source.Line", Field, 21},
    -		{"SourceKey", Const, 21},
    -		{"String", Func, 21},
    -		{"StringValue", Func, 21},
    -		{"TextHandler", Type, 21},
    -		{"Time", Func, 21},
    -		{"TimeKey", Const, 21},
    -		{"TimeValue", Func, 21},
    -		{"Uint64", Func, 21},
    -		{"Uint64Value", Func, 21},
    -		{"Value", Type, 21},
    -		{"Warn", Func, 21},
    -		{"WarnContext", Func, 21},
    -		{"With", Func, 21},
    +		{"(*JSONHandler).Enabled", Method, 21, ""},
    +		{"(*JSONHandler).Handle", Method, 21, ""},
    +		{"(*JSONHandler).WithAttrs", Method, 21, ""},
    +		{"(*JSONHandler).WithGroup", Method, 21, ""},
    +		{"(*Level).UnmarshalJSON", Method, 21, ""},
    +		{"(*Level).UnmarshalText", Method, 21, ""},
    +		{"(*LevelVar).AppendText", Method, 24, ""},
    +		{"(*LevelVar).Level", Method, 21, ""},
    +		{"(*LevelVar).MarshalText", Method, 21, ""},
    +		{"(*LevelVar).Set", Method, 21, ""},
    +		{"(*LevelVar).String", Method, 21, ""},
    +		{"(*LevelVar).UnmarshalText", Method, 21, ""},
    +		{"(*Logger).Debug", Method, 21, ""},
    +		{"(*Logger).DebugContext", Method, 21, ""},
    +		{"(*Logger).Enabled", Method, 21, ""},
    +		{"(*Logger).Error", Method, 21, ""},
    +		{"(*Logger).ErrorContext", Method, 21, ""},
    +		{"(*Logger).Handler", Method, 21, ""},
    +		{"(*Logger).Info", Method, 21, ""},
    +		{"(*Logger).InfoContext", Method, 21, ""},
    +		{"(*Logger).Log", Method, 21, ""},
    +		{"(*Logger).LogAttrs", Method, 21, ""},
    +		{"(*Logger).Warn", Method, 21, ""},
    +		{"(*Logger).WarnContext", Method, 21, ""},
    +		{"(*Logger).With", Method, 21, ""},
    +		{"(*Logger).WithGroup", Method, 21, ""},
    +		{"(*Record).Add", Method, 21, ""},
    +		{"(*Record).AddAttrs", Method, 21, ""},
    +		{"(*TextHandler).Enabled", Method, 21, ""},
    +		{"(*TextHandler).Handle", Method, 21, ""},
    +		{"(*TextHandler).WithAttrs", Method, 21, ""},
    +		{"(*TextHandler).WithGroup", Method, 21, ""},
    +		{"(Attr).Equal", Method, 21, ""},
    +		{"(Attr).String", Method, 21, ""},
    +		{"(Kind).String", Method, 21, ""},
    +		{"(Level).AppendText", Method, 24, ""},
    +		{"(Level).Level", Method, 21, ""},
    +		{"(Level).MarshalJSON", Method, 21, ""},
    +		{"(Level).MarshalText", Method, 21, ""},
    +		{"(Level).String", Method, 21, ""},
    +		{"(Record).Attrs", Method, 21, ""},
    +		{"(Record).Clone", Method, 21, ""},
    +		{"(Record).NumAttrs", Method, 21, ""},
    +		{"(Record).Source", Method, 25, ""},
    +		{"(Value).Any", Method, 21, ""},
    +		{"(Value).Bool", Method, 21, ""},
    +		{"(Value).Duration", Method, 21, ""},
    +		{"(Value).Equal", Method, 21, ""},
    +		{"(Value).Float64", Method, 21, ""},
    +		{"(Value).Group", Method, 21, ""},
    +		{"(Value).Int64", Method, 21, ""},
    +		{"(Value).Kind", Method, 21, ""},
    +		{"(Value).LogValuer", Method, 21, ""},
    +		{"(Value).Resolve", Method, 21, ""},
    +		{"(Value).String", Method, 21, ""},
    +		{"(Value).Time", Method, 21, ""},
    +		{"(Value).Uint64", Method, 21, ""},
    +		{"Any", Func, 21, "func(key string, value any) Attr"},
    +		{"AnyValue", Func, 21, "func(v any) Value"},
    +		{"Attr", Type, 21, ""},
    +		{"Attr.Key", Field, 21, ""},
    +		{"Attr.Value", Field, 21, ""},
    +		{"Bool", Func, 21, "func(key string, v bool) Attr"},
    +		{"BoolValue", Func, 21, "func(v bool) Value"},
    +		{"Debug", Func, 21, "func(msg string, args ...any)"},
    +		{"DebugContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
    +		{"Default", Func, 21, "func() *Logger"},
    +		{"DiscardHandler", Var, 24, ""},
    +		{"Duration", Func, 21, "func(key string, v time.Duration) Attr"},
    +		{"DurationValue", Func, 21, "func(v time.Duration) Value"},
    +		{"Error", Func, 21, "func(msg string, args ...any)"},
    +		{"ErrorContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
    +		{"Float64", Func, 21, "func(key string, v float64) Attr"},
    +		{"Float64Value", Func, 21, "func(v float64) Value"},
    +		{"Group", Func, 21, "func(key string, args ...any) Attr"},
    +		{"GroupAttrs", Func, 25, "func(key string, attrs ...Attr) Attr"},
    +		{"GroupValue", Func, 21, "func(as ...Attr) Value"},
    +		{"Handler", Type, 21, ""},
    +		{"HandlerOptions", Type, 21, ""},
    +		{"HandlerOptions.AddSource", Field, 21, ""},
    +		{"HandlerOptions.Level", Field, 21, ""},
    +		{"HandlerOptions.ReplaceAttr", Field, 21, ""},
    +		{"Info", Func, 21, "func(msg string, args ...any)"},
    +		{"InfoContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
    +		{"Int", Func, 21, "func(key string, value int) Attr"},
    +		{"Int64", Func, 21, "func(key string, value int64) Attr"},
    +		{"Int64Value", Func, 21, "func(v int64) Value"},
    +		{"IntValue", Func, 21, "func(v int) Value"},
    +		{"JSONHandler", Type, 21, ""},
    +		{"Kind", Type, 21, ""},
    +		{"KindAny", Const, 21, ""},
    +		{"KindBool", Const, 21, ""},
    +		{"KindDuration", Const, 21, ""},
    +		{"KindFloat64", Const, 21, ""},
    +		{"KindGroup", Const, 21, ""},
    +		{"KindInt64", Const, 21, ""},
    +		{"KindLogValuer", Const, 21, ""},
    +		{"KindString", Const, 21, ""},
    +		{"KindTime", Const, 21, ""},
    +		{"KindUint64", Const, 21, ""},
    +		{"Level", Type, 21, ""},
    +		{"LevelDebug", Const, 21, ""},
    +		{"LevelError", Const, 21, ""},
    +		{"LevelInfo", Const, 21, ""},
    +		{"LevelKey", Const, 21, ""},
    +		{"LevelVar", Type, 21, ""},
    +		{"LevelWarn", Const, 21, ""},
    +		{"Leveler", Type, 21, ""},
    +		{"Log", Func, 21, "func(ctx context.Context, level Level, msg string, args ...any)"},
    +		{"LogAttrs", Func, 21, "func(ctx context.Context, level Level, msg string, attrs ...Attr)"},
    +		{"LogValuer", Type, 21, ""},
    +		{"Logger", Type, 21, ""},
    +		{"MessageKey", Const, 21, ""},
    +		{"New", Func, 21, "func(h Handler) *Logger"},
    +		{"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"},
    +		{"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"},
    +		{"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"},
    +		{"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"},
    +		{"Record", Type, 21, ""},
    +		{"Record.Level", Field, 21, ""},
    +		{"Record.Message", Field, 21, ""},
    +		{"Record.PC", Field, 21, ""},
    +		{"Record.Time", Field, 21, ""},
    +		{"SetDefault", Func, 21, "func(l *Logger)"},
    +		{"SetLogLoggerLevel", Func, 22, "func(level Level) (oldLevel Level)"},
    +		{"Source", Type, 21, ""},
    +		{"Source.File", Field, 21, ""},
    +		{"Source.Function", Field, 21, ""},
    +		{"Source.Line", Field, 21, ""},
    +		{"SourceKey", Const, 21, ""},
    +		{"String", Func, 21, "func(key string, value string) Attr"},
    +		{"StringValue", Func, 21, "func(value string) Value"},
    +		{"TextHandler", Type, 21, ""},
    +		{"Time", Func, 21, "func(key string, v time.Time) Attr"},
    +		{"TimeKey", Const, 21, ""},
    +		{"TimeValue", Func, 21, "func(v time.Time) Value"},
    +		{"Uint64", Func, 21, "func(key string, v uint64) Attr"},
    +		{"Uint64Value", Func, 21, "func(v uint64) Value"},
    +		{"Value", Type, 21, ""},
    +		{"Warn", Func, 21, "func(msg string, args ...any)"},
    +		{"WarnContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
    +		{"With", Func, 21, "func(args ...any) *Logger"},
     	},
     	"log/syslog": {
    -		{"(*Writer).Alert", Method, 0},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).Crit", Method, 0},
    -		{"(*Writer).Debug", Method, 0},
    -		{"(*Writer).Emerg", Method, 0},
    -		{"(*Writer).Err", Method, 0},
    -		{"(*Writer).Info", Method, 0},
    -		{"(*Writer).Notice", Method, 0},
    -		{"(*Writer).Warning", Method, 0},
    -		{"(*Writer).Write", Method, 0},
    -		{"Dial", Func, 0},
    -		{"LOG_ALERT", Const, 0},
    -		{"LOG_AUTH", Const, 1},
    -		{"LOG_AUTHPRIV", Const, 1},
    -		{"LOG_CRIT", Const, 0},
    -		{"LOG_CRON", Const, 1},
    -		{"LOG_DAEMON", Const, 1},
    -		{"LOG_DEBUG", Const, 0},
    -		{"LOG_EMERG", Const, 0},
    -		{"LOG_ERR", Const, 0},
    -		{"LOG_FTP", Const, 1},
    -		{"LOG_INFO", Const, 0},
    -		{"LOG_KERN", Const, 1},
    -		{"LOG_LOCAL0", Const, 1},
    -		{"LOG_LOCAL1", Const, 1},
    -		{"LOG_LOCAL2", Const, 1},
    -		{"LOG_LOCAL3", Const, 1},
    -		{"LOG_LOCAL4", Const, 1},
    -		{"LOG_LOCAL5", Const, 1},
    -		{"LOG_LOCAL6", Const, 1},
    -		{"LOG_LOCAL7", Const, 1},
    -		{"LOG_LPR", Const, 1},
    -		{"LOG_MAIL", Const, 1},
    -		{"LOG_NEWS", Const, 1},
    -		{"LOG_NOTICE", Const, 0},
    -		{"LOG_SYSLOG", Const, 1},
    -		{"LOG_USER", Const, 1},
    -		{"LOG_UUCP", Const, 1},
    -		{"LOG_WARNING", Const, 0},
    -		{"New", Func, 0},
    -		{"NewLogger", Func, 0},
    -		{"Priority", Type, 0},
    -		{"Writer", Type, 0},
    +		{"(*Writer).Alert", Method, 0, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).Crit", Method, 0, ""},
    +		{"(*Writer).Debug", Method, 0, ""},
    +		{"(*Writer).Emerg", Method, 0, ""},
    +		{"(*Writer).Err", Method, 0, ""},
    +		{"(*Writer).Info", Method, 0, ""},
    +		{"(*Writer).Notice", Method, 0, ""},
    +		{"(*Writer).Warning", Method, 0, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"Dial", Func, 0, "func(network string, raddr string, priority Priority, tag string) (*Writer, error)"},
    +		{"LOG_ALERT", Const, 0, ""},
    +		{"LOG_AUTH", Const, 1, ""},
    +		{"LOG_AUTHPRIV", Const, 1, ""},
    +		{"LOG_CRIT", Const, 0, ""},
    +		{"LOG_CRON", Const, 1, ""},
    +		{"LOG_DAEMON", Const, 1, ""},
    +		{"LOG_DEBUG", Const, 0, ""},
    +		{"LOG_EMERG", Const, 0, ""},
    +		{"LOG_ERR", Const, 0, ""},
    +		{"LOG_FTP", Const, 1, ""},
    +		{"LOG_INFO", Const, 0, ""},
    +		{"LOG_KERN", Const, 1, ""},
    +		{"LOG_LOCAL0", Const, 1, ""},
    +		{"LOG_LOCAL1", Const, 1, ""},
    +		{"LOG_LOCAL2", Const, 1, ""},
    +		{"LOG_LOCAL3", Const, 1, ""},
    +		{"LOG_LOCAL4", Const, 1, ""},
    +		{"LOG_LOCAL5", Const, 1, ""},
    +		{"LOG_LOCAL6", Const, 1, ""},
    +		{"LOG_LOCAL7", Const, 1, ""},
    +		{"LOG_LPR", Const, 1, ""},
    +		{"LOG_MAIL", Const, 1, ""},
    +		{"LOG_NEWS", Const, 1, ""},
    +		{"LOG_NOTICE", Const, 0, ""},
    +		{"LOG_SYSLOG", Const, 1, ""},
    +		{"LOG_USER", Const, 1, ""},
    +		{"LOG_UUCP", Const, 1, ""},
    +		{"LOG_WARNING", Const, 0, ""},
    +		{"New", Func, 0, "func(priority Priority, tag string) (*Writer, error)"},
    +		{"NewLogger", Func, 0, "func(p Priority, logFlag int) (*log.Logger, error)"},
    +		{"Priority", Type, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"maps": {
    -		{"All", Func, 23},
    -		{"Clone", Func, 21},
    -		{"Collect", Func, 23},
    -		{"Copy", Func, 21},
    -		{"DeleteFunc", Func, 21},
    -		{"Equal", Func, 21},
    -		{"EqualFunc", Func, 21},
    -		{"Insert", Func, 23},
    -		{"Keys", Func, 23},
    -		{"Values", Func, 23},
    +		{"All", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq2[K, V]"},
    +		{"Clone", Func, 21, "func[M ~map[K]V, K comparable, V any](m M) M"},
    +		{"Collect", Func, 23, "func[K comparable, V any](seq iter.Seq2[K, V]) map[K]V"},
    +		{"Copy", Func, 21, "func[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2)"},
    +		{"DeleteFunc", Func, 21, "func[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool)"},
    +		{"Equal", Func, 21, "func[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool"},
    +		{"EqualFunc", Func, 21, "func[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool"},
    +		{"Insert", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map, seq iter.Seq2[K, V])"},
    +		{"Keys", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[K]"},
    +		{"Values", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[V]"},
     	},
     	"math": {
    -		{"Abs", Func, 0},
    -		{"Acos", Func, 0},
    -		{"Acosh", Func, 0},
    -		{"Asin", Func, 0},
    -		{"Asinh", Func, 0},
    -		{"Atan", Func, 0},
    -		{"Atan2", Func, 0},
    -		{"Atanh", Func, 0},
    -		{"Cbrt", Func, 0},
    -		{"Ceil", Func, 0},
    -		{"Copysign", Func, 0},
    -		{"Cos", Func, 0},
    -		{"Cosh", Func, 0},
    -		{"Dim", Func, 0},
    -		{"E", Const, 0},
    -		{"Erf", Func, 0},
    -		{"Erfc", Func, 0},
    -		{"Erfcinv", Func, 10},
    -		{"Erfinv", Func, 10},
    -		{"Exp", Func, 0},
    -		{"Exp2", Func, 0},
    -		{"Expm1", Func, 0},
    -		{"FMA", Func, 14},
    -		{"Float32bits", Func, 0},
    -		{"Float32frombits", Func, 0},
    -		{"Float64bits", Func, 0},
    -		{"Float64frombits", Func, 0},
    -		{"Floor", Func, 0},
    -		{"Frexp", Func, 0},
    -		{"Gamma", Func, 0},
    -		{"Hypot", Func, 0},
    -		{"Ilogb", Func, 0},
    -		{"Inf", Func, 0},
    -		{"IsInf", Func, 0},
    -		{"IsNaN", Func, 0},
    -		{"J0", Func, 0},
    -		{"J1", Func, 0},
    -		{"Jn", Func, 0},
    -		{"Ldexp", Func, 0},
    -		{"Lgamma", Func, 0},
    -		{"Ln10", Const, 0},
    -		{"Ln2", Const, 0},
    -		{"Log", Func, 0},
    -		{"Log10", Func, 0},
    -		{"Log10E", Const, 0},
    -		{"Log1p", Func, 0},
    -		{"Log2", Func, 0},
    -		{"Log2E", Const, 0},
    -		{"Logb", Func, 0},
    -		{"Max", Func, 0},
    -		{"MaxFloat32", Const, 0},
    -		{"MaxFloat64", Const, 0},
    -		{"MaxInt", Const, 17},
    -		{"MaxInt16", Const, 0},
    -		{"MaxInt32", Const, 0},
    -		{"MaxInt64", Const, 0},
    -		{"MaxInt8", Const, 0},
    -		{"MaxUint", Const, 17},
    -		{"MaxUint16", Const, 0},
    -		{"MaxUint32", Const, 0},
    -		{"MaxUint64", Const, 0},
    -		{"MaxUint8", Const, 0},
    -		{"Min", Func, 0},
    -		{"MinInt", Const, 17},
    -		{"MinInt16", Const, 0},
    -		{"MinInt32", Const, 0},
    -		{"MinInt64", Const, 0},
    -		{"MinInt8", Const, 0},
    -		{"Mod", Func, 0},
    -		{"Modf", Func, 0},
    -		{"NaN", Func, 0},
    -		{"Nextafter", Func, 0},
    -		{"Nextafter32", Func, 4},
    -		{"Phi", Const, 0},
    -		{"Pi", Const, 0},
    -		{"Pow", Func, 0},
    -		{"Pow10", Func, 0},
    -		{"Remainder", Func, 0},
    -		{"Round", Func, 10},
    -		{"RoundToEven", Func, 10},
    -		{"Signbit", Func, 0},
    -		{"Sin", Func, 0},
    -		{"Sincos", Func, 0},
    -		{"Sinh", Func, 0},
    -		{"SmallestNonzeroFloat32", Const, 0},
    -		{"SmallestNonzeroFloat64", Const, 0},
    -		{"Sqrt", Func, 0},
    -		{"Sqrt2", Const, 0},
    -		{"SqrtE", Const, 0},
    -		{"SqrtPhi", Const, 0},
    -		{"SqrtPi", Const, 0},
    -		{"Tan", Func, 0},
    -		{"Tanh", Func, 0},
    -		{"Trunc", Func, 0},
    -		{"Y0", Func, 0},
    -		{"Y1", Func, 0},
    -		{"Yn", Func, 0},
    +		{"Abs", Func, 0, "func(x float64) float64"},
    +		{"Acos", Func, 0, "func(x float64) float64"},
    +		{"Acosh", Func, 0, "func(x float64) float64"},
    +		{"Asin", Func, 0, "func(x float64) float64"},
    +		{"Asinh", Func, 0, "func(x float64) float64"},
    +		{"Atan", Func, 0, "func(x float64) float64"},
    +		{"Atan2", Func, 0, "func(y float64, x float64) float64"},
    +		{"Atanh", Func, 0, "func(x float64) float64"},
    +		{"Cbrt", Func, 0, "func(x float64) float64"},
    +		{"Ceil", Func, 0, "func(x float64) float64"},
    +		{"Copysign", Func, 0, "func(f float64, sign float64) float64"},
    +		{"Cos", Func, 0, "func(x float64) float64"},
    +		{"Cosh", Func, 0, "func(x float64) float64"},
    +		{"Dim", Func, 0, "func(x float64, y float64) float64"},
    +		{"E", Const, 0, ""},
    +		{"Erf", Func, 0, "func(x float64) float64"},
    +		{"Erfc", Func, 0, "func(x float64) float64"},
    +		{"Erfcinv", Func, 10, "func(x float64) float64"},
    +		{"Erfinv", Func, 10, "func(x float64) float64"},
    +		{"Exp", Func, 0, "func(x float64) float64"},
    +		{"Exp2", Func, 0, "func(x float64) float64"},
    +		{"Expm1", Func, 0, "func(x float64) float64"},
    +		{"FMA", Func, 14, "func(x float64, y float64, z float64) float64"},
    +		{"Float32bits", Func, 0, "func(f float32) uint32"},
    +		{"Float32frombits", Func, 0, "func(b uint32) float32"},
    +		{"Float64bits", Func, 0, "func(f float64) uint64"},
    +		{"Float64frombits", Func, 0, "func(b uint64) float64"},
    +		{"Floor", Func, 0, "func(x float64) float64"},
    +		{"Frexp", Func, 0, "func(f float64) (frac float64, exp int)"},
    +		{"Gamma", Func, 0, "func(x float64) float64"},
    +		{"Hypot", Func, 0, "func(p float64, q float64) float64"},
    +		{"Ilogb", Func, 0, "func(x float64) int"},
    +		{"Inf", Func, 0, "func(sign int) float64"},
    +		{"IsInf", Func, 0, "func(f float64, sign int) bool"},
    +		{"IsNaN", Func, 0, "func(f float64) (is bool)"},
    +		{"J0", Func, 0, "func(x float64) float64"},
    +		{"J1", Func, 0, "func(x float64) float64"},
    +		{"Jn", Func, 0, "func(n int, x float64) float64"},
    +		{"Ldexp", Func, 0, "func(frac float64, exp int) float64"},
    +		{"Lgamma", Func, 0, "func(x float64) (lgamma float64, sign int)"},
    +		{"Ln10", Const, 0, ""},
    +		{"Ln2", Const, 0, ""},
    +		{"Log", Func, 0, "func(x float64) float64"},
    +		{"Log10", Func, 0, "func(x float64) float64"},
    +		{"Log10E", Const, 0, ""},
    +		{"Log1p", Func, 0, "func(x float64) float64"},
    +		{"Log2", Func, 0, "func(x float64) float64"},
    +		{"Log2E", Const, 0, ""},
    +		{"Logb", Func, 0, "func(x float64) float64"},
    +		{"Max", Func, 0, "func(x float64, y float64) float64"},
    +		{"MaxFloat32", Const, 0, ""},
    +		{"MaxFloat64", Const, 0, ""},
    +		{"MaxInt", Const, 17, ""},
    +		{"MaxInt16", Const, 0, ""},
    +		{"MaxInt32", Const, 0, ""},
    +		{"MaxInt64", Const, 0, ""},
    +		{"MaxInt8", Const, 0, ""},
    +		{"MaxUint", Const, 17, ""},
    +		{"MaxUint16", Const, 0, ""},
    +		{"MaxUint32", Const, 0, ""},
    +		{"MaxUint64", Const, 0, ""},
    +		{"MaxUint8", Const, 0, ""},
    +		{"Min", Func, 0, "func(x float64, y float64) float64"},
    +		{"MinInt", Const, 17, ""},
    +		{"MinInt16", Const, 0, ""},
    +		{"MinInt32", Const, 0, ""},
    +		{"MinInt64", Const, 0, ""},
    +		{"MinInt8", Const, 0, ""},
    +		{"Mod", Func, 0, "func(x float64, y float64) float64"},
    +		{"Modf", Func, 0, "func(f float64) (int float64, frac float64)"},
    +		{"NaN", Func, 0, "func() float64"},
    +		{"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"},
    +		{"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"},
    +		{"Phi", Const, 0, ""},
    +		{"Pi", Const, 0, ""},
    +		{"Pow", Func, 0, "func(x float64, y float64) float64"},
    +		{"Pow10", Func, 0, "func(n int) float64"},
    +		{"Remainder", Func, 0, "func(x float64, y float64) float64"},
    +		{"Round", Func, 10, "func(x float64) float64"},
    +		{"RoundToEven", Func, 10, "func(x float64) float64"},
    +		{"Signbit", Func, 0, "func(x float64) bool"},
    +		{"Sin", Func, 0, "func(x float64) float64"},
    +		{"Sincos", Func, 0, "func(x float64) (sin float64, cos float64)"},
    +		{"Sinh", Func, 0, "func(x float64) float64"},
    +		{"SmallestNonzeroFloat32", Const, 0, ""},
    +		{"SmallestNonzeroFloat64", Const, 0, ""},
    +		{"Sqrt", Func, 0, "func(x float64) float64"},
    +		{"Sqrt2", Const, 0, ""},
    +		{"SqrtE", Const, 0, ""},
    +		{"SqrtPhi", Const, 0, ""},
    +		{"SqrtPi", Const, 0, ""},
    +		{"Tan", Func, 0, "func(x float64) float64"},
    +		{"Tanh", Func, 0, "func(x float64) float64"},
    +		{"Trunc", Func, 0, "func(x float64) float64"},
    +		{"Y0", Func, 0, "func(x float64) float64"},
    +		{"Y1", Func, 0, "func(x float64) float64"},
    +		{"Yn", Func, 0, "func(n int, x float64) float64"},
     	},
     	"math/big": {
    -		{"(*Float).Abs", Method, 5},
    -		{"(*Float).Acc", Method, 5},
    -		{"(*Float).Add", Method, 5},
    -		{"(*Float).Append", Method, 5},
    -		{"(*Float).AppendText", Method, 24},
    -		{"(*Float).Cmp", Method, 5},
    -		{"(*Float).Copy", Method, 5},
    -		{"(*Float).Float32", Method, 5},
    -		{"(*Float).Float64", Method, 5},
    -		{"(*Float).Format", Method, 5},
    -		{"(*Float).GobDecode", Method, 7},
    -		{"(*Float).GobEncode", Method, 7},
    -		{"(*Float).Int", Method, 5},
    -		{"(*Float).Int64", Method, 5},
    -		{"(*Float).IsInf", Method, 5},
    -		{"(*Float).IsInt", Method, 5},
    -		{"(*Float).MantExp", Method, 5},
    -		{"(*Float).MarshalText", Method, 6},
    -		{"(*Float).MinPrec", Method, 5},
    -		{"(*Float).Mode", Method, 5},
    -		{"(*Float).Mul", Method, 5},
    -		{"(*Float).Neg", Method, 5},
    -		{"(*Float).Parse", Method, 5},
    -		{"(*Float).Prec", Method, 5},
    -		{"(*Float).Quo", Method, 5},
    -		{"(*Float).Rat", Method, 5},
    -		{"(*Float).Scan", Method, 8},
    -		{"(*Float).Set", Method, 5},
    -		{"(*Float).SetFloat64", Method, 5},
    -		{"(*Float).SetInf", Method, 5},
    -		{"(*Float).SetInt", Method, 5},
    -		{"(*Float).SetInt64", Method, 5},
    -		{"(*Float).SetMantExp", Method, 5},
    -		{"(*Float).SetMode", Method, 5},
    -		{"(*Float).SetPrec", Method, 5},
    -		{"(*Float).SetRat", Method, 5},
    -		{"(*Float).SetString", Method, 5},
    -		{"(*Float).SetUint64", Method, 5},
    -		{"(*Float).Sign", Method, 5},
    -		{"(*Float).Signbit", Method, 5},
    -		{"(*Float).Sqrt", Method, 10},
    -		{"(*Float).String", Method, 5},
    -		{"(*Float).Sub", Method, 5},
    -		{"(*Float).Text", Method, 5},
    -		{"(*Float).Uint64", Method, 5},
    -		{"(*Float).UnmarshalText", Method, 6},
    -		{"(*Int).Abs", Method, 0},
    -		{"(*Int).Add", Method, 0},
    -		{"(*Int).And", Method, 0},
    -		{"(*Int).AndNot", Method, 0},
    -		{"(*Int).Append", Method, 6},
    -		{"(*Int).AppendText", Method, 24},
    -		{"(*Int).Binomial", Method, 0},
    -		{"(*Int).Bit", Method, 0},
    -		{"(*Int).BitLen", Method, 0},
    -		{"(*Int).Bits", Method, 0},
    -		{"(*Int).Bytes", Method, 0},
    -		{"(*Int).Cmp", Method, 0},
    -		{"(*Int).CmpAbs", Method, 10},
    -		{"(*Int).Div", Method, 0},
    -		{"(*Int).DivMod", Method, 0},
    -		{"(*Int).Exp", Method, 0},
    -		{"(*Int).FillBytes", Method, 15},
    -		{"(*Int).Float64", Method, 21},
    -		{"(*Int).Format", Method, 0},
    -		{"(*Int).GCD", Method, 0},
    -		{"(*Int).GobDecode", Method, 0},
    -		{"(*Int).GobEncode", Method, 0},
    -		{"(*Int).Int64", Method, 0},
    -		{"(*Int).IsInt64", Method, 9},
    -		{"(*Int).IsUint64", Method, 9},
    -		{"(*Int).Lsh", Method, 0},
    -		{"(*Int).MarshalJSON", Method, 1},
    -		{"(*Int).MarshalText", Method, 3},
    -		{"(*Int).Mod", Method, 0},
    -		{"(*Int).ModInverse", Method, 0},
    -		{"(*Int).ModSqrt", Method, 5},
    -		{"(*Int).Mul", Method, 0},
    -		{"(*Int).MulRange", Method, 0},
    -		{"(*Int).Neg", Method, 0},
    -		{"(*Int).Not", Method, 0},
    -		{"(*Int).Or", Method, 0},
    -		{"(*Int).ProbablyPrime", Method, 0},
    -		{"(*Int).Quo", Method, 0},
    -		{"(*Int).QuoRem", Method, 0},
    -		{"(*Int).Rand", Method, 0},
    -		{"(*Int).Rem", Method, 0},
    -		{"(*Int).Rsh", Method, 0},
    -		{"(*Int).Scan", Method, 0},
    -		{"(*Int).Set", Method, 0},
    -		{"(*Int).SetBit", Method, 0},
    -		{"(*Int).SetBits", Method, 0},
    -		{"(*Int).SetBytes", Method, 0},
    -		{"(*Int).SetInt64", Method, 0},
    -		{"(*Int).SetString", Method, 0},
    -		{"(*Int).SetUint64", Method, 1},
    -		{"(*Int).Sign", Method, 0},
    -		{"(*Int).Sqrt", Method, 8},
    -		{"(*Int).String", Method, 0},
    -		{"(*Int).Sub", Method, 0},
    -		{"(*Int).Text", Method, 6},
    -		{"(*Int).TrailingZeroBits", Method, 13},
    -		{"(*Int).Uint64", Method, 1},
    -		{"(*Int).UnmarshalJSON", Method, 1},
    -		{"(*Int).UnmarshalText", Method, 3},
    -		{"(*Int).Xor", Method, 0},
    -		{"(*Rat).Abs", Method, 0},
    -		{"(*Rat).Add", Method, 0},
    -		{"(*Rat).AppendText", Method, 24},
    -		{"(*Rat).Cmp", Method, 0},
    -		{"(*Rat).Denom", Method, 0},
    -		{"(*Rat).Float32", Method, 4},
    -		{"(*Rat).Float64", Method, 1},
    -		{"(*Rat).FloatPrec", Method, 22},
    -		{"(*Rat).FloatString", Method, 0},
    -		{"(*Rat).GobDecode", Method, 0},
    -		{"(*Rat).GobEncode", Method, 0},
    -		{"(*Rat).Inv", Method, 0},
    -		{"(*Rat).IsInt", Method, 0},
    -		{"(*Rat).MarshalText", Method, 3},
    -		{"(*Rat).Mul", Method, 0},
    -		{"(*Rat).Neg", Method, 0},
    -		{"(*Rat).Num", Method, 0},
    -		{"(*Rat).Quo", Method, 0},
    -		{"(*Rat).RatString", Method, 0},
    -		{"(*Rat).Scan", Method, 0},
    -		{"(*Rat).Set", Method, 0},
    -		{"(*Rat).SetFloat64", Method, 1},
    -		{"(*Rat).SetFrac", Method, 0},
    -		{"(*Rat).SetFrac64", Method, 0},
    -		{"(*Rat).SetInt", Method, 0},
    -		{"(*Rat).SetInt64", Method, 0},
    -		{"(*Rat).SetString", Method, 0},
    -		{"(*Rat).SetUint64", Method, 13},
    -		{"(*Rat).Sign", Method, 0},
    -		{"(*Rat).String", Method, 0},
    -		{"(*Rat).Sub", Method, 0},
    -		{"(*Rat).UnmarshalText", Method, 3},
    -		{"(Accuracy).String", Method, 5},
    -		{"(ErrNaN).Error", Method, 5},
    -		{"(RoundingMode).String", Method, 5},
    -		{"Above", Const, 5},
    -		{"Accuracy", Type, 5},
    -		{"AwayFromZero", Const, 5},
    -		{"Below", Const, 5},
    -		{"ErrNaN", Type, 5},
    -		{"Exact", Const, 5},
    -		{"Float", Type, 5},
    -		{"Int", Type, 0},
    -		{"Jacobi", Func, 5},
    -		{"MaxBase", Const, 0},
    -		{"MaxExp", Const, 5},
    -		{"MaxPrec", Const, 5},
    -		{"MinExp", Const, 5},
    -		{"NewFloat", Func, 5},
    -		{"NewInt", Func, 0},
    -		{"NewRat", Func, 0},
    -		{"ParseFloat", Func, 5},
    -		{"Rat", Type, 0},
    -		{"RoundingMode", Type, 5},
    -		{"ToNearestAway", Const, 5},
    -		{"ToNearestEven", Const, 5},
    -		{"ToNegativeInf", Const, 5},
    -		{"ToPositiveInf", Const, 5},
    -		{"ToZero", Const, 5},
    -		{"Word", Type, 0},
    +		{"(*Float).Abs", Method, 5, ""},
    +		{"(*Float).Acc", Method, 5, ""},
    +		{"(*Float).Add", Method, 5, ""},
    +		{"(*Float).Append", Method, 5, ""},
    +		{"(*Float).AppendText", Method, 24, ""},
    +		{"(*Float).Cmp", Method, 5, ""},
    +		{"(*Float).Copy", Method, 5, ""},
    +		{"(*Float).Float32", Method, 5, ""},
    +		{"(*Float).Float64", Method, 5, ""},
    +		{"(*Float).Format", Method, 5, ""},
    +		{"(*Float).GobDecode", Method, 7, ""},
    +		{"(*Float).GobEncode", Method, 7, ""},
    +		{"(*Float).Int", Method, 5, ""},
    +		{"(*Float).Int64", Method, 5, ""},
    +		{"(*Float).IsInf", Method, 5, ""},
    +		{"(*Float).IsInt", Method, 5, ""},
    +		{"(*Float).MantExp", Method, 5, ""},
    +		{"(*Float).MarshalText", Method, 6, ""},
    +		{"(*Float).MinPrec", Method, 5, ""},
    +		{"(*Float).Mode", Method, 5, ""},
    +		{"(*Float).Mul", Method, 5, ""},
    +		{"(*Float).Neg", Method, 5, ""},
    +		{"(*Float).Parse", Method, 5, ""},
    +		{"(*Float).Prec", Method, 5, ""},
    +		{"(*Float).Quo", Method, 5, ""},
    +		{"(*Float).Rat", Method, 5, ""},
    +		{"(*Float).Scan", Method, 8, ""},
    +		{"(*Float).Set", Method, 5, ""},
    +		{"(*Float).SetFloat64", Method, 5, ""},
    +		{"(*Float).SetInf", Method, 5, ""},
    +		{"(*Float).SetInt", Method, 5, ""},
    +		{"(*Float).SetInt64", Method, 5, ""},
    +		{"(*Float).SetMantExp", Method, 5, ""},
    +		{"(*Float).SetMode", Method, 5, ""},
    +		{"(*Float).SetPrec", Method, 5, ""},
    +		{"(*Float).SetRat", Method, 5, ""},
    +		{"(*Float).SetString", Method, 5, ""},
    +		{"(*Float).SetUint64", Method, 5, ""},
    +		{"(*Float).Sign", Method, 5, ""},
    +		{"(*Float).Signbit", Method, 5, ""},
    +		{"(*Float).Sqrt", Method, 10, ""},
    +		{"(*Float).String", Method, 5, ""},
    +		{"(*Float).Sub", Method, 5, ""},
    +		{"(*Float).Text", Method, 5, ""},
    +		{"(*Float).Uint64", Method, 5, ""},
    +		{"(*Float).UnmarshalText", Method, 6, ""},
    +		{"(*Int).Abs", Method, 0, ""},
    +		{"(*Int).Add", Method, 0, ""},
    +		{"(*Int).And", Method, 0, ""},
    +		{"(*Int).AndNot", Method, 0, ""},
    +		{"(*Int).Append", Method, 6, ""},
    +		{"(*Int).AppendText", Method, 24, ""},
    +		{"(*Int).Binomial", Method, 0, ""},
    +		{"(*Int).Bit", Method, 0, ""},
    +		{"(*Int).BitLen", Method, 0, ""},
    +		{"(*Int).Bits", Method, 0, ""},
    +		{"(*Int).Bytes", Method, 0, ""},
    +		{"(*Int).Cmp", Method, 0, ""},
    +		{"(*Int).CmpAbs", Method, 10, ""},
    +		{"(*Int).Div", Method, 0, ""},
    +		{"(*Int).DivMod", Method, 0, ""},
    +		{"(*Int).Exp", Method, 0, ""},
    +		{"(*Int).FillBytes", Method, 15, ""},
    +		{"(*Int).Float64", Method, 21, ""},
    +		{"(*Int).Format", Method, 0, ""},
    +		{"(*Int).GCD", Method, 0, ""},
    +		{"(*Int).GobDecode", Method, 0, ""},
    +		{"(*Int).GobEncode", Method, 0, ""},
    +		{"(*Int).Int64", Method, 0, ""},
    +		{"(*Int).IsInt64", Method, 9, ""},
    +		{"(*Int).IsUint64", Method, 9, ""},
    +		{"(*Int).Lsh", Method, 0, ""},
    +		{"(*Int).MarshalJSON", Method, 1, ""},
    +		{"(*Int).MarshalText", Method, 3, ""},
    +		{"(*Int).Mod", Method, 0, ""},
    +		{"(*Int).ModInverse", Method, 0, ""},
    +		{"(*Int).ModSqrt", Method, 5, ""},
    +		{"(*Int).Mul", Method, 0, ""},
    +		{"(*Int).MulRange", Method, 0, ""},
    +		{"(*Int).Neg", Method, 0, ""},
    +		{"(*Int).Not", Method, 0, ""},
    +		{"(*Int).Or", Method, 0, ""},
    +		{"(*Int).ProbablyPrime", Method, 0, ""},
    +		{"(*Int).Quo", Method, 0, ""},
    +		{"(*Int).QuoRem", Method, 0, ""},
    +		{"(*Int).Rand", Method, 0, ""},
    +		{"(*Int).Rem", Method, 0, ""},
    +		{"(*Int).Rsh", Method, 0, ""},
    +		{"(*Int).Scan", Method, 0, ""},
    +		{"(*Int).Set", Method, 0, ""},
    +		{"(*Int).SetBit", Method, 0, ""},
    +		{"(*Int).SetBits", Method, 0, ""},
    +		{"(*Int).SetBytes", Method, 0, ""},
    +		{"(*Int).SetInt64", Method, 0, ""},
    +		{"(*Int).SetString", Method, 0, ""},
    +		{"(*Int).SetUint64", Method, 1, ""},
    +		{"(*Int).Sign", Method, 0, ""},
    +		{"(*Int).Sqrt", Method, 8, ""},
    +		{"(*Int).String", Method, 0, ""},
    +		{"(*Int).Sub", Method, 0, ""},
    +		{"(*Int).Text", Method, 6, ""},
    +		{"(*Int).TrailingZeroBits", Method, 13, ""},
    +		{"(*Int).Uint64", Method, 1, ""},
    +		{"(*Int).UnmarshalJSON", Method, 1, ""},
    +		{"(*Int).UnmarshalText", Method, 3, ""},
    +		{"(*Int).Xor", Method, 0, ""},
    +		{"(*Rat).Abs", Method, 0, ""},
    +		{"(*Rat).Add", Method, 0, ""},
    +		{"(*Rat).AppendText", Method, 24, ""},
    +		{"(*Rat).Cmp", Method, 0, ""},
    +		{"(*Rat).Denom", Method, 0, ""},
    +		{"(*Rat).Float32", Method, 4, ""},
    +		{"(*Rat).Float64", Method, 1, ""},
    +		{"(*Rat).FloatPrec", Method, 22, ""},
    +		{"(*Rat).FloatString", Method, 0, ""},
    +		{"(*Rat).GobDecode", Method, 0, ""},
    +		{"(*Rat).GobEncode", Method, 0, ""},
    +		{"(*Rat).Inv", Method, 0, ""},
    +		{"(*Rat).IsInt", Method, 0, ""},
    +		{"(*Rat).MarshalText", Method, 3, ""},
    +		{"(*Rat).Mul", Method, 0, ""},
    +		{"(*Rat).Neg", Method, 0, ""},
    +		{"(*Rat).Num", Method, 0, ""},
    +		{"(*Rat).Quo", Method, 0, ""},
    +		{"(*Rat).RatString", Method, 0, ""},
    +		{"(*Rat).Scan", Method, 0, ""},
    +		{"(*Rat).Set", Method, 0, ""},
    +		{"(*Rat).SetFloat64", Method, 1, ""},
    +		{"(*Rat).SetFrac", Method, 0, ""},
    +		{"(*Rat).SetFrac64", Method, 0, ""},
    +		{"(*Rat).SetInt", Method, 0, ""},
    +		{"(*Rat).SetInt64", Method, 0, ""},
    +		{"(*Rat).SetString", Method, 0, ""},
    +		{"(*Rat).SetUint64", Method, 13, ""},
    +		{"(*Rat).Sign", Method, 0, ""},
    +		{"(*Rat).String", Method, 0, ""},
    +		{"(*Rat).Sub", Method, 0, ""},
    +		{"(*Rat).UnmarshalText", Method, 3, ""},
    +		{"(Accuracy).String", Method, 5, ""},
    +		{"(ErrNaN).Error", Method, 5, ""},
    +		{"(RoundingMode).String", Method, 5, ""},
    +		{"Above", Const, 5, ""},
    +		{"Accuracy", Type, 5, ""},
    +		{"AwayFromZero", Const, 5, ""},
    +		{"Below", Const, 5, ""},
    +		{"ErrNaN", Type, 5, ""},
    +		{"Exact", Const, 5, ""},
    +		{"Float", Type, 5, ""},
    +		{"Int", Type, 0, ""},
    +		{"Jacobi", Func, 5, "func(x *Int, y *Int) int"},
    +		{"MaxBase", Const, 0, ""},
    +		{"MaxExp", Const, 5, ""},
    +		{"MaxPrec", Const, 5, ""},
    +		{"MinExp", Const, 5, ""},
    +		{"NewFloat", Func, 5, "func(x float64) *Float"},
    +		{"NewInt", Func, 0, "func(x int64) *Int"},
    +		{"NewRat", Func, 0, "func(a int64, b int64) *Rat"},
    +		{"ParseFloat", Func, 5, "func(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error)"},
    +		{"Rat", Type, 0, ""},
    +		{"RoundingMode", Type, 5, ""},
    +		{"ToNearestAway", Const, 5, ""},
    +		{"ToNearestEven", Const, 5, ""},
    +		{"ToNegativeInf", Const, 5, ""},
    +		{"ToPositiveInf", Const, 5, ""},
    +		{"ToZero", Const, 5, ""},
    +		{"Word", Type, 0, ""},
     	},
     	"math/bits": {
    -		{"Add", Func, 12},
    -		{"Add32", Func, 12},
    -		{"Add64", Func, 12},
    -		{"Div", Func, 12},
    -		{"Div32", Func, 12},
    -		{"Div64", Func, 12},
    -		{"LeadingZeros", Func, 9},
    -		{"LeadingZeros16", Func, 9},
    -		{"LeadingZeros32", Func, 9},
    -		{"LeadingZeros64", Func, 9},
    -		{"LeadingZeros8", Func, 9},
    -		{"Len", Func, 9},
    -		{"Len16", Func, 9},
    -		{"Len32", Func, 9},
    -		{"Len64", Func, 9},
    -		{"Len8", Func, 9},
    -		{"Mul", Func, 12},
    -		{"Mul32", Func, 12},
    -		{"Mul64", Func, 12},
    -		{"OnesCount", Func, 9},
    -		{"OnesCount16", Func, 9},
    -		{"OnesCount32", Func, 9},
    -		{"OnesCount64", Func, 9},
    -		{"OnesCount8", Func, 9},
    -		{"Rem", Func, 14},
    -		{"Rem32", Func, 14},
    -		{"Rem64", Func, 14},
    -		{"Reverse", Func, 9},
    -		{"Reverse16", Func, 9},
    -		{"Reverse32", Func, 9},
    -		{"Reverse64", Func, 9},
    -		{"Reverse8", Func, 9},
    -		{"ReverseBytes", Func, 9},
    -		{"ReverseBytes16", Func, 9},
    -		{"ReverseBytes32", Func, 9},
    -		{"ReverseBytes64", Func, 9},
    -		{"RotateLeft", Func, 9},
    -		{"RotateLeft16", Func, 9},
    -		{"RotateLeft32", Func, 9},
    -		{"RotateLeft64", Func, 9},
    -		{"RotateLeft8", Func, 9},
    -		{"Sub", Func, 12},
    -		{"Sub32", Func, 12},
    -		{"Sub64", Func, 12},
    -		{"TrailingZeros", Func, 9},
    -		{"TrailingZeros16", Func, 9},
    -		{"TrailingZeros32", Func, 9},
    -		{"TrailingZeros64", Func, 9},
    -		{"TrailingZeros8", Func, 9},
    -		{"UintSize", Const, 9},
    +		{"Add", Func, 12, "func(x uint, y uint, carry uint) (sum uint, carryOut uint)"},
    +		{"Add32", Func, 12, "func(x uint32, y uint32, carry uint32) (sum uint32, carryOut uint32)"},
    +		{"Add64", Func, 12, "func(x uint64, y uint64, carry uint64) (sum uint64, carryOut uint64)"},
    +		{"Div", Func, 12, "func(hi uint, lo uint, y uint) (quo uint, rem uint)"},
    +		{"Div32", Func, 12, "func(hi uint32, lo uint32, y uint32) (quo uint32, rem uint32)"},
    +		{"Div64", Func, 12, "func(hi uint64, lo uint64, y uint64) (quo uint64, rem uint64)"},
    +		{"LeadingZeros", Func, 9, "func(x uint) int"},
    +		{"LeadingZeros16", Func, 9, "func(x uint16) int"},
    +		{"LeadingZeros32", Func, 9, "func(x uint32) int"},
    +		{"LeadingZeros64", Func, 9, "func(x uint64) int"},
    +		{"LeadingZeros8", Func, 9, "func(x uint8) int"},
    +		{"Len", Func, 9, "func(x uint) int"},
    +		{"Len16", Func, 9, "func(x uint16) (n int)"},
    +		{"Len32", Func, 9, "func(x uint32) (n int)"},
    +		{"Len64", Func, 9, "func(x uint64) (n int)"},
    +		{"Len8", Func, 9, "func(x uint8) int"},
    +		{"Mul", Func, 12, "func(x uint, y uint) (hi uint, lo uint)"},
    +		{"Mul32", Func, 12, "func(x uint32, y uint32) (hi uint32, lo uint32)"},
    +		{"Mul64", Func, 12, "func(x uint64, y uint64) (hi uint64, lo uint64)"},
    +		{"OnesCount", Func, 9, "func(x uint) int"},
    +		{"OnesCount16", Func, 9, "func(x uint16) int"},
    +		{"OnesCount32", Func, 9, "func(x uint32) int"},
    +		{"OnesCount64", Func, 9, "func(x uint64) int"},
    +		{"OnesCount8", Func, 9, "func(x uint8) int"},
    +		{"Rem", Func, 14, "func(hi uint, lo uint, y uint) uint"},
    +		{"Rem32", Func, 14, "func(hi uint32, lo uint32, y uint32) uint32"},
    +		{"Rem64", Func, 14, "func(hi uint64, lo uint64, y uint64) uint64"},
    +		{"Reverse", Func, 9, "func(x uint) uint"},
    +		{"Reverse16", Func, 9, "func(x uint16) uint16"},
    +		{"Reverse32", Func, 9, "func(x uint32) uint32"},
    +		{"Reverse64", Func, 9, "func(x uint64) uint64"},
    +		{"Reverse8", Func, 9, "func(x uint8) uint8"},
    +		{"ReverseBytes", Func, 9, "func(x uint) uint"},
    +		{"ReverseBytes16", Func, 9, "func(x uint16) uint16"},
    +		{"ReverseBytes32", Func, 9, "func(x uint32) uint32"},
    +		{"ReverseBytes64", Func, 9, "func(x uint64) uint64"},
    +		{"RotateLeft", Func, 9, "func(x uint, k int) uint"},
    +		{"RotateLeft16", Func, 9, "func(x uint16, k int) uint16"},
    +		{"RotateLeft32", Func, 9, "func(x uint32, k int) uint32"},
    +		{"RotateLeft64", Func, 9, "func(x uint64, k int) uint64"},
    +		{"RotateLeft8", Func, 9, "func(x uint8, k int) uint8"},
    +		{"Sub", Func, 12, "func(x uint, y uint, borrow uint) (diff uint, borrowOut uint)"},
    +		{"Sub32", Func, 12, "func(x uint32, y uint32, borrow uint32) (diff uint32, borrowOut uint32)"},
    +		{"Sub64", Func, 12, "func(x uint64, y uint64, borrow uint64) (diff uint64, borrowOut uint64)"},
    +		{"TrailingZeros", Func, 9, "func(x uint) int"},
    +		{"TrailingZeros16", Func, 9, "func(x uint16) int"},
    +		{"TrailingZeros32", Func, 9, "func(x uint32) int"},
    +		{"TrailingZeros64", Func, 9, "func(x uint64) int"},
    +		{"TrailingZeros8", Func, 9, "func(x uint8) int"},
    +		{"UintSize", Const, 9, ""},
     	},
     	"math/cmplx": {
    -		{"Abs", Func, 0},
    -		{"Acos", Func, 0},
    -		{"Acosh", Func, 0},
    -		{"Asin", Func, 0},
    -		{"Asinh", Func, 0},
    -		{"Atan", Func, 0},
    -		{"Atanh", Func, 0},
    -		{"Conj", Func, 0},
    -		{"Cos", Func, 0},
    -		{"Cosh", Func, 0},
    -		{"Cot", Func, 0},
    -		{"Exp", Func, 0},
    -		{"Inf", Func, 0},
    -		{"IsInf", Func, 0},
    -		{"IsNaN", Func, 0},
    -		{"Log", Func, 0},
    -		{"Log10", Func, 0},
    -		{"NaN", Func, 0},
    -		{"Phase", Func, 0},
    -		{"Polar", Func, 0},
    -		{"Pow", Func, 0},
    -		{"Rect", Func, 0},
    -		{"Sin", Func, 0},
    -		{"Sinh", Func, 0},
    -		{"Sqrt", Func, 0},
    -		{"Tan", Func, 0},
    -		{"Tanh", Func, 0},
    +		{"Abs", Func, 0, "func(x complex128) float64"},
    +		{"Acos", Func, 0, "func(x complex128) complex128"},
    +		{"Acosh", Func, 0, "func(x complex128) complex128"},
    +		{"Asin", Func, 0, "func(x complex128) complex128"},
    +		{"Asinh", Func, 0, "func(x complex128) complex128"},
    +		{"Atan", Func, 0, "func(x complex128) complex128"},
    +		{"Atanh", Func, 0, "func(x complex128) complex128"},
    +		{"Conj", Func, 0, "func(x complex128) complex128"},
    +		{"Cos", Func, 0, "func(x complex128) complex128"},
    +		{"Cosh", Func, 0, "func(x complex128) complex128"},
    +		{"Cot", Func, 0, "func(x complex128) complex128"},
    +		{"Exp", Func, 0, "func(x complex128) complex128"},
    +		{"Inf", Func, 0, "func() complex128"},
    +		{"IsInf", Func, 0, "func(x complex128) bool"},
    +		{"IsNaN", Func, 0, "func(x complex128) bool"},
    +		{"Log", Func, 0, "func(x complex128) complex128"},
    +		{"Log10", Func, 0, "func(x complex128) complex128"},
    +		{"NaN", Func, 0, "func() complex128"},
    +		{"Phase", Func, 0, "func(x complex128) float64"},
    +		{"Polar", Func, 0, "func(x complex128) (r float64, θ float64)"},
    +		{"Pow", Func, 0, "func(x complex128, y complex128) complex128"},
    +		{"Rect", Func, 0, "func(r float64, θ float64) complex128"},
    +		{"Sin", Func, 0, "func(x complex128) complex128"},
    +		{"Sinh", Func, 0, "func(x complex128) complex128"},
    +		{"Sqrt", Func, 0, "func(x complex128) complex128"},
    +		{"Tan", Func, 0, "func(x complex128) complex128"},
    +		{"Tanh", Func, 0, "func(x complex128) complex128"},
     	},
     	"math/rand": {
    -		{"(*Rand).ExpFloat64", Method, 0},
    -		{"(*Rand).Float32", Method, 0},
    -		{"(*Rand).Float64", Method, 0},
    -		{"(*Rand).Int", Method, 0},
    -		{"(*Rand).Int31", Method, 0},
    -		{"(*Rand).Int31n", Method, 0},
    -		{"(*Rand).Int63", Method, 0},
    -		{"(*Rand).Int63n", Method, 0},
    -		{"(*Rand).Intn", Method, 0},
    -		{"(*Rand).NormFloat64", Method, 0},
    -		{"(*Rand).Perm", Method, 0},
    -		{"(*Rand).Read", Method, 6},
    -		{"(*Rand).Seed", Method, 0},
    -		{"(*Rand).Shuffle", Method, 10},
    -		{"(*Rand).Uint32", Method, 0},
    -		{"(*Rand).Uint64", Method, 8},
    -		{"(*Zipf).Uint64", Method, 0},
    -		{"ExpFloat64", Func, 0},
    -		{"Float32", Func, 0},
    -		{"Float64", Func, 0},
    -		{"Int", Func, 0},
    -		{"Int31", Func, 0},
    -		{"Int31n", Func, 0},
    -		{"Int63", Func, 0},
    -		{"Int63n", Func, 0},
    -		{"Intn", Func, 0},
    -		{"New", Func, 0},
    -		{"NewSource", Func, 0},
    -		{"NewZipf", Func, 0},
    -		{"NormFloat64", Func, 0},
    -		{"Perm", Func, 0},
    -		{"Rand", Type, 0},
    -		{"Read", Func, 6},
    -		{"Seed", Func, 0},
    -		{"Shuffle", Func, 10},
    -		{"Source", Type, 0},
    -		{"Source64", Type, 8},
    -		{"Uint32", Func, 0},
    -		{"Uint64", Func, 8},
    -		{"Zipf", Type, 0},
    +		{"(*Rand).ExpFloat64", Method, 0, ""},
    +		{"(*Rand).Float32", Method, 0, ""},
    +		{"(*Rand).Float64", Method, 0, ""},
    +		{"(*Rand).Int", Method, 0, ""},
    +		{"(*Rand).Int31", Method, 0, ""},
    +		{"(*Rand).Int31n", Method, 0, ""},
    +		{"(*Rand).Int63", Method, 0, ""},
    +		{"(*Rand).Int63n", Method, 0, ""},
    +		{"(*Rand).Intn", Method, 0, ""},
    +		{"(*Rand).NormFloat64", Method, 0, ""},
    +		{"(*Rand).Perm", Method, 0, ""},
    +		{"(*Rand).Read", Method, 6, ""},
    +		{"(*Rand).Seed", Method, 0, ""},
    +		{"(*Rand).Shuffle", Method, 10, ""},
    +		{"(*Rand).Uint32", Method, 0, ""},
    +		{"(*Rand).Uint64", Method, 8, ""},
    +		{"(*Zipf).Uint64", Method, 0, ""},
    +		{"ExpFloat64", Func, 0, "func() float64"},
    +		{"Float32", Func, 0, "func() float32"},
    +		{"Float64", Func, 0, "func() float64"},
    +		{"Int", Func, 0, "func() int"},
    +		{"Int31", Func, 0, "func() int32"},
    +		{"Int31n", Func, 0, "func(n int32) int32"},
    +		{"Int63", Func, 0, "func() int64"},
    +		{"Int63n", Func, 0, "func(n int64) int64"},
    +		{"Intn", Func, 0, "func(n int) int"},
    +		{"New", Func, 0, "func(src Source) *Rand"},
    +		{"NewSource", Func, 0, "func(seed int64) Source"},
    +		{"NewZipf", Func, 0, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
    +		{"NormFloat64", Func, 0, "func() float64"},
    +		{"Perm", Func, 0, "func(n int) []int"},
    +		{"Rand", Type, 0, ""},
    +		{"Read", Func, 6, "func(p []byte) (n int, err error)"},
    +		{"Seed", Func, 0, "func(seed int64)"},
    +		{"Shuffle", Func, 10, "func(n int, swap func(i int, j int))"},
    +		{"Source", Type, 0, ""},
    +		{"Source64", Type, 8, ""},
    +		{"Uint32", Func, 0, "func() uint32"},
    +		{"Uint64", Func, 8, "func() uint64"},
    +		{"Zipf", Type, 0, ""},
     	},
     	"math/rand/v2": {
    -		{"(*ChaCha8).AppendBinary", Method, 24},
    -		{"(*ChaCha8).MarshalBinary", Method, 22},
    -		{"(*ChaCha8).Read", Method, 23},
    -		{"(*ChaCha8).Seed", Method, 22},
    -		{"(*ChaCha8).Uint64", Method, 22},
    -		{"(*ChaCha8).UnmarshalBinary", Method, 22},
    -		{"(*PCG).AppendBinary", Method, 24},
    -		{"(*PCG).MarshalBinary", Method, 22},
    -		{"(*PCG).Seed", Method, 22},
    -		{"(*PCG).Uint64", Method, 22},
    -		{"(*PCG).UnmarshalBinary", Method, 22},
    -		{"(*Rand).ExpFloat64", Method, 22},
    -		{"(*Rand).Float32", Method, 22},
    -		{"(*Rand).Float64", Method, 22},
    -		{"(*Rand).Int", Method, 22},
    -		{"(*Rand).Int32", Method, 22},
    -		{"(*Rand).Int32N", Method, 22},
    -		{"(*Rand).Int64", Method, 22},
    -		{"(*Rand).Int64N", Method, 22},
    -		{"(*Rand).IntN", Method, 22},
    -		{"(*Rand).NormFloat64", Method, 22},
    -		{"(*Rand).Perm", Method, 22},
    -		{"(*Rand).Shuffle", Method, 22},
    -		{"(*Rand).Uint", Method, 23},
    -		{"(*Rand).Uint32", Method, 22},
    -		{"(*Rand).Uint32N", Method, 22},
    -		{"(*Rand).Uint64", Method, 22},
    -		{"(*Rand).Uint64N", Method, 22},
    -		{"(*Rand).UintN", Method, 22},
    -		{"(*Zipf).Uint64", Method, 22},
    -		{"ChaCha8", Type, 22},
    -		{"ExpFloat64", Func, 22},
    -		{"Float32", Func, 22},
    -		{"Float64", Func, 22},
    -		{"Int", Func, 22},
    -		{"Int32", Func, 22},
    -		{"Int32N", Func, 22},
    -		{"Int64", Func, 22},
    -		{"Int64N", Func, 22},
    -		{"IntN", Func, 22},
    -		{"N", Func, 22},
    -		{"New", Func, 22},
    -		{"NewChaCha8", Func, 22},
    -		{"NewPCG", Func, 22},
    -		{"NewZipf", Func, 22},
    -		{"NormFloat64", Func, 22},
    -		{"PCG", Type, 22},
    -		{"Perm", Func, 22},
    -		{"Rand", Type, 22},
    -		{"Shuffle", Func, 22},
    -		{"Source", Type, 22},
    -		{"Uint", Func, 23},
    -		{"Uint32", Func, 22},
    -		{"Uint32N", Func, 22},
    -		{"Uint64", Func, 22},
    -		{"Uint64N", Func, 22},
    -		{"UintN", Func, 22},
    -		{"Zipf", Type, 22},
    +		{"(*ChaCha8).AppendBinary", Method, 24, ""},
    +		{"(*ChaCha8).MarshalBinary", Method, 22, ""},
    +		{"(*ChaCha8).Read", Method, 23, ""},
    +		{"(*ChaCha8).Seed", Method, 22, ""},
    +		{"(*ChaCha8).Uint64", Method, 22, ""},
    +		{"(*ChaCha8).UnmarshalBinary", Method, 22, ""},
    +		{"(*PCG).AppendBinary", Method, 24, ""},
    +		{"(*PCG).MarshalBinary", Method, 22, ""},
    +		{"(*PCG).Seed", Method, 22, ""},
    +		{"(*PCG).Uint64", Method, 22, ""},
    +		{"(*PCG).UnmarshalBinary", Method, 22, ""},
    +		{"(*Rand).ExpFloat64", Method, 22, ""},
    +		{"(*Rand).Float32", Method, 22, ""},
    +		{"(*Rand).Float64", Method, 22, ""},
    +		{"(*Rand).Int", Method, 22, ""},
    +		{"(*Rand).Int32", Method, 22, ""},
    +		{"(*Rand).Int32N", Method, 22, ""},
    +		{"(*Rand).Int64", Method, 22, ""},
    +		{"(*Rand).Int64N", Method, 22, ""},
    +		{"(*Rand).IntN", Method, 22, ""},
    +		{"(*Rand).NormFloat64", Method, 22, ""},
    +		{"(*Rand).Perm", Method, 22, ""},
    +		{"(*Rand).Shuffle", Method, 22, ""},
    +		{"(*Rand).Uint", Method, 23, ""},
    +		{"(*Rand).Uint32", Method, 22, ""},
    +		{"(*Rand).Uint32N", Method, 22, ""},
    +		{"(*Rand).Uint64", Method, 22, ""},
    +		{"(*Rand).Uint64N", Method, 22, ""},
    +		{"(*Rand).UintN", Method, 22, ""},
    +		{"(*Zipf).Uint64", Method, 22, ""},
    +		{"ChaCha8", Type, 22, ""},
    +		{"ExpFloat64", Func, 22, "func() float64"},
    +		{"Float32", Func, 22, "func() float32"},
    +		{"Float64", Func, 22, "func() float64"},
    +		{"Int", Func, 22, "func() int"},
    +		{"Int32", Func, 22, "func() int32"},
    +		{"Int32N", Func, 22, "func(n int32) int32"},
    +		{"Int64", Func, 22, "func() int64"},
    +		{"Int64N", Func, 22, "func(n int64) int64"},
    +		{"IntN", Func, 22, "func(n int) int"},
    +		{"N", Func, 22, "func[Int intType](n Int) Int"},
    +		{"New", Func, 22, "func(src Source) *Rand"},
    +		{"NewChaCha8", Func, 22, "func(seed [32]byte) *ChaCha8"},
    +		{"NewPCG", Func, 22, "func(seed1 uint64, seed2 uint64) *PCG"},
    +		{"NewZipf", Func, 22, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
    +		{"NormFloat64", Func, 22, "func() float64"},
    +		{"PCG", Type, 22, ""},
    +		{"Perm", Func, 22, "func(n int) []int"},
    +		{"Rand", Type, 22, ""},
    +		{"Shuffle", Func, 22, "func(n int, swap func(i int, j int))"},
    +		{"Source", Type, 22, ""},
    +		{"Uint", Func, 23, "func() uint"},
    +		{"Uint32", Func, 22, "func() uint32"},
    +		{"Uint32N", Func, 22, "func(n uint32) uint32"},
    +		{"Uint64", Func, 22, "func() uint64"},
    +		{"Uint64N", Func, 22, "func(n uint64) uint64"},
    +		{"UintN", Func, 22, "func(n uint) uint"},
    +		{"Zipf", Type, 22, ""},
     	},
     	"mime": {
    -		{"(*WordDecoder).Decode", Method, 5},
    -		{"(*WordDecoder).DecodeHeader", Method, 5},
    -		{"(WordEncoder).Encode", Method, 5},
    -		{"AddExtensionType", Func, 0},
    -		{"BEncoding", Const, 5},
    -		{"ErrInvalidMediaParameter", Var, 9},
    -		{"ExtensionsByType", Func, 5},
    -		{"FormatMediaType", Func, 0},
    -		{"ParseMediaType", Func, 0},
    -		{"QEncoding", Const, 5},
    -		{"TypeByExtension", Func, 0},
    -		{"WordDecoder", Type, 5},
    -		{"WordDecoder.CharsetReader", Field, 5},
    -		{"WordEncoder", Type, 5},
    +		{"(*WordDecoder).Decode", Method, 5, ""},
    +		{"(*WordDecoder).DecodeHeader", Method, 5, ""},
    +		{"(WordEncoder).Encode", Method, 5, ""},
    +		{"AddExtensionType", Func, 0, "func(ext string, typ string) error"},
    +		{"BEncoding", Const, 5, ""},
    +		{"ErrInvalidMediaParameter", Var, 9, ""},
    +		{"ExtensionsByType", Func, 5, "func(typ string) ([]string, error)"},
    +		{"FormatMediaType", Func, 0, "func(t string, param map[string]string) string"},
    +		{"ParseMediaType", Func, 0, "func(v string) (mediatype string, params map[string]string, err error)"},
    +		{"QEncoding", Const, 5, ""},
    +		{"TypeByExtension", Func, 0, "func(ext string) string"},
    +		{"WordDecoder", Type, 5, ""},
    +		{"WordDecoder.CharsetReader", Field, 5, ""},
    +		{"WordEncoder", Type, 5, ""},
     	},
     	"mime/multipart": {
    -		{"(*FileHeader).Open", Method, 0},
    -		{"(*Form).RemoveAll", Method, 0},
    -		{"(*Part).Close", Method, 0},
    -		{"(*Part).FileName", Method, 0},
    -		{"(*Part).FormName", Method, 0},
    -		{"(*Part).Read", Method, 0},
    -		{"(*Reader).NextPart", Method, 0},
    -		{"(*Reader).NextRawPart", Method, 14},
    -		{"(*Reader).ReadForm", Method, 0},
    -		{"(*Writer).Boundary", Method, 0},
    -		{"(*Writer).Close", Method, 0},
    -		{"(*Writer).CreateFormField", Method, 0},
    -		{"(*Writer).CreateFormFile", Method, 0},
    -		{"(*Writer).CreatePart", Method, 0},
    -		{"(*Writer).FormDataContentType", Method, 0},
    -		{"(*Writer).SetBoundary", Method, 1},
    -		{"(*Writer).WriteField", Method, 0},
    -		{"ErrMessageTooLarge", Var, 9},
    -		{"File", Type, 0},
    -		{"FileHeader", Type, 0},
    -		{"FileHeader.Filename", Field, 0},
    -		{"FileHeader.Header", Field, 0},
    -		{"FileHeader.Size", Field, 9},
    -		{"Form", Type, 0},
    -		{"Form.File", Field, 0},
    -		{"Form.Value", Field, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"Part", Type, 0},
    -		{"Part.Header", Field, 0},
    -		{"Reader", Type, 0},
    -		{"Writer", Type, 0},
    +		{"(*FileHeader).Open", Method, 0, ""},
    +		{"(*Form).RemoveAll", Method, 0, ""},
    +		{"(*Part).Close", Method, 0, ""},
    +		{"(*Part).FileName", Method, 0, ""},
    +		{"(*Part).FormName", Method, 0, ""},
    +		{"(*Part).Read", Method, 0, ""},
    +		{"(*Reader).NextPart", Method, 0, ""},
    +		{"(*Reader).NextRawPart", Method, 14, ""},
    +		{"(*Reader).ReadForm", Method, 0, ""},
    +		{"(*Writer).Boundary", Method, 0, ""},
    +		{"(*Writer).Close", Method, 0, ""},
    +		{"(*Writer).CreateFormField", Method, 0, ""},
    +		{"(*Writer).CreateFormFile", Method, 0, ""},
    +		{"(*Writer).CreatePart", Method, 0, ""},
    +		{"(*Writer).FormDataContentType", Method, 0, ""},
    +		{"(*Writer).SetBoundary", Method, 1, ""},
    +		{"(*Writer).WriteField", Method, 0, ""},
    +		{"ErrMessageTooLarge", Var, 9, ""},
    +		{"File", Type, 0, ""},
    +		{"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"},
    +		{"FileHeader", Type, 0, ""},
    +		{"FileHeader.Filename", Field, 0, ""},
    +		{"FileHeader.Header", Field, 0, ""},
    +		{"FileHeader.Size", Field, 9, ""},
    +		{"Form", Type, 0, ""},
    +		{"Form.File", Field, 0, ""},
    +		{"Form.Value", Field, 0, ""},
    +		{"NewReader", Func, 0, "func(r io.Reader, boundary string) *Reader"},
    +		{"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
    +		{"Part", Type, 0, ""},
    +		{"Part.Header", Field, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"mime/quotedprintable": {
    -		{"(*Reader).Read", Method, 5},
    -		{"(*Writer).Close", Method, 5},
    -		{"(*Writer).Write", Method, 5},
    -		{"NewReader", Func, 5},
    -		{"NewWriter", Func, 5},
    -		{"Reader", Type, 5},
    -		{"Writer", Type, 5},
    -		{"Writer.Binary", Field, 5},
    +		{"(*Reader).Read", Method, 5, ""},
    +		{"(*Writer).Close", Method, 5, ""},
    +		{"(*Writer).Write", Method, 5, ""},
    +		{"NewReader", Func, 5, "func(r io.Reader) *Reader"},
    +		{"NewWriter", Func, 5, "func(w io.Writer) *Writer"},
    +		{"Reader", Type, 5, ""},
    +		{"Writer", Type, 5, ""},
    +		{"Writer.Binary", Field, 5, ""},
     	},
     	"net": {
    -		{"(*AddrError).Error", Method, 0},
    -		{"(*AddrError).Temporary", Method, 0},
    -		{"(*AddrError).Timeout", Method, 0},
    -		{"(*Buffers).Read", Method, 8},
    -		{"(*Buffers).WriteTo", Method, 8},
    -		{"(*DNSConfigError).Error", Method, 0},
    -		{"(*DNSConfigError).Temporary", Method, 0},
    -		{"(*DNSConfigError).Timeout", Method, 0},
    -		{"(*DNSConfigError).Unwrap", Method, 13},
    -		{"(*DNSError).Error", Method, 0},
    -		{"(*DNSError).Temporary", Method, 0},
    -		{"(*DNSError).Timeout", Method, 0},
    -		{"(*DNSError).Unwrap", Method, 23},
    -		{"(*Dialer).Dial", Method, 1},
    -		{"(*Dialer).DialContext", Method, 7},
    -		{"(*Dialer).MultipathTCP", Method, 21},
    -		{"(*Dialer).SetMultipathTCP", Method, 21},
    -		{"(*IP).UnmarshalText", Method, 2},
    -		{"(*IPAddr).Network", Method, 0},
    -		{"(*IPAddr).String", Method, 0},
    -		{"(*IPConn).Close", Method, 0},
    -		{"(*IPConn).File", Method, 0},
    -		{"(*IPConn).LocalAddr", Method, 0},
    -		{"(*IPConn).Read", Method, 0},
    -		{"(*IPConn).ReadFrom", Method, 0},
    -		{"(*IPConn).ReadFromIP", Method, 0},
    -		{"(*IPConn).ReadMsgIP", Method, 1},
    -		{"(*IPConn).RemoteAddr", Method, 0},
    -		{"(*IPConn).SetDeadline", Method, 0},
    -		{"(*IPConn).SetReadBuffer", Method, 0},
    -		{"(*IPConn).SetReadDeadline", Method, 0},
    -		{"(*IPConn).SetWriteBuffer", Method, 0},
    -		{"(*IPConn).SetWriteDeadline", Method, 0},
    -		{"(*IPConn).SyscallConn", Method, 9},
    -		{"(*IPConn).Write", Method, 0},
    -		{"(*IPConn).WriteMsgIP", Method, 1},
    -		{"(*IPConn).WriteTo", Method, 0},
    -		{"(*IPConn).WriteToIP", Method, 0},
    -		{"(*IPNet).Contains", Method, 0},
    -		{"(*IPNet).Network", Method, 0},
    -		{"(*IPNet).String", Method, 0},
    -		{"(*Interface).Addrs", Method, 0},
    -		{"(*Interface).MulticastAddrs", Method, 0},
    -		{"(*ListenConfig).Listen", Method, 11},
    -		{"(*ListenConfig).ListenPacket", Method, 11},
    -		{"(*ListenConfig).MultipathTCP", Method, 21},
    -		{"(*ListenConfig).SetMultipathTCP", Method, 21},
    -		{"(*OpError).Error", Method, 0},
    -		{"(*OpError).Temporary", Method, 0},
    -		{"(*OpError).Timeout", Method, 0},
    -		{"(*OpError).Unwrap", Method, 13},
    -		{"(*ParseError).Error", Method, 0},
    -		{"(*ParseError).Temporary", Method, 17},
    -		{"(*ParseError).Timeout", Method, 17},
    -		{"(*Resolver).LookupAddr", Method, 8},
    -		{"(*Resolver).LookupCNAME", Method, 8},
    -		{"(*Resolver).LookupHost", Method, 8},
    -		{"(*Resolver).LookupIP", Method, 15},
    -		{"(*Resolver).LookupIPAddr", Method, 8},
    -		{"(*Resolver).LookupMX", Method, 8},
    -		{"(*Resolver).LookupNS", Method, 8},
    -		{"(*Resolver).LookupNetIP", Method, 18},
    -		{"(*Resolver).LookupPort", Method, 8},
    -		{"(*Resolver).LookupSRV", Method, 8},
    -		{"(*Resolver).LookupTXT", Method, 8},
    -		{"(*TCPAddr).AddrPort", Method, 18},
    -		{"(*TCPAddr).Network", Method, 0},
    -		{"(*TCPAddr).String", Method, 0},
    -		{"(*TCPConn).Close", Method, 0},
    -		{"(*TCPConn).CloseRead", Method, 0},
    -		{"(*TCPConn).CloseWrite", Method, 0},
    -		{"(*TCPConn).File", Method, 0},
    -		{"(*TCPConn).LocalAddr", Method, 0},
    -		{"(*TCPConn).MultipathTCP", Method, 21},
    -		{"(*TCPConn).Read", Method, 0},
    -		{"(*TCPConn).ReadFrom", Method, 0},
    -		{"(*TCPConn).RemoteAddr", Method, 0},
    -		{"(*TCPConn).SetDeadline", Method, 0},
    -		{"(*TCPConn).SetKeepAlive", Method, 0},
    -		{"(*TCPConn).SetKeepAliveConfig", Method, 23},
    -		{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
    -		{"(*TCPConn).SetLinger", Method, 0},
    -		{"(*TCPConn).SetNoDelay", Method, 0},
    -		{"(*TCPConn).SetReadBuffer", Method, 0},
    -		{"(*TCPConn).SetReadDeadline", Method, 0},
    -		{"(*TCPConn).SetWriteBuffer", Method, 0},
    -		{"(*TCPConn).SetWriteDeadline", Method, 0},
    -		{"(*TCPConn).SyscallConn", Method, 9},
    -		{"(*TCPConn).Write", Method, 0},
    -		{"(*TCPConn).WriteTo", Method, 22},
    -		{"(*TCPListener).Accept", Method, 0},
    -		{"(*TCPListener).AcceptTCP", Method, 0},
    -		{"(*TCPListener).Addr", Method, 0},
    -		{"(*TCPListener).Close", Method, 0},
    -		{"(*TCPListener).File", Method, 0},
    -		{"(*TCPListener).SetDeadline", Method, 0},
    -		{"(*TCPListener).SyscallConn", Method, 10},
    -		{"(*UDPAddr).AddrPort", Method, 18},
    -		{"(*UDPAddr).Network", Method, 0},
    -		{"(*UDPAddr).String", Method, 0},
    -		{"(*UDPConn).Close", Method, 0},
    -		{"(*UDPConn).File", Method, 0},
    -		{"(*UDPConn).LocalAddr", Method, 0},
    -		{"(*UDPConn).Read", Method, 0},
    -		{"(*UDPConn).ReadFrom", Method, 0},
    -		{"(*UDPConn).ReadFromUDP", Method, 0},
    -		{"(*UDPConn).ReadFromUDPAddrPort", Method, 18},
    -		{"(*UDPConn).ReadMsgUDP", Method, 1},
    -		{"(*UDPConn).ReadMsgUDPAddrPort", Method, 18},
    -		{"(*UDPConn).RemoteAddr", Method, 0},
    -		{"(*UDPConn).SetDeadline", Method, 0},
    -		{"(*UDPConn).SetReadBuffer", Method, 0},
    -		{"(*UDPConn).SetReadDeadline", Method, 0},
    -		{"(*UDPConn).SetWriteBuffer", Method, 0},
    -		{"(*UDPConn).SetWriteDeadline", Method, 0},
    -		{"(*UDPConn).SyscallConn", Method, 9},
    -		{"(*UDPConn).Write", Method, 0},
    -		{"(*UDPConn).WriteMsgUDP", Method, 1},
    -		{"(*UDPConn).WriteMsgUDPAddrPort", Method, 18},
    -		{"(*UDPConn).WriteTo", Method, 0},
    -		{"(*UDPConn).WriteToUDP", Method, 0},
    -		{"(*UDPConn).WriteToUDPAddrPort", Method, 18},
    -		{"(*UnixAddr).Network", Method, 0},
    -		{"(*UnixAddr).String", Method, 0},
    -		{"(*UnixConn).Close", Method, 0},
    -		{"(*UnixConn).CloseRead", Method, 1},
    -		{"(*UnixConn).CloseWrite", Method, 1},
    -		{"(*UnixConn).File", Method, 0},
    -		{"(*UnixConn).LocalAddr", Method, 0},
    -		{"(*UnixConn).Read", Method, 0},
    -		{"(*UnixConn).ReadFrom", Method, 0},
    -		{"(*UnixConn).ReadFromUnix", Method, 0},
    -		{"(*UnixConn).ReadMsgUnix", Method, 0},
    -		{"(*UnixConn).RemoteAddr", Method, 0},
    -		{"(*UnixConn).SetDeadline", Method, 0},
    -		{"(*UnixConn).SetReadBuffer", Method, 0},
    -		{"(*UnixConn).SetReadDeadline", Method, 0},
    -		{"(*UnixConn).SetWriteBuffer", Method, 0},
    -		{"(*UnixConn).SetWriteDeadline", Method, 0},
    -		{"(*UnixConn).SyscallConn", Method, 9},
    -		{"(*UnixConn).Write", Method, 0},
    -		{"(*UnixConn).WriteMsgUnix", Method, 0},
    -		{"(*UnixConn).WriteTo", Method, 0},
    -		{"(*UnixConn).WriteToUnix", Method, 0},
    -		{"(*UnixListener).Accept", Method, 0},
    -		{"(*UnixListener).AcceptUnix", Method, 0},
    -		{"(*UnixListener).Addr", Method, 0},
    -		{"(*UnixListener).Close", Method, 0},
    -		{"(*UnixListener).File", Method, 0},
    -		{"(*UnixListener).SetDeadline", Method, 0},
    -		{"(*UnixListener).SetUnlinkOnClose", Method, 8},
    -		{"(*UnixListener).SyscallConn", Method, 10},
    -		{"(Flags).String", Method, 0},
    -		{"(HardwareAddr).String", Method, 0},
    -		{"(IP).AppendText", Method, 24},
    -		{"(IP).DefaultMask", Method, 0},
    -		{"(IP).Equal", Method, 0},
    -		{"(IP).IsGlobalUnicast", Method, 0},
    -		{"(IP).IsInterfaceLocalMulticast", Method, 0},
    -		{"(IP).IsLinkLocalMulticast", Method, 0},
    -		{"(IP).IsLinkLocalUnicast", Method, 0},
    -		{"(IP).IsLoopback", Method, 0},
    -		{"(IP).IsMulticast", Method, 0},
    -		{"(IP).IsPrivate", Method, 17},
    -		{"(IP).IsUnspecified", Method, 0},
    -		{"(IP).MarshalText", Method, 2},
    -		{"(IP).Mask", Method, 0},
    -		{"(IP).String", Method, 0},
    -		{"(IP).To16", Method, 0},
    -		{"(IP).To4", Method, 0},
    -		{"(IPMask).Size", Method, 0},
    -		{"(IPMask).String", Method, 0},
    -		{"(InvalidAddrError).Error", Method, 0},
    -		{"(InvalidAddrError).Temporary", Method, 0},
    -		{"(InvalidAddrError).Timeout", Method, 0},
    -		{"(UnknownNetworkError).Error", Method, 0},
    -		{"(UnknownNetworkError).Temporary", Method, 0},
    -		{"(UnknownNetworkError).Timeout", Method, 0},
    -		{"Addr", Type, 0},
    -		{"AddrError", Type, 0},
    -		{"AddrError.Addr", Field, 0},
    -		{"AddrError.Err", Field, 0},
    -		{"Buffers", Type, 8},
    -		{"CIDRMask", Func, 0},
    -		{"Conn", Type, 0},
    -		{"DNSConfigError", Type, 0},
    -		{"DNSConfigError.Err", Field, 0},
    -		{"DNSError", Type, 0},
    -		{"DNSError.Err", Field, 0},
    -		{"DNSError.IsNotFound", Field, 13},
    -		{"DNSError.IsTemporary", Field, 6},
    -		{"DNSError.IsTimeout", Field, 0},
    -		{"DNSError.Name", Field, 0},
    -		{"DNSError.Server", Field, 0},
    -		{"DNSError.UnwrapErr", Field, 23},
    -		{"DefaultResolver", Var, 8},
    -		{"Dial", Func, 0},
    -		{"DialIP", Func, 0},
    -		{"DialTCP", Func, 0},
    -		{"DialTimeout", Func, 0},
    -		{"DialUDP", Func, 0},
    -		{"DialUnix", Func, 0},
    -		{"Dialer", Type, 1},
    -		{"Dialer.Cancel", Field, 6},
    -		{"Dialer.Control", Field, 11},
    -		{"Dialer.ControlContext", Field, 20},
    -		{"Dialer.Deadline", Field, 1},
    -		{"Dialer.DualStack", Field, 2},
    -		{"Dialer.FallbackDelay", Field, 5},
    -		{"Dialer.KeepAlive", Field, 3},
    -		{"Dialer.KeepAliveConfig", Field, 23},
    -		{"Dialer.LocalAddr", Field, 1},
    -		{"Dialer.Resolver", Field, 8},
    -		{"Dialer.Timeout", Field, 1},
    -		{"ErrClosed", Var, 16},
    -		{"ErrWriteToConnected", Var, 0},
    -		{"Error", Type, 0},
    -		{"FileConn", Func, 0},
    -		{"FileListener", Func, 0},
    -		{"FilePacketConn", Func, 0},
    -		{"FlagBroadcast", Const, 0},
    -		{"FlagLoopback", Const, 0},
    -		{"FlagMulticast", Const, 0},
    -		{"FlagPointToPoint", Const, 0},
    -		{"FlagRunning", Const, 20},
    -		{"FlagUp", Const, 0},
    -		{"Flags", Type, 0},
    -		{"HardwareAddr", Type, 0},
    -		{"IP", Type, 0},
    -		{"IPAddr", Type, 0},
    -		{"IPAddr.IP", Field, 0},
    -		{"IPAddr.Zone", Field, 1},
    -		{"IPConn", Type, 0},
    -		{"IPMask", Type, 0},
    -		{"IPNet", Type, 0},
    -		{"IPNet.IP", Field, 0},
    -		{"IPNet.Mask", Field, 0},
    -		{"IPv4", Func, 0},
    -		{"IPv4Mask", Func, 0},
    -		{"IPv4allrouter", Var, 0},
    -		{"IPv4allsys", Var, 0},
    -		{"IPv4bcast", Var, 0},
    -		{"IPv4len", Const, 0},
    -		{"IPv4zero", Var, 0},
    -		{"IPv6interfacelocalallnodes", Var, 0},
    -		{"IPv6len", Const, 0},
    -		{"IPv6linklocalallnodes", Var, 0},
    -		{"IPv6linklocalallrouters", Var, 0},
    -		{"IPv6loopback", Var, 0},
    -		{"IPv6unspecified", Var, 0},
    -		{"IPv6zero", Var, 0},
    -		{"Interface", Type, 0},
    -		{"Interface.Flags", Field, 0},
    -		{"Interface.HardwareAddr", Field, 0},
    -		{"Interface.Index", Field, 0},
    -		{"Interface.MTU", Field, 0},
    -		{"Interface.Name", Field, 0},
    -		{"InterfaceAddrs", Func, 0},
    -		{"InterfaceByIndex", Func, 0},
    -		{"InterfaceByName", Func, 0},
    -		{"Interfaces", Func, 0},
    -		{"InvalidAddrError", Type, 0},
    -		{"JoinHostPort", Func, 0},
    -		{"KeepAliveConfig", Type, 23},
    -		{"KeepAliveConfig.Count", Field, 23},
    -		{"KeepAliveConfig.Enable", Field, 23},
    -		{"KeepAliveConfig.Idle", Field, 23},
    -		{"KeepAliveConfig.Interval", Field, 23},
    -		{"Listen", Func, 0},
    -		{"ListenConfig", Type, 11},
    -		{"ListenConfig.Control", Field, 11},
    -		{"ListenConfig.KeepAlive", Field, 13},
    -		{"ListenConfig.KeepAliveConfig", Field, 23},
    -		{"ListenIP", Func, 0},
    -		{"ListenMulticastUDP", Func, 0},
    -		{"ListenPacket", Func, 0},
    -		{"ListenTCP", Func, 0},
    -		{"ListenUDP", Func, 0},
    -		{"ListenUnix", Func, 0},
    -		{"ListenUnixgram", Func, 0},
    -		{"Listener", Type, 0},
    -		{"LookupAddr", Func, 0},
    -		{"LookupCNAME", Func, 0},
    -		{"LookupHost", Func, 0},
    -		{"LookupIP", Func, 0},
    -		{"LookupMX", Func, 0},
    -		{"LookupNS", Func, 1},
    -		{"LookupPort", Func, 0},
    -		{"LookupSRV", Func, 0},
    -		{"LookupTXT", Func, 0},
    -		{"MX", Type, 0},
    -		{"MX.Host", Field, 0},
    -		{"MX.Pref", Field, 0},
    -		{"NS", Type, 1},
    -		{"NS.Host", Field, 1},
    -		{"OpError", Type, 0},
    -		{"OpError.Addr", Field, 0},
    -		{"OpError.Err", Field, 0},
    -		{"OpError.Net", Field, 0},
    -		{"OpError.Op", Field, 0},
    -		{"OpError.Source", Field, 5},
    -		{"PacketConn", Type, 0},
    -		{"ParseCIDR", Func, 0},
    -		{"ParseError", Type, 0},
    -		{"ParseError.Text", Field, 0},
    -		{"ParseError.Type", Field, 0},
    -		{"ParseIP", Func, 0},
    -		{"ParseMAC", Func, 0},
    -		{"Pipe", Func, 0},
    -		{"ResolveIPAddr", Func, 0},
    -		{"ResolveTCPAddr", Func, 0},
    -		{"ResolveUDPAddr", Func, 0},
    -		{"ResolveUnixAddr", Func, 0},
    -		{"Resolver", Type, 8},
    -		{"Resolver.Dial", Field, 9},
    -		{"Resolver.PreferGo", Field, 8},
    -		{"Resolver.StrictErrors", Field, 9},
    -		{"SRV", Type, 0},
    -		{"SRV.Port", Field, 0},
    -		{"SRV.Priority", Field, 0},
    -		{"SRV.Target", Field, 0},
    -		{"SRV.Weight", Field, 0},
    -		{"SplitHostPort", Func, 0},
    -		{"TCPAddr", Type, 0},
    -		{"TCPAddr.IP", Field, 0},
    -		{"TCPAddr.Port", Field, 0},
    -		{"TCPAddr.Zone", Field, 1},
    -		{"TCPAddrFromAddrPort", Func, 18},
    -		{"TCPConn", Type, 0},
    -		{"TCPListener", Type, 0},
    -		{"UDPAddr", Type, 0},
    -		{"UDPAddr.IP", Field, 0},
    -		{"UDPAddr.Port", Field, 0},
    -		{"UDPAddr.Zone", Field, 1},
    -		{"UDPAddrFromAddrPort", Func, 18},
    -		{"UDPConn", Type, 0},
    -		{"UnixAddr", Type, 0},
    -		{"UnixAddr.Name", Field, 0},
    -		{"UnixAddr.Net", Field, 0},
    -		{"UnixConn", Type, 0},
    -		{"UnixListener", Type, 0},
    -		{"UnknownNetworkError", Type, 0},
    +		{"(*AddrError).Error", Method, 0, ""},
    +		{"(*AddrError).Temporary", Method, 0, ""},
    +		{"(*AddrError).Timeout", Method, 0, ""},
    +		{"(*Buffers).Read", Method, 8, ""},
    +		{"(*Buffers).WriteTo", Method, 8, ""},
    +		{"(*DNSConfigError).Error", Method, 0, ""},
    +		{"(*DNSConfigError).Temporary", Method, 0, ""},
    +		{"(*DNSConfigError).Timeout", Method, 0, ""},
    +		{"(*DNSConfigError).Unwrap", Method, 13, ""},
    +		{"(*DNSError).Error", Method, 0, ""},
    +		{"(*DNSError).Temporary", Method, 0, ""},
    +		{"(*DNSError).Timeout", Method, 0, ""},
    +		{"(*DNSError).Unwrap", Method, 23, ""},
    +		{"(*Dialer).Dial", Method, 1, ""},
    +		{"(*Dialer).DialContext", Method, 7, ""},
    +		{"(*Dialer).MultipathTCP", Method, 21, ""},
    +		{"(*Dialer).SetMultipathTCP", Method, 21, ""},
    +		{"(*IP).UnmarshalText", Method, 2, ""},
    +		{"(*IPAddr).Network", Method, 0, ""},
    +		{"(*IPAddr).String", Method, 0, ""},
    +		{"(*IPConn).Close", Method, 0, ""},
    +		{"(*IPConn).File", Method, 0, ""},
    +		{"(*IPConn).LocalAddr", Method, 0, ""},
    +		{"(*IPConn).Read", Method, 0, ""},
    +		{"(*IPConn).ReadFrom", Method, 0, ""},
    +		{"(*IPConn).ReadFromIP", Method, 0, ""},
    +		{"(*IPConn).ReadMsgIP", Method, 1, ""},
    +		{"(*IPConn).RemoteAddr", Method, 0, ""},
    +		{"(*IPConn).SetDeadline", Method, 0, ""},
    +		{"(*IPConn).SetReadBuffer", Method, 0, ""},
    +		{"(*IPConn).SetReadDeadline", Method, 0, ""},
    +		{"(*IPConn).SetWriteBuffer", Method, 0, ""},
    +		{"(*IPConn).SetWriteDeadline", Method, 0, ""},
    +		{"(*IPConn).SyscallConn", Method, 9, ""},
    +		{"(*IPConn).Write", Method, 0, ""},
    +		{"(*IPConn).WriteMsgIP", Method, 1, ""},
    +		{"(*IPConn).WriteTo", Method, 0, ""},
    +		{"(*IPConn).WriteToIP", Method, 0, ""},
    +		{"(*IPNet).Contains", Method, 0, ""},
    +		{"(*IPNet).Network", Method, 0, ""},
    +		{"(*IPNet).String", Method, 0, ""},
    +		{"(*Interface).Addrs", Method, 0, ""},
    +		{"(*Interface).MulticastAddrs", Method, 0, ""},
    +		{"(*ListenConfig).Listen", Method, 11, ""},
    +		{"(*ListenConfig).ListenPacket", Method, 11, ""},
    +		{"(*ListenConfig).MultipathTCP", Method, 21, ""},
    +		{"(*ListenConfig).SetMultipathTCP", Method, 21, ""},
    +		{"(*OpError).Error", Method, 0, ""},
    +		{"(*OpError).Temporary", Method, 0, ""},
    +		{"(*OpError).Timeout", Method, 0, ""},
    +		{"(*OpError).Unwrap", Method, 13, ""},
    +		{"(*ParseError).Error", Method, 0, ""},
    +		{"(*ParseError).Temporary", Method, 17, ""},
    +		{"(*ParseError).Timeout", Method, 17, ""},
    +		{"(*Resolver).LookupAddr", Method, 8, ""},
    +		{"(*Resolver).LookupCNAME", Method, 8, ""},
    +		{"(*Resolver).LookupHost", Method, 8, ""},
    +		{"(*Resolver).LookupIP", Method, 15, ""},
    +		{"(*Resolver).LookupIPAddr", Method, 8, ""},
    +		{"(*Resolver).LookupMX", Method, 8, ""},
    +		{"(*Resolver).LookupNS", Method, 8, ""},
    +		{"(*Resolver).LookupNetIP", Method, 18, ""},
    +		{"(*Resolver).LookupPort", Method, 8, ""},
    +		{"(*Resolver).LookupSRV", Method, 8, ""},
    +		{"(*Resolver).LookupTXT", Method, 8, ""},
    +		{"(*TCPAddr).AddrPort", Method, 18, ""},
    +		{"(*TCPAddr).Network", Method, 0, ""},
    +		{"(*TCPAddr).String", Method, 0, ""},
    +		{"(*TCPConn).Close", Method, 0, ""},
    +		{"(*TCPConn).CloseRead", Method, 0, ""},
    +		{"(*TCPConn).CloseWrite", Method, 0, ""},
    +		{"(*TCPConn).File", Method, 0, ""},
    +		{"(*TCPConn).LocalAddr", Method, 0, ""},
    +		{"(*TCPConn).MultipathTCP", Method, 21, ""},
    +		{"(*TCPConn).Read", Method, 0, ""},
    +		{"(*TCPConn).ReadFrom", Method, 0, ""},
    +		{"(*TCPConn).RemoteAddr", Method, 0, ""},
    +		{"(*TCPConn).SetDeadline", Method, 0, ""},
    +		{"(*TCPConn).SetKeepAlive", Method, 0, ""},
    +		{"(*TCPConn).SetKeepAliveConfig", Method, 23, ""},
    +		{"(*TCPConn).SetKeepAlivePeriod", Method, 2, ""},
    +		{"(*TCPConn).SetLinger", Method, 0, ""},
    +		{"(*TCPConn).SetNoDelay", Method, 0, ""},
    +		{"(*TCPConn).SetReadBuffer", Method, 0, ""},
    +		{"(*TCPConn).SetReadDeadline", Method, 0, ""},
    +		{"(*TCPConn).SetWriteBuffer", Method, 0, ""},
    +		{"(*TCPConn).SetWriteDeadline", Method, 0, ""},
    +		{"(*TCPConn).SyscallConn", Method, 9, ""},
    +		{"(*TCPConn).Write", Method, 0, ""},
    +		{"(*TCPConn).WriteTo", Method, 22, ""},
    +		{"(*TCPListener).Accept", Method, 0, ""},
    +		{"(*TCPListener).AcceptTCP", Method, 0, ""},
    +		{"(*TCPListener).Addr", Method, 0, ""},
    +		{"(*TCPListener).Close", Method, 0, ""},
    +		{"(*TCPListener).File", Method, 0, ""},
    +		{"(*TCPListener).SetDeadline", Method, 0, ""},
    +		{"(*TCPListener).SyscallConn", Method, 10, ""},
    +		{"(*UDPAddr).AddrPort", Method, 18, ""},
    +		{"(*UDPAddr).Network", Method, 0, ""},
    +		{"(*UDPAddr).String", Method, 0, ""},
    +		{"(*UDPConn).Close", Method, 0, ""},
    +		{"(*UDPConn).File", Method, 0, ""},
    +		{"(*UDPConn).LocalAddr", Method, 0, ""},
    +		{"(*UDPConn).Read", Method, 0, ""},
    +		{"(*UDPConn).ReadFrom", Method, 0, ""},
    +		{"(*UDPConn).ReadFromUDP", Method, 0, ""},
    +		{"(*UDPConn).ReadFromUDPAddrPort", Method, 18, ""},
    +		{"(*UDPConn).ReadMsgUDP", Method, 1, ""},
    +		{"(*UDPConn).ReadMsgUDPAddrPort", Method, 18, ""},
    +		{"(*UDPConn).RemoteAddr", Method, 0, ""},
    +		{"(*UDPConn).SetDeadline", Method, 0, ""},
    +		{"(*UDPConn).SetReadBuffer", Method, 0, ""},
    +		{"(*UDPConn).SetReadDeadline", Method, 0, ""},
    +		{"(*UDPConn).SetWriteBuffer", Method, 0, ""},
    +		{"(*UDPConn).SetWriteDeadline", Method, 0, ""},
    +		{"(*UDPConn).SyscallConn", Method, 9, ""},
    +		{"(*UDPConn).Write", Method, 0, ""},
    +		{"(*UDPConn).WriteMsgUDP", Method, 1, ""},
    +		{"(*UDPConn).WriteMsgUDPAddrPort", Method, 18, ""},
    +		{"(*UDPConn).WriteTo", Method, 0, ""},
    +		{"(*UDPConn).WriteToUDP", Method, 0, ""},
    +		{"(*UDPConn).WriteToUDPAddrPort", Method, 18, ""},
    +		{"(*UnixAddr).Network", Method, 0, ""},
    +		{"(*UnixAddr).String", Method, 0, ""},
    +		{"(*UnixConn).Close", Method, 0, ""},
    +		{"(*UnixConn).CloseRead", Method, 1, ""},
    +		{"(*UnixConn).CloseWrite", Method, 1, ""},
    +		{"(*UnixConn).File", Method, 0, ""},
    +		{"(*UnixConn).LocalAddr", Method, 0, ""},
    +		{"(*UnixConn).Read", Method, 0, ""},
    +		{"(*UnixConn).ReadFrom", Method, 0, ""},
    +		{"(*UnixConn).ReadFromUnix", Method, 0, ""},
    +		{"(*UnixConn).ReadMsgUnix", Method, 0, ""},
    +		{"(*UnixConn).RemoteAddr", Method, 0, ""},
    +		{"(*UnixConn).SetDeadline", Method, 0, ""},
    +		{"(*UnixConn).SetReadBuffer", Method, 0, ""},
    +		{"(*UnixConn).SetReadDeadline", Method, 0, ""},
    +		{"(*UnixConn).SetWriteBuffer", Method, 0, ""},
    +		{"(*UnixConn).SetWriteDeadline", Method, 0, ""},
    +		{"(*UnixConn).SyscallConn", Method, 9, ""},
    +		{"(*UnixConn).Write", Method, 0, ""},
    +		{"(*UnixConn).WriteMsgUnix", Method, 0, ""},
    +		{"(*UnixConn).WriteTo", Method, 0, ""},
    +		{"(*UnixConn).WriteToUnix", Method, 0, ""},
    +		{"(*UnixListener).Accept", Method, 0, ""},
    +		{"(*UnixListener).AcceptUnix", Method, 0, ""},
    +		{"(*UnixListener).Addr", Method, 0, ""},
    +		{"(*UnixListener).Close", Method, 0, ""},
    +		{"(*UnixListener).File", Method, 0, ""},
    +		{"(*UnixListener).SetDeadline", Method, 0, ""},
    +		{"(*UnixListener).SetUnlinkOnClose", Method, 8, ""},
    +		{"(*UnixListener).SyscallConn", Method, 10, ""},
    +		{"(Flags).String", Method, 0, ""},
    +		{"(HardwareAddr).String", Method, 0, ""},
    +		{"(IP).AppendText", Method, 24, ""},
    +		{"(IP).DefaultMask", Method, 0, ""},
    +		{"(IP).Equal", Method, 0, ""},
    +		{"(IP).IsGlobalUnicast", Method, 0, ""},
    +		{"(IP).IsInterfaceLocalMulticast", Method, 0, ""},
    +		{"(IP).IsLinkLocalMulticast", Method, 0, ""},
    +		{"(IP).IsLinkLocalUnicast", Method, 0, ""},
    +		{"(IP).IsLoopback", Method, 0, ""},
    +		{"(IP).IsMulticast", Method, 0, ""},
    +		{"(IP).IsPrivate", Method, 17, ""},
    +		{"(IP).IsUnspecified", Method, 0, ""},
    +		{"(IP).MarshalText", Method, 2, ""},
    +		{"(IP).Mask", Method, 0, ""},
    +		{"(IP).String", Method, 0, ""},
    +		{"(IP).To16", Method, 0, ""},
    +		{"(IP).To4", Method, 0, ""},
    +		{"(IPMask).Size", Method, 0, ""},
    +		{"(IPMask).String", Method, 0, ""},
    +		{"(InvalidAddrError).Error", Method, 0, ""},
    +		{"(InvalidAddrError).Temporary", Method, 0, ""},
    +		{"(InvalidAddrError).Timeout", Method, 0, ""},
    +		{"(UnknownNetworkError).Error", Method, 0, ""},
    +		{"(UnknownNetworkError).Temporary", Method, 0, ""},
    +		{"(UnknownNetworkError).Timeout", Method, 0, ""},
    +		{"Addr", Type, 0, ""},
    +		{"AddrError", Type, 0, ""},
    +		{"AddrError.Addr", Field, 0, ""},
    +		{"AddrError.Err", Field, 0, ""},
    +		{"Buffers", Type, 8, ""},
    +		{"CIDRMask", Func, 0, "func(ones int, bits int) IPMask"},
    +		{"Conn", Type, 0, ""},
    +		{"DNSConfigError", Type, 0, ""},
    +		{"DNSConfigError.Err", Field, 0, ""},
    +		{"DNSError", Type, 0, ""},
    +		{"DNSError.Err", Field, 0, ""},
    +		{"DNSError.IsNotFound", Field, 13, ""},
    +		{"DNSError.IsTemporary", Field, 6, ""},
    +		{"DNSError.IsTimeout", Field, 0, ""},
    +		{"DNSError.Name", Field, 0, ""},
    +		{"DNSError.Server", Field, 0, ""},
    +		{"DNSError.UnwrapErr", Field, 23, ""},
    +		{"DefaultResolver", Var, 8, ""},
    +		{"Dial", Func, 0, "func(network string, address string) (Conn, error)"},
    +		{"DialIP", Func, 0, "func(network string, laddr *IPAddr, raddr *IPAddr) (*IPConn, error)"},
    +		{"DialTCP", Func, 0, "func(network string, laddr *TCPAddr, raddr *TCPAddr) (*TCPConn, error)"},
    +		{"DialTimeout", Func, 0, "func(network string, address string, timeout time.Duration) (Conn, error)"},
    +		{"DialUDP", Func, 0, "func(network string, laddr *UDPAddr, raddr *UDPAddr) (*UDPConn, error)"},
    +		{"DialUnix", Func, 0, "func(network string, laddr *UnixAddr, raddr *UnixAddr) (*UnixConn, error)"},
    +		{"Dialer", Type, 1, ""},
    +		{"Dialer.Cancel", Field, 6, ""},
    +		{"Dialer.Control", Field, 11, ""},
    +		{"Dialer.ControlContext", Field, 20, ""},
    +		{"Dialer.Deadline", Field, 1, ""},
    +		{"Dialer.DualStack", Field, 2, ""},
    +		{"Dialer.FallbackDelay", Field, 5, ""},
    +		{"Dialer.KeepAlive", Field, 3, ""},
    +		{"Dialer.KeepAliveConfig", Field, 23, ""},
    +		{"Dialer.LocalAddr", Field, 1, ""},
    +		{"Dialer.Resolver", Field, 8, ""},
    +		{"Dialer.Timeout", Field, 1, ""},
    +		{"ErrClosed", Var, 16, ""},
    +		{"ErrWriteToConnected", Var, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"FileConn", Func, 0, "func(f *os.File) (c Conn, err error)"},
    +		{"FileListener", Func, 0, "func(f *os.File) (ln Listener, err error)"},
    +		{"FilePacketConn", Func, 0, "func(f *os.File) (c PacketConn, err error)"},
    +		{"FlagBroadcast", Const, 0, ""},
    +		{"FlagLoopback", Const, 0, ""},
    +		{"FlagMulticast", Const, 0, ""},
    +		{"FlagPointToPoint", Const, 0, ""},
    +		{"FlagRunning", Const, 20, ""},
    +		{"FlagUp", Const, 0, ""},
    +		{"Flags", Type, 0, ""},
    +		{"HardwareAddr", Type, 0, ""},
    +		{"IP", Type, 0, ""},
    +		{"IPAddr", Type, 0, ""},
    +		{"IPAddr.IP", Field, 0, ""},
    +		{"IPAddr.Zone", Field, 1, ""},
    +		{"IPConn", Type, 0, ""},
    +		{"IPMask", Type, 0, ""},
    +		{"IPNet", Type, 0, ""},
    +		{"IPNet.IP", Field, 0, ""},
    +		{"IPNet.Mask", Field, 0, ""},
    +		{"IPv4", Func, 0, "func(a byte, b byte, c byte, d byte) IP"},
    +		{"IPv4Mask", Func, 0, "func(a byte, b byte, c byte, d byte) IPMask"},
    +		{"IPv4allrouter", Var, 0, ""},
    +		{"IPv4allsys", Var, 0, ""},
    +		{"IPv4bcast", Var, 0, ""},
    +		{"IPv4len", Const, 0, ""},
    +		{"IPv4zero", Var, 0, ""},
    +		{"IPv6interfacelocalallnodes", Var, 0, ""},
    +		{"IPv6len", Const, 0, ""},
    +		{"IPv6linklocalallnodes", Var, 0, ""},
    +		{"IPv6linklocalallrouters", Var, 0, ""},
    +		{"IPv6loopback", Var, 0, ""},
    +		{"IPv6unspecified", Var, 0, ""},
    +		{"IPv6zero", Var, 0, ""},
    +		{"Interface", Type, 0, ""},
    +		{"Interface.Flags", Field, 0, ""},
    +		{"Interface.HardwareAddr", Field, 0, ""},
    +		{"Interface.Index", Field, 0, ""},
    +		{"Interface.MTU", Field, 0, ""},
    +		{"Interface.Name", Field, 0, ""},
    +		{"InterfaceAddrs", Func, 0, "func() ([]Addr, error)"},
    +		{"InterfaceByIndex", Func, 0, "func(index int) (*Interface, error)"},
    +		{"InterfaceByName", Func, 0, "func(name string) (*Interface, error)"},
    +		{"Interfaces", Func, 0, "func() ([]Interface, error)"},
    +		{"InvalidAddrError", Type, 0, ""},
    +		{"JoinHostPort", Func, 0, "func(host string, port string) string"},
    +		{"KeepAliveConfig", Type, 23, ""},
    +		{"KeepAliveConfig.Count", Field, 23, ""},
    +		{"KeepAliveConfig.Enable", Field, 23, ""},
    +		{"KeepAliveConfig.Idle", Field, 23, ""},
    +		{"KeepAliveConfig.Interval", Field, 23, ""},
    +		{"Listen", Func, 0, "func(network string, address string) (Listener, error)"},
    +		{"ListenConfig", Type, 11, ""},
    +		{"ListenConfig.Control", Field, 11, ""},
    +		{"ListenConfig.KeepAlive", Field, 13, ""},
    +		{"ListenConfig.KeepAliveConfig", Field, 23, ""},
    +		{"ListenIP", Func, 0, "func(network string, laddr *IPAddr) (*IPConn, error)"},
    +		{"ListenMulticastUDP", Func, 0, "func(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error)"},
    +		{"ListenPacket", Func, 0, "func(network string, address string) (PacketConn, error)"},
    +		{"ListenTCP", Func, 0, "func(network string, laddr *TCPAddr) (*TCPListener, error)"},
    +		{"ListenUDP", Func, 0, "func(network string, laddr *UDPAddr) (*UDPConn, error)"},
    +		{"ListenUnix", Func, 0, "func(network string, laddr *UnixAddr) (*UnixListener, error)"},
    +		{"ListenUnixgram", Func, 0, "func(network string, laddr *UnixAddr) (*UnixConn, error)"},
    +		{"Listener", Type, 0, ""},
    +		{"LookupAddr", Func, 0, "func(addr string) (names []string, err error)"},
    +		{"LookupCNAME", Func, 0, "func(host string) (cname string, err error)"},
    +		{"LookupHost", Func, 0, "func(host string) (addrs []string, err error)"},
    +		{"LookupIP", Func, 0, "func(host string) ([]IP, error)"},
    +		{"LookupMX", Func, 0, "func(name string) ([]*MX, error)"},
    +		{"LookupNS", Func, 1, "func(name string) ([]*NS, error)"},
    +		{"LookupPort", Func, 0, "func(network string, service string) (port int, err error)"},
    +		{"LookupSRV", Func, 0, "func(service string, proto string, name string) (cname string, addrs []*SRV, err error)"},
    +		{"LookupTXT", Func, 0, "func(name string) ([]string, error)"},
    +		{"MX", Type, 0, ""},
    +		{"MX.Host", Field, 0, ""},
    +		{"MX.Pref", Field, 0, ""},
    +		{"NS", Type, 1, ""},
    +		{"NS.Host", Field, 1, ""},
    +		{"OpError", Type, 0, ""},
    +		{"OpError.Addr", Field, 0, ""},
    +		{"OpError.Err", Field, 0, ""},
    +		{"OpError.Net", Field, 0, ""},
    +		{"OpError.Op", Field, 0, ""},
    +		{"OpError.Source", Field, 5, ""},
    +		{"PacketConn", Type, 0, ""},
    +		{"ParseCIDR", Func, 0, "func(s string) (IP, *IPNet, error)"},
    +		{"ParseError", Type, 0, ""},
    +		{"ParseError.Text", Field, 0, ""},
    +		{"ParseError.Type", Field, 0, ""},
    +		{"ParseIP", Func, 0, "func(s string) IP"},
    +		{"ParseMAC", Func, 0, "func(s string) (hw HardwareAddr, err error)"},
    +		{"Pipe", Func, 0, "func() (Conn, Conn)"},
    +		{"ResolveIPAddr", Func, 0, "func(network string, address string) (*IPAddr, error)"},
    +		{"ResolveTCPAddr", Func, 0, "func(network string, address string) (*TCPAddr, error)"},
    +		{"ResolveUDPAddr", Func, 0, "func(network string, address string) (*UDPAddr, error)"},
    +		{"ResolveUnixAddr", Func, 0, "func(network string, address string) (*UnixAddr, error)"},
    +		{"Resolver", Type, 8, ""},
    +		{"Resolver.Dial", Field, 9, ""},
    +		{"Resolver.PreferGo", Field, 8, ""},
    +		{"Resolver.StrictErrors", Field, 9, ""},
    +		{"SRV", Type, 0, ""},
    +		{"SRV.Port", Field, 0, ""},
    +		{"SRV.Priority", Field, 0, ""},
    +		{"SRV.Target", Field, 0, ""},
    +		{"SRV.Weight", Field, 0, ""},
    +		{"SplitHostPort", Func, 0, "func(hostport string) (host string, port string, err error)"},
    +		{"TCPAddr", Type, 0, ""},
    +		{"TCPAddr.IP", Field, 0, ""},
    +		{"TCPAddr.Port", Field, 0, ""},
    +		{"TCPAddr.Zone", Field, 1, ""},
    +		{"TCPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *TCPAddr"},
    +		{"TCPConn", Type, 0, ""},
    +		{"TCPListener", Type, 0, ""},
    +		{"UDPAddr", Type, 0, ""},
    +		{"UDPAddr.IP", Field, 0, ""},
    +		{"UDPAddr.Port", Field, 0, ""},
    +		{"UDPAddr.Zone", Field, 1, ""},
    +		{"UDPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *UDPAddr"},
    +		{"UDPConn", Type, 0, ""},
    +		{"UnixAddr", Type, 0, ""},
    +		{"UnixAddr.Name", Field, 0, ""},
    +		{"UnixAddr.Net", Field, 0, ""},
    +		{"UnixConn", Type, 0, ""},
    +		{"UnixListener", Type, 0, ""},
    +		{"UnknownNetworkError", Type, 0, ""},
     	},
     	"net/http": {
    -		{"(*Client).CloseIdleConnections", Method, 12},
    -		{"(*Client).Do", Method, 0},
    -		{"(*Client).Get", Method, 0},
    -		{"(*Client).Head", Method, 0},
    -		{"(*Client).Post", Method, 0},
    -		{"(*Client).PostForm", Method, 0},
    -		{"(*Cookie).String", Method, 0},
    -		{"(*Cookie).Valid", Method, 18},
    -		{"(*MaxBytesError).Error", Method, 19},
    -		{"(*ProtocolError).Error", Method, 0},
    -		{"(*ProtocolError).Is", Method, 21},
    -		{"(*Protocols).SetHTTP1", Method, 24},
    -		{"(*Protocols).SetHTTP2", Method, 24},
    -		{"(*Protocols).SetUnencryptedHTTP2", Method, 24},
    -		{"(*Request).AddCookie", Method, 0},
    -		{"(*Request).BasicAuth", Method, 4},
    -		{"(*Request).Clone", Method, 13},
    -		{"(*Request).Context", Method, 7},
    -		{"(*Request).Cookie", Method, 0},
    -		{"(*Request).Cookies", Method, 0},
    -		{"(*Request).CookiesNamed", Method, 23},
    -		{"(*Request).FormFile", Method, 0},
    -		{"(*Request).FormValue", Method, 0},
    -		{"(*Request).MultipartReader", Method, 0},
    -		{"(*Request).ParseForm", Method, 0},
    -		{"(*Request).ParseMultipartForm", Method, 0},
    -		{"(*Request).PathValue", Method, 22},
    -		{"(*Request).PostFormValue", Method, 1},
    -		{"(*Request).ProtoAtLeast", Method, 0},
    -		{"(*Request).Referer", Method, 0},
    -		{"(*Request).SetBasicAuth", Method, 0},
    -		{"(*Request).SetPathValue", Method, 22},
    -		{"(*Request).UserAgent", Method, 0},
    -		{"(*Request).WithContext", Method, 7},
    -		{"(*Request).Write", Method, 0},
    -		{"(*Request).WriteProxy", Method, 0},
    -		{"(*Response).Cookies", Method, 0},
    -		{"(*Response).Location", Method, 0},
    -		{"(*Response).ProtoAtLeast", Method, 0},
    -		{"(*Response).Write", Method, 0},
    -		{"(*ResponseController).EnableFullDuplex", Method, 21},
    -		{"(*ResponseController).Flush", Method, 20},
    -		{"(*ResponseController).Hijack", Method, 20},
    -		{"(*ResponseController).SetReadDeadline", Method, 20},
    -		{"(*ResponseController).SetWriteDeadline", Method, 20},
    -		{"(*ServeMux).Handle", Method, 0},
    -		{"(*ServeMux).HandleFunc", Method, 0},
    -		{"(*ServeMux).Handler", Method, 1},
    -		{"(*ServeMux).ServeHTTP", Method, 0},
    -		{"(*Server).Close", Method, 8},
    -		{"(*Server).ListenAndServe", Method, 0},
    -		{"(*Server).ListenAndServeTLS", Method, 0},
    -		{"(*Server).RegisterOnShutdown", Method, 9},
    -		{"(*Server).Serve", Method, 0},
    -		{"(*Server).ServeTLS", Method, 9},
    -		{"(*Server).SetKeepAlivesEnabled", Method, 3},
    -		{"(*Server).Shutdown", Method, 8},
    -		{"(*Transport).CancelRequest", Method, 1},
    -		{"(*Transport).Clone", Method, 13},
    -		{"(*Transport).CloseIdleConnections", Method, 0},
    -		{"(*Transport).RegisterProtocol", Method, 0},
    -		{"(*Transport).RoundTrip", Method, 0},
    -		{"(ConnState).String", Method, 3},
    -		{"(Dir).Open", Method, 0},
    -		{"(HandlerFunc).ServeHTTP", Method, 0},
    -		{"(Header).Add", Method, 0},
    -		{"(Header).Clone", Method, 13},
    -		{"(Header).Del", Method, 0},
    -		{"(Header).Get", Method, 0},
    -		{"(Header).Set", Method, 0},
    -		{"(Header).Values", Method, 14},
    -		{"(Header).Write", Method, 0},
    -		{"(Header).WriteSubset", Method, 0},
    -		{"(Protocols).HTTP1", Method, 24},
    -		{"(Protocols).HTTP2", Method, 24},
    -		{"(Protocols).String", Method, 24},
    -		{"(Protocols).UnencryptedHTTP2", Method, 24},
    -		{"AllowQuerySemicolons", Func, 17},
    -		{"CanonicalHeaderKey", Func, 0},
    -		{"Client", Type, 0},
    -		{"Client.CheckRedirect", Field, 0},
    -		{"Client.Jar", Field, 0},
    -		{"Client.Timeout", Field, 3},
    -		{"Client.Transport", Field, 0},
    -		{"CloseNotifier", Type, 1},
    -		{"ConnState", Type, 3},
    -		{"Cookie", Type, 0},
    -		{"Cookie.Domain", Field, 0},
    -		{"Cookie.Expires", Field, 0},
    -		{"Cookie.HttpOnly", Field, 0},
    -		{"Cookie.MaxAge", Field, 0},
    -		{"Cookie.Name", Field, 0},
    -		{"Cookie.Partitioned", Field, 23},
    -		{"Cookie.Path", Field, 0},
    -		{"Cookie.Quoted", Field, 23},
    -		{"Cookie.Raw", Field, 0},
    -		{"Cookie.RawExpires", Field, 0},
    -		{"Cookie.SameSite", Field, 11},
    -		{"Cookie.Secure", Field, 0},
    -		{"Cookie.Unparsed", Field, 0},
    -		{"Cookie.Value", Field, 0},
    -		{"CookieJar", Type, 0},
    -		{"DefaultClient", Var, 0},
    -		{"DefaultMaxHeaderBytes", Const, 0},
    -		{"DefaultMaxIdleConnsPerHost", Const, 0},
    -		{"DefaultServeMux", Var, 0},
    -		{"DefaultTransport", Var, 0},
    -		{"DetectContentType", Func, 0},
    -		{"Dir", Type, 0},
    -		{"ErrAbortHandler", Var, 8},
    -		{"ErrBodyNotAllowed", Var, 0},
    -		{"ErrBodyReadAfterClose", Var, 0},
    -		{"ErrContentLength", Var, 0},
    -		{"ErrHandlerTimeout", Var, 0},
    -		{"ErrHeaderTooLong", Var, 0},
    -		{"ErrHijacked", Var, 0},
    -		{"ErrLineTooLong", Var, 0},
    -		{"ErrMissingBoundary", Var, 0},
    -		{"ErrMissingContentLength", Var, 0},
    -		{"ErrMissingFile", Var, 0},
    -		{"ErrNoCookie", Var, 0},
    -		{"ErrNoLocation", Var, 0},
    -		{"ErrNotMultipart", Var, 0},
    -		{"ErrNotSupported", Var, 0},
    -		{"ErrSchemeMismatch", Var, 21},
    -		{"ErrServerClosed", Var, 8},
    -		{"ErrShortBody", Var, 0},
    -		{"ErrSkipAltProtocol", Var, 6},
    -		{"ErrUnexpectedTrailer", Var, 0},
    -		{"ErrUseLastResponse", Var, 7},
    -		{"ErrWriteAfterFlush", Var, 0},
    -		{"Error", Func, 0},
    -		{"FS", Func, 16},
    -		{"File", Type, 0},
    -		{"FileServer", Func, 0},
    -		{"FileServerFS", Func, 22},
    -		{"FileSystem", Type, 0},
    -		{"Flusher", Type, 0},
    -		{"Get", Func, 0},
    -		{"HTTP2Config", Type, 24},
    -		{"HTTP2Config.CountError", Field, 24},
    -		{"HTTP2Config.MaxConcurrentStreams", Field, 24},
    -		{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
    -		{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
    -		{"HTTP2Config.MaxReadFrameSize", Field, 24},
    -		{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
    -		{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
    -		{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
    -		{"HTTP2Config.PingTimeout", Field, 24},
    -		{"HTTP2Config.SendPingTimeout", Field, 24},
    -		{"HTTP2Config.WriteByteTimeout", Field, 24},
    -		{"Handle", Func, 0},
    -		{"HandleFunc", Func, 0},
    -		{"Handler", Type, 0},
    -		{"HandlerFunc", Type, 0},
    -		{"Head", Func, 0},
    -		{"Header", Type, 0},
    -		{"Hijacker", Type, 0},
    -		{"ListenAndServe", Func, 0},
    -		{"ListenAndServeTLS", Func, 0},
    -		{"LocalAddrContextKey", Var, 7},
    -		{"MaxBytesError", Type, 19},
    -		{"MaxBytesError.Limit", Field, 19},
    -		{"MaxBytesHandler", Func, 18},
    -		{"MaxBytesReader", Func, 0},
    -		{"MethodConnect", Const, 6},
    -		{"MethodDelete", Const, 6},
    -		{"MethodGet", Const, 6},
    -		{"MethodHead", Const, 6},
    -		{"MethodOptions", Const, 6},
    -		{"MethodPatch", Const, 6},
    -		{"MethodPost", Const, 6},
    -		{"MethodPut", Const, 6},
    -		{"MethodTrace", Const, 6},
    -		{"NewFileTransport", Func, 0},
    -		{"NewFileTransportFS", Func, 22},
    -		{"NewRequest", Func, 0},
    -		{"NewRequestWithContext", Func, 13},
    -		{"NewResponseController", Func, 20},
    -		{"NewServeMux", Func, 0},
    -		{"NoBody", Var, 8},
    -		{"NotFound", Func, 0},
    -		{"NotFoundHandler", Func, 0},
    -		{"ParseCookie", Func, 23},
    -		{"ParseHTTPVersion", Func, 0},
    -		{"ParseSetCookie", Func, 23},
    -		{"ParseTime", Func, 1},
    -		{"Post", Func, 0},
    -		{"PostForm", Func, 0},
    -		{"ProtocolError", Type, 0},
    -		{"ProtocolError.ErrorString", Field, 0},
    -		{"Protocols", Type, 24},
    -		{"ProxyFromEnvironment", Func, 0},
    -		{"ProxyURL", Func, 0},
    -		{"PushOptions", Type, 8},
    -		{"PushOptions.Header", Field, 8},
    -		{"PushOptions.Method", Field, 8},
    -		{"Pusher", Type, 8},
    -		{"ReadRequest", Func, 0},
    -		{"ReadResponse", Func, 0},
    -		{"Redirect", Func, 0},
    -		{"RedirectHandler", Func, 0},
    -		{"Request", Type, 0},
    -		{"Request.Body", Field, 0},
    -		{"Request.Cancel", Field, 5},
    -		{"Request.Close", Field, 0},
    -		{"Request.ContentLength", Field, 0},
    -		{"Request.Form", Field, 0},
    -		{"Request.GetBody", Field, 8},
    -		{"Request.Header", Field, 0},
    -		{"Request.Host", Field, 0},
    -		{"Request.Method", Field, 0},
    -		{"Request.MultipartForm", Field, 0},
    -		{"Request.Pattern", Field, 23},
    -		{"Request.PostForm", Field, 1},
    -		{"Request.Proto", Field, 0},
    -		{"Request.ProtoMajor", Field, 0},
    -		{"Request.ProtoMinor", Field, 0},
    -		{"Request.RemoteAddr", Field, 0},
    -		{"Request.RequestURI", Field, 0},
    -		{"Request.Response", Field, 7},
    -		{"Request.TLS", Field, 0},
    -		{"Request.Trailer", Field, 0},
    -		{"Request.TransferEncoding", Field, 0},
    -		{"Request.URL", Field, 0},
    -		{"Response", Type, 0},
    -		{"Response.Body", Field, 0},
    -		{"Response.Close", Field, 0},
    -		{"Response.ContentLength", Field, 0},
    -		{"Response.Header", Field, 0},
    -		{"Response.Proto", Field, 0},
    -		{"Response.ProtoMajor", Field, 0},
    -		{"Response.ProtoMinor", Field, 0},
    -		{"Response.Request", Field, 0},
    -		{"Response.Status", Field, 0},
    -		{"Response.StatusCode", Field, 0},
    -		{"Response.TLS", Field, 3},
    -		{"Response.Trailer", Field, 0},
    -		{"Response.TransferEncoding", Field, 0},
    -		{"Response.Uncompressed", Field, 7},
    -		{"ResponseController", Type, 20},
    -		{"ResponseWriter", Type, 0},
    -		{"RoundTripper", Type, 0},
    -		{"SameSite", Type, 11},
    -		{"SameSiteDefaultMode", Const, 11},
    -		{"SameSiteLaxMode", Const, 11},
    -		{"SameSiteNoneMode", Const, 13},
    -		{"SameSiteStrictMode", Const, 11},
    -		{"Serve", Func, 0},
    -		{"ServeContent", Func, 0},
    -		{"ServeFile", Func, 0},
    -		{"ServeFileFS", Func, 22},
    -		{"ServeMux", Type, 0},
    -		{"ServeTLS", Func, 9},
    -		{"Server", Type, 0},
    -		{"Server.Addr", Field, 0},
    -		{"Server.BaseContext", Field, 13},
    -		{"Server.ConnContext", Field, 13},
    -		{"Server.ConnState", Field, 3},
    -		{"Server.DisableGeneralOptionsHandler", Field, 20},
    -		{"Server.ErrorLog", Field, 3},
    -		{"Server.HTTP2", Field, 24},
    -		{"Server.Handler", Field, 0},
    -		{"Server.IdleTimeout", Field, 8},
    -		{"Server.MaxHeaderBytes", Field, 0},
    -		{"Server.Protocols", Field, 24},
    -		{"Server.ReadHeaderTimeout", Field, 8},
    -		{"Server.ReadTimeout", Field, 0},
    -		{"Server.TLSConfig", Field, 0},
    -		{"Server.TLSNextProto", Field, 1},
    -		{"Server.WriteTimeout", Field, 0},
    -		{"ServerContextKey", Var, 7},
    -		{"SetCookie", Func, 0},
    -		{"StateActive", Const, 3},
    -		{"StateClosed", Const, 3},
    -		{"StateHijacked", Const, 3},
    -		{"StateIdle", Const, 3},
    -		{"StateNew", Const, 3},
    -		{"StatusAccepted", Const, 0},
    -		{"StatusAlreadyReported", Const, 7},
    -		{"StatusBadGateway", Const, 0},
    -		{"StatusBadRequest", Const, 0},
    -		{"StatusConflict", Const, 0},
    -		{"StatusContinue", Const, 0},
    -		{"StatusCreated", Const, 0},
    -		{"StatusEarlyHints", Const, 13},
    -		{"StatusExpectationFailed", Const, 0},
    -		{"StatusFailedDependency", Const, 7},
    -		{"StatusForbidden", Const, 0},
    -		{"StatusFound", Const, 0},
    -		{"StatusGatewayTimeout", Const, 0},
    -		{"StatusGone", Const, 0},
    -		{"StatusHTTPVersionNotSupported", Const, 0},
    -		{"StatusIMUsed", Const, 7},
    -		{"StatusInsufficientStorage", Const, 7},
    -		{"StatusInternalServerError", Const, 0},
    -		{"StatusLengthRequired", Const, 0},
    -		{"StatusLocked", Const, 7},
    -		{"StatusLoopDetected", Const, 7},
    -		{"StatusMethodNotAllowed", Const, 0},
    -		{"StatusMisdirectedRequest", Const, 11},
    -		{"StatusMovedPermanently", Const, 0},
    -		{"StatusMultiStatus", Const, 7},
    -		{"StatusMultipleChoices", Const, 0},
    -		{"StatusNetworkAuthenticationRequired", Const, 6},
    -		{"StatusNoContent", Const, 0},
    -		{"StatusNonAuthoritativeInfo", Const, 0},
    -		{"StatusNotAcceptable", Const, 0},
    -		{"StatusNotExtended", Const, 7},
    -		{"StatusNotFound", Const, 0},
    -		{"StatusNotImplemented", Const, 0},
    -		{"StatusNotModified", Const, 0},
    -		{"StatusOK", Const, 0},
    -		{"StatusPartialContent", Const, 0},
    -		{"StatusPaymentRequired", Const, 0},
    -		{"StatusPermanentRedirect", Const, 7},
    -		{"StatusPreconditionFailed", Const, 0},
    -		{"StatusPreconditionRequired", Const, 6},
    -		{"StatusProcessing", Const, 7},
    -		{"StatusProxyAuthRequired", Const, 0},
    -		{"StatusRequestEntityTooLarge", Const, 0},
    -		{"StatusRequestHeaderFieldsTooLarge", Const, 6},
    -		{"StatusRequestTimeout", Const, 0},
    -		{"StatusRequestURITooLong", Const, 0},
    -		{"StatusRequestedRangeNotSatisfiable", Const, 0},
    -		{"StatusResetContent", Const, 0},
    -		{"StatusSeeOther", Const, 0},
    -		{"StatusServiceUnavailable", Const, 0},
    -		{"StatusSwitchingProtocols", Const, 0},
    -		{"StatusTeapot", Const, 0},
    -		{"StatusTemporaryRedirect", Const, 0},
    -		{"StatusText", Func, 0},
    -		{"StatusTooEarly", Const, 12},
    -		{"StatusTooManyRequests", Const, 6},
    -		{"StatusUnauthorized", Const, 0},
    -		{"StatusUnavailableForLegalReasons", Const, 6},
    -		{"StatusUnprocessableEntity", Const, 7},
    -		{"StatusUnsupportedMediaType", Const, 0},
    -		{"StatusUpgradeRequired", Const, 7},
    -		{"StatusUseProxy", Const, 0},
    -		{"StatusVariantAlsoNegotiates", Const, 7},
    -		{"StripPrefix", Func, 0},
    -		{"TimeFormat", Const, 0},
    -		{"TimeoutHandler", Func, 0},
    -		{"TrailerPrefix", Const, 8},
    -		{"Transport", Type, 0},
    -		{"Transport.Dial", Field, 0},
    -		{"Transport.DialContext", Field, 7},
    -		{"Transport.DialTLS", Field, 4},
    -		{"Transport.DialTLSContext", Field, 14},
    -		{"Transport.DisableCompression", Field, 0},
    -		{"Transport.DisableKeepAlives", Field, 0},
    -		{"Transport.ExpectContinueTimeout", Field, 6},
    -		{"Transport.ForceAttemptHTTP2", Field, 13},
    -		{"Transport.GetProxyConnectHeader", Field, 16},
    -		{"Transport.HTTP2", Field, 24},
    -		{"Transport.IdleConnTimeout", Field, 7},
    -		{"Transport.MaxConnsPerHost", Field, 11},
    -		{"Transport.MaxIdleConns", Field, 7},
    -		{"Transport.MaxIdleConnsPerHost", Field, 0},
    -		{"Transport.MaxResponseHeaderBytes", Field, 7},
    -		{"Transport.OnProxyConnectResponse", Field, 20},
    -		{"Transport.Protocols", Field, 24},
    -		{"Transport.Proxy", Field, 0},
    -		{"Transport.ProxyConnectHeader", Field, 8},
    -		{"Transport.ReadBufferSize", Field, 13},
    -		{"Transport.ResponseHeaderTimeout", Field, 1},
    -		{"Transport.TLSClientConfig", Field, 0},
    -		{"Transport.TLSHandshakeTimeout", Field, 3},
    -		{"Transport.TLSNextProto", Field, 6},
    -		{"Transport.WriteBufferSize", Field, 13},
    +		{"(*Client).CloseIdleConnections", Method, 12, ""},
    +		{"(*Client).Do", Method, 0, ""},
    +		{"(*Client).Get", Method, 0, ""},
    +		{"(*Client).Head", Method, 0, ""},
    +		{"(*Client).Post", Method, 0, ""},
    +		{"(*Client).PostForm", Method, 0, ""},
    +		{"(*Cookie).String", Method, 0, ""},
    +		{"(*Cookie).Valid", Method, 18, ""},
    +		{"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""},
    +		{"(*CrossOriginProtection).AddTrustedOrigin", Method, 25, ""},
    +		{"(*CrossOriginProtection).Check", Method, 25, ""},
    +		{"(*CrossOriginProtection).Handler", Method, 25, ""},
    +		{"(*CrossOriginProtection).SetDenyHandler", Method, 25, ""},
    +		{"(*MaxBytesError).Error", Method, 19, ""},
    +		{"(*ProtocolError).Error", Method, 0, ""},
    +		{"(*ProtocolError).Is", Method, 21, ""},
    +		{"(*Protocols).SetHTTP1", Method, 24, ""},
    +		{"(*Protocols).SetHTTP2", Method, 24, ""},
    +		{"(*Protocols).SetUnencryptedHTTP2", Method, 24, ""},
    +		{"(*Request).AddCookie", Method, 0, ""},
    +		{"(*Request).BasicAuth", Method, 4, ""},
    +		{"(*Request).Clone", Method, 13, ""},
    +		{"(*Request).Context", Method, 7, ""},
    +		{"(*Request).Cookie", Method, 0, ""},
    +		{"(*Request).Cookies", Method, 0, ""},
    +		{"(*Request).CookiesNamed", Method, 23, ""},
    +		{"(*Request).FormFile", Method, 0, ""},
    +		{"(*Request).FormValue", Method, 0, ""},
    +		{"(*Request).MultipartReader", Method, 0, ""},
    +		{"(*Request).ParseForm", Method, 0, ""},
    +		{"(*Request).ParseMultipartForm", Method, 0, ""},
    +		{"(*Request).PathValue", Method, 22, ""},
    +		{"(*Request).PostFormValue", Method, 1, ""},
    +		{"(*Request).ProtoAtLeast", Method, 0, ""},
    +		{"(*Request).Referer", Method, 0, ""},
    +		{"(*Request).SetBasicAuth", Method, 0, ""},
    +		{"(*Request).SetPathValue", Method, 22, ""},
    +		{"(*Request).UserAgent", Method, 0, ""},
    +		{"(*Request).WithContext", Method, 7, ""},
    +		{"(*Request).Write", Method, 0, ""},
    +		{"(*Request).WriteProxy", Method, 0, ""},
    +		{"(*Response).Cookies", Method, 0, ""},
    +		{"(*Response).Location", Method, 0, ""},
    +		{"(*Response).ProtoAtLeast", Method, 0, ""},
    +		{"(*Response).Write", Method, 0, ""},
    +		{"(*ResponseController).EnableFullDuplex", Method, 21, ""},
    +		{"(*ResponseController).Flush", Method, 20, ""},
    +		{"(*ResponseController).Hijack", Method, 20, ""},
    +		{"(*ResponseController).SetReadDeadline", Method, 20, ""},
    +		{"(*ResponseController).SetWriteDeadline", Method, 20, ""},
    +		{"(*ServeMux).Handle", Method, 0, ""},
    +		{"(*ServeMux).HandleFunc", Method, 0, ""},
    +		{"(*ServeMux).Handler", Method, 1, ""},
    +		{"(*ServeMux).ServeHTTP", Method, 0, ""},
    +		{"(*Server).Close", Method, 8, ""},
    +		{"(*Server).ListenAndServe", Method, 0, ""},
    +		{"(*Server).ListenAndServeTLS", Method, 0, ""},
    +		{"(*Server).RegisterOnShutdown", Method, 9, ""},
    +		{"(*Server).Serve", Method, 0, ""},
    +		{"(*Server).ServeTLS", Method, 9, ""},
    +		{"(*Server).SetKeepAlivesEnabled", Method, 3, ""},
    +		{"(*Server).Shutdown", Method, 8, ""},
    +		{"(*Transport).CancelRequest", Method, 1, ""},
    +		{"(*Transport).Clone", Method, 13, ""},
    +		{"(*Transport).CloseIdleConnections", Method, 0, ""},
    +		{"(*Transport).RegisterProtocol", Method, 0, ""},
    +		{"(*Transport).RoundTrip", Method, 0, ""},
    +		{"(ConnState).String", Method, 3, ""},
    +		{"(Dir).Open", Method, 0, ""},
    +		{"(HandlerFunc).ServeHTTP", Method, 0, ""},
    +		{"(Header).Add", Method, 0, ""},
    +		{"(Header).Clone", Method, 13, ""},
    +		{"(Header).Del", Method, 0, ""},
    +		{"(Header).Get", Method, 0, ""},
    +		{"(Header).Set", Method, 0, ""},
    +		{"(Header).Values", Method, 14, ""},
    +		{"(Header).Write", Method, 0, ""},
    +		{"(Header).WriteSubset", Method, 0, ""},
    +		{"(Protocols).HTTP1", Method, 24, ""},
    +		{"(Protocols).HTTP2", Method, 24, ""},
    +		{"(Protocols).String", Method, 24, ""},
    +		{"(Protocols).UnencryptedHTTP2", Method, 24, ""},
    +		{"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"},
    +		{"CanonicalHeaderKey", Func, 0, "func(s string) string"},
    +		{"Client", Type, 0, ""},
    +		{"Client.CheckRedirect", Field, 0, ""},
    +		{"Client.Jar", Field, 0, ""},
    +		{"Client.Timeout", Field, 3, ""},
    +		{"Client.Transport", Field, 0, ""},
    +		{"CloseNotifier", Type, 1, ""},
    +		{"ConnState", Type, 3, ""},
    +		{"Cookie", Type, 0, ""},
    +		{"Cookie.Domain", Field, 0, ""},
    +		{"Cookie.Expires", Field, 0, ""},
    +		{"Cookie.HttpOnly", Field, 0, ""},
    +		{"Cookie.MaxAge", Field, 0, ""},
    +		{"Cookie.Name", Field, 0, ""},
    +		{"Cookie.Partitioned", Field, 23, ""},
    +		{"Cookie.Path", Field, 0, ""},
    +		{"Cookie.Quoted", Field, 23, ""},
    +		{"Cookie.Raw", Field, 0, ""},
    +		{"Cookie.RawExpires", Field, 0, ""},
    +		{"Cookie.SameSite", Field, 11, ""},
    +		{"Cookie.Secure", Field, 0, ""},
    +		{"Cookie.Unparsed", Field, 0, ""},
    +		{"Cookie.Value", Field, 0, ""},
    +		{"CookieJar", Type, 0, ""},
    +		{"CrossOriginProtection", Type, 25, ""},
    +		{"DefaultClient", Var, 0, ""},
    +		{"DefaultMaxHeaderBytes", Const, 0, ""},
    +		{"DefaultMaxIdleConnsPerHost", Const, 0, ""},
    +		{"DefaultServeMux", Var, 0, ""},
    +		{"DefaultTransport", Var, 0, ""},
    +		{"DetectContentType", Func, 0, "func(data []byte) string"},
    +		{"Dir", Type, 0, ""},
    +		{"ErrAbortHandler", Var, 8, ""},
    +		{"ErrBodyNotAllowed", Var, 0, ""},
    +		{"ErrBodyReadAfterClose", Var, 0, ""},
    +		{"ErrContentLength", Var, 0, ""},
    +		{"ErrHandlerTimeout", Var, 0, ""},
    +		{"ErrHeaderTooLong", Var, 0, ""},
    +		{"ErrHijacked", Var, 0, ""},
    +		{"ErrLineTooLong", Var, 0, ""},
    +		{"ErrMissingBoundary", Var, 0, ""},
    +		{"ErrMissingContentLength", Var, 0, ""},
    +		{"ErrMissingFile", Var, 0, ""},
    +		{"ErrNoCookie", Var, 0, ""},
    +		{"ErrNoLocation", Var, 0, ""},
    +		{"ErrNotMultipart", Var, 0, ""},
    +		{"ErrNotSupported", Var, 0, ""},
    +		{"ErrSchemeMismatch", Var, 21, ""},
    +		{"ErrServerClosed", Var, 8, ""},
    +		{"ErrShortBody", Var, 0, ""},
    +		{"ErrSkipAltProtocol", Var, 6, ""},
    +		{"ErrUnexpectedTrailer", Var, 0, ""},
    +		{"ErrUseLastResponse", Var, 7, ""},
    +		{"ErrWriteAfterFlush", Var, 0, ""},
    +		{"Error", Func, 0, "func(w ResponseWriter, error string, code int)"},
    +		{"FS", Func, 16, "func(fsys fs.FS) FileSystem"},
    +		{"File", Type, 0, ""},
    +		{"FileServer", Func, 0, "func(root FileSystem) Handler"},
    +		{"FileServerFS", Func, 22, "func(root fs.FS) Handler"},
    +		{"FileSystem", Type, 0, ""},
    +		{"Flusher", Type, 0, ""},
    +		{"Get", Func, 0, "func(url string) (resp *Response, err error)"},
    +		{"HTTP2Config", Type, 24, ""},
    +		{"HTTP2Config.CountError", Field, 24, ""},
    +		{"HTTP2Config.MaxConcurrentStreams", Field, 24, ""},
    +		{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24, ""},
    +		{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24, ""},
    +		{"HTTP2Config.MaxReadFrameSize", Field, 24, ""},
    +		{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24, ""},
    +		{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24, ""},
    +		{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""},
    +		{"HTTP2Config.PingTimeout", Field, 24, ""},
    +		{"HTTP2Config.SendPingTimeout", Field, 24, ""},
    +		{"HTTP2Config.WriteByteTimeout", Field, 24, ""},
    +		{"Handle", Func, 0, "func(pattern string, handler Handler)"},
    +		{"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"},
    +		{"Handler", Type, 0, ""},
    +		{"HandlerFunc", Type, 0, ""},
    +		{"Head", Func, 0, "func(url string) (resp *Response, err error)"},
    +		{"Header", Type, 0, ""},
    +		{"Hijacker", Type, 0, ""},
    +		{"ListenAndServe", Func, 0, "func(addr string, handler Handler) error"},
    +		{"ListenAndServeTLS", Func, 0, "func(addr string, certFile string, keyFile string, handler Handler) error"},
    +		{"LocalAddrContextKey", Var, 7, ""},
    +		{"MaxBytesError", Type, 19, ""},
    +		{"MaxBytesError.Limit", Field, 19, ""},
    +		{"MaxBytesHandler", Func, 18, "func(h Handler, n int64) Handler"},
    +		{"MaxBytesReader", Func, 0, "func(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser"},
    +		{"MethodConnect", Const, 6, ""},
    +		{"MethodDelete", Const, 6, ""},
    +		{"MethodGet", Const, 6, ""},
    +		{"MethodHead", Const, 6, ""},
    +		{"MethodOptions", Const, 6, ""},
    +		{"MethodPatch", Const, 6, ""},
    +		{"MethodPost", Const, 6, ""},
    +		{"MethodPut", Const, 6, ""},
    +		{"MethodTrace", Const, 6, ""},
    +		{"NewCrossOriginProtection", Func, 25, "func() *CrossOriginProtection"},
    +		{"NewFileTransport", Func, 0, "func(fs FileSystem) RoundTripper"},
    +		{"NewFileTransportFS", Func, 22, "func(fsys fs.FS) RoundTripper"},
    +		{"NewRequest", Func, 0, "func(method string, url string, body io.Reader) (*Request, error)"},
    +		{"NewRequestWithContext", Func, 13, "func(ctx context.Context, method string, url string, body io.Reader) (*Request, error)"},
    +		{"NewResponseController", Func, 20, "func(rw ResponseWriter) *ResponseController"},
    +		{"NewServeMux", Func, 0, "func() *ServeMux"},
    +		{"NoBody", Var, 8, ""},
    +		{"NotFound", Func, 0, "func(w ResponseWriter, r *Request)"},
    +		{"NotFoundHandler", Func, 0, "func() Handler"},
    +		{"ParseCookie", Func, 23, "func(line string) ([]*Cookie, error)"},
    +		{"ParseHTTPVersion", Func, 0, "func(vers string) (major int, minor int, ok bool)"},
    +		{"ParseSetCookie", Func, 23, "func(line string) (*Cookie, error)"},
    +		{"ParseTime", Func, 1, "func(text string) (t time.Time, err error)"},
    +		{"Post", Func, 0, "func(url string, contentType string, body io.Reader) (resp *Response, err error)"},
    +		{"PostForm", Func, 0, "func(url string, data url.Values) (resp *Response, err error)"},
    +		{"ProtocolError", Type, 0, ""},
    +		{"ProtocolError.ErrorString", Field, 0, ""},
    +		{"Protocols", Type, 24, ""},
    +		{"ProxyFromEnvironment", Func, 0, "func(req *Request) (*url.URL, error)"},
    +		{"ProxyURL", Func, 0, "func(fixedURL *url.URL) func(*Request) (*url.URL, error)"},
    +		{"PushOptions", Type, 8, ""},
    +		{"PushOptions.Header", Field, 8, ""},
    +		{"PushOptions.Method", Field, 8, ""},
    +		{"Pusher", Type, 8, ""},
    +		{"ReadRequest", Func, 0, "func(b *bufio.Reader) (*Request, error)"},
    +		{"ReadResponse", Func, 0, "func(r *bufio.Reader, req *Request) (*Response, error)"},
    +		{"Redirect", Func, 0, "func(w ResponseWriter, r *Request, url string, code int)"},
    +		{"RedirectHandler", Func, 0, "func(url string, code int) Handler"},
    +		{"Request", Type, 0, ""},
    +		{"Request.Body", Field, 0, ""},
    +		{"Request.Cancel", Field, 5, ""},
    +		{"Request.Close", Field, 0, ""},
    +		{"Request.ContentLength", Field, 0, ""},
    +		{"Request.Form", Field, 0, ""},
    +		{"Request.GetBody", Field, 8, ""},
    +		{"Request.Header", Field, 0, ""},
    +		{"Request.Host", Field, 0, ""},
    +		{"Request.Method", Field, 0, ""},
    +		{"Request.MultipartForm", Field, 0, ""},
    +		{"Request.Pattern", Field, 23, ""},
    +		{"Request.PostForm", Field, 1, ""},
    +		{"Request.Proto", Field, 0, ""},
    +		{"Request.ProtoMajor", Field, 0, ""},
    +		{"Request.ProtoMinor", Field, 0, ""},
    +		{"Request.RemoteAddr", Field, 0, ""},
    +		{"Request.RequestURI", Field, 0, ""},
    +		{"Request.Response", Field, 7, ""},
    +		{"Request.TLS", Field, 0, ""},
    +		{"Request.Trailer", Field, 0, ""},
    +		{"Request.TransferEncoding", Field, 0, ""},
    +		{"Request.URL", Field, 0, ""},
    +		{"Response", Type, 0, ""},
    +		{"Response.Body", Field, 0, ""},
    +		{"Response.Close", Field, 0, ""},
    +		{"Response.ContentLength", Field, 0, ""},
    +		{"Response.Header", Field, 0, ""},
    +		{"Response.Proto", Field, 0, ""},
    +		{"Response.ProtoMajor", Field, 0, ""},
    +		{"Response.ProtoMinor", Field, 0, ""},
    +		{"Response.Request", Field, 0, ""},
    +		{"Response.Status", Field, 0, ""},
    +		{"Response.StatusCode", Field, 0, ""},
    +		{"Response.TLS", Field, 3, ""},
    +		{"Response.Trailer", Field, 0, ""},
    +		{"Response.TransferEncoding", Field, 0, ""},
    +		{"Response.Uncompressed", Field, 7, ""},
    +		{"ResponseController", Type, 20, ""},
    +		{"ResponseWriter", Type, 0, ""},
    +		{"RoundTripper", Type, 0, ""},
    +		{"SameSite", Type, 11, ""},
    +		{"SameSiteDefaultMode", Const, 11, ""},
    +		{"SameSiteLaxMode", Const, 11, ""},
    +		{"SameSiteNoneMode", Const, 13, ""},
    +		{"SameSiteStrictMode", Const, 11, ""},
    +		{"Serve", Func, 0, "func(l net.Listener, handler Handler) error"},
    +		{"ServeContent", Func, 0, "func(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker)"},
    +		{"ServeFile", Func, 0, "func(w ResponseWriter, r *Request, name string)"},
    +		{"ServeFileFS", Func, 22, "func(w ResponseWriter, r *Request, fsys fs.FS, name string)"},
    +		{"ServeMux", Type, 0, ""},
    +		{"ServeTLS", Func, 9, "func(l net.Listener, handler Handler, certFile string, keyFile string) error"},
    +		{"Server", Type, 0, ""},
    +		{"Server.Addr", Field, 0, ""},
    +		{"Server.BaseContext", Field, 13, ""},
    +		{"Server.ConnContext", Field, 13, ""},
    +		{"Server.ConnState", Field, 3, ""},
    +		{"Server.DisableGeneralOptionsHandler", Field, 20, ""},
    +		{"Server.ErrorLog", Field, 3, ""},
    +		{"Server.HTTP2", Field, 24, ""},
    +		{"Server.Handler", Field, 0, ""},
    +		{"Server.IdleTimeout", Field, 8, ""},
    +		{"Server.MaxHeaderBytes", Field, 0, ""},
    +		{"Server.Protocols", Field, 24, ""},
    +		{"Server.ReadHeaderTimeout", Field, 8, ""},
    +		{"Server.ReadTimeout", Field, 0, ""},
    +		{"Server.TLSConfig", Field, 0, ""},
    +		{"Server.TLSNextProto", Field, 1, ""},
    +		{"Server.WriteTimeout", Field, 0, ""},
    +		{"ServerContextKey", Var, 7, ""},
    +		{"SetCookie", Func, 0, "func(w ResponseWriter, cookie *Cookie)"},
    +		{"StateActive", Const, 3, ""},
    +		{"StateClosed", Const, 3, ""},
    +		{"StateHijacked", Const, 3, ""},
    +		{"StateIdle", Const, 3, ""},
    +		{"StateNew", Const, 3, ""},
    +		{"StatusAccepted", Const, 0, ""},
    +		{"StatusAlreadyReported", Const, 7, ""},
    +		{"StatusBadGateway", Const, 0, ""},
    +		{"StatusBadRequest", Const, 0, ""},
    +		{"StatusConflict", Const, 0, ""},
    +		{"StatusContinue", Const, 0, ""},
    +		{"StatusCreated", Const, 0, ""},
    +		{"StatusEarlyHints", Const, 13, ""},
    +		{"StatusExpectationFailed", Const, 0, ""},
    +		{"StatusFailedDependency", Const, 7, ""},
    +		{"StatusForbidden", Const, 0, ""},
    +		{"StatusFound", Const, 0, ""},
    +		{"StatusGatewayTimeout", Const, 0, ""},
    +		{"StatusGone", Const, 0, ""},
    +		{"StatusHTTPVersionNotSupported", Const, 0, ""},
    +		{"StatusIMUsed", Const, 7, ""},
    +		{"StatusInsufficientStorage", Const, 7, ""},
    +		{"StatusInternalServerError", Const, 0, ""},
    +		{"StatusLengthRequired", Const, 0, ""},
    +		{"StatusLocked", Const, 7, ""},
    +		{"StatusLoopDetected", Const, 7, ""},
    +		{"StatusMethodNotAllowed", Const, 0, ""},
    +		{"StatusMisdirectedRequest", Const, 11, ""},
    +		{"StatusMovedPermanently", Const, 0, ""},
    +		{"StatusMultiStatus", Const, 7, ""},
    +		{"StatusMultipleChoices", Const, 0, ""},
    +		{"StatusNetworkAuthenticationRequired", Const, 6, ""},
    +		{"StatusNoContent", Const, 0, ""},
    +		{"StatusNonAuthoritativeInfo", Const, 0, ""},
    +		{"StatusNotAcceptable", Const, 0, ""},
    +		{"StatusNotExtended", Const, 7, ""},
    +		{"StatusNotFound", Const, 0, ""},
    +		{"StatusNotImplemented", Const, 0, ""},
    +		{"StatusNotModified", Const, 0, ""},
    +		{"StatusOK", Const, 0, ""},
    +		{"StatusPartialContent", Const, 0, ""},
    +		{"StatusPaymentRequired", Const, 0, ""},
    +		{"StatusPermanentRedirect", Const, 7, ""},
    +		{"StatusPreconditionFailed", Const, 0, ""},
    +		{"StatusPreconditionRequired", Const, 6, ""},
    +		{"StatusProcessing", Const, 7, ""},
    +		{"StatusProxyAuthRequired", Const, 0, ""},
    +		{"StatusRequestEntityTooLarge", Const, 0, ""},
    +		{"StatusRequestHeaderFieldsTooLarge", Const, 6, ""},
    +		{"StatusRequestTimeout", Const, 0, ""},
    +		{"StatusRequestURITooLong", Const, 0, ""},
    +		{"StatusRequestedRangeNotSatisfiable", Const, 0, ""},
    +		{"StatusResetContent", Const, 0, ""},
    +		{"StatusSeeOther", Const, 0, ""},
    +		{"StatusServiceUnavailable", Const, 0, ""},
    +		{"StatusSwitchingProtocols", Const, 0, ""},
    +		{"StatusTeapot", Const, 0, ""},
    +		{"StatusTemporaryRedirect", Const, 0, ""},
    +		{"StatusText", Func, 0, "func(code int) string"},
    +		{"StatusTooEarly", Const, 12, ""},
    +		{"StatusTooManyRequests", Const, 6, ""},
    +		{"StatusUnauthorized", Const, 0, ""},
    +		{"StatusUnavailableForLegalReasons", Const, 6, ""},
    +		{"StatusUnprocessableEntity", Const, 7, ""},
    +		{"StatusUnsupportedMediaType", Const, 0, ""},
    +		{"StatusUpgradeRequired", Const, 7, ""},
    +		{"StatusUseProxy", Const, 0, ""},
    +		{"StatusVariantAlsoNegotiates", Const, 7, ""},
    +		{"StripPrefix", Func, 0, "func(prefix string, h Handler) Handler"},
    +		{"TimeFormat", Const, 0, ""},
    +		{"TimeoutHandler", Func, 0, "func(h Handler, dt time.Duration, msg string) Handler"},
    +		{"TrailerPrefix", Const, 8, ""},
    +		{"Transport", Type, 0, ""},
    +		{"Transport.Dial", Field, 0, ""},
    +		{"Transport.DialContext", Field, 7, ""},
    +		{"Transport.DialTLS", Field, 4, ""},
    +		{"Transport.DialTLSContext", Field, 14, ""},
    +		{"Transport.DisableCompression", Field, 0, ""},
    +		{"Transport.DisableKeepAlives", Field, 0, ""},
    +		{"Transport.ExpectContinueTimeout", Field, 6, ""},
    +		{"Transport.ForceAttemptHTTP2", Field, 13, ""},
    +		{"Transport.GetProxyConnectHeader", Field, 16, ""},
    +		{"Transport.HTTP2", Field, 24, ""},
    +		{"Transport.IdleConnTimeout", Field, 7, ""},
    +		{"Transport.MaxConnsPerHost", Field, 11, ""},
    +		{"Transport.MaxIdleConns", Field, 7, ""},
    +		{"Transport.MaxIdleConnsPerHost", Field, 0, ""},
    +		{"Transport.MaxResponseHeaderBytes", Field, 7, ""},
    +		{"Transport.OnProxyConnectResponse", Field, 20, ""},
    +		{"Transport.Protocols", Field, 24, ""},
    +		{"Transport.Proxy", Field, 0, ""},
    +		{"Transport.ProxyConnectHeader", Field, 8, ""},
    +		{"Transport.ReadBufferSize", Field, 13, ""},
    +		{"Transport.ResponseHeaderTimeout", Field, 1, ""},
    +		{"Transport.TLSClientConfig", Field, 0, ""},
    +		{"Transport.TLSHandshakeTimeout", Field, 3, ""},
    +		{"Transport.TLSNextProto", Field, 6, ""},
    +		{"Transport.WriteBufferSize", Field, 13, ""},
     	},
     	"net/http/cgi": {
    -		{"(*Handler).ServeHTTP", Method, 0},
    -		{"Handler", Type, 0},
    -		{"Handler.Args", Field, 0},
    -		{"Handler.Dir", Field, 0},
    -		{"Handler.Env", Field, 0},
    -		{"Handler.InheritEnv", Field, 0},
    -		{"Handler.Logger", Field, 0},
    -		{"Handler.Path", Field, 0},
    -		{"Handler.PathLocationHandler", Field, 0},
    -		{"Handler.Root", Field, 0},
    -		{"Handler.Stderr", Field, 7},
    -		{"Request", Func, 0},
    -		{"RequestFromMap", Func, 0},
    -		{"Serve", Func, 0},
    +		{"(*Handler).ServeHTTP", Method, 0, ""},
    +		{"Handler", Type, 0, ""},
    +		{"Handler.Args", Field, 0, ""},
    +		{"Handler.Dir", Field, 0, ""},
    +		{"Handler.Env", Field, 0, ""},
    +		{"Handler.InheritEnv", Field, 0, ""},
    +		{"Handler.Logger", Field, 0, ""},
    +		{"Handler.Path", Field, 0, ""},
    +		{"Handler.PathLocationHandler", Field, 0, ""},
    +		{"Handler.Root", Field, 0, ""},
    +		{"Handler.Stderr", Field, 7, ""},
    +		{"Request", Func, 0, "func() (*http.Request, error)"},
    +		{"RequestFromMap", Func, 0, "func(params map[string]string) (*http.Request, error)"},
    +		{"Serve", Func, 0, "func(handler http.Handler) error"},
     	},
     	"net/http/cookiejar": {
    -		{"(*Jar).Cookies", Method, 1},
    -		{"(*Jar).SetCookies", Method, 1},
    -		{"Jar", Type, 1},
    -		{"New", Func, 1},
    -		{"Options", Type, 1},
    -		{"Options.PublicSuffixList", Field, 1},
    -		{"PublicSuffixList", Type, 1},
    +		{"(*Jar).Cookies", Method, 1, ""},
    +		{"(*Jar).SetCookies", Method, 1, ""},
    +		{"Jar", Type, 1, ""},
    +		{"New", Func, 1, "func(o *Options) (*Jar, error)"},
    +		{"Options", Type, 1, ""},
    +		{"Options.PublicSuffixList", Field, 1, ""},
    +		{"PublicSuffixList", Type, 1, ""},
     	},
     	"net/http/fcgi": {
    -		{"ErrConnClosed", Var, 5},
    -		{"ErrRequestAborted", Var, 5},
    -		{"ProcessEnv", Func, 9},
    -		{"Serve", Func, 0},
    +		{"ErrConnClosed", Var, 5, ""},
    +		{"ErrRequestAborted", Var, 5, ""},
    +		{"ProcessEnv", Func, 9, "func(r *http.Request) map[string]string"},
    +		{"Serve", Func, 0, "func(l net.Listener, handler http.Handler) error"},
     	},
     	"net/http/httptest": {
    -		{"(*ResponseRecorder).Flush", Method, 0},
    -		{"(*ResponseRecorder).Header", Method, 0},
    -		{"(*ResponseRecorder).Result", Method, 7},
    -		{"(*ResponseRecorder).Write", Method, 0},
    -		{"(*ResponseRecorder).WriteHeader", Method, 0},
    -		{"(*ResponseRecorder).WriteString", Method, 6},
    -		{"(*Server).Certificate", Method, 9},
    -		{"(*Server).Client", Method, 9},
    -		{"(*Server).Close", Method, 0},
    -		{"(*Server).CloseClientConnections", Method, 0},
    -		{"(*Server).Start", Method, 0},
    -		{"(*Server).StartTLS", Method, 0},
    -		{"DefaultRemoteAddr", Const, 0},
    -		{"NewRecorder", Func, 0},
    -		{"NewRequest", Func, 7},
    -		{"NewRequestWithContext", Func, 23},
    -		{"NewServer", Func, 0},
    -		{"NewTLSServer", Func, 0},
    -		{"NewUnstartedServer", Func, 0},
    -		{"ResponseRecorder", Type, 0},
    -		{"ResponseRecorder.Body", Field, 0},
    -		{"ResponseRecorder.Code", Field, 0},
    -		{"ResponseRecorder.Flushed", Field, 0},
    -		{"ResponseRecorder.HeaderMap", Field, 0},
    -		{"Server", Type, 0},
    -		{"Server.Config", Field, 0},
    -		{"Server.EnableHTTP2", Field, 14},
    -		{"Server.Listener", Field, 0},
    -		{"Server.TLS", Field, 0},
    -		{"Server.URL", Field, 0},
    +		{"(*ResponseRecorder).Flush", Method, 0, ""},
    +		{"(*ResponseRecorder).Header", Method, 0, ""},
    +		{"(*ResponseRecorder).Result", Method, 7, ""},
    +		{"(*ResponseRecorder).Write", Method, 0, ""},
    +		{"(*ResponseRecorder).WriteHeader", Method, 0, ""},
    +		{"(*ResponseRecorder).WriteString", Method, 6, ""},
    +		{"(*Server).Certificate", Method, 9, ""},
    +		{"(*Server).Client", Method, 9, ""},
    +		{"(*Server).Close", Method, 0, ""},
    +		{"(*Server).CloseClientConnections", Method, 0, ""},
    +		{"(*Server).Start", Method, 0, ""},
    +		{"(*Server).StartTLS", Method, 0, ""},
    +		{"DefaultRemoteAddr", Const, 0, ""},
    +		{"NewRecorder", Func, 0, "func() *ResponseRecorder"},
    +		{"NewRequest", Func, 7, "func(method string, target string, body io.Reader) *http.Request"},
    +		{"NewRequestWithContext", Func, 23, "func(ctx context.Context, method string, target string, body io.Reader) *http.Request"},
    +		{"NewServer", Func, 0, "func(handler http.Handler) *Server"},
    +		{"NewTLSServer", Func, 0, "func(handler http.Handler) *Server"},
    +		{"NewUnstartedServer", Func, 0, "func(handler http.Handler) *Server"},
    +		{"ResponseRecorder", Type, 0, ""},
    +		{"ResponseRecorder.Body", Field, 0, ""},
    +		{"ResponseRecorder.Code", Field, 0, ""},
    +		{"ResponseRecorder.Flushed", Field, 0, ""},
    +		{"ResponseRecorder.HeaderMap", Field, 0, ""},
    +		{"Server", Type, 0, ""},
    +		{"Server.Config", Field, 0, ""},
    +		{"Server.EnableHTTP2", Field, 14, ""},
    +		{"Server.Listener", Field, 0, ""},
    +		{"Server.TLS", Field, 0, ""},
    +		{"Server.URL", Field, 0, ""},
     	},
     	"net/http/httptrace": {
    -		{"ClientTrace", Type, 7},
    -		{"ClientTrace.ConnectDone", Field, 7},
    -		{"ClientTrace.ConnectStart", Field, 7},
    -		{"ClientTrace.DNSDone", Field, 7},
    -		{"ClientTrace.DNSStart", Field, 7},
    -		{"ClientTrace.GetConn", Field, 7},
    -		{"ClientTrace.Got100Continue", Field, 7},
    -		{"ClientTrace.Got1xxResponse", Field, 11},
    -		{"ClientTrace.GotConn", Field, 7},
    -		{"ClientTrace.GotFirstResponseByte", Field, 7},
    -		{"ClientTrace.PutIdleConn", Field, 7},
    -		{"ClientTrace.TLSHandshakeDone", Field, 8},
    -		{"ClientTrace.TLSHandshakeStart", Field, 8},
    -		{"ClientTrace.Wait100Continue", Field, 7},
    -		{"ClientTrace.WroteHeaderField", Field, 11},
    -		{"ClientTrace.WroteHeaders", Field, 7},
    -		{"ClientTrace.WroteRequest", Field, 7},
    -		{"ContextClientTrace", Func, 7},
    -		{"DNSDoneInfo", Type, 7},
    -		{"DNSDoneInfo.Addrs", Field, 7},
    -		{"DNSDoneInfo.Coalesced", Field, 7},
    -		{"DNSDoneInfo.Err", Field, 7},
    -		{"DNSStartInfo", Type, 7},
    -		{"DNSStartInfo.Host", Field, 7},
    -		{"GotConnInfo", Type, 7},
    -		{"GotConnInfo.Conn", Field, 7},
    -		{"GotConnInfo.IdleTime", Field, 7},
    -		{"GotConnInfo.Reused", Field, 7},
    -		{"GotConnInfo.WasIdle", Field, 7},
    -		{"WithClientTrace", Func, 7},
    -		{"WroteRequestInfo", Type, 7},
    -		{"WroteRequestInfo.Err", Field, 7},
    +		{"ClientTrace", Type, 7, ""},
    +		{"ClientTrace.ConnectDone", Field, 7, ""},
    +		{"ClientTrace.ConnectStart", Field, 7, ""},
    +		{"ClientTrace.DNSDone", Field, 7, ""},
    +		{"ClientTrace.DNSStart", Field, 7, ""},
    +		{"ClientTrace.GetConn", Field, 7, ""},
    +		{"ClientTrace.Got100Continue", Field, 7, ""},
    +		{"ClientTrace.Got1xxResponse", Field, 11, ""},
    +		{"ClientTrace.GotConn", Field, 7, ""},
    +		{"ClientTrace.GotFirstResponseByte", Field, 7, ""},
    +		{"ClientTrace.PutIdleConn", Field, 7, ""},
    +		{"ClientTrace.TLSHandshakeDone", Field, 8, ""},
    +		{"ClientTrace.TLSHandshakeStart", Field, 8, ""},
    +		{"ClientTrace.Wait100Continue", Field, 7, ""},
    +		{"ClientTrace.WroteHeaderField", Field, 11, ""},
    +		{"ClientTrace.WroteHeaders", Field, 7, ""},
    +		{"ClientTrace.WroteRequest", Field, 7, ""},
    +		{"ContextClientTrace", Func, 7, "func(ctx context.Context) *ClientTrace"},
    +		{"DNSDoneInfo", Type, 7, ""},
    +		{"DNSDoneInfo.Addrs", Field, 7, ""},
    +		{"DNSDoneInfo.Coalesced", Field, 7, ""},
    +		{"DNSDoneInfo.Err", Field, 7, ""},
    +		{"DNSStartInfo", Type, 7, ""},
    +		{"DNSStartInfo.Host", Field, 7, ""},
    +		{"GotConnInfo", Type, 7, ""},
    +		{"GotConnInfo.Conn", Field, 7, ""},
    +		{"GotConnInfo.IdleTime", Field, 7, ""},
    +		{"GotConnInfo.Reused", Field, 7, ""},
    +		{"GotConnInfo.WasIdle", Field, 7, ""},
    +		{"WithClientTrace", Func, 7, "func(ctx context.Context, trace *ClientTrace) context.Context"},
    +		{"WroteRequestInfo", Type, 7, ""},
    +		{"WroteRequestInfo.Err", Field, 7, ""},
     	},
     	"net/http/httputil": {
    -		{"(*ClientConn).Close", Method, 0},
    -		{"(*ClientConn).Do", Method, 0},
    -		{"(*ClientConn).Hijack", Method, 0},
    -		{"(*ClientConn).Pending", Method, 0},
    -		{"(*ClientConn).Read", Method, 0},
    -		{"(*ClientConn).Write", Method, 0},
    -		{"(*ProxyRequest).SetURL", Method, 20},
    -		{"(*ProxyRequest).SetXForwarded", Method, 20},
    -		{"(*ReverseProxy).ServeHTTP", Method, 0},
    -		{"(*ServerConn).Close", Method, 0},
    -		{"(*ServerConn).Hijack", Method, 0},
    -		{"(*ServerConn).Pending", Method, 0},
    -		{"(*ServerConn).Read", Method, 0},
    -		{"(*ServerConn).Write", Method, 0},
    -		{"BufferPool", Type, 6},
    -		{"ClientConn", Type, 0},
    -		{"DumpRequest", Func, 0},
    -		{"DumpRequestOut", Func, 0},
    -		{"DumpResponse", Func, 0},
    -		{"ErrClosed", Var, 0},
    -		{"ErrLineTooLong", Var, 0},
    -		{"ErrPersistEOF", Var, 0},
    -		{"ErrPipeline", Var, 0},
    -		{"NewChunkedReader", Func, 0},
    -		{"NewChunkedWriter", Func, 0},
    -		{"NewClientConn", Func, 0},
    -		{"NewProxyClientConn", Func, 0},
    -		{"NewServerConn", Func, 0},
    -		{"NewSingleHostReverseProxy", Func, 0},
    -		{"ProxyRequest", Type, 20},
    -		{"ProxyRequest.In", Field, 20},
    -		{"ProxyRequest.Out", Field, 20},
    -		{"ReverseProxy", Type, 0},
    -		{"ReverseProxy.BufferPool", Field, 6},
    -		{"ReverseProxy.Director", Field, 0},
    -		{"ReverseProxy.ErrorHandler", Field, 11},
    -		{"ReverseProxy.ErrorLog", Field, 4},
    -		{"ReverseProxy.FlushInterval", Field, 0},
    -		{"ReverseProxy.ModifyResponse", Field, 8},
    -		{"ReverseProxy.Rewrite", Field, 20},
    -		{"ReverseProxy.Transport", Field, 0},
    -		{"ServerConn", Type, 0},
    +		{"(*ClientConn).Close", Method, 0, ""},
    +		{"(*ClientConn).Do", Method, 0, ""},
    +		{"(*ClientConn).Hijack", Method, 0, ""},
    +		{"(*ClientConn).Pending", Method, 0, ""},
    +		{"(*ClientConn).Read", Method, 0, ""},
    +		{"(*ClientConn).Write", Method, 0, ""},
    +		{"(*ProxyRequest).SetURL", Method, 20, ""},
    +		{"(*ProxyRequest).SetXForwarded", Method, 20, ""},
    +		{"(*ReverseProxy).ServeHTTP", Method, 0, ""},
    +		{"(*ServerConn).Close", Method, 0, ""},
    +		{"(*ServerConn).Hijack", Method, 0, ""},
    +		{"(*ServerConn).Pending", Method, 0, ""},
    +		{"(*ServerConn).Read", Method, 0, ""},
    +		{"(*ServerConn).Write", Method, 0, ""},
    +		{"BufferPool", Type, 6, ""},
    +		{"ClientConn", Type, 0, ""},
    +		{"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
    +		{"DumpRequestOut", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
    +		{"DumpResponse", Func, 0, "func(resp *http.Response, body bool) ([]byte, error)"},
    +		{"ErrClosed", Var, 0, ""},
    +		{"ErrLineTooLong", Var, 0, ""},
    +		{"ErrPersistEOF", Var, 0, ""},
    +		{"ErrPipeline", Var, 0, ""},
    +		{"NewChunkedReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"NewChunkedWriter", Func, 0, "func(w io.Writer) io.WriteCloser"},
    +		{"NewClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
    +		{"NewProxyClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
    +		{"NewServerConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ServerConn"},
    +		{"NewSingleHostReverseProxy", Func, 0, "func(target *url.URL) *ReverseProxy"},
    +		{"ProxyRequest", Type, 20, ""},
    +		{"ProxyRequest.In", Field, 20, ""},
    +		{"ProxyRequest.Out", Field, 20, ""},
    +		{"ReverseProxy", Type, 0, ""},
    +		{"ReverseProxy.BufferPool", Field, 6, ""},
    +		{"ReverseProxy.Director", Field, 0, ""},
    +		{"ReverseProxy.ErrorHandler", Field, 11, ""},
    +		{"ReverseProxy.ErrorLog", Field, 4, ""},
    +		{"ReverseProxy.FlushInterval", Field, 0, ""},
    +		{"ReverseProxy.ModifyResponse", Field, 8, ""},
    +		{"ReverseProxy.Rewrite", Field, 20, ""},
    +		{"ReverseProxy.Transport", Field, 0, ""},
    +		{"ServerConn", Type, 0, ""},
     	},
     	"net/http/pprof": {
    -		{"Cmdline", Func, 0},
    -		{"Handler", Func, 0},
    -		{"Index", Func, 0},
    -		{"Profile", Func, 0},
    -		{"Symbol", Func, 0},
    -		{"Trace", Func, 5},
    +		{"Cmdline", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
    +		{"Handler", Func, 0, "func(name string) http.Handler"},
    +		{"Index", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
    +		{"Profile", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
    +		{"Symbol", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
    +		{"Trace", Func, 5, "func(w http.ResponseWriter, r *http.Request)"},
     	},
     	"net/mail": {
    -		{"(*Address).String", Method, 0},
    -		{"(*AddressParser).Parse", Method, 5},
    -		{"(*AddressParser).ParseList", Method, 5},
    -		{"(Header).AddressList", Method, 0},
    -		{"(Header).Date", Method, 0},
    -		{"(Header).Get", Method, 0},
    -		{"Address", Type, 0},
    -		{"Address.Address", Field, 0},
    -		{"Address.Name", Field, 0},
    -		{"AddressParser", Type, 5},
    -		{"AddressParser.WordDecoder", Field, 5},
    -		{"ErrHeaderNotPresent", Var, 0},
    -		{"Header", Type, 0},
    -		{"Message", Type, 0},
    -		{"Message.Body", Field, 0},
    -		{"Message.Header", Field, 0},
    -		{"ParseAddress", Func, 1},
    -		{"ParseAddressList", Func, 1},
    -		{"ParseDate", Func, 8},
    -		{"ReadMessage", Func, 0},
    +		{"(*Address).String", Method, 0, ""},
    +		{"(*AddressParser).Parse", Method, 5, ""},
    +		{"(*AddressParser).ParseList", Method, 5, ""},
    +		{"(Header).AddressList", Method, 0, ""},
    +		{"(Header).Date", Method, 0, ""},
    +		{"(Header).Get", Method, 0, ""},
    +		{"Address", Type, 0, ""},
    +		{"Address.Address", Field, 0, ""},
    +		{"Address.Name", Field, 0, ""},
    +		{"AddressParser", Type, 5, ""},
    +		{"AddressParser.WordDecoder", Field, 5, ""},
    +		{"ErrHeaderNotPresent", Var, 0, ""},
    +		{"Header", Type, 0, ""},
    +		{"Message", Type, 0, ""},
    +		{"Message.Body", Field, 0, ""},
    +		{"Message.Header", Field, 0, ""},
    +		{"ParseAddress", Func, 1, "func(address string) (*Address, error)"},
    +		{"ParseAddressList", Func, 1, "func(list string) ([]*Address, error)"},
    +		{"ParseDate", Func, 8, "func(date string) (time.Time, error)"},
    +		{"ReadMessage", Func, 0, "func(r io.Reader) (msg *Message, err error)"},
     	},
     	"net/netip": {
    -		{"(*Addr).UnmarshalBinary", Method, 18},
    -		{"(*Addr).UnmarshalText", Method, 18},
    -		{"(*AddrPort).UnmarshalBinary", Method, 18},
    -		{"(*AddrPort).UnmarshalText", Method, 18},
    -		{"(*Prefix).UnmarshalBinary", Method, 18},
    -		{"(*Prefix).UnmarshalText", Method, 18},
    -		{"(Addr).AppendBinary", Method, 24},
    -		{"(Addr).AppendText", Method, 24},
    -		{"(Addr).AppendTo", Method, 18},
    -		{"(Addr).As16", Method, 18},
    -		{"(Addr).As4", Method, 18},
    -		{"(Addr).AsSlice", Method, 18},
    -		{"(Addr).BitLen", Method, 18},
    -		{"(Addr).Compare", Method, 18},
    -		{"(Addr).Is4", Method, 18},
    -		{"(Addr).Is4In6", Method, 18},
    -		{"(Addr).Is6", Method, 18},
    -		{"(Addr).IsGlobalUnicast", Method, 18},
    -		{"(Addr).IsInterfaceLocalMulticast", Method, 18},
    -		{"(Addr).IsLinkLocalMulticast", Method, 18},
    -		{"(Addr).IsLinkLocalUnicast", Method, 18},
    -		{"(Addr).IsLoopback", Method, 18},
    -		{"(Addr).IsMulticast", Method, 18},
    -		{"(Addr).IsPrivate", Method, 18},
    -		{"(Addr).IsUnspecified", Method, 18},
    -		{"(Addr).IsValid", Method, 18},
    -		{"(Addr).Less", Method, 18},
    -		{"(Addr).MarshalBinary", Method, 18},
    -		{"(Addr).MarshalText", Method, 18},
    -		{"(Addr).Next", Method, 18},
    -		{"(Addr).Prefix", Method, 18},
    -		{"(Addr).Prev", Method, 18},
    -		{"(Addr).String", Method, 18},
    -		{"(Addr).StringExpanded", Method, 18},
    -		{"(Addr).Unmap", Method, 18},
    -		{"(Addr).WithZone", Method, 18},
    -		{"(Addr).Zone", Method, 18},
    -		{"(AddrPort).Addr", Method, 18},
    -		{"(AddrPort).AppendBinary", Method, 24},
    -		{"(AddrPort).AppendText", Method, 24},
    -		{"(AddrPort).AppendTo", Method, 18},
    -		{"(AddrPort).Compare", Method, 22},
    -		{"(AddrPort).IsValid", Method, 18},
    -		{"(AddrPort).MarshalBinary", Method, 18},
    -		{"(AddrPort).MarshalText", Method, 18},
    -		{"(AddrPort).Port", Method, 18},
    -		{"(AddrPort).String", Method, 18},
    -		{"(Prefix).Addr", Method, 18},
    -		{"(Prefix).AppendBinary", Method, 24},
    -		{"(Prefix).AppendText", Method, 24},
    -		{"(Prefix).AppendTo", Method, 18},
    -		{"(Prefix).Bits", Method, 18},
    -		{"(Prefix).Contains", Method, 18},
    -		{"(Prefix).IsSingleIP", Method, 18},
    -		{"(Prefix).IsValid", Method, 18},
    -		{"(Prefix).MarshalBinary", Method, 18},
    -		{"(Prefix).MarshalText", Method, 18},
    -		{"(Prefix).Masked", Method, 18},
    -		{"(Prefix).Overlaps", Method, 18},
    -		{"(Prefix).String", Method, 18},
    -		{"Addr", Type, 18},
    -		{"AddrFrom16", Func, 18},
    -		{"AddrFrom4", Func, 18},
    -		{"AddrFromSlice", Func, 18},
    -		{"AddrPort", Type, 18},
    -		{"AddrPortFrom", Func, 18},
    -		{"IPv4Unspecified", Func, 18},
    -		{"IPv6LinkLocalAllNodes", Func, 18},
    -		{"IPv6LinkLocalAllRouters", Func, 20},
    -		{"IPv6Loopback", Func, 20},
    -		{"IPv6Unspecified", Func, 18},
    -		{"MustParseAddr", Func, 18},
    -		{"MustParseAddrPort", Func, 18},
    -		{"MustParsePrefix", Func, 18},
    -		{"ParseAddr", Func, 18},
    -		{"ParseAddrPort", Func, 18},
    -		{"ParsePrefix", Func, 18},
    -		{"Prefix", Type, 18},
    -		{"PrefixFrom", Func, 18},
    +		{"(*Addr).UnmarshalBinary", Method, 18, ""},
    +		{"(*Addr).UnmarshalText", Method, 18, ""},
    +		{"(*AddrPort).UnmarshalBinary", Method, 18, ""},
    +		{"(*AddrPort).UnmarshalText", Method, 18, ""},
    +		{"(*Prefix).UnmarshalBinary", Method, 18, ""},
    +		{"(*Prefix).UnmarshalText", Method, 18, ""},
    +		{"(Addr).AppendBinary", Method, 24, ""},
    +		{"(Addr).AppendText", Method, 24, ""},
    +		{"(Addr).AppendTo", Method, 18, ""},
    +		{"(Addr).As16", Method, 18, ""},
    +		{"(Addr).As4", Method, 18, ""},
    +		{"(Addr).AsSlice", Method, 18, ""},
    +		{"(Addr).BitLen", Method, 18, ""},
    +		{"(Addr).Compare", Method, 18, ""},
    +		{"(Addr).Is4", Method, 18, ""},
    +		{"(Addr).Is4In6", Method, 18, ""},
    +		{"(Addr).Is6", Method, 18, ""},
    +		{"(Addr).IsGlobalUnicast", Method, 18, ""},
    +		{"(Addr).IsInterfaceLocalMulticast", Method, 18, ""},
    +		{"(Addr).IsLinkLocalMulticast", Method, 18, ""},
    +		{"(Addr).IsLinkLocalUnicast", Method, 18, ""},
    +		{"(Addr).IsLoopback", Method, 18, ""},
    +		{"(Addr).IsMulticast", Method, 18, ""},
    +		{"(Addr).IsPrivate", Method, 18, ""},
    +		{"(Addr).IsUnspecified", Method, 18, ""},
    +		{"(Addr).IsValid", Method, 18, ""},
    +		{"(Addr).Less", Method, 18, ""},
    +		{"(Addr).MarshalBinary", Method, 18, ""},
    +		{"(Addr).MarshalText", Method, 18, ""},
    +		{"(Addr).Next", Method, 18, ""},
    +		{"(Addr).Prefix", Method, 18, ""},
    +		{"(Addr).Prev", Method, 18, ""},
    +		{"(Addr).String", Method, 18, ""},
    +		{"(Addr).StringExpanded", Method, 18, ""},
    +		{"(Addr).Unmap", Method, 18, ""},
    +		{"(Addr).WithZone", Method, 18, ""},
    +		{"(Addr).Zone", Method, 18, ""},
    +		{"(AddrPort).Addr", Method, 18, ""},
    +		{"(AddrPort).AppendBinary", Method, 24, ""},
    +		{"(AddrPort).AppendText", Method, 24, ""},
    +		{"(AddrPort).AppendTo", Method, 18, ""},
    +		{"(AddrPort).Compare", Method, 22, ""},
    +		{"(AddrPort).IsValid", Method, 18, ""},
    +		{"(AddrPort).MarshalBinary", Method, 18, ""},
    +		{"(AddrPort).MarshalText", Method, 18, ""},
    +		{"(AddrPort).Port", Method, 18, ""},
    +		{"(AddrPort).String", Method, 18, ""},
    +		{"(Prefix).Addr", Method, 18, ""},
    +		{"(Prefix).AppendBinary", Method, 24, ""},
    +		{"(Prefix).AppendText", Method, 24, ""},
    +		{"(Prefix).AppendTo", Method, 18, ""},
    +		{"(Prefix).Bits", Method, 18, ""},
    +		{"(Prefix).Contains", Method, 18, ""},
    +		{"(Prefix).IsSingleIP", Method, 18, ""},
    +		{"(Prefix).IsValid", Method, 18, ""},
    +		{"(Prefix).MarshalBinary", Method, 18, ""},
    +		{"(Prefix).MarshalText", Method, 18, ""},
    +		{"(Prefix).Masked", Method, 18, ""},
    +		{"(Prefix).Overlaps", Method, 18, ""},
    +		{"(Prefix).String", Method, 18, ""},
    +		{"Addr", Type, 18, ""},
    +		{"AddrFrom16", Func, 18, "func(addr [16]byte) Addr"},
    +		{"AddrFrom4", Func, 18, "func(addr [4]byte) Addr"},
    +		{"AddrFromSlice", Func, 18, "func(slice []byte) (ip Addr, ok bool)"},
    +		{"AddrPort", Type, 18, ""},
    +		{"AddrPortFrom", Func, 18, "func(ip Addr, port uint16) AddrPort"},
    +		{"IPv4Unspecified", Func, 18, "func() Addr"},
    +		{"IPv6LinkLocalAllNodes", Func, 18, "func() Addr"},
    +		{"IPv6LinkLocalAllRouters", Func, 20, "func() Addr"},
    +		{"IPv6Loopback", Func, 20, "func() Addr"},
    +		{"IPv6Unspecified", Func, 18, "func() Addr"},
    +		{"MustParseAddr", Func, 18, "func(s string) Addr"},
    +		{"MustParseAddrPort", Func, 18, "func(s string) AddrPort"},
    +		{"MustParsePrefix", Func, 18, "func(s string) Prefix"},
    +		{"ParseAddr", Func, 18, "func(s string) (Addr, error)"},
    +		{"ParseAddrPort", Func, 18, "func(s string) (AddrPort, error)"},
    +		{"ParsePrefix", Func, 18, "func(s string) (Prefix, error)"},
    +		{"Prefix", Type, 18, ""},
    +		{"PrefixFrom", Func, 18, "func(ip Addr, bits int) Prefix"},
     	},
     	"net/rpc": {
    -		{"(*Client).Call", Method, 0},
    -		{"(*Client).Close", Method, 0},
    -		{"(*Client).Go", Method, 0},
    -		{"(*Server).Accept", Method, 0},
    -		{"(*Server).HandleHTTP", Method, 0},
    -		{"(*Server).Register", Method, 0},
    -		{"(*Server).RegisterName", Method, 0},
    -		{"(*Server).ServeCodec", Method, 0},
    -		{"(*Server).ServeConn", Method, 0},
    -		{"(*Server).ServeHTTP", Method, 0},
    -		{"(*Server).ServeRequest", Method, 0},
    -		{"(ServerError).Error", Method, 0},
    -		{"Accept", Func, 0},
    -		{"Call", Type, 0},
    -		{"Call.Args", Field, 0},
    -		{"Call.Done", Field, 0},
    -		{"Call.Error", Field, 0},
    -		{"Call.Reply", Field, 0},
    -		{"Call.ServiceMethod", Field, 0},
    -		{"Client", Type, 0},
    -		{"ClientCodec", Type, 0},
    -		{"DefaultDebugPath", Const, 0},
    -		{"DefaultRPCPath", Const, 0},
    -		{"DefaultServer", Var, 0},
    -		{"Dial", Func, 0},
    -		{"DialHTTP", Func, 0},
    -		{"DialHTTPPath", Func, 0},
    -		{"ErrShutdown", Var, 0},
    -		{"HandleHTTP", Func, 0},
    -		{"NewClient", Func, 0},
    -		{"NewClientWithCodec", Func, 0},
    -		{"NewServer", Func, 0},
    -		{"Register", Func, 0},
    -		{"RegisterName", Func, 0},
    -		{"Request", Type, 0},
    -		{"Request.Seq", Field, 0},
    -		{"Request.ServiceMethod", Field, 0},
    -		{"Response", Type, 0},
    -		{"Response.Error", Field, 0},
    -		{"Response.Seq", Field, 0},
    -		{"Response.ServiceMethod", Field, 0},
    -		{"ServeCodec", Func, 0},
    -		{"ServeConn", Func, 0},
    -		{"ServeRequest", Func, 0},
    -		{"Server", Type, 0},
    -		{"ServerCodec", Type, 0},
    -		{"ServerError", Type, 0},
    +		{"(*Client).Call", Method, 0, ""},
    +		{"(*Client).Close", Method, 0, ""},
    +		{"(*Client).Go", Method, 0, ""},
    +		{"(*Server).Accept", Method, 0, ""},
    +		{"(*Server).HandleHTTP", Method, 0, ""},
    +		{"(*Server).Register", Method, 0, ""},
    +		{"(*Server).RegisterName", Method, 0, ""},
    +		{"(*Server).ServeCodec", Method, 0, ""},
    +		{"(*Server).ServeConn", Method, 0, ""},
    +		{"(*Server).ServeHTTP", Method, 0, ""},
    +		{"(*Server).ServeRequest", Method, 0, ""},
    +		{"(ServerError).Error", Method, 0, ""},
    +		{"Accept", Func, 0, "func(lis net.Listener)"},
    +		{"Call", Type, 0, ""},
    +		{"Call.Args", Field, 0, ""},
    +		{"Call.Done", Field, 0, ""},
    +		{"Call.Error", Field, 0, ""},
    +		{"Call.Reply", Field, 0, ""},
    +		{"Call.ServiceMethod", Field, 0, ""},
    +		{"Client", Type, 0, ""},
    +		{"ClientCodec", Type, 0, ""},
    +		{"DefaultDebugPath", Const, 0, ""},
    +		{"DefaultRPCPath", Const, 0, ""},
    +		{"DefaultServer", Var, 0, ""},
    +		{"Dial", Func, 0, "func(network string, address string) (*Client, error)"},
    +		{"DialHTTP", Func, 0, "func(network string, address string) (*Client, error)"},
    +		{"DialHTTPPath", Func, 0, "func(network string, address string, path string) (*Client, error)"},
    +		{"ErrShutdown", Var, 0, ""},
    +		{"HandleHTTP", Func, 0, "func()"},
    +		{"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *Client"},
    +		{"NewClientWithCodec", Func, 0, "func(codec ClientCodec) *Client"},
    +		{"NewServer", Func, 0, "func() *Server"},
    +		{"Register", Func, 0, "func(rcvr any) error"},
    +		{"RegisterName", Func, 0, "func(name string, rcvr any) error"},
    +		{"Request", Type, 0, ""},
    +		{"Request.Seq", Field, 0, ""},
    +		{"Request.ServiceMethod", Field, 0, ""},
    +		{"Response", Type, 0, ""},
    +		{"Response.Error", Field, 0, ""},
    +		{"Response.Seq", Field, 0, ""},
    +		{"Response.ServiceMethod", Field, 0, ""},
    +		{"ServeCodec", Func, 0, "func(codec ServerCodec)"},
    +		{"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
    +		{"ServeRequest", Func, 0, "func(codec ServerCodec) error"},
    +		{"Server", Type, 0, ""},
    +		{"ServerCodec", Type, 0, ""},
    +		{"ServerError", Type, 0, ""},
     	},
     	"net/rpc/jsonrpc": {
    -		{"Dial", Func, 0},
    -		{"NewClient", Func, 0},
    -		{"NewClientCodec", Func, 0},
    -		{"NewServerCodec", Func, 0},
    -		{"ServeConn", Func, 0},
    +		{"Dial", Func, 0, "func(network string, address string) (*rpc.Client, error)"},
    +		{"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *rpc.Client"},
    +		{"NewClientCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ClientCodec"},
    +		{"NewServerCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ServerCodec"},
    +		{"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
     	},
     	"net/smtp": {
    -		{"(*Client).Auth", Method, 0},
    -		{"(*Client).Close", Method, 2},
    -		{"(*Client).Data", Method, 0},
    -		{"(*Client).Extension", Method, 0},
    -		{"(*Client).Hello", Method, 1},
    -		{"(*Client).Mail", Method, 0},
    -		{"(*Client).Noop", Method, 10},
    -		{"(*Client).Quit", Method, 0},
    -		{"(*Client).Rcpt", Method, 0},
    -		{"(*Client).Reset", Method, 0},
    -		{"(*Client).StartTLS", Method, 0},
    -		{"(*Client).TLSConnectionState", Method, 5},
    -		{"(*Client).Verify", Method, 0},
    -		{"Auth", Type, 0},
    -		{"CRAMMD5Auth", Func, 0},
    -		{"Client", Type, 0},
    -		{"Client.Text", Field, 0},
    -		{"Dial", Func, 0},
    -		{"NewClient", Func, 0},
    -		{"PlainAuth", Func, 0},
    -		{"SendMail", Func, 0},
    -		{"ServerInfo", Type, 0},
    -		{"ServerInfo.Auth", Field, 0},
    -		{"ServerInfo.Name", Field, 0},
    -		{"ServerInfo.TLS", Field, 0},
    +		{"(*Client).Auth", Method, 0, ""},
    +		{"(*Client).Close", Method, 2, ""},
    +		{"(*Client).Data", Method, 0, ""},
    +		{"(*Client).Extension", Method, 0, ""},
    +		{"(*Client).Hello", Method, 1, ""},
    +		{"(*Client).Mail", Method, 0, ""},
    +		{"(*Client).Noop", Method, 10, ""},
    +		{"(*Client).Quit", Method, 0, ""},
    +		{"(*Client).Rcpt", Method, 0, ""},
    +		{"(*Client).Reset", Method, 0, ""},
    +		{"(*Client).StartTLS", Method, 0, ""},
    +		{"(*Client).TLSConnectionState", Method, 5, ""},
    +		{"(*Client).Verify", Method, 0, ""},
    +		{"Auth", Type, 0, ""},
    +		{"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"},
    +		{"Client", Type, 0, ""},
    +		{"Client.Text", Field, 0, ""},
    +		{"Dial", Func, 0, "func(addr string) (*Client, error)"},
    +		{"NewClient", Func, 0, "func(conn net.Conn, host string) (*Client, error)"},
    +		{"PlainAuth", Func, 0, "func(identity string, username string, password string, host string) Auth"},
    +		{"SendMail", Func, 0, "func(addr string, a Auth, from string, to []string, msg []byte) error"},
    +		{"ServerInfo", Type, 0, ""},
    +		{"ServerInfo.Auth", Field, 0, ""},
    +		{"ServerInfo.Name", Field, 0, ""},
    +		{"ServerInfo.TLS", Field, 0, ""},
     	},
     	"net/textproto": {
    -		{"(*Conn).Close", Method, 0},
    -		{"(*Conn).Cmd", Method, 0},
    -		{"(*Conn).DotReader", Method, 0},
    -		{"(*Conn).DotWriter", Method, 0},
    -		{"(*Conn).EndRequest", Method, 0},
    -		{"(*Conn).EndResponse", Method, 0},
    -		{"(*Conn).Next", Method, 0},
    -		{"(*Conn).PrintfLine", Method, 0},
    -		{"(*Conn).ReadCodeLine", Method, 0},
    -		{"(*Conn).ReadContinuedLine", Method, 0},
    -		{"(*Conn).ReadContinuedLineBytes", Method, 0},
    -		{"(*Conn).ReadDotBytes", Method, 0},
    -		{"(*Conn).ReadDotLines", Method, 0},
    -		{"(*Conn).ReadLine", Method, 0},
    -		{"(*Conn).ReadLineBytes", Method, 0},
    -		{"(*Conn).ReadMIMEHeader", Method, 0},
    -		{"(*Conn).ReadResponse", Method, 0},
    -		{"(*Conn).StartRequest", Method, 0},
    -		{"(*Conn).StartResponse", Method, 0},
    -		{"(*Error).Error", Method, 0},
    -		{"(*Pipeline).EndRequest", Method, 0},
    -		{"(*Pipeline).EndResponse", Method, 0},
    -		{"(*Pipeline).Next", Method, 0},
    -		{"(*Pipeline).StartRequest", Method, 0},
    -		{"(*Pipeline).StartResponse", Method, 0},
    -		{"(*Reader).DotReader", Method, 0},
    -		{"(*Reader).ReadCodeLine", Method, 0},
    -		{"(*Reader).ReadContinuedLine", Method, 0},
    -		{"(*Reader).ReadContinuedLineBytes", Method, 0},
    -		{"(*Reader).ReadDotBytes", Method, 0},
    -		{"(*Reader).ReadDotLines", Method, 0},
    -		{"(*Reader).ReadLine", Method, 0},
    -		{"(*Reader).ReadLineBytes", Method, 0},
    -		{"(*Reader).ReadMIMEHeader", Method, 0},
    -		{"(*Reader).ReadResponse", Method, 0},
    -		{"(*Writer).DotWriter", Method, 0},
    -		{"(*Writer).PrintfLine", Method, 0},
    -		{"(MIMEHeader).Add", Method, 0},
    -		{"(MIMEHeader).Del", Method, 0},
    -		{"(MIMEHeader).Get", Method, 0},
    -		{"(MIMEHeader).Set", Method, 0},
    -		{"(MIMEHeader).Values", Method, 14},
    -		{"(ProtocolError).Error", Method, 0},
    -		{"CanonicalMIMEHeaderKey", Func, 0},
    -		{"Conn", Type, 0},
    -		{"Conn.Pipeline", Field, 0},
    -		{"Conn.Reader", Field, 0},
    -		{"Conn.Writer", Field, 0},
    -		{"Dial", Func, 0},
    -		{"Error", Type, 0},
    -		{"Error.Code", Field, 0},
    -		{"Error.Msg", Field, 0},
    -		{"MIMEHeader", Type, 0},
    -		{"NewConn", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"NewWriter", Func, 0},
    -		{"Pipeline", Type, 0},
    -		{"ProtocolError", Type, 0},
    -		{"Reader", Type, 0},
    -		{"Reader.R", Field, 0},
    -		{"TrimBytes", Func, 1},
    -		{"TrimString", Func, 1},
    -		{"Writer", Type, 0},
    -		{"Writer.W", Field, 0},
    +		{"(*Conn).Close", Method, 0, ""},
    +		{"(*Conn).Cmd", Method, 0, ""},
    +		{"(*Conn).DotReader", Method, 0, ""},
    +		{"(*Conn).DotWriter", Method, 0, ""},
    +		{"(*Conn).EndRequest", Method, 0, ""},
    +		{"(*Conn).EndResponse", Method, 0, ""},
    +		{"(*Conn).Next", Method, 0, ""},
    +		{"(*Conn).PrintfLine", Method, 0, ""},
    +		{"(*Conn).ReadCodeLine", Method, 0, ""},
    +		{"(*Conn).ReadContinuedLine", Method, 0, ""},
    +		{"(*Conn).ReadContinuedLineBytes", Method, 0, ""},
    +		{"(*Conn).ReadDotBytes", Method, 0, ""},
    +		{"(*Conn).ReadDotLines", Method, 0, ""},
    +		{"(*Conn).ReadLine", Method, 0, ""},
    +		{"(*Conn).ReadLineBytes", Method, 0, ""},
    +		{"(*Conn).ReadMIMEHeader", Method, 0, ""},
    +		{"(*Conn).ReadResponse", Method, 0, ""},
    +		{"(*Conn).StartRequest", Method, 0, ""},
    +		{"(*Conn).StartResponse", Method, 0, ""},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Pipeline).EndRequest", Method, 0, ""},
    +		{"(*Pipeline).EndResponse", Method, 0, ""},
    +		{"(*Pipeline).Next", Method, 0, ""},
    +		{"(*Pipeline).StartRequest", Method, 0, ""},
    +		{"(*Pipeline).StartResponse", Method, 0, ""},
    +		{"(*Reader).DotReader", Method, 0, ""},
    +		{"(*Reader).ReadCodeLine", Method, 0, ""},
    +		{"(*Reader).ReadContinuedLine", Method, 0, ""},
    +		{"(*Reader).ReadContinuedLineBytes", Method, 0, ""},
    +		{"(*Reader).ReadDotBytes", Method, 0, ""},
    +		{"(*Reader).ReadDotLines", Method, 0, ""},
    +		{"(*Reader).ReadLine", Method, 0, ""},
    +		{"(*Reader).ReadLineBytes", Method, 0, ""},
    +		{"(*Reader).ReadMIMEHeader", Method, 0, ""},
    +		{"(*Reader).ReadResponse", Method, 0, ""},
    +		{"(*Writer).DotWriter", Method, 0, ""},
    +		{"(*Writer).PrintfLine", Method, 0, ""},
    +		{"(MIMEHeader).Add", Method, 0, ""},
    +		{"(MIMEHeader).Del", Method, 0, ""},
    +		{"(MIMEHeader).Get", Method, 0, ""},
    +		{"(MIMEHeader).Set", Method, 0, ""},
    +		{"(MIMEHeader).Values", Method, 14, ""},
    +		{"(ProtocolError).Error", Method, 0, ""},
    +		{"CanonicalMIMEHeaderKey", Func, 0, "func(s string) string"},
    +		{"Conn", Type, 0, ""},
    +		{"Conn.Pipeline", Field, 0, ""},
    +		{"Conn.Reader", Field, 0, ""},
    +		{"Conn.Writer", Field, 0, ""},
    +		{"Dial", Func, 0, "func(network string, addr string) (*Conn, error)"},
    +		{"Error", Type, 0, ""},
    +		{"Error.Code", Field, 0, ""},
    +		{"Error.Msg", Field, 0, ""},
    +		{"MIMEHeader", Type, 0, ""},
    +		{"NewConn", Func, 0, "func(conn io.ReadWriteCloser) *Conn"},
    +		{"NewReader", Func, 0, "func(r *bufio.Reader) *Reader"},
    +		{"NewWriter", Func, 0, "func(w *bufio.Writer) *Writer"},
    +		{"Pipeline", Type, 0, ""},
    +		{"ProtocolError", Type, 0, ""},
    +		{"Reader", Type, 0, ""},
    +		{"Reader.R", Field, 0, ""},
    +		{"TrimBytes", Func, 1, "func(b []byte) []byte"},
    +		{"TrimString", Func, 1, "func(s string) string"},
    +		{"Writer", Type, 0, ""},
    +		{"Writer.W", Field, 0, ""},
     	},
     	"net/url": {
    -		{"(*Error).Error", Method, 0},
    -		{"(*Error).Temporary", Method, 6},
    -		{"(*Error).Timeout", Method, 6},
    -		{"(*Error).Unwrap", Method, 13},
    -		{"(*URL).AppendBinary", Method, 24},
    -		{"(*URL).EscapedFragment", Method, 15},
    -		{"(*URL).EscapedPath", Method, 5},
    -		{"(*URL).Hostname", Method, 8},
    -		{"(*URL).IsAbs", Method, 0},
    -		{"(*URL).JoinPath", Method, 19},
    -		{"(*URL).MarshalBinary", Method, 8},
    -		{"(*URL).Parse", Method, 0},
    -		{"(*URL).Port", Method, 8},
    -		{"(*URL).Query", Method, 0},
    -		{"(*URL).Redacted", Method, 15},
    -		{"(*URL).RequestURI", Method, 0},
    -		{"(*URL).ResolveReference", Method, 0},
    -		{"(*URL).String", Method, 0},
    -		{"(*URL).UnmarshalBinary", Method, 8},
    -		{"(*Userinfo).Password", Method, 0},
    -		{"(*Userinfo).String", Method, 0},
    -		{"(*Userinfo).Username", Method, 0},
    -		{"(EscapeError).Error", Method, 0},
    -		{"(InvalidHostError).Error", Method, 6},
    -		{"(Values).Add", Method, 0},
    -		{"(Values).Del", Method, 0},
    -		{"(Values).Encode", Method, 0},
    -		{"(Values).Get", Method, 0},
    -		{"(Values).Has", Method, 17},
    -		{"(Values).Set", Method, 0},
    -		{"Error", Type, 0},
    -		{"Error.Err", Field, 0},
    -		{"Error.Op", Field, 0},
    -		{"Error.URL", Field, 0},
    -		{"EscapeError", Type, 0},
    -		{"InvalidHostError", Type, 6},
    -		{"JoinPath", Func, 19},
    -		{"Parse", Func, 0},
    -		{"ParseQuery", Func, 0},
    -		{"ParseRequestURI", Func, 0},
    -		{"PathEscape", Func, 8},
    -		{"PathUnescape", Func, 8},
    -		{"QueryEscape", Func, 0},
    -		{"QueryUnescape", Func, 0},
    -		{"URL", Type, 0},
    -		{"URL.ForceQuery", Field, 7},
    -		{"URL.Fragment", Field, 0},
    -		{"URL.Host", Field, 0},
    -		{"URL.OmitHost", Field, 19},
    -		{"URL.Opaque", Field, 0},
    -		{"URL.Path", Field, 0},
    -		{"URL.RawFragment", Field, 15},
    -		{"URL.RawPath", Field, 5},
    -		{"URL.RawQuery", Field, 0},
    -		{"URL.Scheme", Field, 0},
    -		{"URL.User", Field, 0},
    -		{"User", Func, 0},
    -		{"UserPassword", Func, 0},
    -		{"Userinfo", Type, 0},
    -		{"Values", Type, 0},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Error).Temporary", Method, 6, ""},
    +		{"(*Error).Timeout", Method, 6, ""},
    +		{"(*Error).Unwrap", Method, 13, ""},
    +		{"(*URL).AppendBinary", Method, 24, ""},
    +		{"(*URL).EscapedFragment", Method, 15, ""},
    +		{"(*URL).EscapedPath", Method, 5, ""},
    +		{"(*URL).Hostname", Method, 8, ""},
    +		{"(*URL).IsAbs", Method, 0, ""},
    +		{"(*URL).JoinPath", Method, 19, ""},
    +		{"(*URL).MarshalBinary", Method, 8, ""},
    +		{"(*URL).Parse", Method, 0, ""},
    +		{"(*URL).Port", Method, 8, ""},
    +		{"(*URL).Query", Method, 0, ""},
    +		{"(*URL).Redacted", Method, 15, ""},
    +		{"(*URL).RequestURI", Method, 0, ""},
    +		{"(*URL).ResolveReference", Method, 0, ""},
    +		{"(*URL).String", Method, 0, ""},
    +		{"(*URL).UnmarshalBinary", Method, 8, ""},
    +		{"(*Userinfo).Password", Method, 0, ""},
    +		{"(*Userinfo).String", Method, 0, ""},
    +		{"(*Userinfo).Username", Method, 0, ""},
    +		{"(EscapeError).Error", Method, 0, ""},
    +		{"(InvalidHostError).Error", Method, 6, ""},
    +		{"(Values).Add", Method, 0, ""},
    +		{"(Values).Del", Method, 0, ""},
    +		{"(Values).Encode", Method, 0, ""},
    +		{"(Values).Get", Method, 0, ""},
    +		{"(Values).Has", Method, 17, ""},
    +		{"(Values).Set", Method, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Err", Field, 0, ""},
    +		{"Error.Op", Field, 0, ""},
    +		{"Error.URL", Field, 0, ""},
    +		{"EscapeError", Type, 0, ""},
    +		{"InvalidHostError", Type, 6, ""},
    +		{"JoinPath", Func, 19, "func(base string, elem ...string) (result string, err error)"},
    +		{"Parse", Func, 0, "func(rawURL string) (*URL, error)"},
    +		{"ParseQuery", Func, 0, "func(query string) (Values, error)"},
    +		{"ParseRequestURI", Func, 0, "func(rawURL string) (*URL, error)"},
    +		{"PathEscape", Func, 8, "func(s string) string"},
    +		{"PathUnescape", Func, 8, "func(s string) (string, error)"},
    +		{"QueryEscape", Func, 0, "func(s string) string"},
    +		{"QueryUnescape", Func, 0, "func(s string) (string, error)"},
    +		{"URL", Type, 0, ""},
    +		{"URL.ForceQuery", Field, 7, ""},
    +		{"URL.Fragment", Field, 0, ""},
    +		{"URL.Host", Field, 0, ""},
    +		{"URL.OmitHost", Field, 19, ""},
    +		{"URL.Opaque", Field, 0, ""},
    +		{"URL.Path", Field, 0, ""},
    +		{"URL.RawFragment", Field, 15, ""},
    +		{"URL.RawPath", Field, 5, ""},
    +		{"URL.RawQuery", Field, 0, ""},
    +		{"URL.Scheme", Field, 0, ""},
    +		{"URL.User", Field, 0, ""},
    +		{"User", Func, 0, "func(username string) *Userinfo"},
    +		{"UserPassword", Func, 0, "func(username string, password string) *Userinfo"},
    +		{"Userinfo", Type, 0, ""},
    +		{"Values", Type, 0, ""},
     	},
     	"os": {
    -		{"(*File).Chdir", Method, 0},
    -		{"(*File).Chmod", Method, 0},
    -		{"(*File).Chown", Method, 0},
    -		{"(*File).Close", Method, 0},
    -		{"(*File).Fd", Method, 0},
    -		{"(*File).Name", Method, 0},
    -		{"(*File).Read", Method, 0},
    -		{"(*File).ReadAt", Method, 0},
    -		{"(*File).ReadDir", Method, 16},
    -		{"(*File).ReadFrom", Method, 15},
    -		{"(*File).Readdir", Method, 0},
    -		{"(*File).Readdirnames", Method, 0},
    -		{"(*File).Seek", Method, 0},
    -		{"(*File).SetDeadline", Method, 10},
    -		{"(*File).SetReadDeadline", Method, 10},
    -		{"(*File).SetWriteDeadline", Method, 10},
    -		{"(*File).Stat", Method, 0},
    -		{"(*File).Sync", Method, 0},
    -		{"(*File).SyscallConn", Method, 12},
    -		{"(*File).Truncate", Method, 0},
    -		{"(*File).Write", Method, 0},
    -		{"(*File).WriteAt", Method, 0},
    -		{"(*File).WriteString", Method, 0},
    -		{"(*File).WriteTo", Method, 22},
    -		{"(*LinkError).Error", Method, 0},
    -		{"(*LinkError).Unwrap", Method, 13},
    -		{"(*PathError).Error", Method, 0},
    -		{"(*PathError).Timeout", Method, 10},
    -		{"(*PathError).Unwrap", Method, 13},
    -		{"(*Process).Kill", Method, 0},
    -		{"(*Process).Release", Method, 0},
    -		{"(*Process).Signal", Method, 0},
    -		{"(*Process).Wait", Method, 0},
    -		{"(*ProcessState).ExitCode", Method, 12},
    -		{"(*ProcessState).Exited", Method, 0},
    -		{"(*ProcessState).Pid", Method, 0},
    -		{"(*ProcessState).String", Method, 0},
    -		{"(*ProcessState).Success", Method, 0},
    -		{"(*ProcessState).Sys", Method, 0},
    -		{"(*ProcessState).SysUsage", Method, 0},
    -		{"(*ProcessState).SystemTime", Method, 0},
    -		{"(*ProcessState).UserTime", Method, 0},
    -		{"(*Root).Chmod", Method, 25},
    -		{"(*Root).Chown", Method, 25},
    -		{"(*Root).Close", Method, 24},
    -		{"(*Root).Create", Method, 24},
    -		{"(*Root).FS", Method, 24},
    -		{"(*Root).Lstat", Method, 24},
    -		{"(*Root).Mkdir", Method, 24},
    -		{"(*Root).Name", Method, 24},
    -		{"(*Root).Open", Method, 24},
    -		{"(*Root).OpenFile", Method, 24},
    -		{"(*Root).OpenRoot", Method, 24},
    -		{"(*Root).Remove", Method, 24},
    -		{"(*Root).Stat", Method, 24},
    -		{"(*SyscallError).Error", Method, 0},
    -		{"(*SyscallError).Timeout", Method, 10},
    -		{"(*SyscallError).Unwrap", Method, 13},
    -		{"(FileMode).IsDir", Method, 0},
    -		{"(FileMode).IsRegular", Method, 1},
    -		{"(FileMode).Perm", Method, 0},
    -		{"(FileMode).String", Method, 0},
    -		{"Args", Var, 0},
    -		{"Chdir", Func, 0},
    -		{"Chmod", Func, 0},
    -		{"Chown", Func, 0},
    -		{"Chtimes", Func, 0},
    -		{"Clearenv", Func, 0},
    -		{"CopyFS", Func, 23},
    -		{"Create", Func, 0},
    -		{"CreateTemp", Func, 16},
    -		{"DevNull", Const, 0},
    -		{"DirEntry", Type, 16},
    -		{"DirFS", Func, 16},
    -		{"Environ", Func, 0},
    -		{"ErrClosed", Var, 8},
    -		{"ErrDeadlineExceeded", Var, 15},
    -		{"ErrExist", Var, 0},
    -		{"ErrInvalid", Var, 0},
    -		{"ErrNoDeadline", Var, 10},
    -		{"ErrNotExist", Var, 0},
    -		{"ErrPermission", Var, 0},
    -		{"ErrProcessDone", Var, 16},
    -		{"Executable", Func, 8},
    -		{"Exit", Func, 0},
    -		{"Expand", Func, 0},
    -		{"ExpandEnv", Func, 0},
    -		{"File", Type, 0},
    -		{"FileInfo", Type, 0},
    -		{"FileMode", Type, 0},
    -		{"FindProcess", Func, 0},
    -		{"Getegid", Func, 0},
    -		{"Getenv", Func, 0},
    -		{"Geteuid", Func, 0},
    -		{"Getgid", Func, 0},
    -		{"Getgroups", Func, 0},
    -		{"Getpagesize", Func, 0},
    -		{"Getpid", Func, 0},
    -		{"Getppid", Func, 0},
    -		{"Getuid", Func, 0},
    -		{"Getwd", Func, 0},
    -		{"Hostname", Func, 0},
    -		{"Interrupt", Var, 0},
    -		{"IsExist", Func, 0},
    -		{"IsNotExist", Func, 0},
    -		{"IsPathSeparator", Func, 0},
    -		{"IsPermission", Func, 0},
    -		{"IsTimeout", Func, 10},
    -		{"Kill", Var, 0},
    -		{"Lchown", Func, 0},
    -		{"Link", Func, 0},
    -		{"LinkError", Type, 0},
    -		{"LinkError.Err", Field, 0},
    -		{"LinkError.New", Field, 0},
    -		{"LinkError.Old", Field, 0},
    -		{"LinkError.Op", Field, 0},
    -		{"LookupEnv", Func, 5},
    -		{"Lstat", Func, 0},
    -		{"Mkdir", Func, 0},
    -		{"MkdirAll", Func, 0},
    -		{"MkdirTemp", Func, 16},
    -		{"ModeAppend", Const, 0},
    -		{"ModeCharDevice", Const, 0},
    -		{"ModeDevice", Const, 0},
    -		{"ModeDir", Const, 0},
    -		{"ModeExclusive", Const, 0},
    -		{"ModeIrregular", Const, 11},
    -		{"ModeNamedPipe", Const, 0},
    -		{"ModePerm", Const, 0},
    -		{"ModeSetgid", Const, 0},
    -		{"ModeSetuid", Const, 0},
    -		{"ModeSocket", Const, 0},
    -		{"ModeSticky", Const, 0},
    -		{"ModeSymlink", Const, 0},
    -		{"ModeTemporary", Const, 0},
    -		{"ModeType", Const, 0},
    -		{"NewFile", Func, 0},
    -		{"NewSyscallError", Func, 0},
    -		{"O_APPEND", Const, 0},
    -		{"O_CREATE", Const, 0},
    -		{"O_EXCL", Const, 0},
    -		{"O_RDONLY", Const, 0},
    -		{"O_RDWR", Const, 0},
    -		{"O_SYNC", Const, 0},
    -		{"O_TRUNC", Const, 0},
    -		{"O_WRONLY", Const, 0},
    -		{"Open", Func, 0},
    -		{"OpenFile", Func, 0},
    -		{"OpenInRoot", Func, 24},
    -		{"OpenRoot", Func, 24},
    -		{"PathError", Type, 0},
    -		{"PathError.Err", Field, 0},
    -		{"PathError.Op", Field, 0},
    -		{"PathError.Path", Field, 0},
    -		{"PathListSeparator", Const, 0},
    -		{"PathSeparator", Const, 0},
    -		{"Pipe", Func, 0},
    -		{"ProcAttr", Type, 0},
    -		{"ProcAttr.Dir", Field, 0},
    -		{"ProcAttr.Env", Field, 0},
    -		{"ProcAttr.Files", Field, 0},
    -		{"ProcAttr.Sys", Field, 0},
    -		{"Process", Type, 0},
    -		{"Process.Pid", Field, 0},
    -		{"ProcessState", Type, 0},
    -		{"ReadDir", Func, 16},
    -		{"ReadFile", Func, 16},
    -		{"Readlink", Func, 0},
    -		{"Remove", Func, 0},
    -		{"RemoveAll", Func, 0},
    -		{"Rename", Func, 0},
    -		{"Root", Type, 24},
    -		{"SEEK_CUR", Const, 0},
    -		{"SEEK_END", Const, 0},
    -		{"SEEK_SET", Const, 0},
    -		{"SameFile", Func, 0},
    -		{"Setenv", Func, 0},
    -		{"Signal", Type, 0},
    -		{"StartProcess", Func, 0},
    -		{"Stat", Func, 0},
    -		{"Stderr", Var, 0},
    -		{"Stdin", Var, 0},
    -		{"Stdout", Var, 0},
    -		{"Symlink", Func, 0},
    -		{"SyscallError", Type, 0},
    -		{"SyscallError.Err", Field, 0},
    -		{"SyscallError.Syscall", Field, 0},
    -		{"TempDir", Func, 0},
    -		{"Truncate", Func, 0},
    -		{"Unsetenv", Func, 4},
    -		{"UserCacheDir", Func, 11},
    -		{"UserConfigDir", Func, 13},
    -		{"UserHomeDir", Func, 12},
    -		{"WriteFile", Func, 16},
    +		{"(*File).Chdir", Method, 0, ""},
    +		{"(*File).Chmod", Method, 0, ""},
    +		{"(*File).Chown", Method, 0, ""},
    +		{"(*File).Close", Method, 0, ""},
    +		{"(*File).Fd", Method, 0, ""},
    +		{"(*File).Name", Method, 0, ""},
    +		{"(*File).Read", Method, 0, ""},
    +		{"(*File).ReadAt", Method, 0, ""},
    +		{"(*File).ReadDir", Method, 16, ""},
    +		{"(*File).ReadFrom", Method, 15, ""},
    +		{"(*File).Readdir", Method, 0, ""},
    +		{"(*File).Readdirnames", Method, 0, ""},
    +		{"(*File).Seek", Method, 0, ""},
    +		{"(*File).SetDeadline", Method, 10, ""},
    +		{"(*File).SetReadDeadline", Method, 10, ""},
    +		{"(*File).SetWriteDeadline", Method, 10, ""},
    +		{"(*File).Stat", Method, 0, ""},
    +		{"(*File).Sync", Method, 0, ""},
    +		{"(*File).SyscallConn", Method, 12, ""},
    +		{"(*File).Truncate", Method, 0, ""},
    +		{"(*File).Write", Method, 0, ""},
    +		{"(*File).WriteAt", Method, 0, ""},
    +		{"(*File).WriteString", Method, 0, ""},
    +		{"(*File).WriteTo", Method, 22, ""},
    +		{"(*LinkError).Error", Method, 0, ""},
    +		{"(*LinkError).Unwrap", Method, 13, ""},
    +		{"(*PathError).Error", Method, 0, ""},
    +		{"(*PathError).Timeout", Method, 10, ""},
    +		{"(*PathError).Unwrap", Method, 13, ""},
    +		{"(*Process).Kill", Method, 0, ""},
    +		{"(*Process).Release", Method, 0, ""},
    +		{"(*Process).Signal", Method, 0, ""},
    +		{"(*Process).Wait", Method, 0, ""},
    +		{"(*ProcessState).ExitCode", Method, 12, ""},
    +		{"(*ProcessState).Exited", Method, 0, ""},
    +		{"(*ProcessState).Pid", Method, 0, ""},
    +		{"(*ProcessState).String", Method, 0, ""},
    +		{"(*ProcessState).Success", Method, 0, ""},
    +		{"(*ProcessState).Sys", Method, 0, ""},
    +		{"(*ProcessState).SysUsage", Method, 0, ""},
    +		{"(*ProcessState).SystemTime", Method, 0, ""},
    +		{"(*ProcessState).UserTime", Method, 0, ""},
    +		{"(*Root).Chmod", Method, 25, ""},
    +		{"(*Root).Chown", Method, 25, ""},
    +		{"(*Root).Chtimes", Method, 25, ""},
    +		{"(*Root).Close", Method, 24, ""},
    +		{"(*Root).Create", Method, 24, ""},
    +		{"(*Root).FS", Method, 24, ""},
    +		{"(*Root).Lchown", Method, 25, ""},
    +		{"(*Root).Link", Method, 25, ""},
    +		{"(*Root).Lstat", Method, 24, ""},
    +		{"(*Root).Mkdir", Method, 24, ""},
    +		{"(*Root).MkdirAll", Method, 25, ""},
    +		{"(*Root).Name", Method, 24, ""},
    +		{"(*Root).Open", Method, 24, ""},
    +		{"(*Root).OpenFile", Method, 24, ""},
    +		{"(*Root).OpenRoot", Method, 24, ""},
    +		{"(*Root).ReadFile", Method, 25, ""},
    +		{"(*Root).Readlink", Method, 25, ""},
    +		{"(*Root).Remove", Method, 24, ""},
    +		{"(*Root).RemoveAll", Method, 25, ""},
    +		{"(*Root).Rename", Method, 25, ""},
    +		{"(*Root).Stat", Method, 24, ""},
    +		{"(*Root).Symlink", Method, 25, ""},
    +		{"(*Root).WriteFile", Method, 25, ""},
    +		{"(*SyscallError).Error", Method, 0, ""},
    +		{"(*SyscallError).Timeout", Method, 10, ""},
    +		{"(*SyscallError).Unwrap", Method, 13, ""},
    +		{"(FileMode).IsDir", Method, 0, ""},
    +		{"(FileMode).IsRegular", Method, 1, ""},
    +		{"(FileMode).Perm", Method, 0, ""},
    +		{"(FileMode).String", Method, 0, ""},
    +		{"Args", Var, 0, ""},
    +		{"Chdir", Func, 0, "func(dir string) error"},
    +		{"Chmod", Func, 0, "func(name string, mode FileMode) error"},
    +		{"Chown", Func, 0, "func(name string, uid int, gid int) error"},
    +		{"Chtimes", Func, 0, "func(name string, atime time.Time, mtime time.Time) error"},
    +		{"Clearenv", Func, 0, "func()"},
    +		{"CopyFS", Func, 23, "func(dir string, fsys fs.FS) error"},
    +		{"Create", Func, 0, "func(name string) (*File, error)"},
    +		{"CreateTemp", Func, 16, "func(dir string, pattern string) (*File, error)"},
    +		{"DevNull", Const, 0, ""},
    +		{"DirEntry", Type, 16, ""},
    +		{"DirFS", Func, 16, "func(dir string) fs.FS"},
    +		{"Environ", Func, 0, "func() []string"},
    +		{"ErrClosed", Var, 8, ""},
    +		{"ErrDeadlineExceeded", Var, 15, ""},
    +		{"ErrExist", Var, 0, ""},
    +		{"ErrInvalid", Var, 0, ""},
    +		{"ErrNoDeadline", Var, 10, ""},
    +		{"ErrNotExist", Var, 0, ""},
    +		{"ErrPermission", Var, 0, ""},
    +		{"ErrProcessDone", Var, 16, ""},
    +		{"Executable", Func, 8, "func() (string, error)"},
    +		{"Exit", Func, 0, "func(code int)"},
    +		{"Expand", Func, 0, "func(s string, mapping func(string) string) string"},
    +		{"ExpandEnv", Func, 0, "func(s string) string"},
    +		{"File", Type, 0, ""},
    +		{"FileInfo", Type, 0, ""},
    +		{"FileMode", Type, 0, ""},
    +		{"FindProcess", Func, 0, "func(pid int) (*Process, error)"},
    +		{"Getegid", Func, 0, "func() int"},
    +		{"Getenv", Func, 0, "func(key string) string"},
    +		{"Geteuid", Func, 0, "func() int"},
    +		{"Getgid", Func, 0, "func() int"},
    +		{"Getgroups", Func, 0, "func() ([]int, error)"},
    +		{"Getpagesize", Func, 0, "func() int"},
    +		{"Getpid", Func, 0, "func() int"},
    +		{"Getppid", Func, 0, "func() int"},
    +		{"Getuid", Func, 0, "func() int"},
    +		{"Getwd", Func, 0, "func() (dir string, err error)"},
    +		{"Hostname", Func, 0, "func() (name string, err error)"},
    +		{"Interrupt", Var, 0, ""},
    +		{"IsExist", Func, 0, "func(err error) bool"},
    +		{"IsNotExist", Func, 0, "func(err error) bool"},
    +		{"IsPathSeparator", Func, 0, "func(c uint8) bool"},
    +		{"IsPermission", Func, 0, "func(err error) bool"},
    +		{"IsTimeout", Func, 10, "func(err error) bool"},
    +		{"Kill", Var, 0, ""},
    +		{"Lchown", Func, 0, "func(name string, uid int, gid int) error"},
    +		{"Link", Func, 0, "func(oldname string, newname string) error"},
    +		{"LinkError", Type, 0, ""},
    +		{"LinkError.Err", Field, 0, ""},
    +		{"LinkError.New", Field, 0, ""},
    +		{"LinkError.Old", Field, 0, ""},
    +		{"LinkError.Op", Field, 0, ""},
    +		{"LookupEnv", Func, 5, "func(key string) (string, bool)"},
    +		{"Lstat", Func, 0, "func(name string) (FileInfo, error)"},
    +		{"Mkdir", Func, 0, "func(name string, perm FileMode) error"},
    +		{"MkdirAll", Func, 0, "func(path string, perm FileMode) error"},
    +		{"MkdirTemp", Func, 16, "func(dir string, pattern string) (string, error)"},
    +		{"ModeAppend", Const, 0, ""},
    +		{"ModeCharDevice", Const, 0, ""},
    +		{"ModeDevice", Const, 0, ""},
    +		{"ModeDir", Const, 0, ""},
    +		{"ModeExclusive", Const, 0, ""},
    +		{"ModeIrregular", Const, 11, ""},
    +		{"ModeNamedPipe", Const, 0, ""},
    +		{"ModePerm", Const, 0, ""},
    +		{"ModeSetgid", Const, 0, ""},
    +		{"ModeSetuid", Const, 0, ""},
    +		{"ModeSocket", Const, 0, ""},
    +		{"ModeSticky", Const, 0, ""},
    +		{"ModeSymlink", Const, 0, ""},
    +		{"ModeTemporary", Const, 0, ""},
    +		{"ModeType", Const, 0, ""},
    +		{"NewFile", Func, 0, "func(fd uintptr, name string) *File"},
    +		{"NewSyscallError", Func, 0, "func(syscall string, err error) error"},
    +		{"O_APPEND", Const, 0, ""},
    +		{"O_CREATE", Const, 0, ""},
    +		{"O_EXCL", Const, 0, ""},
    +		{"O_RDONLY", Const, 0, ""},
    +		{"O_RDWR", Const, 0, ""},
    +		{"O_SYNC", Const, 0, ""},
    +		{"O_TRUNC", Const, 0, ""},
    +		{"O_WRONLY", Const, 0, ""},
    +		{"Open", Func, 0, "func(name string) (*File, error)"},
    +		{"OpenFile", Func, 0, "func(name string, flag int, perm FileMode) (*File, error)"},
    +		{"OpenInRoot", Func, 24, "func(dir string, name string) (*File, error)"},
    +		{"OpenRoot", Func, 24, "func(name string) (*Root, error)"},
    +		{"PathError", Type, 0, ""},
    +		{"PathError.Err", Field, 0, ""},
    +		{"PathError.Op", Field, 0, ""},
    +		{"PathError.Path", Field, 0, ""},
    +		{"PathListSeparator", Const, 0, ""},
    +		{"PathSeparator", Const, 0, ""},
    +		{"Pipe", Func, 0, "func() (r *File, w *File, err error)"},
    +		{"ProcAttr", Type, 0, ""},
    +		{"ProcAttr.Dir", Field, 0, ""},
    +		{"ProcAttr.Env", Field, 0, ""},
    +		{"ProcAttr.Files", Field, 0, ""},
    +		{"ProcAttr.Sys", Field, 0, ""},
    +		{"Process", Type, 0, ""},
    +		{"Process.Pid", Field, 0, ""},
    +		{"ProcessState", Type, 0, ""},
    +		{"ReadDir", Func, 16, "func(name string) ([]DirEntry, error)"},
    +		{"ReadFile", Func, 16, "func(name string) ([]byte, error)"},
    +		{"Readlink", Func, 0, "func(name string) (string, error)"},
    +		{"Remove", Func, 0, "func(name string) error"},
    +		{"RemoveAll", Func, 0, "func(path string) error"},
    +		{"Rename", Func, 0, "func(oldpath string, newpath string) error"},
    +		{"Root", Type, 24, ""},
    +		{"SEEK_CUR", Const, 0, ""},
    +		{"SEEK_END", Const, 0, ""},
    +		{"SEEK_SET", Const, 0, ""},
    +		{"SameFile", Func, 0, "func(fi1 FileInfo, fi2 FileInfo) bool"},
    +		{"Setenv", Func, 0, "func(key string, value string) error"},
    +		{"Signal", Type, 0, ""},
    +		{"StartProcess", Func, 0, "func(name string, argv []string, attr *ProcAttr) (*Process, error)"},
    +		{"Stat", Func, 0, "func(name string) (FileInfo, error)"},
    +		{"Stderr", Var, 0, ""},
    +		{"Stdin", Var, 0, ""},
    +		{"Stdout", Var, 0, ""},
    +		{"Symlink", Func, 0, "func(oldname string, newname string) error"},
    +		{"SyscallError", Type, 0, ""},
    +		{"SyscallError.Err", Field, 0, ""},
    +		{"SyscallError.Syscall", Field, 0, ""},
    +		{"TempDir", Func, 0, "func() string"},
    +		{"Truncate", Func, 0, "func(name string, size int64) error"},
    +		{"Unsetenv", Func, 4, "func(key string) error"},
    +		{"UserCacheDir", Func, 11, "func() (string, error)"},
    +		{"UserConfigDir", Func, 13, "func() (string, error)"},
    +		{"UserHomeDir", Func, 12, "func() (string, error)"},
    +		{"WriteFile", Func, 16, "func(name string, data []byte, perm FileMode) error"},
     	},
     	"os/exec": {
    -		{"(*Cmd).CombinedOutput", Method, 0},
    -		{"(*Cmd).Environ", Method, 19},
    -		{"(*Cmd).Output", Method, 0},
    -		{"(*Cmd).Run", Method, 0},
    -		{"(*Cmd).Start", Method, 0},
    -		{"(*Cmd).StderrPipe", Method, 0},
    -		{"(*Cmd).StdinPipe", Method, 0},
    -		{"(*Cmd).StdoutPipe", Method, 0},
    -		{"(*Cmd).String", Method, 13},
    -		{"(*Cmd).Wait", Method, 0},
    -		{"(*Error).Error", Method, 0},
    -		{"(*Error).Unwrap", Method, 13},
    -		{"(*ExitError).Error", Method, 0},
    -		{"(ExitError).ExitCode", Method, 12},
    -		{"(ExitError).Exited", Method, 0},
    -		{"(ExitError).Pid", Method, 0},
    -		{"(ExitError).String", Method, 0},
    -		{"(ExitError).Success", Method, 0},
    -		{"(ExitError).Sys", Method, 0},
    -		{"(ExitError).SysUsage", Method, 0},
    -		{"(ExitError).SystemTime", Method, 0},
    -		{"(ExitError).UserTime", Method, 0},
    -		{"Cmd", Type, 0},
    -		{"Cmd.Args", Field, 0},
    -		{"Cmd.Cancel", Field, 20},
    -		{"Cmd.Dir", Field, 0},
    -		{"Cmd.Env", Field, 0},
    -		{"Cmd.Err", Field, 19},
    -		{"Cmd.ExtraFiles", Field, 0},
    -		{"Cmd.Path", Field, 0},
    -		{"Cmd.Process", Field, 0},
    -		{"Cmd.ProcessState", Field, 0},
    -		{"Cmd.Stderr", Field, 0},
    -		{"Cmd.Stdin", Field, 0},
    -		{"Cmd.Stdout", Field, 0},
    -		{"Cmd.SysProcAttr", Field, 0},
    -		{"Cmd.WaitDelay", Field, 20},
    -		{"Command", Func, 0},
    -		{"CommandContext", Func, 7},
    -		{"ErrDot", Var, 19},
    -		{"ErrNotFound", Var, 0},
    -		{"ErrWaitDelay", Var, 20},
    -		{"Error", Type, 0},
    -		{"Error.Err", Field, 0},
    -		{"Error.Name", Field, 0},
    -		{"ExitError", Type, 0},
    -		{"ExitError.ProcessState", Field, 0},
    -		{"ExitError.Stderr", Field, 6},
    -		{"LookPath", Func, 0},
    +		{"(*Cmd).CombinedOutput", Method, 0, ""},
    +		{"(*Cmd).Environ", Method, 19, ""},
    +		{"(*Cmd).Output", Method, 0, ""},
    +		{"(*Cmd).Run", Method, 0, ""},
    +		{"(*Cmd).Start", Method, 0, ""},
    +		{"(*Cmd).StderrPipe", Method, 0, ""},
    +		{"(*Cmd).StdinPipe", Method, 0, ""},
    +		{"(*Cmd).StdoutPipe", Method, 0, ""},
    +		{"(*Cmd).String", Method, 13, ""},
    +		{"(*Cmd).Wait", Method, 0, ""},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Error).Unwrap", Method, 13, ""},
    +		{"(*ExitError).Error", Method, 0, ""},
    +		{"(ExitError).ExitCode", Method, 12, ""},
    +		{"(ExitError).Exited", Method, 0, ""},
    +		{"(ExitError).Pid", Method, 0, ""},
    +		{"(ExitError).String", Method, 0, ""},
    +		{"(ExitError).Success", Method, 0, ""},
    +		{"(ExitError).Sys", Method, 0, ""},
    +		{"(ExitError).SysUsage", Method, 0, ""},
    +		{"(ExitError).SystemTime", Method, 0, ""},
    +		{"(ExitError).UserTime", Method, 0, ""},
    +		{"Cmd", Type, 0, ""},
    +		{"Cmd.Args", Field, 0, ""},
    +		{"Cmd.Cancel", Field, 20, ""},
    +		{"Cmd.Dir", Field, 0, ""},
    +		{"Cmd.Env", Field, 0, ""},
    +		{"Cmd.Err", Field, 19, ""},
    +		{"Cmd.ExtraFiles", Field, 0, ""},
    +		{"Cmd.Path", Field, 0, ""},
    +		{"Cmd.Process", Field, 0, ""},
    +		{"Cmd.ProcessState", Field, 0, ""},
    +		{"Cmd.Stderr", Field, 0, ""},
    +		{"Cmd.Stdin", Field, 0, ""},
    +		{"Cmd.Stdout", Field, 0, ""},
    +		{"Cmd.SysProcAttr", Field, 0, ""},
    +		{"Cmd.WaitDelay", Field, 20, ""},
    +		{"Command", Func, 0, "func(name string, arg ...string) *Cmd"},
    +		{"CommandContext", Func, 7, "func(ctx context.Context, name string, arg ...string) *Cmd"},
    +		{"ErrDot", Var, 19, ""},
    +		{"ErrNotFound", Var, 0, ""},
    +		{"ErrWaitDelay", Var, 20, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Err", Field, 0, ""},
    +		{"Error.Name", Field, 0, ""},
    +		{"ExitError", Type, 0, ""},
    +		{"ExitError.ProcessState", Field, 0, ""},
    +		{"ExitError.Stderr", Field, 6, ""},
    +		{"LookPath", Func, 0, "func(file string) (string, error)"},
     	},
     	"os/signal": {
    -		{"Ignore", Func, 5},
    -		{"Ignored", Func, 11},
    -		{"Notify", Func, 0},
    -		{"NotifyContext", Func, 16},
    -		{"Reset", Func, 5},
    -		{"Stop", Func, 1},
    +		{"Ignore", Func, 5, "func(sig ...os.Signal)"},
    +		{"Ignored", Func, 11, "func(sig os.Signal) bool"},
    +		{"Notify", Func, 0, "func(c chan<- os.Signal, sig ...os.Signal)"},
    +		{"NotifyContext", Func, 16, "func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)"},
    +		{"Reset", Func, 5, "func(sig ...os.Signal)"},
    +		{"Stop", Func, 1, "func(c chan<- os.Signal)"},
     	},
     	"os/user": {
    -		{"(*User).GroupIds", Method, 7},
    -		{"(UnknownGroupError).Error", Method, 7},
    -		{"(UnknownGroupIdError).Error", Method, 7},
    -		{"(UnknownUserError).Error", Method, 0},
    -		{"(UnknownUserIdError).Error", Method, 0},
    -		{"Current", Func, 0},
    -		{"Group", Type, 7},
    -		{"Group.Gid", Field, 7},
    -		{"Group.Name", Field, 7},
    -		{"Lookup", Func, 0},
    -		{"LookupGroup", Func, 7},
    -		{"LookupGroupId", Func, 7},
    -		{"LookupId", Func, 0},
    -		{"UnknownGroupError", Type, 7},
    -		{"UnknownGroupIdError", Type, 7},
    -		{"UnknownUserError", Type, 0},
    -		{"UnknownUserIdError", Type, 0},
    -		{"User", Type, 0},
    -		{"User.Gid", Field, 0},
    -		{"User.HomeDir", Field, 0},
    -		{"User.Name", Field, 0},
    -		{"User.Uid", Field, 0},
    -		{"User.Username", Field, 0},
    +		{"(*User).GroupIds", Method, 7, ""},
    +		{"(UnknownGroupError).Error", Method, 7, ""},
    +		{"(UnknownGroupIdError).Error", Method, 7, ""},
    +		{"(UnknownUserError).Error", Method, 0, ""},
    +		{"(UnknownUserIdError).Error", Method, 0, ""},
    +		{"Current", Func, 0, "func() (*User, error)"},
    +		{"Group", Type, 7, ""},
    +		{"Group.Gid", Field, 7, ""},
    +		{"Group.Name", Field, 7, ""},
    +		{"Lookup", Func, 0, "func(username string) (*User, error)"},
    +		{"LookupGroup", Func, 7, "func(name string) (*Group, error)"},
    +		{"LookupGroupId", Func, 7, "func(gid string) (*Group, error)"},
    +		{"LookupId", Func, 0, "func(uid string) (*User, error)"},
    +		{"UnknownGroupError", Type, 7, ""},
    +		{"UnknownGroupIdError", Type, 7, ""},
    +		{"UnknownUserError", Type, 0, ""},
    +		{"UnknownUserIdError", Type, 0, ""},
    +		{"User", Type, 0, ""},
    +		{"User.Gid", Field, 0, ""},
    +		{"User.HomeDir", Field, 0, ""},
    +		{"User.Name", Field, 0, ""},
    +		{"User.Uid", Field, 0, ""},
    +		{"User.Username", Field, 0, ""},
     	},
     	"path": {
    -		{"Base", Func, 0},
    -		{"Clean", Func, 0},
    -		{"Dir", Func, 0},
    -		{"ErrBadPattern", Var, 0},
    -		{"Ext", Func, 0},
    -		{"IsAbs", Func, 0},
    -		{"Join", Func, 0},
    -		{"Match", Func, 0},
    -		{"Split", Func, 0},
    +		{"Base", Func, 0, "func(path string) string"},
    +		{"Clean", Func, 0, "func(path string) string"},
    +		{"Dir", Func, 0, "func(path string) string"},
    +		{"ErrBadPattern", Var, 0, ""},
    +		{"Ext", Func, 0, "func(path string) string"},
    +		{"IsAbs", Func, 0, "func(path string) bool"},
    +		{"Join", Func, 0, "func(elem ...string) string"},
    +		{"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
    +		{"Split", Func, 0, "func(path string) (dir string, file string)"},
     	},
     	"path/filepath": {
    -		{"Abs", Func, 0},
    -		{"Base", Func, 0},
    -		{"Clean", Func, 0},
    -		{"Dir", Func, 0},
    -		{"ErrBadPattern", Var, 0},
    -		{"EvalSymlinks", Func, 0},
    -		{"Ext", Func, 0},
    -		{"FromSlash", Func, 0},
    -		{"Glob", Func, 0},
    -		{"HasPrefix", Func, 0},
    -		{"IsAbs", Func, 0},
    -		{"IsLocal", Func, 20},
    -		{"Join", Func, 0},
    -		{"ListSeparator", Const, 0},
    -		{"Localize", Func, 23},
    -		{"Match", Func, 0},
    -		{"Rel", Func, 0},
    -		{"Separator", Const, 0},
    -		{"SkipAll", Var, 20},
    -		{"SkipDir", Var, 0},
    -		{"Split", Func, 0},
    -		{"SplitList", Func, 0},
    -		{"ToSlash", Func, 0},
    -		{"VolumeName", Func, 0},
    -		{"Walk", Func, 0},
    -		{"WalkDir", Func, 16},
    -		{"WalkFunc", Type, 0},
    +		{"Abs", Func, 0, "func(path string) (string, error)"},
    +		{"Base", Func, 0, "func(path string) string"},
    +		{"Clean", Func, 0, "func(path string) string"},
    +		{"Dir", Func, 0, "func(path string) string"},
    +		{"ErrBadPattern", Var, 0, ""},
    +		{"EvalSymlinks", Func, 0, "func(path string) (string, error)"},
    +		{"Ext", Func, 0, "func(path string) string"},
    +		{"FromSlash", Func, 0, "func(path string) string"},
    +		{"Glob", Func, 0, "func(pattern string) (matches []string, err error)"},
    +		{"HasPrefix", Func, 0, "func(p string, prefix string) bool"},
    +		{"IsAbs", Func, 0, "func(path string) bool"},
    +		{"IsLocal", Func, 20, "func(path string) bool"},
    +		{"Join", Func, 0, "func(elem ...string) string"},
    +		{"ListSeparator", Const, 0, ""},
    +		{"Localize", Func, 23, "func(path string) (string, error)"},
    +		{"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
    +		{"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"},
    +		{"Separator", Const, 0, ""},
    +		{"SkipAll", Var, 20, ""},
    +		{"SkipDir", Var, 0, ""},
    +		{"Split", Func, 0, "func(path string) (dir string, file string)"},
    +		{"SplitList", Func, 0, "func(path string) []string"},
    +		{"ToSlash", Func, 0, "func(path string) string"},
    +		{"VolumeName", Func, 0, "func(path string) string"},
    +		{"Walk", Func, 0, "func(root string, fn WalkFunc) error"},
    +		{"WalkDir", Func, 16, "func(root string, fn fs.WalkDirFunc) error"},
    +		{"WalkFunc", Type, 0, ""},
     	},
     	"plugin": {
    -		{"(*Plugin).Lookup", Method, 8},
    -		{"Open", Func, 8},
    -		{"Plugin", Type, 8},
    -		{"Symbol", Type, 8},
    +		{"(*Plugin).Lookup", Method, 8, ""},
    +		{"Open", Func, 8, "func(path string) (*Plugin, error)"},
    +		{"Plugin", Type, 8, ""},
    +		{"Symbol", Type, 8, ""},
     	},
     	"reflect": {
    -		{"(*MapIter).Key", Method, 12},
    -		{"(*MapIter).Next", Method, 12},
    -		{"(*MapIter).Reset", Method, 18},
    -		{"(*MapIter).Value", Method, 12},
    -		{"(*ValueError).Error", Method, 0},
    -		{"(ChanDir).String", Method, 0},
    -		{"(Kind).String", Method, 0},
    -		{"(Method).IsExported", Method, 17},
    -		{"(StructField).IsExported", Method, 17},
    -		{"(StructTag).Get", Method, 0},
    -		{"(StructTag).Lookup", Method, 7},
    -		{"(Value).Addr", Method, 0},
    -		{"(Value).Bool", Method, 0},
    -		{"(Value).Bytes", Method, 0},
    -		{"(Value).Call", Method, 0},
    -		{"(Value).CallSlice", Method, 0},
    -		{"(Value).CanAddr", Method, 0},
    -		{"(Value).CanComplex", Method, 18},
    -		{"(Value).CanConvert", Method, 17},
    -		{"(Value).CanFloat", Method, 18},
    -		{"(Value).CanInt", Method, 18},
    -		{"(Value).CanInterface", Method, 0},
    -		{"(Value).CanSet", Method, 0},
    -		{"(Value).CanUint", Method, 18},
    -		{"(Value).Cap", Method, 0},
    -		{"(Value).Clear", Method, 21},
    -		{"(Value).Close", Method, 0},
    -		{"(Value).Comparable", Method, 20},
    -		{"(Value).Complex", Method, 0},
    -		{"(Value).Convert", Method, 1},
    -		{"(Value).Elem", Method, 0},
    -		{"(Value).Equal", Method, 20},
    -		{"(Value).Field", Method, 0},
    -		{"(Value).FieldByIndex", Method, 0},
    -		{"(Value).FieldByIndexErr", Method, 18},
    -		{"(Value).FieldByName", Method, 0},
    -		{"(Value).FieldByNameFunc", Method, 0},
    -		{"(Value).Float", Method, 0},
    -		{"(Value).Grow", Method, 20},
    -		{"(Value).Index", Method, 0},
    -		{"(Value).Int", Method, 0},
    -		{"(Value).Interface", Method, 0},
    -		{"(Value).InterfaceData", Method, 0},
    -		{"(Value).IsNil", Method, 0},
    -		{"(Value).IsValid", Method, 0},
    -		{"(Value).IsZero", Method, 13},
    -		{"(Value).Kind", Method, 0},
    -		{"(Value).Len", Method, 0},
    -		{"(Value).MapIndex", Method, 0},
    -		{"(Value).MapKeys", Method, 0},
    -		{"(Value).MapRange", Method, 12},
    -		{"(Value).Method", Method, 0},
    -		{"(Value).MethodByName", Method, 0},
    -		{"(Value).NumField", Method, 0},
    -		{"(Value).NumMethod", Method, 0},
    -		{"(Value).OverflowComplex", Method, 0},
    -		{"(Value).OverflowFloat", Method, 0},
    -		{"(Value).OverflowInt", Method, 0},
    -		{"(Value).OverflowUint", Method, 0},
    -		{"(Value).Pointer", Method, 0},
    -		{"(Value).Recv", Method, 0},
    -		{"(Value).Send", Method, 0},
    -		{"(Value).Seq", Method, 23},
    -		{"(Value).Seq2", Method, 23},
    -		{"(Value).Set", Method, 0},
    -		{"(Value).SetBool", Method, 0},
    -		{"(Value).SetBytes", Method, 0},
    -		{"(Value).SetCap", Method, 2},
    -		{"(Value).SetComplex", Method, 0},
    -		{"(Value).SetFloat", Method, 0},
    -		{"(Value).SetInt", Method, 0},
    -		{"(Value).SetIterKey", Method, 18},
    -		{"(Value).SetIterValue", Method, 18},
    -		{"(Value).SetLen", Method, 0},
    -		{"(Value).SetMapIndex", Method, 0},
    -		{"(Value).SetPointer", Method, 0},
    -		{"(Value).SetString", Method, 0},
    -		{"(Value).SetUint", Method, 0},
    -		{"(Value).SetZero", Method, 20},
    -		{"(Value).Slice", Method, 0},
    -		{"(Value).Slice3", Method, 2},
    -		{"(Value).String", Method, 0},
    -		{"(Value).TryRecv", Method, 0},
    -		{"(Value).TrySend", Method, 0},
    -		{"(Value).Type", Method, 0},
    -		{"(Value).Uint", Method, 0},
    -		{"(Value).UnsafeAddr", Method, 0},
    -		{"(Value).UnsafePointer", Method, 18},
    -		{"Append", Func, 0},
    -		{"AppendSlice", Func, 0},
    -		{"Array", Const, 0},
    -		{"ArrayOf", Func, 5},
    -		{"Bool", Const, 0},
    -		{"BothDir", Const, 0},
    -		{"Chan", Const, 0},
    -		{"ChanDir", Type, 0},
    -		{"ChanOf", Func, 1},
    -		{"Complex128", Const, 0},
    -		{"Complex64", Const, 0},
    -		{"Copy", Func, 0},
    -		{"DeepEqual", Func, 0},
    -		{"Float32", Const, 0},
    -		{"Float64", Const, 0},
    -		{"Func", Const, 0},
    -		{"FuncOf", Func, 5},
    -		{"Indirect", Func, 0},
    -		{"Int", Const, 0},
    -		{"Int16", Const, 0},
    -		{"Int32", Const, 0},
    -		{"Int64", Const, 0},
    -		{"Int8", Const, 0},
    -		{"Interface", Const, 0},
    -		{"Invalid", Const, 0},
    -		{"Kind", Type, 0},
    -		{"MakeChan", Func, 0},
    -		{"MakeFunc", Func, 1},
    -		{"MakeMap", Func, 0},
    -		{"MakeMapWithSize", Func, 9},
    -		{"MakeSlice", Func, 0},
    -		{"Map", Const, 0},
    -		{"MapIter", Type, 12},
    -		{"MapOf", Func, 1},
    -		{"Method", Type, 0},
    -		{"Method.Func", Field, 0},
    -		{"Method.Index", Field, 0},
    -		{"Method.Name", Field, 0},
    -		{"Method.PkgPath", Field, 0},
    -		{"Method.Type", Field, 0},
    -		{"New", Func, 0},
    -		{"NewAt", Func, 0},
    -		{"Pointer", Const, 18},
    -		{"PointerTo", Func, 18},
    -		{"Ptr", Const, 0},
    -		{"PtrTo", Func, 0},
    -		{"RecvDir", Const, 0},
    -		{"Select", Func, 1},
    -		{"SelectCase", Type, 1},
    -		{"SelectCase.Chan", Field, 1},
    -		{"SelectCase.Dir", Field, 1},
    -		{"SelectCase.Send", Field, 1},
    -		{"SelectDefault", Const, 1},
    -		{"SelectDir", Type, 1},
    -		{"SelectRecv", Const, 1},
    -		{"SelectSend", Const, 1},
    -		{"SendDir", Const, 0},
    -		{"Slice", Const, 0},
    -		{"SliceAt", Func, 23},
    -		{"SliceHeader", Type, 0},
    -		{"SliceHeader.Cap", Field, 0},
    -		{"SliceHeader.Data", Field, 0},
    -		{"SliceHeader.Len", Field, 0},
    -		{"SliceOf", Func, 1},
    -		{"String", Const, 0},
    -		{"StringHeader", Type, 0},
    -		{"StringHeader.Data", Field, 0},
    -		{"StringHeader.Len", Field, 0},
    -		{"Struct", Const, 0},
    -		{"StructField", Type, 0},
    -		{"StructField.Anonymous", Field, 0},
    -		{"StructField.Index", Field, 0},
    -		{"StructField.Name", Field, 0},
    -		{"StructField.Offset", Field, 0},
    -		{"StructField.PkgPath", Field, 0},
    -		{"StructField.Tag", Field, 0},
    -		{"StructField.Type", Field, 0},
    -		{"StructOf", Func, 7},
    -		{"StructTag", Type, 0},
    -		{"Swapper", Func, 8},
    -		{"Type", Type, 0},
    -		{"TypeFor", Func, 22},
    -		{"TypeOf", Func, 0},
    -		{"Uint", Const, 0},
    -		{"Uint16", Const, 0},
    -		{"Uint32", Const, 0},
    -		{"Uint64", Const, 0},
    -		{"Uint8", Const, 0},
    -		{"Uintptr", Const, 0},
    -		{"UnsafePointer", Const, 0},
    -		{"Value", Type, 0},
    -		{"ValueError", Type, 0},
    -		{"ValueError.Kind", Field, 0},
    -		{"ValueError.Method", Field, 0},
    -		{"ValueOf", Func, 0},
    -		{"VisibleFields", Func, 17},
    -		{"Zero", Func, 0},
    +		{"(*MapIter).Key", Method, 12, ""},
    +		{"(*MapIter).Next", Method, 12, ""},
    +		{"(*MapIter).Reset", Method, 18, ""},
    +		{"(*MapIter).Value", Method, 12, ""},
    +		{"(*ValueError).Error", Method, 0, ""},
    +		{"(ChanDir).String", Method, 0, ""},
    +		{"(Kind).String", Method, 0, ""},
    +		{"(Method).IsExported", Method, 17, ""},
    +		{"(StructField).IsExported", Method, 17, ""},
    +		{"(StructTag).Get", Method, 0, ""},
    +		{"(StructTag).Lookup", Method, 7, ""},
    +		{"(Value).Addr", Method, 0, ""},
    +		{"(Value).Bool", Method, 0, ""},
    +		{"(Value).Bytes", Method, 0, ""},
    +		{"(Value).Call", Method, 0, ""},
    +		{"(Value).CallSlice", Method, 0, ""},
    +		{"(Value).CanAddr", Method, 0, ""},
    +		{"(Value).CanComplex", Method, 18, ""},
    +		{"(Value).CanConvert", Method, 17, ""},
    +		{"(Value).CanFloat", Method, 18, ""},
    +		{"(Value).CanInt", Method, 18, ""},
    +		{"(Value).CanInterface", Method, 0, ""},
    +		{"(Value).CanSet", Method, 0, ""},
    +		{"(Value).CanUint", Method, 18, ""},
    +		{"(Value).Cap", Method, 0, ""},
    +		{"(Value).Clear", Method, 21, ""},
    +		{"(Value).Close", Method, 0, ""},
    +		{"(Value).Comparable", Method, 20, ""},
    +		{"(Value).Complex", Method, 0, ""},
    +		{"(Value).Convert", Method, 1, ""},
    +		{"(Value).Elem", Method, 0, ""},
    +		{"(Value).Equal", Method, 20, ""},
    +		{"(Value).Field", Method, 0, ""},
    +		{"(Value).FieldByIndex", Method, 0, ""},
    +		{"(Value).FieldByIndexErr", Method, 18, ""},
    +		{"(Value).FieldByName", Method, 0, ""},
    +		{"(Value).FieldByNameFunc", Method, 0, ""},
    +		{"(Value).Float", Method, 0, ""},
    +		{"(Value).Grow", Method, 20, ""},
    +		{"(Value).Index", Method, 0, ""},
    +		{"(Value).Int", Method, 0, ""},
    +		{"(Value).Interface", Method, 0, ""},
    +		{"(Value).InterfaceData", Method, 0, ""},
    +		{"(Value).IsNil", Method, 0, ""},
    +		{"(Value).IsValid", Method, 0, ""},
    +		{"(Value).IsZero", Method, 13, ""},
    +		{"(Value).Kind", Method, 0, ""},
    +		{"(Value).Len", Method, 0, ""},
    +		{"(Value).MapIndex", Method, 0, ""},
    +		{"(Value).MapKeys", Method, 0, ""},
    +		{"(Value).MapRange", Method, 12, ""},
    +		{"(Value).Method", Method, 0, ""},
    +		{"(Value).MethodByName", Method, 0, ""},
    +		{"(Value).NumField", Method, 0, ""},
    +		{"(Value).NumMethod", Method, 0, ""},
    +		{"(Value).OverflowComplex", Method, 0, ""},
    +		{"(Value).OverflowFloat", Method, 0, ""},
    +		{"(Value).OverflowInt", Method, 0, ""},
    +		{"(Value).OverflowUint", Method, 0, ""},
    +		{"(Value).Pointer", Method, 0, ""},
    +		{"(Value).Recv", Method, 0, ""},
    +		{"(Value).Send", Method, 0, ""},
    +		{"(Value).Seq", Method, 23, ""},
    +		{"(Value).Seq2", Method, 23, ""},
    +		{"(Value).Set", Method, 0, ""},
    +		{"(Value).SetBool", Method, 0, ""},
    +		{"(Value).SetBytes", Method, 0, ""},
    +		{"(Value).SetCap", Method, 2, ""},
    +		{"(Value).SetComplex", Method, 0, ""},
    +		{"(Value).SetFloat", Method, 0, ""},
    +		{"(Value).SetInt", Method, 0, ""},
    +		{"(Value).SetIterKey", Method, 18, ""},
    +		{"(Value).SetIterValue", Method, 18, ""},
    +		{"(Value).SetLen", Method, 0, ""},
    +		{"(Value).SetMapIndex", Method, 0, ""},
    +		{"(Value).SetPointer", Method, 0, ""},
    +		{"(Value).SetString", Method, 0, ""},
    +		{"(Value).SetUint", Method, 0, ""},
    +		{"(Value).SetZero", Method, 20, ""},
    +		{"(Value).Slice", Method, 0, ""},
    +		{"(Value).Slice3", Method, 2, ""},
    +		{"(Value).String", Method, 0, ""},
    +		{"(Value).TryRecv", Method, 0, ""},
    +		{"(Value).TrySend", Method, 0, ""},
    +		{"(Value).Type", Method, 0, ""},
    +		{"(Value).Uint", Method, 0, ""},
    +		{"(Value).UnsafeAddr", Method, 0, ""},
    +		{"(Value).UnsafePointer", Method, 18, ""},
    +		{"Append", Func, 0, "func(s Value, x ...Value) Value"},
    +		{"AppendSlice", Func, 0, "func(s Value, t Value) Value"},
    +		{"Array", Const, 0, ""},
    +		{"ArrayOf", Func, 5, "func(length int, elem Type) Type"},
    +		{"Bool", Const, 0, ""},
    +		{"BothDir", Const, 0, ""},
    +		{"Chan", Const, 0, ""},
    +		{"ChanDir", Type, 0, ""},
    +		{"ChanOf", Func, 1, "func(dir ChanDir, t Type) Type"},
    +		{"Complex128", Const, 0, ""},
    +		{"Complex64", Const, 0, ""},
    +		{"Copy", Func, 0, "func(dst Value, src Value) int"},
    +		{"DeepEqual", Func, 0, "func(x any, y any) bool"},
    +		{"Float32", Const, 0, ""},
    +		{"Float64", Const, 0, ""},
    +		{"Func", Const, 0, ""},
    +		{"FuncOf", Func, 5, "func(in []Type, out []Type, variadic bool) Type"},
    +		{"Indirect", Func, 0, "func(v Value) Value"},
    +		{"Int", Const, 0, ""},
    +		{"Int16", Const, 0, ""},
    +		{"Int32", Const, 0, ""},
    +		{"Int64", Const, 0, ""},
    +		{"Int8", Const, 0, ""},
    +		{"Interface", Const, 0, ""},
    +		{"Invalid", Const, 0, ""},
    +		{"Kind", Type, 0, ""},
    +		{"MakeChan", Func, 0, "func(typ Type, buffer int) Value"},
    +		{"MakeFunc", Func, 1, "func(typ Type, fn func(args []Value) (results []Value)) Value"},
    +		{"MakeMap", Func, 0, "func(typ Type) Value"},
    +		{"MakeMapWithSize", Func, 9, "func(typ Type, n int) Value"},
    +		{"MakeSlice", Func, 0, "func(typ Type, len int, cap int) Value"},
    +		{"Map", Const, 0, ""},
    +		{"MapIter", Type, 12, ""},
    +		{"MapOf", Func, 1, "func(key Type, elem Type) Type"},
    +		{"Method", Type, 0, ""},
    +		{"Method.Func", Field, 0, ""},
    +		{"Method.Index", Field, 0, ""},
    +		{"Method.Name", Field, 0, ""},
    +		{"Method.PkgPath", Field, 0, ""},
    +		{"Method.Type", Field, 0, ""},
    +		{"New", Func, 0, "func(typ Type) Value"},
    +		{"NewAt", Func, 0, "func(typ Type, p unsafe.Pointer) Value"},
    +		{"Pointer", Const, 18, ""},
    +		{"PointerTo", Func, 18, "func(t Type) Type"},
    +		{"Ptr", Const, 0, ""},
    +		{"PtrTo", Func, 0, "func(t Type) Type"},
    +		{"RecvDir", Const, 0, ""},
    +		{"Select", Func, 1, "func(cases []SelectCase) (chosen int, recv Value, recvOK bool)"},
    +		{"SelectCase", Type, 1, ""},
    +		{"SelectCase.Chan", Field, 1, ""},
    +		{"SelectCase.Dir", Field, 1, ""},
    +		{"SelectCase.Send", Field, 1, ""},
    +		{"SelectDefault", Const, 1, ""},
    +		{"SelectDir", Type, 1, ""},
    +		{"SelectRecv", Const, 1, ""},
    +		{"SelectSend", Const, 1, ""},
    +		{"SendDir", Const, 0, ""},
    +		{"Slice", Const, 0, ""},
    +		{"SliceAt", Func, 23, "func(typ Type, p unsafe.Pointer, n int) Value"},
    +		{"SliceHeader", Type, 0, ""},
    +		{"SliceHeader.Cap", Field, 0, ""},
    +		{"SliceHeader.Data", Field, 0, ""},
    +		{"SliceHeader.Len", Field, 0, ""},
    +		{"SliceOf", Func, 1, "func(t Type) Type"},
    +		{"String", Const, 0, ""},
    +		{"StringHeader", Type, 0, ""},
    +		{"StringHeader.Data", Field, 0, ""},
    +		{"StringHeader.Len", Field, 0, ""},
    +		{"Struct", Const, 0, ""},
    +		{"StructField", Type, 0, ""},
    +		{"StructField.Anonymous", Field, 0, ""},
    +		{"StructField.Index", Field, 0, ""},
    +		{"StructField.Name", Field, 0, ""},
    +		{"StructField.Offset", Field, 0, ""},
    +		{"StructField.PkgPath", Field, 0, ""},
    +		{"StructField.Tag", Field, 0, ""},
    +		{"StructField.Type", Field, 0, ""},
    +		{"StructOf", Func, 7, "func(fields []StructField) Type"},
    +		{"StructTag", Type, 0, ""},
    +		{"Swapper", Func, 8, "func(slice any) func(i int, j int)"},
    +		{"Type", Type, 0, ""},
    +		{"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"},
    +		{"TypeFor", Func, 22, "func[T any]() Type"},
    +		{"TypeOf", Func, 0, "func(i any) Type"},
    +		{"Uint", Const, 0, ""},
    +		{"Uint16", Const, 0, ""},
    +		{"Uint32", Const, 0, ""},
    +		{"Uint64", Const, 0, ""},
    +		{"Uint8", Const, 0, ""},
    +		{"Uintptr", Const, 0, ""},
    +		{"UnsafePointer", Const, 0, ""},
    +		{"Value", Type, 0, ""},
    +		{"ValueError", Type, 0, ""},
    +		{"ValueError.Kind", Field, 0, ""},
    +		{"ValueError.Method", Field, 0, ""},
    +		{"ValueOf", Func, 0, "func(i any) Value"},
    +		{"VisibleFields", Func, 17, "func(t Type) []StructField"},
    +		{"Zero", Func, 0, "func(typ Type) Value"},
     	},
     	"regexp": {
    -		{"(*Regexp).AppendText", Method, 24},
    -		{"(*Regexp).Copy", Method, 6},
    -		{"(*Regexp).Expand", Method, 0},
    -		{"(*Regexp).ExpandString", Method, 0},
    -		{"(*Regexp).Find", Method, 0},
    -		{"(*Regexp).FindAll", Method, 0},
    -		{"(*Regexp).FindAllIndex", Method, 0},
    -		{"(*Regexp).FindAllString", Method, 0},
    -		{"(*Regexp).FindAllStringIndex", Method, 0},
    -		{"(*Regexp).FindAllStringSubmatch", Method, 0},
    -		{"(*Regexp).FindAllStringSubmatchIndex", Method, 0},
    -		{"(*Regexp).FindAllSubmatch", Method, 0},
    -		{"(*Regexp).FindAllSubmatchIndex", Method, 0},
    -		{"(*Regexp).FindIndex", Method, 0},
    -		{"(*Regexp).FindReaderIndex", Method, 0},
    -		{"(*Regexp).FindReaderSubmatchIndex", Method, 0},
    -		{"(*Regexp).FindString", Method, 0},
    -		{"(*Regexp).FindStringIndex", Method, 0},
    -		{"(*Regexp).FindStringSubmatch", Method, 0},
    -		{"(*Regexp).FindStringSubmatchIndex", Method, 0},
    -		{"(*Regexp).FindSubmatch", Method, 0},
    -		{"(*Regexp).FindSubmatchIndex", Method, 0},
    -		{"(*Regexp).LiteralPrefix", Method, 0},
    -		{"(*Regexp).Longest", Method, 1},
    -		{"(*Regexp).MarshalText", Method, 21},
    -		{"(*Regexp).Match", Method, 0},
    -		{"(*Regexp).MatchReader", Method, 0},
    -		{"(*Regexp).MatchString", Method, 0},
    -		{"(*Regexp).NumSubexp", Method, 0},
    -		{"(*Regexp).ReplaceAll", Method, 0},
    -		{"(*Regexp).ReplaceAllFunc", Method, 0},
    -		{"(*Regexp).ReplaceAllLiteral", Method, 0},
    -		{"(*Regexp).ReplaceAllLiteralString", Method, 0},
    -		{"(*Regexp).ReplaceAllString", Method, 0},
    -		{"(*Regexp).ReplaceAllStringFunc", Method, 0},
    -		{"(*Regexp).Split", Method, 1},
    -		{"(*Regexp).String", Method, 0},
    -		{"(*Regexp).SubexpIndex", Method, 15},
    -		{"(*Regexp).SubexpNames", Method, 0},
    -		{"(*Regexp).UnmarshalText", Method, 21},
    -		{"Compile", Func, 0},
    -		{"CompilePOSIX", Func, 0},
    -		{"Match", Func, 0},
    -		{"MatchReader", Func, 0},
    -		{"MatchString", Func, 0},
    -		{"MustCompile", Func, 0},
    -		{"MustCompilePOSIX", Func, 0},
    -		{"QuoteMeta", Func, 0},
    -		{"Regexp", Type, 0},
    +		{"(*Regexp).AppendText", Method, 24, ""},
    +		{"(*Regexp).Copy", Method, 6, ""},
    +		{"(*Regexp).Expand", Method, 0, ""},
    +		{"(*Regexp).ExpandString", Method, 0, ""},
    +		{"(*Regexp).Find", Method, 0, ""},
    +		{"(*Regexp).FindAll", Method, 0, ""},
    +		{"(*Regexp).FindAllIndex", Method, 0, ""},
    +		{"(*Regexp).FindAllString", Method, 0, ""},
    +		{"(*Regexp).FindAllStringIndex", Method, 0, ""},
    +		{"(*Regexp).FindAllStringSubmatch", Method, 0, ""},
    +		{"(*Regexp).FindAllStringSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).FindAllSubmatch", Method, 0, ""},
    +		{"(*Regexp).FindAllSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).FindIndex", Method, 0, ""},
    +		{"(*Regexp).FindReaderIndex", Method, 0, ""},
    +		{"(*Regexp).FindReaderSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).FindString", Method, 0, ""},
    +		{"(*Regexp).FindStringIndex", Method, 0, ""},
    +		{"(*Regexp).FindStringSubmatch", Method, 0, ""},
    +		{"(*Regexp).FindStringSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).FindSubmatch", Method, 0, ""},
    +		{"(*Regexp).FindSubmatchIndex", Method, 0, ""},
    +		{"(*Regexp).LiteralPrefix", Method, 0, ""},
    +		{"(*Regexp).Longest", Method, 1, ""},
    +		{"(*Regexp).MarshalText", Method, 21, ""},
    +		{"(*Regexp).Match", Method, 0, ""},
    +		{"(*Regexp).MatchReader", Method, 0, ""},
    +		{"(*Regexp).MatchString", Method, 0, ""},
    +		{"(*Regexp).NumSubexp", Method, 0, ""},
    +		{"(*Regexp).ReplaceAll", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllFunc", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllLiteral", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllLiteralString", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllString", Method, 0, ""},
    +		{"(*Regexp).ReplaceAllStringFunc", Method, 0, ""},
    +		{"(*Regexp).Split", Method, 1, ""},
    +		{"(*Regexp).String", Method, 0, ""},
    +		{"(*Regexp).SubexpIndex", Method, 15, ""},
    +		{"(*Regexp).SubexpNames", Method, 0, ""},
    +		{"(*Regexp).UnmarshalText", Method, 21, ""},
    +		{"Compile", Func, 0, "func(expr string) (*Regexp, error)"},
    +		{"CompilePOSIX", Func, 0, "func(expr string) (*Regexp, error)"},
    +		{"Match", Func, 0, "func(pattern string, b []byte) (matched bool, err error)"},
    +		{"MatchReader", Func, 0, "func(pattern string, r io.RuneReader) (matched bool, err error)"},
    +		{"MatchString", Func, 0, "func(pattern string, s string) (matched bool, err error)"},
    +		{"MustCompile", Func, 0, "func(str string) *Regexp"},
    +		{"MustCompilePOSIX", Func, 0, "func(str string) *Regexp"},
    +		{"QuoteMeta", Func, 0, "func(s string) string"},
    +		{"Regexp", Type, 0, ""},
     	},
     	"regexp/syntax": {
    -		{"(*Error).Error", Method, 0},
    -		{"(*Inst).MatchEmptyWidth", Method, 0},
    -		{"(*Inst).MatchRune", Method, 0},
    -		{"(*Inst).MatchRunePos", Method, 3},
    -		{"(*Inst).String", Method, 0},
    -		{"(*Prog).Prefix", Method, 0},
    -		{"(*Prog).StartCond", Method, 0},
    -		{"(*Prog).String", Method, 0},
    -		{"(*Regexp).CapNames", Method, 0},
    -		{"(*Regexp).Equal", Method, 0},
    -		{"(*Regexp).MaxCap", Method, 0},
    -		{"(*Regexp).Simplify", Method, 0},
    -		{"(*Regexp).String", Method, 0},
    -		{"(ErrorCode).String", Method, 0},
    -		{"(InstOp).String", Method, 3},
    -		{"(Op).String", Method, 11},
    -		{"ClassNL", Const, 0},
    -		{"Compile", Func, 0},
    -		{"DotNL", Const, 0},
    -		{"EmptyBeginLine", Const, 0},
    -		{"EmptyBeginText", Const, 0},
    -		{"EmptyEndLine", Const, 0},
    -		{"EmptyEndText", Const, 0},
    -		{"EmptyNoWordBoundary", Const, 0},
    -		{"EmptyOp", Type, 0},
    -		{"EmptyOpContext", Func, 0},
    -		{"EmptyWordBoundary", Const, 0},
    -		{"ErrInternalError", Const, 0},
    -		{"ErrInvalidCharClass", Const, 0},
    -		{"ErrInvalidCharRange", Const, 0},
    -		{"ErrInvalidEscape", Const, 0},
    -		{"ErrInvalidNamedCapture", Const, 0},
    -		{"ErrInvalidPerlOp", Const, 0},
    -		{"ErrInvalidRepeatOp", Const, 0},
    -		{"ErrInvalidRepeatSize", Const, 0},
    -		{"ErrInvalidUTF8", Const, 0},
    -		{"ErrLarge", Const, 20},
    -		{"ErrMissingBracket", Const, 0},
    -		{"ErrMissingParen", Const, 0},
    -		{"ErrMissingRepeatArgument", Const, 0},
    -		{"ErrNestingDepth", Const, 19},
    -		{"ErrTrailingBackslash", Const, 0},
    -		{"ErrUnexpectedParen", Const, 1},
    -		{"Error", Type, 0},
    -		{"Error.Code", Field, 0},
    -		{"Error.Expr", Field, 0},
    -		{"ErrorCode", Type, 0},
    -		{"Flags", Type, 0},
    -		{"FoldCase", Const, 0},
    -		{"Inst", Type, 0},
    -		{"Inst.Arg", Field, 0},
    -		{"Inst.Op", Field, 0},
    -		{"Inst.Out", Field, 0},
    -		{"Inst.Rune", Field, 0},
    -		{"InstAlt", Const, 0},
    -		{"InstAltMatch", Const, 0},
    -		{"InstCapture", Const, 0},
    -		{"InstEmptyWidth", Const, 0},
    -		{"InstFail", Const, 0},
    -		{"InstMatch", Const, 0},
    -		{"InstNop", Const, 0},
    -		{"InstOp", Type, 0},
    -		{"InstRune", Const, 0},
    -		{"InstRune1", Const, 0},
    -		{"InstRuneAny", Const, 0},
    -		{"InstRuneAnyNotNL", Const, 0},
    -		{"IsWordChar", Func, 0},
    -		{"Literal", Const, 0},
    -		{"MatchNL", Const, 0},
    -		{"NonGreedy", Const, 0},
    -		{"OneLine", Const, 0},
    -		{"Op", Type, 0},
    -		{"OpAlternate", Const, 0},
    -		{"OpAnyChar", Const, 0},
    -		{"OpAnyCharNotNL", Const, 0},
    -		{"OpBeginLine", Const, 0},
    -		{"OpBeginText", Const, 0},
    -		{"OpCapture", Const, 0},
    -		{"OpCharClass", Const, 0},
    -		{"OpConcat", Const, 0},
    -		{"OpEmptyMatch", Const, 0},
    -		{"OpEndLine", Const, 0},
    -		{"OpEndText", Const, 0},
    -		{"OpLiteral", Const, 0},
    -		{"OpNoMatch", Const, 0},
    -		{"OpNoWordBoundary", Const, 0},
    -		{"OpPlus", Const, 0},
    -		{"OpQuest", Const, 0},
    -		{"OpRepeat", Const, 0},
    -		{"OpStar", Const, 0},
    -		{"OpWordBoundary", Const, 0},
    -		{"POSIX", Const, 0},
    -		{"Parse", Func, 0},
    -		{"Perl", Const, 0},
    -		{"PerlX", Const, 0},
    -		{"Prog", Type, 0},
    -		{"Prog.Inst", Field, 0},
    -		{"Prog.NumCap", Field, 0},
    -		{"Prog.Start", Field, 0},
    -		{"Regexp", Type, 0},
    -		{"Regexp.Cap", Field, 0},
    -		{"Regexp.Flags", Field, 0},
    -		{"Regexp.Max", Field, 0},
    -		{"Regexp.Min", Field, 0},
    -		{"Regexp.Name", Field, 0},
    -		{"Regexp.Op", Field, 0},
    -		{"Regexp.Rune", Field, 0},
    -		{"Regexp.Rune0", Field, 0},
    -		{"Regexp.Sub", Field, 0},
    -		{"Regexp.Sub0", Field, 0},
    -		{"Simple", Const, 0},
    -		{"UnicodeGroups", Const, 0},
    -		{"WasDollar", Const, 0},
    +		{"(*Error).Error", Method, 0, ""},
    +		{"(*Inst).MatchEmptyWidth", Method, 0, ""},
    +		{"(*Inst).MatchRune", Method, 0, ""},
    +		{"(*Inst).MatchRunePos", Method, 3, ""},
    +		{"(*Inst).String", Method, 0, ""},
    +		{"(*Prog).Prefix", Method, 0, ""},
    +		{"(*Prog).StartCond", Method, 0, ""},
    +		{"(*Prog).String", Method, 0, ""},
    +		{"(*Regexp).CapNames", Method, 0, ""},
    +		{"(*Regexp).Equal", Method, 0, ""},
    +		{"(*Regexp).MaxCap", Method, 0, ""},
    +		{"(*Regexp).Simplify", Method, 0, ""},
    +		{"(*Regexp).String", Method, 0, ""},
    +		{"(ErrorCode).String", Method, 0, ""},
    +		{"(InstOp).String", Method, 3, ""},
    +		{"(Op).String", Method, 11, ""},
    +		{"ClassNL", Const, 0, ""},
    +		{"Compile", Func, 0, "func(re *Regexp) (*Prog, error)"},
    +		{"DotNL", Const, 0, ""},
    +		{"EmptyBeginLine", Const, 0, ""},
    +		{"EmptyBeginText", Const, 0, ""},
    +		{"EmptyEndLine", Const, 0, ""},
    +		{"EmptyEndText", Const, 0, ""},
    +		{"EmptyNoWordBoundary", Const, 0, ""},
    +		{"EmptyOp", Type, 0, ""},
    +		{"EmptyOpContext", Func, 0, "func(r1 rune, r2 rune) EmptyOp"},
    +		{"EmptyWordBoundary", Const, 0, ""},
    +		{"ErrInternalError", Const, 0, ""},
    +		{"ErrInvalidCharClass", Const, 0, ""},
    +		{"ErrInvalidCharRange", Const, 0, ""},
    +		{"ErrInvalidEscape", Const, 0, ""},
    +		{"ErrInvalidNamedCapture", Const, 0, ""},
    +		{"ErrInvalidPerlOp", Const, 0, ""},
    +		{"ErrInvalidRepeatOp", Const, 0, ""},
    +		{"ErrInvalidRepeatSize", Const, 0, ""},
    +		{"ErrInvalidUTF8", Const, 0, ""},
    +		{"ErrLarge", Const, 20, ""},
    +		{"ErrMissingBracket", Const, 0, ""},
    +		{"ErrMissingParen", Const, 0, ""},
    +		{"ErrMissingRepeatArgument", Const, 0, ""},
    +		{"ErrNestingDepth", Const, 19, ""},
    +		{"ErrTrailingBackslash", Const, 0, ""},
    +		{"ErrUnexpectedParen", Const, 1, ""},
    +		{"Error", Type, 0, ""},
    +		{"Error.Code", Field, 0, ""},
    +		{"Error.Expr", Field, 0, ""},
    +		{"ErrorCode", Type, 0, ""},
    +		{"Flags", Type, 0, ""},
    +		{"FoldCase", Const, 0, ""},
    +		{"Inst", Type, 0, ""},
    +		{"Inst.Arg", Field, 0, ""},
    +		{"Inst.Op", Field, 0, ""},
    +		{"Inst.Out", Field, 0, ""},
    +		{"Inst.Rune", Field, 0, ""},
    +		{"InstAlt", Const, 0, ""},
    +		{"InstAltMatch", Const, 0, ""},
    +		{"InstCapture", Const, 0, ""},
    +		{"InstEmptyWidth", Const, 0, ""},
    +		{"InstFail", Const, 0, ""},
    +		{"InstMatch", Const, 0, ""},
    +		{"InstNop", Const, 0, ""},
    +		{"InstOp", Type, 0, ""},
    +		{"InstRune", Const, 0, ""},
    +		{"InstRune1", Const, 0, ""},
    +		{"InstRuneAny", Const, 0, ""},
    +		{"InstRuneAnyNotNL", Const, 0, ""},
    +		{"IsWordChar", Func, 0, "func(r rune) bool"},
    +		{"Literal", Const, 0, ""},
    +		{"MatchNL", Const, 0, ""},
    +		{"NonGreedy", Const, 0, ""},
    +		{"OneLine", Const, 0, ""},
    +		{"Op", Type, 0, ""},
    +		{"OpAlternate", Const, 0, ""},
    +		{"OpAnyChar", Const, 0, ""},
    +		{"OpAnyCharNotNL", Const, 0, ""},
    +		{"OpBeginLine", Const, 0, ""},
    +		{"OpBeginText", Const, 0, ""},
    +		{"OpCapture", Const, 0, ""},
    +		{"OpCharClass", Const, 0, ""},
    +		{"OpConcat", Const, 0, ""},
    +		{"OpEmptyMatch", Const, 0, ""},
    +		{"OpEndLine", Const, 0, ""},
    +		{"OpEndText", Const, 0, ""},
    +		{"OpLiteral", Const, 0, ""},
    +		{"OpNoMatch", Const, 0, ""},
    +		{"OpNoWordBoundary", Const, 0, ""},
    +		{"OpPlus", Const, 0, ""},
    +		{"OpQuest", Const, 0, ""},
    +		{"OpRepeat", Const, 0, ""},
    +		{"OpStar", Const, 0, ""},
    +		{"OpWordBoundary", Const, 0, ""},
    +		{"POSIX", Const, 0, ""},
    +		{"Parse", Func, 0, "func(s string, flags Flags) (*Regexp, error)"},
    +		{"Perl", Const, 0, ""},
    +		{"PerlX", Const, 0, ""},
    +		{"Prog", Type, 0, ""},
    +		{"Prog.Inst", Field, 0, ""},
    +		{"Prog.NumCap", Field, 0, ""},
    +		{"Prog.Start", Field, 0, ""},
    +		{"Regexp", Type, 0, ""},
    +		{"Regexp.Cap", Field, 0, ""},
    +		{"Regexp.Flags", Field, 0, ""},
    +		{"Regexp.Max", Field, 0, ""},
    +		{"Regexp.Min", Field, 0, ""},
    +		{"Regexp.Name", Field, 0, ""},
    +		{"Regexp.Op", Field, 0, ""},
    +		{"Regexp.Rune", Field, 0, ""},
    +		{"Regexp.Rune0", Field, 0, ""},
    +		{"Regexp.Sub", Field, 0, ""},
    +		{"Regexp.Sub0", Field, 0, ""},
    +		{"Simple", Const, 0, ""},
    +		{"UnicodeGroups", Const, 0, ""},
    +		{"WasDollar", Const, 0, ""},
     	},
     	"runtime": {
    -		{"(*BlockProfileRecord).Stack", Method, 1},
    -		{"(*Frames).Next", Method, 7},
    -		{"(*Func).Entry", Method, 0},
    -		{"(*Func).FileLine", Method, 0},
    -		{"(*Func).Name", Method, 0},
    -		{"(*MemProfileRecord).InUseBytes", Method, 0},
    -		{"(*MemProfileRecord).InUseObjects", Method, 0},
    -		{"(*MemProfileRecord).Stack", Method, 0},
    -		{"(*PanicNilError).Error", Method, 21},
    -		{"(*PanicNilError).RuntimeError", Method, 21},
    -		{"(*Pinner).Pin", Method, 21},
    -		{"(*Pinner).Unpin", Method, 21},
    -		{"(*StackRecord).Stack", Method, 0},
    -		{"(*TypeAssertionError).Error", Method, 0},
    -		{"(*TypeAssertionError).RuntimeError", Method, 0},
    -		{"(Cleanup).Stop", Method, 24},
    -		{"AddCleanup", Func, 24},
    -		{"BlockProfile", Func, 1},
    -		{"BlockProfileRecord", Type, 1},
    -		{"BlockProfileRecord.Count", Field, 1},
    -		{"BlockProfileRecord.Cycles", Field, 1},
    -		{"BlockProfileRecord.StackRecord", Field, 1},
    -		{"Breakpoint", Func, 0},
    -		{"CPUProfile", Func, 0},
    -		{"Caller", Func, 0},
    -		{"Callers", Func, 0},
    -		{"CallersFrames", Func, 7},
    -		{"Cleanup", Type, 24},
    -		{"Compiler", Const, 0},
    -		{"Error", Type, 0},
    -		{"Frame", Type, 7},
    -		{"Frame.Entry", Field, 7},
    -		{"Frame.File", Field, 7},
    -		{"Frame.Func", Field, 7},
    -		{"Frame.Function", Field, 7},
    -		{"Frame.Line", Field, 7},
    -		{"Frame.PC", Field, 7},
    -		{"Frames", Type, 7},
    -		{"Func", Type, 0},
    -		{"FuncForPC", Func, 0},
    -		{"GC", Func, 0},
    -		{"GOARCH", Const, 0},
    -		{"GOMAXPROCS", Func, 0},
    -		{"GOOS", Const, 0},
    -		{"GOROOT", Func, 0},
    -		{"Goexit", Func, 0},
    -		{"GoroutineProfile", Func, 0},
    -		{"Gosched", Func, 0},
    -		{"KeepAlive", Func, 7},
    -		{"LockOSThread", Func, 0},
    -		{"MemProfile", Func, 0},
    -		{"MemProfileRate", Var, 0},
    -		{"MemProfileRecord", Type, 0},
    -		{"MemProfileRecord.AllocBytes", Field, 0},
    -		{"MemProfileRecord.AllocObjects", Field, 0},
    -		{"MemProfileRecord.FreeBytes", Field, 0},
    -		{"MemProfileRecord.FreeObjects", Field, 0},
    -		{"MemProfileRecord.Stack0", Field, 0},
    -		{"MemStats", Type, 0},
    -		{"MemStats.Alloc", Field, 0},
    -		{"MemStats.BuckHashSys", Field, 0},
    -		{"MemStats.BySize", Field, 0},
    -		{"MemStats.DebugGC", Field, 0},
    -		{"MemStats.EnableGC", Field, 0},
    -		{"MemStats.Frees", Field, 0},
    -		{"MemStats.GCCPUFraction", Field, 5},
    -		{"MemStats.GCSys", Field, 2},
    -		{"MemStats.HeapAlloc", Field, 0},
    -		{"MemStats.HeapIdle", Field, 0},
    -		{"MemStats.HeapInuse", Field, 0},
    -		{"MemStats.HeapObjects", Field, 0},
    -		{"MemStats.HeapReleased", Field, 0},
    -		{"MemStats.HeapSys", Field, 0},
    -		{"MemStats.LastGC", Field, 0},
    -		{"MemStats.Lookups", Field, 0},
    -		{"MemStats.MCacheInuse", Field, 0},
    -		{"MemStats.MCacheSys", Field, 0},
    -		{"MemStats.MSpanInuse", Field, 0},
    -		{"MemStats.MSpanSys", Field, 0},
    -		{"MemStats.Mallocs", Field, 0},
    -		{"MemStats.NextGC", Field, 0},
    -		{"MemStats.NumForcedGC", Field, 8},
    -		{"MemStats.NumGC", Field, 0},
    -		{"MemStats.OtherSys", Field, 2},
    -		{"MemStats.PauseEnd", Field, 4},
    -		{"MemStats.PauseNs", Field, 0},
    -		{"MemStats.PauseTotalNs", Field, 0},
    -		{"MemStats.StackInuse", Field, 0},
    -		{"MemStats.StackSys", Field, 0},
    -		{"MemStats.Sys", Field, 0},
    -		{"MemStats.TotalAlloc", Field, 0},
    -		{"MutexProfile", Func, 8},
    -		{"NumCPU", Func, 0},
    -		{"NumCgoCall", Func, 0},
    -		{"NumGoroutine", Func, 0},
    -		{"PanicNilError", Type, 21},
    -		{"Pinner", Type, 21},
    -		{"ReadMemStats", Func, 0},
    -		{"ReadTrace", Func, 5},
    -		{"SetBlockProfileRate", Func, 1},
    -		{"SetCPUProfileRate", Func, 0},
    -		{"SetCgoTraceback", Func, 7},
    -		{"SetFinalizer", Func, 0},
    -		{"SetMutexProfileFraction", Func, 8},
    -		{"Stack", Func, 0},
    -		{"StackRecord", Type, 0},
    -		{"StackRecord.Stack0", Field, 0},
    -		{"StartTrace", Func, 5},
    -		{"StopTrace", Func, 5},
    -		{"ThreadCreateProfile", Func, 0},
    -		{"TypeAssertionError", Type, 0},
    -		{"UnlockOSThread", Func, 0},
    -		{"Version", Func, 0},
    +		{"(*BlockProfileRecord).Stack", Method, 1, ""},
    +		{"(*Frames).Next", Method, 7, ""},
    +		{"(*Func).Entry", Method, 0, ""},
    +		{"(*Func).FileLine", Method, 0, ""},
    +		{"(*Func).Name", Method, 0, ""},
    +		{"(*MemProfileRecord).InUseBytes", Method, 0, ""},
    +		{"(*MemProfileRecord).InUseObjects", Method, 0, ""},
    +		{"(*MemProfileRecord).Stack", Method, 0, ""},
    +		{"(*PanicNilError).Error", Method, 21, ""},
    +		{"(*PanicNilError).RuntimeError", Method, 21, ""},
    +		{"(*Pinner).Pin", Method, 21, ""},
    +		{"(*Pinner).Unpin", Method, 21, ""},
    +		{"(*StackRecord).Stack", Method, 0, ""},
    +		{"(*TypeAssertionError).Error", Method, 0, ""},
    +		{"(*TypeAssertionError).RuntimeError", Method, 0, ""},
    +		{"(Cleanup).Stop", Method, 24, ""},
    +		{"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"},
    +		{"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"},
    +		{"BlockProfileRecord", Type, 1, ""},
    +		{"BlockProfileRecord.Count", Field, 1, ""},
    +		{"BlockProfileRecord.Cycles", Field, 1, ""},
    +		{"BlockProfileRecord.StackRecord", Field, 1, ""},
    +		{"Breakpoint", Func, 0, "func()"},
    +		{"CPUProfile", Func, 0, "func() []byte"},
    +		{"Caller", Func, 0, "func(skip int) (pc uintptr, file string, line int, ok bool)"},
    +		{"Callers", Func, 0, "func(skip int, pc []uintptr) int"},
    +		{"CallersFrames", Func, 7, "func(callers []uintptr) *Frames"},
    +		{"Cleanup", Type, 24, ""},
    +		{"Compiler", Const, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Frame", Type, 7, ""},
    +		{"Frame.Entry", Field, 7, ""},
    +		{"Frame.File", Field, 7, ""},
    +		{"Frame.Func", Field, 7, ""},
    +		{"Frame.Function", Field, 7, ""},
    +		{"Frame.Line", Field, 7, ""},
    +		{"Frame.PC", Field, 7, ""},
    +		{"Frames", Type, 7, ""},
    +		{"Func", Type, 0, ""},
    +		{"FuncForPC", Func, 0, "func(pc uintptr) *Func"},
    +		{"GC", Func, 0, "func()"},
    +		{"GOARCH", Const, 0, ""},
    +		{"GOMAXPROCS", Func, 0, "func(n int) int"},
    +		{"GOOS", Const, 0, ""},
    +		{"GOROOT", Func, 0, "func() string"},
    +		{"Goexit", Func, 0, "func()"},
    +		{"GoroutineProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
    +		{"Gosched", Func, 0, "func()"},
    +		{"KeepAlive", Func, 7, "func(x any)"},
    +		{"LockOSThread", Func, 0, "func()"},
    +		{"MemProfile", Func, 0, "func(p []MemProfileRecord, inuseZero bool) (n int, ok bool)"},
    +		{"MemProfileRate", Var, 0, ""},
    +		{"MemProfileRecord", Type, 0, ""},
    +		{"MemProfileRecord.AllocBytes", Field, 0, ""},
    +		{"MemProfileRecord.AllocObjects", Field, 0, ""},
    +		{"MemProfileRecord.FreeBytes", Field, 0, ""},
    +		{"MemProfileRecord.FreeObjects", Field, 0, ""},
    +		{"MemProfileRecord.Stack0", Field, 0, ""},
    +		{"MemStats", Type, 0, ""},
    +		{"MemStats.Alloc", Field, 0, ""},
    +		{"MemStats.BuckHashSys", Field, 0, ""},
    +		{"MemStats.BySize", Field, 0, ""},
    +		{"MemStats.DebugGC", Field, 0, ""},
    +		{"MemStats.EnableGC", Field, 0, ""},
    +		{"MemStats.Frees", Field, 0, ""},
    +		{"MemStats.GCCPUFraction", Field, 5, ""},
    +		{"MemStats.GCSys", Field, 2, ""},
    +		{"MemStats.HeapAlloc", Field, 0, ""},
    +		{"MemStats.HeapIdle", Field, 0, ""},
    +		{"MemStats.HeapInuse", Field, 0, ""},
    +		{"MemStats.HeapObjects", Field, 0, ""},
    +		{"MemStats.HeapReleased", Field, 0, ""},
    +		{"MemStats.HeapSys", Field, 0, ""},
    +		{"MemStats.LastGC", Field, 0, ""},
    +		{"MemStats.Lookups", Field, 0, ""},
    +		{"MemStats.MCacheInuse", Field, 0, ""},
    +		{"MemStats.MCacheSys", Field, 0, ""},
    +		{"MemStats.MSpanInuse", Field, 0, ""},
    +		{"MemStats.MSpanSys", Field, 0, ""},
    +		{"MemStats.Mallocs", Field, 0, ""},
    +		{"MemStats.NextGC", Field, 0, ""},
    +		{"MemStats.NumForcedGC", Field, 8, ""},
    +		{"MemStats.NumGC", Field, 0, ""},
    +		{"MemStats.OtherSys", Field, 2, ""},
    +		{"MemStats.PauseEnd", Field, 4, ""},
    +		{"MemStats.PauseNs", Field, 0, ""},
    +		{"MemStats.PauseTotalNs", Field, 0, ""},
    +		{"MemStats.StackInuse", Field, 0, ""},
    +		{"MemStats.StackSys", Field, 0, ""},
    +		{"MemStats.Sys", Field, 0, ""},
    +		{"MemStats.TotalAlloc", Field, 0, ""},
    +		{"MutexProfile", Func, 8, "func(p []BlockProfileRecord) (n int, ok bool)"},
    +		{"NumCPU", Func, 0, "func() int"},
    +		{"NumCgoCall", Func, 0, "func() int64"},
    +		{"NumGoroutine", Func, 0, "func() int"},
    +		{"PanicNilError", Type, 21, ""},
    +		{"Pinner", Type, 21, ""},
    +		{"ReadMemStats", Func, 0, "func(m *MemStats)"},
    +		{"ReadTrace", Func, 5, "func() []byte"},
    +		{"SetBlockProfileRate", Func, 1, "func(rate int)"},
    +		{"SetCPUProfileRate", Func, 0, "func(hz int)"},
    +		{"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"},
    +		{"SetDefaultGOMAXPROCS", Func, 25, "func()"},
    +		{"SetFinalizer", Func, 0, "func(obj any, finalizer any)"},
    +		{"SetMutexProfileFraction", Func, 8, "func(rate int) int"},
    +		{"Stack", Func, 0, "func(buf []byte, all bool) int"},
    +		{"StackRecord", Type, 0, ""},
    +		{"StackRecord.Stack0", Field, 0, ""},
    +		{"StartTrace", Func, 5, "func() error"},
    +		{"StopTrace", Func, 5, "func()"},
    +		{"ThreadCreateProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
    +		{"TypeAssertionError", Type, 0, ""},
    +		{"UnlockOSThread", Func, 0, "func()"},
    +		{"Version", Func, 0, "func() string"},
     	},
     	"runtime/cgo": {
    -		{"(Handle).Delete", Method, 17},
    -		{"(Handle).Value", Method, 17},
    -		{"Handle", Type, 17},
    -		{"Incomplete", Type, 20},
    -		{"NewHandle", Func, 17},
    +		{"(Handle).Delete", Method, 17, ""},
    +		{"(Handle).Value", Method, 17, ""},
    +		{"Handle", Type, 17, ""},
    +		{"Incomplete", Type, 20, ""},
    +		{"NewHandle", Func, 17, ""},
     	},
     	"runtime/coverage": {
    -		{"ClearCounters", Func, 20},
    -		{"WriteCounters", Func, 20},
    -		{"WriteCountersDir", Func, 20},
    -		{"WriteMeta", Func, 20},
    -		{"WriteMetaDir", Func, 20},
    +		{"ClearCounters", Func, 20, "func() error"},
    +		{"WriteCounters", Func, 20, "func(w io.Writer) error"},
    +		{"WriteCountersDir", Func, 20, "func(dir string) error"},
    +		{"WriteMeta", Func, 20, "func(w io.Writer) error"},
    +		{"WriteMetaDir", Func, 20, "func(dir string) error"},
     	},
     	"runtime/debug": {
    -		{"(*BuildInfo).String", Method, 18},
    -		{"BuildInfo", Type, 12},
    -		{"BuildInfo.Deps", Field, 12},
    -		{"BuildInfo.GoVersion", Field, 18},
    -		{"BuildInfo.Main", Field, 12},
    -		{"BuildInfo.Path", Field, 12},
    -		{"BuildInfo.Settings", Field, 18},
    -		{"BuildSetting", Type, 18},
    -		{"BuildSetting.Key", Field, 18},
    -		{"BuildSetting.Value", Field, 18},
    -		{"CrashOptions", Type, 23},
    -		{"FreeOSMemory", Func, 1},
    -		{"GCStats", Type, 1},
    -		{"GCStats.LastGC", Field, 1},
    -		{"GCStats.NumGC", Field, 1},
    -		{"GCStats.Pause", Field, 1},
    -		{"GCStats.PauseEnd", Field, 4},
    -		{"GCStats.PauseQuantiles", Field, 1},
    -		{"GCStats.PauseTotal", Field, 1},
    -		{"Module", Type, 12},
    -		{"Module.Path", Field, 12},
    -		{"Module.Replace", Field, 12},
    -		{"Module.Sum", Field, 12},
    -		{"Module.Version", Field, 12},
    -		{"ParseBuildInfo", Func, 18},
    -		{"PrintStack", Func, 0},
    -		{"ReadBuildInfo", Func, 12},
    -		{"ReadGCStats", Func, 1},
    -		{"SetCrashOutput", Func, 23},
    -		{"SetGCPercent", Func, 1},
    -		{"SetMaxStack", Func, 2},
    -		{"SetMaxThreads", Func, 2},
    -		{"SetMemoryLimit", Func, 19},
    -		{"SetPanicOnFault", Func, 3},
    -		{"SetTraceback", Func, 6},
    -		{"Stack", Func, 0},
    -		{"WriteHeapDump", Func, 3},
    +		{"(*BuildInfo).String", Method, 18, ""},
    +		{"BuildInfo", Type, 12, ""},
    +		{"BuildInfo.Deps", Field, 12, ""},
    +		{"BuildInfo.GoVersion", Field, 18, ""},
    +		{"BuildInfo.Main", Field, 12, ""},
    +		{"BuildInfo.Path", Field, 12, ""},
    +		{"BuildInfo.Settings", Field, 18, ""},
    +		{"BuildSetting", Type, 18, ""},
    +		{"BuildSetting.Key", Field, 18, ""},
    +		{"BuildSetting.Value", Field, 18, ""},
    +		{"CrashOptions", Type, 23, ""},
    +		{"FreeOSMemory", Func, 1, "func()"},
    +		{"GCStats", Type, 1, ""},
    +		{"GCStats.LastGC", Field, 1, ""},
    +		{"GCStats.NumGC", Field, 1, ""},
    +		{"GCStats.Pause", Field, 1, ""},
    +		{"GCStats.PauseEnd", Field, 4, ""},
    +		{"GCStats.PauseQuantiles", Field, 1, ""},
    +		{"GCStats.PauseTotal", Field, 1, ""},
    +		{"Module", Type, 12, ""},
    +		{"Module.Path", Field, 12, ""},
    +		{"Module.Replace", Field, 12, ""},
    +		{"Module.Sum", Field, 12, ""},
    +		{"Module.Version", Field, 12, ""},
    +		{"ParseBuildInfo", Func, 18, "func(data string) (bi *BuildInfo, err error)"},
    +		{"PrintStack", Func, 0, "func()"},
    +		{"ReadBuildInfo", Func, 12, "func() (info *BuildInfo, ok bool)"},
    +		{"ReadGCStats", Func, 1, "func(stats *GCStats)"},
    +		{"SetCrashOutput", Func, 23, "func(f *os.File, opts CrashOptions) error"},
    +		{"SetGCPercent", Func, 1, "func(percent int) int"},
    +		{"SetMaxStack", Func, 2, "func(bytes int) int"},
    +		{"SetMaxThreads", Func, 2, "func(threads int) int"},
    +		{"SetMemoryLimit", Func, 19, "func(limit int64) int64"},
    +		{"SetPanicOnFault", Func, 3, "func(enabled bool) bool"},
    +		{"SetTraceback", Func, 6, "func(level string)"},
    +		{"Stack", Func, 0, "func() []byte"},
    +		{"WriteHeapDump", Func, 3, "func(fd uintptr)"},
     	},
     	"runtime/metrics": {
    -		{"(Value).Float64", Method, 16},
    -		{"(Value).Float64Histogram", Method, 16},
    -		{"(Value).Kind", Method, 16},
    -		{"(Value).Uint64", Method, 16},
    -		{"All", Func, 16},
    -		{"Description", Type, 16},
    -		{"Description.Cumulative", Field, 16},
    -		{"Description.Description", Field, 16},
    -		{"Description.Kind", Field, 16},
    -		{"Description.Name", Field, 16},
    -		{"Float64Histogram", Type, 16},
    -		{"Float64Histogram.Buckets", Field, 16},
    -		{"Float64Histogram.Counts", Field, 16},
    -		{"KindBad", Const, 16},
    -		{"KindFloat64", Const, 16},
    -		{"KindFloat64Histogram", Const, 16},
    -		{"KindUint64", Const, 16},
    -		{"Read", Func, 16},
    -		{"Sample", Type, 16},
    -		{"Sample.Name", Field, 16},
    -		{"Sample.Value", Field, 16},
    -		{"Value", Type, 16},
    -		{"ValueKind", Type, 16},
    +		{"(Value).Float64", Method, 16, ""},
    +		{"(Value).Float64Histogram", Method, 16, ""},
    +		{"(Value).Kind", Method, 16, ""},
    +		{"(Value).Uint64", Method, 16, ""},
    +		{"All", Func, 16, "func() []Description"},
    +		{"Description", Type, 16, ""},
    +		{"Description.Cumulative", Field, 16, ""},
    +		{"Description.Description", Field, 16, ""},
    +		{"Description.Kind", Field, 16, ""},
    +		{"Description.Name", Field, 16, ""},
    +		{"Float64Histogram", Type, 16, ""},
    +		{"Float64Histogram.Buckets", Field, 16, ""},
    +		{"Float64Histogram.Counts", Field, 16, ""},
    +		{"KindBad", Const, 16, ""},
    +		{"KindFloat64", Const, 16, ""},
    +		{"KindFloat64Histogram", Const, 16, ""},
    +		{"KindUint64", Const, 16, ""},
    +		{"Read", Func, 16, "func(m []Sample)"},
    +		{"Sample", Type, 16, ""},
    +		{"Sample.Name", Field, 16, ""},
    +		{"Sample.Value", Field, 16, ""},
    +		{"Value", Type, 16, ""},
    +		{"ValueKind", Type, 16, ""},
     	},
     	"runtime/pprof": {
    -		{"(*Profile).Add", Method, 0},
    -		{"(*Profile).Count", Method, 0},
    -		{"(*Profile).Name", Method, 0},
    -		{"(*Profile).Remove", Method, 0},
    -		{"(*Profile).WriteTo", Method, 0},
    -		{"Do", Func, 9},
    -		{"ForLabels", Func, 9},
    -		{"Label", Func, 9},
    -		{"LabelSet", Type, 9},
    -		{"Labels", Func, 9},
    -		{"Lookup", Func, 0},
    -		{"NewProfile", Func, 0},
    -		{"Profile", Type, 0},
    -		{"Profiles", Func, 0},
    -		{"SetGoroutineLabels", Func, 9},
    -		{"StartCPUProfile", Func, 0},
    -		{"StopCPUProfile", Func, 0},
    -		{"WithLabels", Func, 9},
    -		{"WriteHeapProfile", Func, 0},
    +		{"(*Profile).Add", Method, 0, ""},
    +		{"(*Profile).Count", Method, 0, ""},
    +		{"(*Profile).Name", Method, 0, ""},
    +		{"(*Profile).Remove", Method, 0, ""},
    +		{"(*Profile).WriteTo", Method, 0, ""},
    +		{"Do", Func, 9, "func(ctx context.Context, labels LabelSet, f func(context.Context))"},
    +		{"ForLabels", Func, 9, "func(ctx context.Context, f func(key string, value string) bool)"},
    +		{"Label", Func, 9, "func(ctx context.Context, key string) (string, bool)"},
    +		{"LabelSet", Type, 9, ""},
    +		{"Labels", Func, 9, "func(args ...string) LabelSet"},
    +		{"Lookup", Func, 0, "func(name string) *Profile"},
    +		{"NewProfile", Func, 0, "func(name string) *Profile"},
    +		{"Profile", Type, 0, ""},
    +		{"Profiles", Func, 0, "func() []*Profile"},
    +		{"SetGoroutineLabels", Func, 9, "func(ctx context.Context)"},
    +		{"StartCPUProfile", Func, 0, "func(w io.Writer) error"},
    +		{"StopCPUProfile", Func, 0, "func()"},
    +		{"WithLabels", Func, 9, "func(ctx context.Context, labels LabelSet) context.Context"},
    +		{"WriteHeapProfile", Func, 0, "func(w io.Writer) error"},
     	},
     	"runtime/trace": {
    -		{"(*Region).End", Method, 11},
    -		{"(*Task).End", Method, 11},
    -		{"IsEnabled", Func, 11},
    -		{"Log", Func, 11},
    -		{"Logf", Func, 11},
    -		{"NewTask", Func, 11},
    -		{"Region", Type, 11},
    -		{"Start", Func, 5},
    -		{"StartRegion", Func, 11},
    -		{"Stop", Func, 5},
    -		{"Task", Type, 11},
    -		{"WithRegion", Func, 11},
    +		{"(*FlightRecorder).Enabled", Method, 25, ""},
    +		{"(*FlightRecorder).Start", Method, 25, ""},
    +		{"(*FlightRecorder).Stop", Method, 25, ""},
    +		{"(*FlightRecorder).WriteTo", Method, 25, ""},
    +		{"(*Region).End", Method, 11, ""},
    +		{"(*Task).End", Method, 11, ""},
    +		{"FlightRecorder", Type, 25, ""},
    +		{"FlightRecorderConfig", Type, 25, ""},
    +		{"FlightRecorderConfig.MaxBytes", Field, 25, ""},
    +		{"FlightRecorderConfig.MinAge", Field, 25, ""},
    +		{"IsEnabled", Func, 11, "func() bool"},
    +		{"Log", Func, 11, "func(ctx context.Context, category string, message string)"},
    +		{"Logf", Func, 11, "func(ctx context.Context, category string, format string, args ...any)"},
    +		{"NewFlightRecorder", Func, 25, "func(cfg FlightRecorderConfig) *FlightRecorder"},
    +		{"NewTask", Func, 11, "func(pctx context.Context, taskType string) (ctx context.Context, task *Task)"},
    +		{"Region", Type, 11, ""},
    +		{"Start", Func, 5, "func(w io.Writer) error"},
    +		{"StartRegion", Func, 11, "func(ctx context.Context, regionType string) *Region"},
    +		{"Stop", Func, 5, "func()"},
    +		{"Task", Type, 11, ""},
    +		{"WithRegion", Func, 11, "func(ctx context.Context, regionType string, fn func())"},
     	},
     	"slices": {
    -		{"All", Func, 23},
    -		{"AppendSeq", Func, 23},
    -		{"Backward", Func, 23},
    -		{"BinarySearch", Func, 21},
    -		{"BinarySearchFunc", Func, 21},
    -		{"Chunk", Func, 23},
    -		{"Clip", Func, 21},
    -		{"Clone", Func, 21},
    -		{"Collect", Func, 23},
    -		{"Compact", Func, 21},
    -		{"CompactFunc", Func, 21},
    -		{"Compare", Func, 21},
    -		{"CompareFunc", Func, 21},
    -		{"Concat", Func, 22},
    -		{"Contains", Func, 21},
    -		{"ContainsFunc", Func, 21},
    -		{"Delete", Func, 21},
    -		{"DeleteFunc", Func, 21},
    -		{"Equal", Func, 21},
    -		{"EqualFunc", Func, 21},
    -		{"Grow", Func, 21},
    -		{"Index", Func, 21},
    -		{"IndexFunc", Func, 21},
    -		{"Insert", Func, 21},
    -		{"IsSorted", Func, 21},
    -		{"IsSortedFunc", Func, 21},
    -		{"Max", Func, 21},
    -		{"MaxFunc", Func, 21},
    -		{"Min", Func, 21},
    -		{"MinFunc", Func, 21},
    -		{"Repeat", Func, 23},
    -		{"Replace", Func, 21},
    -		{"Reverse", Func, 21},
    -		{"Sort", Func, 21},
    -		{"SortFunc", Func, 21},
    -		{"SortStableFunc", Func, 21},
    -		{"Sorted", Func, 23},
    -		{"SortedFunc", Func, 23},
    -		{"SortedStableFunc", Func, 23},
    -		{"Values", Func, 23},
    +		{"All", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
    +		{"AppendSeq", Func, 23, "func[Slice ~[]E, E any](s Slice, seq iter.Seq[E]) Slice"},
    +		{"Backward", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
    +		{"BinarySearch", Func, 21, "func[S ~[]E, E cmp.Ordered](x S, target E) (int, bool)"},
    +		{"BinarySearchFunc", Func, 21, "func[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool)"},
    +		{"Chunk", Func, 23, "func[Slice ~[]E, E any](s Slice, n int) iter.Seq[Slice]"},
    +		{"Clip", Func, 21, "func[S ~[]E, E any](s S) S"},
    +		{"Clone", Func, 21, "func[S ~[]E, E any](s S) S"},
    +		{"Collect", Func, 23, "func[E any](seq iter.Seq[E]) []E"},
    +		{"Compact", Func, 21, "func[S ~[]E, E comparable](s S) S"},
    +		{"CompactFunc", Func, 21, "func[S ~[]E, E any](s S, eq func(E, E) bool) S"},
    +		{"Compare", Func, 21, "func[S ~[]E, E cmp.Ordered](s1 S, s2 S) int"},
    +		{"CompareFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int"},
    +		{"Concat", Func, 22, "func[S ~[]E, E any](slices ...S) S"},
    +		{"Contains", Func, 21, "func[S ~[]E, E comparable](s S, v E) bool"},
    +		{"ContainsFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) bool"},
    +		{"Delete", Func, 21, "func[S ~[]E, E any](s S, i int, j int) S"},
    +		{"DeleteFunc", Func, 21, "func[S ~[]E, E any](s S, del func(E) bool) S"},
    +		{"Equal", Func, 21, "func[S ~[]E, E comparable](s1 S, s2 S) bool"},
    +		{"EqualFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool"},
    +		{"Grow", Func, 21, "func[S ~[]E, E any](s S, n int) S"},
    +		{"Index", Func, 21, "func[S ~[]E, E comparable](s S, v E) int"},
    +		{"IndexFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) int"},
    +		{"Insert", Func, 21, "func[S ~[]E, E any](s S, i int, v ...E) S"},
    +		{"IsSorted", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) bool"},
    +		{"IsSortedFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) bool"},
    +		{"Max", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
    +		{"MaxFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
    +		{"Min", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
    +		{"MinFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
    +		{"Repeat", Func, 23, "func[S ~[]E, E any](x S, count int) S"},
    +		{"Replace", Func, 21, "func[S ~[]E, E any](s S, i int, j int, v ...E) S"},
    +		{"Reverse", Func, 21, "func[S ~[]E, E any](s S)"},
    +		{"Sort", Func, 21, "func[S ~[]E, E cmp.Ordered](x S)"},
    +		{"SortFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
    +		{"SortStableFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
    +		{"Sorted", Func, 23, "func[E cmp.Ordered](seq iter.Seq[E]) []E"},
    +		{"SortedFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
    +		{"SortedStableFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
    +		{"Values", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq[E]"},
     	},
     	"sort": {
    -		{"(Float64Slice).Len", Method, 0},
    -		{"(Float64Slice).Less", Method, 0},
    -		{"(Float64Slice).Search", Method, 0},
    -		{"(Float64Slice).Sort", Method, 0},
    -		{"(Float64Slice).Swap", Method, 0},
    -		{"(IntSlice).Len", Method, 0},
    -		{"(IntSlice).Less", Method, 0},
    -		{"(IntSlice).Search", Method, 0},
    -		{"(IntSlice).Sort", Method, 0},
    -		{"(IntSlice).Swap", Method, 0},
    -		{"(StringSlice).Len", Method, 0},
    -		{"(StringSlice).Less", Method, 0},
    -		{"(StringSlice).Search", Method, 0},
    -		{"(StringSlice).Sort", Method, 0},
    -		{"(StringSlice).Swap", Method, 0},
    -		{"Find", Func, 19},
    -		{"Float64Slice", Type, 0},
    -		{"Float64s", Func, 0},
    -		{"Float64sAreSorted", Func, 0},
    -		{"IntSlice", Type, 0},
    -		{"Interface", Type, 0},
    -		{"Ints", Func, 0},
    -		{"IntsAreSorted", Func, 0},
    -		{"IsSorted", Func, 0},
    -		{"Reverse", Func, 1},
    -		{"Search", Func, 0},
    -		{"SearchFloat64s", Func, 0},
    -		{"SearchInts", Func, 0},
    -		{"SearchStrings", Func, 0},
    -		{"Slice", Func, 8},
    -		{"SliceIsSorted", Func, 8},
    -		{"SliceStable", Func, 8},
    -		{"Sort", Func, 0},
    -		{"Stable", Func, 2},
    -		{"StringSlice", Type, 0},
    -		{"Strings", Func, 0},
    -		{"StringsAreSorted", Func, 0},
    +		{"(Float64Slice).Len", Method, 0, ""},
    +		{"(Float64Slice).Less", Method, 0, ""},
    +		{"(Float64Slice).Search", Method, 0, ""},
    +		{"(Float64Slice).Sort", Method, 0, ""},
    +		{"(Float64Slice).Swap", Method, 0, ""},
    +		{"(IntSlice).Len", Method, 0, ""},
    +		{"(IntSlice).Less", Method, 0, ""},
    +		{"(IntSlice).Search", Method, 0, ""},
    +		{"(IntSlice).Sort", Method, 0, ""},
    +		{"(IntSlice).Swap", Method, 0, ""},
    +		{"(StringSlice).Len", Method, 0, ""},
    +		{"(StringSlice).Less", Method, 0, ""},
    +		{"(StringSlice).Search", Method, 0, ""},
    +		{"(StringSlice).Sort", Method, 0, ""},
    +		{"(StringSlice).Swap", Method, 0, ""},
    +		{"Find", Func, 19, "func(n int, cmp func(int) int) (i int, found bool)"},
    +		{"Float64Slice", Type, 0, ""},
    +		{"Float64s", Func, 0, "func(x []float64)"},
    +		{"Float64sAreSorted", Func, 0, "func(x []float64) bool"},
    +		{"IntSlice", Type, 0, ""},
    +		{"Interface", Type, 0, ""},
    +		{"Ints", Func, 0, "func(x []int)"},
    +		{"IntsAreSorted", Func, 0, "func(x []int) bool"},
    +		{"IsSorted", Func, 0, "func(data Interface) bool"},
    +		{"Reverse", Func, 1, "func(data Interface) Interface"},
    +		{"Search", Func, 0, "func(n int, f func(int) bool) int"},
    +		{"SearchFloat64s", Func, 0, "func(a []float64, x float64) int"},
    +		{"SearchInts", Func, 0, "func(a []int, x int) int"},
    +		{"SearchStrings", Func, 0, "func(a []string, x string) int"},
    +		{"Slice", Func, 8, "func(x any, less func(i int, j int) bool)"},
    +		{"SliceIsSorted", Func, 8, "func(x any, less func(i int, j int) bool) bool"},
    +		{"SliceStable", Func, 8, "func(x any, less func(i int, j int) bool)"},
    +		{"Sort", Func, 0, "func(data Interface)"},
    +		{"Stable", Func, 2, "func(data Interface)"},
    +		{"StringSlice", Type, 0, ""},
    +		{"Strings", Func, 0, "func(x []string)"},
    +		{"StringsAreSorted", Func, 0, "func(x []string) bool"},
     	},
     	"strconv": {
    -		{"(*NumError).Error", Method, 0},
    -		{"(*NumError).Unwrap", Method, 14},
    -		{"AppendBool", Func, 0},
    -		{"AppendFloat", Func, 0},
    -		{"AppendInt", Func, 0},
    -		{"AppendQuote", Func, 0},
    -		{"AppendQuoteRune", Func, 0},
    -		{"AppendQuoteRuneToASCII", Func, 0},
    -		{"AppendQuoteRuneToGraphic", Func, 6},
    -		{"AppendQuoteToASCII", Func, 0},
    -		{"AppendQuoteToGraphic", Func, 6},
    -		{"AppendUint", Func, 0},
    -		{"Atoi", Func, 0},
    -		{"CanBackquote", Func, 0},
    -		{"ErrRange", Var, 0},
    -		{"ErrSyntax", Var, 0},
    -		{"FormatBool", Func, 0},
    -		{"FormatComplex", Func, 15},
    -		{"FormatFloat", Func, 0},
    -		{"FormatInt", Func, 0},
    -		{"FormatUint", Func, 0},
    -		{"IntSize", Const, 0},
    -		{"IsGraphic", Func, 6},
    -		{"IsPrint", Func, 0},
    -		{"Itoa", Func, 0},
    -		{"NumError", Type, 0},
    -		{"NumError.Err", Field, 0},
    -		{"NumError.Func", Field, 0},
    -		{"NumError.Num", Field, 0},
    -		{"ParseBool", Func, 0},
    -		{"ParseComplex", Func, 15},
    -		{"ParseFloat", Func, 0},
    -		{"ParseInt", Func, 0},
    -		{"ParseUint", Func, 0},
    -		{"Quote", Func, 0},
    -		{"QuoteRune", Func, 0},
    -		{"QuoteRuneToASCII", Func, 0},
    -		{"QuoteRuneToGraphic", Func, 6},
    -		{"QuoteToASCII", Func, 0},
    -		{"QuoteToGraphic", Func, 6},
    -		{"QuotedPrefix", Func, 17},
    -		{"Unquote", Func, 0},
    -		{"UnquoteChar", Func, 0},
    +		{"(*NumError).Error", Method, 0, ""},
    +		{"(*NumError).Unwrap", Method, 14, ""},
    +		{"AppendBool", Func, 0, "func(dst []byte, b bool) []byte"},
    +		{"AppendFloat", Func, 0, "func(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte"},
    +		{"AppendInt", Func, 0, "func(dst []byte, i int64, base int) []byte"},
    +		{"AppendQuote", Func, 0, "func(dst []byte, s string) []byte"},
    +		{"AppendQuoteRune", Func, 0, "func(dst []byte, r rune) []byte"},
    +		{"AppendQuoteRuneToASCII", Func, 0, "func(dst []byte, r rune) []byte"},
    +		{"AppendQuoteRuneToGraphic", Func, 6, "func(dst []byte, r rune) []byte"},
    +		{"AppendQuoteToASCII", Func, 0, "func(dst []byte, s string) []byte"},
    +		{"AppendQuoteToGraphic", Func, 6, "func(dst []byte, s string) []byte"},
    +		{"AppendUint", Func, 0, "func(dst []byte, i uint64, base int) []byte"},
    +		{"Atoi", Func, 0, "func(s string) (int, error)"},
    +		{"CanBackquote", Func, 0, "func(s string) bool"},
    +		{"ErrRange", Var, 0, ""},
    +		{"ErrSyntax", Var, 0, ""},
    +		{"FormatBool", Func, 0, "func(b bool) string"},
    +		{"FormatComplex", Func, 15, "func(c complex128, fmt byte, prec int, bitSize int) string"},
    +		{"FormatFloat", Func, 0, "func(f float64, fmt byte, prec int, bitSize int) string"},
    +		{"FormatInt", Func, 0, "func(i int64, base int) string"},
    +		{"FormatUint", Func, 0, "func(i uint64, base int) string"},
    +		{"IntSize", Const, 0, ""},
    +		{"IsGraphic", Func, 6, "func(r rune) bool"},
    +		{"IsPrint", Func, 0, "func(r rune) bool"},
    +		{"Itoa", Func, 0, "func(i int) string"},
    +		{"NumError", Type, 0, ""},
    +		{"NumError.Err", Field, 0, ""},
    +		{"NumError.Func", Field, 0, ""},
    +		{"NumError.Num", Field, 0, ""},
    +		{"ParseBool", Func, 0, "func(str string) (bool, error)"},
    +		{"ParseComplex", Func, 15, "func(s string, bitSize int) (complex128, error)"},
    +		{"ParseFloat", Func, 0, "func(s string, bitSize int) (float64, error)"},
    +		{"ParseInt", Func, 0, "func(s string, base int, bitSize int) (i int64, err error)"},
    +		{"ParseUint", Func, 0, "func(s string, base int, bitSize int) (uint64, error)"},
    +		{"Quote", Func, 0, "func(s string) string"},
    +		{"QuoteRune", Func, 0, "func(r rune) string"},
    +		{"QuoteRuneToASCII", Func, 0, "func(r rune) string"},
    +		{"QuoteRuneToGraphic", Func, 6, "func(r rune) string"},
    +		{"QuoteToASCII", Func, 0, "func(s string) string"},
    +		{"QuoteToGraphic", Func, 6, "func(s string) string"},
    +		{"QuotedPrefix", Func, 17, "func(s string) (string, error)"},
    +		{"Unquote", Func, 0, "func(s string) (string, error)"},
    +		{"UnquoteChar", Func, 0, "func(s string, quote byte) (value rune, multibyte bool, tail string, err error)"},
     	},
     	"strings": {
    -		{"(*Builder).Cap", Method, 12},
    -		{"(*Builder).Grow", Method, 10},
    -		{"(*Builder).Len", Method, 10},
    -		{"(*Builder).Reset", Method, 10},
    -		{"(*Builder).String", Method, 10},
    -		{"(*Builder).Write", Method, 10},
    -		{"(*Builder).WriteByte", Method, 10},
    -		{"(*Builder).WriteRune", Method, 10},
    -		{"(*Builder).WriteString", Method, 10},
    -		{"(*Reader).Len", Method, 0},
    -		{"(*Reader).Read", Method, 0},
    -		{"(*Reader).ReadAt", Method, 0},
    -		{"(*Reader).ReadByte", Method, 0},
    -		{"(*Reader).ReadRune", Method, 0},
    -		{"(*Reader).Reset", Method, 7},
    -		{"(*Reader).Seek", Method, 0},
    -		{"(*Reader).Size", Method, 5},
    -		{"(*Reader).UnreadByte", Method, 0},
    -		{"(*Reader).UnreadRune", Method, 0},
    -		{"(*Reader).WriteTo", Method, 1},
    -		{"(*Replacer).Replace", Method, 0},
    -		{"(*Replacer).WriteString", Method, 0},
    -		{"Builder", Type, 10},
    -		{"Clone", Func, 18},
    -		{"Compare", Func, 5},
    -		{"Contains", Func, 0},
    -		{"ContainsAny", Func, 0},
    -		{"ContainsFunc", Func, 21},
    -		{"ContainsRune", Func, 0},
    -		{"Count", Func, 0},
    -		{"Cut", Func, 18},
    -		{"CutPrefix", Func, 20},
    -		{"CutSuffix", Func, 20},
    -		{"EqualFold", Func, 0},
    -		{"Fields", Func, 0},
    -		{"FieldsFunc", Func, 0},
    -		{"FieldsFuncSeq", Func, 24},
    -		{"FieldsSeq", Func, 24},
    -		{"HasPrefix", Func, 0},
    -		{"HasSuffix", Func, 0},
    -		{"Index", Func, 0},
    -		{"IndexAny", Func, 0},
    -		{"IndexByte", Func, 2},
    -		{"IndexFunc", Func, 0},
    -		{"IndexRune", Func, 0},
    -		{"Join", Func, 0},
    -		{"LastIndex", Func, 0},
    -		{"LastIndexAny", Func, 0},
    -		{"LastIndexByte", Func, 5},
    -		{"LastIndexFunc", Func, 0},
    -		{"Lines", Func, 24},
    -		{"Map", Func, 0},
    -		{"NewReader", Func, 0},
    -		{"NewReplacer", Func, 0},
    -		{"Reader", Type, 0},
    -		{"Repeat", Func, 0},
    -		{"Replace", Func, 0},
    -		{"ReplaceAll", Func, 12},
    -		{"Replacer", Type, 0},
    -		{"Split", Func, 0},
    -		{"SplitAfter", Func, 0},
    -		{"SplitAfterN", Func, 0},
    -		{"SplitAfterSeq", Func, 24},
    -		{"SplitN", Func, 0},
    -		{"SplitSeq", Func, 24},
    -		{"Title", Func, 0},
    -		{"ToLower", Func, 0},
    -		{"ToLowerSpecial", Func, 0},
    -		{"ToTitle", Func, 0},
    -		{"ToTitleSpecial", Func, 0},
    -		{"ToUpper", Func, 0},
    -		{"ToUpperSpecial", Func, 0},
    -		{"ToValidUTF8", Func, 13},
    -		{"Trim", Func, 0},
    -		{"TrimFunc", Func, 0},
    -		{"TrimLeft", Func, 0},
    -		{"TrimLeftFunc", Func, 0},
    -		{"TrimPrefix", Func, 1},
    -		{"TrimRight", Func, 0},
    -		{"TrimRightFunc", Func, 0},
    -		{"TrimSpace", Func, 0},
    -		{"TrimSuffix", Func, 1},
    +		{"(*Builder).Cap", Method, 12, ""},
    +		{"(*Builder).Grow", Method, 10, ""},
    +		{"(*Builder).Len", Method, 10, ""},
    +		{"(*Builder).Reset", Method, 10, ""},
    +		{"(*Builder).String", Method, 10, ""},
    +		{"(*Builder).Write", Method, 10, ""},
    +		{"(*Builder).WriteByte", Method, 10, ""},
    +		{"(*Builder).WriteRune", Method, 10, ""},
    +		{"(*Builder).WriteString", Method, 10, ""},
    +		{"(*Reader).Len", Method, 0, ""},
    +		{"(*Reader).Read", Method, 0, ""},
    +		{"(*Reader).ReadAt", Method, 0, ""},
    +		{"(*Reader).ReadByte", Method, 0, ""},
    +		{"(*Reader).ReadRune", Method, 0, ""},
    +		{"(*Reader).Reset", Method, 7, ""},
    +		{"(*Reader).Seek", Method, 0, ""},
    +		{"(*Reader).Size", Method, 5, ""},
    +		{"(*Reader).UnreadByte", Method, 0, ""},
    +		{"(*Reader).UnreadRune", Method, 0, ""},
    +		{"(*Reader).WriteTo", Method, 1, ""},
    +		{"(*Replacer).Replace", Method, 0, ""},
    +		{"(*Replacer).WriteString", Method, 0, ""},
    +		{"Builder", Type, 10, ""},
    +		{"Clone", Func, 18, "func(s string) string"},
    +		{"Compare", Func, 5, "func(a string, b string) int"},
    +		{"Contains", Func, 0, "func(s string, substr string) bool"},
    +		{"ContainsAny", Func, 0, "func(s string, chars string) bool"},
    +		{"ContainsFunc", Func, 21, "func(s string, f func(rune) bool) bool"},
    +		{"ContainsRune", Func, 0, "func(s string, r rune) bool"},
    +		{"Count", Func, 0, "func(s string, substr string) int"},
    +		{"Cut", Func, 18, "func(s string, sep string) (before string, after string, found bool)"},
    +		{"CutPrefix", Func, 20, "func(s string, prefix string) (after string, found bool)"},
    +		{"CutSuffix", Func, 20, "func(s string, suffix string) (before string, found bool)"},
    +		{"EqualFold", Func, 0, "func(s string, t string) bool"},
    +		{"Fields", Func, 0, "func(s string) []string"},
    +		{"FieldsFunc", Func, 0, "func(s string, f func(rune) bool) []string"},
    +		{"FieldsFuncSeq", Func, 24, "func(s string, f func(rune) bool) iter.Seq[string]"},
    +		{"FieldsSeq", Func, 24, "func(s string) iter.Seq[string]"},
    +		{"HasPrefix", Func, 0, "func(s string, prefix string) bool"},
    +		{"HasSuffix", Func, 0, "func(s string, suffix string) bool"},
    +		{"Index", Func, 0, "func(s string, substr string) int"},
    +		{"IndexAny", Func, 0, "func(s string, chars string) int"},
    +		{"IndexByte", Func, 2, "func(s string, c byte) int"},
    +		{"IndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
    +		{"IndexRune", Func, 0, "func(s string, r rune) int"},
    +		{"Join", Func, 0, "func(elems []string, sep string) string"},
    +		{"LastIndex", Func, 0, "func(s string, substr string) int"},
    +		{"LastIndexAny", Func, 0, "func(s string, chars string) int"},
    +		{"LastIndexByte", Func, 5, "func(s string, c byte) int"},
    +		{"LastIndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
    +		{"Lines", Func, 24, "func(s string) iter.Seq[string]"},
    +		{"Map", Func, 0, "func(mapping func(rune) rune, s string) string"},
    +		{"NewReader", Func, 0, "func(s string) *Reader"},
    +		{"NewReplacer", Func, 0, "func(oldnew ...string) *Replacer"},
    +		{"Reader", Type, 0, ""},
    +		{"Repeat", Func, 0, "func(s string, count int) string"},
    +		{"Replace", Func, 0, "func(s string, old string, new string, n int) string"},
    +		{"ReplaceAll", Func, 12, "func(s string, old string, new string) string"},
    +		{"Replacer", Type, 0, ""},
    +		{"Split", Func, 0, "func(s string, sep string) []string"},
    +		{"SplitAfter", Func, 0, "func(s string, sep string) []string"},
    +		{"SplitAfterN", Func, 0, "func(s string, sep string, n int) []string"},
    +		{"SplitAfterSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
    +		{"SplitN", Func, 0, "func(s string, sep string, n int) []string"},
    +		{"SplitSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
    +		{"Title", Func, 0, "func(s string) string"},
    +		{"ToLower", Func, 0, "func(s string) string"},
    +		{"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
    +		{"ToTitle", Func, 0, "func(s string) string"},
    +		{"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
    +		{"ToUpper", Func, 0, "func(s string) string"},
    +		{"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
    +		{"ToValidUTF8", Func, 13, "func(s string, replacement string) string"},
    +		{"Trim", Func, 0, "func(s string, cutset string) string"},
    +		{"TrimFunc", Func, 0, "func(s string, f func(rune) bool) string"},
    +		{"TrimLeft", Func, 0, "func(s string, cutset string) string"},
    +		{"TrimLeftFunc", Func, 0, "func(s string, f func(rune) bool) string"},
    +		{"TrimPrefix", Func, 1, "func(s string, prefix string) string"},
    +		{"TrimRight", Func, 0, "func(s string, cutset string) string"},
    +		{"TrimRightFunc", Func, 0, "func(s string, f func(rune) bool) string"},
    +		{"TrimSpace", Func, 0, "func(s string) string"},
    +		{"TrimSuffix", Func, 1, "func(s string, suffix string) string"},
     	},
     	"structs": {
    -		{"HostLayout", Type, 23},
    +		{"HostLayout", Type, 23, ""},
     	},
     	"sync": {
    -		{"(*Cond).Broadcast", Method, 0},
    -		{"(*Cond).Signal", Method, 0},
    -		{"(*Cond).Wait", Method, 0},
    -		{"(*Map).Clear", Method, 23},
    -		{"(*Map).CompareAndDelete", Method, 20},
    -		{"(*Map).CompareAndSwap", Method, 20},
    -		{"(*Map).Delete", Method, 9},
    -		{"(*Map).Load", Method, 9},
    -		{"(*Map).LoadAndDelete", Method, 15},
    -		{"(*Map).LoadOrStore", Method, 9},
    -		{"(*Map).Range", Method, 9},
    -		{"(*Map).Store", Method, 9},
    -		{"(*Map).Swap", Method, 20},
    -		{"(*Mutex).Lock", Method, 0},
    -		{"(*Mutex).TryLock", Method, 18},
    -		{"(*Mutex).Unlock", Method, 0},
    -		{"(*Once).Do", Method, 0},
    -		{"(*Pool).Get", Method, 3},
    -		{"(*Pool).Put", Method, 3},
    -		{"(*RWMutex).Lock", Method, 0},
    -		{"(*RWMutex).RLock", Method, 0},
    -		{"(*RWMutex).RLocker", Method, 0},
    -		{"(*RWMutex).RUnlock", Method, 0},
    -		{"(*RWMutex).TryLock", Method, 18},
    -		{"(*RWMutex).TryRLock", Method, 18},
    -		{"(*RWMutex).Unlock", Method, 0},
    -		{"(*WaitGroup).Add", Method, 0},
    -		{"(*WaitGroup).Done", Method, 0},
    -		{"(*WaitGroup).Wait", Method, 0},
    -		{"Cond", Type, 0},
    -		{"Cond.L", Field, 0},
    -		{"Locker", Type, 0},
    -		{"Map", Type, 9},
    -		{"Mutex", Type, 0},
    -		{"NewCond", Func, 0},
    -		{"Once", Type, 0},
    -		{"OnceFunc", Func, 21},
    -		{"OnceValue", Func, 21},
    -		{"OnceValues", Func, 21},
    -		{"Pool", Type, 3},
    -		{"Pool.New", Field, 3},
    -		{"RWMutex", Type, 0},
    -		{"WaitGroup", Type, 0},
    +		{"(*Cond).Broadcast", Method, 0, ""},
    +		{"(*Cond).Signal", Method, 0, ""},
    +		{"(*Cond).Wait", Method, 0, ""},
    +		{"(*Map).Clear", Method, 23, ""},
    +		{"(*Map).CompareAndDelete", Method, 20, ""},
    +		{"(*Map).CompareAndSwap", Method, 20, ""},
    +		{"(*Map).Delete", Method, 9, ""},
    +		{"(*Map).Load", Method, 9, ""},
    +		{"(*Map).LoadAndDelete", Method, 15, ""},
    +		{"(*Map).LoadOrStore", Method, 9, ""},
    +		{"(*Map).Range", Method, 9, ""},
    +		{"(*Map).Store", Method, 9, ""},
    +		{"(*Map).Swap", Method, 20, ""},
    +		{"(*Mutex).Lock", Method, 0, ""},
    +		{"(*Mutex).TryLock", Method, 18, ""},
    +		{"(*Mutex).Unlock", Method, 0, ""},
    +		{"(*Once).Do", Method, 0, ""},
    +		{"(*Pool).Get", Method, 3, ""},
    +		{"(*Pool).Put", Method, 3, ""},
    +		{"(*RWMutex).Lock", Method, 0, ""},
    +		{"(*RWMutex).RLock", Method, 0, ""},
    +		{"(*RWMutex).RLocker", Method, 0, ""},
    +		{"(*RWMutex).RUnlock", Method, 0, ""},
    +		{"(*RWMutex).TryLock", Method, 18, ""},
    +		{"(*RWMutex).TryRLock", Method, 18, ""},
    +		{"(*RWMutex).Unlock", Method, 0, ""},
    +		{"(*WaitGroup).Add", Method, 0, ""},
    +		{"(*WaitGroup).Done", Method, 0, ""},
    +		{"(*WaitGroup).Go", Method, 25, ""},
    +		{"(*WaitGroup).Wait", Method, 0, ""},
    +		{"Cond", Type, 0, ""},
    +		{"Cond.L", Field, 0, ""},
    +		{"Locker", Type, 0, ""},
    +		{"Map", Type, 9, ""},
    +		{"Mutex", Type, 0, ""},
    +		{"NewCond", Func, 0, "func(l Locker) *Cond"},
    +		{"Once", Type, 0, ""},
    +		{"OnceFunc", Func, 21, "func(f func()) func()"},
    +		{"OnceValue", Func, 21, "func[T any](f func() T) func() T"},
    +		{"OnceValues", Func, 21, "func[T1, T2 any](f func() (T1, T2)) func() (T1, T2)"},
    +		{"Pool", Type, 3, ""},
    +		{"Pool.New", Field, 3, ""},
    +		{"RWMutex", Type, 0, ""},
    +		{"WaitGroup", Type, 0, ""},
     	},
     	"sync/atomic": {
    -		{"(*Bool).CompareAndSwap", Method, 19},
    -		{"(*Bool).Load", Method, 19},
    -		{"(*Bool).Store", Method, 19},
    -		{"(*Bool).Swap", Method, 19},
    -		{"(*Int32).Add", Method, 19},
    -		{"(*Int32).And", Method, 23},
    -		{"(*Int32).CompareAndSwap", Method, 19},
    -		{"(*Int32).Load", Method, 19},
    -		{"(*Int32).Or", Method, 23},
    -		{"(*Int32).Store", Method, 19},
    -		{"(*Int32).Swap", Method, 19},
    -		{"(*Int64).Add", Method, 19},
    -		{"(*Int64).And", Method, 23},
    -		{"(*Int64).CompareAndSwap", Method, 19},
    -		{"(*Int64).Load", Method, 19},
    -		{"(*Int64).Or", Method, 23},
    -		{"(*Int64).Store", Method, 19},
    -		{"(*Int64).Swap", Method, 19},
    -		{"(*Pointer).CompareAndSwap", Method, 19},
    -		{"(*Pointer).Load", Method, 19},
    -		{"(*Pointer).Store", Method, 19},
    -		{"(*Pointer).Swap", Method, 19},
    -		{"(*Uint32).Add", Method, 19},
    -		{"(*Uint32).And", Method, 23},
    -		{"(*Uint32).CompareAndSwap", Method, 19},
    -		{"(*Uint32).Load", Method, 19},
    -		{"(*Uint32).Or", Method, 23},
    -		{"(*Uint32).Store", Method, 19},
    -		{"(*Uint32).Swap", Method, 19},
    -		{"(*Uint64).Add", Method, 19},
    -		{"(*Uint64).And", Method, 23},
    -		{"(*Uint64).CompareAndSwap", Method, 19},
    -		{"(*Uint64).Load", Method, 19},
    -		{"(*Uint64).Or", Method, 23},
    -		{"(*Uint64).Store", Method, 19},
    -		{"(*Uint64).Swap", Method, 19},
    -		{"(*Uintptr).Add", Method, 19},
    -		{"(*Uintptr).And", Method, 23},
    -		{"(*Uintptr).CompareAndSwap", Method, 19},
    -		{"(*Uintptr).Load", Method, 19},
    -		{"(*Uintptr).Or", Method, 23},
    -		{"(*Uintptr).Store", Method, 19},
    -		{"(*Uintptr).Swap", Method, 19},
    -		{"(*Value).CompareAndSwap", Method, 17},
    -		{"(*Value).Load", Method, 4},
    -		{"(*Value).Store", Method, 4},
    -		{"(*Value).Swap", Method, 17},
    -		{"AddInt32", Func, 0},
    -		{"AddInt64", Func, 0},
    -		{"AddUint32", Func, 0},
    -		{"AddUint64", Func, 0},
    -		{"AddUintptr", Func, 0},
    -		{"AndInt32", Func, 23},
    -		{"AndInt64", Func, 23},
    -		{"AndUint32", Func, 23},
    -		{"AndUint64", Func, 23},
    -		{"AndUintptr", Func, 23},
    -		{"Bool", Type, 19},
    -		{"CompareAndSwapInt32", Func, 0},
    -		{"CompareAndSwapInt64", Func, 0},
    -		{"CompareAndSwapPointer", Func, 0},
    -		{"CompareAndSwapUint32", Func, 0},
    -		{"CompareAndSwapUint64", Func, 0},
    -		{"CompareAndSwapUintptr", Func, 0},
    -		{"Int32", Type, 19},
    -		{"Int64", Type, 19},
    -		{"LoadInt32", Func, 0},
    -		{"LoadInt64", Func, 0},
    -		{"LoadPointer", Func, 0},
    -		{"LoadUint32", Func, 0},
    -		{"LoadUint64", Func, 0},
    -		{"LoadUintptr", Func, 0},
    -		{"OrInt32", Func, 23},
    -		{"OrInt64", Func, 23},
    -		{"OrUint32", Func, 23},
    -		{"OrUint64", Func, 23},
    -		{"OrUintptr", Func, 23},
    -		{"Pointer", Type, 19},
    -		{"StoreInt32", Func, 0},
    -		{"StoreInt64", Func, 0},
    -		{"StorePointer", Func, 0},
    -		{"StoreUint32", Func, 0},
    -		{"StoreUint64", Func, 0},
    -		{"StoreUintptr", Func, 0},
    -		{"SwapInt32", Func, 2},
    -		{"SwapInt64", Func, 2},
    -		{"SwapPointer", Func, 2},
    -		{"SwapUint32", Func, 2},
    -		{"SwapUint64", Func, 2},
    -		{"SwapUintptr", Func, 2},
    -		{"Uint32", Type, 19},
    -		{"Uint64", Type, 19},
    -		{"Uintptr", Type, 19},
    -		{"Value", Type, 4},
    +		{"(*Bool).CompareAndSwap", Method, 19, ""},
    +		{"(*Bool).Load", Method, 19, ""},
    +		{"(*Bool).Store", Method, 19, ""},
    +		{"(*Bool).Swap", Method, 19, ""},
    +		{"(*Int32).Add", Method, 19, ""},
    +		{"(*Int32).And", Method, 23, ""},
    +		{"(*Int32).CompareAndSwap", Method, 19, ""},
    +		{"(*Int32).Load", Method, 19, ""},
    +		{"(*Int32).Or", Method, 23, ""},
    +		{"(*Int32).Store", Method, 19, ""},
    +		{"(*Int32).Swap", Method, 19, ""},
    +		{"(*Int64).Add", Method, 19, ""},
    +		{"(*Int64).And", Method, 23, ""},
    +		{"(*Int64).CompareAndSwap", Method, 19, ""},
    +		{"(*Int64).Load", Method, 19, ""},
    +		{"(*Int64).Or", Method, 23, ""},
    +		{"(*Int64).Store", Method, 19, ""},
    +		{"(*Int64).Swap", Method, 19, ""},
    +		{"(*Pointer).CompareAndSwap", Method, 19, ""},
    +		{"(*Pointer).Load", Method, 19, ""},
    +		{"(*Pointer).Store", Method, 19, ""},
    +		{"(*Pointer).Swap", Method, 19, ""},
    +		{"(*Uint32).Add", Method, 19, ""},
    +		{"(*Uint32).And", Method, 23, ""},
    +		{"(*Uint32).CompareAndSwap", Method, 19, ""},
    +		{"(*Uint32).Load", Method, 19, ""},
    +		{"(*Uint32).Or", Method, 23, ""},
    +		{"(*Uint32).Store", Method, 19, ""},
    +		{"(*Uint32).Swap", Method, 19, ""},
    +		{"(*Uint64).Add", Method, 19, ""},
    +		{"(*Uint64).And", Method, 23, ""},
    +		{"(*Uint64).CompareAndSwap", Method, 19, ""},
    +		{"(*Uint64).Load", Method, 19, ""},
    +		{"(*Uint64).Or", Method, 23, ""},
    +		{"(*Uint64).Store", Method, 19, ""},
    +		{"(*Uint64).Swap", Method, 19, ""},
    +		{"(*Uintptr).Add", Method, 19, ""},
    +		{"(*Uintptr).And", Method, 23, ""},
    +		{"(*Uintptr).CompareAndSwap", Method, 19, ""},
    +		{"(*Uintptr).Load", Method, 19, ""},
    +		{"(*Uintptr).Or", Method, 23, ""},
    +		{"(*Uintptr).Store", Method, 19, ""},
    +		{"(*Uintptr).Swap", Method, 19, ""},
    +		{"(*Value).CompareAndSwap", Method, 17, ""},
    +		{"(*Value).Load", Method, 4, ""},
    +		{"(*Value).Store", Method, 4, ""},
    +		{"(*Value).Swap", Method, 17, ""},
    +		{"AddInt32", Func, 0, "func(addr *int32, delta int32) (new int32)"},
    +		{"AddInt64", Func, 0, "func(addr *int64, delta int64) (new int64)"},
    +		{"AddUint32", Func, 0, "func(addr *uint32, delta uint32) (new uint32)"},
    +		{"AddUint64", Func, 0, "func(addr *uint64, delta uint64) (new uint64)"},
    +		{"AddUintptr", Func, 0, "func(addr *uintptr, delta uintptr) (new uintptr)"},
    +		{"AndInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
    +		{"AndInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
    +		{"AndUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
    +		{"AndUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
    +		{"AndUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
    +		{"Bool", Type, 19, ""},
    +		{"CompareAndSwapInt32", Func, 0, "func(addr *int32, old int32, new int32) (swapped bool)"},
    +		{"CompareAndSwapInt64", Func, 0, "func(addr *int64, old int64, new int64) (swapped bool)"},
    +		{"CompareAndSwapPointer", Func, 0, "func(addr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) (swapped bool)"},
    +		{"CompareAndSwapUint32", Func, 0, "func(addr *uint32, old uint32, new uint32) (swapped bool)"},
    +		{"CompareAndSwapUint64", Func, 0, "func(addr *uint64, old uint64, new uint64) (swapped bool)"},
    +		{"CompareAndSwapUintptr", Func, 0, "func(addr *uintptr, old uintptr, new uintptr) (swapped bool)"},
    +		{"Int32", Type, 19, ""},
    +		{"Int64", Type, 19, ""},
    +		{"LoadInt32", Func, 0, "func(addr *int32) (val int32)"},
    +		{"LoadInt64", Func, 0, "func(addr *int64) (val int64)"},
    +		{"LoadPointer", Func, 0, "func(addr *unsafe.Pointer) (val unsafe.Pointer)"},
    +		{"LoadUint32", Func, 0, "func(addr *uint32) (val uint32)"},
    +		{"LoadUint64", Func, 0, "func(addr *uint64) (val uint64)"},
    +		{"LoadUintptr", Func, 0, "func(addr *uintptr) (val uintptr)"},
    +		{"OrInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
    +		{"OrInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
    +		{"OrUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
    +		{"OrUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
    +		{"OrUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
    +		{"Pointer", Type, 19, ""},
    +		{"StoreInt32", Func, 0, "func(addr *int32, val int32)"},
    +		{"StoreInt64", Func, 0, "func(addr *int64, val int64)"},
    +		{"StorePointer", Func, 0, "func(addr *unsafe.Pointer, val unsafe.Pointer)"},
    +		{"StoreUint32", Func, 0, "func(addr *uint32, val uint32)"},
    +		{"StoreUint64", Func, 0, "func(addr *uint64, val uint64)"},
    +		{"StoreUintptr", Func, 0, "func(addr *uintptr, val uintptr)"},
    +		{"SwapInt32", Func, 2, "func(addr *int32, new int32) (old int32)"},
    +		{"SwapInt64", Func, 2, "func(addr *int64, new int64) (old int64)"},
    +		{"SwapPointer", Func, 2, "func(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)"},
    +		{"SwapUint32", Func, 2, "func(addr *uint32, new uint32) (old uint32)"},
    +		{"SwapUint64", Func, 2, "func(addr *uint64, new uint64) (old uint64)"},
    +		{"SwapUintptr", Func, 2, "func(addr *uintptr, new uintptr) (old uintptr)"},
    +		{"Uint32", Type, 19, ""},
    +		{"Uint64", Type, 19, ""},
    +		{"Uintptr", Type, 19, ""},
    +		{"Value", Type, 4, ""},
     	},
     	"syscall": {
    -		{"(*Cmsghdr).SetLen", Method, 0},
    -		{"(*DLL).FindProc", Method, 0},
    -		{"(*DLL).MustFindProc", Method, 0},
    -		{"(*DLL).Release", Method, 0},
    -		{"(*DLLError).Error", Method, 0},
    -		{"(*DLLError).Unwrap", Method, 16},
    -		{"(*Filetime).Nanoseconds", Method, 0},
    -		{"(*Iovec).SetLen", Method, 0},
    -		{"(*LazyDLL).Handle", Method, 0},
    -		{"(*LazyDLL).Load", Method, 0},
    -		{"(*LazyDLL).NewProc", Method, 0},
    -		{"(*LazyProc).Addr", Method, 0},
    -		{"(*LazyProc).Call", Method, 0},
    -		{"(*LazyProc).Find", Method, 0},
    -		{"(*Msghdr).SetControllen", Method, 0},
    -		{"(*Proc).Addr", Method, 0},
    -		{"(*Proc).Call", Method, 0},
    -		{"(*PtraceRegs).PC", Method, 0},
    -		{"(*PtraceRegs).SetPC", Method, 0},
    -		{"(*RawSockaddrAny).Sockaddr", Method, 0},
    -		{"(*SID).Copy", Method, 0},
    -		{"(*SID).Len", Method, 0},
    -		{"(*SID).LookupAccount", Method, 0},
    -		{"(*SID).String", Method, 0},
    -		{"(*Timespec).Nano", Method, 0},
    -		{"(*Timespec).Unix", Method, 0},
    -		{"(*Timeval).Nano", Method, 0},
    -		{"(*Timeval).Nanoseconds", Method, 0},
    -		{"(*Timeval).Unix", Method, 0},
    -		{"(Errno).Error", Method, 0},
    -		{"(Errno).Is", Method, 13},
    -		{"(Errno).Temporary", Method, 0},
    -		{"(Errno).Timeout", Method, 0},
    -		{"(Signal).Signal", Method, 0},
    -		{"(Signal).String", Method, 0},
    -		{"(Token).Close", Method, 0},
    -		{"(Token).GetTokenPrimaryGroup", Method, 0},
    -		{"(Token).GetTokenUser", Method, 0},
    -		{"(Token).GetUserProfileDirectory", Method, 0},
    -		{"(WaitStatus).Continued", Method, 0},
    -		{"(WaitStatus).CoreDump", Method, 0},
    -		{"(WaitStatus).ExitStatus", Method, 0},
    -		{"(WaitStatus).Exited", Method, 0},
    -		{"(WaitStatus).Signal", Method, 0},
    -		{"(WaitStatus).Signaled", Method, 0},
    -		{"(WaitStatus).StopSignal", Method, 0},
    -		{"(WaitStatus).Stopped", Method, 0},
    -		{"(WaitStatus).TrapCause", Method, 0},
    -		{"AF_ALG", Const, 0},
    -		{"AF_APPLETALK", Const, 0},
    -		{"AF_ARP", Const, 0},
    -		{"AF_ASH", Const, 0},
    -		{"AF_ATM", Const, 0},
    -		{"AF_ATMPVC", Const, 0},
    -		{"AF_ATMSVC", Const, 0},
    -		{"AF_AX25", Const, 0},
    -		{"AF_BLUETOOTH", Const, 0},
    -		{"AF_BRIDGE", Const, 0},
    -		{"AF_CAIF", Const, 0},
    -		{"AF_CAN", Const, 0},
    -		{"AF_CCITT", Const, 0},
    -		{"AF_CHAOS", Const, 0},
    -		{"AF_CNT", Const, 0},
    -		{"AF_COIP", Const, 0},
    -		{"AF_DATAKIT", Const, 0},
    -		{"AF_DECnet", Const, 0},
    -		{"AF_DLI", Const, 0},
    -		{"AF_E164", Const, 0},
    -		{"AF_ECMA", Const, 0},
    -		{"AF_ECONET", Const, 0},
    -		{"AF_ENCAP", Const, 1},
    -		{"AF_FILE", Const, 0},
    -		{"AF_HYLINK", Const, 0},
    -		{"AF_IEEE80211", Const, 0},
    -		{"AF_IEEE802154", Const, 0},
    -		{"AF_IMPLINK", Const, 0},
    -		{"AF_INET", Const, 0},
    -		{"AF_INET6", Const, 0},
    -		{"AF_INET6_SDP", Const, 3},
    -		{"AF_INET_SDP", Const, 3},
    -		{"AF_IPX", Const, 0},
    -		{"AF_IRDA", Const, 0},
    -		{"AF_ISDN", Const, 0},
    -		{"AF_ISO", Const, 0},
    -		{"AF_IUCV", Const, 0},
    -		{"AF_KEY", Const, 0},
    -		{"AF_LAT", Const, 0},
    -		{"AF_LINK", Const, 0},
    -		{"AF_LLC", Const, 0},
    -		{"AF_LOCAL", Const, 0},
    -		{"AF_MAX", Const, 0},
    -		{"AF_MPLS", Const, 1},
    -		{"AF_NATM", Const, 0},
    -		{"AF_NDRV", Const, 0},
    -		{"AF_NETBEUI", Const, 0},
    -		{"AF_NETBIOS", Const, 0},
    -		{"AF_NETGRAPH", Const, 0},
    -		{"AF_NETLINK", Const, 0},
    -		{"AF_NETROM", Const, 0},
    -		{"AF_NS", Const, 0},
    -		{"AF_OROUTE", Const, 1},
    -		{"AF_OSI", Const, 0},
    -		{"AF_PACKET", Const, 0},
    -		{"AF_PHONET", Const, 0},
    -		{"AF_PPP", Const, 0},
    -		{"AF_PPPOX", Const, 0},
    -		{"AF_PUP", Const, 0},
    -		{"AF_RDS", Const, 0},
    -		{"AF_RESERVED_36", Const, 0},
    -		{"AF_ROSE", Const, 0},
    -		{"AF_ROUTE", Const, 0},
    -		{"AF_RXRPC", Const, 0},
    -		{"AF_SCLUSTER", Const, 0},
    -		{"AF_SECURITY", Const, 0},
    -		{"AF_SIP", Const, 0},
    -		{"AF_SLOW", Const, 0},
    -		{"AF_SNA", Const, 0},
    -		{"AF_SYSTEM", Const, 0},
    -		{"AF_TIPC", Const, 0},
    -		{"AF_UNIX", Const, 0},
    -		{"AF_UNSPEC", Const, 0},
    -		{"AF_UTUN", Const, 16},
    -		{"AF_VENDOR00", Const, 0},
    -		{"AF_VENDOR01", Const, 0},
    -		{"AF_VENDOR02", Const, 0},
    -		{"AF_VENDOR03", Const, 0},
    -		{"AF_VENDOR04", Const, 0},
    -		{"AF_VENDOR05", Const, 0},
    -		{"AF_VENDOR06", Const, 0},
    -		{"AF_VENDOR07", Const, 0},
    -		{"AF_VENDOR08", Const, 0},
    -		{"AF_VENDOR09", Const, 0},
    -		{"AF_VENDOR10", Const, 0},
    -		{"AF_VENDOR11", Const, 0},
    -		{"AF_VENDOR12", Const, 0},
    -		{"AF_VENDOR13", Const, 0},
    -		{"AF_VENDOR14", Const, 0},
    -		{"AF_VENDOR15", Const, 0},
    -		{"AF_VENDOR16", Const, 0},
    -		{"AF_VENDOR17", Const, 0},
    -		{"AF_VENDOR18", Const, 0},
    -		{"AF_VENDOR19", Const, 0},
    -		{"AF_VENDOR20", Const, 0},
    -		{"AF_VENDOR21", Const, 0},
    -		{"AF_VENDOR22", Const, 0},
    -		{"AF_VENDOR23", Const, 0},
    -		{"AF_VENDOR24", Const, 0},
    -		{"AF_VENDOR25", Const, 0},
    -		{"AF_VENDOR26", Const, 0},
    -		{"AF_VENDOR27", Const, 0},
    -		{"AF_VENDOR28", Const, 0},
    -		{"AF_VENDOR29", Const, 0},
    -		{"AF_VENDOR30", Const, 0},
    -		{"AF_VENDOR31", Const, 0},
    -		{"AF_VENDOR32", Const, 0},
    -		{"AF_VENDOR33", Const, 0},
    -		{"AF_VENDOR34", Const, 0},
    -		{"AF_VENDOR35", Const, 0},
    -		{"AF_VENDOR36", Const, 0},
    -		{"AF_VENDOR37", Const, 0},
    -		{"AF_VENDOR38", Const, 0},
    -		{"AF_VENDOR39", Const, 0},
    -		{"AF_VENDOR40", Const, 0},
    -		{"AF_VENDOR41", Const, 0},
    -		{"AF_VENDOR42", Const, 0},
    -		{"AF_VENDOR43", Const, 0},
    -		{"AF_VENDOR44", Const, 0},
    -		{"AF_VENDOR45", Const, 0},
    -		{"AF_VENDOR46", Const, 0},
    -		{"AF_VENDOR47", Const, 0},
    -		{"AF_WANPIPE", Const, 0},
    -		{"AF_X25", Const, 0},
    -		{"AI_CANONNAME", Const, 1},
    -		{"AI_NUMERICHOST", Const, 1},
    -		{"AI_PASSIVE", Const, 1},
    -		{"APPLICATION_ERROR", Const, 0},
    -		{"ARPHRD_ADAPT", Const, 0},
    -		{"ARPHRD_APPLETLK", Const, 0},
    -		{"ARPHRD_ARCNET", Const, 0},
    -		{"ARPHRD_ASH", Const, 0},
    -		{"ARPHRD_ATM", Const, 0},
    -		{"ARPHRD_AX25", Const, 0},
    -		{"ARPHRD_BIF", Const, 0},
    -		{"ARPHRD_CHAOS", Const, 0},
    -		{"ARPHRD_CISCO", Const, 0},
    -		{"ARPHRD_CSLIP", Const, 0},
    -		{"ARPHRD_CSLIP6", Const, 0},
    -		{"ARPHRD_DDCMP", Const, 0},
    -		{"ARPHRD_DLCI", Const, 0},
    -		{"ARPHRD_ECONET", Const, 0},
    -		{"ARPHRD_EETHER", Const, 0},
    -		{"ARPHRD_ETHER", Const, 0},
    -		{"ARPHRD_EUI64", Const, 0},
    -		{"ARPHRD_FCAL", Const, 0},
    -		{"ARPHRD_FCFABRIC", Const, 0},
    -		{"ARPHRD_FCPL", Const, 0},
    -		{"ARPHRD_FCPP", Const, 0},
    -		{"ARPHRD_FDDI", Const, 0},
    -		{"ARPHRD_FRAD", Const, 0},
    -		{"ARPHRD_FRELAY", Const, 1},
    -		{"ARPHRD_HDLC", Const, 0},
    -		{"ARPHRD_HIPPI", Const, 0},
    -		{"ARPHRD_HWX25", Const, 0},
    -		{"ARPHRD_IEEE1394", Const, 0},
    -		{"ARPHRD_IEEE802", Const, 0},
    -		{"ARPHRD_IEEE80211", Const, 0},
    -		{"ARPHRD_IEEE80211_PRISM", Const, 0},
    -		{"ARPHRD_IEEE80211_RADIOTAP", Const, 0},
    -		{"ARPHRD_IEEE802154", Const, 0},
    -		{"ARPHRD_IEEE802154_PHY", Const, 0},
    -		{"ARPHRD_IEEE802_TR", Const, 0},
    -		{"ARPHRD_INFINIBAND", Const, 0},
    -		{"ARPHRD_IPDDP", Const, 0},
    -		{"ARPHRD_IPGRE", Const, 0},
    -		{"ARPHRD_IRDA", Const, 0},
    -		{"ARPHRD_LAPB", Const, 0},
    -		{"ARPHRD_LOCALTLK", Const, 0},
    -		{"ARPHRD_LOOPBACK", Const, 0},
    -		{"ARPHRD_METRICOM", Const, 0},
    -		{"ARPHRD_NETROM", Const, 0},
    -		{"ARPHRD_NONE", Const, 0},
    -		{"ARPHRD_PIMREG", Const, 0},
    -		{"ARPHRD_PPP", Const, 0},
    -		{"ARPHRD_PRONET", Const, 0},
    -		{"ARPHRD_RAWHDLC", Const, 0},
    -		{"ARPHRD_ROSE", Const, 0},
    -		{"ARPHRD_RSRVD", Const, 0},
    -		{"ARPHRD_SIT", Const, 0},
    -		{"ARPHRD_SKIP", Const, 0},
    -		{"ARPHRD_SLIP", Const, 0},
    -		{"ARPHRD_SLIP6", Const, 0},
    -		{"ARPHRD_STRIP", Const, 1},
    -		{"ARPHRD_TUNNEL", Const, 0},
    -		{"ARPHRD_TUNNEL6", Const, 0},
    -		{"ARPHRD_VOID", Const, 0},
    -		{"ARPHRD_X25", Const, 0},
    -		{"AUTHTYPE_CLIENT", Const, 0},
    -		{"AUTHTYPE_SERVER", Const, 0},
    -		{"Accept", Func, 0},
    -		{"Accept4", Func, 1},
    -		{"AcceptEx", Func, 0},
    -		{"Access", Func, 0},
    -		{"Acct", Func, 0},
    -		{"AddrinfoW", Type, 1},
    -		{"AddrinfoW.Addr", Field, 1},
    -		{"AddrinfoW.Addrlen", Field, 1},
    -		{"AddrinfoW.Canonname", Field, 1},
    -		{"AddrinfoW.Family", Field, 1},
    -		{"AddrinfoW.Flags", Field, 1},
    -		{"AddrinfoW.Next", Field, 1},
    -		{"AddrinfoW.Protocol", Field, 1},
    -		{"AddrinfoW.Socktype", Field, 1},
    -		{"Adjtime", Func, 0},
    -		{"Adjtimex", Func, 0},
    -		{"AllThreadsSyscall", Func, 16},
    -		{"AllThreadsSyscall6", Func, 16},
    -		{"AttachLsf", Func, 0},
    -		{"B0", Const, 0},
    -		{"B1000000", Const, 0},
    -		{"B110", Const, 0},
    -		{"B115200", Const, 0},
    -		{"B1152000", Const, 0},
    -		{"B1200", Const, 0},
    -		{"B134", Const, 0},
    -		{"B14400", Const, 1},
    -		{"B150", Const, 0},
    -		{"B1500000", Const, 0},
    -		{"B1800", Const, 0},
    -		{"B19200", Const, 0},
    -		{"B200", Const, 0},
    -		{"B2000000", Const, 0},
    -		{"B230400", Const, 0},
    -		{"B2400", Const, 0},
    -		{"B2500000", Const, 0},
    -		{"B28800", Const, 1},
    -		{"B300", Const, 0},
    -		{"B3000000", Const, 0},
    -		{"B3500000", Const, 0},
    -		{"B38400", Const, 0},
    -		{"B4000000", Const, 0},
    -		{"B460800", Const, 0},
    -		{"B4800", Const, 0},
    -		{"B50", Const, 0},
    -		{"B500000", Const, 0},
    -		{"B57600", Const, 0},
    -		{"B576000", Const, 0},
    -		{"B600", Const, 0},
    -		{"B7200", Const, 1},
    -		{"B75", Const, 0},
    -		{"B76800", Const, 1},
    -		{"B921600", Const, 0},
    -		{"B9600", Const, 0},
    -		{"BASE_PROTOCOL", Const, 2},
    -		{"BIOCFEEDBACK", Const, 0},
    -		{"BIOCFLUSH", Const, 0},
    -		{"BIOCGBLEN", Const, 0},
    -		{"BIOCGDIRECTION", Const, 0},
    -		{"BIOCGDIRFILT", Const, 1},
    -		{"BIOCGDLT", Const, 0},
    -		{"BIOCGDLTLIST", Const, 0},
    -		{"BIOCGETBUFMODE", Const, 0},
    -		{"BIOCGETIF", Const, 0},
    -		{"BIOCGETZMAX", Const, 0},
    -		{"BIOCGFEEDBACK", Const, 1},
    -		{"BIOCGFILDROP", Const, 1},
    -		{"BIOCGHDRCMPLT", Const, 0},
    -		{"BIOCGRSIG", Const, 0},
    -		{"BIOCGRTIMEOUT", Const, 0},
    -		{"BIOCGSEESENT", Const, 0},
    -		{"BIOCGSTATS", Const, 0},
    -		{"BIOCGSTATSOLD", Const, 1},
    -		{"BIOCGTSTAMP", Const, 1},
    -		{"BIOCIMMEDIATE", Const, 0},
    -		{"BIOCLOCK", Const, 0},
    -		{"BIOCPROMISC", Const, 0},
    -		{"BIOCROTZBUF", Const, 0},
    -		{"BIOCSBLEN", Const, 0},
    -		{"BIOCSDIRECTION", Const, 0},
    -		{"BIOCSDIRFILT", Const, 1},
    -		{"BIOCSDLT", Const, 0},
    -		{"BIOCSETBUFMODE", Const, 0},
    -		{"BIOCSETF", Const, 0},
    -		{"BIOCSETFNR", Const, 0},
    -		{"BIOCSETIF", Const, 0},
    -		{"BIOCSETWF", Const, 0},
    -		{"BIOCSETZBUF", Const, 0},
    -		{"BIOCSFEEDBACK", Const, 1},
    -		{"BIOCSFILDROP", Const, 1},
    -		{"BIOCSHDRCMPLT", Const, 0},
    -		{"BIOCSRSIG", Const, 0},
    -		{"BIOCSRTIMEOUT", Const, 0},
    -		{"BIOCSSEESENT", Const, 0},
    -		{"BIOCSTCPF", Const, 1},
    -		{"BIOCSTSTAMP", Const, 1},
    -		{"BIOCSUDPF", Const, 1},
    -		{"BIOCVERSION", Const, 0},
    -		{"BPF_A", Const, 0},
    -		{"BPF_ABS", Const, 0},
    -		{"BPF_ADD", Const, 0},
    -		{"BPF_ALIGNMENT", Const, 0},
    -		{"BPF_ALIGNMENT32", Const, 1},
    -		{"BPF_ALU", Const, 0},
    -		{"BPF_AND", Const, 0},
    -		{"BPF_B", Const, 0},
    -		{"BPF_BUFMODE_BUFFER", Const, 0},
    -		{"BPF_BUFMODE_ZBUF", Const, 0},
    -		{"BPF_DFLTBUFSIZE", Const, 1},
    -		{"BPF_DIRECTION_IN", Const, 1},
    -		{"BPF_DIRECTION_OUT", Const, 1},
    -		{"BPF_DIV", Const, 0},
    -		{"BPF_H", Const, 0},
    -		{"BPF_IMM", Const, 0},
    -		{"BPF_IND", Const, 0},
    -		{"BPF_JA", Const, 0},
    -		{"BPF_JEQ", Const, 0},
    -		{"BPF_JGE", Const, 0},
    -		{"BPF_JGT", Const, 0},
    -		{"BPF_JMP", Const, 0},
    -		{"BPF_JSET", Const, 0},
    -		{"BPF_K", Const, 0},
    -		{"BPF_LD", Const, 0},
    -		{"BPF_LDX", Const, 0},
    -		{"BPF_LEN", Const, 0},
    -		{"BPF_LSH", Const, 0},
    -		{"BPF_MAJOR_VERSION", Const, 0},
    -		{"BPF_MAXBUFSIZE", Const, 0},
    -		{"BPF_MAXINSNS", Const, 0},
    -		{"BPF_MEM", Const, 0},
    -		{"BPF_MEMWORDS", Const, 0},
    -		{"BPF_MINBUFSIZE", Const, 0},
    -		{"BPF_MINOR_VERSION", Const, 0},
    -		{"BPF_MISC", Const, 0},
    -		{"BPF_MSH", Const, 0},
    -		{"BPF_MUL", Const, 0},
    -		{"BPF_NEG", Const, 0},
    -		{"BPF_OR", Const, 0},
    -		{"BPF_RELEASE", Const, 0},
    -		{"BPF_RET", Const, 0},
    -		{"BPF_RSH", Const, 0},
    -		{"BPF_ST", Const, 0},
    -		{"BPF_STX", Const, 0},
    -		{"BPF_SUB", Const, 0},
    -		{"BPF_TAX", Const, 0},
    -		{"BPF_TXA", Const, 0},
    -		{"BPF_T_BINTIME", Const, 1},
    -		{"BPF_T_BINTIME_FAST", Const, 1},
    -		{"BPF_T_BINTIME_MONOTONIC", Const, 1},
    -		{"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1},
    -		{"BPF_T_FAST", Const, 1},
    -		{"BPF_T_FLAG_MASK", Const, 1},
    -		{"BPF_T_FORMAT_MASK", Const, 1},
    -		{"BPF_T_MICROTIME", Const, 1},
    -		{"BPF_T_MICROTIME_FAST", Const, 1},
    -		{"BPF_T_MICROTIME_MONOTONIC", Const, 1},
    -		{"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1},
    -		{"BPF_T_MONOTONIC", Const, 1},
    -		{"BPF_T_MONOTONIC_FAST", Const, 1},
    -		{"BPF_T_NANOTIME", Const, 1},
    -		{"BPF_T_NANOTIME_FAST", Const, 1},
    -		{"BPF_T_NANOTIME_MONOTONIC", Const, 1},
    -		{"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1},
    -		{"BPF_T_NONE", Const, 1},
    -		{"BPF_T_NORMAL", Const, 1},
    -		{"BPF_W", Const, 0},
    -		{"BPF_X", Const, 0},
    -		{"BRKINT", Const, 0},
    -		{"Bind", Func, 0},
    -		{"BindToDevice", Func, 0},
    -		{"BpfBuflen", Func, 0},
    -		{"BpfDatalink", Func, 0},
    -		{"BpfHdr", Type, 0},
    -		{"BpfHdr.Caplen", Field, 0},
    -		{"BpfHdr.Datalen", Field, 0},
    -		{"BpfHdr.Hdrlen", Field, 0},
    -		{"BpfHdr.Pad_cgo_0", Field, 0},
    -		{"BpfHdr.Tstamp", Field, 0},
    -		{"BpfHeadercmpl", Func, 0},
    -		{"BpfInsn", Type, 0},
    -		{"BpfInsn.Code", Field, 0},
    -		{"BpfInsn.Jf", Field, 0},
    -		{"BpfInsn.Jt", Field, 0},
    -		{"BpfInsn.K", Field, 0},
    -		{"BpfInterface", Func, 0},
    -		{"BpfJump", Func, 0},
    -		{"BpfProgram", Type, 0},
    -		{"BpfProgram.Insns", Field, 0},
    -		{"BpfProgram.Len", Field, 0},
    -		{"BpfProgram.Pad_cgo_0", Field, 0},
    -		{"BpfStat", Type, 0},
    -		{"BpfStat.Capt", Field, 2},
    -		{"BpfStat.Drop", Field, 0},
    -		{"BpfStat.Padding", Field, 2},
    -		{"BpfStat.Recv", Field, 0},
    -		{"BpfStats", Func, 0},
    -		{"BpfStmt", Func, 0},
    -		{"BpfTimeout", Func, 0},
    -		{"BpfTimeval", Type, 2},
    -		{"BpfTimeval.Sec", Field, 2},
    -		{"BpfTimeval.Usec", Field, 2},
    -		{"BpfVersion", Type, 0},
    -		{"BpfVersion.Major", Field, 0},
    -		{"BpfVersion.Minor", Field, 0},
    -		{"BpfZbuf", Type, 0},
    -		{"BpfZbuf.Bufa", Field, 0},
    -		{"BpfZbuf.Bufb", Field, 0},
    -		{"BpfZbuf.Buflen", Field, 0},
    -		{"BpfZbufHeader", Type, 0},
    -		{"BpfZbufHeader.Kernel_gen", Field, 0},
    -		{"BpfZbufHeader.Kernel_len", Field, 0},
    -		{"BpfZbufHeader.User_gen", Field, 0},
    -		{"BpfZbufHeader.X_bzh_pad", Field, 0},
    -		{"ByHandleFileInformation", Type, 0},
    -		{"ByHandleFileInformation.CreationTime", Field, 0},
    -		{"ByHandleFileInformation.FileAttributes", Field, 0},
    -		{"ByHandleFileInformation.FileIndexHigh", Field, 0},
    -		{"ByHandleFileInformation.FileIndexLow", Field, 0},
    -		{"ByHandleFileInformation.FileSizeHigh", Field, 0},
    -		{"ByHandleFileInformation.FileSizeLow", Field, 0},
    -		{"ByHandleFileInformation.LastAccessTime", Field, 0},
    -		{"ByHandleFileInformation.LastWriteTime", Field, 0},
    -		{"ByHandleFileInformation.NumberOfLinks", Field, 0},
    -		{"ByHandleFileInformation.VolumeSerialNumber", Field, 0},
    -		{"BytePtrFromString", Func, 1},
    -		{"ByteSliceFromString", Func, 1},
    -		{"CCR0_FLUSH", Const, 1},
    -		{"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0},
    -		{"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0},
    -		{"CERT_CHAIN_POLICY_BASE", Const, 0},
    -		{"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0},
    -		{"CERT_CHAIN_POLICY_EV", Const, 0},
    -		{"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0},
    -		{"CERT_CHAIN_POLICY_NT_AUTH", Const, 0},
    -		{"CERT_CHAIN_POLICY_SSL", Const, 0},
    -		{"CERT_E_CN_NO_MATCH", Const, 0},
    -		{"CERT_E_EXPIRED", Const, 0},
    -		{"CERT_E_PURPOSE", Const, 0},
    -		{"CERT_E_ROLE", Const, 0},
    -		{"CERT_E_UNTRUSTEDROOT", Const, 0},
    -		{"CERT_STORE_ADD_ALWAYS", Const, 0},
    -		{"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0},
    -		{"CERT_STORE_PROV_MEMORY", Const, 0},
    -		{"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0},
    -		{"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0},
    -		{"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0},
    -		{"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0},
    -		{"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0},
    -		{"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0},
    -		{"CERT_TRUST_INVALID_EXTENSION", Const, 0},
    -		{"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0},
    -		{"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0},
    -		{"CERT_TRUST_IS_CYCLIC", Const, 0},
    -		{"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0},
    -		{"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0},
    -		{"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0},
    -		{"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0},
    -		{"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0},
    -		{"CERT_TRUST_IS_REVOKED", Const, 0},
    -		{"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0},
    -		{"CERT_TRUST_NO_ERROR", Const, 0},
    -		{"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0},
    -		{"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0},
    -		{"CFLUSH", Const, 1},
    -		{"CLOCAL", Const, 0},
    -		{"CLONE_CHILD_CLEARTID", Const, 2},
    -		{"CLONE_CHILD_SETTID", Const, 2},
    -		{"CLONE_CLEAR_SIGHAND", Const, 20},
    -		{"CLONE_CSIGNAL", Const, 3},
    -		{"CLONE_DETACHED", Const, 2},
    -		{"CLONE_FILES", Const, 2},
    -		{"CLONE_FS", Const, 2},
    -		{"CLONE_INTO_CGROUP", Const, 20},
    -		{"CLONE_IO", Const, 2},
    -		{"CLONE_NEWCGROUP", Const, 20},
    -		{"CLONE_NEWIPC", Const, 2},
    -		{"CLONE_NEWNET", Const, 2},
    -		{"CLONE_NEWNS", Const, 2},
    -		{"CLONE_NEWPID", Const, 2},
    -		{"CLONE_NEWTIME", Const, 20},
    -		{"CLONE_NEWUSER", Const, 2},
    -		{"CLONE_NEWUTS", Const, 2},
    -		{"CLONE_PARENT", Const, 2},
    -		{"CLONE_PARENT_SETTID", Const, 2},
    -		{"CLONE_PID", Const, 3},
    -		{"CLONE_PIDFD", Const, 20},
    -		{"CLONE_PTRACE", Const, 2},
    -		{"CLONE_SETTLS", Const, 2},
    -		{"CLONE_SIGHAND", Const, 2},
    -		{"CLONE_SYSVSEM", Const, 2},
    -		{"CLONE_THREAD", Const, 2},
    -		{"CLONE_UNTRACED", Const, 2},
    -		{"CLONE_VFORK", Const, 2},
    -		{"CLONE_VM", Const, 2},
    -		{"CPUID_CFLUSH", Const, 1},
    -		{"CREAD", Const, 0},
    -		{"CREATE_ALWAYS", Const, 0},
    -		{"CREATE_NEW", Const, 0},
    -		{"CREATE_NEW_PROCESS_GROUP", Const, 1},
    -		{"CREATE_UNICODE_ENVIRONMENT", Const, 0},
    -		{"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0},
    -		{"CRYPT_DELETEKEYSET", Const, 0},
    -		{"CRYPT_MACHINE_KEYSET", Const, 0},
    -		{"CRYPT_NEWKEYSET", Const, 0},
    -		{"CRYPT_SILENT", Const, 0},
    -		{"CRYPT_VERIFYCONTEXT", Const, 0},
    -		{"CS5", Const, 0},
    -		{"CS6", Const, 0},
    -		{"CS7", Const, 0},
    -		{"CS8", Const, 0},
    -		{"CSIZE", Const, 0},
    -		{"CSTART", Const, 1},
    -		{"CSTATUS", Const, 1},
    -		{"CSTOP", Const, 1},
    -		{"CSTOPB", Const, 0},
    -		{"CSUSP", Const, 1},
    -		{"CTL_MAXNAME", Const, 0},
    -		{"CTL_NET", Const, 0},
    -		{"CTL_QUERY", Const, 1},
    -		{"CTRL_BREAK_EVENT", Const, 1},
    -		{"CTRL_CLOSE_EVENT", Const, 14},
    -		{"CTRL_C_EVENT", Const, 1},
    -		{"CTRL_LOGOFF_EVENT", Const, 14},
    -		{"CTRL_SHUTDOWN_EVENT", Const, 14},
    -		{"CancelIo", Func, 0},
    -		{"CancelIoEx", Func, 1},
    -		{"CertAddCertificateContextToStore", Func, 0},
    -		{"CertChainContext", Type, 0},
    -		{"CertChainContext.ChainCount", Field, 0},
    -		{"CertChainContext.Chains", Field, 0},
    -		{"CertChainContext.HasRevocationFreshnessTime", Field, 0},
    -		{"CertChainContext.LowerQualityChainCount", Field, 0},
    -		{"CertChainContext.LowerQualityChains", Field, 0},
    -		{"CertChainContext.RevocationFreshnessTime", Field, 0},
    -		{"CertChainContext.Size", Field, 0},
    -		{"CertChainContext.TrustStatus", Field, 0},
    -		{"CertChainElement", Type, 0},
    -		{"CertChainElement.ApplicationUsage", Field, 0},
    -		{"CertChainElement.CertContext", Field, 0},
    -		{"CertChainElement.ExtendedErrorInfo", Field, 0},
    -		{"CertChainElement.IssuanceUsage", Field, 0},
    -		{"CertChainElement.RevocationInfo", Field, 0},
    -		{"CertChainElement.Size", Field, 0},
    -		{"CertChainElement.TrustStatus", Field, 0},
    -		{"CertChainPara", Type, 0},
    -		{"CertChainPara.CacheResync", Field, 0},
    -		{"CertChainPara.CheckRevocationFreshnessTime", Field, 0},
    -		{"CertChainPara.RequestedUsage", Field, 0},
    -		{"CertChainPara.RequstedIssuancePolicy", Field, 0},
    -		{"CertChainPara.RevocationFreshnessTime", Field, 0},
    -		{"CertChainPara.Size", Field, 0},
    -		{"CertChainPara.URLRetrievalTimeout", Field, 0},
    -		{"CertChainPolicyPara", Type, 0},
    -		{"CertChainPolicyPara.ExtraPolicyPara", Field, 0},
    -		{"CertChainPolicyPara.Flags", Field, 0},
    -		{"CertChainPolicyPara.Size", Field, 0},
    -		{"CertChainPolicyStatus", Type, 0},
    -		{"CertChainPolicyStatus.ChainIndex", Field, 0},
    -		{"CertChainPolicyStatus.ElementIndex", Field, 0},
    -		{"CertChainPolicyStatus.Error", Field, 0},
    -		{"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0},
    -		{"CertChainPolicyStatus.Size", Field, 0},
    -		{"CertCloseStore", Func, 0},
    -		{"CertContext", Type, 0},
    -		{"CertContext.CertInfo", Field, 0},
    -		{"CertContext.EncodedCert", Field, 0},
    -		{"CertContext.EncodingType", Field, 0},
    -		{"CertContext.Length", Field, 0},
    -		{"CertContext.Store", Field, 0},
    -		{"CertCreateCertificateContext", Func, 0},
    -		{"CertEnhKeyUsage", Type, 0},
    -		{"CertEnhKeyUsage.Length", Field, 0},
    -		{"CertEnhKeyUsage.UsageIdentifiers", Field, 0},
    -		{"CertEnumCertificatesInStore", Func, 0},
    -		{"CertFreeCertificateChain", Func, 0},
    -		{"CertFreeCertificateContext", Func, 0},
    -		{"CertGetCertificateChain", Func, 0},
    -		{"CertInfo", Type, 11},
    -		{"CertOpenStore", Func, 0},
    -		{"CertOpenSystemStore", Func, 0},
    -		{"CertRevocationCrlInfo", Type, 11},
    -		{"CertRevocationInfo", Type, 0},
    -		{"CertRevocationInfo.CrlInfo", Field, 0},
    -		{"CertRevocationInfo.FreshnessTime", Field, 0},
    -		{"CertRevocationInfo.HasFreshnessTime", Field, 0},
    -		{"CertRevocationInfo.OidSpecificInfo", Field, 0},
    -		{"CertRevocationInfo.RevocationOid", Field, 0},
    -		{"CertRevocationInfo.RevocationResult", Field, 0},
    -		{"CertRevocationInfo.Size", Field, 0},
    -		{"CertSimpleChain", Type, 0},
    -		{"CertSimpleChain.Elements", Field, 0},
    -		{"CertSimpleChain.HasRevocationFreshnessTime", Field, 0},
    -		{"CertSimpleChain.NumElements", Field, 0},
    -		{"CertSimpleChain.RevocationFreshnessTime", Field, 0},
    -		{"CertSimpleChain.Size", Field, 0},
    -		{"CertSimpleChain.TrustListInfo", Field, 0},
    -		{"CertSimpleChain.TrustStatus", Field, 0},
    -		{"CertTrustListInfo", Type, 11},
    -		{"CertTrustStatus", Type, 0},
    -		{"CertTrustStatus.ErrorStatus", Field, 0},
    -		{"CertTrustStatus.InfoStatus", Field, 0},
    -		{"CertUsageMatch", Type, 0},
    -		{"CertUsageMatch.Type", Field, 0},
    -		{"CertUsageMatch.Usage", Field, 0},
    -		{"CertVerifyCertificateChainPolicy", Func, 0},
    -		{"Chdir", Func, 0},
    -		{"CheckBpfVersion", Func, 0},
    -		{"Chflags", Func, 0},
    -		{"Chmod", Func, 0},
    -		{"Chown", Func, 0},
    -		{"Chroot", Func, 0},
    -		{"Clearenv", Func, 0},
    -		{"Close", Func, 0},
    -		{"CloseHandle", Func, 0},
    -		{"CloseOnExec", Func, 0},
    -		{"Closesocket", Func, 0},
    -		{"CmsgLen", Func, 0},
    -		{"CmsgSpace", Func, 0},
    -		{"Cmsghdr", Type, 0},
    -		{"Cmsghdr.Len", Field, 0},
    -		{"Cmsghdr.Level", Field, 0},
    -		{"Cmsghdr.Type", Field, 0},
    -		{"Cmsghdr.X__cmsg_data", Field, 0},
    -		{"CommandLineToArgv", Func, 0},
    -		{"ComputerName", Func, 0},
    -		{"Conn", Type, 9},
    -		{"Connect", Func, 0},
    -		{"ConnectEx", Func, 1},
    -		{"ConvertSidToStringSid", Func, 0},
    -		{"ConvertStringSidToSid", Func, 0},
    -		{"CopySid", Func, 0},
    -		{"Creat", Func, 0},
    -		{"CreateDirectory", Func, 0},
    -		{"CreateFile", Func, 0},
    -		{"CreateFileMapping", Func, 0},
    -		{"CreateHardLink", Func, 4},
    -		{"CreateIoCompletionPort", Func, 0},
    -		{"CreatePipe", Func, 0},
    -		{"CreateProcess", Func, 0},
    -		{"CreateProcessAsUser", Func, 10},
    -		{"CreateSymbolicLink", Func, 4},
    -		{"CreateToolhelp32Snapshot", Func, 4},
    -		{"Credential", Type, 0},
    -		{"Credential.Gid", Field, 0},
    -		{"Credential.Groups", Field, 0},
    -		{"Credential.NoSetGroups", Field, 9},
    -		{"Credential.Uid", Field, 0},
    -		{"CryptAcquireContext", Func, 0},
    -		{"CryptGenRandom", Func, 0},
    -		{"CryptReleaseContext", Func, 0},
    -		{"DIOCBSFLUSH", Const, 1},
    -		{"DIOCOSFPFLUSH", Const, 1},
    -		{"DLL", Type, 0},
    -		{"DLL.Handle", Field, 0},
    -		{"DLL.Name", Field, 0},
    -		{"DLLError", Type, 0},
    -		{"DLLError.Err", Field, 0},
    -		{"DLLError.Msg", Field, 0},
    -		{"DLLError.ObjName", Field, 0},
    -		{"DLT_A429", Const, 0},
    -		{"DLT_A653_ICM", Const, 0},
    -		{"DLT_AIRONET_HEADER", Const, 0},
    -		{"DLT_AOS", Const, 1},
    -		{"DLT_APPLE_IP_OVER_IEEE1394", Const, 0},
    -		{"DLT_ARCNET", Const, 0},
    -		{"DLT_ARCNET_LINUX", Const, 0},
    -		{"DLT_ATM_CLIP", Const, 0},
    -		{"DLT_ATM_RFC1483", Const, 0},
    -		{"DLT_AURORA", Const, 0},
    -		{"DLT_AX25", Const, 0},
    -		{"DLT_AX25_KISS", Const, 0},
    -		{"DLT_BACNET_MS_TP", Const, 0},
    -		{"DLT_BLUETOOTH_HCI_H4", Const, 0},
    -		{"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0},
    -		{"DLT_CAN20B", Const, 0},
    -		{"DLT_CAN_SOCKETCAN", Const, 1},
    -		{"DLT_CHAOS", Const, 0},
    -		{"DLT_CHDLC", Const, 0},
    -		{"DLT_CISCO_IOS", Const, 0},
    -		{"DLT_C_HDLC", Const, 0},
    -		{"DLT_C_HDLC_WITH_DIR", Const, 0},
    -		{"DLT_DBUS", Const, 1},
    -		{"DLT_DECT", Const, 1},
    -		{"DLT_DOCSIS", Const, 0},
    -		{"DLT_DVB_CI", Const, 1},
    -		{"DLT_ECONET", Const, 0},
    -		{"DLT_EN10MB", Const, 0},
    -		{"DLT_EN3MB", Const, 0},
    -		{"DLT_ENC", Const, 0},
    -		{"DLT_ERF", Const, 0},
    -		{"DLT_ERF_ETH", Const, 0},
    -		{"DLT_ERF_POS", Const, 0},
    -		{"DLT_FC_2", Const, 1},
    -		{"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1},
    -		{"DLT_FDDI", Const, 0},
    -		{"DLT_FLEXRAY", Const, 0},
    -		{"DLT_FRELAY", Const, 0},
    -		{"DLT_FRELAY_WITH_DIR", Const, 0},
    -		{"DLT_GCOM_SERIAL", Const, 0},
    -		{"DLT_GCOM_T1E1", Const, 0},
    -		{"DLT_GPF_F", Const, 0},
    -		{"DLT_GPF_T", Const, 0},
    -		{"DLT_GPRS_LLC", Const, 0},
    -		{"DLT_GSMTAP_ABIS", Const, 1},
    -		{"DLT_GSMTAP_UM", Const, 1},
    -		{"DLT_HDLC", Const, 1},
    -		{"DLT_HHDLC", Const, 0},
    -		{"DLT_HIPPI", Const, 1},
    -		{"DLT_IBM_SN", Const, 0},
    -		{"DLT_IBM_SP", Const, 0},
    -		{"DLT_IEEE802", Const, 0},
    -		{"DLT_IEEE802_11", Const, 0},
    -		{"DLT_IEEE802_11_RADIO", Const, 0},
    -		{"DLT_IEEE802_11_RADIO_AVS", Const, 0},
    -		{"DLT_IEEE802_15_4", Const, 0},
    -		{"DLT_IEEE802_15_4_LINUX", Const, 0},
    -		{"DLT_IEEE802_15_4_NOFCS", Const, 1},
    -		{"DLT_IEEE802_15_4_NONASK_PHY", Const, 0},
    -		{"DLT_IEEE802_16_MAC_CPS", Const, 0},
    -		{"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0},
    -		{"DLT_IPFILTER", Const, 0},
    -		{"DLT_IPMB", Const, 0},
    -		{"DLT_IPMB_LINUX", Const, 0},
    -		{"DLT_IPNET", Const, 1},
    -		{"DLT_IPOIB", Const, 1},
    -		{"DLT_IPV4", Const, 1},
    -		{"DLT_IPV6", Const, 1},
    -		{"DLT_IP_OVER_FC", Const, 0},
    -		{"DLT_JUNIPER_ATM1", Const, 0},
    -		{"DLT_JUNIPER_ATM2", Const, 0},
    -		{"DLT_JUNIPER_ATM_CEMIC", Const, 1},
    -		{"DLT_JUNIPER_CHDLC", Const, 0},
    -		{"DLT_JUNIPER_ES", Const, 0},
    -		{"DLT_JUNIPER_ETHER", Const, 0},
    -		{"DLT_JUNIPER_FIBRECHANNEL", Const, 1},
    -		{"DLT_JUNIPER_FRELAY", Const, 0},
    -		{"DLT_JUNIPER_GGSN", Const, 0},
    -		{"DLT_JUNIPER_ISM", Const, 0},
    -		{"DLT_JUNIPER_MFR", Const, 0},
    -		{"DLT_JUNIPER_MLFR", Const, 0},
    -		{"DLT_JUNIPER_MLPPP", Const, 0},
    -		{"DLT_JUNIPER_MONITOR", Const, 0},
    -		{"DLT_JUNIPER_PIC_PEER", Const, 0},
    -		{"DLT_JUNIPER_PPP", Const, 0},
    -		{"DLT_JUNIPER_PPPOE", Const, 0},
    -		{"DLT_JUNIPER_PPPOE_ATM", Const, 0},
    -		{"DLT_JUNIPER_SERVICES", Const, 0},
    -		{"DLT_JUNIPER_SRX_E2E", Const, 1},
    -		{"DLT_JUNIPER_ST", Const, 0},
    -		{"DLT_JUNIPER_VP", Const, 0},
    -		{"DLT_JUNIPER_VS", Const, 1},
    -		{"DLT_LAPB_WITH_DIR", Const, 0},
    -		{"DLT_LAPD", Const, 0},
    -		{"DLT_LIN", Const, 0},
    -		{"DLT_LINUX_EVDEV", Const, 1},
    -		{"DLT_LINUX_IRDA", Const, 0},
    -		{"DLT_LINUX_LAPD", Const, 0},
    -		{"DLT_LINUX_PPP_WITHDIRECTION", Const, 0},
    -		{"DLT_LINUX_SLL", Const, 0},
    -		{"DLT_LOOP", Const, 0},
    -		{"DLT_LTALK", Const, 0},
    -		{"DLT_MATCHING_MAX", Const, 1},
    -		{"DLT_MATCHING_MIN", Const, 1},
    -		{"DLT_MFR", Const, 0},
    -		{"DLT_MOST", Const, 0},
    -		{"DLT_MPEG_2_TS", Const, 1},
    -		{"DLT_MPLS", Const, 1},
    -		{"DLT_MTP2", Const, 0},
    -		{"DLT_MTP2_WITH_PHDR", Const, 0},
    -		{"DLT_MTP3", Const, 0},
    -		{"DLT_MUX27010", Const, 1},
    -		{"DLT_NETANALYZER", Const, 1},
    -		{"DLT_NETANALYZER_TRANSPARENT", Const, 1},
    -		{"DLT_NFC_LLCP", Const, 1},
    -		{"DLT_NFLOG", Const, 1},
    -		{"DLT_NG40", Const, 1},
    -		{"DLT_NULL", Const, 0},
    -		{"DLT_PCI_EXP", Const, 0},
    -		{"DLT_PFLOG", Const, 0},
    -		{"DLT_PFSYNC", Const, 0},
    -		{"DLT_PPI", Const, 0},
    -		{"DLT_PPP", Const, 0},
    -		{"DLT_PPP_BSDOS", Const, 0},
    -		{"DLT_PPP_ETHER", Const, 0},
    -		{"DLT_PPP_PPPD", Const, 0},
    -		{"DLT_PPP_SERIAL", Const, 0},
    -		{"DLT_PPP_WITH_DIR", Const, 0},
    -		{"DLT_PPP_WITH_DIRECTION", Const, 0},
    -		{"DLT_PRISM_HEADER", Const, 0},
    -		{"DLT_PRONET", Const, 0},
    -		{"DLT_RAIF1", Const, 0},
    -		{"DLT_RAW", Const, 0},
    -		{"DLT_RAWAF_MASK", Const, 1},
    -		{"DLT_RIO", Const, 0},
    -		{"DLT_SCCP", Const, 0},
    -		{"DLT_SITA", Const, 0},
    -		{"DLT_SLIP", Const, 0},
    -		{"DLT_SLIP_BSDOS", Const, 0},
    -		{"DLT_STANAG_5066_D_PDU", Const, 1},
    -		{"DLT_SUNATM", Const, 0},
    -		{"DLT_SYMANTEC_FIREWALL", Const, 0},
    -		{"DLT_TZSP", Const, 0},
    -		{"DLT_USB", Const, 0},
    -		{"DLT_USB_LINUX", Const, 0},
    -		{"DLT_USB_LINUX_MMAPPED", Const, 1},
    -		{"DLT_USER0", Const, 0},
    -		{"DLT_USER1", Const, 0},
    -		{"DLT_USER10", Const, 0},
    -		{"DLT_USER11", Const, 0},
    -		{"DLT_USER12", Const, 0},
    -		{"DLT_USER13", Const, 0},
    -		{"DLT_USER14", Const, 0},
    -		{"DLT_USER15", Const, 0},
    -		{"DLT_USER2", Const, 0},
    -		{"DLT_USER3", Const, 0},
    -		{"DLT_USER4", Const, 0},
    -		{"DLT_USER5", Const, 0},
    -		{"DLT_USER6", Const, 0},
    -		{"DLT_USER7", Const, 0},
    -		{"DLT_USER8", Const, 0},
    -		{"DLT_USER9", Const, 0},
    -		{"DLT_WIHART", Const, 1},
    -		{"DLT_X2E_SERIAL", Const, 0},
    -		{"DLT_X2E_XORAYA", Const, 0},
    -		{"DNSMXData", Type, 0},
    -		{"DNSMXData.NameExchange", Field, 0},
    -		{"DNSMXData.Pad", Field, 0},
    -		{"DNSMXData.Preference", Field, 0},
    -		{"DNSPTRData", Type, 0},
    -		{"DNSPTRData.Host", Field, 0},
    -		{"DNSRecord", Type, 0},
    -		{"DNSRecord.Data", Field, 0},
    -		{"DNSRecord.Dw", Field, 0},
    -		{"DNSRecord.Length", Field, 0},
    -		{"DNSRecord.Name", Field, 0},
    -		{"DNSRecord.Next", Field, 0},
    -		{"DNSRecord.Reserved", Field, 0},
    -		{"DNSRecord.Ttl", Field, 0},
    -		{"DNSRecord.Type", Field, 0},
    -		{"DNSSRVData", Type, 0},
    -		{"DNSSRVData.Pad", Field, 0},
    -		{"DNSSRVData.Port", Field, 0},
    -		{"DNSSRVData.Priority", Field, 0},
    -		{"DNSSRVData.Target", Field, 0},
    -		{"DNSSRVData.Weight", Field, 0},
    -		{"DNSTXTData", Type, 0},
    -		{"DNSTXTData.StringArray", Field, 0},
    -		{"DNSTXTData.StringCount", Field, 0},
    -		{"DNS_INFO_NO_RECORDS", Const, 4},
    -		{"DNS_TYPE_A", Const, 0},
    -		{"DNS_TYPE_A6", Const, 0},
    -		{"DNS_TYPE_AAAA", Const, 0},
    -		{"DNS_TYPE_ADDRS", Const, 0},
    -		{"DNS_TYPE_AFSDB", Const, 0},
    -		{"DNS_TYPE_ALL", Const, 0},
    -		{"DNS_TYPE_ANY", Const, 0},
    -		{"DNS_TYPE_ATMA", Const, 0},
    -		{"DNS_TYPE_AXFR", Const, 0},
    -		{"DNS_TYPE_CERT", Const, 0},
    -		{"DNS_TYPE_CNAME", Const, 0},
    -		{"DNS_TYPE_DHCID", Const, 0},
    -		{"DNS_TYPE_DNAME", Const, 0},
    -		{"DNS_TYPE_DNSKEY", Const, 0},
    -		{"DNS_TYPE_DS", Const, 0},
    -		{"DNS_TYPE_EID", Const, 0},
    -		{"DNS_TYPE_GID", Const, 0},
    -		{"DNS_TYPE_GPOS", Const, 0},
    -		{"DNS_TYPE_HINFO", Const, 0},
    -		{"DNS_TYPE_ISDN", Const, 0},
    -		{"DNS_TYPE_IXFR", Const, 0},
    -		{"DNS_TYPE_KEY", Const, 0},
    -		{"DNS_TYPE_KX", Const, 0},
    -		{"DNS_TYPE_LOC", Const, 0},
    -		{"DNS_TYPE_MAILA", Const, 0},
    -		{"DNS_TYPE_MAILB", Const, 0},
    -		{"DNS_TYPE_MB", Const, 0},
    -		{"DNS_TYPE_MD", Const, 0},
    -		{"DNS_TYPE_MF", Const, 0},
    -		{"DNS_TYPE_MG", Const, 0},
    -		{"DNS_TYPE_MINFO", Const, 0},
    -		{"DNS_TYPE_MR", Const, 0},
    -		{"DNS_TYPE_MX", Const, 0},
    -		{"DNS_TYPE_NAPTR", Const, 0},
    -		{"DNS_TYPE_NBSTAT", Const, 0},
    -		{"DNS_TYPE_NIMLOC", Const, 0},
    -		{"DNS_TYPE_NS", Const, 0},
    -		{"DNS_TYPE_NSAP", Const, 0},
    -		{"DNS_TYPE_NSAPPTR", Const, 0},
    -		{"DNS_TYPE_NSEC", Const, 0},
    -		{"DNS_TYPE_NULL", Const, 0},
    -		{"DNS_TYPE_NXT", Const, 0},
    -		{"DNS_TYPE_OPT", Const, 0},
    -		{"DNS_TYPE_PTR", Const, 0},
    -		{"DNS_TYPE_PX", Const, 0},
    -		{"DNS_TYPE_RP", Const, 0},
    -		{"DNS_TYPE_RRSIG", Const, 0},
    -		{"DNS_TYPE_RT", Const, 0},
    -		{"DNS_TYPE_SIG", Const, 0},
    -		{"DNS_TYPE_SINK", Const, 0},
    -		{"DNS_TYPE_SOA", Const, 0},
    -		{"DNS_TYPE_SRV", Const, 0},
    -		{"DNS_TYPE_TEXT", Const, 0},
    -		{"DNS_TYPE_TKEY", Const, 0},
    -		{"DNS_TYPE_TSIG", Const, 0},
    -		{"DNS_TYPE_UID", Const, 0},
    -		{"DNS_TYPE_UINFO", Const, 0},
    -		{"DNS_TYPE_UNSPEC", Const, 0},
    -		{"DNS_TYPE_WINS", Const, 0},
    -		{"DNS_TYPE_WINSR", Const, 0},
    -		{"DNS_TYPE_WKS", Const, 0},
    -		{"DNS_TYPE_X25", Const, 0},
    -		{"DT_BLK", Const, 0},
    -		{"DT_CHR", Const, 0},
    -		{"DT_DIR", Const, 0},
    -		{"DT_FIFO", Const, 0},
    -		{"DT_LNK", Const, 0},
    -		{"DT_REG", Const, 0},
    -		{"DT_SOCK", Const, 0},
    -		{"DT_UNKNOWN", Const, 0},
    -		{"DT_WHT", Const, 0},
    -		{"DUPLICATE_CLOSE_SOURCE", Const, 0},
    -		{"DUPLICATE_SAME_ACCESS", Const, 0},
    -		{"DeleteFile", Func, 0},
    -		{"DetachLsf", Func, 0},
    -		{"DeviceIoControl", Func, 4},
    -		{"Dirent", Type, 0},
    -		{"Dirent.Fileno", Field, 0},
    -		{"Dirent.Ino", Field, 0},
    -		{"Dirent.Name", Field, 0},
    -		{"Dirent.Namlen", Field, 0},
    -		{"Dirent.Off", Field, 0},
    -		{"Dirent.Pad0", Field, 12},
    -		{"Dirent.Pad1", Field, 12},
    -		{"Dirent.Pad_cgo_0", Field, 0},
    -		{"Dirent.Reclen", Field, 0},
    -		{"Dirent.Seekoff", Field, 0},
    -		{"Dirent.Type", Field, 0},
    -		{"Dirent.X__d_padding", Field, 3},
    -		{"DnsNameCompare", Func, 4},
    -		{"DnsQuery", Func, 0},
    -		{"DnsRecordListFree", Func, 0},
    -		{"DnsSectionAdditional", Const, 4},
    -		{"DnsSectionAnswer", Const, 4},
    -		{"DnsSectionAuthority", Const, 4},
    -		{"DnsSectionQuestion", Const, 4},
    -		{"Dup", Func, 0},
    -		{"Dup2", Func, 0},
    -		{"Dup3", Func, 2},
    -		{"DuplicateHandle", Func, 0},
    -		{"E2BIG", Const, 0},
    -		{"EACCES", Const, 0},
    -		{"EADDRINUSE", Const, 0},
    -		{"EADDRNOTAVAIL", Const, 0},
    -		{"EADV", Const, 0},
    -		{"EAFNOSUPPORT", Const, 0},
    -		{"EAGAIN", Const, 0},
    -		{"EALREADY", Const, 0},
    -		{"EAUTH", Const, 0},
    -		{"EBADARCH", Const, 0},
    -		{"EBADE", Const, 0},
    -		{"EBADEXEC", Const, 0},
    -		{"EBADF", Const, 0},
    -		{"EBADFD", Const, 0},
    -		{"EBADMACHO", Const, 0},
    -		{"EBADMSG", Const, 0},
    -		{"EBADR", Const, 0},
    -		{"EBADRPC", Const, 0},
    -		{"EBADRQC", Const, 0},
    -		{"EBADSLT", Const, 0},
    -		{"EBFONT", Const, 0},
    -		{"EBUSY", Const, 0},
    -		{"ECANCELED", Const, 0},
    -		{"ECAPMODE", Const, 1},
    -		{"ECHILD", Const, 0},
    -		{"ECHO", Const, 0},
    -		{"ECHOCTL", Const, 0},
    -		{"ECHOE", Const, 0},
    -		{"ECHOK", Const, 0},
    -		{"ECHOKE", Const, 0},
    -		{"ECHONL", Const, 0},
    -		{"ECHOPRT", Const, 0},
    -		{"ECHRNG", Const, 0},
    -		{"ECOMM", Const, 0},
    -		{"ECONNABORTED", Const, 0},
    -		{"ECONNREFUSED", Const, 0},
    -		{"ECONNRESET", Const, 0},
    -		{"EDEADLK", Const, 0},
    -		{"EDEADLOCK", Const, 0},
    -		{"EDESTADDRREQ", Const, 0},
    -		{"EDEVERR", Const, 0},
    -		{"EDOM", Const, 0},
    -		{"EDOOFUS", Const, 0},
    -		{"EDOTDOT", Const, 0},
    -		{"EDQUOT", Const, 0},
    -		{"EEXIST", Const, 0},
    -		{"EFAULT", Const, 0},
    -		{"EFBIG", Const, 0},
    -		{"EFER_LMA", Const, 1},
    -		{"EFER_LME", Const, 1},
    -		{"EFER_NXE", Const, 1},
    -		{"EFER_SCE", Const, 1},
    -		{"EFTYPE", Const, 0},
    -		{"EHOSTDOWN", Const, 0},
    -		{"EHOSTUNREACH", Const, 0},
    -		{"EHWPOISON", Const, 0},
    -		{"EIDRM", Const, 0},
    -		{"EILSEQ", Const, 0},
    -		{"EINPROGRESS", Const, 0},
    -		{"EINTR", Const, 0},
    -		{"EINVAL", Const, 0},
    -		{"EIO", Const, 0},
    -		{"EIPSEC", Const, 1},
    -		{"EISCONN", Const, 0},
    -		{"EISDIR", Const, 0},
    -		{"EISNAM", Const, 0},
    -		{"EKEYEXPIRED", Const, 0},
    -		{"EKEYREJECTED", Const, 0},
    -		{"EKEYREVOKED", Const, 0},
    -		{"EL2HLT", Const, 0},
    -		{"EL2NSYNC", Const, 0},
    -		{"EL3HLT", Const, 0},
    -		{"EL3RST", Const, 0},
    -		{"ELAST", Const, 0},
    -		{"ELF_NGREG", Const, 0},
    -		{"ELF_PRARGSZ", Const, 0},
    -		{"ELIBACC", Const, 0},
    -		{"ELIBBAD", Const, 0},
    -		{"ELIBEXEC", Const, 0},
    -		{"ELIBMAX", Const, 0},
    -		{"ELIBSCN", Const, 0},
    -		{"ELNRNG", Const, 0},
    -		{"ELOOP", Const, 0},
    -		{"EMEDIUMTYPE", Const, 0},
    -		{"EMFILE", Const, 0},
    -		{"EMLINK", Const, 0},
    -		{"EMSGSIZE", Const, 0},
    -		{"EMT_TAGOVF", Const, 1},
    -		{"EMULTIHOP", Const, 0},
    -		{"EMUL_ENABLED", Const, 1},
    -		{"EMUL_LINUX", Const, 1},
    -		{"EMUL_LINUX32", Const, 1},
    -		{"EMUL_MAXID", Const, 1},
    -		{"EMUL_NATIVE", Const, 1},
    -		{"ENAMETOOLONG", Const, 0},
    -		{"ENAVAIL", Const, 0},
    -		{"ENDRUNDISC", Const, 1},
    -		{"ENEEDAUTH", Const, 0},
    -		{"ENETDOWN", Const, 0},
    -		{"ENETRESET", Const, 0},
    -		{"ENETUNREACH", Const, 0},
    -		{"ENFILE", Const, 0},
    -		{"ENOANO", Const, 0},
    -		{"ENOATTR", Const, 0},
    -		{"ENOBUFS", Const, 0},
    -		{"ENOCSI", Const, 0},
    -		{"ENODATA", Const, 0},
    -		{"ENODEV", Const, 0},
    -		{"ENOENT", Const, 0},
    -		{"ENOEXEC", Const, 0},
    -		{"ENOKEY", Const, 0},
    -		{"ENOLCK", Const, 0},
    -		{"ENOLINK", Const, 0},
    -		{"ENOMEDIUM", Const, 0},
    -		{"ENOMEM", Const, 0},
    -		{"ENOMSG", Const, 0},
    -		{"ENONET", Const, 0},
    -		{"ENOPKG", Const, 0},
    -		{"ENOPOLICY", Const, 0},
    -		{"ENOPROTOOPT", Const, 0},
    -		{"ENOSPC", Const, 0},
    -		{"ENOSR", Const, 0},
    -		{"ENOSTR", Const, 0},
    -		{"ENOSYS", Const, 0},
    -		{"ENOTBLK", Const, 0},
    -		{"ENOTCAPABLE", Const, 0},
    -		{"ENOTCONN", Const, 0},
    -		{"ENOTDIR", Const, 0},
    -		{"ENOTEMPTY", Const, 0},
    -		{"ENOTNAM", Const, 0},
    -		{"ENOTRECOVERABLE", Const, 0},
    -		{"ENOTSOCK", Const, 0},
    -		{"ENOTSUP", Const, 0},
    -		{"ENOTTY", Const, 0},
    -		{"ENOTUNIQ", Const, 0},
    -		{"ENXIO", Const, 0},
    -		{"EN_SW_CTL_INF", Const, 1},
    -		{"EN_SW_CTL_PREC", Const, 1},
    -		{"EN_SW_CTL_ROUND", Const, 1},
    -		{"EN_SW_DATACHAIN", Const, 1},
    -		{"EN_SW_DENORM", Const, 1},
    -		{"EN_SW_INVOP", Const, 1},
    -		{"EN_SW_OVERFLOW", Const, 1},
    -		{"EN_SW_PRECLOSS", Const, 1},
    -		{"EN_SW_UNDERFLOW", Const, 1},
    -		{"EN_SW_ZERODIV", Const, 1},
    -		{"EOPNOTSUPP", Const, 0},
    -		{"EOVERFLOW", Const, 0},
    -		{"EOWNERDEAD", Const, 0},
    -		{"EPERM", Const, 0},
    -		{"EPFNOSUPPORT", Const, 0},
    -		{"EPIPE", Const, 0},
    -		{"EPOLLERR", Const, 0},
    -		{"EPOLLET", Const, 0},
    -		{"EPOLLHUP", Const, 0},
    -		{"EPOLLIN", Const, 0},
    -		{"EPOLLMSG", Const, 0},
    -		{"EPOLLONESHOT", Const, 0},
    -		{"EPOLLOUT", Const, 0},
    -		{"EPOLLPRI", Const, 0},
    -		{"EPOLLRDBAND", Const, 0},
    -		{"EPOLLRDHUP", Const, 0},
    -		{"EPOLLRDNORM", Const, 0},
    -		{"EPOLLWRBAND", Const, 0},
    -		{"EPOLLWRNORM", Const, 0},
    -		{"EPOLL_CLOEXEC", Const, 0},
    -		{"EPOLL_CTL_ADD", Const, 0},
    -		{"EPOLL_CTL_DEL", Const, 0},
    -		{"EPOLL_CTL_MOD", Const, 0},
    -		{"EPOLL_NONBLOCK", Const, 0},
    -		{"EPROCLIM", Const, 0},
    -		{"EPROCUNAVAIL", Const, 0},
    -		{"EPROGMISMATCH", Const, 0},
    -		{"EPROGUNAVAIL", Const, 0},
    -		{"EPROTO", Const, 0},
    -		{"EPROTONOSUPPORT", Const, 0},
    -		{"EPROTOTYPE", Const, 0},
    -		{"EPWROFF", Const, 0},
    -		{"EQFULL", Const, 16},
    -		{"ERANGE", Const, 0},
    -		{"EREMCHG", Const, 0},
    -		{"EREMOTE", Const, 0},
    -		{"EREMOTEIO", Const, 0},
    -		{"ERESTART", Const, 0},
    -		{"ERFKILL", Const, 0},
    -		{"EROFS", Const, 0},
    -		{"ERPCMISMATCH", Const, 0},
    -		{"ERROR_ACCESS_DENIED", Const, 0},
    -		{"ERROR_ALREADY_EXISTS", Const, 0},
    -		{"ERROR_BROKEN_PIPE", Const, 0},
    -		{"ERROR_BUFFER_OVERFLOW", Const, 0},
    -		{"ERROR_DIR_NOT_EMPTY", Const, 8},
    -		{"ERROR_ENVVAR_NOT_FOUND", Const, 0},
    -		{"ERROR_FILE_EXISTS", Const, 0},
    -		{"ERROR_FILE_NOT_FOUND", Const, 0},
    -		{"ERROR_HANDLE_EOF", Const, 2},
    -		{"ERROR_INSUFFICIENT_BUFFER", Const, 0},
    -		{"ERROR_IO_PENDING", Const, 0},
    -		{"ERROR_MOD_NOT_FOUND", Const, 0},
    -		{"ERROR_MORE_DATA", Const, 3},
    -		{"ERROR_NETNAME_DELETED", Const, 3},
    -		{"ERROR_NOT_FOUND", Const, 1},
    -		{"ERROR_NO_MORE_FILES", Const, 0},
    -		{"ERROR_OPERATION_ABORTED", Const, 0},
    -		{"ERROR_PATH_NOT_FOUND", Const, 0},
    -		{"ERROR_PRIVILEGE_NOT_HELD", Const, 4},
    -		{"ERROR_PROC_NOT_FOUND", Const, 0},
    -		{"ESHLIBVERS", Const, 0},
    -		{"ESHUTDOWN", Const, 0},
    -		{"ESOCKTNOSUPPORT", Const, 0},
    -		{"ESPIPE", Const, 0},
    -		{"ESRCH", Const, 0},
    -		{"ESRMNT", Const, 0},
    -		{"ESTALE", Const, 0},
    -		{"ESTRPIPE", Const, 0},
    -		{"ETHERCAP_JUMBO_MTU", Const, 1},
    -		{"ETHERCAP_VLAN_HWTAGGING", Const, 1},
    -		{"ETHERCAP_VLAN_MTU", Const, 1},
    -		{"ETHERMIN", Const, 1},
    -		{"ETHERMTU", Const, 1},
    -		{"ETHERMTU_JUMBO", Const, 1},
    -		{"ETHERTYPE_8023", Const, 1},
    -		{"ETHERTYPE_AARP", Const, 1},
    -		{"ETHERTYPE_ACCTON", Const, 1},
    -		{"ETHERTYPE_AEONIC", Const, 1},
    -		{"ETHERTYPE_ALPHA", Const, 1},
    -		{"ETHERTYPE_AMBER", Const, 1},
    -		{"ETHERTYPE_AMOEBA", Const, 1},
    -		{"ETHERTYPE_AOE", Const, 1},
    -		{"ETHERTYPE_APOLLO", Const, 1},
    -		{"ETHERTYPE_APOLLODOMAIN", Const, 1},
    -		{"ETHERTYPE_APPLETALK", Const, 1},
    -		{"ETHERTYPE_APPLITEK", Const, 1},
    -		{"ETHERTYPE_ARGONAUT", Const, 1},
    -		{"ETHERTYPE_ARP", Const, 1},
    -		{"ETHERTYPE_AT", Const, 1},
    -		{"ETHERTYPE_ATALK", Const, 1},
    -		{"ETHERTYPE_ATOMIC", Const, 1},
    -		{"ETHERTYPE_ATT", Const, 1},
    -		{"ETHERTYPE_ATTSTANFORD", Const, 1},
    -		{"ETHERTYPE_AUTOPHON", Const, 1},
    -		{"ETHERTYPE_AXIS", Const, 1},
    -		{"ETHERTYPE_BCLOOP", Const, 1},
    -		{"ETHERTYPE_BOFL", Const, 1},
    -		{"ETHERTYPE_CABLETRON", Const, 1},
    -		{"ETHERTYPE_CHAOS", Const, 1},
    -		{"ETHERTYPE_COMDESIGN", Const, 1},
    -		{"ETHERTYPE_COMPUGRAPHIC", Const, 1},
    -		{"ETHERTYPE_COUNTERPOINT", Const, 1},
    -		{"ETHERTYPE_CRONUS", Const, 1},
    -		{"ETHERTYPE_CRONUSVLN", Const, 1},
    -		{"ETHERTYPE_DCA", Const, 1},
    -		{"ETHERTYPE_DDE", Const, 1},
    -		{"ETHERTYPE_DEBNI", Const, 1},
    -		{"ETHERTYPE_DECAM", Const, 1},
    -		{"ETHERTYPE_DECCUST", Const, 1},
    -		{"ETHERTYPE_DECDIAG", Const, 1},
    -		{"ETHERTYPE_DECDNS", Const, 1},
    -		{"ETHERTYPE_DECDTS", Const, 1},
    -		{"ETHERTYPE_DECEXPER", Const, 1},
    -		{"ETHERTYPE_DECLAST", Const, 1},
    -		{"ETHERTYPE_DECLTM", Const, 1},
    -		{"ETHERTYPE_DECMUMPS", Const, 1},
    -		{"ETHERTYPE_DECNETBIOS", Const, 1},
    -		{"ETHERTYPE_DELTACON", Const, 1},
    -		{"ETHERTYPE_DIDDLE", Const, 1},
    -		{"ETHERTYPE_DLOG1", Const, 1},
    -		{"ETHERTYPE_DLOG2", Const, 1},
    -		{"ETHERTYPE_DN", Const, 1},
    -		{"ETHERTYPE_DOGFIGHT", Const, 1},
    -		{"ETHERTYPE_DSMD", Const, 1},
    -		{"ETHERTYPE_ECMA", Const, 1},
    -		{"ETHERTYPE_ENCRYPT", Const, 1},
    -		{"ETHERTYPE_ES", Const, 1},
    -		{"ETHERTYPE_EXCELAN", Const, 1},
    -		{"ETHERTYPE_EXPERDATA", Const, 1},
    -		{"ETHERTYPE_FLIP", Const, 1},
    -		{"ETHERTYPE_FLOWCONTROL", Const, 1},
    -		{"ETHERTYPE_FRARP", Const, 1},
    -		{"ETHERTYPE_GENDYN", Const, 1},
    -		{"ETHERTYPE_HAYES", Const, 1},
    -		{"ETHERTYPE_HIPPI_FP", Const, 1},
    -		{"ETHERTYPE_HITACHI", Const, 1},
    -		{"ETHERTYPE_HP", Const, 1},
    -		{"ETHERTYPE_IEEEPUP", Const, 1},
    -		{"ETHERTYPE_IEEEPUPAT", Const, 1},
    -		{"ETHERTYPE_IMLBL", Const, 1},
    -		{"ETHERTYPE_IMLBLDIAG", Const, 1},
    -		{"ETHERTYPE_IP", Const, 1},
    -		{"ETHERTYPE_IPAS", Const, 1},
    -		{"ETHERTYPE_IPV6", Const, 1},
    -		{"ETHERTYPE_IPX", Const, 1},
    -		{"ETHERTYPE_IPXNEW", Const, 1},
    -		{"ETHERTYPE_KALPANA", Const, 1},
    -		{"ETHERTYPE_LANBRIDGE", Const, 1},
    -		{"ETHERTYPE_LANPROBE", Const, 1},
    -		{"ETHERTYPE_LAT", Const, 1},
    -		{"ETHERTYPE_LBACK", Const, 1},
    -		{"ETHERTYPE_LITTLE", Const, 1},
    -		{"ETHERTYPE_LLDP", Const, 1},
    -		{"ETHERTYPE_LOGICRAFT", Const, 1},
    -		{"ETHERTYPE_LOOPBACK", Const, 1},
    -		{"ETHERTYPE_MATRA", Const, 1},
    -		{"ETHERTYPE_MAX", Const, 1},
    -		{"ETHERTYPE_MERIT", Const, 1},
    -		{"ETHERTYPE_MICP", Const, 1},
    -		{"ETHERTYPE_MOPDL", Const, 1},
    -		{"ETHERTYPE_MOPRC", Const, 1},
    -		{"ETHERTYPE_MOTOROLA", Const, 1},
    -		{"ETHERTYPE_MPLS", Const, 1},
    -		{"ETHERTYPE_MPLS_MCAST", Const, 1},
    -		{"ETHERTYPE_MUMPS", Const, 1},
    -		{"ETHERTYPE_NBPCC", Const, 1},
    -		{"ETHERTYPE_NBPCLAIM", Const, 1},
    -		{"ETHERTYPE_NBPCLREQ", Const, 1},
    -		{"ETHERTYPE_NBPCLRSP", Const, 1},
    -		{"ETHERTYPE_NBPCREQ", Const, 1},
    -		{"ETHERTYPE_NBPCRSP", Const, 1},
    -		{"ETHERTYPE_NBPDG", Const, 1},
    -		{"ETHERTYPE_NBPDGB", Const, 1},
    -		{"ETHERTYPE_NBPDLTE", Const, 1},
    -		{"ETHERTYPE_NBPRAR", Const, 1},
    -		{"ETHERTYPE_NBPRAS", Const, 1},
    -		{"ETHERTYPE_NBPRST", Const, 1},
    -		{"ETHERTYPE_NBPSCD", Const, 1},
    -		{"ETHERTYPE_NBPVCD", Const, 1},
    -		{"ETHERTYPE_NBS", Const, 1},
    -		{"ETHERTYPE_NCD", Const, 1},
    -		{"ETHERTYPE_NESTAR", Const, 1},
    -		{"ETHERTYPE_NETBEUI", Const, 1},
    -		{"ETHERTYPE_NOVELL", Const, 1},
    -		{"ETHERTYPE_NS", Const, 1},
    -		{"ETHERTYPE_NSAT", Const, 1},
    -		{"ETHERTYPE_NSCOMPAT", Const, 1},
    -		{"ETHERTYPE_NTRAILER", Const, 1},
    -		{"ETHERTYPE_OS9", Const, 1},
    -		{"ETHERTYPE_OS9NET", Const, 1},
    -		{"ETHERTYPE_PACER", Const, 1},
    -		{"ETHERTYPE_PAE", Const, 1},
    -		{"ETHERTYPE_PCS", Const, 1},
    -		{"ETHERTYPE_PLANNING", Const, 1},
    -		{"ETHERTYPE_PPP", Const, 1},
    -		{"ETHERTYPE_PPPOE", Const, 1},
    -		{"ETHERTYPE_PPPOEDISC", Const, 1},
    -		{"ETHERTYPE_PRIMENTS", Const, 1},
    -		{"ETHERTYPE_PUP", Const, 1},
    -		{"ETHERTYPE_PUPAT", Const, 1},
    -		{"ETHERTYPE_QINQ", Const, 1},
    -		{"ETHERTYPE_RACAL", Const, 1},
    -		{"ETHERTYPE_RATIONAL", Const, 1},
    -		{"ETHERTYPE_RAWFR", Const, 1},
    -		{"ETHERTYPE_RCL", Const, 1},
    -		{"ETHERTYPE_RDP", Const, 1},
    -		{"ETHERTYPE_RETIX", Const, 1},
    -		{"ETHERTYPE_REVARP", Const, 1},
    -		{"ETHERTYPE_SCA", Const, 1},
    -		{"ETHERTYPE_SECTRA", Const, 1},
    -		{"ETHERTYPE_SECUREDATA", Const, 1},
    -		{"ETHERTYPE_SGITW", Const, 1},
    -		{"ETHERTYPE_SG_BOUNCE", Const, 1},
    -		{"ETHERTYPE_SG_DIAG", Const, 1},
    -		{"ETHERTYPE_SG_NETGAMES", Const, 1},
    -		{"ETHERTYPE_SG_RESV", Const, 1},
    -		{"ETHERTYPE_SIMNET", Const, 1},
    -		{"ETHERTYPE_SLOW", Const, 1},
    -		{"ETHERTYPE_SLOWPROTOCOLS", Const, 1},
    -		{"ETHERTYPE_SNA", Const, 1},
    -		{"ETHERTYPE_SNMP", Const, 1},
    -		{"ETHERTYPE_SONIX", Const, 1},
    -		{"ETHERTYPE_SPIDER", Const, 1},
    -		{"ETHERTYPE_SPRITE", Const, 1},
    -		{"ETHERTYPE_STP", Const, 1},
    -		{"ETHERTYPE_TALARIS", Const, 1},
    -		{"ETHERTYPE_TALARISMC", Const, 1},
    -		{"ETHERTYPE_TCPCOMP", Const, 1},
    -		{"ETHERTYPE_TCPSM", Const, 1},
    -		{"ETHERTYPE_TEC", Const, 1},
    -		{"ETHERTYPE_TIGAN", Const, 1},
    -		{"ETHERTYPE_TRAIL", Const, 1},
    -		{"ETHERTYPE_TRANSETHER", Const, 1},
    -		{"ETHERTYPE_TYMSHARE", Const, 1},
    -		{"ETHERTYPE_UBBST", Const, 1},
    -		{"ETHERTYPE_UBDEBUG", Const, 1},
    -		{"ETHERTYPE_UBDIAGLOOP", Const, 1},
    -		{"ETHERTYPE_UBDL", Const, 1},
    -		{"ETHERTYPE_UBNIU", Const, 1},
    -		{"ETHERTYPE_UBNMC", Const, 1},
    -		{"ETHERTYPE_VALID", Const, 1},
    -		{"ETHERTYPE_VARIAN", Const, 1},
    -		{"ETHERTYPE_VAXELN", Const, 1},
    -		{"ETHERTYPE_VEECO", Const, 1},
    -		{"ETHERTYPE_VEXP", Const, 1},
    -		{"ETHERTYPE_VGLAB", Const, 1},
    -		{"ETHERTYPE_VINES", Const, 1},
    -		{"ETHERTYPE_VINESECHO", Const, 1},
    -		{"ETHERTYPE_VINESLOOP", Const, 1},
    -		{"ETHERTYPE_VITAL", Const, 1},
    -		{"ETHERTYPE_VLAN", Const, 1},
    -		{"ETHERTYPE_VLTLMAN", Const, 1},
    -		{"ETHERTYPE_VPROD", Const, 1},
    -		{"ETHERTYPE_VURESERVED", Const, 1},
    -		{"ETHERTYPE_WATERLOO", Const, 1},
    -		{"ETHERTYPE_WELLFLEET", Const, 1},
    -		{"ETHERTYPE_X25", Const, 1},
    -		{"ETHERTYPE_X75", Const, 1},
    -		{"ETHERTYPE_XNSSM", Const, 1},
    -		{"ETHERTYPE_XTP", Const, 1},
    -		{"ETHER_ADDR_LEN", Const, 1},
    -		{"ETHER_ALIGN", Const, 1},
    -		{"ETHER_CRC_LEN", Const, 1},
    -		{"ETHER_CRC_POLY_BE", Const, 1},
    -		{"ETHER_CRC_POLY_LE", Const, 1},
    -		{"ETHER_HDR_LEN", Const, 1},
    -		{"ETHER_MAX_DIX_LEN", Const, 1},
    -		{"ETHER_MAX_LEN", Const, 1},
    -		{"ETHER_MAX_LEN_JUMBO", Const, 1},
    -		{"ETHER_MIN_LEN", Const, 1},
    -		{"ETHER_PPPOE_ENCAP_LEN", Const, 1},
    -		{"ETHER_TYPE_LEN", Const, 1},
    -		{"ETHER_VLAN_ENCAP_LEN", Const, 1},
    -		{"ETH_P_1588", Const, 0},
    -		{"ETH_P_8021Q", Const, 0},
    -		{"ETH_P_802_2", Const, 0},
    -		{"ETH_P_802_3", Const, 0},
    -		{"ETH_P_AARP", Const, 0},
    -		{"ETH_P_ALL", Const, 0},
    -		{"ETH_P_AOE", Const, 0},
    -		{"ETH_P_ARCNET", Const, 0},
    -		{"ETH_P_ARP", Const, 0},
    -		{"ETH_P_ATALK", Const, 0},
    -		{"ETH_P_ATMFATE", Const, 0},
    -		{"ETH_P_ATMMPOA", Const, 0},
    -		{"ETH_P_AX25", Const, 0},
    -		{"ETH_P_BPQ", Const, 0},
    -		{"ETH_P_CAIF", Const, 0},
    -		{"ETH_P_CAN", Const, 0},
    -		{"ETH_P_CONTROL", Const, 0},
    -		{"ETH_P_CUST", Const, 0},
    -		{"ETH_P_DDCMP", Const, 0},
    -		{"ETH_P_DEC", Const, 0},
    -		{"ETH_P_DIAG", Const, 0},
    -		{"ETH_P_DNA_DL", Const, 0},
    -		{"ETH_P_DNA_RC", Const, 0},
    -		{"ETH_P_DNA_RT", Const, 0},
    -		{"ETH_P_DSA", Const, 0},
    -		{"ETH_P_ECONET", Const, 0},
    -		{"ETH_P_EDSA", Const, 0},
    -		{"ETH_P_FCOE", Const, 0},
    -		{"ETH_P_FIP", Const, 0},
    -		{"ETH_P_HDLC", Const, 0},
    -		{"ETH_P_IEEE802154", Const, 0},
    -		{"ETH_P_IEEEPUP", Const, 0},
    -		{"ETH_P_IEEEPUPAT", Const, 0},
    -		{"ETH_P_IP", Const, 0},
    -		{"ETH_P_IPV6", Const, 0},
    -		{"ETH_P_IPX", Const, 0},
    -		{"ETH_P_IRDA", Const, 0},
    -		{"ETH_P_LAT", Const, 0},
    -		{"ETH_P_LINK_CTL", Const, 0},
    -		{"ETH_P_LOCALTALK", Const, 0},
    -		{"ETH_P_LOOP", Const, 0},
    -		{"ETH_P_MOBITEX", Const, 0},
    -		{"ETH_P_MPLS_MC", Const, 0},
    -		{"ETH_P_MPLS_UC", Const, 0},
    -		{"ETH_P_PAE", Const, 0},
    -		{"ETH_P_PAUSE", Const, 0},
    -		{"ETH_P_PHONET", Const, 0},
    -		{"ETH_P_PPPTALK", Const, 0},
    -		{"ETH_P_PPP_DISC", Const, 0},
    -		{"ETH_P_PPP_MP", Const, 0},
    -		{"ETH_P_PPP_SES", Const, 0},
    -		{"ETH_P_PUP", Const, 0},
    -		{"ETH_P_PUPAT", Const, 0},
    -		{"ETH_P_RARP", Const, 0},
    -		{"ETH_P_SCA", Const, 0},
    -		{"ETH_P_SLOW", Const, 0},
    -		{"ETH_P_SNAP", Const, 0},
    -		{"ETH_P_TEB", Const, 0},
    -		{"ETH_P_TIPC", Const, 0},
    -		{"ETH_P_TRAILER", Const, 0},
    -		{"ETH_P_TR_802_2", Const, 0},
    -		{"ETH_P_WAN_PPP", Const, 0},
    -		{"ETH_P_WCCP", Const, 0},
    -		{"ETH_P_X25", Const, 0},
    -		{"ETIME", Const, 0},
    -		{"ETIMEDOUT", Const, 0},
    -		{"ETOOMANYREFS", Const, 0},
    -		{"ETXTBSY", Const, 0},
    -		{"EUCLEAN", Const, 0},
    -		{"EUNATCH", Const, 0},
    -		{"EUSERS", Const, 0},
    -		{"EVFILT_AIO", Const, 0},
    -		{"EVFILT_FS", Const, 0},
    -		{"EVFILT_LIO", Const, 0},
    -		{"EVFILT_MACHPORT", Const, 0},
    -		{"EVFILT_PROC", Const, 0},
    -		{"EVFILT_READ", Const, 0},
    -		{"EVFILT_SIGNAL", Const, 0},
    -		{"EVFILT_SYSCOUNT", Const, 0},
    -		{"EVFILT_THREADMARKER", Const, 0},
    -		{"EVFILT_TIMER", Const, 0},
    -		{"EVFILT_USER", Const, 0},
    -		{"EVFILT_VM", Const, 0},
    -		{"EVFILT_VNODE", Const, 0},
    -		{"EVFILT_WRITE", Const, 0},
    -		{"EV_ADD", Const, 0},
    -		{"EV_CLEAR", Const, 0},
    -		{"EV_DELETE", Const, 0},
    -		{"EV_DISABLE", Const, 0},
    -		{"EV_DISPATCH", Const, 0},
    -		{"EV_DROP", Const, 3},
    -		{"EV_ENABLE", Const, 0},
    -		{"EV_EOF", Const, 0},
    -		{"EV_ERROR", Const, 0},
    -		{"EV_FLAG0", Const, 0},
    -		{"EV_FLAG1", Const, 0},
    -		{"EV_ONESHOT", Const, 0},
    -		{"EV_OOBAND", Const, 0},
    -		{"EV_POLL", Const, 0},
    -		{"EV_RECEIPT", Const, 0},
    -		{"EV_SYSFLAGS", Const, 0},
    -		{"EWINDOWS", Const, 0},
    -		{"EWOULDBLOCK", Const, 0},
    -		{"EXDEV", Const, 0},
    -		{"EXFULL", Const, 0},
    -		{"EXTA", Const, 0},
    -		{"EXTB", Const, 0},
    -		{"EXTPROC", Const, 0},
    -		{"Environ", Func, 0},
    -		{"EpollCreate", Func, 0},
    -		{"EpollCreate1", Func, 0},
    -		{"EpollCtl", Func, 0},
    -		{"EpollEvent", Type, 0},
    -		{"EpollEvent.Events", Field, 0},
    -		{"EpollEvent.Fd", Field, 0},
    -		{"EpollEvent.Pad", Field, 0},
    -		{"EpollEvent.PadFd", Field, 0},
    -		{"EpollWait", Func, 0},
    -		{"Errno", Type, 0},
    -		{"EscapeArg", Func, 0},
    -		{"Exchangedata", Func, 0},
    -		{"Exec", Func, 0},
    -		{"Exit", Func, 0},
    -		{"ExitProcess", Func, 0},
    -		{"FD_CLOEXEC", Const, 0},
    -		{"FD_SETSIZE", Const, 0},
    -		{"FILE_ACTION_ADDED", Const, 0},
    -		{"FILE_ACTION_MODIFIED", Const, 0},
    -		{"FILE_ACTION_REMOVED", Const, 0},
    -		{"FILE_ACTION_RENAMED_NEW_NAME", Const, 0},
    -		{"FILE_ACTION_RENAMED_OLD_NAME", Const, 0},
    -		{"FILE_APPEND_DATA", Const, 0},
    -		{"FILE_ATTRIBUTE_ARCHIVE", Const, 0},
    -		{"FILE_ATTRIBUTE_DIRECTORY", Const, 0},
    -		{"FILE_ATTRIBUTE_HIDDEN", Const, 0},
    -		{"FILE_ATTRIBUTE_NORMAL", Const, 0},
    -		{"FILE_ATTRIBUTE_READONLY", Const, 0},
    -		{"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4},
    -		{"FILE_ATTRIBUTE_SYSTEM", Const, 0},
    -		{"FILE_BEGIN", Const, 0},
    -		{"FILE_CURRENT", Const, 0},
    -		{"FILE_END", Const, 0},
    -		{"FILE_FLAG_BACKUP_SEMANTICS", Const, 0},
    -		{"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4},
    -		{"FILE_FLAG_OVERLAPPED", Const, 0},
    -		{"FILE_LIST_DIRECTORY", Const, 0},
    -		{"FILE_MAP_COPY", Const, 0},
    -		{"FILE_MAP_EXECUTE", Const, 0},
    -		{"FILE_MAP_READ", Const, 0},
    -		{"FILE_MAP_WRITE", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_CREATION", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0},
    -		{"FILE_NOTIFY_CHANGE_SIZE", Const, 0},
    -		{"FILE_SHARE_DELETE", Const, 0},
    -		{"FILE_SHARE_READ", Const, 0},
    -		{"FILE_SHARE_WRITE", Const, 0},
    -		{"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2},
    -		{"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2},
    -		{"FILE_TYPE_CHAR", Const, 0},
    -		{"FILE_TYPE_DISK", Const, 0},
    -		{"FILE_TYPE_PIPE", Const, 0},
    -		{"FILE_TYPE_REMOTE", Const, 0},
    -		{"FILE_TYPE_UNKNOWN", Const, 0},
    -		{"FILE_WRITE_ATTRIBUTES", Const, 0},
    -		{"FLUSHO", Const, 0},
    -		{"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0},
    -		{"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0},
    -		{"FORMAT_MESSAGE_FROM_HMODULE", Const, 0},
    -		{"FORMAT_MESSAGE_FROM_STRING", Const, 0},
    -		{"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0},
    -		{"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0},
    -		{"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0},
    -		{"FSCTL_GET_REPARSE_POINT", Const, 4},
    -		{"F_ADDFILESIGS", Const, 0},
    -		{"F_ADDSIGS", Const, 0},
    -		{"F_ALLOCATEALL", Const, 0},
    -		{"F_ALLOCATECONTIG", Const, 0},
    -		{"F_CANCEL", Const, 0},
    -		{"F_CHKCLEAN", Const, 0},
    -		{"F_CLOSEM", Const, 1},
    -		{"F_DUP2FD", Const, 0},
    -		{"F_DUP2FD_CLOEXEC", Const, 1},
    -		{"F_DUPFD", Const, 0},
    -		{"F_DUPFD_CLOEXEC", Const, 0},
    -		{"F_EXLCK", Const, 0},
    -		{"F_FINDSIGS", Const, 16},
    -		{"F_FLUSH_DATA", Const, 0},
    -		{"F_FREEZE_FS", Const, 0},
    -		{"F_FSCTL", Const, 1},
    -		{"F_FSDIRMASK", Const, 1},
    -		{"F_FSIN", Const, 1},
    -		{"F_FSINOUT", Const, 1},
    -		{"F_FSOUT", Const, 1},
    -		{"F_FSPRIV", Const, 1},
    -		{"F_FSVOID", Const, 1},
    -		{"F_FULLFSYNC", Const, 0},
    -		{"F_GETCODEDIR", Const, 16},
    -		{"F_GETFD", Const, 0},
    -		{"F_GETFL", Const, 0},
    -		{"F_GETLEASE", Const, 0},
    -		{"F_GETLK", Const, 0},
    -		{"F_GETLK64", Const, 0},
    -		{"F_GETLKPID", Const, 0},
    -		{"F_GETNOSIGPIPE", Const, 0},
    -		{"F_GETOWN", Const, 0},
    -		{"F_GETOWN_EX", Const, 0},
    -		{"F_GETPATH", Const, 0},
    -		{"F_GETPATH_MTMINFO", Const, 0},
    -		{"F_GETPIPE_SZ", Const, 0},
    -		{"F_GETPROTECTIONCLASS", Const, 0},
    -		{"F_GETPROTECTIONLEVEL", Const, 16},
    -		{"F_GETSIG", Const, 0},
    -		{"F_GLOBAL_NOCACHE", Const, 0},
    -		{"F_LOCK", Const, 0},
    -		{"F_LOG2PHYS", Const, 0},
    -		{"F_LOG2PHYS_EXT", Const, 0},
    -		{"F_MARKDEPENDENCY", Const, 0},
    -		{"F_MAXFD", Const, 1},
    -		{"F_NOCACHE", Const, 0},
    -		{"F_NODIRECT", Const, 0},
    -		{"F_NOTIFY", Const, 0},
    -		{"F_OGETLK", Const, 0},
    -		{"F_OK", Const, 0},
    -		{"F_OSETLK", Const, 0},
    -		{"F_OSETLKW", Const, 0},
    -		{"F_PARAM_MASK", Const, 1},
    -		{"F_PARAM_MAX", Const, 1},
    -		{"F_PATHPKG_CHECK", Const, 0},
    -		{"F_PEOFPOSMODE", Const, 0},
    -		{"F_PREALLOCATE", Const, 0},
    -		{"F_RDADVISE", Const, 0},
    -		{"F_RDAHEAD", Const, 0},
    -		{"F_RDLCK", Const, 0},
    -		{"F_READAHEAD", Const, 0},
    -		{"F_READBOOTSTRAP", Const, 0},
    -		{"F_SETBACKINGSTORE", Const, 0},
    -		{"F_SETFD", Const, 0},
    -		{"F_SETFL", Const, 0},
    -		{"F_SETLEASE", Const, 0},
    -		{"F_SETLK", Const, 0},
    -		{"F_SETLK64", Const, 0},
    -		{"F_SETLKW", Const, 0},
    -		{"F_SETLKW64", Const, 0},
    -		{"F_SETLKWTIMEOUT", Const, 16},
    -		{"F_SETLK_REMOTE", Const, 0},
    -		{"F_SETNOSIGPIPE", Const, 0},
    -		{"F_SETOWN", Const, 0},
    -		{"F_SETOWN_EX", Const, 0},
    -		{"F_SETPIPE_SZ", Const, 0},
    -		{"F_SETPROTECTIONCLASS", Const, 0},
    -		{"F_SETSIG", Const, 0},
    -		{"F_SETSIZE", Const, 0},
    -		{"F_SHLCK", Const, 0},
    -		{"F_SINGLE_WRITER", Const, 16},
    -		{"F_TEST", Const, 0},
    -		{"F_THAW_FS", Const, 0},
    -		{"F_TLOCK", Const, 0},
    -		{"F_TRANSCODEKEY", Const, 16},
    -		{"F_ULOCK", Const, 0},
    -		{"F_UNLCK", Const, 0},
    -		{"F_UNLCKSYS", Const, 0},
    -		{"F_VOLPOSMODE", Const, 0},
    -		{"F_WRITEBOOTSTRAP", Const, 0},
    -		{"F_WRLCK", Const, 0},
    -		{"Faccessat", Func, 0},
    -		{"Fallocate", Func, 0},
    -		{"Fbootstraptransfer_t", Type, 0},
    -		{"Fbootstraptransfer_t.Buffer", Field, 0},
    -		{"Fbootstraptransfer_t.Length", Field, 0},
    -		{"Fbootstraptransfer_t.Offset", Field, 0},
    -		{"Fchdir", Func, 0},
    -		{"Fchflags", Func, 0},
    -		{"Fchmod", Func, 0},
    -		{"Fchmodat", Func, 0},
    -		{"Fchown", Func, 0},
    -		{"Fchownat", Func, 0},
    -		{"FcntlFlock", Func, 3},
    -		{"FdSet", Type, 0},
    -		{"FdSet.Bits", Field, 0},
    -		{"FdSet.X__fds_bits", Field, 0},
    -		{"Fdatasync", Func, 0},
    -		{"FileNotifyInformation", Type, 0},
    -		{"FileNotifyInformation.Action", Field, 0},
    -		{"FileNotifyInformation.FileName", Field, 0},
    -		{"FileNotifyInformation.FileNameLength", Field, 0},
    -		{"FileNotifyInformation.NextEntryOffset", Field, 0},
    -		{"Filetime", Type, 0},
    -		{"Filetime.HighDateTime", Field, 0},
    -		{"Filetime.LowDateTime", Field, 0},
    -		{"FindClose", Func, 0},
    -		{"FindFirstFile", Func, 0},
    -		{"FindNextFile", Func, 0},
    -		{"Flock", Func, 0},
    -		{"Flock_t", Type, 0},
    -		{"Flock_t.Len", Field, 0},
    -		{"Flock_t.Pad_cgo_0", Field, 0},
    -		{"Flock_t.Pad_cgo_1", Field, 3},
    -		{"Flock_t.Pid", Field, 0},
    -		{"Flock_t.Start", Field, 0},
    -		{"Flock_t.Sysid", Field, 0},
    -		{"Flock_t.Type", Field, 0},
    -		{"Flock_t.Whence", Field, 0},
    -		{"FlushBpf", Func, 0},
    -		{"FlushFileBuffers", Func, 0},
    -		{"FlushViewOfFile", Func, 0},
    -		{"ForkExec", Func, 0},
    -		{"ForkLock", Var, 0},
    -		{"FormatMessage", Func, 0},
    -		{"Fpathconf", Func, 0},
    -		{"FreeAddrInfoW", Func, 1},
    -		{"FreeEnvironmentStrings", Func, 0},
    -		{"FreeLibrary", Func, 0},
    -		{"Fsid", Type, 0},
    -		{"Fsid.Val", Field, 0},
    -		{"Fsid.X__fsid_val", Field, 2},
    -		{"Fsid.X__val", Field, 0},
    -		{"Fstat", Func, 0},
    -		{"Fstatat", Func, 12},
    -		{"Fstatfs", Func, 0},
    -		{"Fstore_t", Type, 0},
    -		{"Fstore_t.Bytesalloc", Field, 0},
    -		{"Fstore_t.Flags", Field, 0},
    -		{"Fstore_t.Length", Field, 0},
    -		{"Fstore_t.Offset", Field, 0},
    -		{"Fstore_t.Posmode", Field, 0},
    -		{"Fsync", Func, 0},
    -		{"Ftruncate", Func, 0},
    -		{"FullPath", Func, 4},
    -		{"Futimes", Func, 0},
    -		{"Futimesat", Func, 0},
    -		{"GENERIC_ALL", Const, 0},
    -		{"GENERIC_EXECUTE", Const, 0},
    -		{"GENERIC_READ", Const, 0},
    -		{"GENERIC_WRITE", Const, 0},
    -		{"GUID", Type, 1},
    -		{"GUID.Data1", Field, 1},
    -		{"GUID.Data2", Field, 1},
    -		{"GUID.Data3", Field, 1},
    -		{"GUID.Data4", Field, 1},
    -		{"GetAcceptExSockaddrs", Func, 0},
    -		{"GetAdaptersInfo", Func, 0},
    -		{"GetAddrInfoW", Func, 1},
    -		{"GetCommandLine", Func, 0},
    -		{"GetComputerName", Func, 0},
    -		{"GetConsoleMode", Func, 1},
    -		{"GetCurrentDirectory", Func, 0},
    -		{"GetCurrentProcess", Func, 0},
    -		{"GetEnvironmentStrings", Func, 0},
    -		{"GetEnvironmentVariable", Func, 0},
    -		{"GetExitCodeProcess", Func, 0},
    -		{"GetFileAttributes", Func, 0},
    -		{"GetFileAttributesEx", Func, 0},
    -		{"GetFileExInfoStandard", Const, 0},
    -		{"GetFileExMaxInfoLevel", Const, 0},
    -		{"GetFileInformationByHandle", Func, 0},
    -		{"GetFileType", Func, 0},
    -		{"GetFullPathName", Func, 0},
    -		{"GetHostByName", Func, 0},
    -		{"GetIfEntry", Func, 0},
    -		{"GetLastError", Func, 0},
    -		{"GetLengthSid", Func, 0},
    -		{"GetLongPathName", Func, 0},
    -		{"GetProcAddress", Func, 0},
    -		{"GetProcessTimes", Func, 0},
    -		{"GetProtoByName", Func, 0},
    -		{"GetQueuedCompletionStatus", Func, 0},
    -		{"GetServByName", Func, 0},
    -		{"GetShortPathName", Func, 0},
    -		{"GetStartupInfo", Func, 0},
    -		{"GetStdHandle", Func, 0},
    -		{"GetSystemTimeAsFileTime", Func, 0},
    -		{"GetTempPath", Func, 0},
    -		{"GetTimeZoneInformation", Func, 0},
    -		{"GetTokenInformation", Func, 0},
    -		{"GetUserNameEx", Func, 0},
    -		{"GetUserProfileDirectory", Func, 0},
    -		{"GetVersion", Func, 0},
    -		{"Getcwd", Func, 0},
    -		{"Getdents", Func, 0},
    -		{"Getdirentries", Func, 0},
    -		{"Getdtablesize", Func, 0},
    -		{"Getegid", Func, 0},
    -		{"Getenv", Func, 0},
    -		{"Geteuid", Func, 0},
    -		{"Getfsstat", Func, 0},
    -		{"Getgid", Func, 0},
    -		{"Getgroups", Func, 0},
    -		{"Getpagesize", Func, 0},
    -		{"Getpeername", Func, 0},
    -		{"Getpgid", Func, 0},
    -		{"Getpgrp", Func, 0},
    -		{"Getpid", Func, 0},
    -		{"Getppid", Func, 0},
    -		{"Getpriority", Func, 0},
    -		{"Getrlimit", Func, 0},
    -		{"Getrusage", Func, 0},
    -		{"Getsid", Func, 0},
    -		{"Getsockname", Func, 0},
    -		{"Getsockopt", Func, 1},
    -		{"GetsockoptByte", Func, 0},
    -		{"GetsockoptICMPv6Filter", Func, 2},
    -		{"GetsockoptIPMreq", Func, 0},
    -		{"GetsockoptIPMreqn", Func, 0},
    -		{"GetsockoptIPv6MTUInfo", Func, 2},
    -		{"GetsockoptIPv6Mreq", Func, 0},
    -		{"GetsockoptInet4Addr", Func, 0},
    -		{"GetsockoptInt", Func, 0},
    -		{"GetsockoptUcred", Func, 1},
    -		{"Gettid", Func, 0},
    -		{"Gettimeofday", Func, 0},
    -		{"Getuid", Func, 0},
    -		{"Getwd", Func, 0},
    -		{"Getxattr", Func, 1},
    -		{"HANDLE_FLAG_INHERIT", Const, 0},
    -		{"HKEY_CLASSES_ROOT", Const, 0},
    -		{"HKEY_CURRENT_CONFIG", Const, 0},
    -		{"HKEY_CURRENT_USER", Const, 0},
    -		{"HKEY_DYN_DATA", Const, 0},
    -		{"HKEY_LOCAL_MACHINE", Const, 0},
    -		{"HKEY_PERFORMANCE_DATA", Const, 0},
    -		{"HKEY_USERS", Const, 0},
    -		{"HUPCL", Const, 0},
    -		{"Handle", Type, 0},
    -		{"Hostent", Type, 0},
    -		{"Hostent.AddrList", Field, 0},
    -		{"Hostent.AddrType", Field, 0},
    -		{"Hostent.Aliases", Field, 0},
    -		{"Hostent.Length", Field, 0},
    -		{"Hostent.Name", Field, 0},
    -		{"ICANON", Const, 0},
    -		{"ICMP6_FILTER", Const, 2},
    -		{"ICMPV6_FILTER", Const, 2},
    -		{"ICMPv6Filter", Type, 2},
    -		{"ICMPv6Filter.Data", Field, 2},
    -		{"ICMPv6Filter.Filt", Field, 2},
    -		{"ICRNL", Const, 0},
    -		{"IEXTEN", Const, 0},
    -		{"IFAN_ARRIVAL", Const, 1},
    -		{"IFAN_DEPARTURE", Const, 1},
    -		{"IFA_ADDRESS", Const, 0},
    -		{"IFA_ANYCAST", Const, 0},
    -		{"IFA_BROADCAST", Const, 0},
    -		{"IFA_CACHEINFO", Const, 0},
    -		{"IFA_F_DADFAILED", Const, 0},
    -		{"IFA_F_DEPRECATED", Const, 0},
    -		{"IFA_F_HOMEADDRESS", Const, 0},
    -		{"IFA_F_NODAD", Const, 0},
    -		{"IFA_F_OPTIMISTIC", Const, 0},
    -		{"IFA_F_PERMANENT", Const, 0},
    -		{"IFA_F_SECONDARY", Const, 0},
    -		{"IFA_F_TEMPORARY", Const, 0},
    -		{"IFA_F_TENTATIVE", Const, 0},
    -		{"IFA_LABEL", Const, 0},
    -		{"IFA_LOCAL", Const, 0},
    -		{"IFA_MAX", Const, 0},
    -		{"IFA_MULTICAST", Const, 0},
    -		{"IFA_ROUTE", Const, 1},
    -		{"IFA_UNSPEC", Const, 0},
    -		{"IFF_ALLMULTI", Const, 0},
    -		{"IFF_ALTPHYS", Const, 0},
    -		{"IFF_AUTOMEDIA", Const, 0},
    -		{"IFF_BROADCAST", Const, 0},
    -		{"IFF_CANTCHANGE", Const, 0},
    -		{"IFF_CANTCONFIG", Const, 1},
    -		{"IFF_DEBUG", Const, 0},
    -		{"IFF_DRV_OACTIVE", Const, 0},
    -		{"IFF_DRV_RUNNING", Const, 0},
    -		{"IFF_DYING", Const, 0},
    -		{"IFF_DYNAMIC", Const, 0},
    -		{"IFF_LINK0", Const, 0},
    -		{"IFF_LINK1", Const, 0},
    -		{"IFF_LINK2", Const, 0},
    -		{"IFF_LOOPBACK", Const, 0},
    -		{"IFF_MASTER", Const, 0},
    -		{"IFF_MONITOR", Const, 0},
    -		{"IFF_MULTICAST", Const, 0},
    -		{"IFF_NOARP", Const, 0},
    -		{"IFF_NOTRAILERS", Const, 0},
    -		{"IFF_NO_PI", Const, 0},
    -		{"IFF_OACTIVE", Const, 0},
    -		{"IFF_ONE_QUEUE", Const, 0},
    -		{"IFF_POINTOPOINT", Const, 0},
    -		{"IFF_POINTTOPOINT", Const, 0},
    -		{"IFF_PORTSEL", Const, 0},
    -		{"IFF_PPROMISC", Const, 0},
    -		{"IFF_PROMISC", Const, 0},
    -		{"IFF_RENAMING", Const, 0},
    -		{"IFF_RUNNING", Const, 0},
    -		{"IFF_SIMPLEX", Const, 0},
    -		{"IFF_SLAVE", Const, 0},
    -		{"IFF_SMART", Const, 0},
    -		{"IFF_STATICARP", Const, 0},
    -		{"IFF_TAP", Const, 0},
    -		{"IFF_TUN", Const, 0},
    -		{"IFF_TUN_EXCL", Const, 0},
    -		{"IFF_UP", Const, 0},
    -		{"IFF_VNET_HDR", Const, 0},
    -		{"IFLA_ADDRESS", Const, 0},
    -		{"IFLA_BROADCAST", Const, 0},
    -		{"IFLA_COST", Const, 0},
    -		{"IFLA_IFALIAS", Const, 0},
    -		{"IFLA_IFNAME", Const, 0},
    -		{"IFLA_LINK", Const, 0},
    -		{"IFLA_LINKINFO", Const, 0},
    -		{"IFLA_LINKMODE", Const, 0},
    -		{"IFLA_MAP", Const, 0},
    -		{"IFLA_MASTER", Const, 0},
    -		{"IFLA_MAX", Const, 0},
    -		{"IFLA_MTU", Const, 0},
    -		{"IFLA_NET_NS_PID", Const, 0},
    -		{"IFLA_OPERSTATE", Const, 0},
    -		{"IFLA_PRIORITY", Const, 0},
    -		{"IFLA_PROTINFO", Const, 0},
    -		{"IFLA_QDISC", Const, 0},
    -		{"IFLA_STATS", Const, 0},
    -		{"IFLA_TXQLEN", Const, 0},
    -		{"IFLA_UNSPEC", Const, 0},
    -		{"IFLA_WEIGHT", Const, 0},
    -		{"IFLA_WIRELESS", Const, 0},
    -		{"IFNAMSIZ", Const, 0},
    -		{"IFT_1822", Const, 0},
    -		{"IFT_A12MPPSWITCH", Const, 0},
    -		{"IFT_AAL2", Const, 0},
    -		{"IFT_AAL5", Const, 0},
    -		{"IFT_ADSL", Const, 0},
    -		{"IFT_AFLANE8023", Const, 0},
    -		{"IFT_AFLANE8025", Const, 0},
    -		{"IFT_ARAP", Const, 0},
    -		{"IFT_ARCNET", Const, 0},
    -		{"IFT_ARCNETPLUS", Const, 0},
    -		{"IFT_ASYNC", Const, 0},
    -		{"IFT_ATM", Const, 0},
    -		{"IFT_ATMDXI", Const, 0},
    -		{"IFT_ATMFUNI", Const, 0},
    -		{"IFT_ATMIMA", Const, 0},
    -		{"IFT_ATMLOGICAL", Const, 0},
    -		{"IFT_ATMRADIO", Const, 0},
    -		{"IFT_ATMSUBINTERFACE", Const, 0},
    -		{"IFT_ATMVCIENDPT", Const, 0},
    -		{"IFT_ATMVIRTUAL", Const, 0},
    -		{"IFT_BGPPOLICYACCOUNTING", Const, 0},
    -		{"IFT_BLUETOOTH", Const, 1},
    -		{"IFT_BRIDGE", Const, 0},
    -		{"IFT_BSC", Const, 0},
    -		{"IFT_CARP", Const, 0},
    -		{"IFT_CCTEMUL", Const, 0},
    -		{"IFT_CELLULAR", Const, 0},
    -		{"IFT_CEPT", Const, 0},
    -		{"IFT_CES", Const, 0},
    -		{"IFT_CHANNEL", Const, 0},
    -		{"IFT_CNR", Const, 0},
    -		{"IFT_COFFEE", Const, 0},
    -		{"IFT_COMPOSITELINK", Const, 0},
    -		{"IFT_DCN", Const, 0},
    -		{"IFT_DIGITALPOWERLINE", Const, 0},
    -		{"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0},
    -		{"IFT_DLSW", Const, 0},
    -		{"IFT_DOCSCABLEDOWNSTREAM", Const, 0},
    -		{"IFT_DOCSCABLEMACLAYER", Const, 0},
    -		{"IFT_DOCSCABLEUPSTREAM", Const, 0},
    -		{"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1},
    -		{"IFT_DS0", Const, 0},
    -		{"IFT_DS0BUNDLE", Const, 0},
    -		{"IFT_DS1FDL", Const, 0},
    -		{"IFT_DS3", Const, 0},
    -		{"IFT_DTM", Const, 0},
    -		{"IFT_DUMMY", Const, 1},
    -		{"IFT_DVBASILN", Const, 0},
    -		{"IFT_DVBASIOUT", Const, 0},
    -		{"IFT_DVBRCCDOWNSTREAM", Const, 0},
    -		{"IFT_DVBRCCMACLAYER", Const, 0},
    -		{"IFT_DVBRCCUPSTREAM", Const, 0},
    -		{"IFT_ECONET", Const, 1},
    -		{"IFT_ENC", Const, 0},
    -		{"IFT_EON", Const, 0},
    -		{"IFT_EPLRS", Const, 0},
    -		{"IFT_ESCON", Const, 0},
    -		{"IFT_ETHER", Const, 0},
    -		{"IFT_FAITH", Const, 0},
    -		{"IFT_FAST", Const, 0},
    -		{"IFT_FASTETHER", Const, 0},
    -		{"IFT_FASTETHERFX", Const, 0},
    -		{"IFT_FDDI", Const, 0},
    -		{"IFT_FIBRECHANNEL", Const, 0},
    -		{"IFT_FRAMERELAYINTERCONNECT", Const, 0},
    -		{"IFT_FRAMERELAYMPI", Const, 0},
    -		{"IFT_FRDLCIENDPT", Const, 0},
    -		{"IFT_FRELAY", Const, 0},
    -		{"IFT_FRELAYDCE", Const, 0},
    -		{"IFT_FRF16MFRBUNDLE", Const, 0},
    -		{"IFT_FRFORWARD", Const, 0},
    -		{"IFT_G703AT2MB", Const, 0},
    -		{"IFT_G703AT64K", Const, 0},
    -		{"IFT_GIF", Const, 0},
    -		{"IFT_GIGABITETHERNET", Const, 0},
    -		{"IFT_GR303IDT", Const, 0},
    -		{"IFT_GR303RDT", Const, 0},
    -		{"IFT_H323GATEKEEPER", Const, 0},
    -		{"IFT_H323PROXY", Const, 0},
    -		{"IFT_HDH1822", Const, 0},
    -		{"IFT_HDLC", Const, 0},
    -		{"IFT_HDSL2", Const, 0},
    -		{"IFT_HIPERLAN2", Const, 0},
    -		{"IFT_HIPPI", Const, 0},
    -		{"IFT_HIPPIINTERFACE", Const, 0},
    -		{"IFT_HOSTPAD", Const, 0},
    -		{"IFT_HSSI", Const, 0},
    -		{"IFT_HY", Const, 0},
    -		{"IFT_IBM370PARCHAN", Const, 0},
    -		{"IFT_IDSL", Const, 0},
    -		{"IFT_IEEE1394", Const, 0},
    -		{"IFT_IEEE80211", Const, 0},
    -		{"IFT_IEEE80212", Const, 0},
    -		{"IFT_IEEE8023ADLAG", Const, 0},
    -		{"IFT_IFGSN", Const, 0},
    -		{"IFT_IMT", Const, 0},
    -		{"IFT_INFINIBAND", Const, 1},
    -		{"IFT_INTERLEAVE", Const, 0},
    -		{"IFT_IP", Const, 0},
    -		{"IFT_IPFORWARD", Const, 0},
    -		{"IFT_IPOVERATM", Const, 0},
    -		{"IFT_IPOVERCDLC", Const, 0},
    -		{"IFT_IPOVERCLAW", Const, 0},
    -		{"IFT_IPSWITCH", Const, 0},
    -		{"IFT_IPXIP", Const, 0},
    -		{"IFT_ISDN", Const, 0},
    -		{"IFT_ISDNBASIC", Const, 0},
    -		{"IFT_ISDNPRIMARY", Const, 0},
    -		{"IFT_ISDNS", Const, 0},
    -		{"IFT_ISDNU", Const, 0},
    -		{"IFT_ISO88022LLC", Const, 0},
    -		{"IFT_ISO88023", Const, 0},
    -		{"IFT_ISO88024", Const, 0},
    -		{"IFT_ISO88025", Const, 0},
    -		{"IFT_ISO88025CRFPINT", Const, 0},
    -		{"IFT_ISO88025DTR", Const, 0},
    -		{"IFT_ISO88025FIBER", Const, 0},
    -		{"IFT_ISO88026", Const, 0},
    -		{"IFT_ISUP", Const, 0},
    -		{"IFT_L2VLAN", Const, 0},
    -		{"IFT_L3IPVLAN", Const, 0},
    -		{"IFT_L3IPXVLAN", Const, 0},
    -		{"IFT_LAPB", Const, 0},
    -		{"IFT_LAPD", Const, 0},
    -		{"IFT_LAPF", Const, 0},
    -		{"IFT_LINEGROUP", Const, 1},
    -		{"IFT_LOCALTALK", Const, 0},
    -		{"IFT_LOOP", Const, 0},
    -		{"IFT_MEDIAMAILOVERIP", Const, 0},
    -		{"IFT_MFSIGLINK", Const, 0},
    -		{"IFT_MIOX25", Const, 0},
    -		{"IFT_MODEM", Const, 0},
    -		{"IFT_MPC", Const, 0},
    -		{"IFT_MPLS", Const, 0},
    -		{"IFT_MPLSTUNNEL", Const, 0},
    -		{"IFT_MSDSL", Const, 0},
    -		{"IFT_MVL", Const, 0},
    -		{"IFT_MYRINET", Const, 0},
    -		{"IFT_NFAS", Const, 0},
    -		{"IFT_NSIP", Const, 0},
    -		{"IFT_OPTICALCHANNEL", Const, 0},
    -		{"IFT_OPTICALTRANSPORT", Const, 0},
    -		{"IFT_OTHER", Const, 0},
    -		{"IFT_P10", Const, 0},
    -		{"IFT_P80", Const, 0},
    -		{"IFT_PARA", Const, 0},
    -		{"IFT_PDP", Const, 0},
    -		{"IFT_PFLOG", Const, 0},
    -		{"IFT_PFLOW", Const, 1},
    -		{"IFT_PFSYNC", Const, 0},
    -		{"IFT_PLC", Const, 0},
    -		{"IFT_PON155", Const, 1},
    -		{"IFT_PON622", Const, 1},
    -		{"IFT_POS", Const, 0},
    -		{"IFT_PPP", Const, 0},
    -		{"IFT_PPPMULTILINKBUNDLE", Const, 0},
    -		{"IFT_PROPATM", Const, 1},
    -		{"IFT_PROPBWAP2MP", Const, 0},
    -		{"IFT_PROPCNLS", Const, 0},
    -		{"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0},
    -		{"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0},
    -		{"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0},
    -		{"IFT_PROPMUX", Const, 0},
    -		{"IFT_PROPVIRTUAL", Const, 0},
    -		{"IFT_PROPWIRELESSP2P", Const, 0},
    -		{"IFT_PTPSERIAL", Const, 0},
    -		{"IFT_PVC", Const, 0},
    -		{"IFT_Q2931", Const, 1},
    -		{"IFT_QLLC", Const, 0},
    -		{"IFT_RADIOMAC", Const, 0},
    -		{"IFT_RADSL", Const, 0},
    -		{"IFT_REACHDSL", Const, 0},
    -		{"IFT_RFC1483", Const, 0},
    -		{"IFT_RS232", Const, 0},
    -		{"IFT_RSRB", Const, 0},
    -		{"IFT_SDLC", Const, 0},
    -		{"IFT_SDSL", Const, 0},
    -		{"IFT_SHDSL", Const, 0},
    -		{"IFT_SIP", Const, 0},
    -		{"IFT_SIPSIG", Const, 1},
    -		{"IFT_SIPTG", Const, 1},
    -		{"IFT_SLIP", Const, 0},
    -		{"IFT_SMDSDXI", Const, 0},
    -		{"IFT_SMDSICIP", Const, 0},
    -		{"IFT_SONET", Const, 0},
    -		{"IFT_SONETOVERHEADCHANNEL", Const, 0},
    -		{"IFT_SONETPATH", Const, 0},
    -		{"IFT_SONETVT", Const, 0},
    -		{"IFT_SRP", Const, 0},
    -		{"IFT_SS7SIGLINK", Const, 0},
    -		{"IFT_STACKTOSTACK", Const, 0},
    -		{"IFT_STARLAN", Const, 0},
    -		{"IFT_STF", Const, 0},
    -		{"IFT_T1", Const, 0},
    -		{"IFT_TDLC", Const, 0},
    -		{"IFT_TELINK", Const, 1},
    -		{"IFT_TERMPAD", Const, 0},
    -		{"IFT_TR008", Const, 0},
    -		{"IFT_TRANSPHDLC", Const, 0},
    -		{"IFT_TUNNEL", Const, 0},
    -		{"IFT_ULTRA", Const, 0},
    -		{"IFT_USB", Const, 0},
    -		{"IFT_V11", Const, 0},
    -		{"IFT_V35", Const, 0},
    -		{"IFT_V36", Const, 0},
    -		{"IFT_V37", Const, 0},
    -		{"IFT_VDSL", Const, 0},
    -		{"IFT_VIRTUALIPADDRESS", Const, 0},
    -		{"IFT_VIRTUALTG", Const, 1},
    -		{"IFT_VOICEDID", Const, 1},
    -		{"IFT_VOICEEM", Const, 0},
    -		{"IFT_VOICEEMFGD", Const, 1},
    -		{"IFT_VOICEENCAP", Const, 0},
    -		{"IFT_VOICEFGDEANA", Const, 1},
    -		{"IFT_VOICEFXO", Const, 0},
    -		{"IFT_VOICEFXS", Const, 0},
    -		{"IFT_VOICEOVERATM", Const, 0},
    -		{"IFT_VOICEOVERCABLE", Const, 1},
    -		{"IFT_VOICEOVERFRAMERELAY", Const, 0},
    -		{"IFT_VOICEOVERIP", Const, 0},
    -		{"IFT_X213", Const, 0},
    -		{"IFT_X25", Const, 0},
    -		{"IFT_X25DDN", Const, 0},
    -		{"IFT_X25HUNTGROUP", Const, 0},
    -		{"IFT_X25MLP", Const, 0},
    -		{"IFT_X25PLE", Const, 0},
    -		{"IFT_XETHER", Const, 0},
    -		{"IGNBRK", Const, 0},
    -		{"IGNCR", Const, 0},
    -		{"IGNORE", Const, 0},
    -		{"IGNPAR", Const, 0},
    -		{"IMAXBEL", Const, 0},
    -		{"INFINITE", Const, 0},
    -		{"INLCR", Const, 0},
    -		{"INPCK", Const, 0},
    -		{"INVALID_FILE_ATTRIBUTES", Const, 0},
    -		{"IN_ACCESS", Const, 0},
    -		{"IN_ALL_EVENTS", Const, 0},
    -		{"IN_ATTRIB", Const, 0},
    -		{"IN_CLASSA_HOST", Const, 0},
    -		{"IN_CLASSA_MAX", Const, 0},
    -		{"IN_CLASSA_NET", Const, 0},
    -		{"IN_CLASSA_NSHIFT", Const, 0},
    -		{"IN_CLASSB_HOST", Const, 0},
    -		{"IN_CLASSB_MAX", Const, 0},
    -		{"IN_CLASSB_NET", Const, 0},
    -		{"IN_CLASSB_NSHIFT", Const, 0},
    -		{"IN_CLASSC_HOST", Const, 0},
    -		{"IN_CLASSC_NET", Const, 0},
    -		{"IN_CLASSC_NSHIFT", Const, 0},
    -		{"IN_CLASSD_HOST", Const, 0},
    -		{"IN_CLASSD_NET", Const, 0},
    -		{"IN_CLASSD_NSHIFT", Const, 0},
    -		{"IN_CLOEXEC", Const, 0},
    -		{"IN_CLOSE", Const, 0},
    -		{"IN_CLOSE_NOWRITE", Const, 0},
    -		{"IN_CLOSE_WRITE", Const, 0},
    -		{"IN_CREATE", Const, 0},
    -		{"IN_DELETE", Const, 0},
    -		{"IN_DELETE_SELF", Const, 0},
    -		{"IN_DONT_FOLLOW", Const, 0},
    -		{"IN_EXCL_UNLINK", Const, 0},
    -		{"IN_IGNORED", Const, 0},
    -		{"IN_ISDIR", Const, 0},
    -		{"IN_LINKLOCALNETNUM", Const, 0},
    -		{"IN_LOOPBACKNET", Const, 0},
    -		{"IN_MASK_ADD", Const, 0},
    -		{"IN_MODIFY", Const, 0},
    -		{"IN_MOVE", Const, 0},
    -		{"IN_MOVED_FROM", Const, 0},
    -		{"IN_MOVED_TO", Const, 0},
    -		{"IN_MOVE_SELF", Const, 0},
    -		{"IN_NONBLOCK", Const, 0},
    -		{"IN_ONESHOT", Const, 0},
    -		{"IN_ONLYDIR", Const, 0},
    -		{"IN_OPEN", Const, 0},
    -		{"IN_Q_OVERFLOW", Const, 0},
    -		{"IN_RFC3021_HOST", Const, 1},
    -		{"IN_RFC3021_MASK", Const, 1},
    -		{"IN_RFC3021_NET", Const, 1},
    -		{"IN_RFC3021_NSHIFT", Const, 1},
    -		{"IN_UNMOUNT", Const, 0},
    -		{"IOC_IN", Const, 1},
    -		{"IOC_INOUT", Const, 1},
    -		{"IOC_OUT", Const, 1},
    -		{"IOC_VENDOR", Const, 3},
    -		{"IOC_WS2", Const, 1},
    -		{"IO_REPARSE_TAG_SYMLINK", Const, 4},
    -		{"IPMreq", Type, 0},
    -		{"IPMreq.Interface", Field, 0},
    -		{"IPMreq.Multiaddr", Field, 0},
    -		{"IPMreqn", Type, 0},
    -		{"IPMreqn.Address", Field, 0},
    -		{"IPMreqn.Ifindex", Field, 0},
    -		{"IPMreqn.Multiaddr", Field, 0},
    -		{"IPPROTO_3PC", Const, 0},
    -		{"IPPROTO_ADFS", Const, 0},
    -		{"IPPROTO_AH", Const, 0},
    -		{"IPPROTO_AHIP", Const, 0},
    -		{"IPPROTO_APES", Const, 0},
    -		{"IPPROTO_ARGUS", Const, 0},
    -		{"IPPROTO_AX25", Const, 0},
    -		{"IPPROTO_BHA", Const, 0},
    -		{"IPPROTO_BLT", Const, 0},
    -		{"IPPROTO_BRSATMON", Const, 0},
    -		{"IPPROTO_CARP", Const, 0},
    -		{"IPPROTO_CFTP", Const, 0},
    -		{"IPPROTO_CHAOS", Const, 0},
    -		{"IPPROTO_CMTP", Const, 0},
    -		{"IPPROTO_COMP", Const, 0},
    -		{"IPPROTO_CPHB", Const, 0},
    -		{"IPPROTO_CPNX", Const, 0},
    -		{"IPPROTO_DCCP", Const, 0},
    -		{"IPPROTO_DDP", Const, 0},
    -		{"IPPROTO_DGP", Const, 0},
    -		{"IPPROTO_DIVERT", Const, 0},
    -		{"IPPROTO_DIVERT_INIT", Const, 3},
    -		{"IPPROTO_DIVERT_RESP", Const, 3},
    -		{"IPPROTO_DONE", Const, 0},
    -		{"IPPROTO_DSTOPTS", Const, 0},
    -		{"IPPROTO_EGP", Const, 0},
    -		{"IPPROTO_EMCON", Const, 0},
    -		{"IPPROTO_ENCAP", Const, 0},
    -		{"IPPROTO_EON", Const, 0},
    -		{"IPPROTO_ESP", Const, 0},
    -		{"IPPROTO_ETHERIP", Const, 0},
    -		{"IPPROTO_FRAGMENT", Const, 0},
    -		{"IPPROTO_GGP", Const, 0},
    -		{"IPPROTO_GMTP", Const, 0},
    -		{"IPPROTO_GRE", Const, 0},
    -		{"IPPROTO_HELLO", Const, 0},
    -		{"IPPROTO_HMP", Const, 0},
    -		{"IPPROTO_HOPOPTS", Const, 0},
    -		{"IPPROTO_ICMP", Const, 0},
    -		{"IPPROTO_ICMPV6", Const, 0},
    -		{"IPPROTO_IDP", Const, 0},
    -		{"IPPROTO_IDPR", Const, 0},
    -		{"IPPROTO_IDRP", Const, 0},
    -		{"IPPROTO_IGMP", Const, 0},
    -		{"IPPROTO_IGP", Const, 0},
    -		{"IPPROTO_IGRP", Const, 0},
    -		{"IPPROTO_IL", Const, 0},
    -		{"IPPROTO_INLSP", Const, 0},
    -		{"IPPROTO_INP", Const, 0},
    -		{"IPPROTO_IP", Const, 0},
    -		{"IPPROTO_IPCOMP", Const, 0},
    -		{"IPPROTO_IPCV", Const, 0},
    -		{"IPPROTO_IPEIP", Const, 0},
    -		{"IPPROTO_IPIP", Const, 0},
    -		{"IPPROTO_IPPC", Const, 0},
    -		{"IPPROTO_IPV4", Const, 0},
    -		{"IPPROTO_IPV6", Const, 0},
    -		{"IPPROTO_IPV6_ICMP", Const, 1},
    -		{"IPPROTO_IRTP", Const, 0},
    -		{"IPPROTO_KRYPTOLAN", Const, 0},
    -		{"IPPROTO_LARP", Const, 0},
    -		{"IPPROTO_LEAF1", Const, 0},
    -		{"IPPROTO_LEAF2", Const, 0},
    -		{"IPPROTO_MAX", Const, 0},
    -		{"IPPROTO_MAXID", Const, 0},
    -		{"IPPROTO_MEAS", Const, 0},
    -		{"IPPROTO_MH", Const, 1},
    -		{"IPPROTO_MHRP", Const, 0},
    -		{"IPPROTO_MICP", Const, 0},
    -		{"IPPROTO_MOBILE", Const, 0},
    -		{"IPPROTO_MPLS", Const, 1},
    -		{"IPPROTO_MTP", Const, 0},
    -		{"IPPROTO_MUX", Const, 0},
    -		{"IPPROTO_ND", Const, 0},
    -		{"IPPROTO_NHRP", Const, 0},
    -		{"IPPROTO_NONE", Const, 0},
    -		{"IPPROTO_NSP", Const, 0},
    -		{"IPPROTO_NVPII", Const, 0},
    -		{"IPPROTO_OLD_DIVERT", Const, 0},
    -		{"IPPROTO_OSPFIGP", Const, 0},
    -		{"IPPROTO_PFSYNC", Const, 0},
    -		{"IPPROTO_PGM", Const, 0},
    -		{"IPPROTO_PIGP", Const, 0},
    -		{"IPPROTO_PIM", Const, 0},
    -		{"IPPROTO_PRM", Const, 0},
    -		{"IPPROTO_PUP", Const, 0},
    -		{"IPPROTO_PVP", Const, 0},
    -		{"IPPROTO_RAW", Const, 0},
    -		{"IPPROTO_RCCMON", Const, 0},
    -		{"IPPROTO_RDP", Const, 0},
    -		{"IPPROTO_ROUTING", Const, 0},
    -		{"IPPROTO_RSVP", Const, 0},
    -		{"IPPROTO_RVD", Const, 0},
    -		{"IPPROTO_SATEXPAK", Const, 0},
    -		{"IPPROTO_SATMON", Const, 0},
    -		{"IPPROTO_SCCSP", Const, 0},
    -		{"IPPROTO_SCTP", Const, 0},
    -		{"IPPROTO_SDRP", Const, 0},
    -		{"IPPROTO_SEND", Const, 1},
    -		{"IPPROTO_SEP", Const, 0},
    -		{"IPPROTO_SKIP", Const, 0},
    -		{"IPPROTO_SPACER", Const, 0},
    -		{"IPPROTO_SRPC", Const, 0},
    -		{"IPPROTO_ST", Const, 0},
    -		{"IPPROTO_SVMTP", Const, 0},
    -		{"IPPROTO_SWIPE", Const, 0},
    -		{"IPPROTO_TCF", Const, 0},
    -		{"IPPROTO_TCP", Const, 0},
    -		{"IPPROTO_TLSP", Const, 0},
    -		{"IPPROTO_TP", Const, 0},
    -		{"IPPROTO_TPXX", Const, 0},
    -		{"IPPROTO_TRUNK1", Const, 0},
    -		{"IPPROTO_TRUNK2", Const, 0},
    -		{"IPPROTO_TTP", Const, 0},
    -		{"IPPROTO_UDP", Const, 0},
    -		{"IPPROTO_UDPLITE", Const, 0},
    -		{"IPPROTO_VINES", Const, 0},
    -		{"IPPROTO_VISA", Const, 0},
    -		{"IPPROTO_VMTP", Const, 0},
    -		{"IPPROTO_VRRP", Const, 1},
    -		{"IPPROTO_WBEXPAK", Const, 0},
    -		{"IPPROTO_WBMON", Const, 0},
    -		{"IPPROTO_WSN", Const, 0},
    -		{"IPPROTO_XNET", Const, 0},
    -		{"IPPROTO_XTP", Const, 0},
    -		{"IPV6_2292DSTOPTS", Const, 0},
    -		{"IPV6_2292HOPLIMIT", Const, 0},
    -		{"IPV6_2292HOPOPTS", Const, 0},
    -		{"IPV6_2292NEXTHOP", Const, 0},
    -		{"IPV6_2292PKTINFO", Const, 0},
    -		{"IPV6_2292PKTOPTIONS", Const, 0},
    -		{"IPV6_2292RTHDR", Const, 0},
    -		{"IPV6_ADDRFORM", Const, 0},
    -		{"IPV6_ADD_MEMBERSHIP", Const, 0},
    -		{"IPV6_AUTHHDR", Const, 0},
    -		{"IPV6_AUTH_LEVEL", Const, 1},
    -		{"IPV6_AUTOFLOWLABEL", Const, 0},
    -		{"IPV6_BINDANY", Const, 0},
    -		{"IPV6_BINDV6ONLY", Const, 0},
    -		{"IPV6_BOUND_IF", Const, 0},
    -		{"IPV6_CHECKSUM", Const, 0},
    -		{"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0},
    -		{"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0},
    -		{"IPV6_DEFHLIM", Const, 0},
    -		{"IPV6_DONTFRAG", Const, 0},
    -		{"IPV6_DROP_MEMBERSHIP", Const, 0},
    -		{"IPV6_DSTOPTS", Const, 0},
    -		{"IPV6_ESP_NETWORK_LEVEL", Const, 1},
    -		{"IPV6_ESP_TRANS_LEVEL", Const, 1},
    -		{"IPV6_FAITH", Const, 0},
    -		{"IPV6_FLOWINFO_MASK", Const, 0},
    -		{"IPV6_FLOWLABEL_MASK", Const, 0},
    -		{"IPV6_FRAGTTL", Const, 0},
    -		{"IPV6_FW_ADD", Const, 0},
    -		{"IPV6_FW_DEL", Const, 0},
    -		{"IPV6_FW_FLUSH", Const, 0},
    -		{"IPV6_FW_GET", Const, 0},
    -		{"IPV6_FW_ZERO", Const, 0},
    -		{"IPV6_HLIMDEC", Const, 0},
    -		{"IPV6_HOPLIMIT", Const, 0},
    -		{"IPV6_HOPOPTS", Const, 0},
    -		{"IPV6_IPCOMP_LEVEL", Const, 1},
    -		{"IPV6_IPSEC_POLICY", Const, 0},
    -		{"IPV6_JOIN_ANYCAST", Const, 0},
    -		{"IPV6_JOIN_GROUP", Const, 0},
    -		{"IPV6_LEAVE_ANYCAST", Const, 0},
    -		{"IPV6_LEAVE_GROUP", Const, 0},
    -		{"IPV6_MAXHLIM", Const, 0},
    -		{"IPV6_MAXOPTHDR", Const, 0},
    -		{"IPV6_MAXPACKET", Const, 0},
    -		{"IPV6_MAX_GROUP_SRC_FILTER", Const, 0},
    -		{"IPV6_MAX_MEMBERSHIPS", Const, 0},
    -		{"IPV6_MAX_SOCK_SRC_FILTER", Const, 0},
    -		{"IPV6_MIN_MEMBERSHIPS", Const, 0},
    -		{"IPV6_MMTU", Const, 0},
    -		{"IPV6_MSFILTER", Const, 0},
    -		{"IPV6_MTU", Const, 0},
    -		{"IPV6_MTU_DISCOVER", Const, 0},
    -		{"IPV6_MULTICAST_HOPS", Const, 0},
    -		{"IPV6_MULTICAST_IF", Const, 0},
    -		{"IPV6_MULTICAST_LOOP", Const, 0},
    -		{"IPV6_NEXTHOP", Const, 0},
    -		{"IPV6_OPTIONS", Const, 1},
    -		{"IPV6_PATHMTU", Const, 0},
    -		{"IPV6_PIPEX", Const, 1},
    -		{"IPV6_PKTINFO", Const, 0},
    -		{"IPV6_PMTUDISC_DO", Const, 0},
    -		{"IPV6_PMTUDISC_DONT", Const, 0},
    -		{"IPV6_PMTUDISC_PROBE", Const, 0},
    -		{"IPV6_PMTUDISC_WANT", Const, 0},
    -		{"IPV6_PORTRANGE", Const, 0},
    -		{"IPV6_PORTRANGE_DEFAULT", Const, 0},
    -		{"IPV6_PORTRANGE_HIGH", Const, 0},
    -		{"IPV6_PORTRANGE_LOW", Const, 0},
    -		{"IPV6_PREFER_TEMPADDR", Const, 0},
    -		{"IPV6_RECVDSTOPTS", Const, 0},
    -		{"IPV6_RECVDSTPORT", Const, 3},
    -		{"IPV6_RECVERR", Const, 0},
    -		{"IPV6_RECVHOPLIMIT", Const, 0},
    -		{"IPV6_RECVHOPOPTS", Const, 0},
    -		{"IPV6_RECVPATHMTU", Const, 0},
    -		{"IPV6_RECVPKTINFO", Const, 0},
    -		{"IPV6_RECVRTHDR", Const, 0},
    -		{"IPV6_RECVTCLASS", Const, 0},
    -		{"IPV6_ROUTER_ALERT", Const, 0},
    -		{"IPV6_RTABLE", Const, 1},
    -		{"IPV6_RTHDR", Const, 0},
    -		{"IPV6_RTHDRDSTOPTS", Const, 0},
    -		{"IPV6_RTHDR_LOOSE", Const, 0},
    -		{"IPV6_RTHDR_STRICT", Const, 0},
    -		{"IPV6_RTHDR_TYPE_0", Const, 0},
    -		{"IPV6_RXDSTOPTS", Const, 0},
    -		{"IPV6_RXHOPOPTS", Const, 0},
    -		{"IPV6_SOCKOPT_RESERVED1", Const, 0},
    -		{"IPV6_TCLASS", Const, 0},
    -		{"IPV6_UNICAST_HOPS", Const, 0},
    -		{"IPV6_USE_MIN_MTU", Const, 0},
    -		{"IPV6_V6ONLY", Const, 0},
    -		{"IPV6_VERSION", Const, 0},
    -		{"IPV6_VERSION_MASK", Const, 0},
    -		{"IPV6_XFRM_POLICY", Const, 0},
    -		{"IP_ADD_MEMBERSHIP", Const, 0},
    -		{"IP_ADD_SOURCE_MEMBERSHIP", Const, 0},
    -		{"IP_AUTH_LEVEL", Const, 1},
    -		{"IP_BINDANY", Const, 0},
    -		{"IP_BLOCK_SOURCE", Const, 0},
    -		{"IP_BOUND_IF", Const, 0},
    -		{"IP_DEFAULT_MULTICAST_LOOP", Const, 0},
    -		{"IP_DEFAULT_MULTICAST_TTL", Const, 0},
    -		{"IP_DF", Const, 0},
    -		{"IP_DIVERTFL", Const, 3},
    -		{"IP_DONTFRAG", Const, 0},
    -		{"IP_DROP_MEMBERSHIP", Const, 0},
    -		{"IP_DROP_SOURCE_MEMBERSHIP", Const, 0},
    -		{"IP_DUMMYNET3", Const, 0},
    -		{"IP_DUMMYNET_CONFIGURE", Const, 0},
    -		{"IP_DUMMYNET_DEL", Const, 0},
    -		{"IP_DUMMYNET_FLUSH", Const, 0},
    -		{"IP_DUMMYNET_GET", Const, 0},
    -		{"IP_EF", Const, 1},
    -		{"IP_ERRORMTU", Const, 1},
    -		{"IP_ESP_NETWORK_LEVEL", Const, 1},
    -		{"IP_ESP_TRANS_LEVEL", Const, 1},
    -		{"IP_FAITH", Const, 0},
    -		{"IP_FREEBIND", Const, 0},
    -		{"IP_FW3", Const, 0},
    -		{"IP_FW_ADD", Const, 0},
    -		{"IP_FW_DEL", Const, 0},
    -		{"IP_FW_FLUSH", Const, 0},
    -		{"IP_FW_GET", Const, 0},
    -		{"IP_FW_NAT_CFG", Const, 0},
    -		{"IP_FW_NAT_DEL", Const, 0},
    -		{"IP_FW_NAT_GET_CONFIG", Const, 0},
    -		{"IP_FW_NAT_GET_LOG", Const, 0},
    -		{"IP_FW_RESETLOG", Const, 0},
    -		{"IP_FW_TABLE_ADD", Const, 0},
    -		{"IP_FW_TABLE_DEL", Const, 0},
    -		{"IP_FW_TABLE_FLUSH", Const, 0},
    -		{"IP_FW_TABLE_GETSIZE", Const, 0},
    -		{"IP_FW_TABLE_LIST", Const, 0},
    -		{"IP_FW_ZERO", Const, 0},
    -		{"IP_HDRINCL", Const, 0},
    -		{"IP_IPCOMP_LEVEL", Const, 1},
    -		{"IP_IPSECFLOWINFO", Const, 1},
    -		{"IP_IPSEC_LOCAL_AUTH", Const, 1},
    -		{"IP_IPSEC_LOCAL_CRED", Const, 1},
    -		{"IP_IPSEC_LOCAL_ID", Const, 1},
    -		{"IP_IPSEC_POLICY", Const, 0},
    -		{"IP_IPSEC_REMOTE_AUTH", Const, 1},
    -		{"IP_IPSEC_REMOTE_CRED", Const, 1},
    -		{"IP_IPSEC_REMOTE_ID", Const, 1},
    -		{"IP_MAXPACKET", Const, 0},
    -		{"IP_MAX_GROUP_SRC_FILTER", Const, 0},
    -		{"IP_MAX_MEMBERSHIPS", Const, 0},
    -		{"IP_MAX_SOCK_MUTE_FILTER", Const, 0},
    -		{"IP_MAX_SOCK_SRC_FILTER", Const, 0},
    -		{"IP_MAX_SOURCE_FILTER", Const, 0},
    -		{"IP_MF", Const, 0},
    -		{"IP_MINFRAGSIZE", Const, 1},
    -		{"IP_MINTTL", Const, 0},
    -		{"IP_MIN_MEMBERSHIPS", Const, 0},
    -		{"IP_MSFILTER", Const, 0},
    -		{"IP_MSS", Const, 0},
    -		{"IP_MTU", Const, 0},
    -		{"IP_MTU_DISCOVER", Const, 0},
    -		{"IP_MULTICAST_IF", Const, 0},
    -		{"IP_MULTICAST_IFINDEX", Const, 0},
    -		{"IP_MULTICAST_LOOP", Const, 0},
    -		{"IP_MULTICAST_TTL", Const, 0},
    -		{"IP_MULTICAST_VIF", Const, 0},
    -		{"IP_NAT__XXX", Const, 0},
    -		{"IP_OFFMASK", Const, 0},
    -		{"IP_OLD_FW_ADD", Const, 0},
    -		{"IP_OLD_FW_DEL", Const, 0},
    -		{"IP_OLD_FW_FLUSH", Const, 0},
    -		{"IP_OLD_FW_GET", Const, 0},
    -		{"IP_OLD_FW_RESETLOG", Const, 0},
    -		{"IP_OLD_FW_ZERO", Const, 0},
    -		{"IP_ONESBCAST", Const, 0},
    -		{"IP_OPTIONS", Const, 0},
    -		{"IP_ORIGDSTADDR", Const, 0},
    -		{"IP_PASSSEC", Const, 0},
    -		{"IP_PIPEX", Const, 1},
    -		{"IP_PKTINFO", Const, 0},
    -		{"IP_PKTOPTIONS", Const, 0},
    -		{"IP_PMTUDISC", Const, 0},
    -		{"IP_PMTUDISC_DO", Const, 0},
    -		{"IP_PMTUDISC_DONT", Const, 0},
    -		{"IP_PMTUDISC_PROBE", Const, 0},
    -		{"IP_PMTUDISC_WANT", Const, 0},
    -		{"IP_PORTRANGE", Const, 0},
    -		{"IP_PORTRANGE_DEFAULT", Const, 0},
    -		{"IP_PORTRANGE_HIGH", Const, 0},
    -		{"IP_PORTRANGE_LOW", Const, 0},
    -		{"IP_RECVDSTADDR", Const, 0},
    -		{"IP_RECVDSTPORT", Const, 1},
    -		{"IP_RECVERR", Const, 0},
    -		{"IP_RECVIF", Const, 0},
    -		{"IP_RECVOPTS", Const, 0},
    -		{"IP_RECVORIGDSTADDR", Const, 0},
    -		{"IP_RECVPKTINFO", Const, 0},
    -		{"IP_RECVRETOPTS", Const, 0},
    -		{"IP_RECVRTABLE", Const, 1},
    -		{"IP_RECVTOS", Const, 0},
    -		{"IP_RECVTTL", Const, 0},
    -		{"IP_RETOPTS", Const, 0},
    -		{"IP_RF", Const, 0},
    -		{"IP_ROUTER_ALERT", Const, 0},
    -		{"IP_RSVP_OFF", Const, 0},
    -		{"IP_RSVP_ON", Const, 0},
    -		{"IP_RSVP_VIF_OFF", Const, 0},
    -		{"IP_RSVP_VIF_ON", Const, 0},
    -		{"IP_RTABLE", Const, 1},
    -		{"IP_SENDSRCADDR", Const, 0},
    -		{"IP_STRIPHDR", Const, 0},
    -		{"IP_TOS", Const, 0},
    -		{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0},
    -		{"IP_TRANSPARENT", Const, 0},
    -		{"IP_TTL", Const, 0},
    -		{"IP_UNBLOCK_SOURCE", Const, 0},
    -		{"IP_XFRM_POLICY", Const, 0},
    -		{"IPv6MTUInfo", Type, 2},
    -		{"IPv6MTUInfo.Addr", Field, 2},
    -		{"IPv6MTUInfo.Mtu", Field, 2},
    -		{"IPv6Mreq", Type, 0},
    -		{"IPv6Mreq.Interface", Field, 0},
    -		{"IPv6Mreq.Multiaddr", Field, 0},
    -		{"ISIG", Const, 0},
    -		{"ISTRIP", Const, 0},
    -		{"IUCLC", Const, 0},
    -		{"IUTF8", Const, 0},
    -		{"IXANY", Const, 0},
    -		{"IXOFF", Const, 0},
    -		{"IXON", Const, 0},
    -		{"IfAddrmsg", Type, 0},
    -		{"IfAddrmsg.Family", Field, 0},
    -		{"IfAddrmsg.Flags", Field, 0},
    -		{"IfAddrmsg.Index", Field, 0},
    -		{"IfAddrmsg.Prefixlen", Field, 0},
    -		{"IfAddrmsg.Scope", Field, 0},
    -		{"IfAnnounceMsghdr", Type, 1},
    -		{"IfAnnounceMsghdr.Hdrlen", Field, 2},
    -		{"IfAnnounceMsghdr.Index", Field, 1},
    -		{"IfAnnounceMsghdr.Msglen", Field, 1},
    -		{"IfAnnounceMsghdr.Name", Field, 1},
    -		{"IfAnnounceMsghdr.Type", Field, 1},
    -		{"IfAnnounceMsghdr.Version", Field, 1},
    -		{"IfAnnounceMsghdr.What", Field, 1},
    -		{"IfData", Type, 0},
    -		{"IfData.Addrlen", Field, 0},
    -		{"IfData.Baudrate", Field, 0},
    -		{"IfData.Capabilities", Field, 2},
    -		{"IfData.Collisions", Field, 0},
    -		{"IfData.Datalen", Field, 0},
    -		{"IfData.Epoch", Field, 0},
    -		{"IfData.Hdrlen", Field, 0},
    -		{"IfData.Hwassist", Field, 0},
    -		{"IfData.Ibytes", Field, 0},
    -		{"IfData.Ierrors", Field, 0},
    -		{"IfData.Imcasts", Field, 0},
    -		{"IfData.Ipackets", Field, 0},
    -		{"IfData.Iqdrops", Field, 0},
    -		{"IfData.Lastchange", Field, 0},
    -		{"IfData.Link_state", Field, 0},
    -		{"IfData.Mclpool", Field, 2},
    -		{"IfData.Metric", Field, 0},
    -		{"IfData.Mtu", Field, 0},
    -		{"IfData.Noproto", Field, 0},
    -		{"IfData.Obytes", Field, 0},
    -		{"IfData.Oerrors", Field, 0},
    -		{"IfData.Omcasts", Field, 0},
    -		{"IfData.Opackets", Field, 0},
    -		{"IfData.Pad", Field, 2},
    -		{"IfData.Pad_cgo_0", Field, 2},
    -		{"IfData.Pad_cgo_1", Field, 2},
    -		{"IfData.Physical", Field, 0},
    -		{"IfData.Recvquota", Field, 0},
    -		{"IfData.Recvtiming", Field, 0},
    -		{"IfData.Reserved1", Field, 0},
    -		{"IfData.Reserved2", Field, 0},
    -		{"IfData.Spare_char1", Field, 0},
    -		{"IfData.Spare_char2", Field, 0},
    -		{"IfData.Type", Field, 0},
    -		{"IfData.Typelen", Field, 0},
    -		{"IfData.Unused1", Field, 0},
    -		{"IfData.Unused2", Field, 0},
    -		{"IfData.Xmitquota", Field, 0},
    -		{"IfData.Xmittiming", Field, 0},
    -		{"IfInfomsg", Type, 0},
    -		{"IfInfomsg.Change", Field, 0},
    -		{"IfInfomsg.Family", Field, 0},
    -		{"IfInfomsg.Flags", Field, 0},
    -		{"IfInfomsg.Index", Field, 0},
    -		{"IfInfomsg.Type", Field, 0},
    -		{"IfInfomsg.X__ifi_pad", Field, 0},
    -		{"IfMsghdr", Type, 0},
    -		{"IfMsghdr.Addrs", Field, 0},
    -		{"IfMsghdr.Data", Field, 0},
    -		{"IfMsghdr.Flags", Field, 0},
    -		{"IfMsghdr.Hdrlen", Field, 2},
    -		{"IfMsghdr.Index", Field, 0},
    -		{"IfMsghdr.Msglen", Field, 0},
    -		{"IfMsghdr.Pad1", Field, 2},
    -		{"IfMsghdr.Pad2", Field, 2},
    -		{"IfMsghdr.Pad_cgo_0", Field, 0},
    -		{"IfMsghdr.Pad_cgo_1", Field, 2},
    -		{"IfMsghdr.Tableid", Field, 2},
    -		{"IfMsghdr.Type", Field, 0},
    -		{"IfMsghdr.Version", Field, 0},
    -		{"IfMsghdr.Xflags", Field, 2},
    -		{"IfaMsghdr", Type, 0},
    -		{"IfaMsghdr.Addrs", Field, 0},
    -		{"IfaMsghdr.Flags", Field, 0},
    -		{"IfaMsghdr.Hdrlen", Field, 2},
    -		{"IfaMsghdr.Index", Field, 0},
    -		{"IfaMsghdr.Metric", Field, 0},
    -		{"IfaMsghdr.Msglen", Field, 0},
    -		{"IfaMsghdr.Pad1", Field, 2},
    -		{"IfaMsghdr.Pad2", Field, 2},
    -		{"IfaMsghdr.Pad_cgo_0", Field, 0},
    -		{"IfaMsghdr.Tableid", Field, 2},
    -		{"IfaMsghdr.Type", Field, 0},
    -		{"IfaMsghdr.Version", Field, 0},
    -		{"IfmaMsghdr", Type, 0},
    -		{"IfmaMsghdr.Addrs", Field, 0},
    -		{"IfmaMsghdr.Flags", Field, 0},
    -		{"IfmaMsghdr.Index", Field, 0},
    -		{"IfmaMsghdr.Msglen", Field, 0},
    -		{"IfmaMsghdr.Pad_cgo_0", Field, 0},
    -		{"IfmaMsghdr.Type", Field, 0},
    -		{"IfmaMsghdr.Version", Field, 0},
    -		{"IfmaMsghdr2", Type, 0},
    -		{"IfmaMsghdr2.Addrs", Field, 0},
    -		{"IfmaMsghdr2.Flags", Field, 0},
    -		{"IfmaMsghdr2.Index", Field, 0},
    -		{"IfmaMsghdr2.Msglen", Field, 0},
    -		{"IfmaMsghdr2.Pad_cgo_0", Field, 0},
    -		{"IfmaMsghdr2.Refcount", Field, 0},
    -		{"IfmaMsghdr2.Type", Field, 0},
    -		{"IfmaMsghdr2.Version", Field, 0},
    -		{"ImplementsGetwd", Const, 0},
    -		{"Inet4Pktinfo", Type, 0},
    -		{"Inet4Pktinfo.Addr", Field, 0},
    -		{"Inet4Pktinfo.Ifindex", Field, 0},
    -		{"Inet4Pktinfo.Spec_dst", Field, 0},
    -		{"Inet6Pktinfo", Type, 0},
    -		{"Inet6Pktinfo.Addr", Field, 0},
    -		{"Inet6Pktinfo.Ifindex", Field, 0},
    -		{"InotifyAddWatch", Func, 0},
    -		{"InotifyEvent", Type, 0},
    -		{"InotifyEvent.Cookie", Field, 0},
    -		{"InotifyEvent.Len", Field, 0},
    -		{"InotifyEvent.Mask", Field, 0},
    -		{"InotifyEvent.Name", Field, 0},
    -		{"InotifyEvent.Wd", Field, 0},
    -		{"InotifyInit", Func, 0},
    -		{"InotifyInit1", Func, 0},
    -		{"InotifyRmWatch", Func, 0},
    -		{"InterfaceAddrMessage", Type, 0},
    -		{"InterfaceAddrMessage.Data", Field, 0},
    -		{"InterfaceAddrMessage.Header", Field, 0},
    -		{"InterfaceAnnounceMessage", Type, 1},
    -		{"InterfaceAnnounceMessage.Header", Field, 1},
    -		{"InterfaceInfo", Type, 0},
    -		{"InterfaceInfo.Address", Field, 0},
    -		{"InterfaceInfo.BroadcastAddress", Field, 0},
    -		{"InterfaceInfo.Flags", Field, 0},
    -		{"InterfaceInfo.Netmask", Field, 0},
    -		{"InterfaceMessage", Type, 0},
    -		{"InterfaceMessage.Data", Field, 0},
    -		{"InterfaceMessage.Header", Field, 0},
    -		{"InterfaceMulticastAddrMessage", Type, 0},
    -		{"InterfaceMulticastAddrMessage.Data", Field, 0},
    -		{"InterfaceMulticastAddrMessage.Header", Field, 0},
    -		{"InvalidHandle", Const, 0},
    -		{"Ioperm", Func, 0},
    -		{"Iopl", Func, 0},
    -		{"Iovec", Type, 0},
    -		{"Iovec.Base", Field, 0},
    -		{"Iovec.Len", Field, 0},
    -		{"IpAdapterInfo", Type, 0},
    -		{"IpAdapterInfo.AdapterName", Field, 0},
    -		{"IpAdapterInfo.Address", Field, 0},
    -		{"IpAdapterInfo.AddressLength", Field, 0},
    -		{"IpAdapterInfo.ComboIndex", Field, 0},
    -		{"IpAdapterInfo.CurrentIpAddress", Field, 0},
    -		{"IpAdapterInfo.Description", Field, 0},
    -		{"IpAdapterInfo.DhcpEnabled", Field, 0},
    -		{"IpAdapterInfo.DhcpServer", Field, 0},
    -		{"IpAdapterInfo.GatewayList", Field, 0},
    -		{"IpAdapterInfo.HaveWins", Field, 0},
    -		{"IpAdapterInfo.Index", Field, 0},
    -		{"IpAdapterInfo.IpAddressList", Field, 0},
    -		{"IpAdapterInfo.LeaseExpires", Field, 0},
    -		{"IpAdapterInfo.LeaseObtained", Field, 0},
    -		{"IpAdapterInfo.Next", Field, 0},
    -		{"IpAdapterInfo.PrimaryWinsServer", Field, 0},
    -		{"IpAdapterInfo.SecondaryWinsServer", Field, 0},
    -		{"IpAdapterInfo.Type", Field, 0},
    -		{"IpAddrString", Type, 0},
    -		{"IpAddrString.Context", Field, 0},
    -		{"IpAddrString.IpAddress", Field, 0},
    -		{"IpAddrString.IpMask", Field, 0},
    -		{"IpAddrString.Next", Field, 0},
    -		{"IpAddressString", Type, 0},
    -		{"IpAddressString.String", Field, 0},
    -		{"IpMaskString", Type, 0},
    -		{"IpMaskString.String", Field, 2},
    -		{"Issetugid", Func, 0},
    -		{"KEY_ALL_ACCESS", Const, 0},
    -		{"KEY_CREATE_LINK", Const, 0},
    -		{"KEY_CREATE_SUB_KEY", Const, 0},
    -		{"KEY_ENUMERATE_SUB_KEYS", Const, 0},
    -		{"KEY_EXECUTE", Const, 0},
    -		{"KEY_NOTIFY", Const, 0},
    -		{"KEY_QUERY_VALUE", Const, 0},
    -		{"KEY_READ", Const, 0},
    -		{"KEY_SET_VALUE", Const, 0},
    -		{"KEY_WOW64_32KEY", Const, 0},
    -		{"KEY_WOW64_64KEY", Const, 0},
    -		{"KEY_WRITE", Const, 0},
    -		{"Kevent", Func, 0},
    -		{"Kevent_t", Type, 0},
    -		{"Kevent_t.Data", Field, 0},
    -		{"Kevent_t.Fflags", Field, 0},
    -		{"Kevent_t.Filter", Field, 0},
    -		{"Kevent_t.Flags", Field, 0},
    -		{"Kevent_t.Ident", Field, 0},
    -		{"Kevent_t.Pad_cgo_0", Field, 2},
    -		{"Kevent_t.Udata", Field, 0},
    -		{"Kill", Func, 0},
    -		{"Klogctl", Func, 0},
    -		{"Kqueue", Func, 0},
    -		{"LANG_ENGLISH", Const, 0},
    -		{"LAYERED_PROTOCOL", Const, 2},
    -		{"LCNT_OVERLOAD_FLUSH", Const, 1},
    -		{"LINUX_REBOOT_CMD_CAD_OFF", Const, 0},
    -		{"LINUX_REBOOT_CMD_CAD_ON", Const, 0},
    -		{"LINUX_REBOOT_CMD_HALT", Const, 0},
    -		{"LINUX_REBOOT_CMD_KEXEC", Const, 0},
    -		{"LINUX_REBOOT_CMD_POWER_OFF", Const, 0},
    -		{"LINUX_REBOOT_CMD_RESTART", Const, 0},
    -		{"LINUX_REBOOT_CMD_RESTART2", Const, 0},
    -		{"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0},
    -		{"LINUX_REBOOT_MAGIC1", Const, 0},
    -		{"LINUX_REBOOT_MAGIC2", Const, 0},
    -		{"LOCK_EX", Const, 0},
    -		{"LOCK_NB", Const, 0},
    -		{"LOCK_SH", Const, 0},
    -		{"LOCK_UN", Const, 0},
    -		{"LazyDLL", Type, 0},
    -		{"LazyDLL.Name", Field, 0},
    -		{"LazyProc", Type, 0},
    -		{"LazyProc.Name", Field, 0},
    -		{"Lchown", Func, 0},
    -		{"Linger", Type, 0},
    -		{"Linger.Linger", Field, 0},
    -		{"Linger.Onoff", Field, 0},
    -		{"Link", Func, 0},
    -		{"Listen", Func, 0},
    -		{"Listxattr", Func, 1},
    -		{"LoadCancelIoEx", Func, 1},
    -		{"LoadConnectEx", Func, 1},
    -		{"LoadCreateSymbolicLink", Func, 4},
    -		{"LoadDLL", Func, 0},
    -		{"LoadGetAddrInfo", Func, 1},
    -		{"LoadLibrary", Func, 0},
    -		{"LoadSetFileCompletionNotificationModes", Func, 2},
    -		{"LocalFree", Func, 0},
    -		{"Log2phys_t", Type, 0},
    -		{"Log2phys_t.Contigbytes", Field, 0},
    -		{"Log2phys_t.Devoffset", Field, 0},
    -		{"Log2phys_t.Flags", Field, 0},
    -		{"LookupAccountName", Func, 0},
    -		{"LookupAccountSid", Func, 0},
    -		{"LookupSID", Func, 0},
    -		{"LsfJump", Func, 0},
    -		{"LsfSocket", Func, 0},
    -		{"LsfStmt", Func, 0},
    -		{"Lstat", Func, 0},
    -		{"MADV_AUTOSYNC", Const, 1},
    -		{"MADV_CAN_REUSE", Const, 0},
    -		{"MADV_CORE", Const, 1},
    -		{"MADV_DOFORK", Const, 0},
    -		{"MADV_DONTFORK", Const, 0},
    -		{"MADV_DONTNEED", Const, 0},
    -		{"MADV_FREE", Const, 0},
    -		{"MADV_FREE_REUSABLE", Const, 0},
    -		{"MADV_FREE_REUSE", Const, 0},
    -		{"MADV_HUGEPAGE", Const, 0},
    -		{"MADV_HWPOISON", Const, 0},
    -		{"MADV_MERGEABLE", Const, 0},
    -		{"MADV_NOCORE", Const, 1},
    -		{"MADV_NOHUGEPAGE", Const, 0},
    -		{"MADV_NORMAL", Const, 0},
    -		{"MADV_NOSYNC", Const, 1},
    -		{"MADV_PROTECT", Const, 1},
    -		{"MADV_RANDOM", Const, 0},
    -		{"MADV_REMOVE", Const, 0},
    -		{"MADV_SEQUENTIAL", Const, 0},
    -		{"MADV_SPACEAVAIL", Const, 3},
    -		{"MADV_UNMERGEABLE", Const, 0},
    -		{"MADV_WILLNEED", Const, 0},
    -		{"MADV_ZERO_WIRED_PAGES", Const, 0},
    -		{"MAP_32BIT", Const, 0},
    -		{"MAP_ALIGNED_SUPER", Const, 3},
    -		{"MAP_ALIGNMENT_16MB", Const, 3},
    -		{"MAP_ALIGNMENT_1TB", Const, 3},
    -		{"MAP_ALIGNMENT_256TB", Const, 3},
    -		{"MAP_ALIGNMENT_4GB", Const, 3},
    -		{"MAP_ALIGNMENT_64KB", Const, 3},
    -		{"MAP_ALIGNMENT_64PB", Const, 3},
    -		{"MAP_ALIGNMENT_MASK", Const, 3},
    -		{"MAP_ALIGNMENT_SHIFT", Const, 3},
    -		{"MAP_ANON", Const, 0},
    -		{"MAP_ANONYMOUS", Const, 0},
    -		{"MAP_COPY", Const, 0},
    -		{"MAP_DENYWRITE", Const, 0},
    -		{"MAP_EXECUTABLE", Const, 0},
    -		{"MAP_FILE", Const, 0},
    -		{"MAP_FIXED", Const, 0},
    -		{"MAP_FLAGMASK", Const, 3},
    -		{"MAP_GROWSDOWN", Const, 0},
    -		{"MAP_HASSEMAPHORE", Const, 0},
    -		{"MAP_HUGETLB", Const, 0},
    -		{"MAP_INHERIT", Const, 3},
    -		{"MAP_INHERIT_COPY", Const, 3},
    -		{"MAP_INHERIT_DEFAULT", Const, 3},
    -		{"MAP_INHERIT_DONATE_COPY", Const, 3},
    -		{"MAP_INHERIT_NONE", Const, 3},
    -		{"MAP_INHERIT_SHARE", Const, 3},
    -		{"MAP_JIT", Const, 0},
    -		{"MAP_LOCKED", Const, 0},
    -		{"MAP_NOCACHE", Const, 0},
    -		{"MAP_NOCORE", Const, 1},
    -		{"MAP_NOEXTEND", Const, 0},
    -		{"MAP_NONBLOCK", Const, 0},
    -		{"MAP_NORESERVE", Const, 0},
    -		{"MAP_NOSYNC", Const, 1},
    -		{"MAP_POPULATE", Const, 0},
    -		{"MAP_PREFAULT_READ", Const, 1},
    -		{"MAP_PRIVATE", Const, 0},
    -		{"MAP_RENAME", Const, 0},
    -		{"MAP_RESERVED0080", Const, 0},
    -		{"MAP_RESERVED0100", Const, 1},
    -		{"MAP_SHARED", Const, 0},
    -		{"MAP_STACK", Const, 0},
    -		{"MAP_TRYFIXED", Const, 3},
    -		{"MAP_TYPE", Const, 0},
    -		{"MAP_WIRED", Const, 3},
    -		{"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4},
    -		{"MAXLEN_IFDESCR", Const, 0},
    -		{"MAXLEN_PHYSADDR", Const, 0},
    -		{"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0},
    -		{"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0},
    -		{"MAX_ADAPTER_NAME_LENGTH", Const, 0},
    -		{"MAX_COMPUTERNAME_LENGTH", Const, 0},
    -		{"MAX_INTERFACE_NAME_LEN", Const, 0},
    -		{"MAX_LONG_PATH", Const, 0},
    -		{"MAX_PATH", Const, 0},
    -		{"MAX_PROTOCOL_CHAIN", Const, 2},
    -		{"MCL_CURRENT", Const, 0},
    -		{"MCL_FUTURE", Const, 0},
    -		{"MNT_DETACH", Const, 0},
    -		{"MNT_EXPIRE", Const, 0},
    -		{"MNT_FORCE", Const, 0},
    -		{"MSG_BCAST", Const, 1},
    -		{"MSG_CMSG_CLOEXEC", Const, 0},
    -		{"MSG_COMPAT", Const, 0},
    -		{"MSG_CONFIRM", Const, 0},
    -		{"MSG_CONTROLMBUF", Const, 1},
    -		{"MSG_CTRUNC", Const, 0},
    -		{"MSG_DONTROUTE", Const, 0},
    -		{"MSG_DONTWAIT", Const, 0},
    -		{"MSG_EOF", Const, 0},
    -		{"MSG_EOR", Const, 0},
    -		{"MSG_ERRQUEUE", Const, 0},
    -		{"MSG_FASTOPEN", Const, 1},
    -		{"MSG_FIN", Const, 0},
    -		{"MSG_FLUSH", Const, 0},
    -		{"MSG_HAVEMORE", Const, 0},
    -		{"MSG_HOLD", Const, 0},
    -		{"MSG_IOVUSRSPACE", Const, 1},
    -		{"MSG_LENUSRSPACE", Const, 1},
    -		{"MSG_MCAST", Const, 1},
    -		{"MSG_MORE", Const, 0},
    -		{"MSG_NAMEMBUF", Const, 1},
    -		{"MSG_NBIO", Const, 0},
    -		{"MSG_NEEDSA", Const, 0},
    -		{"MSG_NOSIGNAL", Const, 0},
    -		{"MSG_NOTIFICATION", Const, 0},
    -		{"MSG_OOB", Const, 0},
    -		{"MSG_PEEK", Const, 0},
    -		{"MSG_PROXY", Const, 0},
    -		{"MSG_RCVMORE", Const, 0},
    -		{"MSG_RST", Const, 0},
    -		{"MSG_SEND", Const, 0},
    -		{"MSG_SYN", Const, 0},
    -		{"MSG_TRUNC", Const, 0},
    -		{"MSG_TRYHARD", Const, 0},
    -		{"MSG_USERFLAGS", Const, 1},
    -		{"MSG_WAITALL", Const, 0},
    -		{"MSG_WAITFORONE", Const, 0},
    -		{"MSG_WAITSTREAM", Const, 0},
    -		{"MS_ACTIVE", Const, 0},
    -		{"MS_ASYNC", Const, 0},
    -		{"MS_BIND", Const, 0},
    -		{"MS_DEACTIVATE", Const, 0},
    -		{"MS_DIRSYNC", Const, 0},
    -		{"MS_INVALIDATE", Const, 0},
    -		{"MS_I_VERSION", Const, 0},
    -		{"MS_KERNMOUNT", Const, 0},
    -		{"MS_KILLPAGES", Const, 0},
    -		{"MS_MANDLOCK", Const, 0},
    -		{"MS_MGC_MSK", Const, 0},
    -		{"MS_MGC_VAL", Const, 0},
    -		{"MS_MOVE", Const, 0},
    -		{"MS_NOATIME", Const, 0},
    -		{"MS_NODEV", Const, 0},
    -		{"MS_NODIRATIME", Const, 0},
    -		{"MS_NOEXEC", Const, 0},
    -		{"MS_NOSUID", Const, 0},
    -		{"MS_NOUSER", Const, 0},
    -		{"MS_POSIXACL", Const, 0},
    -		{"MS_PRIVATE", Const, 0},
    -		{"MS_RDONLY", Const, 0},
    -		{"MS_REC", Const, 0},
    -		{"MS_RELATIME", Const, 0},
    -		{"MS_REMOUNT", Const, 0},
    -		{"MS_RMT_MASK", Const, 0},
    -		{"MS_SHARED", Const, 0},
    -		{"MS_SILENT", Const, 0},
    -		{"MS_SLAVE", Const, 0},
    -		{"MS_STRICTATIME", Const, 0},
    -		{"MS_SYNC", Const, 0},
    -		{"MS_SYNCHRONOUS", Const, 0},
    -		{"MS_UNBINDABLE", Const, 0},
    -		{"Madvise", Func, 0},
    -		{"MapViewOfFile", Func, 0},
    -		{"MaxTokenInfoClass", Const, 0},
    -		{"Mclpool", Type, 2},
    -		{"Mclpool.Alive", Field, 2},
    -		{"Mclpool.Cwm", Field, 2},
    -		{"Mclpool.Grown", Field, 2},
    -		{"Mclpool.Hwm", Field, 2},
    -		{"Mclpool.Lwm", Field, 2},
    -		{"MibIfRow", Type, 0},
    -		{"MibIfRow.AdminStatus", Field, 0},
    -		{"MibIfRow.Descr", Field, 0},
    -		{"MibIfRow.DescrLen", Field, 0},
    -		{"MibIfRow.InDiscards", Field, 0},
    -		{"MibIfRow.InErrors", Field, 0},
    -		{"MibIfRow.InNUcastPkts", Field, 0},
    -		{"MibIfRow.InOctets", Field, 0},
    -		{"MibIfRow.InUcastPkts", Field, 0},
    -		{"MibIfRow.InUnknownProtos", Field, 0},
    -		{"MibIfRow.Index", Field, 0},
    -		{"MibIfRow.LastChange", Field, 0},
    -		{"MibIfRow.Mtu", Field, 0},
    -		{"MibIfRow.Name", Field, 0},
    -		{"MibIfRow.OperStatus", Field, 0},
    -		{"MibIfRow.OutDiscards", Field, 0},
    -		{"MibIfRow.OutErrors", Field, 0},
    -		{"MibIfRow.OutNUcastPkts", Field, 0},
    -		{"MibIfRow.OutOctets", Field, 0},
    -		{"MibIfRow.OutQLen", Field, 0},
    -		{"MibIfRow.OutUcastPkts", Field, 0},
    -		{"MibIfRow.PhysAddr", Field, 0},
    -		{"MibIfRow.PhysAddrLen", Field, 0},
    -		{"MibIfRow.Speed", Field, 0},
    -		{"MibIfRow.Type", Field, 0},
    -		{"Mkdir", Func, 0},
    -		{"Mkdirat", Func, 0},
    -		{"Mkfifo", Func, 0},
    -		{"Mknod", Func, 0},
    -		{"Mknodat", Func, 0},
    -		{"Mlock", Func, 0},
    -		{"Mlockall", Func, 0},
    -		{"Mmap", Func, 0},
    -		{"Mount", Func, 0},
    -		{"MoveFile", Func, 0},
    -		{"Mprotect", Func, 0},
    -		{"Msghdr", Type, 0},
    -		{"Msghdr.Control", Field, 0},
    -		{"Msghdr.Controllen", Field, 0},
    -		{"Msghdr.Flags", Field, 0},
    -		{"Msghdr.Iov", Field, 0},
    -		{"Msghdr.Iovlen", Field, 0},
    -		{"Msghdr.Name", Field, 0},
    -		{"Msghdr.Namelen", Field, 0},
    -		{"Msghdr.Pad_cgo_0", Field, 0},
    -		{"Msghdr.Pad_cgo_1", Field, 0},
    -		{"Munlock", Func, 0},
    -		{"Munlockall", Func, 0},
    -		{"Munmap", Func, 0},
    -		{"MustLoadDLL", Func, 0},
    -		{"NAME_MAX", Const, 0},
    -		{"NETLINK_ADD_MEMBERSHIP", Const, 0},
    -		{"NETLINK_AUDIT", Const, 0},
    -		{"NETLINK_BROADCAST_ERROR", Const, 0},
    -		{"NETLINK_CONNECTOR", Const, 0},
    -		{"NETLINK_DNRTMSG", Const, 0},
    -		{"NETLINK_DROP_MEMBERSHIP", Const, 0},
    -		{"NETLINK_ECRYPTFS", Const, 0},
    -		{"NETLINK_FIB_LOOKUP", Const, 0},
    -		{"NETLINK_FIREWALL", Const, 0},
    -		{"NETLINK_GENERIC", Const, 0},
    -		{"NETLINK_INET_DIAG", Const, 0},
    -		{"NETLINK_IP6_FW", Const, 0},
    -		{"NETLINK_ISCSI", Const, 0},
    -		{"NETLINK_KOBJECT_UEVENT", Const, 0},
    -		{"NETLINK_NETFILTER", Const, 0},
    -		{"NETLINK_NFLOG", Const, 0},
    -		{"NETLINK_NO_ENOBUFS", Const, 0},
    -		{"NETLINK_PKTINFO", Const, 0},
    -		{"NETLINK_RDMA", Const, 0},
    -		{"NETLINK_ROUTE", Const, 0},
    -		{"NETLINK_SCSITRANSPORT", Const, 0},
    -		{"NETLINK_SELINUX", Const, 0},
    -		{"NETLINK_UNUSED", Const, 0},
    -		{"NETLINK_USERSOCK", Const, 0},
    -		{"NETLINK_XFRM", Const, 0},
    -		{"NET_RT_DUMP", Const, 0},
    -		{"NET_RT_DUMP2", Const, 0},
    -		{"NET_RT_FLAGS", Const, 0},
    -		{"NET_RT_IFLIST", Const, 0},
    -		{"NET_RT_IFLIST2", Const, 0},
    -		{"NET_RT_IFLISTL", Const, 1},
    -		{"NET_RT_IFMALIST", Const, 0},
    -		{"NET_RT_MAXID", Const, 0},
    -		{"NET_RT_OIFLIST", Const, 1},
    -		{"NET_RT_OOIFLIST", Const, 1},
    -		{"NET_RT_STAT", Const, 0},
    -		{"NET_RT_STATS", Const, 1},
    -		{"NET_RT_TABLE", Const, 1},
    -		{"NET_RT_TRASH", Const, 0},
    -		{"NLA_ALIGNTO", Const, 0},
    -		{"NLA_F_NESTED", Const, 0},
    -		{"NLA_F_NET_BYTEORDER", Const, 0},
    -		{"NLA_HDRLEN", Const, 0},
    -		{"NLMSG_ALIGNTO", Const, 0},
    -		{"NLMSG_DONE", Const, 0},
    -		{"NLMSG_ERROR", Const, 0},
    -		{"NLMSG_HDRLEN", Const, 0},
    -		{"NLMSG_MIN_TYPE", Const, 0},
    -		{"NLMSG_NOOP", Const, 0},
    -		{"NLMSG_OVERRUN", Const, 0},
    -		{"NLM_F_ACK", Const, 0},
    -		{"NLM_F_APPEND", Const, 0},
    -		{"NLM_F_ATOMIC", Const, 0},
    -		{"NLM_F_CREATE", Const, 0},
    -		{"NLM_F_DUMP", Const, 0},
    -		{"NLM_F_ECHO", Const, 0},
    -		{"NLM_F_EXCL", Const, 0},
    -		{"NLM_F_MATCH", Const, 0},
    -		{"NLM_F_MULTI", Const, 0},
    -		{"NLM_F_REPLACE", Const, 0},
    -		{"NLM_F_REQUEST", Const, 0},
    -		{"NLM_F_ROOT", Const, 0},
    -		{"NOFLSH", Const, 0},
    -		{"NOTE_ABSOLUTE", Const, 0},
    -		{"NOTE_ATTRIB", Const, 0},
    -		{"NOTE_BACKGROUND", Const, 16},
    -		{"NOTE_CHILD", Const, 0},
    -		{"NOTE_CRITICAL", Const, 16},
    -		{"NOTE_DELETE", Const, 0},
    -		{"NOTE_EOF", Const, 1},
    -		{"NOTE_EXEC", Const, 0},
    -		{"NOTE_EXIT", Const, 0},
    -		{"NOTE_EXITSTATUS", Const, 0},
    -		{"NOTE_EXIT_CSERROR", Const, 16},
    -		{"NOTE_EXIT_DECRYPTFAIL", Const, 16},
    -		{"NOTE_EXIT_DETAIL", Const, 16},
    -		{"NOTE_EXIT_DETAIL_MASK", Const, 16},
    -		{"NOTE_EXIT_MEMORY", Const, 16},
    -		{"NOTE_EXIT_REPARENTED", Const, 16},
    -		{"NOTE_EXTEND", Const, 0},
    -		{"NOTE_FFAND", Const, 0},
    -		{"NOTE_FFCOPY", Const, 0},
    -		{"NOTE_FFCTRLMASK", Const, 0},
    -		{"NOTE_FFLAGSMASK", Const, 0},
    -		{"NOTE_FFNOP", Const, 0},
    -		{"NOTE_FFOR", Const, 0},
    -		{"NOTE_FORK", Const, 0},
    -		{"NOTE_LEEWAY", Const, 16},
    -		{"NOTE_LINK", Const, 0},
    -		{"NOTE_LOWAT", Const, 0},
    -		{"NOTE_NONE", Const, 0},
    -		{"NOTE_NSECONDS", Const, 0},
    -		{"NOTE_PCTRLMASK", Const, 0},
    -		{"NOTE_PDATAMASK", Const, 0},
    -		{"NOTE_REAP", Const, 0},
    -		{"NOTE_RENAME", Const, 0},
    -		{"NOTE_RESOURCEEND", Const, 0},
    -		{"NOTE_REVOKE", Const, 0},
    -		{"NOTE_SECONDS", Const, 0},
    -		{"NOTE_SIGNAL", Const, 0},
    -		{"NOTE_TRACK", Const, 0},
    -		{"NOTE_TRACKERR", Const, 0},
    -		{"NOTE_TRIGGER", Const, 0},
    -		{"NOTE_TRUNCATE", Const, 1},
    -		{"NOTE_USECONDS", Const, 0},
    -		{"NOTE_VM_ERROR", Const, 0},
    -		{"NOTE_VM_PRESSURE", Const, 0},
    -		{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0},
    -		{"NOTE_VM_PRESSURE_TERMINATE", Const, 0},
    -		{"NOTE_WRITE", Const, 0},
    -		{"NameCanonical", Const, 0},
    -		{"NameCanonicalEx", Const, 0},
    -		{"NameDisplay", Const, 0},
    -		{"NameDnsDomain", Const, 0},
    -		{"NameFullyQualifiedDN", Const, 0},
    -		{"NameSamCompatible", Const, 0},
    -		{"NameServicePrincipal", Const, 0},
    -		{"NameUniqueId", Const, 0},
    -		{"NameUnknown", Const, 0},
    -		{"NameUserPrincipal", Const, 0},
    -		{"Nanosleep", Func, 0},
    -		{"NetApiBufferFree", Func, 0},
    -		{"NetGetJoinInformation", Func, 2},
    -		{"NetSetupDomainName", Const, 2},
    -		{"NetSetupUnjoined", Const, 2},
    -		{"NetSetupUnknownStatus", Const, 2},
    -		{"NetSetupWorkgroupName", Const, 2},
    -		{"NetUserGetInfo", Func, 0},
    -		{"NetlinkMessage", Type, 0},
    -		{"NetlinkMessage.Data", Field, 0},
    -		{"NetlinkMessage.Header", Field, 0},
    -		{"NetlinkRIB", Func, 0},
    -		{"NetlinkRouteAttr", Type, 0},
    -		{"NetlinkRouteAttr.Attr", Field, 0},
    -		{"NetlinkRouteAttr.Value", Field, 0},
    -		{"NetlinkRouteRequest", Type, 0},
    -		{"NetlinkRouteRequest.Data", Field, 0},
    -		{"NetlinkRouteRequest.Header", Field, 0},
    -		{"NewCallback", Func, 0},
    -		{"NewCallbackCDecl", Func, 3},
    -		{"NewLazyDLL", Func, 0},
    -		{"NlAttr", Type, 0},
    -		{"NlAttr.Len", Field, 0},
    -		{"NlAttr.Type", Field, 0},
    -		{"NlMsgerr", Type, 0},
    -		{"NlMsgerr.Error", Field, 0},
    -		{"NlMsgerr.Msg", Field, 0},
    -		{"NlMsghdr", Type, 0},
    -		{"NlMsghdr.Flags", Field, 0},
    -		{"NlMsghdr.Len", Field, 0},
    -		{"NlMsghdr.Pid", Field, 0},
    -		{"NlMsghdr.Seq", Field, 0},
    -		{"NlMsghdr.Type", Field, 0},
    -		{"NsecToFiletime", Func, 0},
    -		{"NsecToTimespec", Func, 0},
    -		{"NsecToTimeval", Func, 0},
    -		{"Ntohs", Func, 0},
    -		{"OCRNL", Const, 0},
    -		{"OFDEL", Const, 0},
    -		{"OFILL", Const, 0},
    -		{"OFIOGETBMAP", Const, 1},
    -		{"OID_PKIX_KP_SERVER_AUTH", Var, 0},
    -		{"OID_SERVER_GATED_CRYPTO", Var, 0},
    -		{"OID_SGC_NETSCAPE", Var, 0},
    -		{"OLCUC", Const, 0},
    -		{"ONLCR", Const, 0},
    -		{"ONLRET", Const, 0},
    -		{"ONOCR", Const, 0},
    -		{"ONOEOT", Const, 1},
    -		{"OPEN_ALWAYS", Const, 0},
    -		{"OPEN_EXISTING", Const, 0},
    -		{"OPOST", Const, 0},
    -		{"O_ACCMODE", Const, 0},
    -		{"O_ALERT", Const, 0},
    -		{"O_ALT_IO", Const, 1},
    -		{"O_APPEND", Const, 0},
    -		{"O_ASYNC", Const, 0},
    -		{"O_CLOEXEC", Const, 0},
    -		{"O_CREAT", Const, 0},
    -		{"O_DIRECT", Const, 0},
    -		{"O_DIRECTORY", Const, 0},
    -		{"O_DP_GETRAWENCRYPTED", Const, 16},
    -		{"O_DSYNC", Const, 0},
    -		{"O_EVTONLY", Const, 0},
    -		{"O_EXCL", Const, 0},
    -		{"O_EXEC", Const, 0},
    -		{"O_EXLOCK", Const, 0},
    -		{"O_FSYNC", Const, 0},
    -		{"O_LARGEFILE", Const, 0},
    -		{"O_NDELAY", Const, 0},
    -		{"O_NOATIME", Const, 0},
    -		{"O_NOCTTY", Const, 0},
    -		{"O_NOFOLLOW", Const, 0},
    -		{"O_NONBLOCK", Const, 0},
    -		{"O_NOSIGPIPE", Const, 1},
    -		{"O_POPUP", Const, 0},
    -		{"O_RDONLY", Const, 0},
    -		{"O_RDWR", Const, 0},
    -		{"O_RSYNC", Const, 0},
    -		{"O_SHLOCK", Const, 0},
    -		{"O_SYMLINK", Const, 0},
    -		{"O_SYNC", Const, 0},
    -		{"O_TRUNC", Const, 0},
    -		{"O_TTY_INIT", Const, 0},
    -		{"O_WRONLY", Const, 0},
    -		{"Open", Func, 0},
    -		{"OpenCurrentProcessToken", Func, 0},
    -		{"OpenProcess", Func, 0},
    -		{"OpenProcessToken", Func, 0},
    -		{"Openat", Func, 0},
    -		{"Overlapped", Type, 0},
    -		{"Overlapped.HEvent", Field, 0},
    -		{"Overlapped.Internal", Field, 0},
    -		{"Overlapped.InternalHigh", Field, 0},
    -		{"Overlapped.Offset", Field, 0},
    -		{"Overlapped.OffsetHigh", Field, 0},
    -		{"PACKET_ADD_MEMBERSHIP", Const, 0},
    -		{"PACKET_BROADCAST", Const, 0},
    -		{"PACKET_DROP_MEMBERSHIP", Const, 0},
    -		{"PACKET_FASTROUTE", Const, 0},
    -		{"PACKET_HOST", Const, 0},
    -		{"PACKET_LOOPBACK", Const, 0},
    -		{"PACKET_MR_ALLMULTI", Const, 0},
    -		{"PACKET_MR_MULTICAST", Const, 0},
    -		{"PACKET_MR_PROMISC", Const, 0},
    -		{"PACKET_MULTICAST", Const, 0},
    -		{"PACKET_OTHERHOST", Const, 0},
    -		{"PACKET_OUTGOING", Const, 0},
    -		{"PACKET_RECV_OUTPUT", Const, 0},
    -		{"PACKET_RX_RING", Const, 0},
    -		{"PACKET_STATISTICS", Const, 0},
    -		{"PAGE_EXECUTE_READ", Const, 0},
    -		{"PAGE_EXECUTE_READWRITE", Const, 0},
    -		{"PAGE_EXECUTE_WRITECOPY", Const, 0},
    -		{"PAGE_READONLY", Const, 0},
    -		{"PAGE_READWRITE", Const, 0},
    -		{"PAGE_WRITECOPY", Const, 0},
    -		{"PARENB", Const, 0},
    -		{"PARMRK", Const, 0},
    -		{"PARODD", Const, 0},
    -		{"PENDIN", Const, 0},
    -		{"PFL_HIDDEN", Const, 2},
    -		{"PFL_MATCHES_PROTOCOL_ZERO", Const, 2},
    -		{"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2},
    -		{"PFL_NETWORKDIRECT_PROVIDER", Const, 2},
    -		{"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2},
    -		{"PF_FLUSH", Const, 1},
    -		{"PKCS_7_ASN_ENCODING", Const, 0},
    -		{"PMC5_PIPELINE_FLUSH", Const, 1},
    -		{"PRIO_PGRP", Const, 2},
    -		{"PRIO_PROCESS", Const, 2},
    -		{"PRIO_USER", Const, 2},
    -		{"PRI_IOFLUSH", Const, 1},
    -		{"PROCESS_QUERY_INFORMATION", Const, 0},
    -		{"PROCESS_TERMINATE", Const, 2},
    -		{"PROT_EXEC", Const, 0},
    -		{"PROT_GROWSDOWN", Const, 0},
    -		{"PROT_GROWSUP", Const, 0},
    -		{"PROT_NONE", Const, 0},
    -		{"PROT_READ", Const, 0},
    -		{"PROT_WRITE", Const, 0},
    -		{"PROV_DH_SCHANNEL", Const, 0},
    -		{"PROV_DSS", Const, 0},
    -		{"PROV_DSS_DH", Const, 0},
    -		{"PROV_EC_ECDSA_FULL", Const, 0},
    -		{"PROV_EC_ECDSA_SIG", Const, 0},
    -		{"PROV_EC_ECNRA_FULL", Const, 0},
    -		{"PROV_EC_ECNRA_SIG", Const, 0},
    -		{"PROV_FORTEZZA", Const, 0},
    -		{"PROV_INTEL_SEC", Const, 0},
    -		{"PROV_MS_EXCHANGE", Const, 0},
    -		{"PROV_REPLACE_OWF", Const, 0},
    -		{"PROV_RNG", Const, 0},
    -		{"PROV_RSA_AES", Const, 0},
    -		{"PROV_RSA_FULL", Const, 0},
    -		{"PROV_RSA_SCHANNEL", Const, 0},
    -		{"PROV_RSA_SIG", Const, 0},
    -		{"PROV_SPYRUS_LYNKS", Const, 0},
    -		{"PROV_SSL", Const, 0},
    -		{"PR_CAPBSET_DROP", Const, 0},
    -		{"PR_CAPBSET_READ", Const, 0},
    -		{"PR_CLEAR_SECCOMP_FILTER", Const, 0},
    -		{"PR_ENDIAN_BIG", Const, 0},
    -		{"PR_ENDIAN_LITTLE", Const, 0},
    -		{"PR_ENDIAN_PPC_LITTLE", Const, 0},
    -		{"PR_FPEMU_NOPRINT", Const, 0},
    -		{"PR_FPEMU_SIGFPE", Const, 0},
    -		{"PR_FP_EXC_ASYNC", Const, 0},
    -		{"PR_FP_EXC_DISABLED", Const, 0},
    -		{"PR_FP_EXC_DIV", Const, 0},
    -		{"PR_FP_EXC_INV", Const, 0},
    -		{"PR_FP_EXC_NONRECOV", Const, 0},
    -		{"PR_FP_EXC_OVF", Const, 0},
    -		{"PR_FP_EXC_PRECISE", Const, 0},
    -		{"PR_FP_EXC_RES", Const, 0},
    -		{"PR_FP_EXC_SW_ENABLE", Const, 0},
    -		{"PR_FP_EXC_UND", Const, 0},
    -		{"PR_GET_DUMPABLE", Const, 0},
    -		{"PR_GET_ENDIAN", Const, 0},
    -		{"PR_GET_FPEMU", Const, 0},
    -		{"PR_GET_FPEXC", Const, 0},
    -		{"PR_GET_KEEPCAPS", Const, 0},
    -		{"PR_GET_NAME", Const, 0},
    -		{"PR_GET_PDEATHSIG", Const, 0},
    -		{"PR_GET_SECCOMP", Const, 0},
    -		{"PR_GET_SECCOMP_FILTER", Const, 0},
    -		{"PR_GET_SECUREBITS", Const, 0},
    -		{"PR_GET_TIMERSLACK", Const, 0},
    -		{"PR_GET_TIMING", Const, 0},
    -		{"PR_GET_TSC", Const, 0},
    -		{"PR_GET_UNALIGN", Const, 0},
    -		{"PR_MCE_KILL", Const, 0},
    -		{"PR_MCE_KILL_CLEAR", Const, 0},
    -		{"PR_MCE_KILL_DEFAULT", Const, 0},
    -		{"PR_MCE_KILL_EARLY", Const, 0},
    -		{"PR_MCE_KILL_GET", Const, 0},
    -		{"PR_MCE_KILL_LATE", Const, 0},
    -		{"PR_MCE_KILL_SET", Const, 0},
    -		{"PR_SECCOMP_FILTER_EVENT", Const, 0},
    -		{"PR_SECCOMP_FILTER_SYSCALL", Const, 0},
    -		{"PR_SET_DUMPABLE", Const, 0},
    -		{"PR_SET_ENDIAN", Const, 0},
    -		{"PR_SET_FPEMU", Const, 0},
    -		{"PR_SET_FPEXC", Const, 0},
    -		{"PR_SET_KEEPCAPS", Const, 0},
    -		{"PR_SET_NAME", Const, 0},
    -		{"PR_SET_PDEATHSIG", Const, 0},
    -		{"PR_SET_PTRACER", Const, 0},
    -		{"PR_SET_SECCOMP", Const, 0},
    -		{"PR_SET_SECCOMP_FILTER", Const, 0},
    -		{"PR_SET_SECUREBITS", Const, 0},
    -		{"PR_SET_TIMERSLACK", Const, 0},
    -		{"PR_SET_TIMING", Const, 0},
    -		{"PR_SET_TSC", Const, 0},
    -		{"PR_SET_UNALIGN", Const, 0},
    -		{"PR_TASK_PERF_EVENTS_DISABLE", Const, 0},
    -		{"PR_TASK_PERF_EVENTS_ENABLE", Const, 0},
    -		{"PR_TIMING_STATISTICAL", Const, 0},
    -		{"PR_TIMING_TIMESTAMP", Const, 0},
    -		{"PR_TSC_ENABLE", Const, 0},
    -		{"PR_TSC_SIGSEGV", Const, 0},
    -		{"PR_UNALIGN_NOPRINT", Const, 0},
    -		{"PR_UNALIGN_SIGBUS", Const, 0},
    -		{"PTRACE_ARCH_PRCTL", Const, 0},
    -		{"PTRACE_ATTACH", Const, 0},
    -		{"PTRACE_CONT", Const, 0},
    -		{"PTRACE_DETACH", Const, 0},
    -		{"PTRACE_EVENT_CLONE", Const, 0},
    -		{"PTRACE_EVENT_EXEC", Const, 0},
    -		{"PTRACE_EVENT_EXIT", Const, 0},
    -		{"PTRACE_EVENT_FORK", Const, 0},
    -		{"PTRACE_EVENT_VFORK", Const, 0},
    -		{"PTRACE_EVENT_VFORK_DONE", Const, 0},
    -		{"PTRACE_GETCRUNCHREGS", Const, 0},
    -		{"PTRACE_GETEVENTMSG", Const, 0},
    -		{"PTRACE_GETFPREGS", Const, 0},
    -		{"PTRACE_GETFPXREGS", Const, 0},
    -		{"PTRACE_GETHBPREGS", Const, 0},
    -		{"PTRACE_GETREGS", Const, 0},
    -		{"PTRACE_GETREGSET", Const, 0},
    -		{"PTRACE_GETSIGINFO", Const, 0},
    -		{"PTRACE_GETVFPREGS", Const, 0},
    -		{"PTRACE_GETWMMXREGS", Const, 0},
    -		{"PTRACE_GET_THREAD_AREA", Const, 0},
    -		{"PTRACE_KILL", Const, 0},
    -		{"PTRACE_OLDSETOPTIONS", Const, 0},
    -		{"PTRACE_O_MASK", Const, 0},
    -		{"PTRACE_O_TRACECLONE", Const, 0},
    -		{"PTRACE_O_TRACEEXEC", Const, 0},
    -		{"PTRACE_O_TRACEEXIT", Const, 0},
    -		{"PTRACE_O_TRACEFORK", Const, 0},
    -		{"PTRACE_O_TRACESYSGOOD", Const, 0},
    -		{"PTRACE_O_TRACEVFORK", Const, 0},
    -		{"PTRACE_O_TRACEVFORKDONE", Const, 0},
    -		{"PTRACE_PEEKDATA", Const, 0},
    -		{"PTRACE_PEEKTEXT", Const, 0},
    -		{"PTRACE_PEEKUSR", Const, 0},
    -		{"PTRACE_POKEDATA", Const, 0},
    -		{"PTRACE_POKETEXT", Const, 0},
    -		{"PTRACE_POKEUSR", Const, 0},
    -		{"PTRACE_SETCRUNCHREGS", Const, 0},
    -		{"PTRACE_SETFPREGS", Const, 0},
    -		{"PTRACE_SETFPXREGS", Const, 0},
    -		{"PTRACE_SETHBPREGS", Const, 0},
    -		{"PTRACE_SETOPTIONS", Const, 0},
    -		{"PTRACE_SETREGS", Const, 0},
    -		{"PTRACE_SETREGSET", Const, 0},
    -		{"PTRACE_SETSIGINFO", Const, 0},
    -		{"PTRACE_SETVFPREGS", Const, 0},
    -		{"PTRACE_SETWMMXREGS", Const, 0},
    -		{"PTRACE_SET_SYSCALL", Const, 0},
    -		{"PTRACE_SET_THREAD_AREA", Const, 0},
    -		{"PTRACE_SINGLEBLOCK", Const, 0},
    -		{"PTRACE_SINGLESTEP", Const, 0},
    -		{"PTRACE_SYSCALL", Const, 0},
    -		{"PTRACE_SYSEMU", Const, 0},
    -		{"PTRACE_SYSEMU_SINGLESTEP", Const, 0},
    -		{"PTRACE_TRACEME", Const, 0},
    -		{"PT_ATTACH", Const, 0},
    -		{"PT_ATTACHEXC", Const, 0},
    -		{"PT_CONTINUE", Const, 0},
    -		{"PT_DATA_ADDR", Const, 0},
    -		{"PT_DENY_ATTACH", Const, 0},
    -		{"PT_DETACH", Const, 0},
    -		{"PT_FIRSTMACH", Const, 0},
    -		{"PT_FORCEQUOTA", Const, 0},
    -		{"PT_KILL", Const, 0},
    -		{"PT_MASK", Const, 1},
    -		{"PT_READ_D", Const, 0},
    -		{"PT_READ_I", Const, 0},
    -		{"PT_READ_U", Const, 0},
    -		{"PT_SIGEXC", Const, 0},
    -		{"PT_STEP", Const, 0},
    -		{"PT_TEXT_ADDR", Const, 0},
    -		{"PT_TEXT_END_ADDR", Const, 0},
    -		{"PT_THUPDATE", Const, 0},
    -		{"PT_TRACE_ME", Const, 0},
    -		{"PT_WRITE_D", Const, 0},
    -		{"PT_WRITE_I", Const, 0},
    -		{"PT_WRITE_U", Const, 0},
    -		{"ParseDirent", Func, 0},
    -		{"ParseNetlinkMessage", Func, 0},
    -		{"ParseNetlinkRouteAttr", Func, 0},
    -		{"ParseRoutingMessage", Func, 0},
    -		{"ParseRoutingSockaddr", Func, 0},
    -		{"ParseSocketControlMessage", Func, 0},
    -		{"ParseUnixCredentials", Func, 0},
    -		{"ParseUnixRights", Func, 0},
    -		{"PathMax", Const, 0},
    -		{"Pathconf", Func, 0},
    -		{"Pause", Func, 0},
    -		{"Pipe", Func, 0},
    -		{"Pipe2", Func, 1},
    -		{"PivotRoot", Func, 0},
    -		{"Pointer", Type, 11},
    -		{"PostQueuedCompletionStatus", Func, 0},
    -		{"Pread", Func, 0},
    -		{"Proc", Type, 0},
    -		{"Proc.Dll", Field, 0},
    -		{"Proc.Name", Field, 0},
    -		{"ProcAttr", Type, 0},
    -		{"ProcAttr.Dir", Field, 0},
    -		{"ProcAttr.Env", Field, 0},
    -		{"ProcAttr.Files", Field, 0},
    -		{"ProcAttr.Sys", Field, 0},
    -		{"Process32First", Func, 4},
    -		{"Process32Next", Func, 4},
    -		{"ProcessEntry32", Type, 4},
    -		{"ProcessEntry32.DefaultHeapID", Field, 4},
    -		{"ProcessEntry32.ExeFile", Field, 4},
    -		{"ProcessEntry32.Flags", Field, 4},
    -		{"ProcessEntry32.ModuleID", Field, 4},
    -		{"ProcessEntry32.ParentProcessID", Field, 4},
    -		{"ProcessEntry32.PriClassBase", Field, 4},
    -		{"ProcessEntry32.ProcessID", Field, 4},
    -		{"ProcessEntry32.Size", Field, 4},
    -		{"ProcessEntry32.Threads", Field, 4},
    -		{"ProcessEntry32.Usage", Field, 4},
    -		{"ProcessInformation", Type, 0},
    -		{"ProcessInformation.Process", Field, 0},
    -		{"ProcessInformation.ProcessId", Field, 0},
    -		{"ProcessInformation.Thread", Field, 0},
    -		{"ProcessInformation.ThreadId", Field, 0},
    -		{"Protoent", Type, 0},
    -		{"Protoent.Aliases", Field, 0},
    -		{"Protoent.Name", Field, 0},
    -		{"Protoent.Proto", Field, 0},
    -		{"PtraceAttach", Func, 0},
    -		{"PtraceCont", Func, 0},
    -		{"PtraceDetach", Func, 0},
    -		{"PtraceGetEventMsg", Func, 0},
    -		{"PtraceGetRegs", Func, 0},
    -		{"PtracePeekData", Func, 0},
    -		{"PtracePeekText", Func, 0},
    -		{"PtracePokeData", Func, 0},
    -		{"PtracePokeText", Func, 0},
    -		{"PtraceRegs", Type, 0},
    -		{"PtraceRegs.Cs", Field, 0},
    -		{"PtraceRegs.Ds", Field, 0},
    -		{"PtraceRegs.Eax", Field, 0},
    -		{"PtraceRegs.Ebp", Field, 0},
    -		{"PtraceRegs.Ebx", Field, 0},
    -		{"PtraceRegs.Ecx", Field, 0},
    -		{"PtraceRegs.Edi", Field, 0},
    -		{"PtraceRegs.Edx", Field, 0},
    -		{"PtraceRegs.Eflags", Field, 0},
    -		{"PtraceRegs.Eip", Field, 0},
    -		{"PtraceRegs.Es", Field, 0},
    -		{"PtraceRegs.Esi", Field, 0},
    -		{"PtraceRegs.Esp", Field, 0},
    -		{"PtraceRegs.Fs", Field, 0},
    -		{"PtraceRegs.Fs_base", Field, 0},
    -		{"PtraceRegs.Gs", Field, 0},
    -		{"PtraceRegs.Gs_base", Field, 0},
    -		{"PtraceRegs.Orig_eax", Field, 0},
    -		{"PtraceRegs.Orig_rax", Field, 0},
    -		{"PtraceRegs.R10", Field, 0},
    -		{"PtraceRegs.R11", Field, 0},
    -		{"PtraceRegs.R12", Field, 0},
    -		{"PtraceRegs.R13", Field, 0},
    -		{"PtraceRegs.R14", Field, 0},
    -		{"PtraceRegs.R15", Field, 0},
    -		{"PtraceRegs.R8", Field, 0},
    -		{"PtraceRegs.R9", Field, 0},
    -		{"PtraceRegs.Rax", Field, 0},
    -		{"PtraceRegs.Rbp", Field, 0},
    -		{"PtraceRegs.Rbx", Field, 0},
    -		{"PtraceRegs.Rcx", Field, 0},
    -		{"PtraceRegs.Rdi", Field, 0},
    -		{"PtraceRegs.Rdx", Field, 0},
    -		{"PtraceRegs.Rip", Field, 0},
    -		{"PtraceRegs.Rsi", Field, 0},
    -		{"PtraceRegs.Rsp", Field, 0},
    -		{"PtraceRegs.Ss", Field, 0},
    -		{"PtraceRegs.Uregs", Field, 0},
    -		{"PtraceRegs.Xcs", Field, 0},
    -		{"PtraceRegs.Xds", Field, 0},
    -		{"PtraceRegs.Xes", Field, 0},
    -		{"PtraceRegs.Xfs", Field, 0},
    -		{"PtraceRegs.Xgs", Field, 0},
    -		{"PtraceRegs.Xss", Field, 0},
    -		{"PtraceSetOptions", Func, 0},
    -		{"PtraceSetRegs", Func, 0},
    -		{"PtraceSingleStep", Func, 0},
    -		{"PtraceSyscall", Func, 1},
    -		{"Pwrite", Func, 0},
    -		{"REG_BINARY", Const, 0},
    -		{"REG_DWORD", Const, 0},
    -		{"REG_DWORD_BIG_ENDIAN", Const, 0},
    -		{"REG_DWORD_LITTLE_ENDIAN", Const, 0},
    -		{"REG_EXPAND_SZ", Const, 0},
    -		{"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0},
    -		{"REG_LINK", Const, 0},
    -		{"REG_MULTI_SZ", Const, 0},
    -		{"REG_NONE", Const, 0},
    -		{"REG_QWORD", Const, 0},
    -		{"REG_QWORD_LITTLE_ENDIAN", Const, 0},
    -		{"REG_RESOURCE_LIST", Const, 0},
    -		{"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0},
    -		{"REG_SZ", Const, 0},
    -		{"RLIMIT_AS", Const, 0},
    -		{"RLIMIT_CORE", Const, 0},
    -		{"RLIMIT_CPU", Const, 0},
    -		{"RLIMIT_CPU_USAGE_MONITOR", Const, 16},
    -		{"RLIMIT_DATA", Const, 0},
    -		{"RLIMIT_FSIZE", Const, 0},
    -		{"RLIMIT_NOFILE", Const, 0},
    -		{"RLIMIT_STACK", Const, 0},
    -		{"RLIM_INFINITY", Const, 0},
    -		{"RTAX_ADVMSS", Const, 0},
    -		{"RTAX_AUTHOR", Const, 0},
    -		{"RTAX_BRD", Const, 0},
    -		{"RTAX_CWND", Const, 0},
    -		{"RTAX_DST", Const, 0},
    -		{"RTAX_FEATURES", Const, 0},
    -		{"RTAX_FEATURE_ALLFRAG", Const, 0},
    -		{"RTAX_FEATURE_ECN", Const, 0},
    -		{"RTAX_FEATURE_SACK", Const, 0},
    -		{"RTAX_FEATURE_TIMESTAMP", Const, 0},
    -		{"RTAX_GATEWAY", Const, 0},
    -		{"RTAX_GENMASK", Const, 0},
    -		{"RTAX_HOPLIMIT", Const, 0},
    -		{"RTAX_IFA", Const, 0},
    -		{"RTAX_IFP", Const, 0},
    -		{"RTAX_INITCWND", Const, 0},
    -		{"RTAX_INITRWND", Const, 0},
    -		{"RTAX_LABEL", Const, 1},
    -		{"RTAX_LOCK", Const, 0},
    -		{"RTAX_MAX", Const, 0},
    -		{"RTAX_MTU", Const, 0},
    -		{"RTAX_NETMASK", Const, 0},
    -		{"RTAX_REORDERING", Const, 0},
    -		{"RTAX_RTO_MIN", Const, 0},
    -		{"RTAX_RTT", Const, 0},
    -		{"RTAX_RTTVAR", Const, 0},
    -		{"RTAX_SRC", Const, 1},
    -		{"RTAX_SRCMASK", Const, 1},
    -		{"RTAX_SSTHRESH", Const, 0},
    -		{"RTAX_TAG", Const, 1},
    -		{"RTAX_UNSPEC", Const, 0},
    -		{"RTAX_WINDOW", Const, 0},
    -		{"RTA_ALIGNTO", Const, 0},
    -		{"RTA_AUTHOR", Const, 0},
    -		{"RTA_BRD", Const, 0},
    -		{"RTA_CACHEINFO", Const, 0},
    -		{"RTA_DST", Const, 0},
    -		{"RTA_FLOW", Const, 0},
    -		{"RTA_GATEWAY", Const, 0},
    -		{"RTA_GENMASK", Const, 0},
    -		{"RTA_IFA", Const, 0},
    -		{"RTA_IFP", Const, 0},
    -		{"RTA_IIF", Const, 0},
    -		{"RTA_LABEL", Const, 1},
    -		{"RTA_MAX", Const, 0},
    -		{"RTA_METRICS", Const, 0},
    -		{"RTA_MULTIPATH", Const, 0},
    -		{"RTA_NETMASK", Const, 0},
    -		{"RTA_OIF", Const, 0},
    -		{"RTA_PREFSRC", Const, 0},
    -		{"RTA_PRIORITY", Const, 0},
    -		{"RTA_SRC", Const, 0},
    -		{"RTA_SRCMASK", Const, 1},
    -		{"RTA_TABLE", Const, 0},
    -		{"RTA_TAG", Const, 1},
    -		{"RTA_UNSPEC", Const, 0},
    -		{"RTCF_DIRECTSRC", Const, 0},
    -		{"RTCF_DOREDIRECT", Const, 0},
    -		{"RTCF_LOG", Const, 0},
    -		{"RTCF_MASQ", Const, 0},
    -		{"RTCF_NAT", Const, 0},
    -		{"RTCF_VALVE", Const, 0},
    -		{"RTF_ADDRCLASSMASK", Const, 0},
    -		{"RTF_ADDRCONF", Const, 0},
    -		{"RTF_ALLONLINK", Const, 0},
    -		{"RTF_ANNOUNCE", Const, 1},
    -		{"RTF_BLACKHOLE", Const, 0},
    -		{"RTF_BROADCAST", Const, 0},
    -		{"RTF_CACHE", Const, 0},
    -		{"RTF_CLONED", Const, 1},
    -		{"RTF_CLONING", Const, 0},
    -		{"RTF_CONDEMNED", Const, 0},
    -		{"RTF_DEFAULT", Const, 0},
    -		{"RTF_DELCLONE", Const, 0},
    -		{"RTF_DONE", Const, 0},
    -		{"RTF_DYNAMIC", Const, 0},
    -		{"RTF_FLOW", Const, 0},
    -		{"RTF_FMASK", Const, 0},
    -		{"RTF_GATEWAY", Const, 0},
    -		{"RTF_GWFLAG_COMPAT", Const, 3},
    -		{"RTF_HOST", Const, 0},
    -		{"RTF_IFREF", Const, 0},
    -		{"RTF_IFSCOPE", Const, 0},
    -		{"RTF_INTERFACE", Const, 0},
    -		{"RTF_IRTT", Const, 0},
    -		{"RTF_LINKRT", Const, 0},
    -		{"RTF_LLDATA", Const, 0},
    -		{"RTF_LLINFO", Const, 0},
    -		{"RTF_LOCAL", Const, 0},
    -		{"RTF_MASK", Const, 1},
    -		{"RTF_MODIFIED", Const, 0},
    -		{"RTF_MPATH", Const, 1},
    -		{"RTF_MPLS", Const, 1},
    -		{"RTF_MSS", Const, 0},
    -		{"RTF_MTU", Const, 0},
    -		{"RTF_MULTICAST", Const, 0},
    -		{"RTF_NAT", Const, 0},
    -		{"RTF_NOFORWARD", Const, 0},
    -		{"RTF_NONEXTHOP", Const, 0},
    -		{"RTF_NOPMTUDISC", Const, 0},
    -		{"RTF_PERMANENT_ARP", Const, 1},
    -		{"RTF_PINNED", Const, 0},
    -		{"RTF_POLICY", Const, 0},
    -		{"RTF_PRCLONING", Const, 0},
    -		{"RTF_PROTO1", Const, 0},
    -		{"RTF_PROTO2", Const, 0},
    -		{"RTF_PROTO3", Const, 0},
    -		{"RTF_PROXY", Const, 16},
    -		{"RTF_REINSTATE", Const, 0},
    -		{"RTF_REJECT", Const, 0},
    -		{"RTF_RNH_LOCKED", Const, 0},
    -		{"RTF_ROUTER", Const, 16},
    -		{"RTF_SOURCE", Const, 1},
    -		{"RTF_SRC", Const, 1},
    -		{"RTF_STATIC", Const, 0},
    -		{"RTF_STICKY", Const, 0},
    -		{"RTF_THROW", Const, 0},
    -		{"RTF_TUNNEL", Const, 1},
    -		{"RTF_UP", Const, 0},
    -		{"RTF_USETRAILERS", Const, 1},
    -		{"RTF_WASCLONED", Const, 0},
    -		{"RTF_WINDOW", Const, 0},
    -		{"RTF_XRESOLVE", Const, 0},
    -		{"RTM_ADD", Const, 0},
    -		{"RTM_BASE", Const, 0},
    -		{"RTM_CHANGE", Const, 0},
    -		{"RTM_CHGADDR", Const, 1},
    -		{"RTM_DELACTION", Const, 0},
    -		{"RTM_DELADDR", Const, 0},
    -		{"RTM_DELADDRLABEL", Const, 0},
    -		{"RTM_DELETE", Const, 0},
    -		{"RTM_DELLINK", Const, 0},
    -		{"RTM_DELMADDR", Const, 0},
    -		{"RTM_DELNEIGH", Const, 0},
    -		{"RTM_DELQDISC", Const, 0},
    -		{"RTM_DELROUTE", Const, 0},
    -		{"RTM_DELRULE", Const, 0},
    -		{"RTM_DELTCLASS", Const, 0},
    -		{"RTM_DELTFILTER", Const, 0},
    -		{"RTM_DESYNC", Const, 1},
    -		{"RTM_F_CLONED", Const, 0},
    -		{"RTM_F_EQUALIZE", Const, 0},
    -		{"RTM_F_NOTIFY", Const, 0},
    -		{"RTM_F_PREFIX", Const, 0},
    -		{"RTM_GET", Const, 0},
    -		{"RTM_GET2", Const, 0},
    -		{"RTM_GETACTION", Const, 0},
    -		{"RTM_GETADDR", Const, 0},
    -		{"RTM_GETADDRLABEL", Const, 0},
    -		{"RTM_GETANYCAST", Const, 0},
    -		{"RTM_GETDCB", Const, 0},
    -		{"RTM_GETLINK", Const, 0},
    -		{"RTM_GETMULTICAST", Const, 0},
    -		{"RTM_GETNEIGH", Const, 0},
    -		{"RTM_GETNEIGHTBL", Const, 0},
    -		{"RTM_GETQDISC", Const, 0},
    -		{"RTM_GETROUTE", Const, 0},
    -		{"RTM_GETRULE", Const, 0},
    -		{"RTM_GETTCLASS", Const, 0},
    -		{"RTM_GETTFILTER", Const, 0},
    -		{"RTM_IEEE80211", Const, 0},
    -		{"RTM_IFANNOUNCE", Const, 0},
    -		{"RTM_IFINFO", Const, 0},
    -		{"RTM_IFINFO2", Const, 0},
    -		{"RTM_LLINFO_UPD", Const, 1},
    -		{"RTM_LOCK", Const, 0},
    -		{"RTM_LOSING", Const, 0},
    -		{"RTM_MAX", Const, 0},
    -		{"RTM_MAXSIZE", Const, 1},
    -		{"RTM_MISS", Const, 0},
    -		{"RTM_NEWACTION", Const, 0},
    -		{"RTM_NEWADDR", Const, 0},
    -		{"RTM_NEWADDRLABEL", Const, 0},
    -		{"RTM_NEWLINK", Const, 0},
    -		{"RTM_NEWMADDR", Const, 0},
    -		{"RTM_NEWMADDR2", Const, 0},
    -		{"RTM_NEWNDUSEROPT", Const, 0},
    -		{"RTM_NEWNEIGH", Const, 0},
    -		{"RTM_NEWNEIGHTBL", Const, 0},
    -		{"RTM_NEWPREFIX", Const, 0},
    -		{"RTM_NEWQDISC", Const, 0},
    -		{"RTM_NEWROUTE", Const, 0},
    -		{"RTM_NEWRULE", Const, 0},
    -		{"RTM_NEWTCLASS", Const, 0},
    -		{"RTM_NEWTFILTER", Const, 0},
    -		{"RTM_NR_FAMILIES", Const, 0},
    -		{"RTM_NR_MSGTYPES", Const, 0},
    -		{"RTM_OIFINFO", Const, 1},
    -		{"RTM_OLDADD", Const, 0},
    -		{"RTM_OLDDEL", Const, 0},
    -		{"RTM_OOIFINFO", Const, 1},
    -		{"RTM_REDIRECT", Const, 0},
    -		{"RTM_RESOLVE", Const, 0},
    -		{"RTM_RTTUNIT", Const, 0},
    -		{"RTM_SETDCB", Const, 0},
    -		{"RTM_SETGATE", Const, 1},
    -		{"RTM_SETLINK", Const, 0},
    -		{"RTM_SETNEIGHTBL", Const, 0},
    -		{"RTM_VERSION", Const, 0},
    -		{"RTNH_ALIGNTO", Const, 0},
    -		{"RTNH_F_DEAD", Const, 0},
    -		{"RTNH_F_ONLINK", Const, 0},
    -		{"RTNH_F_PERVASIVE", Const, 0},
    -		{"RTNLGRP_IPV4_IFADDR", Const, 1},
    -		{"RTNLGRP_IPV4_MROUTE", Const, 1},
    -		{"RTNLGRP_IPV4_ROUTE", Const, 1},
    -		{"RTNLGRP_IPV4_RULE", Const, 1},
    -		{"RTNLGRP_IPV6_IFADDR", Const, 1},
    -		{"RTNLGRP_IPV6_IFINFO", Const, 1},
    -		{"RTNLGRP_IPV6_MROUTE", Const, 1},
    -		{"RTNLGRP_IPV6_PREFIX", Const, 1},
    -		{"RTNLGRP_IPV6_ROUTE", Const, 1},
    -		{"RTNLGRP_IPV6_RULE", Const, 1},
    -		{"RTNLGRP_LINK", Const, 1},
    -		{"RTNLGRP_ND_USEROPT", Const, 1},
    -		{"RTNLGRP_NEIGH", Const, 1},
    -		{"RTNLGRP_NONE", Const, 1},
    -		{"RTNLGRP_NOTIFY", Const, 1},
    -		{"RTNLGRP_TC", Const, 1},
    -		{"RTN_ANYCAST", Const, 0},
    -		{"RTN_BLACKHOLE", Const, 0},
    -		{"RTN_BROADCAST", Const, 0},
    -		{"RTN_LOCAL", Const, 0},
    -		{"RTN_MAX", Const, 0},
    -		{"RTN_MULTICAST", Const, 0},
    -		{"RTN_NAT", Const, 0},
    -		{"RTN_PROHIBIT", Const, 0},
    -		{"RTN_THROW", Const, 0},
    -		{"RTN_UNICAST", Const, 0},
    -		{"RTN_UNREACHABLE", Const, 0},
    -		{"RTN_UNSPEC", Const, 0},
    -		{"RTN_XRESOLVE", Const, 0},
    -		{"RTPROT_BIRD", Const, 0},
    -		{"RTPROT_BOOT", Const, 0},
    -		{"RTPROT_DHCP", Const, 0},
    -		{"RTPROT_DNROUTED", Const, 0},
    -		{"RTPROT_GATED", Const, 0},
    -		{"RTPROT_KERNEL", Const, 0},
    -		{"RTPROT_MRT", Const, 0},
    -		{"RTPROT_NTK", Const, 0},
    -		{"RTPROT_RA", Const, 0},
    -		{"RTPROT_REDIRECT", Const, 0},
    -		{"RTPROT_STATIC", Const, 0},
    -		{"RTPROT_UNSPEC", Const, 0},
    -		{"RTPROT_XORP", Const, 0},
    -		{"RTPROT_ZEBRA", Const, 0},
    -		{"RTV_EXPIRE", Const, 0},
    -		{"RTV_HOPCOUNT", Const, 0},
    -		{"RTV_MTU", Const, 0},
    -		{"RTV_RPIPE", Const, 0},
    -		{"RTV_RTT", Const, 0},
    -		{"RTV_RTTVAR", Const, 0},
    -		{"RTV_SPIPE", Const, 0},
    -		{"RTV_SSTHRESH", Const, 0},
    -		{"RTV_WEIGHT", Const, 0},
    -		{"RT_CACHING_CONTEXT", Const, 1},
    -		{"RT_CLASS_DEFAULT", Const, 0},
    -		{"RT_CLASS_LOCAL", Const, 0},
    -		{"RT_CLASS_MAIN", Const, 0},
    -		{"RT_CLASS_MAX", Const, 0},
    -		{"RT_CLASS_UNSPEC", Const, 0},
    -		{"RT_DEFAULT_FIB", Const, 1},
    -		{"RT_NORTREF", Const, 1},
    -		{"RT_SCOPE_HOST", Const, 0},
    -		{"RT_SCOPE_LINK", Const, 0},
    -		{"RT_SCOPE_NOWHERE", Const, 0},
    -		{"RT_SCOPE_SITE", Const, 0},
    -		{"RT_SCOPE_UNIVERSE", Const, 0},
    -		{"RT_TABLEID_MAX", Const, 1},
    -		{"RT_TABLE_COMPAT", Const, 0},
    -		{"RT_TABLE_DEFAULT", Const, 0},
    -		{"RT_TABLE_LOCAL", Const, 0},
    -		{"RT_TABLE_MAIN", Const, 0},
    -		{"RT_TABLE_MAX", Const, 0},
    -		{"RT_TABLE_UNSPEC", Const, 0},
    -		{"RUSAGE_CHILDREN", Const, 0},
    -		{"RUSAGE_SELF", Const, 0},
    -		{"RUSAGE_THREAD", Const, 0},
    -		{"Radvisory_t", Type, 0},
    -		{"Radvisory_t.Count", Field, 0},
    -		{"Radvisory_t.Offset", Field, 0},
    -		{"Radvisory_t.Pad_cgo_0", Field, 0},
    -		{"RawConn", Type, 9},
    -		{"RawSockaddr", Type, 0},
    -		{"RawSockaddr.Data", Field, 0},
    -		{"RawSockaddr.Family", Field, 0},
    -		{"RawSockaddr.Len", Field, 0},
    -		{"RawSockaddrAny", Type, 0},
    -		{"RawSockaddrAny.Addr", Field, 0},
    -		{"RawSockaddrAny.Pad", Field, 0},
    -		{"RawSockaddrDatalink", Type, 0},
    -		{"RawSockaddrDatalink.Alen", Field, 0},
    -		{"RawSockaddrDatalink.Data", Field, 0},
    -		{"RawSockaddrDatalink.Family", Field, 0},
    -		{"RawSockaddrDatalink.Index", Field, 0},
    -		{"RawSockaddrDatalink.Len", Field, 0},
    -		{"RawSockaddrDatalink.Nlen", Field, 0},
    -		{"RawSockaddrDatalink.Pad_cgo_0", Field, 2},
    -		{"RawSockaddrDatalink.Slen", Field, 0},
    -		{"RawSockaddrDatalink.Type", Field, 0},
    -		{"RawSockaddrInet4", Type, 0},
    -		{"RawSockaddrInet4.Addr", Field, 0},
    -		{"RawSockaddrInet4.Family", Field, 0},
    -		{"RawSockaddrInet4.Len", Field, 0},
    -		{"RawSockaddrInet4.Port", Field, 0},
    -		{"RawSockaddrInet4.Zero", Field, 0},
    -		{"RawSockaddrInet6", Type, 0},
    -		{"RawSockaddrInet6.Addr", Field, 0},
    -		{"RawSockaddrInet6.Family", Field, 0},
    -		{"RawSockaddrInet6.Flowinfo", Field, 0},
    -		{"RawSockaddrInet6.Len", Field, 0},
    -		{"RawSockaddrInet6.Port", Field, 0},
    -		{"RawSockaddrInet6.Scope_id", Field, 0},
    -		{"RawSockaddrLinklayer", Type, 0},
    -		{"RawSockaddrLinklayer.Addr", Field, 0},
    -		{"RawSockaddrLinklayer.Family", Field, 0},
    -		{"RawSockaddrLinklayer.Halen", Field, 0},
    -		{"RawSockaddrLinklayer.Hatype", Field, 0},
    -		{"RawSockaddrLinklayer.Ifindex", Field, 0},
    -		{"RawSockaddrLinklayer.Pkttype", Field, 0},
    -		{"RawSockaddrLinklayer.Protocol", Field, 0},
    -		{"RawSockaddrNetlink", Type, 0},
    -		{"RawSockaddrNetlink.Family", Field, 0},
    -		{"RawSockaddrNetlink.Groups", Field, 0},
    -		{"RawSockaddrNetlink.Pad", Field, 0},
    -		{"RawSockaddrNetlink.Pid", Field, 0},
    -		{"RawSockaddrUnix", Type, 0},
    -		{"RawSockaddrUnix.Family", Field, 0},
    -		{"RawSockaddrUnix.Len", Field, 0},
    -		{"RawSockaddrUnix.Pad_cgo_0", Field, 2},
    -		{"RawSockaddrUnix.Path", Field, 0},
    -		{"RawSyscall", Func, 0},
    -		{"RawSyscall6", Func, 0},
    -		{"Read", Func, 0},
    -		{"ReadConsole", Func, 1},
    -		{"ReadDirectoryChanges", Func, 0},
    -		{"ReadDirent", Func, 0},
    -		{"ReadFile", Func, 0},
    -		{"Readlink", Func, 0},
    -		{"Reboot", Func, 0},
    -		{"Recvfrom", Func, 0},
    -		{"Recvmsg", Func, 0},
    -		{"RegCloseKey", Func, 0},
    -		{"RegEnumKeyEx", Func, 0},
    -		{"RegOpenKeyEx", Func, 0},
    -		{"RegQueryInfoKey", Func, 0},
    -		{"RegQueryValueEx", Func, 0},
    -		{"RemoveDirectory", Func, 0},
    -		{"Removexattr", Func, 1},
    -		{"Rename", Func, 0},
    -		{"Renameat", Func, 0},
    -		{"Revoke", Func, 0},
    -		{"Rlimit", Type, 0},
    -		{"Rlimit.Cur", Field, 0},
    -		{"Rlimit.Max", Field, 0},
    -		{"Rmdir", Func, 0},
    -		{"RouteMessage", Type, 0},
    -		{"RouteMessage.Data", Field, 0},
    -		{"RouteMessage.Header", Field, 0},
    -		{"RouteRIB", Func, 0},
    -		{"RoutingMessage", Type, 0},
    -		{"RtAttr", Type, 0},
    -		{"RtAttr.Len", Field, 0},
    -		{"RtAttr.Type", Field, 0},
    -		{"RtGenmsg", Type, 0},
    -		{"RtGenmsg.Family", Field, 0},
    -		{"RtMetrics", Type, 0},
    -		{"RtMetrics.Expire", Field, 0},
    -		{"RtMetrics.Filler", Field, 0},
    -		{"RtMetrics.Hopcount", Field, 0},
    -		{"RtMetrics.Locks", Field, 0},
    -		{"RtMetrics.Mtu", Field, 0},
    -		{"RtMetrics.Pad", Field, 3},
    -		{"RtMetrics.Pksent", Field, 0},
    -		{"RtMetrics.Recvpipe", Field, 0},
    -		{"RtMetrics.Refcnt", Field, 2},
    -		{"RtMetrics.Rtt", Field, 0},
    -		{"RtMetrics.Rttvar", Field, 0},
    -		{"RtMetrics.Sendpipe", Field, 0},
    -		{"RtMetrics.Ssthresh", Field, 0},
    -		{"RtMetrics.Weight", Field, 0},
    -		{"RtMsg", Type, 0},
    -		{"RtMsg.Dst_len", Field, 0},
    -		{"RtMsg.Family", Field, 0},
    -		{"RtMsg.Flags", Field, 0},
    -		{"RtMsg.Protocol", Field, 0},
    -		{"RtMsg.Scope", Field, 0},
    -		{"RtMsg.Src_len", Field, 0},
    -		{"RtMsg.Table", Field, 0},
    -		{"RtMsg.Tos", Field, 0},
    -		{"RtMsg.Type", Field, 0},
    -		{"RtMsghdr", Type, 0},
    -		{"RtMsghdr.Addrs", Field, 0},
    -		{"RtMsghdr.Errno", Field, 0},
    -		{"RtMsghdr.Flags", Field, 0},
    -		{"RtMsghdr.Fmask", Field, 0},
    -		{"RtMsghdr.Hdrlen", Field, 2},
    -		{"RtMsghdr.Index", Field, 0},
    -		{"RtMsghdr.Inits", Field, 0},
    -		{"RtMsghdr.Mpls", Field, 2},
    -		{"RtMsghdr.Msglen", Field, 0},
    -		{"RtMsghdr.Pad_cgo_0", Field, 0},
    -		{"RtMsghdr.Pad_cgo_1", Field, 2},
    -		{"RtMsghdr.Pid", Field, 0},
    -		{"RtMsghdr.Priority", Field, 2},
    -		{"RtMsghdr.Rmx", Field, 0},
    -		{"RtMsghdr.Seq", Field, 0},
    -		{"RtMsghdr.Tableid", Field, 2},
    -		{"RtMsghdr.Type", Field, 0},
    -		{"RtMsghdr.Use", Field, 0},
    -		{"RtMsghdr.Version", Field, 0},
    -		{"RtNexthop", Type, 0},
    -		{"RtNexthop.Flags", Field, 0},
    -		{"RtNexthop.Hops", Field, 0},
    -		{"RtNexthop.Ifindex", Field, 0},
    -		{"RtNexthop.Len", Field, 0},
    -		{"Rusage", Type, 0},
    -		{"Rusage.CreationTime", Field, 0},
    -		{"Rusage.ExitTime", Field, 0},
    -		{"Rusage.Idrss", Field, 0},
    -		{"Rusage.Inblock", Field, 0},
    -		{"Rusage.Isrss", Field, 0},
    -		{"Rusage.Ixrss", Field, 0},
    -		{"Rusage.KernelTime", Field, 0},
    -		{"Rusage.Majflt", Field, 0},
    -		{"Rusage.Maxrss", Field, 0},
    -		{"Rusage.Minflt", Field, 0},
    -		{"Rusage.Msgrcv", Field, 0},
    -		{"Rusage.Msgsnd", Field, 0},
    -		{"Rusage.Nivcsw", Field, 0},
    -		{"Rusage.Nsignals", Field, 0},
    -		{"Rusage.Nswap", Field, 0},
    -		{"Rusage.Nvcsw", Field, 0},
    -		{"Rusage.Oublock", Field, 0},
    -		{"Rusage.Stime", Field, 0},
    -		{"Rusage.UserTime", Field, 0},
    -		{"Rusage.Utime", Field, 0},
    -		{"SCM_BINTIME", Const, 0},
    -		{"SCM_CREDENTIALS", Const, 0},
    -		{"SCM_CREDS", Const, 0},
    -		{"SCM_RIGHTS", Const, 0},
    -		{"SCM_TIMESTAMP", Const, 0},
    -		{"SCM_TIMESTAMPING", Const, 0},
    -		{"SCM_TIMESTAMPNS", Const, 0},
    -		{"SCM_TIMESTAMP_MONOTONIC", Const, 0},
    -		{"SHUT_RD", Const, 0},
    -		{"SHUT_RDWR", Const, 0},
    -		{"SHUT_WR", Const, 0},
    -		{"SID", Type, 0},
    -		{"SIDAndAttributes", Type, 0},
    -		{"SIDAndAttributes.Attributes", Field, 0},
    -		{"SIDAndAttributes.Sid", Field, 0},
    -		{"SIGABRT", Const, 0},
    -		{"SIGALRM", Const, 0},
    -		{"SIGBUS", Const, 0},
    -		{"SIGCHLD", Const, 0},
    -		{"SIGCLD", Const, 0},
    -		{"SIGCONT", Const, 0},
    -		{"SIGEMT", Const, 0},
    -		{"SIGFPE", Const, 0},
    -		{"SIGHUP", Const, 0},
    -		{"SIGILL", Const, 0},
    -		{"SIGINFO", Const, 0},
    -		{"SIGINT", Const, 0},
    -		{"SIGIO", Const, 0},
    -		{"SIGIOT", Const, 0},
    -		{"SIGKILL", Const, 0},
    -		{"SIGLIBRT", Const, 1},
    -		{"SIGLWP", Const, 0},
    -		{"SIGPIPE", Const, 0},
    -		{"SIGPOLL", Const, 0},
    -		{"SIGPROF", Const, 0},
    -		{"SIGPWR", Const, 0},
    -		{"SIGQUIT", Const, 0},
    -		{"SIGSEGV", Const, 0},
    -		{"SIGSTKFLT", Const, 0},
    -		{"SIGSTOP", Const, 0},
    -		{"SIGSYS", Const, 0},
    -		{"SIGTERM", Const, 0},
    -		{"SIGTHR", Const, 0},
    -		{"SIGTRAP", Const, 0},
    -		{"SIGTSTP", Const, 0},
    -		{"SIGTTIN", Const, 0},
    -		{"SIGTTOU", Const, 0},
    -		{"SIGUNUSED", Const, 0},
    -		{"SIGURG", Const, 0},
    -		{"SIGUSR1", Const, 0},
    -		{"SIGUSR2", Const, 0},
    -		{"SIGVTALRM", Const, 0},
    -		{"SIGWINCH", Const, 0},
    -		{"SIGXCPU", Const, 0},
    -		{"SIGXFSZ", Const, 0},
    -		{"SIOCADDDLCI", Const, 0},
    -		{"SIOCADDMULTI", Const, 0},
    -		{"SIOCADDRT", Const, 0},
    -		{"SIOCAIFADDR", Const, 0},
    -		{"SIOCAIFGROUP", Const, 0},
    -		{"SIOCALIFADDR", Const, 0},
    -		{"SIOCARPIPLL", Const, 0},
    -		{"SIOCATMARK", Const, 0},
    -		{"SIOCAUTOADDR", Const, 0},
    -		{"SIOCAUTONETMASK", Const, 0},
    -		{"SIOCBRDGADD", Const, 1},
    -		{"SIOCBRDGADDS", Const, 1},
    -		{"SIOCBRDGARL", Const, 1},
    -		{"SIOCBRDGDADDR", Const, 1},
    -		{"SIOCBRDGDEL", Const, 1},
    -		{"SIOCBRDGDELS", Const, 1},
    -		{"SIOCBRDGFLUSH", Const, 1},
    -		{"SIOCBRDGFRL", Const, 1},
    -		{"SIOCBRDGGCACHE", Const, 1},
    -		{"SIOCBRDGGFD", Const, 1},
    -		{"SIOCBRDGGHT", Const, 1},
    -		{"SIOCBRDGGIFFLGS", Const, 1},
    -		{"SIOCBRDGGMA", Const, 1},
    -		{"SIOCBRDGGPARAM", Const, 1},
    -		{"SIOCBRDGGPRI", Const, 1},
    -		{"SIOCBRDGGRL", Const, 1},
    -		{"SIOCBRDGGSIFS", Const, 1},
    -		{"SIOCBRDGGTO", Const, 1},
    -		{"SIOCBRDGIFS", Const, 1},
    -		{"SIOCBRDGRTS", Const, 1},
    -		{"SIOCBRDGSADDR", Const, 1},
    -		{"SIOCBRDGSCACHE", Const, 1},
    -		{"SIOCBRDGSFD", Const, 1},
    -		{"SIOCBRDGSHT", Const, 1},
    -		{"SIOCBRDGSIFCOST", Const, 1},
    -		{"SIOCBRDGSIFFLGS", Const, 1},
    -		{"SIOCBRDGSIFPRIO", Const, 1},
    -		{"SIOCBRDGSMA", Const, 1},
    -		{"SIOCBRDGSPRI", Const, 1},
    -		{"SIOCBRDGSPROTO", Const, 1},
    -		{"SIOCBRDGSTO", Const, 1},
    -		{"SIOCBRDGSTXHC", Const, 1},
    -		{"SIOCDARP", Const, 0},
    -		{"SIOCDELDLCI", Const, 0},
    -		{"SIOCDELMULTI", Const, 0},
    -		{"SIOCDELRT", Const, 0},
    -		{"SIOCDEVPRIVATE", Const, 0},
    -		{"SIOCDIFADDR", Const, 0},
    -		{"SIOCDIFGROUP", Const, 0},
    -		{"SIOCDIFPHYADDR", Const, 0},
    -		{"SIOCDLIFADDR", Const, 0},
    -		{"SIOCDRARP", Const, 0},
    -		{"SIOCGARP", Const, 0},
    -		{"SIOCGDRVSPEC", Const, 0},
    -		{"SIOCGETKALIVE", Const, 1},
    -		{"SIOCGETLABEL", Const, 1},
    -		{"SIOCGETPFLOW", Const, 1},
    -		{"SIOCGETPFSYNC", Const, 1},
    -		{"SIOCGETSGCNT", Const, 0},
    -		{"SIOCGETVIFCNT", Const, 0},
    -		{"SIOCGETVLAN", Const, 0},
    -		{"SIOCGHIWAT", Const, 0},
    -		{"SIOCGIFADDR", Const, 0},
    -		{"SIOCGIFADDRPREF", Const, 1},
    -		{"SIOCGIFALIAS", Const, 1},
    -		{"SIOCGIFALTMTU", Const, 0},
    -		{"SIOCGIFASYNCMAP", Const, 0},
    -		{"SIOCGIFBOND", Const, 0},
    -		{"SIOCGIFBR", Const, 0},
    -		{"SIOCGIFBRDADDR", Const, 0},
    -		{"SIOCGIFCAP", Const, 0},
    -		{"SIOCGIFCONF", Const, 0},
    -		{"SIOCGIFCOUNT", Const, 0},
    -		{"SIOCGIFDATA", Const, 1},
    -		{"SIOCGIFDESCR", Const, 0},
    -		{"SIOCGIFDEVMTU", Const, 0},
    -		{"SIOCGIFDLT", Const, 1},
    -		{"SIOCGIFDSTADDR", Const, 0},
    -		{"SIOCGIFENCAP", Const, 0},
    -		{"SIOCGIFFIB", Const, 1},
    -		{"SIOCGIFFLAGS", Const, 0},
    -		{"SIOCGIFGATTR", Const, 1},
    -		{"SIOCGIFGENERIC", Const, 0},
    -		{"SIOCGIFGMEMB", Const, 0},
    -		{"SIOCGIFGROUP", Const, 0},
    -		{"SIOCGIFHARDMTU", Const, 3},
    -		{"SIOCGIFHWADDR", Const, 0},
    -		{"SIOCGIFINDEX", Const, 0},
    -		{"SIOCGIFKPI", Const, 0},
    -		{"SIOCGIFMAC", Const, 0},
    -		{"SIOCGIFMAP", Const, 0},
    -		{"SIOCGIFMEDIA", Const, 0},
    -		{"SIOCGIFMEM", Const, 0},
    -		{"SIOCGIFMETRIC", Const, 0},
    -		{"SIOCGIFMTU", Const, 0},
    -		{"SIOCGIFNAME", Const, 0},
    -		{"SIOCGIFNETMASK", Const, 0},
    -		{"SIOCGIFPDSTADDR", Const, 0},
    -		{"SIOCGIFPFLAGS", Const, 0},
    -		{"SIOCGIFPHYS", Const, 0},
    -		{"SIOCGIFPRIORITY", Const, 1},
    -		{"SIOCGIFPSRCADDR", Const, 0},
    -		{"SIOCGIFRDOMAIN", Const, 1},
    -		{"SIOCGIFRTLABEL", Const, 1},
    -		{"SIOCGIFSLAVE", Const, 0},
    -		{"SIOCGIFSTATUS", Const, 0},
    -		{"SIOCGIFTIMESLOT", Const, 1},
    -		{"SIOCGIFTXQLEN", Const, 0},
    -		{"SIOCGIFVLAN", Const, 0},
    -		{"SIOCGIFWAKEFLAGS", Const, 0},
    -		{"SIOCGIFXFLAGS", Const, 1},
    -		{"SIOCGLIFADDR", Const, 0},
    -		{"SIOCGLIFPHYADDR", Const, 0},
    -		{"SIOCGLIFPHYRTABLE", Const, 1},
    -		{"SIOCGLIFPHYTTL", Const, 3},
    -		{"SIOCGLINKSTR", Const, 1},
    -		{"SIOCGLOWAT", Const, 0},
    -		{"SIOCGPGRP", Const, 0},
    -		{"SIOCGPRIVATE_0", Const, 0},
    -		{"SIOCGPRIVATE_1", Const, 0},
    -		{"SIOCGRARP", Const, 0},
    -		{"SIOCGSPPPPARAMS", Const, 3},
    -		{"SIOCGSTAMP", Const, 0},
    -		{"SIOCGSTAMPNS", Const, 0},
    -		{"SIOCGVH", Const, 1},
    -		{"SIOCGVNETID", Const, 3},
    -		{"SIOCIFCREATE", Const, 0},
    -		{"SIOCIFCREATE2", Const, 0},
    -		{"SIOCIFDESTROY", Const, 0},
    -		{"SIOCIFGCLONERS", Const, 0},
    -		{"SIOCINITIFADDR", Const, 1},
    -		{"SIOCPROTOPRIVATE", Const, 0},
    -		{"SIOCRSLVMULTI", Const, 0},
    -		{"SIOCRTMSG", Const, 0},
    -		{"SIOCSARP", Const, 0},
    -		{"SIOCSDRVSPEC", Const, 0},
    -		{"SIOCSETKALIVE", Const, 1},
    -		{"SIOCSETLABEL", Const, 1},
    -		{"SIOCSETPFLOW", Const, 1},
    -		{"SIOCSETPFSYNC", Const, 1},
    -		{"SIOCSETVLAN", Const, 0},
    -		{"SIOCSHIWAT", Const, 0},
    -		{"SIOCSIFADDR", Const, 0},
    -		{"SIOCSIFADDRPREF", Const, 1},
    -		{"SIOCSIFALTMTU", Const, 0},
    -		{"SIOCSIFASYNCMAP", Const, 0},
    -		{"SIOCSIFBOND", Const, 0},
    -		{"SIOCSIFBR", Const, 0},
    -		{"SIOCSIFBRDADDR", Const, 0},
    -		{"SIOCSIFCAP", Const, 0},
    -		{"SIOCSIFDESCR", Const, 0},
    -		{"SIOCSIFDSTADDR", Const, 0},
    -		{"SIOCSIFENCAP", Const, 0},
    -		{"SIOCSIFFIB", Const, 1},
    -		{"SIOCSIFFLAGS", Const, 0},
    -		{"SIOCSIFGATTR", Const, 1},
    -		{"SIOCSIFGENERIC", Const, 0},
    -		{"SIOCSIFHWADDR", Const, 0},
    -		{"SIOCSIFHWBROADCAST", Const, 0},
    -		{"SIOCSIFKPI", Const, 0},
    -		{"SIOCSIFLINK", Const, 0},
    -		{"SIOCSIFLLADDR", Const, 0},
    -		{"SIOCSIFMAC", Const, 0},
    -		{"SIOCSIFMAP", Const, 0},
    -		{"SIOCSIFMEDIA", Const, 0},
    -		{"SIOCSIFMEM", Const, 0},
    -		{"SIOCSIFMETRIC", Const, 0},
    -		{"SIOCSIFMTU", Const, 0},
    -		{"SIOCSIFNAME", Const, 0},
    -		{"SIOCSIFNETMASK", Const, 0},
    -		{"SIOCSIFPFLAGS", Const, 0},
    -		{"SIOCSIFPHYADDR", Const, 0},
    -		{"SIOCSIFPHYS", Const, 0},
    -		{"SIOCSIFPRIORITY", Const, 1},
    -		{"SIOCSIFRDOMAIN", Const, 1},
    -		{"SIOCSIFRTLABEL", Const, 1},
    -		{"SIOCSIFRVNET", Const, 0},
    -		{"SIOCSIFSLAVE", Const, 0},
    -		{"SIOCSIFTIMESLOT", Const, 1},
    -		{"SIOCSIFTXQLEN", Const, 0},
    -		{"SIOCSIFVLAN", Const, 0},
    -		{"SIOCSIFVNET", Const, 0},
    -		{"SIOCSIFXFLAGS", Const, 1},
    -		{"SIOCSLIFPHYADDR", Const, 0},
    -		{"SIOCSLIFPHYRTABLE", Const, 1},
    -		{"SIOCSLIFPHYTTL", Const, 3},
    -		{"SIOCSLINKSTR", Const, 1},
    -		{"SIOCSLOWAT", Const, 0},
    -		{"SIOCSPGRP", Const, 0},
    -		{"SIOCSRARP", Const, 0},
    -		{"SIOCSSPPPPARAMS", Const, 3},
    -		{"SIOCSVH", Const, 1},
    -		{"SIOCSVNETID", Const, 3},
    -		{"SIOCZIFDATA", Const, 1},
    -		{"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1},
    -		{"SIO_GET_INTERFACE_LIST", Const, 0},
    -		{"SIO_KEEPALIVE_VALS", Const, 3},
    -		{"SIO_UDP_CONNRESET", Const, 4},
    -		{"SOCK_CLOEXEC", Const, 0},
    -		{"SOCK_DCCP", Const, 0},
    -		{"SOCK_DGRAM", Const, 0},
    -		{"SOCK_FLAGS_MASK", Const, 1},
    -		{"SOCK_MAXADDRLEN", Const, 0},
    -		{"SOCK_NONBLOCK", Const, 0},
    -		{"SOCK_NOSIGPIPE", Const, 1},
    -		{"SOCK_PACKET", Const, 0},
    -		{"SOCK_RAW", Const, 0},
    -		{"SOCK_RDM", Const, 0},
    -		{"SOCK_SEQPACKET", Const, 0},
    -		{"SOCK_STREAM", Const, 0},
    -		{"SOL_AAL", Const, 0},
    -		{"SOL_ATM", Const, 0},
    -		{"SOL_DECNET", Const, 0},
    -		{"SOL_ICMPV6", Const, 0},
    -		{"SOL_IP", Const, 0},
    -		{"SOL_IPV6", Const, 0},
    -		{"SOL_IRDA", Const, 0},
    -		{"SOL_PACKET", Const, 0},
    -		{"SOL_RAW", Const, 0},
    -		{"SOL_SOCKET", Const, 0},
    -		{"SOL_TCP", Const, 0},
    -		{"SOL_X25", Const, 0},
    -		{"SOMAXCONN", Const, 0},
    -		{"SO_ACCEPTCONN", Const, 0},
    -		{"SO_ACCEPTFILTER", Const, 0},
    -		{"SO_ATTACH_FILTER", Const, 0},
    -		{"SO_BINDANY", Const, 1},
    -		{"SO_BINDTODEVICE", Const, 0},
    -		{"SO_BINTIME", Const, 0},
    -		{"SO_BROADCAST", Const, 0},
    -		{"SO_BSDCOMPAT", Const, 0},
    -		{"SO_DEBUG", Const, 0},
    -		{"SO_DETACH_FILTER", Const, 0},
    -		{"SO_DOMAIN", Const, 0},
    -		{"SO_DONTROUTE", Const, 0},
    -		{"SO_DONTTRUNC", Const, 0},
    -		{"SO_ERROR", Const, 0},
    -		{"SO_KEEPALIVE", Const, 0},
    -		{"SO_LABEL", Const, 0},
    -		{"SO_LINGER", Const, 0},
    -		{"SO_LINGER_SEC", Const, 0},
    -		{"SO_LISTENINCQLEN", Const, 0},
    -		{"SO_LISTENQLEN", Const, 0},
    -		{"SO_LISTENQLIMIT", Const, 0},
    -		{"SO_MARK", Const, 0},
    -		{"SO_NETPROC", Const, 1},
    -		{"SO_NKE", Const, 0},
    -		{"SO_NOADDRERR", Const, 0},
    -		{"SO_NOHEADER", Const, 1},
    -		{"SO_NOSIGPIPE", Const, 0},
    -		{"SO_NOTIFYCONFLICT", Const, 0},
    -		{"SO_NO_CHECK", Const, 0},
    -		{"SO_NO_DDP", Const, 0},
    -		{"SO_NO_OFFLOAD", Const, 0},
    -		{"SO_NP_EXTENSIONS", Const, 0},
    -		{"SO_NREAD", Const, 0},
    -		{"SO_NUMRCVPKT", Const, 16},
    -		{"SO_NWRITE", Const, 0},
    -		{"SO_OOBINLINE", Const, 0},
    -		{"SO_OVERFLOWED", Const, 1},
    -		{"SO_PASSCRED", Const, 0},
    -		{"SO_PASSSEC", Const, 0},
    -		{"SO_PEERCRED", Const, 0},
    -		{"SO_PEERLABEL", Const, 0},
    -		{"SO_PEERNAME", Const, 0},
    -		{"SO_PEERSEC", Const, 0},
    -		{"SO_PRIORITY", Const, 0},
    -		{"SO_PROTOCOL", Const, 0},
    -		{"SO_PROTOTYPE", Const, 1},
    -		{"SO_RANDOMPORT", Const, 0},
    -		{"SO_RCVBUF", Const, 0},
    -		{"SO_RCVBUFFORCE", Const, 0},
    -		{"SO_RCVLOWAT", Const, 0},
    -		{"SO_RCVTIMEO", Const, 0},
    -		{"SO_RESTRICTIONS", Const, 0},
    -		{"SO_RESTRICT_DENYIN", Const, 0},
    -		{"SO_RESTRICT_DENYOUT", Const, 0},
    -		{"SO_RESTRICT_DENYSET", Const, 0},
    -		{"SO_REUSEADDR", Const, 0},
    -		{"SO_REUSEPORT", Const, 0},
    -		{"SO_REUSESHAREUID", Const, 0},
    -		{"SO_RTABLE", Const, 1},
    -		{"SO_RXQ_OVFL", Const, 0},
    -		{"SO_SECURITY_AUTHENTICATION", Const, 0},
    -		{"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0},
    -		{"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0},
    -		{"SO_SETFIB", Const, 0},
    -		{"SO_SNDBUF", Const, 0},
    -		{"SO_SNDBUFFORCE", Const, 0},
    -		{"SO_SNDLOWAT", Const, 0},
    -		{"SO_SNDTIMEO", Const, 0},
    -		{"SO_SPLICE", Const, 1},
    -		{"SO_TIMESTAMP", Const, 0},
    -		{"SO_TIMESTAMPING", Const, 0},
    -		{"SO_TIMESTAMPNS", Const, 0},
    -		{"SO_TIMESTAMP_MONOTONIC", Const, 0},
    -		{"SO_TYPE", Const, 0},
    -		{"SO_UPCALLCLOSEWAIT", Const, 0},
    -		{"SO_UPDATE_ACCEPT_CONTEXT", Const, 0},
    -		{"SO_UPDATE_CONNECT_CONTEXT", Const, 1},
    -		{"SO_USELOOPBACK", Const, 0},
    -		{"SO_USER_COOKIE", Const, 1},
    -		{"SO_VENDOR", Const, 3},
    -		{"SO_WANTMORE", Const, 0},
    -		{"SO_WANTOOBFLAG", Const, 0},
    -		{"SSLExtraCertChainPolicyPara", Type, 0},
    -		{"SSLExtraCertChainPolicyPara.AuthType", Field, 0},
    -		{"SSLExtraCertChainPolicyPara.Checks", Field, 0},
    -		{"SSLExtraCertChainPolicyPara.ServerName", Field, 0},
    -		{"SSLExtraCertChainPolicyPara.Size", Field, 0},
    -		{"STANDARD_RIGHTS_ALL", Const, 0},
    -		{"STANDARD_RIGHTS_EXECUTE", Const, 0},
    -		{"STANDARD_RIGHTS_READ", Const, 0},
    -		{"STANDARD_RIGHTS_REQUIRED", Const, 0},
    -		{"STANDARD_RIGHTS_WRITE", Const, 0},
    -		{"STARTF_USESHOWWINDOW", Const, 0},
    -		{"STARTF_USESTDHANDLES", Const, 0},
    -		{"STD_ERROR_HANDLE", Const, 0},
    -		{"STD_INPUT_HANDLE", Const, 0},
    -		{"STD_OUTPUT_HANDLE", Const, 0},
    -		{"SUBLANG_ENGLISH_US", Const, 0},
    -		{"SW_FORCEMINIMIZE", Const, 0},
    -		{"SW_HIDE", Const, 0},
    -		{"SW_MAXIMIZE", Const, 0},
    -		{"SW_MINIMIZE", Const, 0},
    -		{"SW_NORMAL", Const, 0},
    -		{"SW_RESTORE", Const, 0},
    -		{"SW_SHOW", Const, 0},
    -		{"SW_SHOWDEFAULT", Const, 0},
    -		{"SW_SHOWMAXIMIZED", Const, 0},
    -		{"SW_SHOWMINIMIZED", Const, 0},
    -		{"SW_SHOWMINNOACTIVE", Const, 0},
    -		{"SW_SHOWNA", Const, 0},
    -		{"SW_SHOWNOACTIVATE", Const, 0},
    -		{"SW_SHOWNORMAL", Const, 0},
    -		{"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4},
    -		{"SYNCHRONIZE", Const, 0},
    -		{"SYSCTL_VERSION", Const, 1},
    -		{"SYSCTL_VERS_0", Const, 1},
    -		{"SYSCTL_VERS_1", Const, 1},
    -		{"SYSCTL_VERS_MASK", Const, 1},
    -		{"SYS_ABORT2", Const, 0},
    -		{"SYS_ACCEPT", Const, 0},
    -		{"SYS_ACCEPT4", Const, 0},
    -		{"SYS_ACCEPT_NOCANCEL", Const, 0},
    -		{"SYS_ACCESS", Const, 0},
    -		{"SYS_ACCESS_EXTENDED", Const, 0},
    -		{"SYS_ACCT", Const, 0},
    -		{"SYS_ADD_KEY", Const, 0},
    -		{"SYS_ADD_PROFIL", Const, 0},
    -		{"SYS_ADJFREQ", Const, 1},
    -		{"SYS_ADJTIME", Const, 0},
    -		{"SYS_ADJTIMEX", Const, 0},
    -		{"SYS_AFS_SYSCALL", Const, 0},
    -		{"SYS_AIO_CANCEL", Const, 0},
    -		{"SYS_AIO_ERROR", Const, 0},
    -		{"SYS_AIO_FSYNC", Const, 0},
    -		{"SYS_AIO_MLOCK", Const, 14},
    -		{"SYS_AIO_READ", Const, 0},
    -		{"SYS_AIO_RETURN", Const, 0},
    -		{"SYS_AIO_SUSPEND", Const, 0},
    -		{"SYS_AIO_SUSPEND_NOCANCEL", Const, 0},
    -		{"SYS_AIO_WAITCOMPLETE", Const, 14},
    -		{"SYS_AIO_WRITE", Const, 0},
    -		{"SYS_ALARM", Const, 0},
    -		{"SYS_ARCH_PRCTL", Const, 0},
    -		{"SYS_ARM_FADVISE64_64", Const, 0},
    -		{"SYS_ARM_SYNC_FILE_RANGE", Const, 0},
    -		{"SYS_ATGETMSG", Const, 0},
    -		{"SYS_ATPGETREQ", Const, 0},
    -		{"SYS_ATPGETRSP", Const, 0},
    -		{"SYS_ATPSNDREQ", Const, 0},
    -		{"SYS_ATPSNDRSP", Const, 0},
    -		{"SYS_ATPUTMSG", Const, 0},
    -		{"SYS_ATSOCKET", Const, 0},
    -		{"SYS_AUDIT", Const, 0},
    -		{"SYS_AUDITCTL", Const, 0},
    -		{"SYS_AUDITON", Const, 0},
    -		{"SYS_AUDIT_SESSION_JOIN", Const, 0},
    -		{"SYS_AUDIT_SESSION_PORT", Const, 0},
    -		{"SYS_AUDIT_SESSION_SELF", Const, 0},
    -		{"SYS_BDFLUSH", Const, 0},
    -		{"SYS_BIND", Const, 0},
    -		{"SYS_BINDAT", Const, 3},
    -		{"SYS_BREAK", Const, 0},
    -		{"SYS_BRK", Const, 0},
    -		{"SYS_BSDTHREAD_CREATE", Const, 0},
    -		{"SYS_BSDTHREAD_REGISTER", Const, 0},
    -		{"SYS_BSDTHREAD_TERMINATE", Const, 0},
    -		{"SYS_CAPGET", Const, 0},
    -		{"SYS_CAPSET", Const, 0},
    -		{"SYS_CAP_ENTER", Const, 0},
    -		{"SYS_CAP_FCNTLS_GET", Const, 1},
    -		{"SYS_CAP_FCNTLS_LIMIT", Const, 1},
    -		{"SYS_CAP_GETMODE", Const, 0},
    -		{"SYS_CAP_GETRIGHTS", Const, 0},
    -		{"SYS_CAP_IOCTLS_GET", Const, 1},
    -		{"SYS_CAP_IOCTLS_LIMIT", Const, 1},
    -		{"SYS_CAP_NEW", Const, 0},
    -		{"SYS_CAP_RIGHTS_GET", Const, 1},
    -		{"SYS_CAP_RIGHTS_LIMIT", Const, 1},
    -		{"SYS_CHDIR", Const, 0},
    -		{"SYS_CHFLAGS", Const, 0},
    -		{"SYS_CHFLAGSAT", Const, 3},
    -		{"SYS_CHMOD", Const, 0},
    -		{"SYS_CHMOD_EXTENDED", Const, 0},
    -		{"SYS_CHOWN", Const, 0},
    -		{"SYS_CHOWN32", Const, 0},
    -		{"SYS_CHROOT", Const, 0},
    -		{"SYS_CHUD", Const, 0},
    -		{"SYS_CLOCK_ADJTIME", Const, 0},
    -		{"SYS_CLOCK_GETCPUCLOCKID2", Const, 1},
    -		{"SYS_CLOCK_GETRES", Const, 0},
    -		{"SYS_CLOCK_GETTIME", Const, 0},
    -		{"SYS_CLOCK_NANOSLEEP", Const, 0},
    -		{"SYS_CLOCK_SETTIME", Const, 0},
    -		{"SYS_CLONE", Const, 0},
    -		{"SYS_CLOSE", Const, 0},
    -		{"SYS_CLOSEFROM", Const, 0},
    -		{"SYS_CLOSE_NOCANCEL", Const, 0},
    -		{"SYS_CONNECT", Const, 0},
    -		{"SYS_CONNECTAT", Const, 3},
    -		{"SYS_CONNECT_NOCANCEL", Const, 0},
    -		{"SYS_COPYFILE", Const, 0},
    -		{"SYS_CPUSET", Const, 0},
    -		{"SYS_CPUSET_GETAFFINITY", Const, 0},
    -		{"SYS_CPUSET_GETID", Const, 0},
    -		{"SYS_CPUSET_SETAFFINITY", Const, 0},
    -		{"SYS_CPUSET_SETID", Const, 0},
    -		{"SYS_CREAT", Const, 0},
    -		{"SYS_CREATE_MODULE", Const, 0},
    -		{"SYS_CSOPS", Const, 0},
    -		{"SYS_CSOPS_AUDITTOKEN", Const, 16},
    -		{"SYS_DELETE", Const, 0},
    -		{"SYS_DELETE_MODULE", Const, 0},
    -		{"SYS_DUP", Const, 0},
    -		{"SYS_DUP2", Const, 0},
    -		{"SYS_DUP3", Const, 0},
    -		{"SYS_EACCESS", Const, 0},
    -		{"SYS_EPOLL_CREATE", Const, 0},
    -		{"SYS_EPOLL_CREATE1", Const, 0},
    -		{"SYS_EPOLL_CTL", Const, 0},
    -		{"SYS_EPOLL_CTL_OLD", Const, 0},
    -		{"SYS_EPOLL_PWAIT", Const, 0},
    -		{"SYS_EPOLL_WAIT", Const, 0},
    -		{"SYS_EPOLL_WAIT_OLD", Const, 0},
    -		{"SYS_EVENTFD", Const, 0},
    -		{"SYS_EVENTFD2", Const, 0},
    -		{"SYS_EXCHANGEDATA", Const, 0},
    -		{"SYS_EXECVE", Const, 0},
    -		{"SYS_EXIT", Const, 0},
    -		{"SYS_EXIT_GROUP", Const, 0},
    -		{"SYS_EXTATTRCTL", Const, 0},
    -		{"SYS_EXTATTR_DELETE_FD", Const, 0},
    -		{"SYS_EXTATTR_DELETE_FILE", Const, 0},
    -		{"SYS_EXTATTR_DELETE_LINK", Const, 0},
    -		{"SYS_EXTATTR_GET_FD", Const, 0},
    -		{"SYS_EXTATTR_GET_FILE", Const, 0},
    -		{"SYS_EXTATTR_GET_LINK", Const, 0},
    -		{"SYS_EXTATTR_LIST_FD", Const, 0},
    -		{"SYS_EXTATTR_LIST_FILE", Const, 0},
    -		{"SYS_EXTATTR_LIST_LINK", Const, 0},
    -		{"SYS_EXTATTR_SET_FD", Const, 0},
    -		{"SYS_EXTATTR_SET_FILE", Const, 0},
    -		{"SYS_EXTATTR_SET_LINK", Const, 0},
    -		{"SYS_FACCESSAT", Const, 0},
    -		{"SYS_FADVISE64", Const, 0},
    -		{"SYS_FADVISE64_64", Const, 0},
    -		{"SYS_FALLOCATE", Const, 0},
    -		{"SYS_FANOTIFY_INIT", Const, 0},
    -		{"SYS_FANOTIFY_MARK", Const, 0},
    -		{"SYS_FCHDIR", Const, 0},
    -		{"SYS_FCHFLAGS", Const, 0},
    -		{"SYS_FCHMOD", Const, 0},
    -		{"SYS_FCHMODAT", Const, 0},
    -		{"SYS_FCHMOD_EXTENDED", Const, 0},
    -		{"SYS_FCHOWN", Const, 0},
    -		{"SYS_FCHOWN32", Const, 0},
    -		{"SYS_FCHOWNAT", Const, 0},
    -		{"SYS_FCHROOT", Const, 1},
    -		{"SYS_FCNTL", Const, 0},
    -		{"SYS_FCNTL64", Const, 0},
    -		{"SYS_FCNTL_NOCANCEL", Const, 0},
    -		{"SYS_FDATASYNC", Const, 0},
    -		{"SYS_FEXECVE", Const, 0},
    -		{"SYS_FFCLOCK_GETCOUNTER", Const, 0},
    -		{"SYS_FFCLOCK_GETESTIMATE", Const, 0},
    -		{"SYS_FFCLOCK_SETESTIMATE", Const, 0},
    -		{"SYS_FFSCTL", Const, 0},
    -		{"SYS_FGETATTRLIST", Const, 0},
    -		{"SYS_FGETXATTR", Const, 0},
    -		{"SYS_FHOPEN", Const, 0},
    -		{"SYS_FHSTAT", Const, 0},
    -		{"SYS_FHSTATFS", Const, 0},
    -		{"SYS_FILEPORT_MAKEFD", Const, 0},
    -		{"SYS_FILEPORT_MAKEPORT", Const, 0},
    -		{"SYS_FKTRACE", Const, 1},
    -		{"SYS_FLISTXATTR", Const, 0},
    -		{"SYS_FLOCK", Const, 0},
    -		{"SYS_FORK", Const, 0},
    -		{"SYS_FPATHCONF", Const, 0},
    -		{"SYS_FREEBSD6_FTRUNCATE", Const, 0},
    -		{"SYS_FREEBSD6_LSEEK", Const, 0},
    -		{"SYS_FREEBSD6_MMAP", Const, 0},
    -		{"SYS_FREEBSD6_PREAD", Const, 0},
    -		{"SYS_FREEBSD6_PWRITE", Const, 0},
    -		{"SYS_FREEBSD6_TRUNCATE", Const, 0},
    -		{"SYS_FREMOVEXATTR", Const, 0},
    -		{"SYS_FSCTL", Const, 0},
    -		{"SYS_FSETATTRLIST", Const, 0},
    -		{"SYS_FSETXATTR", Const, 0},
    -		{"SYS_FSGETPATH", Const, 0},
    -		{"SYS_FSTAT", Const, 0},
    -		{"SYS_FSTAT64", Const, 0},
    -		{"SYS_FSTAT64_EXTENDED", Const, 0},
    -		{"SYS_FSTATAT", Const, 0},
    -		{"SYS_FSTATAT64", Const, 0},
    -		{"SYS_FSTATFS", Const, 0},
    -		{"SYS_FSTATFS64", Const, 0},
    -		{"SYS_FSTATV", Const, 0},
    -		{"SYS_FSTATVFS1", Const, 1},
    -		{"SYS_FSTAT_EXTENDED", Const, 0},
    -		{"SYS_FSYNC", Const, 0},
    -		{"SYS_FSYNC_NOCANCEL", Const, 0},
    -		{"SYS_FSYNC_RANGE", Const, 1},
    -		{"SYS_FTIME", Const, 0},
    -		{"SYS_FTRUNCATE", Const, 0},
    -		{"SYS_FTRUNCATE64", Const, 0},
    -		{"SYS_FUTEX", Const, 0},
    -		{"SYS_FUTIMENS", Const, 1},
    -		{"SYS_FUTIMES", Const, 0},
    -		{"SYS_FUTIMESAT", Const, 0},
    -		{"SYS_GETATTRLIST", Const, 0},
    -		{"SYS_GETAUDIT", Const, 0},
    -		{"SYS_GETAUDIT_ADDR", Const, 0},
    -		{"SYS_GETAUID", Const, 0},
    -		{"SYS_GETCONTEXT", Const, 0},
    -		{"SYS_GETCPU", Const, 0},
    -		{"SYS_GETCWD", Const, 0},
    -		{"SYS_GETDENTS", Const, 0},
    -		{"SYS_GETDENTS64", Const, 0},
    -		{"SYS_GETDIRENTRIES", Const, 0},
    -		{"SYS_GETDIRENTRIES64", Const, 0},
    -		{"SYS_GETDIRENTRIESATTR", Const, 0},
    -		{"SYS_GETDTABLECOUNT", Const, 1},
    -		{"SYS_GETDTABLESIZE", Const, 0},
    -		{"SYS_GETEGID", Const, 0},
    -		{"SYS_GETEGID32", Const, 0},
    -		{"SYS_GETEUID", Const, 0},
    -		{"SYS_GETEUID32", Const, 0},
    -		{"SYS_GETFH", Const, 0},
    -		{"SYS_GETFSSTAT", Const, 0},
    -		{"SYS_GETFSSTAT64", Const, 0},
    -		{"SYS_GETGID", Const, 0},
    -		{"SYS_GETGID32", Const, 0},
    -		{"SYS_GETGROUPS", Const, 0},
    -		{"SYS_GETGROUPS32", Const, 0},
    -		{"SYS_GETHOSTUUID", Const, 0},
    -		{"SYS_GETITIMER", Const, 0},
    -		{"SYS_GETLCID", Const, 0},
    -		{"SYS_GETLOGIN", Const, 0},
    -		{"SYS_GETLOGINCLASS", Const, 0},
    -		{"SYS_GETPEERNAME", Const, 0},
    -		{"SYS_GETPGID", Const, 0},
    -		{"SYS_GETPGRP", Const, 0},
    -		{"SYS_GETPID", Const, 0},
    -		{"SYS_GETPMSG", Const, 0},
    -		{"SYS_GETPPID", Const, 0},
    -		{"SYS_GETPRIORITY", Const, 0},
    -		{"SYS_GETRESGID", Const, 0},
    -		{"SYS_GETRESGID32", Const, 0},
    -		{"SYS_GETRESUID", Const, 0},
    -		{"SYS_GETRESUID32", Const, 0},
    -		{"SYS_GETRLIMIT", Const, 0},
    -		{"SYS_GETRTABLE", Const, 1},
    -		{"SYS_GETRUSAGE", Const, 0},
    -		{"SYS_GETSGROUPS", Const, 0},
    -		{"SYS_GETSID", Const, 0},
    -		{"SYS_GETSOCKNAME", Const, 0},
    -		{"SYS_GETSOCKOPT", Const, 0},
    -		{"SYS_GETTHRID", Const, 1},
    -		{"SYS_GETTID", Const, 0},
    -		{"SYS_GETTIMEOFDAY", Const, 0},
    -		{"SYS_GETUID", Const, 0},
    -		{"SYS_GETUID32", Const, 0},
    -		{"SYS_GETVFSSTAT", Const, 1},
    -		{"SYS_GETWGROUPS", Const, 0},
    -		{"SYS_GETXATTR", Const, 0},
    -		{"SYS_GET_KERNEL_SYMS", Const, 0},
    -		{"SYS_GET_MEMPOLICY", Const, 0},
    -		{"SYS_GET_ROBUST_LIST", Const, 0},
    -		{"SYS_GET_THREAD_AREA", Const, 0},
    -		{"SYS_GSSD_SYSCALL", Const, 14},
    -		{"SYS_GTTY", Const, 0},
    -		{"SYS_IDENTITYSVC", Const, 0},
    -		{"SYS_IDLE", Const, 0},
    -		{"SYS_INITGROUPS", Const, 0},
    -		{"SYS_INIT_MODULE", Const, 0},
    -		{"SYS_INOTIFY_ADD_WATCH", Const, 0},
    -		{"SYS_INOTIFY_INIT", Const, 0},
    -		{"SYS_INOTIFY_INIT1", Const, 0},
    -		{"SYS_INOTIFY_RM_WATCH", Const, 0},
    -		{"SYS_IOCTL", Const, 0},
    -		{"SYS_IOPERM", Const, 0},
    -		{"SYS_IOPL", Const, 0},
    -		{"SYS_IOPOLICYSYS", Const, 0},
    -		{"SYS_IOPRIO_GET", Const, 0},
    -		{"SYS_IOPRIO_SET", Const, 0},
    -		{"SYS_IO_CANCEL", Const, 0},
    -		{"SYS_IO_DESTROY", Const, 0},
    -		{"SYS_IO_GETEVENTS", Const, 0},
    -		{"SYS_IO_SETUP", Const, 0},
    -		{"SYS_IO_SUBMIT", Const, 0},
    -		{"SYS_IPC", Const, 0},
    -		{"SYS_ISSETUGID", Const, 0},
    -		{"SYS_JAIL", Const, 0},
    -		{"SYS_JAIL_ATTACH", Const, 0},
    -		{"SYS_JAIL_GET", Const, 0},
    -		{"SYS_JAIL_REMOVE", Const, 0},
    -		{"SYS_JAIL_SET", Const, 0},
    -		{"SYS_KAS_INFO", Const, 16},
    -		{"SYS_KDEBUG_TRACE", Const, 0},
    -		{"SYS_KENV", Const, 0},
    -		{"SYS_KEVENT", Const, 0},
    -		{"SYS_KEVENT64", Const, 0},
    -		{"SYS_KEXEC_LOAD", Const, 0},
    -		{"SYS_KEYCTL", Const, 0},
    -		{"SYS_KILL", Const, 0},
    -		{"SYS_KLDFIND", Const, 0},
    -		{"SYS_KLDFIRSTMOD", Const, 0},
    -		{"SYS_KLDLOAD", Const, 0},
    -		{"SYS_KLDNEXT", Const, 0},
    -		{"SYS_KLDSTAT", Const, 0},
    -		{"SYS_KLDSYM", Const, 0},
    -		{"SYS_KLDUNLOAD", Const, 0},
    -		{"SYS_KLDUNLOADF", Const, 0},
    -		{"SYS_KMQ_NOTIFY", Const, 14},
    -		{"SYS_KMQ_OPEN", Const, 14},
    -		{"SYS_KMQ_SETATTR", Const, 14},
    -		{"SYS_KMQ_TIMEDRECEIVE", Const, 14},
    -		{"SYS_KMQ_TIMEDSEND", Const, 14},
    -		{"SYS_KMQ_UNLINK", Const, 14},
    -		{"SYS_KQUEUE", Const, 0},
    -		{"SYS_KQUEUE1", Const, 1},
    -		{"SYS_KSEM_CLOSE", Const, 14},
    -		{"SYS_KSEM_DESTROY", Const, 14},
    -		{"SYS_KSEM_GETVALUE", Const, 14},
    -		{"SYS_KSEM_INIT", Const, 14},
    -		{"SYS_KSEM_OPEN", Const, 14},
    -		{"SYS_KSEM_POST", Const, 14},
    -		{"SYS_KSEM_TIMEDWAIT", Const, 14},
    -		{"SYS_KSEM_TRYWAIT", Const, 14},
    -		{"SYS_KSEM_UNLINK", Const, 14},
    -		{"SYS_KSEM_WAIT", Const, 14},
    -		{"SYS_KTIMER_CREATE", Const, 0},
    -		{"SYS_KTIMER_DELETE", Const, 0},
    -		{"SYS_KTIMER_GETOVERRUN", Const, 0},
    -		{"SYS_KTIMER_GETTIME", Const, 0},
    -		{"SYS_KTIMER_SETTIME", Const, 0},
    -		{"SYS_KTRACE", Const, 0},
    -		{"SYS_LCHFLAGS", Const, 0},
    -		{"SYS_LCHMOD", Const, 0},
    -		{"SYS_LCHOWN", Const, 0},
    -		{"SYS_LCHOWN32", Const, 0},
    -		{"SYS_LEDGER", Const, 16},
    -		{"SYS_LGETFH", Const, 0},
    -		{"SYS_LGETXATTR", Const, 0},
    -		{"SYS_LINK", Const, 0},
    -		{"SYS_LINKAT", Const, 0},
    -		{"SYS_LIO_LISTIO", Const, 0},
    -		{"SYS_LISTEN", Const, 0},
    -		{"SYS_LISTXATTR", Const, 0},
    -		{"SYS_LLISTXATTR", Const, 0},
    -		{"SYS_LOCK", Const, 0},
    -		{"SYS_LOOKUP_DCOOKIE", Const, 0},
    -		{"SYS_LPATHCONF", Const, 0},
    -		{"SYS_LREMOVEXATTR", Const, 0},
    -		{"SYS_LSEEK", Const, 0},
    -		{"SYS_LSETXATTR", Const, 0},
    -		{"SYS_LSTAT", Const, 0},
    -		{"SYS_LSTAT64", Const, 0},
    -		{"SYS_LSTAT64_EXTENDED", Const, 0},
    -		{"SYS_LSTATV", Const, 0},
    -		{"SYS_LSTAT_EXTENDED", Const, 0},
    -		{"SYS_LUTIMES", Const, 0},
    -		{"SYS_MAC_SYSCALL", Const, 0},
    -		{"SYS_MADVISE", Const, 0},
    -		{"SYS_MADVISE1", Const, 0},
    -		{"SYS_MAXSYSCALL", Const, 0},
    -		{"SYS_MBIND", Const, 0},
    -		{"SYS_MIGRATE_PAGES", Const, 0},
    -		{"SYS_MINCORE", Const, 0},
    -		{"SYS_MINHERIT", Const, 0},
    -		{"SYS_MKCOMPLEX", Const, 0},
    -		{"SYS_MKDIR", Const, 0},
    -		{"SYS_MKDIRAT", Const, 0},
    -		{"SYS_MKDIR_EXTENDED", Const, 0},
    -		{"SYS_MKFIFO", Const, 0},
    -		{"SYS_MKFIFOAT", Const, 0},
    -		{"SYS_MKFIFO_EXTENDED", Const, 0},
    -		{"SYS_MKNOD", Const, 0},
    -		{"SYS_MKNODAT", Const, 0},
    -		{"SYS_MLOCK", Const, 0},
    -		{"SYS_MLOCKALL", Const, 0},
    -		{"SYS_MMAP", Const, 0},
    -		{"SYS_MMAP2", Const, 0},
    -		{"SYS_MODCTL", Const, 1},
    -		{"SYS_MODFIND", Const, 0},
    -		{"SYS_MODFNEXT", Const, 0},
    -		{"SYS_MODIFY_LDT", Const, 0},
    -		{"SYS_MODNEXT", Const, 0},
    -		{"SYS_MODSTAT", Const, 0},
    -		{"SYS_MODWATCH", Const, 0},
    -		{"SYS_MOUNT", Const, 0},
    -		{"SYS_MOVE_PAGES", Const, 0},
    -		{"SYS_MPROTECT", Const, 0},
    -		{"SYS_MPX", Const, 0},
    -		{"SYS_MQUERY", Const, 1},
    -		{"SYS_MQ_GETSETATTR", Const, 0},
    -		{"SYS_MQ_NOTIFY", Const, 0},
    -		{"SYS_MQ_OPEN", Const, 0},
    -		{"SYS_MQ_TIMEDRECEIVE", Const, 0},
    -		{"SYS_MQ_TIMEDSEND", Const, 0},
    -		{"SYS_MQ_UNLINK", Const, 0},
    -		{"SYS_MREMAP", Const, 0},
    -		{"SYS_MSGCTL", Const, 0},
    -		{"SYS_MSGGET", Const, 0},
    -		{"SYS_MSGRCV", Const, 0},
    -		{"SYS_MSGRCV_NOCANCEL", Const, 0},
    -		{"SYS_MSGSND", Const, 0},
    -		{"SYS_MSGSND_NOCANCEL", Const, 0},
    -		{"SYS_MSGSYS", Const, 0},
    -		{"SYS_MSYNC", Const, 0},
    -		{"SYS_MSYNC_NOCANCEL", Const, 0},
    -		{"SYS_MUNLOCK", Const, 0},
    -		{"SYS_MUNLOCKALL", Const, 0},
    -		{"SYS_MUNMAP", Const, 0},
    -		{"SYS_NAME_TO_HANDLE_AT", Const, 0},
    -		{"SYS_NANOSLEEP", Const, 0},
    -		{"SYS_NEWFSTATAT", Const, 0},
    -		{"SYS_NFSCLNT", Const, 0},
    -		{"SYS_NFSSERVCTL", Const, 0},
    -		{"SYS_NFSSVC", Const, 0},
    -		{"SYS_NFSTAT", Const, 0},
    -		{"SYS_NICE", Const, 0},
    -		{"SYS_NLM_SYSCALL", Const, 14},
    -		{"SYS_NLSTAT", Const, 0},
    -		{"SYS_NMOUNT", Const, 0},
    -		{"SYS_NSTAT", Const, 0},
    -		{"SYS_NTP_ADJTIME", Const, 0},
    -		{"SYS_NTP_GETTIME", Const, 0},
    -		{"SYS_NUMA_GETAFFINITY", Const, 14},
    -		{"SYS_NUMA_SETAFFINITY", Const, 14},
    -		{"SYS_OABI_SYSCALL_BASE", Const, 0},
    -		{"SYS_OBREAK", Const, 0},
    -		{"SYS_OLDFSTAT", Const, 0},
    -		{"SYS_OLDLSTAT", Const, 0},
    -		{"SYS_OLDOLDUNAME", Const, 0},
    -		{"SYS_OLDSTAT", Const, 0},
    -		{"SYS_OLDUNAME", Const, 0},
    -		{"SYS_OPEN", Const, 0},
    -		{"SYS_OPENAT", Const, 0},
    -		{"SYS_OPENBSD_POLL", Const, 0},
    -		{"SYS_OPEN_BY_HANDLE_AT", Const, 0},
    -		{"SYS_OPEN_DPROTECTED_NP", Const, 16},
    -		{"SYS_OPEN_EXTENDED", Const, 0},
    -		{"SYS_OPEN_NOCANCEL", Const, 0},
    -		{"SYS_OVADVISE", Const, 0},
    -		{"SYS_PACCEPT", Const, 1},
    -		{"SYS_PATHCONF", Const, 0},
    -		{"SYS_PAUSE", Const, 0},
    -		{"SYS_PCICONFIG_IOBASE", Const, 0},
    -		{"SYS_PCICONFIG_READ", Const, 0},
    -		{"SYS_PCICONFIG_WRITE", Const, 0},
    -		{"SYS_PDFORK", Const, 0},
    -		{"SYS_PDGETPID", Const, 0},
    -		{"SYS_PDKILL", Const, 0},
    -		{"SYS_PERF_EVENT_OPEN", Const, 0},
    -		{"SYS_PERSONALITY", Const, 0},
    -		{"SYS_PID_HIBERNATE", Const, 0},
    -		{"SYS_PID_RESUME", Const, 0},
    -		{"SYS_PID_SHUTDOWN_SOCKETS", Const, 0},
    -		{"SYS_PID_SUSPEND", Const, 0},
    -		{"SYS_PIPE", Const, 0},
    -		{"SYS_PIPE2", Const, 0},
    -		{"SYS_PIVOT_ROOT", Const, 0},
    -		{"SYS_PMC_CONTROL", Const, 1},
    -		{"SYS_PMC_GET_INFO", Const, 1},
    -		{"SYS_POLL", Const, 0},
    -		{"SYS_POLLTS", Const, 1},
    -		{"SYS_POLL_NOCANCEL", Const, 0},
    -		{"SYS_POSIX_FADVISE", Const, 0},
    -		{"SYS_POSIX_FALLOCATE", Const, 0},
    -		{"SYS_POSIX_OPENPT", Const, 0},
    -		{"SYS_POSIX_SPAWN", Const, 0},
    -		{"SYS_PPOLL", Const, 0},
    -		{"SYS_PRCTL", Const, 0},
    -		{"SYS_PREAD", Const, 0},
    -		{"SYS_PREAD64", Const, 0},
    -		{"SYS_PREADV", Const, 0},
    -		{"SYS_PREAD_NOCANCEL", Const, 0},
    -		{"SYS_PRLIMIT64", Const, 0},
    -		{"SYS_PROCCTL", Const, 3},
    -		{"SYS_PROCESS_POLICY", Const, 0},
    -		{"SYS_PROCESS_VM_READV", Const, 0},
    -		{"SYS_PROCESS_VM_WRITEV", Const, 0},
    -		{"SYS_PROC_INFO", Const, 0},
    -		{"SYS_PROF", Const, 0},
    -		{"SYS_PROFIL", Const, 0},
    -		{"SYS_PSELECT", Const, 0},
    -		{"SYS_PSELECT6", Const, 0},
    -		{"SYS_PSET_ASSIGN", Const, 1},
    -		{"SYS_PSET_CREATE", Const, 1},
    -		{"SYS_PSET_DESTROY", Const, 1},
    -		{"SYS_PSYNCH_CVBROAD", Const, 0},
    -		{"SYS_PSYNCH_CVCLRPREPOST", Const, 0},
    -		{"SYS_PSYNCH_CVSIGNAL", Const, 0},
    -		{"SYS_PSYNCH_CVWAIT", Const, 0},
    -		{"SYS_PSYNCH_MUTEXDROP", Const, 0},
    -		{"SYS_PSYNCH_MUTEXWAIT", Const, 0},
    -		{"SYS_PSYNCH_RW_DOWNGRADE", Const, 0},
    -		{"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0},
    -		{"SYS_PSYNCH_RW_RDLOCK", Const, 0},
    -		{"SYS_PSYNCH_RW_UNLOCK", Const, 0},
    -		{"SYS_PSYNCH_RW_UNLOCK2", Const, 0},
    -		{"SYS_PSYNCH_RW_UPGRADE", Const, 0},
    -		{"SYS_PSYNCH_RW_WRLOCK", Const, 0},
    -		{"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0},
    -		{"SYS_PTRACE", Const, 0},
    -		{"SYS_PUTPMSG", Const, 0},
    -		{"SYS_PWRITE", Const, 0},
    -		{"SYS_PWRITE64", Const, 0},
    -		{"SYS_PWRITEV", Const, 0},
    -		{"SYS_PWRITE_NOCANCEL", Const, 0},
    -		{"SYS_QUERY_MODULE", Const, 0},
    -		{"SYS_QUOTACTL", Const, 0},
    -		{"SYS_RASCTL", Const, 1},
    -		{"SYS_RCTL_ADD_RULE", Const, 0},
    -		{"SYS_RCTL_GET_LIMITS", Const, 0},
    -		{"SYS_RCTL_GET_RACCT", Const, 0},
    -		{"SYS_RCTL_GET_RULES", Const, 0},
    -		{"SYS_RCTL_REMOVE_RULE", Const, 0},
    -		{"SYS_READ", Const, 0},
    -		{"SYS_READAHEAD", Const, 0},
    -		{"SYS_READDIR", Const, 0},
    -		{"SYS_READLINK", Const, 0},
    -		{"SYS_READLINKAT", Const, 0},
    -		{"SYS_READV", Const, 0},
    -		{"SYS_READV_NOCANCEL", Const, 0},
    -		{"SYS_READ_NOCANCEL", Const, 0},
    -		{"SYS_REBOOT", Const, 0},
    -		{"SYS_RECV", Const, 0},
    -		{"SYS_RECVFROM", Const, 0},
    -		{"SYS_RECVFROM_NOCANCEL", Const, 0},
    -		{"SYS_RECVMMSG", Const, 0},
    -		{"SYS_RECVMSG", Const, 0},
    -		{"SYS_RECVMSG_NOCANCEL", Const, 0},
    -		{"SYS_REMAP_FILE_PAGES", Const, 0},
    -		{"SYS_REMOVEXATTR", Const, 0},
    -		{"SYS_RENAME", Const, 0},
    -		{"SYS_RENAMEAT", Const, 0},
    -		{"SYS_REQUEST_KEY", Const, 0},
    -		{"SYS_RESTART_SYSCALL", Const, 0},
    -		{"SYS_REVOKE", Const, 0},
    -		{"SYS_RFORK", Const, 0},
    -		{"SYS_RMDIR", Const, 0},
    -		{"SYS_RTPRIO", Const, 0},
    -		{"SYS_RTPRIO_THREAD", Const, 0},
    -		{"SYS_RT_SIGACTION", Const, 0},
    -		{"SYS_RT_SIGPENDING", Const, 0},
    -		{"SYS_RT_SIGPROCMASK", Const, 0},
    -		{"SYS_RT_SIGQUEUEINFO", Const, 0},
    -		{"SYS_RT_SIGRETURN", Const, 0},
    -		{"SYS_RT_SIGSUSPEND", Const, 0},
    -		{"SYS_RT_SIGTIMEDWAIT", Const, 0},
    -		{"SYS_RT_TGSIGQUEUEINFO", Const, 0},
    -		{"SYS_SBRK", Const, 0},
    -		{"SYS_SCHED_GETAFFINITY", Const, 0},
    -		{"SYS_SCHED_GETPARAM", Const, 0},
    -		{"SYS_SCHED_GETSCHEDULER", Const, 0},
    -		{"SYS_SCHED_GET_PRIORITY_MAX", Const, 0},
    -		{"SYS_SCHED_GET_PRIORITY_MIN", Const, 0},
    -		{"SYS_SCHED_RR_GET_INTERVAL", Const, 0},
    -		{"SYS_SCHED_SETAFFINITY", Const, 0},
    -		{"SYS_SCHED_SETPARAM", Const, 0},
    -		{"SYS_SCHED_SETSCHEDULER", Const, 0},
    -		{"SYS_SCHED_YIELD", Const, 0},
    -		{"SYS_SCTP_GENERIC_RECVMSG", Const, 0},
    -		{"SYS_SCTP_GENERIC_SENDMSG", Const, 0},
    -		{"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0},
    -		{"SYS_SCTP_PEELOFF", Const, 0},
    -		{"SYS_SEARCHFS", Const, 0},
    -		{"SYS_SECURITY", Const, 0},
    -		{"SYS_SELECT", Const, 0},
    -		{"SYS_SELECT_NOCANCEL", Const, 0},
    -		{"SYS_SEMCONFIG", Const, 1},
    -		{"SYS_SEMCTL", Const, 0},
    -		{"SYS_SEMGET", Const, 0},
    -		{"SYS_SEMOP", Const, 0},
    -		{"SYS_SEMSYS", Const, 0},
    -		{"SYS_SEMTIMEDOP", Const, 0},
    -		{"SYS_SEM_CLOSE", Const, 0},
    -		{"SYS_SEM_DESTROY", Const, 0},
    -		{"SYS_SEM_GETVALUE", Const, 0},
    -		{"SYS_SEM_INIT", Const, 0},
    -		{"SYS_SEM_OPEN", Const, 0},
    -		{"SYS_SEM_POST", Const, 0},
    -		{"SYS_SEM_TRYWAIT", Const, 0},
    -		{"SYS_SEM_UNLINK", Const, 0},
    -		{"SYS_SEM_WAIT", Const, 0},
    -		{"SYS_SEM_WAIT_NOCANCEL", Const, 0},
    -		{"SYS_SEND", Const, 0},
    -		{"SYS_SENDFILE", Const, 0},
    -		{"SYS_SENDFILE64", Const, 0},
    -		{"SYS_SENDMMSG", Const, 0},
    -		{"SYS_SENDMSG", Const, 0},
    -		{"SYS_SENDMSG_NOCANCEL", Const, 0},
    -		{"SYS_SENDTO", Const, 0},
    -		{"SYS_SENDTO_NOCANCEL", Const, 0},
    -		{"SYS_SETATTRLIST", Const, 0},
    -		{"SYS_SETAUDIT", Const, 0},
    -		{"SYS_SETAUDIT_ADDR", Const, 0},
    -		{"SYS_SETAUID", Const, 0},
    -		{"SYS_SETCONTEXT", Const, 0},
    -		{"SYS_SETDOMAINNAME", Const, 0},
    -		{"SYS_SETEGID", Const, 0},
    -		{"SYS_SETEUID", Const, 0},
    -		{"SYS_SETFIB", Const, 0},
    -		{"SYS_SETFSGID", Const, 0},
    -		{"SYS_SETFSGID32", Const, 0},
    -		{"SYS_SETFSUID", Const, 0},
    -		{"SYS_SETFSUID32", Const, 0},
    -		{"SYS_SETGID", Const, 0},
    -		{"SYS_SETGID32", Const, 0},
    -		{"SYS_SETGROUPS", Const, 0},
    -		{"SYS_SETGROUPS32", Const, 0},
    -		{"SYS_SETHOSTNAME", Const, 0},
    -		{"SYS_SETITIMER", Const, 0},
    -		{"SYS_SETLCID", Const, 0},
    -		{"SYS_SETLOGIN", Const, 0},
    -		{"SYS_SETLOGINCLASS", Const, 0},
    -		{"SYS_SETNS", Const, 0},
    -		{"SYS_SETPGID", Const, 0},
    -		{"SYS_SETPRIORITY", Const, 0},
    -		{"SYS_SETPRIVEXEC", Const, 0},
    -		{"SYS_SETREGID", Const, 0},
    -		{"SYS_SETREGID32", Const, 0},
    -		{"SYS_SETRESGID", Const, 0},
    -		{"SYS_SETRESGID32", Const, 0},
    -		{"SYS_SETRESUID", Const, 0},
    -		{"SYS_SETRESUID32", Const, 0},
    -		{"SYS_SETREUID", Const, 0},
    -		{"SYS_SETREUID32", Const, 0},
    -		{"SYS_SETRLIMIT", Const, 0},
    -		{"SYS_SETRTABLE", Const, 1},
    -		{"SYS_SETSGROUPS", Const, 0},
    -		{"SYS_SETSID", Const, 0},
    -		{"SYS_SETSOCKOPT", Const, 0},
    -		{"SYS_SETTID", Const, 0},
    -		{"SYS_SETTID_WITH_PID", Const, 0},
    -		{"SYS_SETTIMEOFDAY", Const, 0},
    -		{"SYS_SETUID", Const, 0},
    -		{"SYS_SETUID32", Const, 0},
    -		{"SYS_SETWGROUPS", Const, 0},
    -		{"SYS_SETXATTR", Const, 0},
    -		{"SYS_SET_MEMPOLICY", Const, 0},
    -		{"SYS_SET_ROBUST_LIST", Const, 0},
    -		{"SYS_SET_THREAD_AREA", Const, 0},
    -		{"SYS_SET_TID_ADDRESS", Const, 0},
    -		{"SYS_SGETMASK", Const, 0},
    -		{"SYS_SHARED_REGION_CHECK_NP", Const, 0},
    -		{"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0},
    -		{"SYS_SHMAT", Const, 0},
    -		{"SYS_SHMCTL", Const, 0},
    -		{"SYS_SHMDT", Const, 0},
    -		{"SYS_SHMGET", Const, 0},
    -		{"SYS_SHMSYS", Const, 0},
    -		{"SYS_SHM_OPEN", Const, 0},
    -		{"SYS_SHM_UNLINK", Const, 0},
    -		{"SYS_SHUTDOWN", Const, 0},
    -		{"SYS_SIGACTION", Const, 0},
    -		{"SYS_SIGALTSTACK", Const, 0},
    -		{"SYS_SIGNAL", Const, 0},
    -		{"SYS_SIGNALFD", Const, 0},
    -		{"SYS_SIGNALFD4", Const, 0},
    -		{"SYS_SIGPENDING", Const, 0},
    -		{"SYS_SIGPROCMASK", Const, 0},
    -		{"SYS_SIGQUEUE", Const, 0},
    -		{"SYS_SIGQUEUEINFO", Const, 1},
    -		{"SYS_SIGRETURN", Const, 0},
    -		{"SYS_SIGSUSPEND", Const, 0},
    -		{"SYS_SIGSUSPEND_NOCANCEL", Const, 0},
    -		{"SYS_SIGTIMEDWAIT", Const, 0},
    -		{"SYS_SIGWAIT", Const, 0},
    -		{"SYS_SIGWAITINFO", Const, 0},
    -		{"SYS_SOCKET", Const, 0},
    -		{"SYS_SOCKETCALL", Const, 0},
    -		{"SYS_SOCKETPAIR", Const, 0},
    -		{"SYS_SPLICE", Const, 0},
    -		{"SYS_SSETMASK", Const, 0},
    -		{"SYS_SSTK", Const, 0},
    -		{"SYS_STACK_SNAPSHOT", Const, 0},
    -		{"SYS_STAT", Const, 0},
    -		{"SYS_STAT64", Const, 0},
    -		{"SYS_STAT64_EXTENDED", Const, 0},
    -		{"SYS_STATFS", Const, 0},
    -		{"SYS_STATFS64", Const, 0},
    -		{"SYS_STATV", Const, 0},
    -		{"SYS_STATVFS1", Const, 1},
    -		{"SYS_STAT_EXTENDED", Const, 0},
    -		{"SYS_STIME", Const, 0},
    -		{"SYS_STTY", Const, 0},
    -		{"SYS_SWAPCONTEXT", Const, 0},
    -		{"SYS_SWAPCTL", Const, 1},
    -		{"SYS_SWAPOFF", Const, 0},
    -		{"SYS_SWAPON", Const, 0},
    -		{"SYS_SYMLINK", Const, 0},
    -		{"SYS_SYMLINKAT", Const, 0},
    -		{"SYS_SYNC", Const, 0},
    -		{"SYS_SYNCFS", Const, 0},
    -		{"SYS_SYNC_FILE_RANGE", Const, 0},
    -		{"SYS_SYSARCH", Const, 0},
    -		{"SYS_SYSCALL", Const, 0},
    -		{"SYS_SYSCALL_BASE", Const, 0},
    -		{"SYS_SYSFS", Const, 0},
    -		{"SYS_SYSINFO", Const, 0},
    -		{"SYS_SYSLOG", Const, 0},
    -		{"SYS_TEE", Const, 0},
    -		{"SYS_TGKILL", Const, 0},
    -		{"SYS_THREAD_SELFID", Const, 0},
    -		{"SYS_THR_CREATE", Const, 0},
    -		{"SYS_THR_EXIT", Const, 0},
    -		{"SYS_THR_KILL", Const, 0},
    -		{"SYS_THR_KILL2", Const, 0},
    -		{"SYS_THR_NEW", Const, 0},
    -		{"SYS_THR_SELF", Const, 0},
    -		{"SYS_THR_SET_NAME", Const, 0},
    -		{"SYS_THR_SUSPEND", Const, 0},
    -		{"SYS_THR_WAKE", Const, 0},
    -		{"SYS_TIME", Const, 0},
    -		{"SYS_TIMERFD_CREATE", Const, 0},
    -		{"SYS_TIMERFD_GETTIME", Const, 0},
    -		{"SYS_TIMERFD_SETTIME", Const, 0},
    -		{"SYS_TIMER_CREATE", Const, 0},
    -		{"SYS_TIMER_DELETE", Const, 0},
    -		{"SYS_TIMER_GETOVERRUN", Const, 0},
    -		{"SYS_TIMER_GETTIME", Const, 0},
    -		{"SYS_TIMER_SETTIME", Const, 0},
    -		{"SYS_TIMES", Const, 0},
    -		{"SYS_TKILL", Const, 0},
    -		{"SYS_TRUNCATE", Const, 0},
    -		{"SYS_TRUNCATE64", Const, 0},
    -		{"SYS_TUXCALL", Const, 0},
    -		{"SYS_UGETRLIMIT", Const, 0},
    -		{"SYS_ULIMIT", Const, 0},
    -		{"SYS_UMASK", Const, 0},
    -		{"SYS_UMASK_EXTENDED", Const, 0},
    -		{"SYS_UMOUNT", Const, 0},
    -		{"SYS_UMOUNT2", Const, 0},
    -		{"SYS_UNAME", Const, 0},
    -		{"SYS_UNDELETE", Const, 0},
    -		{"SYS_UNLINK", Const, 0},
    -		{"SYS_UNLINKAT", Const, 0},
    -		{"SYS_UNMOUNT", Const, 0},
    -		{"SYS_UNSHARE", Const, 0},
    -		{"SYS_USELIB", Const, 0},
    -		{"SYS_USTAT", Const, 0},
    -		{"SYS_UTIME", Const, 0},
    -		{"SYS_UTIMENSAT", Const, 0},
    -		{"SYS_UTIMES", Const, 0},
    -		{"SYS_UTRACE", Const, 0},
    -		{"SYS_UUIDGEN", Const, 0},
    -		{"SYS_VADVISE", Const, 1},
    -		{"SYS_VFORK", Const, 0},
    -		{"SYS_VHANGUP", Const, 0},
    -		{"SYS_VM86", Const, 0},
    -		{"SYS_VM86OLD", Const, 0},
    -		{"SYS_VMSPLICE", Const, 0},
    -		{"SYS_VM_PRESSURE_MONITOR", Const, 0},
    -		{"SYS_VSERVER", Const, 0},
    -		{"SYS_WAIT4", Const, 0},
    -		{"SYS_WAIT4_NOCANCEL", Const, 0},
    -		{"SYS_WAIT6", Const, 1},
    -		{"SYS_WAITEVENT", Const, 0},
    -		{"SYS_WAITID", Const, 0},
    -		{"SYS_WAITID_NOCANCEL", Const, 0},
    -		{"SYS_WAITPID", Const, 0},
    -		{"SYS_WATCHEVENT", Const, 0},
    -		{"SYS_WORKQ_KERNRETURN", Const, 0},
    -		{"SYS_WORKQ_OPEN", Const, 0},
    -		{"SYS_WRITE", Const, 0},
    -		{"SYS_WRITEV", Const, 0},
    -		{"SYS_WRITEV_NOCANCEL", Const, 0},
    -		{"SYS_WRITE_NOCANCEL", Const, 0},
    -		{"SYS_YIELD", Const, 0},
    -		{"SYS__LLSEEK", Const, 0},
    -		{"SYS__LWP_CONTINUE", Const, 1},
    -		{"SYS__LWP_CREATE", Const, 1},
    -		{"SYS__LWP_CTL", Const, 1},
    -		{"SYS__LWP_DETACH", Const, 1},
    -		{"SYS__LWP_EXIT", Const, 1},
    -		{"SYS__LWP_GETNAME", Const, 1},
    -		{"SYS__LWP_GETPRIVATE", Const, 1},
    -		{"SYS__LWP_KILL", Const, 1},
    -		{"SYS__LWP_PARK", Const, 1},
    -		{"SYS__LWP_SELF", Const, 1},
    -		{"SYS__LWP_SETNAME", Const, 1},
    -		{"SYS__LWP_SETPRIVATE", Const, 1},
    -		{"SYS__LWP_SUSPEND", Const, 1},
    -		{"SYS__LWP_UNPARK", Const, 1},
    -		{"SYS__LWP_UNPARK_ALL", Const, 1},
    -		{"SYS__LWP_WAIT", Const, 1},
    -		{"SYS__LWP_WAKEUP", Const, 1},
    -		{"SYS__NEWSELECT", Const, 0},
    -		{"SYS__PSET_BIND", Const, 1},
    -		{"SYS__SCHED_GETAFFINITY", Const, 1},
    -		{"SYS__SCHED_GETPARAM", Const, 1},
    -		{"SYS__SCHED_SETAFFINITY", Const, 1},
    -		{"SYS__SCHED_SETPARAM", Const, 1},
    -		{"SYS__SYSCTL", Const, 0},
    -		{"SYS__UMTX_LOCK", Const, 0},
    -		{"SYS__UMTX_OP", Const, 0},
    -		{"SYS__UMTX_UNLOCK", Const, 0},
    -		{"SYS___ACL_ACLCHECK_FD", Const, 0},
    -		{"SYS___ACL_ACLCHECK_FILE", Const, 0},
    -		{"SYS___ACL_ACLCHECK_LINK", Const, 0},
    -		{"SYS___ACL_DELETE_FD", Const, 0},
    -		{"SYS___ACL_DELETE_FILE", Const, 0},
    -		{"SYS___ACL_DELETE_LINK", Const, 0},
    -		{"SYS___ACL_GET_FD", Const, 0},
    -		{"SYS___ACL_GET_FILE", Const, 0},
    -		{"SYS___ACL_GET_LINK", Const, 0},
    -		{"SYS___ACL_SET_FD", Const, 0},
    -		{"SYS___ACL_SET_FILE", Const, 0},
    -		{"SYS___ACL_SET_LINK", Const, 0},
    -		{"SYS___CAP_RIGHTS_GET", Const, 14},
    -		{"SYS___CLONE", Const, 1},
    -		{"SYS___DISABLE_THREADSIGNAL", Const, 0},
    -		{"SYS___GETCWD", Const, 0},
    -		{"SYS___GETLOGIN", Const, 1},
    -		{"SYS___GET_TCB", Const, 1},
    -		{"SYS___MAC_EXECVE", Const, 0},
    -		{"SYS___MAC_GETFSSTAT", Const, 0},
    -		{"SYS___MAC_GET_FD", Const, 0},
    -		{"SYS___MAC_GET_FILE", Const, 0},
    -		{"SYS___MAC_GET_LCID", Const, 0},
    -		{"SYS___MAC_GET_LCTX", Const, 0},
    -		{"SYS___MAC_GET_LINK", Const, 0},
    -		{"SYS___MAC_GET_MOUNT", Const, 0},
    -		{"SYS___MAC_GET_PID", Const, 0},
    -		{"SYS___MAC_GET_PROC", Const, 0},
    -		{"SYS___MAC_MOUNT", Const, 0},
    -		{"SYS___MAC_SET_FD", Const, 0},
    -		{"SYS___MAC_SET_FILE", Const, 0},
    -		{"SYS___MAC_SET_LCTX", Const, 0},
    -		{"SYS___MAC_SET_LINK", Const, 0},
    -		{"SYS___MAC_SET_PROC", Const, 0},
    -		{"SYS___MAC_SYSCALL", Const, 0},
    -		{"SYS___OLD_SEMWAIT_SIGNAL", Const, 0},
    -		{"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0},
    -		{"SYS___POSIX_CHOWN", Const, 1},
    -		{"SYS___POSIX_FCHOWN", Const, 1},
    -		{"SYS___POSIX_LCHOWN", Const, 1},
    -		{"SYS___POSIX_RENAME", Const, 1},
    -		{"SYS___PTHREAD_CANCELED", Const, 0},
    -		{"SYS___PTHREAD_CHDIR", Const, 0},
    -		{"SYS___PTHREAD_FCHDIR", Const, 0},
    -		{"SYS___PTHREAD_KILL", Const, 0},
    -		{"SYS___PTHREAD_MARKCANCEL", Const, 0},
    -		{"SYS___PTHREAD_SIGMASK", Const, 0},
    -		{"SYS___QUOTACTL", Const, 1},
    -		{"SYS___SEMCTL", Const, 1},
    -		{"SYS___SEMWAIT_SIGNAL", Const, 0},
    -		{"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0},
    -		{"SYS___SETLOGIN", Const, 1},
    -		{"SYS___SETUGID", Const, 0},
    -		{"SYS___SET_TCB", Const, 1},
    -		{"SYS___SIGACTION_SIGTRAMP", Const, 1},
    -		{"SYS___SIGTIMEDWAIT", Const, 1},
    -		{"SYS___SIGWAIT", Const, 0},
    -		{"SYS___SIGWAIT_NOCANCEL", Const, 0},
    -		{"SYS___SYSCTL", Const, 0},
    -		{"SYS___TFORK", Const, 1},
    -		{"SYS___THREXIT", Const, 1},
    -		{"SYS___THRSIGDIVERT", Const, 1},
    -		{"SYS___THRSLEEP", Const, 1},
    -		{"SYS___THRWAKEUP", Const, 1},
    -		{"S_ARCH1", Const, 1},
    -		{"S_ARCH2", Const, 1},
    -		{"S_BLKSIZE", Const, 0},
    -		{"S_IEXEC", Const, 0},
    -		{"S_IFBLK", Const, 0},
    -		{"S_IFCHR", Const, 0},
    -		{"S_IFDIR", Const, 0},
    -		{"S_IFIFO", Const, 0},
    -		{"S_IFLNK", Const, 0},
    -		{"S_IFMT", Const, 0},
    -		{"S_IFREG", Const, 0},
    -		{"S_IFSOCK", Const, 0},
    -		{"S_IFWHT", Const, 0},
    -		{"S_IREAD", Const, 0},
    -		{"S_IRGRP", Const, 0},
    -		{"S_IROTH", Const, 0},
    -		{"S_IRUSR", Const, 0},
    -		{"S_IRWXG", Const, 0},
    -		{"S_IRWXO", Const, 0},
    -		{"S_IRWXU", Const, 0},
    -		{"S_ISGID", Const, 0},
    -		{"S_ISTXT", Const, 0},
    -		{"S_ISUID", Const, 0},
    -		{"S_ISVTX", Const, 0},
    -		{"S_IWGRP", Const, 0},
    -		{"S_IWOTH", Const, 0},
    -		{"S_IWRITE", Const, 0},
    -		{"S_IWUSR", Const, 0},
    -		{"S_IXGRP", Const, 0},
    -		{"S_IXOTH", Const, 0},
    -		{"S_IXUSR", Const, 0},
    -		{"S_LOGIN_SET", Const, 1},
    -		{"SecurityAttributes", Type, 0},
    -		{"SecurityAttributes.InheritHandle", Field, 0},
    -		{"SecurityAttributes.Length", Field, 0},
    -		{"SecurityAttributes.SecurityDescriptor", Field, 0},
    -		{"Seek", Func, 0},
    -		{"Select", Func, 0},
    -		{"Sendfile", Func, 0},
    -		{"Sendmsg", Func, 0},
    -		{"SendmsgN", Func, 3},
    -		{"Sendto", Func, 0},
    -		{"Servent", Type, 0},
    -		{"Servent.Aliases", Field, 0},
    -		{"Servent.Name", Field, 0},
    -		{"Servent.Port", Field, 0},
    -		{"Servent.Proto", Field, 0},
    -		{"SetBpf", Func, 0},
    -		{"SetBpfBuflen", Func, 0},
    -		{"SetBpfDatalink", Func, 0},
    -		{"SetBpfHeadercmpl", Func, 0},
    -		{"SetBpfImmediate", Func, 0},
    -		{"SetBpfInterface", Func, 0},
    -		{"SetBpfPromisc", Func, 0},
    -		{"SetBpfTimeout", Func, 0},
    -		{"SetCurrentDirectory", Func, 0},
    -		{"SetEndOfFile", Func, 0},
    -		{"SetEnvironmentVariable", Func, 0},
    -		{"SetFileAttributes", Func, 0},
    -		{"SetFileCompletionNotificationModes", Func, 2},
    -		{"SetFilePointer", Func, 0},
    -		{"SetFileTime", Func, 0},
    -		{"SetHandleInformation", Func, 0},
    -		{"SetKevent", Func, 0},
    -		{"SetLsfPromisc", Func, 0},
    -		{"SetNonblock", Func, 0},
    -		{"Setdomainname", Func, 0},
    -		{"Setegid", Func, 0},
    -		{"Setenv", Func, 0},
    -		{"Seteuid", Func, 0},
    -		{"Setfsgid", Func, 0},
    -		{"Setfsuid", Func, 0},
    -		{"Setgid", Func, 0},
    -		{"Setgroups", Func, 0},
    -		{"Sethostname", Func, 0},
    -		{"Setlogin", Func, 0},
    -		{"Setpgid", Func, 0},
    -		{"Setpriority", Func, 0},
    -		{"Setprivexec", Func, 0},
    -		{"Setregid", Func, 0},
    -		{"Setresgid", Func, 0},
    -		{"Setresuid", Func, 0},
    -		{"Setreuid", Func, 0},
    -		{"Setrlimit", Func, 0},
    -		{"Setsid", Func, 0},
    -		{"Setsockopt", Func, 0},
    -		{"SetsockoptByte", Func, 0},
    -		{"SetsockoptICMPv6Filter", Func, 2},
    -		{"SetsockoptIPMreq", Func, 0},
    -		{"SetsockoptIPMreqn", Func, 0},
    -		{"SetsockoptIPv6Mreq", Func, 0},
    -		{"SetsockoptInet4Addr", Func, 0},
    -		{"SetsockoptInt", Func, 0},
    -		{"SetsockoptLinger", Func, 0},
    -		{"SetsockoptString", Func, 0},
    -		{"SetsockoptTimeval", Func, 0},
    -		{"Settimeofday", Func, 0},
    -		{"Setuid", Func, 0},
    -		{"Setxattr", Func, 1},
    -		{"Shutdown", Func, 0},
    -		{"SidTypeAlias", Const, 0},
    -		{"SidTypeComputer", Const, 0},
    -		{"SidTypeDeletedAccount", Const, 0},
    -		{"SidTypeDomain", Const, 0},
    -		{"SidTypeGroup", Const, 0},
    -		{"SidTypeInvalid", Const, 0},
    -		{"SidTypeLabel", Const, 0},
    -		{"SidTypeUnknown", Const, 0},
    -		{"SidTypeUser", Const, 0},
    -		{"SidTypeWellKnownGroup", Const, 0},
    -		{"Signal", Type, 0},
    -		{"SizeofBpfHdr", Const, 0},
    -		{"SizeofBpfInsn", Const, 0},
    -		{"SizeofBpfProgram", Const, 0},
    -		{"SizeofBpfStat", Const, 0},
    -		{"SizeofBpfVersion", Const, 0},
    -		{"SizeofBpfZbuf", Const, 0},
    -		{"SizeofBpfZbufHeader", Const, 0},
    -		{"SizeofCmsghdr", Const, 0},
    -		{"SizeofICMPv6Filter", Const, 2},
    -		{"SizeofIPMreq", Const, 0},
    -		{"SizeofIPMreqn", Const, 0},
    -		{"SizeofIPv6MTUInfo", Const, 2},
    -		{"SizeofIPv6Mreq", Const, 0},
    -		{"SizeofIfAddrmsg", Const, 0},
    -		{"SizeofIfAnnounceMsghdr", Const, 1},
    -		{"SizeofIfData", Const, 0},
    -		{"SizeofIfInfomsg", Const, 0},
    -		{"SizeofIfMsghdr", Const, 0},
    -		{"SizeofIfaMsghdr", Const, 0},
    -		{"SizeofIfmaMsghdr", Const, 0},
    -		{"SizeofIfmaMsghdr2", Const, 0},
    -		{"SizeofInet4Pktinfo", Const, 0},
    -		{"SizeofInet6Pktinfo", Const, 0},
    -		{"SizeofInotifyEvent", Const, 0},
    -		{"SizeofLinger", Const, 0},
    -		{"SizeofMsghdr", Const, 0},
    -		{"SizeofNlAttr", Const, 0},
    -		{"SizeofNlMsgerr", Const, 0},
    -		{"SizeofNlMsghdr", Const, 0},
    -		{"SizeofRtAttr", Const, 0},
    -		{"SizeofRtGenmsg", Const, 0},
    -		{"SizeofRtMetrics", Const, 0},
    -		{"SizeofRtMsg", Const, 0},
    -		{"SizeofRtMsghdr", Const, 0},
    -		{"SizeofRtNexthop", Const, 0},
    -		{"SizeofSockFilter", Const, 0},
    -		{"SizeofSockFprog", Const, 0},
    -		{"SizeofSockaddrAny", Const, 0},
    -		{"SizeofSockaddrDatalink", Const, 0},
    -		{"SizeofSockaddrInet4", Const, 0},
    -		{"SizeofSockaddrInet6", Const, 0},
    -		{"SizeofSockaddrLinklayer", Const, 0},
    -		{"SizeofSockaddrNetlink", Const, 0},
    -		{"SizeofSockaddrUnix", Const, 0},
    -		{"SizeofTCPInfo", Const, 1},
    -		{"SizeofUcred", Const, 0},
    -		{"SlicePtrFromStrings", Func, 1},
    -		{"SockFilter", Type, 0},
    -		{"SockFilter.Code", Field, 0},
    -		{"SockFilter.Jf", Field, 0},
    -		{"SockFilter.Jt", Field, 0},
    -		{"SockFilter.K", Field, 0},
    -		{"SockFprog", Type, 0},
    -		{"SockFprog.Filter", Field, 0},
    -		{"SockFprog.Len", Field, 0},
    -		{"SockFprog.Pad_cgo_0", Field, 0},
    -		{"Sockaddr", Type, 0},
    -		{"SockaddrDatalink", Type, 0},
    -		{"SockaddrDatalink.Alen", Field, 0},
    -		{"SockaddrDatalink.Data", Field, 0},
    -		{"SockaddrDatalink.Family", Field, 0},
    -		{"SockaddrDatalink.Index", Field, 0},
    -		{"SockaddrDatalink.Len", Field, 0},
    -		{"SockaddrDatalink.Nlen", Field, 0},
    -		{"SockaddrDatalink.Slen", Field, 0},
    -		{"SockaddrDatalink.Type", Field, 0},
    -		{"SockaddrGen", Type, 0},
    -		{"SockaddrInet4", Type, 0},
    -		{"SockaddrInet4.Addr", Field, 0},
    -		{"SockaddrInet4.Port", Field, 0},
    -		{"SockaddrInet6", Type, 0},
    -		{"SockaddrInet6.Addr", Field, 0},
    -		{"SockaddrInet6.Port", Field, 0},
    -		{"SockaddrInet6.ZoneId", Field, 0},
    -		{"SockaddrLinklayer", Type, 0},
    -		{"SockaddrLinklayer.Addr", Field, 0},
    -		{"SockaddrLinklayer.Halen", Field, 0},
    -		{"SockaddrLinklayer.Hatype", Field, 0},
    -		{"SockaddrLinklayer.Ifindex", Field, 0},
    -		{"SockaddrLinklayer.Pkttype", Field, 0},
    -		{"SockaddrLinklayer.Protocol", Field, 0},
    -		{"SockaddrNetlink", Type, 0},
    -		{"SockaddrNetlink.Family", Field, 0},
    -		{"SockaddrNetlink.Groups", Field, 0},
    -		{"SockaddrNetlink.Pad", Field, 0},
    -		{"SockaddrNetlink.Pid", Field, 0},
    -		{"SockaddrUnix", Type, 0},
    -		{"SockaddrUnix.Name", Field, 0},
    -		{"Socket", Func, 0},
    -		{"SocketControlMessage", Type, 0},
    -		{"SocketControlMessage.Data", Field, 0},
    -		{"SocketControlMessage.Header", Field, 0},
    -		{"SocketDisableIPv6", Var, 0},
    -		{"Socketpair", Func, 0},
    -		{"Splice", Func, 0},
    -		{"StartProcess", Func, 0},
    -		{"StartupInfo", Type, 0},
    -		{"StartupInfo.Cb", Field, 0},
    -		{"StartupInfo.Desktop", Field, 0},
    -		{"StartupInfo.FillAttribute", Field, 0},
    -		{"StartupInfo.Flags", Field, 0},
    -		{"StartupInfo.ShowWindow", Field, 0},
    -		{"StartupInfo.StdErr", Field, 0},
    -		{"StartupInfo.StdInput", Field, 0},
    -		{"StartupInfo.StdOutput", Field, 0},
    -		{"StartupInfo.Title", Field, 0},
    -		{"StartupInfo.X", Field, 0},
    -		{"StartupInfo.XCountChars", Field, 0},
    -		{"StartupInfo.XSize", Field, 0},
    -		{"StartupInfo.Y", Field, 0},
    -		{"StartupInfo.YCountChars", Field, 0},
    -		{"StartupInfo.YSize", Field, 0},
    -		{"Stat", Func, 0},
    -		{"Stat_t", Type, 0},
    -		{"Stat_t.Atim", Field, 0},
    -		{"Stat_t.Atim_ext", Field, 12},
    -		{"Stat_t.Atimespec", Field, 0},
    -		{"Stat_t.Birthtimespec", Field, 0},
    -		{"Stat_t.Blksize", Field, 0},
    -		{"Stat_t.Blocks", Field, 0},
    -		{"Stat_t.Btim_ext", Field, 12},
    -		{"Stat_t.Ctim", Field, 0},
    -		{"Stat_t.Ctim_ext", Field, 12},
    -		{"Stat_t.Ctimespec", Field, 0},
    -		{"Stat_t.Dev", Field, 0},
    -		{"Stat_t.Flags", Field, 0},
    -		{"Stat_t.Gen", Field, 0},
    -		{"Stat_t.Gid", Field, 0},
    -		{"Stat_t.Ino", Field, 0},
    -		{"Stat_t.Lspare", Field, 0},
    -		{"Stat_t.Lspare0", Field, 2},
    -		{"Stat_t.Lspare1", Field, 2},
    -		{"Stat_t.Mode", Field, 0},
    -		{"Stat_t.Mtim", Field, 0},
    -		{"Stat_t.Mtim_ext", Field, 12},
    -		{"Stat_t.Mtimespec", Field, 0},
    -		{"Stat_t.Nlink", Field, 0},
    -		{"Stat_t.Pad_cgo_0", Field, 0},
    -		{"Stat_t.Pad_cgo_1", Field, 0},
    -		{"Stat_t.Pad_cgo_2", Field, 0},
    -		{"Stat_t.Padding0", Field, 12},
    -		{"Stat_t.Padding1", Field, 12},
    -		{"Stat_t.Qspare", Field, 0},
    -		{"Stat_t.Rdev", Field, 0},
    -		{"Stat_t.Size", Field, 0},
    -		{"Stat_t.Spare", Field, 2},
    -		{"Stat_t.Uid", Field, 0},
    -		{"Stat_t.X__pad0", Field, 0},
    -		{"Stat_t.X__pad1", Field, 0},
    -		{"Stat_t.X__pad2", Field, 0},
    -		{"Stat_t.X__st_birthtim", Field, 2},
    -		{"Stat_t.X__st_ino", Field, 0},
    -		{"Stat_t.X__unused", Field, 0},
    -		{"Statfs", Func, 0},
    -		{"Statfs_t", Type, 0},
    -		{"Statfs_t.Asyncreads", Field, 0},
    -		{"Statfs_t.Asyncwrites", Field, 0},
    -		{"Statfs_t.Bavail", Field, 0},
    -		{"Statfs_t.Bfree", Field, 0},
    -		{"Statfs_t.Blocks", Field, 0},
    -		{"Statfs_t.Bsize", Field, 0},
    -		{"Statfs_t.Charspare", Field, 0},
    -		{"Statfs_t.F_asyncreads", Field, 2},
    -		{"Statfs_t.F_asyncwrites", Field, 2},
    -		{"Statfs_t.F_bavail", Field, 2},
    -		{"Statfs_t.F_bfree", Field, 2},
    -		{"Statfs_t.F_blocks", Field, 2},
    -		{"Statfs_t.F_bsize", Field, 2},
    -		{"Statfs_t.F_ctime", Field, 2},
    -		{"Statfs_t.F_favail", Field, 2},
    -		{"Statfs_t.F_ffree", Field, 2},
    -		{"Statfs_t.F_files", Field, 2},
    -		{"Statfs_t.F_flags", Field, 2},
    -		{"Statfs_t.F_fsid", Field, 2},
    -		{"Statfs_t.F_fstypename", Field, 2},
    -		{"Statfs_t.F_iosize", Field, 2},
    -		{"Statfs_t.F_mntfromname", Field, 2},
    -		{"Statfs_t.F_mntfromspec", Field, 3},
    -		{"Statfs_t.F_mntonname", Field, 2},
    -		{"Statfs_t.F_namemax", Field, 2},
    -		{"Statfs_t.F_owner", Field, 2},
    -		{"Statfs_t.F_spare", Field, 2},
    -		{"Statfs_t.F_syncreads", Field, 2},
    -		{"Statfs_t.F_syncwrites", Field, 2},
    -		{"Statfs_t.Ffree", Field, 0},
    -		{"Statfs_t.Files", Field, 0},
    -		{"Statfs_t.Flags", Field, 0},
    -		{"Statfs_t.Frsize", Field, 0},
    -		{"Statfs_t.Fsid", Field, 0},
    -		{"Statfs_t.Fssubtype", Field, 0},
    -		{"Statfs_t.Fstypename", Field, 0},
    -		{"Statfs_t.Iosize", Field, 0},
    -		{"Statfs_t.Mntfromname", Field, 0},
    -		{"Statfs_t.Mntonname", Field, 0},
    -		{"Statfs_t.Mount_info", Field, 2},
    -		{"Statfs_t.Namelen", Field, 0},
    -		{"Statfs_t.Namemax", Field, 0},
    -		{"Statfs_t.Owner", Field, 0},
    -		{"Statfs_t.Pad_cgo_0", Field, 0},
    -		{"Statfs_t.Pad_cgo_1", Field, 2},
    -		{"Statfs_t.Reserved", Field, 0},
    -		{"Statfs_t.Spare", Field, 0},
    -		{"Statfs_t.Syncreads", Field, 0},
    -		{"Statfs_t.Syncwrites", Field, 0},
    -		{"Statfs_t.Type", Field, 0},
    -		{"Statfs_t.Version", Field, 0},
    -		{"Stderr", Var, 0},
    -		{"Stdin", Var, 0},
    -		{"Stdout", Var, 0},
    -		{"StringBytePtr", Func, 0},
    -		{"StringByteSlice", Func, 0},
    -		{"StringSlicePtr", Func, 0},
    -		{"StringToSid", Func, 0},
    -		{"StringToUTF16", Func, 0},
    -		{"StringToUTF16Ptr", Func, 0},
    -		{"Symlink", Func, 0},
    -		{"Sync", Func, 0},
    -		{"SyncFileRange", Func, 0},
    -		{"SysProcAttr", Type, 0},
    -		{"SysProcAttr.AdditionalInheritedHandles", Field, 17},
    -		{"SysProcAttr.AmbientCaps", Field, 9},
    -		{"SysProcAttr.CgroupFD", Field, 20},
    -		{"SysProcAttr.Chroot", Field, 0},
    -		{"SysProcAttr.Cloneflags", Field, 2},
    -		{"SysProcAttr.CmdLine", Field, 0},
    -		{"SysProcAttr.CreationFlags", Field, 1},
    -		{"SysProcAttr.Credential", Field, 0},
    -		{"SysProcAttr.Ctty", Field, 1},
    -		{"SysProcAttr.Foreground", Field, 5},
    -		{"SysProcAttr.GidMappings", Field, 4},
    -		{"SysProcAttr.GidMappingsEnableSetgroups", Field, 5},
    -		{"SysProcAttr.HideWindow", Field, 0},
    -		{"SysProcAttr.Jail", Field, 21},
    -		{"SysProcAttr.NoInheritHandles", Field, 16},
    -		{"SysProcAttr.Noctty", Field, 0},
    -		{"SysProcAttr.ParentProcess", Field, 17},
    -		{"SysProcAttr.Pdeathsig", Field, 0},
    -		{"SysProcAttr.Pgid", Field, 5},
    -		{"SysProcAttr.PidFD", Field, 22},
    -		{"SysProcAttr.ProcessAttributes", Field, 13},
    -		{"SysProcAttr.Ptrace", Field, 0},
    -		{"SysProcAttr.Setctty", Field, 0},
    -		{"SysProcAttr.Setpgid", Field, 0},
    -		{"SysProcAttr.Setsid", Field, 0},
    -		{"SysProcAttr.ThreadAttributes", Field, 13},
    -		{"SysProcAttr.Token", Field, 10},
    -		{"SysProcAttr.UidMappings", Field, 4},
    -		{"SysProcAttr.Unshareflags", Field, 7},
    -		{"SysProcAttr.UseCgroupFD", Field, 20},
    -		{"SysProcIDMap", Type, 4},
    -		{"SysProcIDMap.ContainerID", Field, 4},
    -		{"SysProcIDMap.HostID", Field, 4},
    -		{"SysProcIDMap.Size", Field, 4},
    -		{"Syscall", Func, 0},
    -		{"Syscall12", Func, 0},
    -		{"Syscall15", Func, 0},
    -		{"Syscall18", Func, 12},
    -		{"Syscall6", Func, 0},
    -		{"Syscall9", Func, 0},
    -		{"SyscallN", Func, 18},
    -		{"Sysctl", Func, 0},
    -		{"SysctlUint32", Func, 0},
    -		{"Sysctlnode", Type, 2},
    -		{"Sysctlnode.Flags", Field, 2},
    -		{"Sysctlnode.Name", Field, 2},
    -		{"Sysctlnode.Num", Field, 2},
    -		{"Sysctlnode.Un", Field, 2},
    -		{"Sysctlnode.Ver", Field, 2},
    -		{"Sysctlnode.X__rsvd", Field, 2},
    -		{"Sysctlnode.X_sysctl_desc", Field, 2},
    -		{"Sysctlnode.X_sysctl_func", Field, 2},
    -		{"Sysctlnode.X_sysctl_parent", Field, 2},
    -		{"Sysctlnode.X_sysctl_size", Field, 2},
    -		{"Sysinfo", Func, 0},
    -		{"Sysinfo_t", Type, 0},
    -		{"Sysinfo_t.Bufferram", Field, 0},
    -		{"Sysinfo_t.Freehigh", Field, 0},
    -		{"Sysinfo_t.Freeram", Field, 0},
    -		{"Sysinfo_t.Freeswap", Field, 0},
    -		{"Sysinfo_t.Loads", Field, 0},
    -		{"Sysinfo_t.Pad", Field, 0},
    -		{"Sysinfo_t.Pad_cgo_0", Field, 0},
    -		{"Sysinfo_t.Pad_cgo_1", Field, 0},
    -		{"Sysinfo_t.Procs", Field, 0},
    -		{"Sysinfo_t.Sharedram", Field, 0},
    -		{"Sysinfo_t.Totalhigh", Field, 0},
    -		{"Sysinfo_t.Totalram", Field, 0},
    -		{"Sysinfo_t.Totalswap", Field, 0},
    -		{"Sysinfo_t.Unit", Field, 0},
    -		{"Sysinfo_t.Uptime", Field, 0},
    -		{"Sysinfo_t.X_f", Field, 0},
    -		{"Systemtime", Type, 0},
    -		{"Systemtime.Day", Field, 0},
    -		{"Systemtime.DayOfWeek", Field, 0},
    -		{"Systemtime.Hour", Field, 0},
    -		{"Systemtime.Milliseconds", Field, 0},
    -		{"Systemtime.Minute", Field, 0},
    -		{"Systemtime.Month", Field, 0},
    -		{"Systemtime.Second", Field, 0},
    -		{"Systemtime.Year", Field, 0},
    -		{"TCGETS", Const, 0},
    -		{"TCIFLUSH", Const, 1},
    -		{"TCIOFLUSH", Const, 1},
    -		{"TCOFLUSH", Const, 1},
    -		{"TCPInfo", Type, 1},
    -		{"TCPInfo.Advmss", Field, 1},
    -		{"TCPInfo.Ato", Field, 1},
    -		{"TCPInfo.Backoff", Field, 1},
    -		{"TCPInfo.Ca_state", Field, 1},
    -		{"TCPInfo.Fackets", Field, 1},
    -		{"TCPInfo.Last_ack_recv", Field, 1},
    -		{"TCPInfo.Last_ack_sent", Field, 1},
    -		{"TCPInfo.Last_data_recv", Field, 1},
    -		{"TCPInfo.Last_data_sent", Field, 1},
    -		{"TCPInfo.Lost", Field, 1},
    -		{"TCPInfo.Options", Field, 1},
    -		{"TCPInfo.Pad_cgo_0", Field, 1},
    -		{"TCPInfo.Pmtu", Field, 1},
    -		{"TCPInfo.Probes", Field, 1},
    -		{"TCPInfo.Rcv_mss", Field, 1},
    -		{"TCPInfo.Rcv_rtt", Field, 1},
    -		{"TCPInfo.Rcv_space", Field, 1},
    -		{"TCPInfo.Rcv_ssthresh", Field, 1},
    -		{"TCPInfo.Reordering", Field, 1},
    -		{"TCPInfo.Retrans", Field, 1},
    -		{"TCPInfo.Retransmits", Field, 1},
    -		{"TCPInfo.Rto", Field, 1},
    -		{"TCPInfo.Rtt", Field, 1},
    -		{"TCPInfo.Rttvar", Field, 1},
    -		{"TCPInfo.Sacked", Field, 1},
    -		{"TCPInfo.Snd_cwnd", Field, 1},
    -		{"TCPInfo.Snd_mss", Field, 1},
    -		{"TCPInfo.Snd_ssthresh", Field, 1},
    -		{"TCPInfo.State", Field, 1},
    -		{"TCPInfo.Total_retrans", Field, 1},
    -		{"TCPInfo.Unacked", Field, 1},
    -		{"TCPKeepalive", Type, 3},
    -		{"TCPKeepalive.Interval", Field, 3},
    -		{"TCPKeepalive.OnOff", Field, 3},
    -		{"TCPKeepalive.Time", Field, 3},
    -		{"TCP_CA_NAME_MAX", Const, 0},
    -		{"TCP_CONGCTL", Const, 1},
    -		{"TCP_CONGESTION", Const, 0},
    -		{"TCP_CONNECTIONTIMEOUT", Const, 0},
    -		{"TCP_CORK", Const, 0},
    -		{"TCP_DEFER_ACCEPT", Const, 0},
    -		{"TCP_ENABLE_ECN", Const, 16},
    -		{"TCP_INFO", Const, 0},
    -		{"TCP_KEEPALIVE", Const, 0},
    -		{"TCP_KEEPCNT", Const, 0},
    -		{"TCP_KEEPIDLE", Const, 0},
    -		{"TCP_KEEPINIT", Const, 1},
    -		{"TCP_KEEPINTVL", Const, 0},
    -		{"TCP_LINGER2", Const, 0},
    -		{"TCP_MAXBURST", Const, 0},
    -		{"TCP_MAXHLEN", Const, 0},
    -		{"TCP_MAXOLEN", Const, 0},
    -		{"TCP_MAXSEG", Const, 0},
    -		{"TCP_MAXWIN", Const, 0},
    -		{"TCP_MAX_SACK", Const, 0},
    -		{"TCP_MAX_WINSHIFT", Const, 0},
    -		{"TCP_MD5SIG", Const, 0},
    -		{"TCP_MD5SIG_MAXKEYLEN", Const, 0},
    -		{"TCP_MINMSS", Const, 0},
    -		{"TCP_MINMSSOVERLOAD", Const, 0},
    -		{"TCP_MSS", Const, 0},
    -		{"TCP_NODELAY", Const, 0},
    -		{"TCP_NOOPT", Const, 0},
    -		{"TCP_NOPUSH", Const, 0},
    -		{"TCP_NOTSENT_LOWAT", Const, 16},
    -		{"TCP_NSTATES", Const, 1},
    -		{"TCP_QUICKACK", Const, 0},
    -		{"TCP_RXT_CONNDROPTIME", Const, 0},
    -		{"TCP_RXT_FINDROP", Const, 0},
    -		{"TCP_SACK_ENABLE", Const, 1},
    -		{"TCP_SENDMOREACKS", Const, 16},
    -		{"TCP_SYNCNT", Const, 0},
    -		{"TCP_VENDOR", Const, 3},
    -		{"TCP_WINDOW_CLAMP", Const, 0},
    -		{"TCSAFLUSH", Const, 1},
    -		{"TCSETS", Const, 0},
    -		{"TF_DISCONNECT", Const, 0},
    -		{"TF_REUSE_SOCKET", Const, 0},
    -		{"TF_USE_DEFAULT_WORKER", Const, 0},
    -		{"TF_USE_KERNEL_APC", Const, 0},
    -		{"TF_USE_SYSTEM_THREAD", Const, 0},
    -		{"TF_WRITE_BEHIND", Const, 0},
    -		{"TH32CS_INHERIT", Const, 4},
    -		{"TH32CS_SNAPALL", Const, 4},
    -		{"TH32CS_SNAPHEAPLIST", Const, 4},
    -		{"TH32CS_SNAPMODULE", Const, 4},
    -		{"TH32CS_SNAPMODULE32", Const, 4},
    -		{"TH32CS_SNAPPROCESS", Const, 4},
    -		{"TH32CS_SNAPTHREAD", Const, 4},
    -		{"TIME_ZONE_ID_DAYLIGHT", Const, 0},
    -		{"TIME_ZONE_ID_STANDARD", Const, 0},
    -		{"TIME_ZONE_ID_UNKNOWN", Const, 0},
    -		{"TIOCCBRK", Const, 0},
    -		{"TIOCCDTR", Const, 0},
    -		{"TIOCCONS", Const, 0},
    -		{"TIOCDCDTIMESTAMP", Const, 0},
    -		{"TIOCDRAIN", Const, 0},
    -		{"TIOCDSIMICROCODE", Const, 0},
    -		{"TIOCEXCL", Const, 0},
    -		{"TIOCEXT", Const, 0},
    -		{"TIOCFLAG_CDTRCTS", Const, 1},
    -		{"TIOCFLAG_CLOCAL", Const, 1},
    -		{"TIOCFLAG_CRTSCTS", Const, 1},
    -		{"TIOCFLAG_MDMBUF", Const, 1},
    -		{"TIOCFLAG_PPS", Const, 1},
    -		{"TIOCFLAG_SOFTCAR", Const, 1},
    -		{"TIOCFLUSH", Const, 0},
    -		{"TIOCGDEV", Const, 0},
    -		{"TIOCGDRAINWAIT", Const, 0},
    -		{"TIOCGETA", Const, 0},
    -		{"TIOCGETD", Const, 0},
    -		{"TIOCGFLAGS", Const, 1},
    -		{"TIOCGICOUNT", Const, 0},
    -		{"TIOCGLCKTRMIOS", Const, 0},
    -		{"TIOCGLINED", Const, 1},
    -		{"TIOCGPGRP", Const, 0},
    -		{"TIOCGPTN", Const, 0},
    -		{"TIOCGQSIZE", Const, 1},
    -		{"TIOCGRANTPT", Const, 1},
    -		{"TIOCGRS485", Const, 0},
    -		{"TIOCGSERIAL", Const, 0},
    -		{"TIOCGSID", Const, 0},
    -		{"TIOCGSIZE", Const, 1},
    -		{"TIOCGSOFTCAR", Const, 0},
    -		{"TIOCGTSTAMP", Const, 1},
    -		{"TIOCGWINSZ", Const, 0},
    -		{"TIOCINQ", Const, 0},
    -		{"TIOCIXOFF", Const, 0},
    -		{"TIOCIXON", Const, 0},
    -		{"TIOCLINUX", Const, 0},
    -		{"TIOCMBIC", Const, 0},
    -		{"TIOCMBIS", Const, 0},
    -		{"TIOCMGDTRWAIT", Const, 0},
    -		{"TIOCMGET", Const, 0},
    -		{"TIOCMIWAIT", Const, 0},
    -		{"TIOCMODG", Const, 0},
    -		{"TIOCMODS", Const, 0},
    -		{"TIOCMSDTRWAIT", Const, 0},
    -		{"TIOCMSET", Const, 0},
    -		{"TIOCM_CAR", Const, 0},
    -		{"TIOCM_CD", Const, 0},
    -		{"TIOCM_CTS", Const, 0},
    -		{"TIOCM_DCD", Const, 0},
    -		{"TIOCM_DSR", Const, 0},
    -		{"TIOCM_DTR", Const, 0},
    -		{"TIOCM_LE", Const, 0},
    -		{"TIOCM_RI", Const, 0},
    -		{"TIOCM_RNG", Const, 0},
    -		{"TIOCM_RTS", Const, 0},
    -		{"TIOCM_SR", Const, 0},
    -		{"TIOCM_ST", Const, 0},
    -		{"TIOCNOTTY", Const, 0},
    -		{"TIOCNXCL", Const, 0},
    -		{"TIOCOUTQ", Const, 0},
    -		{"TIOCPKT", Const, 0},
    -		{"TIOCPKT_DATA", Const, 0},
    -		{"TIOCPKT_DOSTOP", Const, 0},
    -		{"TIOCPKT_FLUSHREAD", Const, 0},
    -		{"TIOCPKT_FLUSHWRITE", Const, 0},
    -		{"TIOCPKT_IOCTL", Const, 0},
    -		{"TIOCPKT_NOSTOP", Const, 0},
    -		{"TIOCPKT_START", Const, 0},
    -		{"TIOCPKT_STOP", Const, 0},
    -		{"TIOCPTMASTER", Const, 0},
    -		{"TIOCPTMGET", Const, 1},
    -		{"TIOCPTSNAME", Const, 1},
    -		{"TIOCPTYGNAME", Const, 0},
    -		{"TIOCPTYGRANT", Const, 0},
    -		{"TIOCPTYUNLK", Const, 0},
    -		{"TIOCRCVFRAME", Const, 1},
    -		{"TIOCREMOTE", Const, 0},
    -		{"TIOCSBRK", Const, 0},
    -		{"TIOCSCONS", Const, 0},
    -		{"TIOCSCTTY", Const, 0},
    -		{"TIOCSDRAINWAIT", Const, 0},
    -		{"TIOCSDTR", Const, 0},
    -		{"TIOCSERCONFIG", Const, 0},
    -		{"TIOCSERGETLSR", Const, 0},
    -		{"TIOCSERGETMULTI", Const, 0},
    -		{"TIOCSERGSTRUCT", Const, 0},
    -		{"TIOCSERGWILD", Const, 0},
    -		{"TIOCSERSETMULTI", Const, 0},
    -		{"TIOCSERSWILD", Const, 0},
    -		{"TIOCSER_TEMT", Const, 0},
    -		{"TIOCSETA", Const, 0},
    -		{"TIOCSETAF", Const, 0},
    -		{"TIOCSETAW", Const, 0},
    -		{"TIOCSETD", Const, 0},
    -		{"TIOCSFLAGS", Const, 1},
    -		{"TIOCSIG", Const, 0},
    -		{"TIOCSLCKTRMIOS", Const, 0},
    -		{"TIOCSLINED", Const, 1},
    -		{"TIOCSPGRP", Const, 0},
    -		{"TIOCSPTLCK", Const, 0},
    -		{"TIOCSQSIZE", Const, 1},
    -		{"TIOCSRS485", Const, 0},
    -		{"TIOCSSERIAL", Const, 0},
    -		{"TIOCSSIZE", Const, 1},
    -		{"TIOCSSOFTCAR", Const, 0},
    -		{"TIOCSTART", Const, 0},
    -		{"TIOCSTAT", Const, 0},
    -		{"TIOCSTI", Const, 0},
    -		{"TIOCSTOP", Const, 0},
    -		{"TIOCSTSTAMP", Const, 1},
    -		{"TIOCSWINSZ", Const, 0},
    -		{"TIOCTIMESTAMP", Const, 0},
    -		{"TIOCUCNTL", Const, 0},
    -		{"TIOCVHANGUP", Const, 0},
    -		{"TIOCXMTFRAME", Const, 1},
    -		{"TOKEN_ADJUST_DEFAULT", Const, 0},
    -		{"TOKEN_ADJUST_GROUPS", Const, 0},
    -		{"TOKEN_ADJUST_PRIVILEGES", Const, 0},
    -		{"TOKEN_ADJUST_SESSIONID", Const, 11},
    -		{"TOKEN_ALL_ACCESS", Const, 0},
    -		{"TOKEN_ASSIGN_PRIMARY", Const, 0},
    -		{"TOKEN_DUPLICATE", Const, 0},
    -		{"TOKEN_EXECUTE", Const, 0},
    -		{"TOKEN_IMPERSONATE", Const, 0},
    -		{"TOKEN_QUERY", Const, 0},
    -		{"TOKEN_QUERY_SOURCE", Const, 0},
    -		{"TOKEN_READ", Const, 0},
    -		{"TOKEN_WRITE", Const, 0},
    -		{"TOSTOP", Const, 0},
    -		{"TRUNCATE_EXISTING", Const, 0},
    -		{"TUNATTACHFILTER", Const, 0},
    -		{"TUNDETACHFILTER", Const, 0},
    -		{"TUNGETFEATURES", Const, 0},
    -		{"TUNGETIFF", Const, 0},
    -		{"TUNGETSNDBUF", Const, 0},
    -		{"TUNGETVNETHDRSZ", Const, 0},
    -		{"TUNSETDEBUG", Const, 0},
    -		{"TUNSETGROUP", Const, 0},
    -		{"TUNSETIFF", Const, 0},
    -		{"TUNSETLINK", Const, 0},
    -		{"TUNSETNOCSUM", Const, 0},
    -		{"TUNSETOFFLOAD", Const, 0},
    -		{"TUNSETOWNER", Const, 0},
    -		{"TUNSETPERSIST", Const, 0},
    -		{"TUNSETSNDBUF", Const, 0},
    -		{"TUNSETTXFILTER", Const, 0},
    -		{"TUNSETVNETHDRSZ", Const, 0},
    -		{"Tee", Func, 0},
    -		{"TerminateProcess", Func, 0},
    -		{"Termios", Type, 0},
    -		{"Termios.Cc", Field, 0},
    -		{"Termios.Cflag", Field, 0},
    -		{"Termios.Iflag", Field, 0},
    -		{"Termios.Ispeed", Field, 0},
    -		{"Termios.Lflag", Field, 0},
    -		{"Termios.Line", Field, 0},
    -		{"Termios.Oflag", Field, 0},
    -		{"Termios.Ospeed", Field, 0},
    -		{"Termios.Pad_cgo_0", Field, 0},
    -		{"Tgkill", Func, 0},
    -		{"Time", Func, 0},
    -		{"Time_t", Type, 0},
    -		{"Times", Func, 0},
    -		{"Timespec", Type, 0},
    -		{"Timespec.Nsec", Field, 0},
    -		{"Timespec.Pad_cgo_0", Field, 2},
    -		{"Timespec.Sec", Field, 0},
    -		{"TimespecToNsec", Func, 0},
    -		{"Timeval", Type, 0},
    -		{"Timeval.Pad_cgo_0", Field, 0},
    -		{"Timeval.Sec", Field, 0},
    -		{"Timeval.Usec", Field, 0},
    -		{"Timeval32", Type, 0},
    -		{"Timeval32.Sec", Field, 0},
    -		{"Timeval32.Usec", Field, 0},
    -		{"TimevalToNsec", Func, 0},
    -		{"Timex", Type, 0},
    -		{"Timex.Calcnt", Field, 0},
    -		{"Timex.Constant", Field, 0},
    -		{"Timex.Errcnt", Field, 0},
    -		{"Timex.Esterror", Field, 0},
    -		{"Timex.Freq", Field, 0},
    -		{"Timex.Jitcnt", Field, 0},
    -		{"Timex.Jitter", Field, 0},
    -		{"Timex.Maxerror", Field, 0},
    -		{"Timex.Modes", Field, 0},
    -		{"Timex.Offset", Field, 0},
    -		{"Timex.Pad_cgo_0", Field, 0},
    -		{"Timex.Pad_cgo_1", Field, 0},
    -		{"Timex.Pad_cgo_2", Field, 0},
    -		{"Timex.Pad_cgo_3", Field, 0},
    -		{"Timex.Ppsfreq", Field, 0},
    -		{"Timex.Precision", Field, 0},
    -		{"Timex.Shift", Field, 0},
    -		{"Timex.Stabil", Field, 0},
    -		{"Timex.Status", Field, 0},
    -		{"Timex.Stbcnt", Field, 0},
    -		{"Timex.Tai", Field, 0},
    -		{"Timex.Tick", Field, 0},
    -		{"Timex.Time", Field, 0},
    -		{"Timex.Tolerance", Field, 0},
    -		{"Timezoneinformation", Type, 0},
    -		{"Timezoneinformation.Bias", Field, 0},
    -		{"Timezoneinformation.DaylightBias", Field, 0},
    -		{"Timezoneinformation.DaylightDate", Field, 0},
    -		{"Timezoneinformation.DaylightName", Field, 0},
    -		{"Timezoneinformation.StandardBias", Field, 0},
    -		{"Timezoneinformation.StandardDate", Field, 0},
    -		{"Timezoneinformation.StandardName", Field, 0},
    -		{"Tms", Type, 0},
    -		{"Tms.Cstime", Field, 0},
    -		{"Tms.Cutime", Field, 0},
    -		{"Tms.Stime", Field, 0},
    -		{"Tms.Utime", Field, 0},
    -		{"Token", Type, 0},
    -		{"TokenAccessInformation", Const, 0},
    -		{"TokenAuditPolicy", Const, 0},
    -		{"TokenDefaultDacl", Const, 0},
    -		{"TokenElevation", Const, 0},
    -		{"TokenElevationType", Const, 0},
    -		{"TokenGroups", Const, 0},
    -		{"TokenGroupsAndPrivileges", Const, 0},
    -		{"TokenHasRestrictions", Const, 0},
    -		{"TokenImpersonationLevel", Const, 0},
    -		{"TokenIntegrityLevel", Const, 0},
    -		{"TokenLinkedToken", Const, 0},
    -		{"TokenLogonSid", Const, 0},
    -		{"TokenMandatoryPolicy", Const, 0},
    -		{"TokenOrigin", Const, 0},
    -		{"TokenOwner", Const, 0},
    -		{"TokenPrimaryGroup", Const, 0},
    -		{"TokenPrivileges", Const, 0},
    -		{"TokenRestrictedSids", Const, 0},
    -		{"TokenSandBoxInert", Const, 0},
    -		{"TokenSessionId", Const, 0},
    -		{"TokenSessionReference", Const, 0},
    -		{"TokenSource", Const, 0},
    -		{"TokenStatistics", Const, 0},
    -		{"TokenType", Const, 0},
    -		{"TokenUIAccess", Const, 0},
    -		{"TokenUser", Const, 0},
    -		{"TokenVirtualizationAllowed", Const, 0},
    -		{"TokenVirtualizationEnabled", Const, 0},
    -		{"Tokenprimarygroup", Type, 0},
    -		{"Tokenprimarygroup.PrimaryGroup", Field, 0},
    -		{"Tokenuser", Type, 0},
    -		{"Tokenuser.User", Field, 0},
    -		{"TranslateAccountName", Func, 0},
    -		{"TranslateName", Func, 0},
    -		{"TransmitFile", Func, 0},
    -		{"TransmitFileBuffers", Type, 0},
    -		{"TransmitFileBuffers.Head", Field, 0},
    -		{"TransmitFileBuffers.HeadLength", Field, 0},
    -		{"TransmitFileBuffers.Tail", Field, 0},
    -		{"TransmitFileBuffers.TailLength", Field, 0},
    -		{"Truncate", Func, 0},
    -		{"UNIX_PATH_MAX", Const, 12},
    -		{"USAGE_MATCH_TYPE_AND", Const, 0},
    -		{"USAGE_MATCH_TYPE_OR", Const, 0},
    -		{"UTF16FromString", Func, 1},
    -		{"UTF16PtrFromString", Func, 1},
    -		{"UTF16ToString", Func, 0},
    -		{"Ucred", Type, 0},
    -		{"Ucred.Gid", Field, 0},
    -		{"Ucred.Pid", Field, 0},
    -		{"Ucred.Uid", Field, 0},
    -		{"Umask", Func, 0},
    -		{"Uname", Func, 0},
    -		{"Undelete", Func, 0},
    -		{"UnixCredentials", Func, 0},
    -		{"UnixRights", Func, 0},
    -		{"Unlink", Func, 0},
    -		{"Unlinkat", Func, 0},
    -		{"UnmapViewOfFile", Func, 0},
    -		{"Unmount", Func, 0},
    -		{"Unsetenv", Func, 4},
    -		{"Unshare", Func, 0},
    -		{"UserInfo10", Type, 0},
    -		{"UserInfo10.Comment", Field, 0},
    -		{"UserInfo10.FullName", Field, 0},
    -		{"UserInfo10.Name", Field, 0},
    -		{"UserInfo10.UsrComment", Field, 0},
    -		{"Ustat", Func, 0},
    -		{"Ustat_t", Type, 0},
    -		{"Ustat_t.Fname", Field, 0},
    -		{"Ustat_t.Fpack", Field, 0},
    -		{"Ustat_t.Pad_cgo_0", Field, 0},
    -		{"Ustat_t.Pad_cgo_1", Field, 0},
    -		{"Ustat_t.Tfree", Field, 0},
    -		{"Ustat_t.Tinode", Field, 0},
    -		{"Utimbuf", Type, 0},
    -		{"Utimbuf.Actime", Field, 0},
    -		{"Utimbuf.Modtime", Field, 0},
    -		{"Utime", Func, 0},
    -		{"Utimes", Func, 0},
    -		{"UtimesNano", Func, 1},
    -		{"Utsname", Type, 0},
    -		{"Utsname.Domainname", Field, 0},
    -		{"Utsname.Machine", Field, 0},
    -		{"Utsname.Nodename", Field, 0},
    -		{"Utsname.Release", Field, 0},
    -		{"Utsname.Sysname", Field, 0},
    -		{"Utsname.Version", Field, 0},
    -		{"VDISCARD", Const, 0},
    -		{"VDSUSP", Const, 1},
    -		{"VEOF", Const, 0},
    -		{"VEOL", Const, 0},
    -		{"VEOL2", Const, 0},
    -		{"VERASE", Const, 0},
    -		{"VERASE2", Const, 1},
    -		{"VINTR", Const, 0},
    -		{"VKILL", Const, 0},
    -		{"VLNEXT", Const, 0},
    -		{"VMIN", Const, 0},
    -		{"VQUIT", Const, 0},
    -		{"VREPRINT", Const, 0},
    -		{"VSTART", Const, 0},
    -		{"VSTATUS", Const, 1},
    -		{"VSTOP", Const, 0},
    -		{"VSUSP", Const, 0},
    -		{"VSWTC", Const, 0},
    -		{"VT0", Const, 1},
    -		{"VT1", Const, 1},
    -		{"VTDLY", Const, 1},
    -		{"VTIME", Const, 0},
    -		{"VWERASE", Const, 0},
    -		{"VirtualLock", Func, 0},
    -		{"VirtualUnlock", Func, 0},
    -		{"WAIT_ABANDONED", Const, 0},
    -		{"WAIT_FAILED", Const, 0},
    -		{"WAIT_OBJECT_0", Const, 0},
    -		{"WAIT_TIMEOUT", Const, 0},
    -		{"WALL", Const, 0},
    -		{"WALLSIG", Const, 1},
    -		{"WALTSIG", Const, 1},
    -		{"WCLONE", Const, 0},
    -		{"WCONTINUED", Const, 0},
    -		{"WCOREFLAG", Const, 0},
    -		{"WEXITED", Const, 0},
    -		{"WLINUXCLONE", Const, 0},
    -		{"WNOHANG", Const, 0},
    -		{"WNOTHREAD", Const, 0},
    -		{"WNOWAIT", Const, 0},
    -		{"WNOZOMBIE", Const, 1},
    -		{"WOPTSCHECKED", Const, 1},
    -		{"WORDSIZE", Const, 0},
    -		{"WSABuf", Type, 0},
    -		{"WSABuf.Buf", Field, 0},
    -		{"WSABuf.Len", Field, 0},
    -		{"WSACleanup", Func, 0},
    -		{"WSADESCRIPTION_LEN", Const, 0},
    -		{"WSAData", Type, 0},
    -		{"WSAData.Description", Field, 0},
    -		{"WSAData.HighVersion", Field, 0},
    -		{"WSAData.MaxSockets", Field, 0},
    -		{"WSAData.MaxUdpDg", Field, 0},
    -		{"WSAData.SystemStatus", Field, 0},
    -		{"WSAData.VendorInfo", Field, 0},
    -		{"WSAData.Version", Field, 0},
    -		{"WSAEACCES", Const, 2},
    -		{"WSAECONNABORTED", Const, 9},
    -		{"WSAECONNRESET", Const, 3},
    -		{"WSAENOPROTOOPT", Const, 23},
    -		{"WSAEnumProtocols", Func, 2},
    -		{"WSAID_CONNECTEX", Var, 1},
    -		{"WSAIoctl", Func, 0},
    -		{"WSAPROTOCOL_LEN", Const, 2},
    -		{"WSAProtocolChain", Type, 2},
    -		{"WSAProtocolChain.ChainEntries", Field, 2},
    -		{"WSAProtocolChain.ChainLen", Field, 2},
    -		{"WSAProtocolInfo", Type, 2},
    -		{"WSAProtocolInfo.AddressFamily", Field, 2},
    -		{"WSAProtocolInfo.CatalogEntryId", Field, 2},
    -		{"WSAProtocolInfo.MaxSockAddr", Field, 2},
    -		{"WSAProtocolInfo.MessageSize", Field, 2},
    -		{"WSAProtocolInfo.MinSockAddr", Field, 2},
    -		{"WSAProtocolInfo.NetworkByteOrder", Field, 2},
    -		{"WSAProtocolInfo.Protocol", Field, 2},
    -		{"WSAProtocolInfo.ProtocolChain", Field, 2},
    -		{"WSAProtocolInfo.ProtocolMaxOffset", Field, 2},
    -		{"WSAProtocolInfo.ProtocolName", Field, 2},
    -		{"WSAProtocolInfo.ProviderFlags", Field, 2},
    -		{"WSAProtocolInfo.ProviderId", Field, 2},
    -		{"WSAProtocolInfo.ProviderReserved", Field, 2},
    -		{"WSAProtocolInfo.SecurityScheme", Field, 2},
    -		{"WSAProtocolInfo.ServiceFlags1", Field, 2},
    -		{"WSAProtocolInfo.ServiceFlags2", Field, 2},
    -		{"WSAProtocolInfo.ServiceFlags3", Field, 2},
    -		{"WSAProtocolInfo.ServiceFlags4", Field, 2},
    -		{"WSAProtocolInfo.SocketType", Field, 2},
    -		{"WSAProtocolInfo.Version", Field, 2},
    -		{"WSARecv", Func, 0},
    -		{"WSARecvFrom", Func, 0},
    -		{"WSASYS_STATUS_LEN", Const, 0},
    -		{"WSASend", Func, 0},
    -		{"WSASendTo", Func, 0},
    -		{"WSASendto", Func, 0},
    -		{"WSAStartup", Func, 0},
    -		{"WSTOPPED", Const, 0},
    -		{"WTRAPPED", Const, 1},
    -		{"WUNTRACED", Const, 0},
    -		{"Wait4", Func, 0},
    -		{"WaitForSingleObject", Func, 0},
    -		{"WaitStatus", Type, 0},
    -		{"WaitStatus.ExitCode", Field, 0},
    -		{"Win32FileAttributeData", Type, 0},
    -		{"Win32FileAttributeData.CreationTime", Field, 0},
    -		{"Win32FileAttributeData.FileAttributes", Field, 0},
    -		{"Win32FileAttributeData.FileSizeHigh", Field, 0},
    -		{"Win32FileAttributeData.FileSizeLow", Field, 0},
    -		{"Win32FileAttributeData.LastAccessTime", Field, 0},
    -		{"Win32FileAttributeData.LastWriteTime", Field, 0},
    -		{"Win32finddata", Type, 0},
    -		{"Win32finddata.AlternateFileName", Field, 0},
    -		{"Win32finddata.CreationTime", Field, 0},
    -		{"Win32finddata.FileAttributes", Field, 0},
    -		{"Win32finddata.FileName", Field, 0},
    -		{"Win32finddata.FileSizeHigh", Field, 0},
    -		{"Win32finddata.FileSizeLow", Field, 0},
    -		{"Win32finddata.LastAccessTime", Field, 0},
    -		{"Win32finddata.LastWriteTime", Field, 0},
    -		{"Win32finddata.Reserved0", Field, 0},
    -		{"Win32finddata.Reserved1", Field, 0},
    -		{"Write", Func, 0},
    -		{"WriteConsole", Func, 1},
    -		{"WriteFile", Func, 0},
    -		{"X509_ASN_ENCODING", Const, 0},
    -		{"XCASE", Const, 0},
    -		{"XP1_CONNECTIONLESS", Const, 2},
    -		{"XP1_CONNECT_DATA", Const, 2},
    -		{"XP1_DISCONNECT_DATA", Const, 2},
    -		{"XP1_EXPEDITED_DATA", Const, 2},
    -		{"XP1_GRACEFUL_CLOSE", Const, 2},
    -		{"XP1_GUARANTEED_DELIVERY", Const, 2},
    -		{"XP1_GUARANTEED_ORDER", Const, 2},
    -		{"XP1_IFS_HANDLES", Const, 2},
    -		{"XP1_MESSAGE_ORIENTED", Const, 2},
    -		{"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2},
    -		{"XP1_MULTIPOINT_DATA_PLANE", Const, 2},
    -		{"XP1_PARTIAL_MESSAGE", Const, 2},
    -		{"XP1_PSEUDO_STREAM", Const, 2},
    -		{"XP1_QOS_SUPPORTED", Const, 2},
    -		{"XP1_SAN_SUPPORT_SDP", Const, 2},
    -		{"XP1_SUPPORT_BROADCAST", Const, 2},
    -		{"XP1_SUPPORT_MULTIPOINT", Const, 2},
    -		{"XP1_UNI_RECV", Const, 2},
    -		{"XP1_UNI_SEND", Const, 2},
    +		{"(*Cmsghdr).SetLen", Method, 0, ""},
    +		{"(*DLL).FindProc", Method, 0, ""},
    +		{"(*DLL).MustFindProc", Method, 0, ""},
    +		{"(*DLL).Release", Method, 0, ""},
    +		{"(*DLLError).Error", Method, 0, ""},
    +		{"(*DLLError).Unwrap", Method, 16, ""},
    +		{"(*Filetime).Nanoseconds", Method, 0, ""},
    +		{"(*Iovec).SetLen", Method, 0, ""},
    +		{"(*LazyDLL).Handle", Method, 0, ""},
    +		{"(*LazyDLL).Load", Method, 0, ""},
    +		{"(*LazyDLL).NewProc", Method, 0, ""},
    +		{"(*LazyProc).Addr", Method, 0, ""},
    +		{"(*LazyProc).Call", Method, 0, ""},
    +		{"(*LazyProc).Find", Method, 0, ""},
    +		{"(*Msghdr).SetControllen", Method, 0, ""},
    +		{"(*Proc).Addr", Method, 0, ""},
    +		{"(*Proc).Call", Method, 0, ""},
    +		{"(*PtraceRegs).PC", Method, 0, ""},
    +		{"(*PtraceRegs).SetPC", Method, 0, ""},
    +		{"(*RawSockaddrAny).Sockaddr", Method, 0, ""},
    +		{"(*SID).Copy", Method, 0, ""},
    +		{"(*SID).Len", Method, 0, ""},
    +		{"(*SID).LookupAccount", Method, 0, ""},
    +		{"(*SID).String", Method, 0, ""},
    +		{"(*Timespec).Nano", Method, 0, ""},
    +		{"(*Timespec).Unix", Method, 0, ""},
    +		{"(*Timeval).Nano", Method, 0, ""},
    +		{"(*Timeval).Nanoseconds", Method, 0, ""},
    +		{"(*Timeval).Unix", Method, 0, ""},
    +		{"(Errno).Error", Method, 0, ""},
    +		{"(Errno).Is", Method, 13, ""},
    +		{"(Errno).Temporary", Method, 0, ""},
    +		{"(Errno).Timeout", Method, 0, ""},
    +		{"(Signal).Signal", Method, 0, ""},
    +		{"(Signal).String", Method, 0, ""},
    +		{"(Token).Close", Method, 0, ""},
    +		{"(Token).GetTokenPrimaryGroup", Method, 0, ""},
    +		{"(Token).GetTokenUser", Method, 0, ""},
    +		{"(Token).GetUserProfileDirectory", Method, 0, ""},
    +		{"(WaitStatus).Continued", Method, 0, ""},
    +		{"(WaitStatus).CoreDump", Method, 0, ""},
    +		{"(WaitStatus).ExitStatus", Method, 0, ""},
    +		{"(WaitStatus).Exited", Method, 0, ""},
    +		{"(WaitStatus).Signal", Method, 0, ""},
    +		{"(WaitStatus).Signaled", Method, 0, ""},
    +		{"(WaitStatus).StopSignal", Method, 0, ""},
    +		{"(WaitStatus).Stopped", Method, 0, ""},
    +		{"(WaitStatus).TrapCause", Method, 0, ""},
    +		{"AF_ALG", Const, 0, ""},
    +		{"AF_APPLETALK", Const, 0, ""},
    +		{"AF_ARP", Const, 0, ""},
    +		{"AF_ASH", Const, 0, ""},
    +		{"AF_ATM", Const, 0, ""},
    +		{"AF_ATMPVC", Const, 0, ""},
    +		{"AF_ATMSVC", Const, 0, ""},
    +		{"AF_AX25", Const, 0, ""},
    +		{"AF_BLUETOOTH", Const, 0, ""},
    +		{"AF_BRIDGE", Const, 0, ""},
    +		{"AF_CAIF", Const, 0, ""},
    +		{"AF_CAN", Const, 0, ""},
    +		{"AF_CCITT", Const, 0, ""},
    +		{"AF_CHAOS", Const, 0, ""},
    +		{"AF_CNT", Const, 0, ""},
    +		{"AF_COIP", Const, 0, ""},
    +		{"AF_DATAKIT", Const, 0, ""},
    +		{"AF_DECnet", Const, 0, ""},
    +		{"AF_DLI", Const, 0, ""},
    +		{"AF_E164", Const, 0, ""},
    +		{"AF_ECMA", Const, 0, ""},
    +		{"AF_ECONET", Const, 0, ""},
    +		{"AF_ENCAP", Const, 1, ""},
    +		{"AF_FILE", Const, 0, ""},
    +		{"AF_HYLINK", Const, 0, ""},
    +		{"AF_IEEE80211", Const, 0, ""},
    +		{"AF_IEEE802154", Const, 0, ""},
    +		{"AF_IMPLINK", Const, 0, ""},
    +		{"AF_INET", Const, 0, ""},
    +		{"AF_INET6", Const, 0, ""},
    +		{"AF_INET6_SDP", Const, 3, ""},
    +		{"AF_INET_SDP", Const, 3, ""},
    +		{"AF_IPX", Const, 0, ""},
    +		{"AF_IRDA", Const, 0, ""},
    +		{"AF_ISDN", Const, 0, ""},
    +		{"AF_ISO", Const, 0, ""},
    +		{"AF_IUCV", Const, 0, ""},
    +		{"AF_KEY", Const, 0, ""},
    +		{"AF_LAT", Const, 0, ""},
    +		{"AF_LINK", Const, 0, ""},
    +		{"AF_LLC", Const, 0, ""},
    +		{"AF_LOCAL", Const, 0, ""},
    +		{"AF_MAX", Const, 0, ""},
    +		{"AF_MPLS", Const, 1, ""},
    +		{"AF_NATM", Const, 0, ""},
    +		{"AF_NDRV", Const, 0, ""},
    +		{"AF_NETBEUI", Const, 0, ""},
    +		{"AF_NETBIOS", Const, 0, ""},
    +		{"AF_NETGRAPH", Const, 0, ""},
    +		{"AF_NETLINK", Const, 0, ""},
    +		{"AF_NETROM", Const, 0, ""},
    +		{"AF_NS", Const, 0, ""},
    +		{"AF_OROUTE", Const, 1, ""},
    +		{"AF_OSI", Const, 0, ""},
    +		{"AF_PACKET", Const, 0, ""},
    +		{"AF_PHONET", Const, 0, ""},
    +		{"AF_PPP", Const, 0, ""},
    +		{"AF_PPPOX", Const, 0, ""},
    +		{"AF_PUP", Const, 0, ""},
    +		{"AF_RDS", Const, 0, ""},
    +		{"AF_RESERVED_36", Const, 0, ""},
    +		{"AF_ROSE", Const, 0, ""},
    +		{"AF_ROUTE", Const, 0, ""},
    +		{"AF_RXRPC", Const, 0, ""},
    +		{"AF_SCLUSTER", Const, 0, ""},
    +		{"AF_SECURITY", Const, 0, ""},
    +		{"AF_SIP", Const, 0, ""},
    +		{"AF_SLOW", Const, 0, ""},
    +		{"AF_SNA", Const, 0, ""},
    +		{"AF_SYSTEM", Const, 0, ""},
    +		{"AF_TIPC", Const, 0, ""},
    +		{"AF_UNIX", Const, 0, ""},
    +		{"AF_UNSPEC", Const, 0, ""},
    +		{"AF_UTUN", Const, 16, ""},
    +		{"AF_VENDOR00", Const, 0, ""},
    +		{"AF_VENDOR01", Const, 0, ""},
    +		{"AF_VENDOR02", Const, 0, ""},
    +		{"AF_VENDOR03", Const, 0, ""},
    +		{"AF_VENDOR04", Const, 0, ""},
    +		{"AF_VENDOR05", Const, 0, ""},
    +		{"AF_VENDOR06", Const, 0, ""},
    +		{"AF_VENDOR07", Const, 0, ""},
    +		{"AF_VENDOR08", Const, 0, ""},
    +		{"AF_VENDOR09", Const, 0, ""},
    +		{"AF_VENDOR10", Const, 0, ""},
    +		{"AF_VENDOR11", Const, 0, ""},
    +		{"AF_VENDOR12", Const, 0, ""},
    +		{"AF_VENDOR13", Const, 0, ""},
    +		{"AF_VENDOR14", Const, 0, ""},
    +		{"AF_VENDOR15", Const, 0, ""},
    +		{"AF_VENDOR16", Const, 0, ""},
    +		{"AF_VENDOR17", Const, 0, ""},
    +		{"AF_VENDOR18", Const, 0, ""},
    +		{"AF_VENDOR19", Const, 0, ""},
    +		{"AF_VENDOR20", Const, 0, ""},
    +		{"AF_VENDOR21", Const, 0, ""},
    +		{"AF_VENDOR22", Const, 0, ""},
    +		{"AF_VENDOR23", Const, 0, ""},
    +		{"AF_VENDOR24", Const, 0, ""},
    +		{"AF_VENDOR25", Const, 0, ""},
    +		{"AF_VENDOR26", Const, 0, ""},
    +		{"AF_VENDOR27", Const, 0, ""},
    +		{"AF_VENDOR28", Const, 0, ""},
    +		{"AF_VENDOR29", Const, 0, ""},
    +		{"AF_VENDOR30", Const, 0, ""},
    +		{"AF_VENDOR31", Const, 0, ""},
    +		{"AF_VENDOR32", Const, 0, ""},
    +		{"AF_VENDOR33", Const, 0, ""},
    +		{"AF_VENDOR34", Const, 0, ""},
    +		{"AF_VENDOR35", Const, 0, ""},
    +		{"AF_VENDOR36", Const, 0, ""},
    +		{"AF_VENDOR37", Const, 0, ""},
    +		{"AF_VENDOR38", Const, 0, ""},
    +		{"AF_VENDOR39", Const, 0, ""},
    +		{"AF_VENDOR40", Const, 0, ""},
    +		{"AF_VENDOR41", Const, 0, ""},
    +		{"AF_VENDOR42", Const, 0, ""},
    +		{"AF_VENDOR43", Const, 0, ""},
    +		{"AF_VENDOR44", Const, 0, ""},
    +		{"AF_VENDOR45", Const, 0, ""},
    +		{"AF_VENDOR46", Const, 0, ""},
    +		{"AF_VENDOR47", Const, 0, ""},
    +		{"AF_WANPIPE", Const, 0, ""},
    +		{"AF_X25", Const, 0, ""},
    +		{"AI_CANONNAME", Const, 1, ""},
    +		{"AI_NUMERICHOST", Const, 1, ""},
    +		{"AI_PASSIVE", Const, 1, ""},
    +		{"APPLICATION_ERROR", Const, 0, ""},
    +		{"ARPHRD_ADAPT", Const, 0, ""},
    +		{"ARPHRD_APPLETLK", Const, 0, ""},
    +		{"ARPHRD_ARCNET", Const, 0, ""},
    +		{"ARPHRD_ASH", Const, 0, ""},
    +		{"ARPHRD_ATM", Const, 0, ""},
    +		{"ARPHRD_AX25", Const, 0, ""},
    +		{"ARPHRD_BIF", Const, 0, ""},
    +		{"ARPHRD_CHAOS", Const, 0, ""},
    +		{"ARPHRD_CISCO", Const, 0, ""},
    +		{"ARPHRD_CSLIP", Const, 0, ""},
    +		{"ARPHRD_CSLIP6", Const, 0, ""},
    +		{"ARPHRD_DDCMP", Const, 0, ""},
    +		{"ARPHRD_DLCI", Const, 0, ""},
    +		{"ARPHRD_ECONET", Const, 0, ""},
    +		{"ARPHRD_EETHER", Const, 0, ""},
    +		{"ARPHRD_ETHER", Const, 0, ""},
    +		{"ARPHRD_EUI64", Const, 0, ""},
    +		{"ARPHRD_FCAL", Const, 0, ""},
    +		{"ARPHRD_FCFABRIC", Const, 0, ""},
    +		{"ARPHRD_FCPL", Const, 0, ""},
    +		{"ARPHRD_FCPP", Const, 0, ""},
    +		{"ARPHRD_FDDI", Const, 0, ""},
    +		{"ARPHRD_FRAD", Const, 0, ""},
    +		{"ARPHRD_FRELAY", Const, 1, ""},
    +		{"ARPHRD_HDLC", Const, 0, ""},
    +		{"ARPHRD_HIPPI", Const, 0, ""},
    +		{"ARPHRD_HWX25", Const, 0, ""},
    +		{"ARPHRD_IEEE1394", Const, 0, ""},
    +		{"ARPHRD_IEEE802", Const, 0, ""},
    +		{"ARPHRD_IEEE80211", Const, 0, ""},
    +		{"ARPHRD_IEEE80211_PRISM", Const, 0, ""},
    +		{"ARPHRD_IEEE80211_RADIOTAP", Const, 0, ""},
    +		{"ARPHRD_IEEE802154", Const, 0, ""},
    +		{"ARPHRD_IEEE802154_PHY", Const, 0, ""},
    +		{"ARPHRD_IEEE802_TR", Const, 0, ""},
    +		{"ARPHRD_INFINIBAND", Const, 0, ""},
    +		{"ARPHRD_IPDDP", Const, 0, ""},
    +		{"ARPHRD_IPGRE", Const, 0, ""},
    +		{"ARPHRD_IRDA", Const, 0, ""},
    +		{"ARPHRD_LAPB", Const, 0, ""},
    +		{"ARPHRD_LOCALTLK", Const, 0, ""},
    +		{"ARPHRD_LOOPBACK", Const, 0, ""},
    +		{"ARPHRD_METRICOM", Const, 0, ""},
    +		{"ARPHRD_NETROM", Const, 0, ""},
    +		{"ARPHRD_NONE", Const, 0, ""},
    +		{"ARPHRD_PIMREG", Const, 0, ""},
    +		{"ARPHRD_PPP", Const, 0, ""},
    +		{"ARPHRD_PRONET", Const, 0, ""},
    +		{"ARPHRD_RAWHDLC", Const, 0, ""},
    +		{"ARPHRD_ROSE", Const, 0, ""},
    +		{"ARPHRD_RSRVD", Const, 0, ""},
    +		{"ARPHRD_SIT", Const, 0, ""},
    +		{"ARPHRD_SKIP", Const, 0, ""},
    +		{"ARPHRD_SLIP", Const, 0, ""},
    +		{"ARPHRD_SLIP6", Const, 0, ""},
    +		{"ARPHRD_STRIP", Const, 1, ""},
    +		{"ARPHRD_TUNNEL", Const, 0, ""},
    +		{"ARPHRD_TUNNEL6", Const, 0, ""},
    +		{"ARPHRD_VOID", Const, 0, ""},
    +		{"ARPHRD_X25", Const, 0, ""},
    +		{"AUTHTYPE_CLIENT", Const, 0, ""},
    +		{"AUTHTYPE_SERVER", Const, 0, ""},
    +		{"Accept", Func, 0, "func(fd int) (nfd int, sa Sockaddr, err error)"},
    +		{"Accept4", Func, 1, "func(fd int, flags int) (nfd int, sa Sockaddr, err error)"},
    +		{"AcceptEx", Func, 0, ""},
    +		{"Access", Func, 0, "func(path string, mode uint32) (err error)"},
    +		{"Acct", Func, 0, "func(path string) (err error)"},
    +		{"AddrinfoW", Type, 1, ""},
    +		{"AddrinfoW.Addr", Field, 1, ""},
    +		{"AddrinfoW.Addrlen", Field, 1, ""},
    +		{"AddrinfoW.Canonname", Field, 1, ""},
    +		{"AddrinfoW.Family", Field, 1, ""},
    +		{"AddrinfoW.Flags", Field, 1, ""},
    +		{"AddrinfoW.Next", Field, 1, ""},
    +		{"AddrinfoW.Protocol", Field, 1, ""},
    +		{"AddrinfoW.Socktype", Field, 1, ""},
    +		{"Adjtime", Func, 0, ""},
    +		{"Adjtimex", Func, 0, "func(buf *Timex) (state int, err error)"},
    +		{"AllThreadsSyscall", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"AllThreadsSyscall6", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"AttachLsf", Func, 0, "func(fd int, i []SockFilter) error"},
    +		{"B0", Const, 0, ""},
    +		{"B1000000", Const, 0, ""},
    +		{"B110", Const, 0, ""},
    +		{"B115200", Const, 0, ""},
    +		{"B1152000", Const, 0, ""},
    +		{"B1200", Const, 0, ""},
    +		{"B134", Const, 0, ""},
    +		{"B14400", Const, 1, ""},
    +		{"B150", Const, 0, ""},
    +		{"B1500000", Const, 0, ""},
    +		{"B1800", Const, 0, ""},
    +		{"B19200", Const, 0, ""},
    +		{"B200", Const, 0, ""},
    +		{"B2000000", Const, 0, ""},
    +		{"B230400", Const, 0, ""},
    +		{"B2400", Const, 0, ""},
    +		{"B2500000", Const, 0, ""},
    +		{"B28800", Const, 1, ""},
    +		{"B300", Const, 0, ""},
    +		{"B3000000", Const, 0, ""},
    +		{"B3500000", Const, 0, ""},
    +		{"B38400", Const, 0, ""},
    +		{"B4000000", Const, 0, ""},
    +		{"B460800", Const, 0, ""},
    +		{"B4800", Const, 0, ""},
    +		{"B50", Const, 0, ""},
    +		{"B500000", Const, 0, ""},
    +		{"B57600", Const, 0, ""},
    +		{"B576000", Const, 0, ""},
    +		{"B600", Const, 0, ""},
    +		{"B7200", Const, 1, ""},
    +		{"B75", Const, 0, ""},
    +		{"B76800", Const, 1, ""},
    +		{"B921600", Const, 0, ""},
    +		{"B9600", Const, 0, ""},
    +		{"BASE_PROTOCOL", Const, 2, ""},
    +		{"BIOCFEEDBACK", Const, 0, ""},
    +		{"BIOCFLUSH", Const, 0, ""},
    +		{"BIOCGBLEN", Const, 0, ""},
    +		{"BIOCGDIRECTION", Const, 0, ""},
    +		{"BIOCGDIRFILT", Const, 1, ""},
    +		{"BIOCGDLT", Const, 0, ""},
    +		{"BIOCGDLTLIST", Const, 0, ""},
    +		{"BIOCGETBUFMODE", Const, 0, ""},
    +		{"BIOCGETIF", Const, 0, ""},
    +		{"BIOCGETZMAX", Const, 0, ""},
    +		{"BIOCGFEEDBACK", Const, 1, ""},
    +		{"BIOCGFILDROP", Const, 1, ""},
    +		{"BIOCGHDRCMPLT", Const, 0, ""},
    +		{"BIOCGRSIG", Const, 0, ""},
    +		{"BIOCGRTIMEOUT", Const, 0, ""},
    +		{"BIOCGSEESENT", Const, 0, ""},
    +		{"BIOCGSTATS", Const, 0, ""},
    +		{"BIOCGSTATSOLD", Const, 1, ""},
    +		{"BIOCGTSTAMP", Const, 1, ""},
    +		{"BIOCIMMEDIATE", Const, 0, ""},
    +		{"BIOCLOCK", Const, 0, ""},
    +		{"BIOCPROMISC", Const, 0, ""},
    +		{"BIOCROTZBUF", Const, 0, ""},
    +		{"BIOCSBLEN", Const, 0, ""},
    +		{"BIOCSDIRECTION", Const, 0, ""},
    +		{"BIOCSDIRFILT", Const, 1, ""},
    +		{"BIOCSDLT", Const, 0, ""},
    +		{"BIOCSETBUFMODE", Const, 0, ""},
    +		{"BIOCSETF", Const, 0, ""},
    +		{"BIOCSETFNR", Const, 0, ""},
    +		{"BIOCSETIF", Const, 0, ""},
    +		{"BIOCSETWF", Const, 0, ""},
    +		{"BIOCSETZBUF", Const, 0, ""},
    +		{"BIOCSFEEDBACK", Const, 1, ""},
    +		{"BIOCSFILDROP", Const, 1, ""},
    +		{"BIOCSHDRCMPLT", Const, 0, ""},
    +		{"BIOCSRSIG", Const, 0, ""},
    +		{"BIOCSRTIMEOUT", Const, 0, ""},
    +		{"BIOCSSEESENT", Const, 0, ""},
    +		{"BIOCSTCPF", Const, 1, ""},
    +		{"BIOCSTSTAMP", Const, 1, ""},
    +		{"BIOCSUDPF", Const, 1, ""},
    +		{"BIOCVERSION", Const, 0, ""},
    +		{"BPF_A", Const, 0, ""},
    +		{"BPF_ABS", Const, 0, ""},
    +		{"BPF_ADD", Const, 0, ""},
    +		{"BPF_ALIGNMENT", Const, 0, ""},
    +		{"BPF_ALIGNMENT32", Const, 1, ""},
    +		{"BPF_ALU", Const, 0, ""},
    +		{"BPF_AND", Const, 0, ""},
    +		{"BPF_B", Const, 0, ""},
    +		{"BPF_BUFMODE_BUFFER", Const, 0, ""},
    +		{"BPF_BUFMODE_ZBUF", Const, 0, ""},
    +		{"BPF_DFLTBUFSIZE", Const, 1, ""},
    +		{"BPF_DIRECTION_IN", Const, 1, ""},
    +		{"BPF_DIRECTION_OUT", Const, 1, ""},
    +		{"BPF_DIV", Const, 0, ""},
    +		{"BPF_H", Const, 0, ""},
    +		{"BPF_IMM", Const, 0, ""},
    +		{"BPF_IND", Const, 0, ""},
    +		{"BPF_JA", Const, 0, ""},
    +		{"BPF_JEQ", Const, 0, ""},
    +		{"BPF_JGE", Const, 0, ""},
    +		{"BPF_JGT", Const, 0, ""},
    +		{"BPF_JMP", Const, 0, ""},
    +		{"BPF_JSET", Const, 0, ""},
    +		{"BPF_K", Const, 0, ""},
    +		{"BPF_LD", Const, 0, ""},
    +		{"BPF_LDX", Const, 0, ""},
    +		{"BPF_LEN", Const, 0, ""},
    +		{"BPF_LSH", Const, 0, ""},
    +		{"BPF_MAJOR_VERSION", Const, 0, ""},
    +		{"BPF_MAXBUFSIZE", Const, 0, ""},
    +		{"BPF_MAXINSNS", Const, 0, ""},
    +		{"BPF_MEM", Const, 0, ""},
    +		{"BPF_MEMWORDS", Const, 0, ""},
    +		{"BPF_MINBUFSIZE", Const, 0, ""},
    +		{"BPF_MINOR_VERSION", Const, 0, ""},
    +		{"BPF_MISC", Const, 0, ""},
    +		{"BPF_MSH", Const, 0, ""},
    +		{"BPF_MUL", Const, 0, ""},
    +		{"BPF_NEG", Const, 0, ""},
    +		{"BPF_OR", Const, 0, ""},
    +		{"BPF_RELEASE", Const, 0, ""},
    +		{"BPF_RET", Const, 0, ""},
    +		{"BPF_RSH", Const, 0, ""},
    +		{"BPF_ST", Const, 0, ""},
    +		{"BPF_STX", Const, 0, ""},
    +		{"BPF_SUB", Const, 0, ""},
    +		{"BPF_TAX", Const, 0, ""},
    +		{"BPF_TXA", Const, 0, ""},
    +		{"BPF_T_BINTIME", Const, 1, ""},
    +		{"BPF_T_BINTIME_FAST", Const, 1, ""},
    +		{"BPF_T_BINTIME_MONOTONIC", Const, 1, ""},
    +		{"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1, ""},
    +		{"BPF_T_FAST", Const, 1, ""},
    +		{"BPF_T_FLAG_MASK", Const, 1, ""},
    +		{"BPF_T_FORMAT_MASK", Const, 1, ""},
    +		{"BPF_T_MICROTIME", Const, 1, ""},
    +		{"BPF_T_MICROTIME_FAST", Const, 1, ""},
    +		{"BPF_T_MICROTIME_MONOTONIC", Const, 1, ""},
    +		{"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1, ""},
    +		{"BPF_T_MONOTONIC", Const, 1, ""},
    +		{"BPF_T_MONOTONIC_FAST", Const, 1, ""},
    +		{"BPF_T_NANOTIME", Const, 1, ""},
    +		{"BPF_T_NANOTIME_FAST", Const, 1, ""},
    +		{"BPF_T_NANOTIME_MONOTONIC", Const, 1, ""},
    +		{"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1, ""},
    +		{"BPF_T_NONE", Const, 1, ""},
    +		{"BPF_T_NORMAL", Const, 1, ""},
    +		{"BPF_W", Const, 0, ""},
    +		{"BPF_X", Const, 0, ""},
    +		{"BRKINT", Const, 0, ""},
    +		{"Bind", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
    +		{"BindToDevice", Func, 0, "func(fd int, device string) (err error)"},
    +		{"BpfBuflen", Func, 0, ""},
    +		{"BpfDatalink", Func, 0, ""},
    +		{"BpfHdr", Type, 0, ""},
    +		{"BpfHdr.Caplen", Field, 0, ""},
    +		{"BpfHdr.Datalen", Field, 0, ""},
    +		{"BpfHdr.Hdrlen", Field, 0, ""},
    +		{"BpfHdr.Pad_cgo_0", Field, 0, ""},
    +		{"BpfHdr.Tstamp", Field, 0, ""},
    +		{"BpfHeadercmpl", Func, 0, ""},
    +		{"BpfInsn", Type, 0, ""},
    +		{"BpfInsn.Code", Field, 0, ""},
    +		{"BpfInsn.Jf", Field, 0, ""},
    +		{"BpfInsn.Jt", Field, 0, ""},
    +		{"BpfInsn.K", Field, 0, ""},
    +		{"BpfInterface", Func, 0, ""},
    +		{"BpfJump", Func, 0, ""},
    +		{"BpfProgram", Type, 0, ""},
    +		{"BpfProgram.Insns", Field, 0, ""},
    +		{"BpfProgram.Len", Field, 0, ""},
    +		{"BpfProgram.Pad_cgo_0", Field, 0, ""},
    +		{"BpfStat", Type, 0, ""},
    +		{"BpfStat.Capt", Field, 2, ""},
    +		{"BpfStat.Drop", Field, 0, ""},
    +		{"BpfStat.Padding", Field, 2, ""},
    +		{"BpfStat.Recv", Field, 0, ""},
    +		{"BpfStats", Func, 0, ""},
    +		{"BpfStmt", Func, 0, ""},
    +		{"BpfTimeout", Func, 0, ""},
    +		{"BpfTimeval", Type, 2, ""},
    +		{"BpfTimeval.Sec", Field, 2, ""},
    +		{"BpfTimeval.Usec", Field, 2, ""},
    +		{"BpfVersion", Type, 0, ""},
    +		{"BpfVersion.Major", Field, 0, ""},
    +		{"BpfVersion.Minor", Field, 0, ""},
    +		{"BpfZbuf", Type, 0, ""},
    +		{"BpfZbuf.Bufa", Field, 0, ""},
    +		{"BpfZbuf.Bufb", Field, 0, ""},
    +		{"BpfZbuf.Buflen", Field, 0, ""},
    +		{"BpfZbufHeader", Type, 0, ""},
    +		{"BpfZbufHeader.Kernel_gen", Field, 0, ""},
    +		{"BpfZbufHeader.Kernel_len", Field, 0, ""},
    +		{"BpfZbufHeader.User_gen", Field, 0, ""},
    +		{"BpfZbufHeader.X_bzh_pad", Field, 0, ""},
    +		{"ByHandleFileInformation", Type, 0, ""},
    +		{"ByHandleFileInformation.CreationTime", Field, 0, ""},
    +		{"ByHandleFileInformation.FileAttributes", Field, 0, ""},
    +		{"ByHandleFileInformation.FileIndexHigh", Field, 0, ""},
    +		{"ByHandleFileInformation.FileIndexLow", Field, 0, ""},
    +		{"ByHandleFileInformation.FileSizeHigh", Field, 0, ""},
    +		{"ByHandleFileInformation.FileSizeLow", Field, 0, ""},
    +		{"ByHandleFileInformation.LastAccessTime", Field, 0, ""},
    +		{"ByHandleFileInformation.LastWriteTime", Field, 0, ""},
    +		{"ByHandleFileInformation.NumberOfLinks", Field, 0, ""},
    +		{"ByHandleFileInformation.VolumeSerialNumber", Field, 0, ""},
    +		{"BytePtrFromString", Func, 1, "func(s string) (*byte, error)"},
    +		{"ByteSliceFromString", Func, 1, "func(s string) ([]byte, error)"},
    +		{"CCR0_FLUSH", Const, 1, ""},
    +		{"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_BASE", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_EV", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_NT_AUTH", Const, 0, ""},
    +		{"CERT_CHAIN_POLICY_SSL", Const, 0, ""},
    +		{"CERT_E_CN_NO_MATCH", Const, 0, ""},
    +		{"CERT_E_EXPIRED", Const, 0, ""},
    +		{"CERT_E_PURPOSE", Const, 0, ""},
    +		{"CERT_E_ROLE", Const, 0, ""},
    +		{"CERT_E_UNTRUSTEDROOT", Const, 0, ""},
    +		{"CERT_STORE_ADD_ALWAYS", Const, 0, ""},
    +		{"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0, ""},
    +		{"CERT_STORE_PROV_MEMORY", Const, 0, ""},
    +		{"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0, ""},
    +		{"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0, ""},
    +		{"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0, ""},
    +		{"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0, ""},
    +		{"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0, ""},
    +		{"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0, ""},
    +		{"CERT_TRUST_INVALID_EXTENSION", Const, 0, ""},
    +		{"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0, ""},
    +		{"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0, ""},
    +		{"CERT_TRUST_IS_CYCLIC", Const, 0, ""},
    +		{"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0, ""},
    +		{"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0, ""},
    +		{"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0, ""},
    +		{"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0, ""},
    +		{"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0, ""},
    +		{"CERT_TRUST_IS_REVOKED", Const, 0, ""},
    +		{"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0, ""},
    +		{"CERT_TRUST_NO_ERROR", Const, 0, ""},
    +		{"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0, ""},
    +		{"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0, ""},
    +		{"CFLUSH", Const, 1, ""},
    +		{"CLOCAL", Const, 0, ""},
    +		{"CLONE_CHILD_CLEARTID", Const, 2, ""},
    +		{"CLONE_CHILD_SETTID", Const, 2, ""},
    +		{"CLONE_CLEAR_SIGHAND", Const, 20, ""},
    +		{"CLONE_CSIGNAL", Const, 3, ""},
    +		{"CLONE_DETACHED", Const, 2, ""},
    +		{"CLONE_FILES", Const, 2, ""},
    +		{"CLONE_FS", Const, 2, ""},
    +		{"CLONE_INTO_CGROUP", Const, 20, ""},
    +		{"CLONE_IO", Const, 2, ""},
    +		{"CLONE_NEWCGROUP", Const, 20, ""},
    +		{"CLONE_NEWIPC", Const, 2, ""},
    +		{"CLONE_NEWNET", Const, 2, ""},
    +		{"CLONE_NEWNS", Const, 2, ""},
    +		{"CLONE_NEWPID", Const, 2, ""},
    +		{"CLONE_NEWTIME", Const, 20, ""},
    +		{"CLONE_NEWUSER", Const, 2, ""},
    +		{"CLONE_NEWUTS", Const, 2, ""},
    +		{"CLONE_PARENT", Const, 2, ""},
    +		{"CLONE_PARENT_SETTID", Const, 2, ""},
    +		{"CLONE_PID", Const, 3, ""},
    +		{"CLONE_PIDFD", Const, 20, ""},
    +		{"CLONE_PTRACE", Const, 2, ""},
    +		{"CLONE_SETTLS", Const, 2, ""},
    +		{"CLONE_SIGHAND", Const, 2, ""},
    +		{"CLONE_SYSVSEM", Const, 2, ""},
    +		{"CLONE_THREAD", Const, 2, ""},
    +		{"CLONE_UNTRACED", Const, 2, ""},
    +		{"CLONE_VFORK", Const, 2, ""},
    +		{"CLONE_VM", Const, 2, ""},
    +		{"CPUID_CFLUSH", Const, 1, ""},
    +		{"CREAD", Const, 0, ""},
    +		{"CREATE_ALWAYS", Const, 0, ""},
    +		{"CREATE_NEW", Const, 0, ""},
    +		{"CREATE_NEW_PROCESS_GROUP", Const, 1, ""},
    +		{"CREATE_UNICODE_ENVIRONMENT", Const, 0, ""},
    +		{"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0, ""},
    +		{"CRYPT_DELETEKEYSET", Const, 0, ""},
    +		{"CRYPT_MACHINE_KEYSET", Const, 0, ""},
    +		{"CRYPT_NEWKEYSET", Const, 0, ""},
    +		{"CRYPT_SILENT", Const, 0, ""},
    +		{"CRYPT_VERIFYCONTEXT", Const, 0, ""},
    +		{"CS5", Const, 0, ""},
    +		{"CS6", Const, 0, ""},
    +		{"CS7", Const, 0, ""},
    +		{"CS8", Const, 0, ""},
    +		{"CSIZE", Const, 0, ""},
    +		{"CSTART", Const, 1, ""},
    +		{"CSTATUS", Const, 1, ""},
    +		{"CSTOP", Const, 1, ""},
    +		{"CSTOPB", Const, 0, ""},
    +		{"CSUSP", Const, 1, ""},
    +		{"CTL_MAXNAME", Const, 0, ""},
    +		{"CTL_NET", Const, 0, ""},
    +		{"CTL_QUERY", Const, 1, ""},
    +		{"CTRL_BREAK_EVENT", Const, 1, ""},
    +		{"CTRL_CLOSE_EVENT", Const, 14, ""},
    +		{"CTRL_C_EVENT", Const, 1, ""},
    +		{"CTRL_LOGOFF_EVENT", Const, 14, ""},
    +		{"CTRL_SHUTDOWN_EVENT", Const, 14, ""},
    +		{"CancelIo", Func, 0, ""},
    +		{"CancelIoEx", Func, 1, ""},
    +		{"CertAddCertificateContextToStore", Func, 0, ""},
    +		{"CertChainContext", Type, 0, ""},
    +		{"CertChainContext.ChainCount", Field, 0, ""},
    +		{"CertChainContext.Chains", Field, 0, ""},
    +		{"CertChainContext.HasRevocationFreshnessTime", Field, 0, ""},
    +		{"CertChainContext.LowerQualityChainCount", Field, 0, ""},
    +		{"CertChainContext.LowerQualityChains", Field, 0, ""},
    +		{"CertChainContext.RevocationFreshnessTime", Field, 0, ""},
    +		{"CertChainContext.Size", Field, 0, ""},
    +		{"CertChainContext.TrustStatus", Field, 0, ""},
    +		{"CertChainElement", Type, 0, ""},
    +		{"CertChainElement.ApplicationUsage", Field, 0, ""},
    +		{"CertChainElement.CertContext", Field, 0, ""},
    +		{"CertChainElement.ExtendedErrorInfo", Field, 0, ""},
    +		{"CertChainElement.IssuanceUsage", Field, 0, ""},
    +		{"CertChainElement.RevocationInfo", Field, 0, ""},
    +		{"CertChainElement.Size", Field, 0, ""},
    +		{"CertChainElement.TrustStatus", Field, 0, ""},
    +		{"CertChainPara", Type, 0, ""},
    +		{"CertChainPara.CacheResync", Field, 0, ""},
    +		{"CertChainPara.CheckRevocationFreshnessTime", Field, 0, ""},
    +		{"CertChainPara.RequestedUsage", Field, 0, ""},
    +		{"CertChainPara.RequstedIssuancePolicy", Field, 0, ""},
    +		{"CertChainPara.RevocationFreshnessTime", Field, 0, ""},
    +		{"CertChainPara.Size", Field, 0, ""},
    +		{"CertChainPara.URLRetrievalTimeout", Field, 0, ""},
    +		{"CertChainPolicyPara", Type, 0, ""},
    +		{"CertChainPolicyPara.ExtraPolicyPara", Field, 0, ""},
    +		{"CertChainPolicyPara.Flags", Field, 0, ""},
    +		{"CertChainPolicyPara.Size", Field, 0, ""},
    +		{"CertChainPolicyStatus", Type, 0, ""},
    +		{"CertChainPolicyStatus.ChainIndex", Field, 0, ""},
    +		{"CertChainPolicyStatus.ElementIndex", Field, 0, ""},
    +		{"CertChainPolicyStatus.Error", Field, 0, ""},
    +		{"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0, ""},
    +		{"CertChainPolicyStatus.Size", Field, 0, ""},
    +		{"CertCloseStore", Func, 0, ""},
    +		{"CertContext", Type, 0, ""},
    +		{"CertContext.CertInfo", Field, 0, ""},
    +		{"CertContext.EncodedCert", Field, 0, ""},
    +		{"CertContext.EncodingType", Field, 0, ""},
    +		{"CertContext.Length", Field, 0, ""},
    +		{"CertContext.Store", Field, 0, ""},
    +		{"CertCreateCertificateContext", Func, 0, ""},
    +		{"CertEnhKeyUsage", Type, 0, ""},
    +		{"CertEnhKeyUsage.Length", Field, 0, ""},
    +		{"CertEnhKeyUsage.UsageIdentifiers", Field, 0, ""},
    +		{"CertEnumCertificatesInStore", Func, 0, ""},
    +		{"CertFreeCertificateChain", Func, 0, ""},
    +		{"CertFreeCertificateContext", Func, 0, ""},
    +		{"CertGetCertificateChain", Func, 0, ""},
    +		{"CertInfo", Type, 11, ""},
    +		{"CertOpenStore", Func, 0, ""},
    +		{"CertOpenSystemStore", Func, 0, ""},
    +		{"CertRevocationCrlInfo", Type, 11, ""},
    +		{"CertRevocationInfo", Type, 0, ""},
    +		{"CertRevocationInfo.CrlInfo", Field, 0, ""},
    +		{"CertRevocationInfo.FreshnessTime", Field, 0, ""},
    +		{"CertRevocationInfo.HasFreshnessTime", Field, 0, ""},
    +		{"CertRevocationInfo.OidSpecificInfo", Field, 0, ""},
    +		{"CertRevocationInfo.RevocationOid", Field, 0, ""},
    +		{"CertRevocationInfo.RevocationResult", Field, 0, ""},
    +		{"CertRevocationInfo.Size", Field, 0, ""},
    +		{"CertSimpleChain", Type, 0, ""},
    +		{"CertSimpleChain.Elements", Field, 0, ""},
    +		{"CertSimpleChain.HasRevocationFreshnessTime", Field, 0, ""},
    +		{"CertSimpleChain.NumElements", Field, 0, ""},
    +		{"CertSimpleChain.RevocationFreshnessTime", Field, 0, ""},
    +		{"CertSimpleChain.Size", Field, 0, ""},
    +		{"CertSimpleChain.TrustListInfo", Field, 0, ""},
    +		{"CertSimpleChain.TrustStatus", Field, 0, ""},
    +		{"CertTrustListInfo", Type, 11, ""},
    +		{"CertTrustStatus", Type, 0, ""},
    +		{"CertTrustStatus.ErrorStatus", Field, 0, ""},
    +		{"CertTrustStatus.InfoStatus", Field, 0, ""},
    +		{"CertUsageMatch", Type, 0, ""},
    +		{"CertUsageMatch.Type", Field, 0, ""},
    +		{"CertUsageMatch.Usage", Field, 0, ""},
    +		{"CertVerifyCertificateChainPolicy", Func, 0, ""},
    +		{"Chdir", Func, 0, "func(path string) (err error)"},
    +		{"CheckBpfVersion", Func, 0, ""},
    +		{"Chflags", Func, 0, ""},
    +		{"Chmod", Func, 0, "func(path string, mode uint32) (err error)"},
    +		{"Chown", Func, 0, "func(path string, uid int, gid int) (err error)"},
    +		{"Chroot", Func, 0, "func(path string) (err error)"},
    +		{"Clearenv", Func, 0, "func()"},
    +		{"Close", Func, 0, "func(fd int) (err error)"},
    +		{"CloseHandle", Func, 0, ""},
    +		{"CloseOnExec", Func, 0, "func(fd int)"},
    +		{"Closesocket", Func, 0, ""},
    +		{"CmsgLen", Func, 0, "func(datalen int) int"},
    +		{"CmsgSpace", Func, 0, "func(datalen int) int"},
    +		{"Cmsghdr", Type, 0, ""},
    +		{"Cmsghdr.Len", Field, 0, ""},
    +		{"Cmsghdr.Level", Field, 0, ""},
    +		{"Cmsghdr.Type", Field, 0, ""},
    +		{"Cmsghdr.X__cmsg_data", Field, 0, ""},
    +		{"CommandLineToArgv", Func, 0, ""},
    +		{"ComputerName", Func, 0, ""},
    +		{"Conn", Type, 9, ""},
    +		{"Connect", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
    +		{"ConnectEx", Func, 1, ""},
    +		{"ConvertSidToStringSid", Func, 0, ""},
    +		{"ConvertStringSidToSid", Func, 0, ""},
    +		{"CopySid", Func, 0, ""},
    +		{"Creat", Func, 0, "func(path string, mode uint32) (fd int, err error)"},
    +		{"CreateDirectory", Func, 0, ""},
    +		{"CreateFile", Func, 0, ""},
    +		{"CreateFileMapping", Func, 0, ""},
    +		{"CreateHardLink", Func, 4, ""},
    +		{"CreateIoCompletionPort", Func, 0, ""},
    +		{"CreatePipe", Func, 0, ""},
    +		{"CreateProcess", Func, 0, ""},
    +		{"CreateProcessAsUser", Func, 10, ""},
    +		{"CreateSymbolicLink", Func, 4, ""},
    +		{"CreateToolhelp32Snapshot", Func, 4, ""},
    +		{"Credential", Type, 0, ""},
    +		{"Credential.Gid", Field, 0, ""},
    +		{"Credential.Groups", Field, 0, ""},
    +		{"Credential.NoSetGroups", Field, 9, ""},
    +		{"Credential.Uid", Field, 0, ""},
    +		{"CryptAcquireContext", Func, 0, ""},
    +		{"CryptGenRandom", Func, 0, ""},
    +		{"CryptReleaseContext", Func, 0, ""},
    +		{"DIOCBSFLUSH", Const, 1, ""},
    +		{"DIOCOSFPFLUSH", Const, 1, ""},
    +		{"DLL", Type, 0, ""},
    +		{"DLL.Handle", Field, 0, ""},
    +		{"DLL.Name", Field, 0, ""},
    +		{"DLLError", Type, 0, ""},
    +		{"DLLError.Err", Field, 0, ""},
    +		{"DLLError.Msg", Field, 0, ""},
    +		{"DLLError.ObjName", Field, 0, ""},
    +		{"DLT_A429", Const, 0, ""},
    +		{"DLT_A653_ICM", Const, 0, ""},
    +		{"DLT_AIRONET_HEADER", Const, 0, ""},
    +		{"DLT_AOS", Const, 1, ""},
    +		{"DLT_APPLE_IP_OVER_IEEE1394", Const, 0, ""},
    +		{"DLT_ARCNET", Const, 0, ""},
    +		{"DLT_ARCNET_LINUX", Const, 0, ""},
    +		{"DLT_ATM_CLIP", Const, 0, ""},
    +		{"DLT_ATM_RFC1483", Const, 0, ""},
    +		{"DLT_AURORA", Const, 0, ""},
    +		{"DLT_AX25", Const, 0, ""},
    +		{"DLT_AX25_KISS", Const, 0, ""},
    +		{"DLT_BACNET_MS_TP", Const, 0, ""},
    +		{"DLT_BLUETOOTH_HCI_H4", Const, 0, ""},
    +		{"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0, ""},
    +		{"DLT_CAN20B", Const, 0, ""},
    +		{"DLT_CAN_SOCKETCAN", Const, 1, ""},
    +		{"DLT_CHAOS", Const, 0, ""},
    +		{"DLT_CHDLC", Const, 0, ""},
    +		{"DLT_CISCO_IOS", Const, 0, ""},
    +		{"DLT_C_HDLC", Const, 0, ""},
    +		{"DLT_C_HDLC_WITH_DIR", Const, 0, ""},
    +		{"DLT_DBUS", Const, 1, ""},
    +		{"DLT_DECT", Const, 1, ""},
    +		{"DLT_DOCSIS", Const, 0, ""},
    +		{"DLT_DVB_CI", Const, 1, ""},
    +		{"DLT_ECONET", Const, 0, ""},
    +		{"DLT_EN10MB", Const, 0, ""},
    +		{"DLT_EN3MB", Const, 0, ""},
    +		{"DLT_ENC", Const, 0, ""},
    +		{"DLT_ERF", Const, 0, ""},
    +		{"DLT_ERF_ETH", Const, 0, ""},
    +		{"DLT_ERF_POS", Const, 0, ""},
    +		{"DLT_FC_2", Const, 1, ""},
    +		{"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1, ""},
    +		{"DLT_FDDI", Const, 0, ""},
    +		{"DLT_FLEXRAY", Const, 0, ""},
    +		{"DLT_FRELAY", Const, 0, ""},
    +		{"DLT_FRELAY_WITH_DIR", Const, 0, ""},
    +		{"DLT_GCOM_SERIAL", Const, 0, ""},
    +		{"DLT_GCOM_T1E1", Const, 0, ""},
    +		{"DLT_GPF_F", Const, 0, ""},
    +		{"DLT_GPF_T", Const, 0, ""},
    +		{"DLT_GPRS_LLC", Const, 0, ""},
    +		{"DLT_GSMTAP_ABIS", Const, 1, ""},
    +		{"DLT_GSMTAP_UM", Const, 1, ""},
    +		{"DLT_HDLC", Const, 1, ""},
    +		{"DLT_HHDLC", Const, 0, ""},
    +		{"DLT_HIPPI", Const, 1, ""},
    +		{"DLT_IBM_SN", Const, 0, ""},
    +		{"DLT_IBM_SP", Const, 0, ""},
    +		{"DLT_IEEE802", Const, 0, ""},
    +		{"DLT_IEEE802_11", Const, 0, ""},
    +		{"DLT_IEEE802_11_RADIO", Const, 0, ""},
    +		{"DLT_IEEE802_11_RADIO_AVS", Const, 0, ""},
    +		{"DLT_IEEE802_15_4", Const, 0, ""},
    +		{"DLT_IEEE802_15_4_LINUX", Const, 0, ""},
    +		{"DLT_IEEE802_15_4_NOFCS", Const, 1, ""},
    +		{"DLT_IEEE802_15_4_NONASK_PHY", Const, 0, ""},
    +		{"DLT_IEEE802_16_MAC_CPS", Const, 0, ""},
    +		{"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0, ""},
    +		{"DLT_IPFILTER", Const, 0, ""},
    +		{"DLT_IPMB", Const, 0, ""},
    +		{"DLT_IPMB_LINUX", Const, 0, ""},
    +		{"DLT_IPNET", Const, 1, ""},
    +		{"DLT_IPOIB", Const, 1, ""},
    +		{"DLT_IPV4", Const, 1, ""},
    +		{"DLT_IPV6", Const, 1, ""},
    +		{"DLT_IP_OVER_FC", Const, 0, ""},
    +		{"DLT_JUNIPER_ATM1", Const, 0, ""},
    +		{"DLT_JUNIPER_ATM2", Const, 0, ""},
    +		{"DLT_JUNIPER_ATM_CEMIC", Const, 1, ""},
    +		{"DLT_JUNIPER_CHDLC", Const, 0, ""},
    +		{"DLT_JUNIPER_ES", Const, 0, ""},
    +		{"DLT_JUNIPER_ETHER", Const, 0, ""},
    +		{"DLT_JUNIPER_FIBRECHANNEL", Const, 1, ""},
    +		{"DLT_JUNIPER_FRELAY", Const, 0, ""},
    +		{"DLT_JUNIPER_GGSN", Const, 0, ""},
    +		{"DLT_JUNIPER_ISM", Const, 0, ""},
    +		{"DLT_JUNIPER_MFR", Const, 0, ""},
    +		{"DLT_JUNIPER_MLFR", Const, 0, ""},
    +		{"DLT_JUNIPER_MLPPP", Const, 0, ""},
    +		{"DLT_JUNIPER_MONITOR", Const, 0, ""},
    +		{"DLT_JUNIPER_PIC_PEER", Const, 0, ""},
    +		{"DLT_JUNIPER_PPP", Const, 0, ""},
    +		{"DLT_JUNIPER_PPPOE", Const, 0, ""},
    +		{"DLT_JUNIPER_PPPOE_ATM", Const, 0, ""},
    +		{"DLT_JUNIPER_SERVICES", Const, 0, ""},
    +		{"DLT_JUNIPER_SRX_E2E", Const, 1, ""},
    +		{"DLT_JUNIPER_ST", Const, 0, ""},
    +		{"DLT_JUNIPER_VP", Const, 0, ""},
    +		{"DLT_JUNIPER_VS", Const, 1, ""},
    +		{"DLT_LAPB_WITH_DIR", Const, 0, ""},
    +		{"DLT_LAPD", Const, 0, ""},
    +		{"DLT_LIN", Const, 0, ""},
    +		{"DLT_LINUX_EVDEV", Const, 1, ""},
    +		{"DLT_LINUX_IRDA", Const, 0, ""},
    +		{"DLT_LINUX_LAPD", Const, 0, ""},
    +		{"DLT_LINUX_PPP_WITHDIRECTION", Const, 0, ""},
    +		{"DLT_LINUX_SLL", Const, 0, ""},
    +		{"DLT_LOOP", Const, 0, ""},
    +		{"DLT_LTALK", Const, 0, ""},
    +		{"DLT_MATCHING_MAX", Const, 1, ""},
    +		{"DLT_MATCHING_MIN", Const, 1, ""},
    +		{"DLT_MFR", Const, 0, ""},
    +		{"DLT_MOST", Const, 0, ""},
    +		{"DLT_MPEG_2_TS", Const, 1, ""},
    +		{"DLT_MPLS", Const, 1, ""},
    +		{"DLT_MTP2", Const, 0, ""},
    +		{"DLT_MTP2_WITH_PHDR", Const, 0, ""},
    +		{"DLT_MTP3", Const, 0, ""},
    +		{"DLT_MUX27010", Const, 1, ""},
    +		{"DLT_NETANALYZER", Const, 1, ""},
    +		{"DLT_NETANALYZER_TRANSPARENT", Const, 1, ""},
    +		{"DLT_NFC_LLCP", Const, 1, ""},
    +		{"DLT_NFLOG", Const, 1, ""},
    +		{"DLT_NG40", Const, 1, ""},
    +		{"DLT_NULL", Const, 0, ""},
    +		{"DLT_PCI_EXP", Const, 0, ""},
    +		{"DLT_PFLOG", Const, 0, ""},
    +		{"DLT_PFSYNC", Const, 0, ""},
    +		{"DLT_PPI", Const, 0, ""},
    +		{"DLT_PPP", Const, 0, ""},
    +		{"DLT_PPP_BSDOS", Const, 0, ""},
    +		{"DLT_PPP_ETHER", Const, 0, ""},
    +		{"DLT_PPP_PPPD", Const, 0, ""},
    +		{"DLT_PPP_SERIAL", Const, 0, ""},
    +		{"DLT_PPP_WITH_DIR", Const, 0, ""},
    +		{"DLT_PPP_WITH_DIRECTION", Const, 0, ""},
    +		{"DLT_PRISM_HEADER", Const, 0, ""},
    +		{"DLT_PRONET", Const, 0, ""},
    +		{"DLT_RAIF1", Const, 0, ""},
    +		{"DLT_RAW", Const, 0, ""},
    +		{"DLT_RAWAF_MASK", Const, 1, ""},
    +		{"DLT_RIO", Const, 0, ""},
    +		{"DLT_SCCP", Const, 0, ""},
    +		{"DLT_SITA", Const, 0, ""},
    +		{"DLT_SLIP", Const, 0, ""},
    +		{"DLT_SLIP_BSDOS", Const, 0, ""},
    +		{"DLT_STANAG_5066_D_PDU", Const, 1, ""},
    +		{"DLT_SUNATM", Const, 0, ""},
    +		{"DLT_SYMANTEC_FIREWALL", Const, 0, ""},
    +		{"DLT_TZSP", Const, 0, ""},
    +		{"DLT_USB", Const, 0, ""},
    +		{"DLT_USB_LINUX", Const, 0, ""},
    +		{"DLT_USB_LINUX_MMAPPED", Const, 1, ""},
    +		{"DLT_USER0", Const, 0, ""},
    +		{"DLT_USER1", Const, 0, ""},
    +		{"DLT_USER10", Const, 0, ""},
    +		{"DLT_USER11", Const, 0, ""},
    +		{"DLT_USER12", Const, 0, ""},
    +		{"DLT_USER13", Const, 0, ""},
    +		{"DLT_USER14", Const, 0, ""},
    +		{"DLT_USER15", Const, 0, ""},
    +		{"DLT_USER2", Const, 0, ""},
    +		{"DLT_USER3", Const, 0, ""},
    +		{"DLT_USER4", Const, 0, ""},
    +		{"DLT_USER5", Const, 0, ""},
    +		{"DLT_USER6", Const, 0, ""},
    +		{"DLT_USER7", Const, 0, ""},
    +		{"DLT_USER8", Const, 0, ""},
    +		{"DLT_USER9", Const, 0, ""},
    +		{"DLT_WIHART", Const, 1, ""},
    +		{"DLT_X2E_SERIAL", Const, 0, ""},
    +		{"DLT_X2E_XORAYA", Const, 0, ""},
    +		{"DNSMXData", Type, 0, ""},
    +		{"DNSMXData.NameExchange", Field, 0, ""},
    +		{"DNSMXData.Pad", Field, 0, ""},
    +		{"DNSMXData.Preference", Field, 0, ""},
    +		{"DNSPTRData", Type, 0, ""},
    +		{"DNSPTRData.Host", Field, 0, ""},
    +		{"DNSRecord", Type, 0, ""},
    +		{"DNSRecord.Data", Field, 0, ""},
    +		{"DNSRecord.Dw", Field, 0, ""},
    +		{"DNSRecord.Length", Field, 0, ""},
    +		{"DNSRecord.Name", Field, 0, ""},
    +		{"DNSRecord.Next", Field, 0, ""},
    +		{"DNSRecord.Reserved", Field, 0, ""},
    +		{"DNSRecord.Ttl", Field, 0, ""},
    +		{"DNSRecord.Type", Field, 0, ""},
    +		{"DNSSRVData", Type, 0, ""},
    +		{"DNSSRVData.Pad", Field, 0, ""},
    +		{"DNSSRVData.Port", Field, 0, ""},
    +		{"DNSSRVData.Priority", Field, 0, ""},
    +		{"DNSSRVData.Target", Field, 0, ""},
    +		{"DNSSRVData.Weight", Field, 0, ""},
    +		{"DNSTXTData", Type, 0, ""},
    +		{"DNSTXTData.StringArray", Field, 0, ""},
    +		{"DNSTXTData.StringCount", Field, 0, ""},
    +		{"DNS_INFO_NO_RECORDS", Const, 4, ""},
    +		{"DNS_TYPE_A", Const, 0, ""},
    +		{"DNS_TYPE_A6", Const, 0, ""},
    +		{"DNS_TYPE_AAAA", Const, 0, ""},
    +		{"DNS_TYPE_ADDRS", Const, 0, ""},
    +		{"DNS_TYPE_AFSDB", Const, 0, ""},
    +		{"DNS_TYPE_ALL", Const, 0, ""},
    +		{"DNS_TYPE_ANY", Const, 0, ""},
    +		{"DNS_TYPE_ATMA", Const, 0, ""},
    +		{"DNS_TYPE_AXFR", Const, 0, ""},
    +		{"DNS_TYPE_CERT", Const, 0, ""},
    +		{"DNS_TYPE_CNAME", Const, 0, ""},
    +		{"DNS_TYPE_DHCID", Const, 0, ""},
    +		{"DNS_TYPE_DNAME", Const, 0, ""},
    +		{"DNS_TYPE_DNSKEY", Const, 0, ""},
    +		{"DNS_TYPE_DS", Const, 0, ""},
    +		{"DNS_TYPE_EID", Const, 0, ""},
    +		{"DNS_TYPE_GID", Const, 0, ""},
    +		{"DNS_TYPE_GPOS", Const, 0, ""},
    +		{"DNS_TYPE_HINFO", Const, 0, ""},
    +		{"DNS_TYPE_ISDN", Const, 0, ""},
    +		{"DNS_TYPE_IXFR", Const, 0, ""},
    +		{"DNS_TYPE_KEY", Const, 0, ""},
    +		{"DNS_TYPE_KX", Const, 0, ""},
    +		{"DNS_TYPE_LOC", Const, 0, ""},
    +		{"DNS_TYPE_MAILA", Const, 0, ""},
    +		{"DNS_TYPE_MAILB", Const, 0, ""},
    +		{"DNS_TYPE_MB", Const, 0, ""},
    +		{"DNS_TYPE_MD", Const, 0, ""},
    +		{"DNS_TYPE_MF", Const, 0, ""},
    +		{"DNS_TYPE_MG", Const, 0, ""},
    +		{"DNS_TYPE_MINFO", Const, 0, ""},
    +		{"DNS_TYPE_MR", Const, 0, ""},
    +		{"DNS_TYPE_MX", Const, 0, ""},
    +		{"DNS_TYPE_NAPTR", Const, 0, ""},
    +		{"DNS_TYPE_NBSTAT", Const, 0, ""},
    +		{"DNS_TYPE_NIMLOC", Const, 0, ""},
    +		{"DNS_TYPE_NS", Const, 0, ""},
    +		{"DNS_TYPE_NSAP", Const, 0, ""},
    +		{"DNS_TYPE_NSAPPTR", Const, 0, ""},
    +		{"DNS_TYPE_NSEC", Const, 0, ""},
    +		{"DNS_TYPE_NULL", Const, 0, ""},
    +		{"DNS_TYPE_NXT", Const, 0, ""},
    +		{"DNS_TYPE_OPT", Const, 0, ""},
    +		{"DNS_TYPE_PTR", Const, 0, ""},
    +		{"DNS_TYPE_PX", Const, 0, ""},
    +		{"DNS_TYPE_RP", Const, 0, ""},
    +		{"DNS_TYPE_RRSIG", Const, 0, ""},
    +		{"DNS_TYPE_RT", Const, 0, ""},
    +		{"DNS_TYPE_SIG", Const, 0, ""},
    +		{"DNS_TYPE_SINK", Const, 0, ""},
    +		{"DNS_TYPE_SOA", Const, 0, ""},
    +		{"DNS_TYPE_SRV", Const, 0, ""},
    +		{"DNS_TYPE_TEXT", Const, 0, ""},
    +		{"DNS_TYPE_TKEY", Const, 0, ""},
    +		{"DNS_TYPE_TSIG", Const, 0, ""},
    +		{"DNS_TYPE_UID", Const, 0, ""},
    +		{"DNS_TYPE_UINFO", Const, 0, ""},
    +		{"DNS_TYPE_UNSPEC", Const, 0, ""},
    +		{"DNS_TYPE_WINS", Const, 0, ""},
    +		{"DNS_TYPE_WINSR", Const, 0, ""},
    +		{"DNS_TYPE_WKS", Const, 0, ""},
    +		{"DNS_TYPE_X25", Const, 0, ""},
    +		{"DT_BLK", Const, 0, ""},
    +		{"DT_CHR", Const, 0, ""},
    +		{"DT_DIR", Const, 0, ""},
    +		{"DT_FIFO", Const, 0, ""},
    +		{"DT_LNK", Const, 0, ""},
    +		{"DT_REG", Const, 0, ""},
    +		{"DT_SOCK", Const, 0, ""},
    +		{"DT_UNKNOWN", Const, 0, ""},
    +		{"DT_WHT", Const, 0, ""},
    +		{"DUPLICATE_CLOSE_SOURCE", Const, 0, ""},
    +		{"DUPLICATE_SAME_ACCESS", Const, 0, ""},
    +		{"DeleteFile", Func, 0, ""},
    +		{"DetachLsf", Func, 0, "func(fd int) error"},
    +		{"DeviceIoControl", Func, 4, ""},
    +		{"Dirent", Type, 0, ""},
    +		{"Dirent.Fileno", Field, 0, ""},
    +		{"Dirent.Ino", Field, 0, ""},
    +		{"Dirent.Name", Field, 0, ""},
    +		{"Dirent.Namlen", Field, 0, ""},
    +		{"Dirent.Off", Field, 0, ""},
    +		{"Dirent.Pad0", Field, 12, ""},
    +		{"Dirent.Pad1", Field, 12, ""},
    +		{"Dirent.Pad_cgo_0", Field, 0, ""},
    +		{"Dirent.Reclen", Field, 0, ""},
    +		{"Dirent.Seekoff", Field, 0, ""},
    +		{"Dirent.Type", Field, 0, ""},
    +		{"Dirent.X__d_padding", Field, 3, ""},
    +		{"DnsNameCompare", Func, 4, ""},
    +		{"DnsQuery", Func, 0, ""},
    +		{"DnsRecordListFree", Func, 0, ""},
    +		{"DnsSectionAdditional", Const, 4, ""},
    +		{"DnsSectionAnswer", Const, 4, ""},
    +		{"DnsSectionAuthority", Const, 4, ""},
    +		{"DnsSectionQuestion", Const, 4, ""},
    +		{"Dup", Func, 0, "func(oldfd int) (fd int, err error)"},
    +		{"Dup2", Func, 0, "func(oldfd int, newfd int) (err error)"},
    +		{"Dup3", Func, 2, "func(oldfd int, newfd int, flags int) (err error)"},
    +		{"DuplicateHandle", Func, 0, ""},
    +		{"E2BIG", Const, 0, ""},
    +		{"EACCES", Const, 0, ""},
    +		{"EADDRINUSE", Const, 0, ""},
    +		{"EADDRNOTAVAIL", Const, 0, ""},
    +		{"EADV", Const, 0, ""},
    +		{"EAFNOSUPPORT", Const, 0, ""},
    +		{"EAGAIN", Const, 0, ""},
    +		{"EALREADY", Const, 0, ""},
    +		{"EAUTH", Const, 0, ""},
    +		{"EBADARCH", Const, 0, ""},
    +		{"EBADE", Const, 0, ""},
    +		{"EBADEXEC", Const, 0, ""},
    +		{"EBADF", Const, 0, ""},
    +		{"EBADFD", Const, 0, ""},
    +		{"EBADMACHO", Const, 0, ""},
    +		{"EBADMSG", Const, 0, ""},
    +		{"EBADR", Const, 0, ""},
    +		{"EBADRPC", Const, 0, ""},
    +		{"EBADRQC", Const, 0, ""},
    +		{"EBADSLT", Const, 0, ""},
    +		{"EBFONT", Const, 0, ""},
    +		{"EBUSY", Const, 0, ""},
    +		{"ECANCELED", Const, 0, ""},
    +		{"ECAPMODE", Const, 1, ""},
    +		{"ECHILD", Const, 0, ""},
    +		{"ECHO", Const, 0, ""},
    +		{"ECHOCTL", Const, 0, ""},
    +		{"ECHOE", Const, 0, ""},
    +		{"ECHOK", Const, 0, ""},
    +		{"ECHOKE", Const, 0, ""},
    +		{"ECHONL", Const, 0, ""},
    +		{"ECHOPRT", Const, 0, ""},
    +		{"ECHRNG", Const, 0, ""},
    +		{"ECOMM", Const, 0, ""},
    +		{"ECONNABORTED", Const, 0, ""},
    +		{"ECONNREFUSED", Const, 0, ""},
    +		{"ECONNRESET", Const, 0, ""},
    +		{"EDEADLK", Const, 0, ""},
    +		{"EDEADLOCK", Const, 0, ""},
    +		{"EDESTADDRREQ", Const, 0, ""},
    +		{"EDEVERR", Const, 0, ""},
    +		{"EDOM", Const, 0, ""},
    +		{"EDOOFUS", Const, 0, ""},
    +		{"EDOTDOT", Const, 0, ""},
    +		{"EDQUOT", Const, 0, ""},
    +		{"EEXIST", Const, 0, ""},
    +		{"EFAULT", Const, 0, ""},
    +		{"EFBIG", Const, 0, ""},
    +		{"EFER_LMA", Const, 1, ""},
    +		{"EFER_LME", Const, 1, ""},
    +		{"EFER_NXE", Const, 1, ""},
    +		{"EFER_SCE", Const, 1, ""},
    +		{"EFTYPE", Const, 0, ""},
    +		{"EHOSTDOWN", Const, 0, ""},
    +		{"EHOSTUNREACH", Const, 0, ""},
    +		{"EHWPOISON", Const, 0, ""},
    +		{"EIDRM", Const, 0, ""},
    +		{"EILSEQ", Const, 0, ""},
    +		{"EINPROGRESS", Const, 0, ""},
    +		{"EINTR", Const, 0, ""},
    +		{"EINVAL", Const, 0, ""},
    +		{"EIO", Const, 0, ""},
    +		{"EIPSEC", Const, 1, ""},
    +		{"EISCONN", Const, 0, ""},
    +		{"EISDIR", Const, 0, ""},
    +		{"EISNAM", Const, 0, ""},
    +		{"EKEYEXPIRED", Const, 0, ""},
    +		{"EKEYREJECTED", Const, 0, ""},
    +		{"EKEYREVOKED", Const, 0, ""},
    +		{"EL2HLT", Const, 0, ""},
    +		{"EL2NSYNC", Const, 0, ""},
    +		{"EL3HLT", Const, 0, ""},
    +		{"EL3RST", Const, 0, ""},
    +		{"ELAST", Const, 0, ""},
    +		{"ELF_NGREG", Const, 0, ""},
    +		{"ELF_PRARGSZ", Const, 0, ""},
    +		{"ELIBACC", Const, 0, ""},
    +		{"ELIBBAD", Const, 0, ""},
    +		{"ELIBEXEC", Const, 0, ""},
    +		{"ELIBMAX", Const, 0, ""},
    +		{"ELIBSCN", Const, 0, ""},
    +		{"ELNRNG", Const, 0, ""},
    +		{"ELOOP", Const, 0, ""},
    +		{"EMEDIUMTYPE", Const, 0, ""},
    +		{"EMFILE", Const, 0, ""},
    +		{"EMLINK", Const, 0, ""},
    +		{"EMSGSIZE", Const, 0, ""},
    +		{"EMT_TAGOVF", Const, 1, ""},
    +		{"EMULTIHOP", Const, 0, ""},
    +		{"EMUL_ENABLED", Const, 1, ""},
    +		{"EMUL_LINUX", Const, 1, ""},
    +		{"EMUL_LINUX32", Const, 1, ""},
    +		{"EMUL_MAXID", Const, 1, ""},
    +		{"EMUL_NATIVE", Const, 1, ""},
    +		{"ENAMETOOLONG", Const, 0, ""},
    +		{"ENAVAIL", Const, 0, ""},
    +		{"ENDRUNDISC", Const, 1, ""},
    +		{"ENEEDAUTH", Const, 0, ""},
    +		{"ENETDOWN", Const, 0, ""},
    +		{"ENETRESET", Const, 0, ""},
    +		{"ENETUNREACH", Const, 0, ""},
    +		{"ENFILE", Const, 0, ""},
    +		{"ENOANO", Const, 0, ""},
    +		{"ENOATTR", Const, 0, ""},
    +		{"ENOBUFS", Const, 0, ""},
    +		{"ENOCSI", Const, 0, ""},
    +		{"ENODATA", Const, 0, ""},
    +		{"ENODEV", Const, 0, ""},
    +		{"ENOENT", Const, 0, ""},
    +		{"ENOEXEC", Const, 0, ""},
    +		{"ENOKEY", Const, 0, ""},
    +		{"ENOLCK", Const, 0, ""},
    +		{"ENOLINK", Const, 0, ""},
    +		{"ENOMEDIUM", Const, 0, ""},
    +		{"ENOMEM", Const, 0, ""},
    +		{"ENOMSG", Const, 0, ""},
    +		{"ENONET", Const, 0, ""},
    +		{"ENOPKG", Const, 0, ""},
    +		{"ENOPOLICY", Const, 0, ""},
    +		{"ENOPROTOOPT", Const, 0, ""},
    +		{"ENOSPC", Const, 0, ""},
    +		{"ENOSR", Const, 0, ""},
    +		{"ENOSTR", Const, 0, ""},
    +		{"ENOSYS", Const, 0, ""},
    +		{"ENOTBLK", Const, 0, ""},
    +		{"ENOTCAPABLE", Const, 0, ""},
    +		{"ENOTCONN", Const, 0, ""},
    +		{"ENOTDIR", Const, 0, ""},
    +		{"ENOTEMPTY", Const, 0, ""},
    +		{"ENOTNAM", Const, 0, ""},
    +		{"ENOTRECOVERABLE", Const, 0, ""},
    +		{"ENOTSOCK", Const, 0, ""},
    +		{"ENOTSUP", Const, 0, ""},
    +		{"ENOTTY", Const, 0, ""},
    +		{"ENOTUNIQ", Const, 0, ""},
    +		{"ENXIO", Const, 0, ""},
    +		{"EN_SW_CTL_INF", Const, 1, ""},
    +		{"EN_SW_CTL_PREC", Const, 1, ""},
    +		{"EN_SW_CTL_ROUND", Const, 1, ""},
    +		{"EN_SW_DATACHAIN", Const, 1, ""},
    +		{"EN_SW_DENORM", Const, 1, ""},
    +		{"EN_SW_INVOP", Const, 1, ""},
    +		{"EN_SW_OVERFLOW", Const, 1, ""},
    +		{"EN_SW_PRECLOSS", Const, 1, ""},
    +		{"EN_SW_UNDERFLOW", Const, 1, ""},
    +		{"EN_SW_ZERODIV", Const, 1, ""},
    +		{"EOPNOTSUPP", Const, 0, ""},
    +		{"EOVERFLOW", Const, 0, ""},
    +		{"EOWNERDEAD", Const, 0, ""},
    +		{"EPERM", Const, 0, ""},
    +		{"EPFNOSUPPORT", Const, 0, ""},
    +		{"EPIPE", Const, 0, ""},
    +		{"EPOLLERR", Const, 0, ""},
    +		{"EPOLLET", Const, 0, ""},
    +		{"EPOLLHUP", Const, 0, ""},
    +		{"EPOLLIN", Const, 0, ""},
    +		{"EPOLLMSG", Const, 0, ""},
    +		{"EPOLLONESHOT", Const, 0, ""},
    +		{"EPOLLOUT", Const, 0, ""},
    +		{"EPOLLPRI", Const, 0, ""},
    +		{"EPOLLRDBAND", Const, 0, ""},
    +		{"EPOLLRDHUP", Const, 0, ""},
    +		{"EPOLLRDNORM", Const, 0, ""},
    +		{"EPOLLWRBAND", Const, 0, ""},
    +		{"EPOLLWRNORM", Const, 0, ""},
    +		{"EPOLL_CLOEXEC", Const, 0, ""},
    +		{"EPOLL_CTL_ADD", Const, 0, ""},
    +		{"EPOLL_CTL_DEL", Const, 0, ""},
    +		{"EPOLL_CTL_MOD", Const, 0, ""},
    +		{"EPOLL_NONBLOCK", Const, 0, ""},
    +		{"EPROCLIM", Const, 0, ""},
    +		{"EPROCUNAVAIL", Const, 0, ""},
    +		{"EPROGMISMATCH", Const, 0, ""},
    +		{"EPROGUNAVAIL", Const, 0, ""},
    +		{"EPROTO", Const, 0, ""},
    +		{"EPROTONOSUPPORT", Const, 0, ""},
    +		{"EPROTOTYPE", Const, 0, ""},
    +		{"EPWROFF", Const, 0, ""},
    +		{"EQFULL", Const, 16, ""},
    +		{"ERANGE", Const, 0, ""},
    +		{"EREMCHG", Const, 0, ""},
    +		{"EREMOTE", Const, 0, ""},
    +		{"EREMOTEIO", Const, 0, ""},
    +		{"ERESTART", Const, 0, ""},
    +		{"ERFKILL", Const, 0, ""},
    +		{"EROFS", Const, 0, ""},
    +		{"ERPCMISMATCH", Const, 0, ""},
    +		{"ERROR_ACCESS_DENIED", Const, 0, ""},
    +		{"ERROR_ALREADY_EXISTS", Const, 0, ""},
    +		{"ERROR_BROKEN_PIPE", Const, 0, ""},
    +		{"ERROR_BUFFER_OVERFLOW", Const, 0, ""},
    +		{"ERROR_DIR_NOT_EMPTY", Const, 8, ""},
    +		{"ERROR_ENVVAR_NOT_FOUND", Const, 0, ""},
    +		{"ERROR_FILE_EXISTS", Const, 0, ""},
    +		{"ERROR_FILE_NOT_FOUND", Const, 0, ""},
    +		{"ERROR_HANDLE_EOF", Const, 2, ""},
    +		{"ERROR_INSUFFICIENT_BUFFER", Const, 0, ""},
    +		{"ERROR_IO_PENDING", Const, 0, ""},
    +		{"ERROR_MOD_NOT_FOUND", Const, 0, ""},
    +		{"ERROR_MORE_DATA", Const, 3, ""},
    +		{"ERROR_NETNAME_DELETED", Const, 3, ""},
    +		{"ERROR_NOT_FOUND", Const, 1, ""},
    +		{"ERROR_NO_MORE_FILES", Const, 0, ""},
    +		{"ERROR_OPERATION_ABORTED", Const, 0, ""},
    +		{"ERROR_PATH_NOT_FOUND", Const, 0, ""},
    +		{"ERROR_PRIVILEGE_NOT_HELD", Const, 4, ""},
    +		{"ERROR_PROC_NOT_FOUND", Const, 0, ""},
    +		{"ESHLIBVERS", Const, 0, ""},
    +		{"ESHUTDOWN", Const, 0, ""},
    +		{"ESOCKTNOSUPPORT", Const, 0, ""},
    +		{"ESPIPE", Const, 0, ""},
    +		{"ESRCH", Const, 0, ""},
    +		{"ESRMNT", Const, 0, ""},
    +		{"ESTALE", Const, 0, ""},
    +		{"ESTRPIPE", Const, 0, ""},
    +		{"ETHERCAP_JUMBO_MTU", Const, 1, ""},
    +		{"ETHERCAP_VLAN_HWTAGGING", Const, 1, ""},
    +		{"ETHERCAP_VLAN_MTU", Const, 1, ""},
    +		{"ETHERMIN", Const, 1, ""},
    +		{"ETHERMTU", Const, 1, ""},
    +		{"ETHERMTU_JUMBO", Const, 1, ""},
    +		{"ETHERTYPE_8023", Const, 1, ""},
    +		{"ETHERTYPE_AARP", Const, 1, ""},
    +		{"ETHERTYPE_ACCTON", Const, 1, ""},
    +		{"ETHERTYPE_AEONIC", Const, 1, ""},
    +		{"ETHERTYPE_ALPHA", Const, 1, ""},
    +		{"ETHERTYPE_AMBER", Const, 1, ""},
    +		{"ETHERTYPE_AMOEBA", Const, 1, ""},
    +		{"ETHERTYPE_AOE", Const, 1, ""},
    +		{"ETHERTYPE_APOLLO", Const, 1, ""},
    +		{"ETHERTYPE_APOLLODOMAIN", Const, 1, ""},
    +		{"ETHERTYPE_APPLETALK", Const, 1, ""},
    +		{"ETHERTYPE_APPLITEK", Const, 1, ""},
    +		{"ETHERTYPE_ARGONAUT", Const, 1, ""},
    +		{"ETHERTYPE_ARP", Const, 1, ""},
    +		{"ETHERTYPE_AT", Const, 1, ""},
    +		{"ETHERTYPE_ATALK", Const, 1, ""},
    +		{"ETHERTYPE_ATOMIC", Const, 1, ""},
    +		{"ETHERTYPE_ATT", Const, 1, ""},
    +		{"ETHERTYPE_ATTSTANFORD", Const, 1, ""},
    +		{"ETHERTYPE_AUTOPHON", Const, 1, ""},
    +		{"ETHERTYPE_AXIS", Const, 1, ""},
    +		{"ETHERTYPE_BCLOOP", Const, 1, ""},
    +		{"ETHERTYPE_BOFL", Const, 1, ""},
    +		{"ETHERTYPE_CABLETRON", Const, 1, ""},
    +		{"ETHERTYPE_CHAOS", Const, 1, ""},
    +		{"ETHERTYPE_COMDESIGN", Const, 1, ""},
    +		{"ETHERTYPE_COMPUGRAPHIC", Const, 1, ""},
    +		{"ETHERTYPE_COUNTERPOINT", Const, 1, ""},
    +		{"ETHERTYPE_CRONUS", Const, 1, ""},
    +		{"ETHERTYPE_CRONUSVLN", Const, 1, ""},
    +		{"ETHERTYPE_DCA", Const, 1, ""},
    +		{"ETHERTYPE_DDE", Const, 1, ""},
    +		{"ETHERTYPE_DEBNI", Const, 1, ""},
    +		{"ETHERTYPE_DECAM", Const, 1, ""},
    +		{"ETHERTYPE_DECCUST", Const, 1, ""},
    +		{"ETHERTYPE_DECDIAG", Const, 1, ""},
    +		{"ETHERTYPE_DECDNS", Const, 1, ""},
    +		{"ETHERTYPE_DECDTS", Const, 1, ""},
    +		{"ETHERTYPE_DECEXPER", Const, 1, ""},
    +		{"ETHERTYPE_DECLAST", Const, 1, ""},
    +		{"ETHERTYPE_DECLTM", Const, 1, ""},
    +		{"ETHERTYPE_DECMUMPS", Const, 1, ""},
    +		{"ETHERTYPE_DECNETBIOS", Const, 1, ""},
    +		{"ETHERTYPE_DELTACON", Const, 1, ""},
    +		{"ETHERTYPE_DIDDLE", Const, 1, ""},
    +		{"ETHERTYPE_DLOG1", Const, 1, ""},
    +		{"ETHERTYPE_DLOG2", Const, 1, ""},
    +		{"ETHERTYPE_DN", Const, 1, ""},
    +		{"ETHERTYPE_DOGFIGHT", Const, 1, ""},
    +		{"ETHERTYPE_DSMD", Const, 1, ""},
    +		{"ETHERTYPE_ECMA", Const, 1, ""},
    +		{"ETHERTYPE_ENCRYPT", Const, 1, ""},
    +		{"ETHERTYPE_ES", Const, 1, ""},
    +		{"ETHERTYPE_EXCELAN", Const, 1, ""},
    +		{"ETHERTYPE_EXPERDATA", Const, 1, ""},
    +		{"ETHERTYPE_FLIP", Const, 1, ""},
    +		{"ETHERTYPE_FLOWCONTROL", Const, 1, ""},
    +		{"ETHERTYPE_FRARP", Const, 1, ""},
    +		{"ETHERTYPE_GENDYN", Const, 1, ""},
    +		{"ETHERTYPE_HAYES", Const, 1, ""},
    +		{"ETHERTYPE_HIPPI_FP", Const, 1, ""},
    +		{"ETHERTYPE_HITACHI", Const, 1, ""},
    +		{"ETHERTYPE_HP", Const, 1, ""},
    +		{"ETHERTYPE_IEEEPUP", Const, 1, ""},
    +		{"ETHERTYPE_IEEEPUPAT", Const, 1, ""},
    +		{"ETHERTYPE_IMLBL", Const, 1, ""},
    +		{"ETHERTYPE_IMLBLDIAG", Const, 1, ""},
    +		{"ETHERTYPE_IP", Const, 1, ""},
    +		{"ETHERTYPE_IPAS", Const, 1, ""},
    +		{"ETHERTYPE_IPV6", Const, 1, ""},
    +		{"ETHERTYPE_IPX", Const, 1, ""},
    +		{"ETHERTYPE_IPXNEW", Const, 1, ""},
    +		{"ETHERTYPE_KALPANA", Const, 1, ""},
    +		{"ETHERTYPE_LANBRIDGE", Const, 1, ""},
    +		{"ETHERTYPE_LANPROBE", Const, 1, ""},
    +		{"ETHERTYPE_LAT", Const, 1, ""},
    +		{"ETHERTYPE_LBACK", Const, 1, ""},
    +		{"ETHERTYPE_LITTLE", Const, 1, ""},
    +		{"ETHERTYPE_LLDP", Const, 1, ""},
    +		{"ETHERTYPE_LOGICRAFT", Const, 1, ""},
    +		{"ETHERTYPE_LOOPBACK", Const, 1, ""},
    +		{"ETHERTYPE_MATRA", Const, 1, ""},
    +		{"ETHERTYPE_MAX", Const, 1, ""},
    +		{"ETHERTYPE_MERIT", Const, 1, ""},
    +		{"ETHERTYPE_MICP", Const, 1, ""},
    +		{"ETHERTYPE_MOPDL", Const, 1, ""},
    +		{"ETHERTYPE_MOPRC", Const, 1, ""},
    +		{"ETHERTYPE_MOTOROLA", Const, 1, ""},
    +		{"ETHERTYPE_MPLS", Const, 1, ""},
    +		{"ETHERTYPE_MPLS_MCAST", Const, 1, ""},
    +		{"ETHERTYPE_MUMPS", Const, 1, ""},
    +		{"ETHERTYPE_NBPCC", Const, 1, ""},
    +		{"ETHERTYPE_NBPCLAIM", Const, 1, ""},
    +		{"ETHERTYPE_NBPCLREQ", Const, 1, ""},
    +		{"ETHERTYPE_NBPCLRSP", Const, 1, ""},
    +		{"ETHERTYPE_NBPCREQ", Const, 1, ""},
    +		{"ETHERTYPE_NBPCRSP", Const, 1, ""},
    +		{"ETHERTYPE_NBPDG", Const, 1, ""},
    +		{"ETHERTYPE_NBPDGB", Const, 1, ""},
    +		{"ETHERTYPE_NBPDLTE", Const, 1, ""},
    +		{"ETHERTYPE_NBPRAR", Const, 1, ""},
    +		{"ETHERTYPE_NBPRAS", Const, 1, ""},
    +		{"ETHERTYPE_NBPRST", Const, 1, ""},
    +		{"ETHERTYPE_NBPSCD", Const, 1, ""},
    +		{"ETHERTYPE_NBPVCD", Const, 1, ""},
    +		{"ETHERTYPE_NBS", Const, 1, ""},
    +		{"ETHERTYPE_NCD", Const, 1, ""},
    +		{"ETHERTYPE_NESTAR", Const, 1, ""},
    +		{"ETHERTYPE_NETBEUI", Const, 1, ""},
    +		{"ETHERTYPE_NOVELL", Const, 1, ""},
    +		{"ETHERTYPE_NS", Const, 1, ""},
    +		{"ETHERTYPE_NSAT", Const, 1, ""},
    +		{"ETHERTYPE_NSCOMPAT", Const, 1, ""},
    +		{"ETHERTYPE_NTRAILER", Const, 1, ""},
    +		{"ETHERTYPE_OS9", Const, 1, ""},
    +		{"ETHERTYPE_OS9NET", Const, 1, ""},
    +		{"ETHERTYPE_PACER", Const, 1, ""},
    +		{"ETHERTYPE_PAE", Const, 1, ""},
    +		{"ETHERTYPE_PCS", Const, 1, ""},
    +		{"ETHERTYPE_PLANNING", Const, 1, ""},
    +		{"ETHERTYPE_PPP", Const, 1, ""},
    +		{"ETHERTYPE_PPPOE", Const, 1, ""},
    +		{"ETHERTYPE_PPPOEDISC", Const, 1, ""},
    +		{"ETHERTYPE_PRIMENTS", Const, 1, ""},
    +		{"ETHERTYPE_PUP", Const, 1, ""},
    +		{"ETHERTYPE_PUPAT", Const, 1, ""},
    +		{"ETHERTYPE_QINQ", Const, 1, ""},
    +		{"ETHERTYPE_RACAL", Const, 1, ""},
    +		{"ETHERTYPE_RATIONAL", Const, 1, ""},
    +		{"ETHERTYPE_RAWFR", Const, 1, ""},
    +		{"ETHERTYPE_RCL", Const, 1, ""},
    +		{"ETHERTYPE_RDP", Const, 1, ""},
    +		{"ETHERTYPE_RETIX", Const, 1, ""},
    +		{"ETHERTYPE_REVARP", Const, 1, ""},
    +		{"ETHERTYPE_SCA", Const, 1, ""},
    +		{"ETHERTYPE_SECTRA", Const, 1, ""},
    +		{"ETHERTYPE_SECUREDATA", Const, 1, ""},
    +		{"ETHERTYPE_SGITW", Const, 1, ""},
    +		{"ETHERTYPE_SG_BOUNCE", Const, 1, ""},
    +		{"ETHERTYPE_SG_DIAG", Const, 1, ""},
    +		{"ETHERTYPE_SG_NETGAMES", Const, 1, ""},
    +		{"ETHERTYPE_SG_RESV", Const, 1, ""},
    +		{"ETHERTYPE_SIMNET", Const, 1, ""},
    +		{"ETHERTYPE_SLOW", Const, 1, ""},
    +		{"ETHERTYPE_SLOWPROTOCOLS", Const, 1, ""},
    +		{"ETHERTYPE_SNA", Const, 1, ""},
    +		{"ETHERTYPE_SNMP", Const, 1, ""},
    +		{"ETHERTYPE_SONIX", Const, 1, ""},
    +		{"ETHERTYPE_SPIDER", Const, 1, ""},
    +		{"ETHERTYPE_SPRITE", Const, 1, ""},
    +		{"ETHERTYPE_STP", Const, 1, ""},
    +		{"ETHERTYPE_TALARIS", Const, 1, ""},
    +		{"ETHERTYPE_TALARISMC", Const, 1, ""},
    +		{"ETHERTYPE_TCPCOMP", Const, 1, ""},
    +		{"ETHERTYPE_TCPSM", Const, 1, ""},
    +		{"ETHERTYPE_TEC", Const, 1, ""},
    +		{"ETHERTYPE_TIGAN", Const, 1, ""},
    +		{"ETHERTYPE_TRAIL", Const, 1, ""},
    +		{"ETHERTYPE_TRANSETHER", Const, 1, ""},
    +		{"ETHERTYPE_TYMSHARE", Const, 1, ""},
    +		{"ETHERTYPE_UBBST", Const, 1, ""},
    +		{"ETHERTYPE_UBDEBUG", Const, 1, ""},
    +		{"ETHERTYPE_UBDIAGLOOP", Const, 1, ""},
    +		{"ETHERTYPE_UBDL", Const, 1, ""},
    +		{"ETHERTYPE_UBNIU", Const, 1, ""},
    +		{"ETHERTYPE_UBNMC", Const, 1, ""},
    +		{"ETHERTYPE_VALID", Const, 1, ""},
    +		{"ETHERTYPE_VARIAN", Const, 1, ""},
    +		{"ETHERTYPE_VAXELN", Const, 1, ""},
    +		{"ETHERTYPE_VEECO", Const, 1, ""},
    +		{"ETHERTYPE_VEXP", Const, 1, ""},
    +		{"ETHERTYPE_VGLAB", Const, 1, ""},
    +		{"ETHERTYPE_VINES", Const, 1, ""},
    +		{"ETHERTYPE_VINESECHO", Const, 1, ""},
    +		{"ETHERTYPE_VINESLOOP", Const, 1, ""},
    +		{"ETHERTYPE_VITAL", Const, 1, ""},
    +		{"ETHERTYPE_VLAN", Const, 1, ""},
    +		{"ETHERTYPE_VLTLMAN", Const, 1, ""},
    +		{"ETHERTYPE_VPROD", Const, 1, ""},
    +		{"ETHERTYPE_VURESERVED", Const, 1, ""},
    +		{"ETHERTYPE_WATERLOO", Const, 1, ""},
    +		{"ETHERTYPE_WELLFLEET", Const, 1, ""},
    +		{"ETHERTYPE_X25", Const, 1, ""},
    +		{"ETHERTYPE_X75", Const, 1, ""},
    +		{"ETHERTYPE_XNSSM", Const, 1, ""},
    +		{"ETHERTYPE_XTP", Const, 1, ""},
    +		{"ETHER_ADDR_LEN", Const, 1, ""},
    +		{"ETHER_ALIGN", Const, 1, ""},
    +		{"ETHER_CRC_LEN", Const, 1, ""},
    +		{"ETHER_CRC_POLY_BE", Const, 1, ""},
    +		{"ETHER_CRC_POLY_LE", Const, 1, ""},
    +		{"ETHER_HDR_LEN", Const, 1, ""},
    +		{"ETHER_MAX_DIX_LEN", Const, 1, ""},
    +		{"ETHER_MAX_LEN", Const, 1, ""},
    +		{"ETHER_MAX_LEN_JUMBO", Const, 1, ""},
    +		{"ETHER_MIN_LEN", Const, 1, ""},
    +		{"ETHER_PPPOE_ENCAP_LEN", Const, 1, ""},
    +		{"ETHER_TYPE_LEN", Const, 1, ""},
    +		{"ETHER_VLAN_ENCAP_LEN", Const, 1, ""},
    +		{"ETH_P_1588", Const, 0, ""},
    +		{"ETH_P_8021Q", Const, 0, ""},
    +		{"ETH_P_802_2", Const, 0, ""},
    +		{"ETH_P_802_3", Const, 0, ""},
    +		{"ETH_P_AARP", Const, 0, ""},
    +		{"ETH_P_ALL", Const, 0, ""},
    +		{"ETH_P_AOE", Const, 0, ""},
    +		{"ETH_P_ARCNET", Const, 0, ""},
    +		{"ETH_P_ARP", Const, 0, ""},
    +		{"ETH_P_ATALK", Const, 0, ""},
    +		{"ETH_P_ATMFATE", Const, 0, ""},
    +		{"ETH_P_ATMMPOA", Const, 0, ""},
    +		{"ETH_P_AX25", Const, 0, ""},
    +		{"ETH_P_BPQ", Const, 0, ""},
    +		{"ETH_P_CAIF", Const, 0, ""},
    +		{"ETH_P_CAN", Const, 0, ""},
    +		{"ETH_P_CONTROL", Const, 0, ""},
    +		{"ETH_P_CUST", Const, 0, ""},
    +		{"ETH_P_DDCMP", Const, 0, ""},
    +		{"ETH_P_DEC", Const, 0, ""},
    +		{"ETH_P_DIAG", Const, 0, ""},
    +		{"ETH_P_DNA_DL", Const, 0, ""},
    +		{"ETH_P_DNA_RC", Const, 0, ""},
    +		{"ETH_P_DNA_RT", Const, 0, ""},
    +		{"ETH_P_DSA", Const, 0, ""},
    +		{"ETH_P_ECONET", Const, 0, ""},
    +		{"ETH_P_EDSA", Const, 0, ""},
    +		{"ETH_P_FCOE", Const, 0, ""},
    +		{"ETH_P_FIP", Const, 0, ""},
    +		{"ETH_P_HDLC", Const, 0, ""},
    +		{"ETH_P_IEEE802154", Const, 0, ""},
    +		{"ETH_P_IEEEPUP", Const, 0, ""},
    +		{"ETH_P_IEEEPUPAT", Const, 0, ""},
    +		{"ETH_P_IP", Const, 0, ""},
    +		{"ETH_P_IPV6", Const, 0, ""},
    +		{"ETH_P_IPX", Const, 0, ""},
    +		{"ETH_P_IRDA", Const, 0, ""},
    +		{"ETH_P_LAT", Const, 0, ""},
    +		{"ETH_P_LINK_CTL", Const, 0, ""},
    +		{"ETH_P_LOCALTALK", Const, 0, ""},
    +		{"ETH_P_LOOP", Const, 0, ""},
    +		{"ETH_P_MOBITEX", Const, 0, ""},
    +		{"ETH_P_MPLS_MC", Const, 0, ""},
    +		{"ETH_P_MPLS_UC", Const, 0, ""},
    +		{"ETH_P_PAE", Const, 0, ""},
    +		{"ETH_P_PAUSE", Const, 0, ""},
    +		{"ETH_P_PHONET", Const, 0, ""},
    +		{"ETH_P_PPPTALK", Const, 0, ""},
    +		{"ETH_P_PPP_DISC", Const, 0, ""},
    +		{"ETH_P_PPP_MP", Const, 0, ""},
    +		{"ETH_P_PPP_SES", Const, 0, ""},
    +		{"ETH_P_PUP", Const, 0, ""},
    +		{"ETH_P_PUPAT", Const, 0, ""},
    +		{"ETH_P_RARP", Const, 0, ""},
    +		{"ETH_P_SCA", Const, 0, ""},
    +		{"ETH_P_SLOW", Const, 0, ""},
    +		{"ETH_P_SNAP", Const, 0, ""},
    +		{"ETH_P_TEB", Const, 0, ""},
    +		{"ETH_P_TIPC", Const, 0, ""},
    +		{"ETH_P_TRAILER", Const, 0, ""},
    +		{"ETH_P_TR_802_2", Const, 0, ""},
    +		{"ETH_P_WAN_PPP", Const, 0, ""},
    +		{"ETH_P_WCCP", Const, 0, ""},
    +		{"ETH_P_X25", Const, 0, ""},
    +		{"ETIME", Const, 0, ""},
    +		{"ETIMEDOUT", Const, 0, ""},
    +		{"ETOOMANYREFS", Const, 0, ""},
    +		{"ETXTBSY", Const, 0, ""},
    +		{"EUCLEAN", Const, 0, ""},
    +		{"EUNATCH", Const, 0, ""},
    +		{"EUSERS", Const, 0, ""},
    +		{"EVFILT_AIO", Const, 0, ""},
    +		{"EVFILT_FS", Const, 0, ""},
    +		{"EVFILT_LIO", Const, 0, ""},
    +		{"EVFILT_MACHPORT", Const, 0, ""},
    +		{"EVFILT_PROC", Const, 0, ""},
    +		{"EVFILT_READ", Const, 0, ""},
    +		{"EVFILT_SIGNAL", Const, 0, ""},
    +		{"EVFILT_SYSCOUNT", Const, 0, ""},
    +		{"EVFILT_THREADMARKER", Const, 0, ""},
    +		{"EVFILT_TIMER", Const, 0, ""},
    +		{"EVFILT_USER", Const, 0, ""},
    +		{"EVFILT_VM", Const, 0, ""},
    +		{"EVFILT_VNODE", Const, 0, ""},
    +		{"EVFILT_WRITE", Const, 0, ""},
    +		{"EV_ADD", Const, 0, ""},
    +		{"EV_CLEAR", Const, 0, ""},
    +		{"EV_DELETE", Const, 0, ""},
    +		{"EV_DISABLE", Const, 0, ""},
    +		{"EV_DISPATCH", Const, 0, ""},
    +		{"EV_DROP", Const, 3, ""},
    +		{"EV_ENABLE", Const, 0, ""},
    +		{"EV_EOF", Const, 0, ""},
    +		{"EV_ERROR", Const, 0, ""},
    +		{"EV_FLAG0", Const, 0, ""},
    +		{"EV_FLAG1", Const, 0, ""},
    +		{"EV_ONESHOT", Const, 0, ""},
    +		{"EV_OOBAND", Const, 0, ""},
    +		{"EV_POLL", Const, 0, ""},
    +		{"EV_RECEIPT", Const, 0, ""},
    +		{"EV_SYSFLAGS", Const, 0, ""},
    +		{"EWINDOWS", Const, 0, ""},
    +		{"EWOULDBLOCK", Const, 0, ""},
    +		{"EXDEV", Const, 0, ""},
    +		{"EXFULL", Const, 0, ""},
    +		{"EXTA", Const, 0, ""},
    +		{"EXTB", Const, 0, ""},
    +		{"EXTPROC", Const, 0, ""},
    +		{"Environ", Func, 0, "func() []string"},
    +		{"EpollCreate", Func, 0, "func(size int) (fd int, err error)"},
    +		{"EpollCreate1", Func, 0, "func(flag int) (fd int, err error)"},
    +		{"EpollCtl", Func, 0, "func(epfd int, op int, fd int, event *EpollEvent) (err error)"},
    +		{"EpollEvent", Type, 0, ""},
    +		{"EpollEvent.Events", Field, 0, ""},
    +		{"EpollEvent.Fd", Field, 0, ""},
    +		{"EpollEvent.Pad", Field, 0, ""},
    +		{"EpollEvent.PadFd", Field, 0, ""},
    +		{"EpollWait", Func, 0, "func(epfd int, events []EpollEvent, msec int) (n int, err error)"},
    +		{"Errno", Type, 0, ""},
    +		{"EscapeArg", Func, 0, ""},
    +		{"Exchangedata", Func, 0, ""},
    +		{"Exec", Func, 0, "func(argv0 string, argv []string, envv []string) (err error)"},
    +		{"Exit", Func, 0, "func(code int)"},
    +		{"ExitProcess", Func, 0, ""},
    +		{"FD_CLOEXEC", Const, 0, ""},
    +		{"FD_SETSIZE", Const, 0, ""},
    +		{"FILE_ACTION_ADDED", Const, 0, ""},
    +		{"FILE_ACTION_MODIFIED", Const, 0, ""},
    +		{"FILE_ACTION_REMOVED", Const, 0, ""},
    +		{"FILE_ACTION_RENAMED_NEW_NAME", Const, 0, ""},
    +		{"FILE_ACTION_RENAMED_OLD_NAME", Const, 0, ""},
    +		{"FILE_APPEND_DATA", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_ARCHIVE", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_DIRECTORY", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_HIDDEN", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_NORMAL", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_READONLY", Const, 0, ""},
    +		{"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4, ""},
    +		{"FILE_ATTRIBUTE_SYSTEM", Const, 0, ""},
    +		{"FILE_BEGIN", Const, 0, ""},
    +		{"FILE_CURRENT", Const, 0, ""},
    +		{"FILE_END", Const, 0, ""},
    +		{"FILE_FLAG_BACKUP_SEMANTICS", Const, 0, ""},
    +		{"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4, ""},
    +		{"FILE_FLAG_OVERLAPPED", Const, 0, ""},
    +		{"FILE_LIST_DIRECTORY", Const, 0, ""},
    +		{"FILE_MAP_COPY", Const, 0, ""},
    +		{"FILE_MAP_EXECUTE", Const, 0, ""},
    +		{"FILE_MAP_READ", Const, 0, ""},
    +		{"FILE_MAP_WRITE", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_CREATION", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0, ""},
    +		{"FILE_NOTIFY_CHANGE_SIZE", Const, 0, ""},
    +		{"FILE_SHARE_DELETE", Const, 0, ""},
    +		{"FILE_SHARE_READ", Const, 0, ""},
    +		{"FILE_SHARE_WRITE", Const, 0, ""},
    +		{"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2, ""},
    +		{"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2, ""},
    +		{"FILE_TYPE_CHAR", Const, 0, ""},
    +		{"FILE_TYPE_DISK", Const, 0, ""},
    +		{"FILE_TYPE_PIPE", Const, 0, ""},
    +		{"FILE_TYPE_REMOTE", Const, 0, ""},
    +		{"FILE_TYPE_UNKNOWN", Const, 0, ""},
    +		{"FILE_WRITE_ATTRIBUTES", Const, 0, ""},
    +		{"FLUSHO", Const, 0, ""},
    +		{"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0, ""},
    +		{"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0, ""},
    +		{"FORMAT_MESSAGE_FROM_HMODULE", Const, 0, ""},
    +		{"FORMAT_MESSAGE_FROM_STRING", Const, 0, ""},
    +		{"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0, ""},
    +		{"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0, ""},
    +		{"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0, ""},
    +		{"FSCTL_GET_REPARSE_POINT", Const, 4, ""},
    +		{"F_ADDFILESIGS", Const, 0, ""},
    +		{"F_ADDSIGS", Const, 0, ""},
    +		{"F_ALLOCATEALL", Const, 0, ""},
    +		{"F_ALLOCATECONTIG", Const, 0, ""},
    +		{"F_CANCEL", Const, 0, ""},
    +		{"F_CHKCLEAN", Const, 0, ""},
    +		{"F_CLOSEM", Const, 1, ""},
    +		{"F_DUP2FD", Const, 0, ""},
    +		{"F_DUP2FD_CLOEXEC", Const, 1, ""},
    +		{"F_DUPFD", Const, 0, ""},
    +		{"F_DUPFD_CLOEXEC", Const, 0, ""},
    +		{"F_EXLCK", Const, 0, ""},
    +		{"F_FINDSIGS", Const, 16, ""},
    +		{"F_FLUSH_DATA", Const, 0, ""},
    +		{"F_FREEZE_FS", Const, 0, ""},
    +		{"F_FSCTL", Const, 1, ""},
    +		{"F_FSDIRMASK", Const, 1, ""},
    +		{"F_FSIN", Const, 1, ""},
    +		{"F_FSINOUT", Const, 1, ""},
    +		{"F_FSOUT", Const, 1, ""},
    +		{"F_FSPRIV", Const, 1, ""},
    +		{"F_FSVOID", Const, 1, ""},
    +		{"F_FULLFSYNC", Const, 0, ""},
    +		{"F_GETCODEDIR", Const, 16, ""},
    +		{"F_GETFD", Const, 0, ""},
    +		{"F_GETFL", Const, 0, ""},
    +		{"F_GETLEASE", Const, 0, ""},
    +		{"F_GETLK", Const, 0, ""},
    +		{"F_GETLK64", Const, 0, ""},
    +		{"F_GETLKPID", Const, 0, ""},
    +		{"F_GETNOSIGPIPE", Const, 0, ""},
    +		{"F_GETOWN", Const, 0, ""},
    +		{"F_GETOWN_EX", Const, 0, ""},
    +		{"F_GETPATH", Const, 0, ""},
    +		{"F_GETPATH_MTMINFO", Const, 0, ""},
    +		{"F_GETPIPE_SZ", Const, 0, ""},
    +		{"F_GETPROTECTIONCLASS", Const, 0, ""},
    +		{"F_GETPROTECTIONLEVEL", Const, 16, ""},
    +		{"F_GETSIG", Const, 0, ""},
    +		{"F_GLOBAL_NOCACHE", Const, 0, ""},
    +		{"F_LOCK", Const, 0, ""},
    +		{"F_LOG2PHYS", Const, 0, ""},
    +		{"F_LOG2PHYS_EXT", Const, 0, ""},
    +		{"F_MARKDEPENDENCY", Const, 0, ""},
    +		{"F_MAXFD", Const, 1, ""},
    +		{"F_NOCACHE", Const, 0, ""},
    +		{"F_NODIRECT", Const, 0, ""},
    +		{"F_NOTIFY", Const, 0, ""},
    +		{"F_OGETLK", Const, 0, ""},
    +		{"F_OK", Const, 0, ""},
    +		{"F_OSETLK", Const, 0, ""},
    +		{"F_OSETLKW", Const, 0, ""},
    +		{"F_PARAM_MASK", Const, 1, ""},
    +		{"F_PARAM_MAX", Const, 1, ""},
    +		{"F_PATHPKG_CHECK", Const, 0, ""},
    +		{"F_PEOFPOSMODE", Const, 0, ""},
    +		{"F_PREALLOCATE", Const, 0, ""},
    +		{"F_RDADVISE", Const, 0, ""},
    +		{"F_RDAHEAD", Const, 0, ""},
    +		{"F_RDLCK", Const, 0, ""},
    +		{"F_READAHEAD", Const, 0, ""},
    +		{"F_READBOOTSTRAP", Const, 0, ""},
    +		{"F_SETBACKINGSTORE", Const, 0, ""},
    +		{"F_SETFD", Const, 0, ""},
    +		{"F_SETFL", Const, 0, ""},
    +		{"F_SETLEASE", Const, 0, ""},
    +		{"F_SETLK", Const, 0, ""},
    +		{"F_SETLK64", Const, 0, ""},
    +		{"F_SETLKW", Const, 0, ""},
    +		{"F_SETLKW64", Const, 0, ""},
    +		{"F_SETLKWTIMEOUT", Const, 16, ""},
    +		{"F_SETLK_REMOTE", Const, 0, ""},
    +		{"F_SETNOSIGPIPE", Const, 0, ""},
    +		{"F_SETOWN", Const, 0, ""},
    +		{"F_SETOWN_EX", Const, 0, ""},
    +		{"F_SETPIPE_SZ", Const, 0, ""},
    +		{"F_SETPROTECTIONCLASS", Const, 0, ""},
    +		{"F_SETSIG", Const, 0, ""},
    +		{"F_SETSIZE", Const, 0, ""},
    +		{"F_SHLCK", Const, 0, ""},
    +		{"F_SINGLE_WRITER", Const, 16, ""},
    +		{"F_TEST", Const, 0, ""},
    +		{"F_THAW_FS", Const, 0, ""},
    +		{"F_TLOCK", Const, 0, ""},
    +		{"F_TRANSCODEKEY", Const, 16, ""},
    +		{"F_ULOCK", Const, 0, ""},
    +		{"F_UNLCK", Const, 0, ""},
    +		{"F_UNLCKSYS", Const, 0, ""},
    +		{"F_VOLPOSMODE", Const, 0, ""},
    +		{"F_WRITEBOOTSTRAP", Const, 0, ""},
    +		{"F_WRLCK", Const, 0, ""},
    +		{"Faccessat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) (err error)"},
    +		{"Fallocate", Func, 0, "func(fd int, mode uint32, off int64, len int64) (err error)"},
    +		{"Fbootstraptransfer_t", Type, 0, ""},
    +		{"Fbootstraptransfer_t.Buffer", Field, 0, ""},
    +		{"Fbootstraptransfer_t.Length", Field, 0, ""},
    +		{"Fbootstraptransfer_t.Offset", Field, 0, ""},
    +		{"Fchdir", Func, 0, "func(fd int) (err error)"},
    +		{"Fchflags", Func, 0, ""},
    +		{"Fchmod", Func, 0, "func(fd int, mode uint32) (err error)"},
    +		{"Fchmodat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) error"},
    +		{"Fchown", Func, 0, "func(fd int, uid int, gid int) (err error)"},
    +		{"Fchownat", Func, 0, "func(dirfd int, path string, uid int, gid int, flags int) (err error)"},
    +		{"FcntlFlock", Func, 3, "func(fd uintptr, cmd int, lk *Flock_t) error"},
    +		{"FdSet", Type, 0, ""},
    +		{"FdSet.Bits", Field, 0, ""},
    +		{"FdSet.X__fds_bits", Field, 0, ""},
    +		{"Fdatasync", Func, 0, "func(fd int) (err error)"},
    +		{"FileNotifyInformation", Type, 0, ""},
    +		{"FileNotifyInformation.Action", Field, 0, ""},
    +		{"FileNotifyInformation.FileName", Field, 0, ""},
    +		{"FileNotifyInformation.FileNameLength", Field, 0, ""},
    +		{"FileNotifyInformation.NextEntryOffset", Field, 0, ""},
    +		{"Filetime", Type, 0, ""},
    +		{"Filetime.HighDateTime", Field, 0, ""},
    +		{"Filetime.LowDateTime", Field, 0, ""},
    +		{"FindClose", Func, 0, ""},
    +		{"FindFirstFile", Func, 0, ""},
    +		{"FindNextFile", Func, 0, ""},
    +		{"Flock", Func, 0, "func(fd int, how int) (err error)"},
    +		{"Flock_t", Type, 0, ""},
    +		{"Flock_t.Len", Field, 0, ""},
    +		{"Flock_t.Pad_cgo_0", Field, 0, ""},
    +		{"Flock_t.Pad_cgo_1", Field, 3, ""},
    +		{"Flock_t.Pid", Field, 0, ""},
    +		{"Flock_t.Start", Field, 0, ""},
    +		{"Flock_t.Sysid", Field, 0, ""},
    +		{"Flock_t.Type", Field, 0, ""},
    +		{"Flock_t.Whence", Field, 0, ""},
    +		{"FlushBpf", Func, 0, ""},
    +		{"FlushFileBuffers", Func, 0, ""},
    +		{"FlushViewOfFile", Func, 0, ""},
    +		{"ForkExec", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)"},
    +		{"ForkLock", Var, 0, ""},
    +		{"FormatMessage", Func, 0, ""},
    +		{"Fpathconf", Func, 0, ""},
    +		{"FreeAddrInfoW", Func, 1, ""},
    +		{"FreeEnvironmentStrings", Func, 0, ""},
    +		{"FreeLibrary", Func, 0, ""},
    +		{"Fsid", Type, 0, ""},
    +		{"Fsid.Val", Field, 0, ""},
    +		{"Fsid.X__fsid_val", Field, 2, ""},
    +		{"Fsid.X__val", Field, 0, ""},
    +		{"Fstat", Func, 0, "func(fd int, stat *Stat_t) (err error)"},
    +		{"Fstatat", Func, 12, ""},
    +		{"Fstatfs", Func, 0, "func(fd int, buf *Statfs_t) (err error)"},
    +		{"Fstore_t", Type, 0, ""},
    +		{"Fstore_t.Bytesalloc", Field, 0, ""},
    +		{"Fstore_t.Flags", Field, 0, ""},
    +		{"Fstore_t.Length", Field, 0, ""},
    +		{"Fstore_t.Offset", Field, 0, ""},
    +		{"Fstore_t.Posmode", Field, 0, ""},
    +		{"Fsync", Func, 0, "func(fd int) (err error)"},
    +		{"Ftruncate", Func, 0, "func(fd int, length int64) (err error)"},
    +		{"FullPath", Func, 4, ""},
    +		{"Futimes", Func, 0, "func(fd int, tv []Timeval) (err error)"},
    +		{"Futimesat", Func, 0, "func(dirfd int, path string, tv []Timeval) (err error)"},
    +		{"GENERIC_ALL", Const, 0, ""},
    +		{"GENERIC_EXECUTE", Const, 0, ""},
    +		{"GENERIC_READ", Const, 0, ""},
    +		{"GENERIC_WRITE", Const, 0, ""},
    +		{"GUID", Type, 1, ""},
    +		{"GUID.Data1", Field, 1, ""},
    +		{"GUID.Data2", Field, 1, ""},
    +		{"GUID.Data3", Field, 1, ""},
    +		{"GUID.Data4", Field, 1, ""},
    +		{"GetAcceptExSockaddrs", Func, 0, ""},
    +		{"GetAdaptersInfo", Func, 0, ""},
    +		{"GetAddrInfoW", Func, 1, ""},
    +		{"GetCommandLine", Func, 0, ""},
    +		{"GetComputerName", Func, 0, ""},
    +		{"GetConsoleMode", Func, 1, ""},
    +		{"GetCurrentDirectory", Func, 0, ""},
    +		{"GetCurrentProcess", Func, 0, ""},
    +		{"GetEnvironmentStrings", Func, 0, ""},
    +		{"GetEnvironmentVariable", Func, 0, ""},
    +		{"GetExitCodeProcess", Func, 0, ""},
    +		{"GetFileAttributes", Func, 0, ""},
    +		{"GetFileAttributesEx", Func, 0, ""},
    +		{"GetFileExInfoStandard", Const, 0, ""},
    +		{"GetFileExMaxInfoLevel", Const, 0, ""},
    +		{"GetFileInformationByHandle", Func, 0, ""},
    +		{"GetFileType", Func, 0, ""},
    +		{"GetFullPathName", Func, 0, ""},
    +		{"GetHostByName", Func, 0, ""},
    +		{"GetIfEntry", Func, 0, ""},
    +		{"GetLastError", Func, 0, ""},
    +		{"GetLengthSid", Func, 0, ""},
    +		{"GetLongPathName", Func, 0, ""},
    +		{"GetProcAddress", Func, 0, ""},
    +		{"GetProcessTimes", Func, 0, ""},
    +		{"GetProtoByName", Func, 0, ""},
    +		{"GetQueuedCompletionStatus", Func, 0, ""},
    +		{"GetServByName", Func, 0, ""},
    +		{"GetShortPathName", Func, 0, ""},
    +		{"GetStartupInfo", Func, 0, ""},
    +		{"GetStdHandle", Func, 0, ""},
    +		{"GetSystemTimeAsFileTime", Func, 0, ""},
    +		{"GetTempPath", Func, 0, ""},
    +		{"GetTimeZoneInformation", Func, 0, ""},
    +		{"GetTokenInformation", Func, 0, ""},
    +		{"GetUserNameEx", Func, 0, ""},
    +		{"GetUserProfileDirectory", Func, 0, ""},
    +		{"GetVersion", Func, 0, ""},
    +		{"Getcwd", Func, 0, "func(buf []byte) (n int, err error)"},
    +		{"Getdents", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
    +		{"Getdirentries", Func, 0, ""},
    +		{"Getdtablesize", Func, 0, ""},
    +		{"Getegid", Func, 0, "func() (egid int)"},
    +		{"Getenv", Func, 0, "func(key string) (value string, found bool)"},
    +		{"Geteuid", Func, 0, "func() (euid int)"},
    +		{"Getfsstat", Func, 0, ""},
    +		{"Getgid", Func, 0, "func() (gid int)"},
    +		{"Getgroups", Func, 0, "func() (gids []int, err error)"},
    +		{"Getpagesize", Func, 0, "func() int"},
    +		{"Getpeername", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
    +		{"Getpgid", Func, 0, "func(pid int) (pgid int, err error)"},
    +		{"Getpgrp", Func, 0, "func() (pid int)"},
    +		{"Getpid", Func, 0, "func() (pid int)"},
    +		{"Getppid", Func, 0, "func() (ppid int)"},
    +		{"Getpriority", Func, 0, "func(which int, who int) (prio int, err error)"},
    +		{"Getrlimit", Func, 0, "func(resource int, rlim *Rlimit) (err error)"},
    +		{"Getrusage", Func, 0, "func(who int, rusage *Rusage) (err error)"},
    +		{"Getsid", Func, 0, ""},
    +		{"Getsockname", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
    +		{"Getsockopt", Func, 1, ""},
    +		{"GetsockoptByte", Func, 0, ""},
    +		{"GetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int) (*ICMPv6Filter, error)"},
    +		{"GetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int) (*IPMreq, error)"},
    +		{"GetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int) (*IPMreqn, error)"},
    +		{"GetsockoptIPv6MTUInfo", Func, 2, "func(fd int, level int, opt int) (*IPv6MTUInfo, error)"},
    +		{"GetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int) (*IPv6Mreq, error)"},
    +		{"GetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int) (value [4]byte, err error)"},
    +		{"GetsockoptInt", Func, 0, "func(fd int, level int, opt int) (value int, err error)"},
    +		{"GetsockoptUcred", Func, 1, "func(fd int, level int, opt int) (*Ucred, error)"},
    +		{"Gettid", Func, 0, "func() (tid int)"},
    +		{"Gettimeofday", Func, 0, "func(tv *Timeval) (err error)"},
    +		{"Getuid", Func, 0, "func() (uid int)"},
    +		{"Getwd", Func, 0, "func() (wd string, err error)"},
    +		{"Getxattr", Func, 1, "func(path string, attr string, dest []byte) (sz int, err error)"},
    +		{"HANDLE_FLAG_INHERIT", Const, 0, ""},
    +		{"HKEY_CLASSES_ROOT", Const, 0, ""},
    +		{"HKEY_CURRENT_CONFIG", Const, 0, ""},
    +		{"HKEY_CURRENT_USER", Const, 0, ""},
    +		{"HKEY_DYN_DATA", Const, 0, ""},
    +		{"HKEY_LOCAL_MACHINE", Const, 0, ""},
    +		{"HKEY_PERFORMANCE_DATA", Const, 0, ""},
    +		{"HKEY_USERS", Const, 0, ""},
    +		{"HUPCL", Const, 0, ""},
    +		{"Handle", Type, 0, ""},
    +		{"Hostent", Type, 0, ""},
    +		{"Hostent.AddrList", Field, 0, ""},
    +		{"Hostent.AddrType", Field, 0, ""},
    +		{"Hostent.Aliases", Field, 0, ""},
    +		{"Hostent.Length", Field, 0, ""},
    +		{"Hostent.Name", Field, 0, ""},
    +		{"ICANON", Const, 0, ""},
    +		{"ICMP6_FILTER", Const, 2, ""},
    +		{"ICMPV6_FILTER", Const, 2, ""},
    +		{"ICMPv6Filter", Type, 2, ""},
    +		{"ICMPv6Filter.Data", Field, 2, ""},
    +		{"ICMPv6Filter.Filt", Field, 2, ""},
    +		{"ICRNL", Const, 0, ""},
    +		{"IEXTEN", Const, 0, ""},
    +		{"IFAN_ARRIVAL", Const, 1, ""},
    +		{"IFAN_DEPARTURE", Const, 1, ""},
    +		{"IFA_ADDRESS", Const, 0, ""},
    +		{"IFA_ANYCAST", Const, 0, ""},
    +		{"IFA_BROADCAST", Const, 0, ""},
    +		{"IFA_CACHEINFO", Const, 0, ""},
    +		{"IFA_F_DADFAILED", Const, 0, ""},
    +		{"IFA_F_DEPRECATED", Const, 0, ""},
    +		{"IFA_F_HOMEADDRESS", Const, 0, ""},
    +		{"IFA_F_NODAD", Const, 0, ""},
    +		{"IFA_F_OPTIMISTIC", Const, 0, ""},
    +		{"IFA_F_PERMANENT", Const, 0, ""},
    +		{"IFA_F_SECONDARY", Const, 0, ""},
    +		{"IFA_F_TEMPORARY", Const, 0, ""},
    +		{"IFA_F_TENTATIVE", Const, 0, ""},
    +		{"IFA_LABEL", Const, 0, ""},
    +		{"IFA_LOCAL", Const, 0, ""},
    +		{"IFA_MAX", Const, 0, ""},
    +		{"IFA_MULTICAST", Const, 0, ""},
    +		{"IFA_ROUTE", Const, 1, ""},
    +		{"IFA_UNSPEC", Const, 0, ""},
    +		{"IFF_ALLMULTI", Const, 0, ""},
    +		{"IFF_ALTPHYS", Const, 0, ""},
    +		{"IFF_AUTOMEDIA", Const, 0, ""},
    +		{"IFF_BROADCAST", Const, 0, ""},
    +		{"IFF_CANTCHANGE", Const, 0, ""},
    +		{"IFF_CANTCONFIG", Const, 1, ""},
    +		{"IFF_DEBUG", Const, 0, ""},
    +		{"IFF_DRV_OACTIVE", Const, 0, ""},
    +		{"IFF_DRV_RUNNING", Const, 0, ""},
    +		{"IFF_DYING", Const, 0, ""},
    +		{"IFF_DYNAMIC", Const, 0, ""},
    +		{"IFF_LINK0", Const, 0, ""},
    +		{"IFF_LINK1", Const, 0, ""},
    +		{"IFF_LINK2", Const, 0, ""},
    +		{"IFF_LOOPBACK", Const, 0, ""},
    +		{"IFF_MASTER", Const, 0, ""},
    +		{"IFF_MONITOR", Const, 0, ""},
    +		{"IFF_MULTICAST", Const, 0, ""},
    +		{"IFF_NOARP", Const, 0, ""},
    +		{"IFF_NOTRAILERS", Const, 0, ""},
    +		{"IFF_NO_PI", Const, 0, ""},
    +		{"IFF_OACTIVE", Const, 0, ""},
    +		{"IFF_ONE_QUEUE", Const, 0, ""},
    +		{"IFF_POINTOPOINT", Const, 0, ""},
    +		{"IFF_POINTTOPOINT", Const, 0, ""},
    +		{"IFF_PORTSEL", Const, 0, ""},
    +		{"IFF_PPROMISC", Const, 0, ""},
    +		{"IFF_PROMISC", Const, 0, ""},
    +		{"IFF_RENAMING", Const, 0, ""},
    +		{"IFF_RUNNING", Const, 0, ""},
    +		{"IFF_SIMPLEX", Const, 0, ""},
    +		{"IFF_SLAVE", Const, 0, ""},
    +		{"IFF_SMART", Const, 0, ""},
    +		{"IFF_STATICARP", Const, 0, ""},
    +		{"IFF_TAP", Const, 0, ""},
    +		{"IFF_TUN", Const, 0, ""},
    +		{"IFF_TUN_EXCL", Const, 0, ""},
    +		{"IFF_UP", Const, 0, ""},
    +		{"IFF_VNET_HDR", Const, 0, ""},
    +		{"IFLA_ADDRESS", Const, 0, ""},
    +		{"IFLA_BROADCAST", Const, 0, ""},
    +		{"IFLA_COST", Const, 0, ""},
    +		{"IFLA_IFALIAS", Const, 0, ""},
    +		{"IFLA_IFNAME", Const, 0, ""},
    +		{"IFLA_LINK", Const, 0, ""},
    +		{"IFLA_LINKINFO", Const, 0, ""},
    +		{"IFLA_LINKMODE", Const, 0, ""},
    +		{"IFLA_MAP", Const, 0, ""},
    +		{"IFLA_MASTER", Const, 0, ""},
    +		{"IFLA_MAX", Const, 0, ""},
    +		{"IFLA_MTU", Const, 0, ""},
    +		{"IFLA_NET_NS_PID", Const, 0, ""},
    +		{"IFLA_OPERSTATE", Const, 0, ""},
    +		{"IFLA_PRIORITY", Const, 0, ""},
    +		{"IFLA_PROTINFO", Const, 0, ""},
    +		{"IFLA_QDISC", Const, 0, ""},
    +		{"IFLA_STATS", Const, 0, ""},
    +		{"IFLA_TXQLEN", Const, 0, ""},
    +		{"IFLA_UNSPEC", Const, 0, ""},
    +		{"IFLA_WEIGHT", Const, 0, ""},
    +		{"IFLA_WIRELESS", Const, 0, ""},
    +		{"IFNAMSIZ", Const, 0, ""},
    +		{"IFT_1822", Const, 0, ""},
    +		{"IFT_A12MPPSWITCH", Const, 0, ""},
    +		{"IFT_AAL2", Const, 0, ""},
    +		{"IFT_AAL5", Const, 0, ""},
    +		{"IFT_ADSL", Const, 0, ""},
    +		{"IFT_AFLANE8023", Const, 0, ""},
    +		{"IFT_AFLANE8025", Const, 0, ""},
    +		{"IFT_ARAP", Const, 0, ""},
    +		{"IFT_ARCNET", Const, 0, ""},
    +		{"IFT_ARCNETPLUS", Const, 0, ""},
    +		{"IFT_ASYNC", Const, 0, ""},
    +		{"IFT_ATM", Const, 0, ""},
    +		{"IFT_ATMDXI", Const, 0, ""},
    +		{"IFT_ATMFUNI", Const, 0, ""},
    +		{"IFT_ATMIMA", Const, 0, ""},
    +		{"IFT_ATMLOGICAL", Const, 0, ""},
    +		{"IFT_ATMRADIO", Const, 0, ""},
    +		{"IFT_ATMSUBINTERFACE", Const, 0, ""},
    +		{"IFT_ATMVCIENDPT", Const, 0, ""},
    +		{"IFT_ATMVIRTUAL", Const, 0, ""},
    +		{"IFT_BGPPOLICYACCOUNTING", Const, 0, ""},
    +		{"IFT_BLUETOOTH", Const, 1, ""},
    +		{"IFT_BRIDGE", Const, 0, ""},
    +		{"IFT_BSC", Const, 0, ""},
    +		{"IFT_CARP", Const, 0, ""},
    +		{"IFT_CCTEMUL", Const, 0, ""},
    +		{"IFT_CELLULAR", Const, 0, ""},
    +		{"IFT_CEPT", Const, 0, ""},
    +		{"IFT_CES", Const, 0, ""},
    +		{"IFT_CHANNEL", Const, 0, ""},
    +		{"IFT_CNR", Const, 0, ""},
    +		{"IFT_COFFEE", Const, 0, ""},
    +		{"IFT_COMPOSITELINK", Const, 0, ""},
    +		{"IFT_DCN", Const, 0, ""},
    +		{"IFT_DIGITALPOWERLINE", Const, 0, ""},
    +		{"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0, ""},
    +		{"IFT_DLSW", Const, 0, ""},
    +		{"IFT_DOCSCABLEDOWNSTREAM", Const, 0, ""},
    +		{"IFT_DOCSCABLEMACLAYER", Const, 0, ""},
    +		{"IFT_DOCSCABLEUPSTREAM", Const, 0, ""},
    +		{"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1, ""},
    +		{"IFT_DS0", Const, 0, ""},
    +		{"IFT_DS0BUNDLE", Const, 0, ""},
    +		{"IFT_DS1FDL", Const, 0, ""},
    +		{"IFT_DS3", Const, 0, ""},
    +		{"IFT_DTM", Const, 0, ""},
    +		{"IFT_DUMMY", Const, 1, ""},
    +		{"IFT_DVBASILN", Const, 0, ""},
    +		{"IFT_DVBASIOUT", Const, 0, ""},
    +		{"IFT_DVBRCCDOWNSTREAM", Const, 0, ""},
    +		{"IFT_DVBRCCMACLAYER", Const, 0, ""},
    +		{"IFT_DVBRCCUPSTREAM", Const, 0, ""},
    +		{"IFT_ECONET", Const, 1, ""},
    +		{"IFT_ENC", Const, 0, ""},
    +		{"IFT_EON", Const, 0, ""},
    +		{"IFT_EPLRS", Const, 0, ""},
    +		{"IFT_ESCON", Const, 0, ""},
    +		{"IFT_ETHER", Const, 0, ""},
    +		{"IFT_FAITH", Const, 0, ""},
    +		{"IFT_FAST", Const, 0, ""},
    +		{"IFT_FASTETHER", Const, 0, ""},
    +		{"IFT_FASTETHERFX", Const, 0, ""},
    +		{"IFT_FDDI", Const, 0, ""},
    +		{"IFT_FIBRECHANNEL", Const, 0, ""},
    +		{"IFT_FRAMERELAYINTERCONNECT", Const, 0, ""},
    +		{"IFT_FRAMERELAYMPI", Const, 0, ""},
    +		{"IFT_FRDLCIENDPT", Const, 0, ""},
    +		{"IFT_FRELAY", Const, 0, ""},
    +		{"IFT_FRELAYDCE", Const, 0, ""},
    +		{"IFT_FRF16MFRBUNDLE", Const, 0, ""},
    +		{"IFT_FRFORWARD", Const, 0, ""},
    +		{"IFT_G703AT2MB", Const, 0, ""},
    +		{"IFT_G703AT64K", Const, 0, ""},
    +		{"IFT_GIF", Const, 0, ""},
    +		{"IFT_GIGABITETHERNET", Const, 0, ""},
    +		{"IFT_GR303IDT", Const, 0, ""},
    +		{"IFT_GR303RDT", Const, 0, ""},
    +		{"IFT_H323GATEKEEPER", Const, 0, ""},
    +		{"IFT_H323PROXY", Const, 0, ""},
    +		{"IFT_HDH1822", Const, 0, ""},
    +		{"IFT_HDLC", Const, 0, ""},
    +		{"IFT_HDSL2", Const, 0, ""},
    +		{"IFT_HIPERLAN2", Const, 0, ""},
    +		{"IFT_HIPPI", Const, 0, ""},
    +		{"IFT_HIPPIINTERFACE", Const, 0, ""},
    +		{"IFT_HOSTPAD", Const, 0, ""},
    +		{"IFT_HSSI", Const, 0, ""},
    +		{"IFT_HY", Const, 0, ""},
    +		{"IFT_IBM370PARCHAN", Const, 0, ""},
    +		{"IFT_IDSL", Const, 0, ""},
    +		{"IFT_IEEE1394", Const, 0, ""},
    +		{"IFT_IEEE80211", Const, 0, ""},
    +		{"IFT_IEEE80212", Const, 0, ""},
    +		{"IFT_IEEE8023ADLAG", Const, 0, ""},
    +		{"IFT_IFGSN", Const, 0, ""},
    +		{"IFT_IMT", Const, 0, ""},
    +		{"IFT_INFINIBAND", Const, 1, ""},
    +		{"IFT_INTERLEAVE", Const, 0, ""},
    +		{"IFT_IP", Const, 0, ""},
    +		{"IFT_IPFORWARD", Const, 0, ""},
    +		{"IFT_IPOVERATM", Const, 0, ""},
    +		{"IFT_IPOVERCDLC", Const, 0, ""},
    +		{"IFT_IPOVERCLAW", Const, 0, ""},
    +		{"IFT_IPSWITCH", Const, 0, ""},
    +		{"IFT_IPXIP", Const, 0, ""},
    +		{"IFT_ISDN", Const, 0, ""},
    +		{"IFT_ISDNBASIC", Const, 0, ""},
    +		{"IFT_ISDNPRIMARY", Const, 0, ""},
    +		{"IFT_ISDNS", Const, 0, ""},
    +		{"IFT_ISDNU", Const, 0, ""},
    +		{"IFT_ISO88022LLC", Const, 0, ""},
    +		{"IFT_ISO88023", Const, 0, ""},
    +		{"IFT_ISO88024", Const, 0, ""},
    +		{"IFT_ISO88025", Const, 0, ""},
    +		{"IFT_ISO88025CRFPINT", Const, 0, ""},
    +		{"IFT_ISO88025DTR", Const, 0, ""},
    +		{"IFT_ISO88025FIBER", Const, 0, ""},
    +		{"IFT_ISO88026", Const, 0, ""},
    +		{"IFT_ISUP", Const, 0, ""},
    +		{"IFT_L2VLAN", Const, 0, ""},
    +		{"IFT_L3IPVLAN", Const, 0, ""},
    +		{"IFT_L3IPXVLAN", Const, 0, ""},
    +		{"IFT_LAPB", Const, 0, ""},
    +		{"IFT_LAPD", Const, 0, ""},
    +		{"IFT_LAPF", Const, 0, ""},
    +		{"IFT_LINEGROUP", Const, 1, ""},
    +		{"IFT_LOCALTALK", Const, 0, ""},
    +		{"IFT_LOOP", Const, 0, ""},
    +		{"IFT_MEDIAMAILOVERIP", Const, 0, ""},
    +		{"IFT_MFSIGLINK", Const, 0, ""},
    +		{"IFT_MIOX25", Const, 0, ""},
    +		{"IFT_MODEM", Const, 0, ""},
    +		{"IFT_MPC", Const, 0, ""},
    +		{"IFT_MPLS", Const, 0, ""},
    +		{"IFT_MPLSTUNNEL", Const, 0, ""},
    +		{"IFT_MSDSL", Const, 0, ""},
    +		{"IFT_MVL", Const, 0, ""},
    +		{"IFT_MYRINET", Const, 0, ""},
    +		{"IFT_NFAS", Const, 0, ""},
    +		{"IFT_NSIP", Const, 0, ""},
    +		{"IFT_OPTICALCHANNEL", Const, 0, ""},
    +		{"IFT_OPTICALTRANSPORT", Const, 0, ""},
    +		{"IFT_OTHER", Const, 0, ""},
    +		{"IFT_P10", Const, 0, ""},
    +		{"IFT_P80", Const, 0, ""},
    +		{"IFT_PARA", Const, 0, ""},
    +		{"IFT_PDP", Const, 0, ""},
    +		{"IFT_PFLOG", Const, 0, ""},
    +		{"IFT_PFLOW", Const, 1, ""},
    +		{"IFT_PFSYNC", Const, 0, ""},
    +		{"IFT_PLC", Const, 0, ""},
    +		{"IFT_PON155", Const, 1, ""},
    +		{"IFT_PON622", Const, 1, ""},
    +		{"IFT_POS", Const, 0, ""},
    +		{"IFT_PPP", Const, 0, ""},
    +		{"IFT_PPPMULTILINKBUNDLE", Const, 0, ""},
    +		{"IFT_PROPATM", Const, 1, ""},
    +		{"IFT_PROPBWAP2MP", Const, 0, ""},
    +		{"IFT_PROPCNLS", Const, 0, ""},
    +		{"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0, ""},
    +		{"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0, ""},
    +		{"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0, ""},
    +		{"IFT_PROPMUX", Const, 0, ""},
    +		{"IFT_PROPVIRTUAL", Const, 0, ""},
    +		{"IFT_PROPWIRELESSP2P", Const, 0, ""},
    +		{"IFT_PTPSERIAL", Const, 0, ""},
    +		{"IFT_PVC", Const, 0, ""},
    +		{"IFT_Q2931", Const, 1, ""},
    +		{"IFT_QLLC", Const, 0, ""},
    +		{"IFT_RADIOMAC", Const, 0, ""},
    +		{"IFT_RADSL", Const, 0, ""},
    +		{"IFT_REACHDSL", Const, 0, ""},
    +		{"IFT_RFC1483", Const, 0, ""},
    +		{"IFT_RS232", Const, 0, ""},
    +		{"IFT_RSRB", Const, 0, ""},
    +		{"IFT_SDLC", Const, 0, ""},
    +		{"IFT_SDSL", Const, 0, ""},
    +		{"IFT_SHDSL", Const, 0, ""},
    +		{"IFT_SIP", Const, 0, ""},
    +		{"IFT_SIPSIG", Const, 1, ""},
    +		{"IFT_SIPTG", Const, 1, ""},
    +		{"IFT_SLIP", Const, 0, ""},
    +		{"IFT_SMDSDXI", Const, 0, ""},
    +		{"IFT_SMDSICIP", Const, 0, ""},
    +		{"IFT_SONET", Const, 0, ""},
    +		{"IFT_SONETOVERHEADCHANNEL", Const, 0, ""},
    +		{"IFT_SONETPATH", Const, 0, ""},
    +		{"IFT_SONETVT", Const, 0, ""},
    +		{"IFT_SRP", Const, 0, ""},
    +		{"IFT_SS7SIGLINK", Const, 0, ""},
    +		{"IFT_STACKTOSTACK", Const, 0, ""},
    +		{"IFT_STARLAN", Const, 0, ""},
    +		{"IFT_STF", Const, 0, ""},
    +		{"IFT_T1", Const, 0, ""},
    +		{"IFT_TDLC", Const, 0, ""},
    +		{"IFT_TELINK", Const, 1, ""},
    +		{"IFT_TERMPAD", Const, 0, ""},
    +		{"IFT_TR008", Const, 0, ""},
    +		{"IFT_TRANSPHDLC", Const, 0, ""},
    +		{"IFT_TUNNEL", Const, 0, ""},
    +		{"IFT_ULTRA", Const, 0, ""},
    +		{"IFT_USB", Const, 0, ""},
    +		{"IFT_V11", Const, 0, ""},
    +		{"IFT_V35", Const, 0, ""},
    +		{"IFT_V36", Const, 0, ""},
    +		{"IFT_V37", Const, 0, ""},
    +		{"IFT_VDSL", Const, 0, ""},
    +		{"IFT_VIRTUALIPADDRESS", Const, 0, ""},
    +		{"IFT_VIRTUALTG", Const, 1, ""},
    +		{"IFT_VOICEDID", Const, 1, ""},
    +		{"IFT_VOICEEM", Const, 0, ""},
    +		{"IFT_VOICEEMFGD", Const, 1, ""},
    +		{"IFT_VOICEENCAP", Const, 0, ""},
    +		{"IFT_VOICEFGDEANA", Const, 1, ""},
    +		{"IFT_VOICEFXO", Const, 0, ""},
    +		{"IFT_VOICEFXS", Const, 0, ""},
    +		{"IFT_VOICEOVERATM", Const, 0, ""},
    +		{"IFT_VOICEOVERCABLE", Const, 1, ""},
    +		{"IFT_VOICEOVERFRAMERELAY", Const, 0, ""},
    +		{"IFT_VOICEOVERIP", Const, 0, ""},
    +		{"IFT_X213", Const, 0, ""},
    +		{"IFT_X25", Const, 0, ""},
    +		{"IFT_X25DDN", Const, 0, ""},
    +		{"IFT_X25HUNTGROUP", Const, 0, ""},
    +		{"IFT_X25MLP", Const, 0, ""},
    +		{"IFT_X25PLE", Const, 0, ""},
    +		{"IFT_XETHER", Const, 0, ""},
    +		{"IGNBRK", Const, 0, ""},
    +		{"IGNCR", Const, 0, ""},
    +		{"IGNORE", Const, 0, ""},
    +		{"IGNPAR", Const, 0, ""},
    +		{"IMAXBEL", Const, 0, ""},
    +		{"INFINITE", Const, 0, ""},
    +		{"INLCR", Const, 0, ""},
    +		{"INPCK", Const, 0, ""},
    +		{"INVALID_FILE_ATTRIBUTES", Const, 0, ""},
    +		{"IN_ACCESS", Const, 0, ""},
    +		{"IN_ALL_EVENTS", Const, 0, ""},
    +		{"IN_ATTRIB", Const, 0, ""},
    +		{"IN_CLASSA_HOST", Const, 0, ""},
    +		{"IN_CLASSA_MAX", Const, 0, ""},
    +		{"IN_CLASSA_NET", Const, 0, ""},
    +		{"IN_CLASSA_NSHIFT", Const, 0, ""},
    +		{"IN_CLASSB_HOST", Const, 0, ""},
    +		{"IN_CLASSB_MAX", Const, 0, ""},
    +		{"IN_CLASSB_NET", Const, 0, ""},
    +		{"IN_CLASSB_NSHIFT", Const, 0, ""},
    +		{"IN_CLASSC_HOST", Const, 0, ""},
    +		{"IN_CLASSC_NET", Const, 0, ""},
    +		{"IN_CLASSC_NSHIFT", Const, 0, ""},
    +		{"IN_CLASSD_HOST", Const, 0, ""},
    +		{"IN_CLASSD_NET", Const, 0, ""},
    +		{"IN_CLASSD_NSHIFT", Const, 0, ""},
    +		{"IN_CLOEXEC", Const, 0, ""},
    +		{"IN_CLOSE", Const, 0, ""},
    +		{"IN_CLOSE_NOWRITE", Const, 0, ""},
    +		{"IN_CLOSE_WRITE", Const, 0, ""},
    +		{"IN_CREATE", Const, 0, ""},
    +		{"IN_DELETE", Const, 0, ""},
    +		{"IN_DELETE_SELF", Const, 0, ""},
    +		{"IN_DONT_FOLLOW", Const, 0, ""},
    +		{"IN_EXCL_UNLINK", Const, 0, ""},
    +		{"IN_IGNORED", Const, 0, ""},
    +		{"IN_ISDIR", Const, 0, ""},
    +		{"IN_LINKLOCALNETNUM", Const, 0, ""},
    +		{"IN_LOOPBACKNET", Const, 0, ""},
    +		{"IN_MASK_ADD", Const, 0, ""},
    +		{"IN_MODIFY", Const, 0, ""},
    +		{"IN_MOVE", Const, 0, ""},
    +		{"IN_MOVED_FROM", Const, 0, ""},
    +		{"IN_MOVED_TO", Const, 0, ""},
    +		{"IN_MOVE_SELF", Const, 0, ""},
    +		{"IN_NONBLOCK", Const, 0, ""},
    +		{"IN_ONESHOT", Const, 0, ""},
    +		{"IN_ONLYDIR", Const, 0, ""},
    +		{"IN_OPEN", Const, 0, ""},
    +		{"IN_Q_OVERFLOW", Const, 0, ""},
    +		{"IN_RFC3021_HOST", Const, 1, ""},
    +		{"IN_RFC3021_MASK", Const, 1, ""},
    +		{"IN_RFC3021_NET", Const, 1, ""},
    +		{"IN_RFC3021_NSHIFT", Const, 1, ""},
    +		{"IN_UNMOUNT", Const, 0, ""},
    +		{"IOC_IN", Const, 1, ""},
    +		{"IOC_INOUT", Const, 1, ""},
    +		{"IOC_OUT", Const, 1, ""},
    +		{"IOC_VENDOR", Const, 3, ""},
    +		{"IOC_WS2", Const, 1, ""},
    +		{"IO_REPARSE_TAG_SYMLINK", Const, 4, ""},
    +		{"IPMreq", Type, 0, ""},
    +		{"IPMreq.Interface", Field, 0, ""},
    +		{"IPMreq.Multiaddr", Field, 0, ""},
    +		{"IPMreqn", Type, 0, ""},
    +		{"IPMreqn.Address", Field, 0, ""},
    +		{"IPMreqn.Ifindex", Field, 0, ""},
    +		{"IPMreqn.Multiaddr", Field, 0, ""},
    +		{"IPPROTO_3PC", Const, 0, ""},
    +		{"IPPROTO_ADFS", Const, 0, ""},
    +		{"IPPROTO_AH", Const, 0, ""},
    +		{"IPPROTO_AHIP", Const, 0, ""},
    +		{"IPPROTO_APES", Const, 0, ""},
    +		{"IPPROTO_ARGUS", Const, 0, ""},
    +		{"IPPROTO_AX25", Const, 0, ""},
    +		{"IPPROTO_BHA", Const, 0, ""},
    +		{"IPPROTO_BLT", Const, 0, ""},
    +		{"IPPROTO_BRSATMON", Const, 0, ""},
    +		{"IPPROTO_CARP", Const, 0, ""},
    +		{"IPPROTO_CFTP", Const, 0, ""},
    +		{"IPPROTO_CHAOS", Const, 0, ""},
    +		{"IPPROTO_CMTP", Const, 0, ""},
    +		{"IPPROTO_COMP", Const, 0, ""},
    +		{"IPPROTO_CPHB", Const, 0, ""},
    +		{"IPPROTO_CPNX", Const, 0, ""},
    +		{"IPPROTO_DCCP", Const, 0, ""},
    +		{"IPPROTO_DDP", Const, 0, ""},
    +		{"IPPROTO_DGP", Const, 0, ""},
    +		{"IPPROTO_DIVERT", Const, 0, ""},
    +		{"IPPROTO_DIVERT_INIT", Const, 3, ""},
    +		{"IPPROTO_DIVERT_RESP", Const, 3, ""},
    +		{"IPPROTO_DONE", Const, 0, ""},
    +		{"IPPROTO_DSTOPTS", Const, 0, ""},
    +		{"IPPROTO_EGP", Const, 0, ""},
    +		{"IPPROTO_EMCON", Const, 0, ""},
    +		{"IPPROTO_ENCAP", Const, 0, ""},
    +		{"IPPROTO_EON", Const, 0, ""},
    +		{"IPPROTO_ESP", Const, 0, ""},
    +		{"IPPROTO_ETHERIP", Const, 0, ""},
    +		{"IPPROTO_FRAGMENT", Const, 0, ""},
    +		{"IPPROTO_GGP", Const, 0, ""},
    +		{"IPPROTO_GMTP", Const, 0, ""},
    +		{"IPPROTO_GRE", Const, 0, ""},
    +		{"IPPROTO_HELLO", Const, 0, ""},
    +		{"IPPROTO_HMP", Const, 0, ""},
    +		{"IPPROTO_HOPOPTS", Const, 0, ""},
    +		{"IPPROTO_ICMP", Const, 0, ""},
    +		{"IPPROTO_ICMPV6", Const, 0, ""},
    +		{"IPPROTO_IDP", Const, 0, ""},
    +		{"IPPROTO_IDPR", Const, 0, ""},
    +		{"IPPROTO_IDRP", Const, 0, ""},
    +		{"IPPROTO_IGMP", Const, 0, ""},
    +		{"IPPROTO_IGP", Const, 0, ""},
    +		{"IPPROTO_IGRP", Const, 0, ""},
    +		{"IPPROTO_IL", Const, 0, ""},
    +		{"IPPROTO_INLSP", Const, 0, ""},
    +		{"IPPROTO_INP", Const, 0, ""},
    +		{"IPPROTO_IP", Const, 0, ""},
    +		{"IPPROTO_IPCOMP", Const, 0, ""},
    +		{"IPPROTO_IPCV", Const, 0, ""},
    +		{"IPPROTO_IPEIP", Const, 0, ""},
    +		{"IPPROTO_IPIP", Const, 0, ""},
    +		{"IPPROTO_IPPC", Const, 0, ""},
    +		{"IPPROTO_IPV4", Const, 0, ""},
    +		{"IPPROTO_IPV6", Const, 0, ""},
    +		{"IPPROTO_IPV6_ICMP", Const, 1, ""},
    +		{"IPPROTO_IRTP", Const, 0, ""},
    +		{"IPPROTO_KRYPTOLAN", Const, 0, ""},
    +		{"IPPROTO_LARP", Const, 0, ""},
    +		{"IPPROTO_LEAF1", Const, 0, ""},
    +		{"IPPROTO_LEAF2", Const, 0, ""},
    +		{"IPPROTO_MAX", Const, 0, ""},
    +		{"IPPROTO_MAXID", Const, 0, ""},
    +		{"IPPROTO_MEAS", Const, 0, ""},
    +		{"IPPROTO_MH", Const, 1, ""},
    +		{"IPPROTO_MHRP", Const, 0, ""},
    +		{"IPPROTO_MICP", Const, 0, ""},
    +		{"IPPROTO_MOBILE", Const, 0, ""},
    +		{"IPPROTO_MPLS", Const, 1, ""},
    +		{"IPPROTO_MTP", Const, 0, ""},
    +		{"IPPROTO_MUX", Const, 0, ""},
    +		{"IPPROTO_ND", Const, 0, ""},
    +		{"IPPROTO_NHRP", Const, 0, ""},
    +		{"IPPROTO_NONE", Const, 0, ""},
    +		{"IPPROTO_NSP", Const, 0, ""},
    +		{"IPPROTO_NVPII", Const, 0, ""},
    +		{"IPPROTO_OLD_DIVERT", Const, 0, ""},
    +		{"IPPROTO_OSPFIGP", Const, 0, ""},
    +		{"IPPROTO_PFSYNC", Const, 0, ""},
    +		{"IPPROTO_PGM", Const, 0, ""},
    +		{"IPPROTO_PIGP", Const, 0, ""},
    +		{"IPPROTO_PIM", Const, 0, ""},
    +		{"IPPROTO_PRM", Const, 0, ""},
    +		{"IPPROTO_PUP", Const, 0, ""},
    +		{"IPPROTO_PVP", Const, 0, ""},
    +		{"IPPROTO_RAW", Const, 0, ""},
    +		{"IPPROTO_RCCMON", Const, 0, ""},
    +		{"IPPROTO_RDP", Const, 0, ""},
    +		{"IPPROTO_ROUTING", Const, 0, ""},
    +		{"IPPROTO_RSVP", Const, 0, ""},
    +		{"IPPROTO_RVD", Const, 0, ""},
    +		{"IPPROTO_SATEXPAK", Const, 0, ""},
    +		{"IPPROTO_SATMON", Const, 0, ""},
    +		{"IPPROTO_SCCSP", Const, 0, ""},
    +		{"IPPROTO_SCTP", Const, 0, ""},
    +		{"IPPROTO_SDRP", Const, 0, ""},
    +		{"IPPROTO_SEND", Const, 1, ""},
    +		{"IPPROTO_SEP", Const, 0, ""},
    +		{"IPPROTO_SKIP", Const, 0, ""},
    +		{"IPPROTO_SPACER", Const, 0, ""},
    +		{"IPPROTO_SRPC", Const, 0, ""},
    +		{"IPPROTO_ST", Const, 0, ""},
    +		{"IPPROTO_SVMTP", Const, 0, ""},
    +		{"IPPROTO_SWIPE", Const, 0, ""},
    +		{"IPPROTO_TCF", Const, 0, ""},
    +		{"IPPROTO_TCP", Const, 0, ""},
    +		{"IPPROTO_TLSP", Const, 0, ""},
    +		{"IPPROTO_TP", Const, 0, ""},
    +		{"IPPROTO_TPXX", Const, 0, ""},
    +		{"IPPROTO_TRUNK1", Const, 0, ""},
    +		{"IPPROTO_TRUNK2", Const, 0, ""},
    +		{"IPPROTO_TTP", Const, 0, ""},
    +		{"IPPROTO_UDP", Const, 0, ""},
    +		{"IPPROTO_UDPLITE", Const, 0, ""},
    +		{"IPPROTO_VINES", Const, 0, ""},
    +		{"IPPROTO_VISA", Const, 0, ""},
    +		{"IPPROTO_VMTP", Const, 0, ""},
    +		{"IPPROTO_VRRP", Const, 1, ""},
    +		{"IPPROTO_WBEXPAK", Const, 0, ""},
    +		{"IPPROTO_WBMON", Const, 0, ""},
    +		{"IPPROTO_WSN", Const, 0, ""},
    +		{"IPPROTO_XNET", Const, 0, ""},
    +		{"IPPROTO_XTP", Const, 0, ""},
    +		{"IPV6_2292DSTOPTS", Const, 0, ""},
    +		{"IPV6_2292HOPLIMIT", Const, 0, ""},
    +		{"IPV6_2292HOPOPTS", Const, 0, ""},
    +		{"IPV6_2292NEXTHOP", Const, 0, ""},
    +		{"IPV6_2292PKTINFO", Const, 0, ""},
    +		{"IPV6_2292PKTOPTIONS", Const, 0, ""},
    +		{"IPV6_2292RTHDR", Const, 0, ""},
    +		{"IPV6_ADDRFORM", Const, 0, ""},
    +		{"IPV6_ADD_MEMBERSHIP", Const, 0, ""},
    +		{"IPV6_AUTHHDR", Const, 0, ""},
    +		{"IPV6_AUTH_LEVEL", Const, 1, ""},
    +		{"IPV6_AUTOFLOWLABEL", Const, 0, ""},
    +		{"IPV6_BINDANY", Const, 0, ""},
    +		{"IPV6_BINDV6ONLY", Const, 0, ""},
    +		{"IPV6_BOUND_IF", Const, 0, ""},
    +		{"IPV6_CHECKSUM", Const, 0, ""},
    +		{"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0, ""},
    +		{"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
    +		{"IPV6_DEFHLIM", Const, 0, ""},
    +		{"IPV6_DONTFRAG", Const, 0, ""},
    +		{"IPV6_DROP_MEMBERSHIP", Const, 0, ""},
    +		{"IPV6_DSTOPTS", Const, 0, ""},
    +		{"IPV6_ESP_NETWORK_LEVEL", Const, 1, ""},
    +		{"IPV6_ESP_TRANS_LEVEL", Const, 1, ""},
    +		{"IPV6_FAITH", Const, 0, ""},
    +		{"IPV6_FLOWINFO_MASK", Const, 0, ""},
    +		{"IPV6_FLOWLABEL_MASK", Const, 0, ""},
    +		{"IPV6_FRAGTTL", Const, 0, ""},
    +		{"IPV6_FW_ADD", Const, 0, ""},
    +		{"IPV6_FW_DEL", Const, 0, ""},
    +		{"IPV6_FW_FLUSH", Const, 0, ""},
    +		{"IPV6_FW_GET", Const, 0, ""},
    +		{"IPV6_FW_ZERO", Const, 0, ""},
    +		{"IPV6_HLIMDEC", Const, 0, ""},
    +		{"IPV6_HOPLIMIT", Const, 0, ""},
    +		{"IPV6_HOPOPTS", Const, 0, ""},
    +		{"IPV6_IPCOMP_LEVEL", Const, 1, ""},
    +		{"IPV6_IPSEC_POLICY", Const, 0, ""},
    +		{"IPV6_JOIN_ANYCAST", Const, 0, ""},
    +		{"IPV6_JOIN_GROUP", Const, 0, ""},
    +		{"IPV6_LEAVE_ANYCAST", Const, 0, ""},
    +		{"IPV6_LEAVE_GROUP", Const, 0, ""},
    +		{"IPV6_MAXHLIM", Const, 0, ""},
    +		{"IPV6_MAXOPTHDR", Const, 0, ""},
    +		{"IPV6_MAXPACKET", Const, 0, ""},
    +		{"IPV6_MAX_GROUP_SRC_FILTER", Const, 0, ""},
    +		{"IPV6_MAX_MEMBERSHIPS", Const, 0, ""},
    +		{"IPV6_MAX_SOCK_SRC_FILTER", Const, 0, ""},
    +		{"IPV6_MIN_MEMBERSHIPS", Const, 0, ""},
    +		{"IPV6_MMTU", Const, 0, ""},
    +		{"IPV6_MSFILTER", Const, 0, ""},
    +		{"IPV6_MTU", Const, 0, ""},
    +		{"IPV6_MTU_DISCOVER", Const, 0, ""},
    +		{"IPV6_MULTICAST_HOPS", Const, 0, ""},
    +		{"IPV6_MULTICAST_IF", Const, 0, ""},
    +		{"IPV6_MULTICAST_LOOP", Const, 0, ""},
    +		{"IPV6_NEXTHOP", Const, 0, ""},
    +		{"IPV6_OPTIONS", Const, 1, ""},
    +		{"IPV6_PATHMTU", Const, 0, ""},
    +		{"IPV6_PIPEX", Const, 1, ""},
    +		{"IPV6_PKTINFO", Const, 0, ""},
    +		{"IPV6_PMTUDISC_DO", Const, 0, ""},
    +		{"IPV6_PMTUDISC_DONT", Const, 0, ""},
    +		{"IPV6_PMTUDISC_PROBE", Const, 0, ""},
    +		{"IPV6_PMTUDISC_WANT", Const, 0, ""},
    +		{"IPV6_PORTRANGE", Const, 0, ""},
    +		{"IPV6_PORTRANGE_DEFAULT", Const, 0, ""},
    +		{"IPV6_PORTRANGE_HIGH", Const, 0, ""},
    +		{"IPV6_PORTRANGE_LOW", Const, 0, ""},
    +		{"IPV6_PREFER_TEMPADDR", Const, 0, ""},
    +		{"IPV6_RECVDSTOPTS", Const, 0, ""},
    +		{"IPV6_RECVDSTPORT", Const, 3, ""},
    +		{"IPV6_RECVERR", Const, 0, ""},
    +		{"IPV6_RECVHOPLIMIT", Const, 0, ""},
    +		{"IPV6_RECVHOPOPTS", Const, 0, ""},
    +		{"IPV6_RECVPATHMTU", Const, 0, ""},
    +		{"IPV6_RECVPKTINFO", Const, 0, ""},
    +		{"IPV6_RECVRTHDR", Const, 0, ""},
    +		{"IPV6_RECVTCLASS", Const, 0, ""},
    +		{"IPV6_ROUTER_ALERT", Const, 0, ""},
    +		{"IPV6_RTABLE", Const, 1, ""},
    +		{"IPV6_RTHDR", Const, 0, ""},
    +		{"IPV6_RTHDRDSTOPTS", Const, 0, ""},
    +		{"IPV6_RTHDR_LOOSE", Const, 0, ""},
    +		{"IPV6_RTHDR_STRICT", Const, 0, ""},
    +		{"IPV6_RTHDR_TYPE_0", Const, 0, ""},
    +		{"IPV6_RXDSTOPTS", Const, 0, ""},
    +		{"IPV6_RXHOPOPTS", Const, 0, ""},
    +		{"IPV6_SOCKOPT_RESERVED1", Const, 0, ""},
    +		{"IPV6_TCLASS", Const, 0, ""},
    +		{"IPV6_UNICAST_HOPS", Const, 0, ""},
    +		{"IPV6_USE_MIN_MTU", Const, 0, ""},
    +		{"IPV6_V6ONLY", Const, 0, ""},
    +		{"IPV6_VERSION", Const, 0, ""},
    +		{"IPV6_VERSION_MASK", Const, 0, ""},
    +		{"IPV6_XFRM_POLICY", Const, 0, ""},
    +		{"IP_ADD_MEMBERSHIP", Const, 0, ""},
    +		{"IP_ADD_SOURCE_MEMBERSHIP", Const, 0, ""},
    +		{"IP_AUTH_LEVEL", Const, 1, ""},
    +		{"IP_BINDANY", Const, 0, ""},
    +		{"IP_BLOCK_SOURCE", Const, 0, ""},
    +		{"IP_BOUND_IF", Const, 0, ""},
    +		{"IP_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
    +		{"IP_DEFAULT_MULTICAST_TTL", Const, 0, ""},
    +		{"IP_DF", Const, 0, ""},
    +		{"IP_DIVERTFL", Const, 3, ""},
    +		{"IP_DONTFRAG", Const, 0, ""},
    +		{"IP_DROP_MEMBERSHIP", Const, 0, ""},
    +		{"IP_DROP_SOURCE_MEMBERSHIP", Const, 0, ""},
    +		{"IP_DUMMYNET3", Const, 0, ""},
    +		{"IP_DUMMYNET_CONFIGURE", Const, 0, ""},
    +		{"IP_DUMMYNET_DEL", Const, 0, ""},
    +		{"IP_DUMMYNET_FLUSH", Const, 0, ""},
    +		{"IP_DUMMYNET_GET", Const, 0, ""},
    +		{"IP_EF", Const, 1, ""},
    +		{"IP_ERRORMTU", Const, 1, ""},
    +		{"IP_ESP_NETWORK_LEVEL", Const, 1, ""},
    +		{"IP_ESP_TRANS_LEVEL", Const, 1, ""},
    +		{"IP_FAITH", Const, 0, ""},
    +		{"IP_FREEBIND", Const, 0, ""},
    +		{"IP_FW3", Const, 0, ""},
    +		{"IP_FW_ADD", Const, 0, ""},
    +		{"IP_FW_DEL", Const, 0, ""},
    +		{"IP_FW_FLUSH", Const, 0, ""},
    +		{"IP_FW_GET", Const, 0, ""},
    +		{"IP_FW_NAT_CFG", Const, 0, ""},
    +		{"IP_FW_NAT_DEL", Const, 0, ""},
    +		{"IP_FW_NAT_GET_CONFIG", Const, 0, ""},
    +		{"IP_FW_NAT_GET_LOG", Const, 0, ""},
    +		{"IP_FW_RESETLOG", Const, 0, ""},
    +		{"IP_FW_TABLE_ADD", Const, 0, ""},
    +		{"IP_FW_TABLE_DEL", Const, 0, ""},
    +		{"IP_FW_TABLE_FLUSH", Const, 0, ""},
    +		{"IP_FW_TABLE_GETSIZE", Const, 0, ""},
    +		{"IP_FW_TABLE_LIST", Const, 0, ""},
    +		{"IP_FW_ZERO", Const, 0, ""},
    +		{"IP_HDRINCL", Const, 0, ""},
    +		{"IP_IPCOMP_LEVEL", Const, 1, ""},
    +		{"IP_IPSECFLOWINFO", Const, 1, ""},
    +		{"IP_IPSEC_LOCAL_AUTH", Const, 1, ""},
    +		{"IP_IPSEC_LOCAL_CRED", Const, 1, ""},
    +		{"IP_IPSEC_LOCAL_ID", Const, 1, ""},
    +		{"IP_IPSEC_POLICY", Const, 0, ""},
    +		{"IP_IPSEC_REMOTE_AUTH", Const, 1, ""},
    +		{"IP_IPSEC_REMOTE_CRED", Const, 1, ""},
    +		{"IP_IPSEC_REMOTE_ID", Const, 1, ""},
    +		{"IP_MAXPACKET", Const, 0, ""},
    +		{"IP_MAX_GROUP_SRC_FILTER", Const, 0, ""},
    +		{"IP_MAX_MEMBERSHIPS", Const, 0, ""},
    +		{"IP_MAX_SOCK_MUTE_FILTER", Const, 0, ""},
    +		{"IP_MAX_SOCK_SRC_FILTER", Const, 0, ""},
    +		{"IP_MAX_SOURCE_FILTER", Const, 0, ""},
    +		{"IP_MF", Const, 0, ""},
    +		{"IP_MINFRAGSIZE", Const, 1, ""},
    +		{"IP_MINTTL", Const, 0, ""},
    +		{"IP_MIN_MEMBERSHIPS", Const, 0, ""},
    +		{"IP_MSFILTER", Const, 0, ""},
    +		{"IP_MSS", Const, 0, ""},
    +		{"IP_MTU", Const, 0, ""},
    +		{"IP_MTU_DISCOVER", Const, 0, ""},
    +		{"IP_MULTICAST_IF", Const, 0, ""},
    +		{"IP_MULTICAST_IFINDEX", Const, 0, ""},
    +		{"IP_MULTICAST_LOOP", Const, 0, ""},
    +		{"IP_MULTICAST_TTL", Const, 0, ""},
    +		{"IP_MULTICAST_VIF", Const, 0, ""},
    +		{"IP_NAT__XXX", Const, 0, ""},
    +		{"IP_OFFMASK", Const, 0, ""},
    +		{"IP_OLD_FW_ADD", Const, 0, ""},
    +		{"IP_OLD_FW_DEL", Const, 0, ""},
    +		{"IP_OLD_FW_FLUSH", Const, 0, ""},
    +		{"IP_OLD_FW_GET", Const, 0, ""},
    +		{"IP_OLD_FW_RESETLOG", Const, 0, ""},
    +		{"IP_OLD_FW_ZERO", Const, 0, ""},
    +		{"IP_ONESBCAST", Const, 0, ""},
    +		{"IP_OPTIONS", Const, 0, ""},
    +		{"IP_ORIGDSTADDR", Const, 0, ""},
    +		{"IP_PASSSEC", Const, 0, ""},
    +		{"IP_PIPEX", Const, 1, ""},
    +		{"IP_PKTINFO", Const, 0, ""},
    +		{"IP_PKTOPTIONS", Const, 0, ""},
    +		{"IP_PMTUDISC", Const, 0, ""},
    +		{"IP_PMTUDISC_DO", Const, 0, ""},
    +		{"IP_PMTUDISC_DONT", Const, 0, ""},
    +		{"IP_PMTUDISC_PROBE", Const, 0, ""},
    +		{"IP_PMTUDISC_WANT", Const, 0, ""},
    +		{"IP_PORTRANGE", Const, 0, ""},
    +		{"IP_PORTRANGE_DEFAULT", Const, 0, ""},
    +		{"IP_PORTRANGE_HIGH", Const, 0, ""},
    +		{"IP_PORTRANGE_LOW", Const, 0, ""},
    +		{"IP_RECVDSTADDR", Const, 0, ""},
    +		{"IP_RECVDSTPORT", Const, 1, ""},
    +		{"IP_RECVERR", Const, 0, ""},
    +		{"IP_RECVIF", Const, 0, ""},
    +		{"IP_RECVOPTS", Const, 0, ""},
    +		{"IP_RECVORIGDSTADDR", Const, 0, ""},
    +		{"IP_RECVPKTINFO", Const, 0, ""},
    +		{"IP_RECVRETOPTS", Const, 0, ""},
    +		{"IP_RECVRTABLE", Const, 1, ""},
    +		{"IP_RECVTOS", Const, 0, ""},
    +		{"IP_RECVTTL", Const, 0, ""},
    +		{"IP_RETOPTS", Const, 0, ""},
    +		{"IP_RF", Const, 0, ""},
    +		{"IP_ROUTER_ALERT", Const, 0, ""},
    +		{"IP_RSVP_OFF", Const, 0, ""},
    +		{"IP_RSVP_ON", Const, 0, ""},
    +		{"IP_RSVP_VIF_OFF", Const, 0, ""},
    +		{"IP_RSVP_VIF_ON", Const, 0, ""},
    +		{"IP_RTABLE", Const, 1, ""},
    +		{"IP_SENDSRCADDR", Const, 0, ""},
    +		{"IP_STRIPHDR", Const, 0, ""},
    +		{"IP_TOS", Const, 0, ""},
    +		{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0, ""},
    +		{"IP_TRANSPARENT", Const, 0, ""},
    +		{"IP_TTL", Const, 0, ""},
    +		{"IP_UNBLOCK_SOURCE", Const, 0, ""},
    +		{"IP_XFRM_POLICY", Const, 0, ""},
    +		{"IPv6MTUInfo", Type, 2, ""},
    +		{"IPv6MTUInfo.Addr", Field, 2, ""},
    +		{"IPv6MTUInfo.Mtu", Field, 2, ""},
    +		{"IPv6Mreq", Type, 0, ""},
    +		{"IPv6Mreq.Interface", Field, 0, ""},
    +		{"IPv6Mreq.Multiaddr", Field, 0, ""},
    +		{"ISIG", Const, 0, ""},
    +		{"ISTRIP", Const, 0, ""},
    +		{"IUCLC", Const, 0, ""},
    +		{"IUTF8", Const, 0, ""},
    +		{"IXANY", Const, 0, ""},
    +		{"IXOFF", Const, 0, ""},
    +		{"IXON", Const, 0, ""},
    +		{"IfAddrmsg", Type, 0, ""},
    +		{"IfAddrmsg.Family", Field, 0, ""},
    +		{"IfAddrmsg.Flags", Field, 0, ""},
    +		{"IfAddrmsg.Index", Field, 0, ""},
    +		{"IfAddrmsg.Prefixlen", Field, 0, ""},
    +		{"IfAddrmsg.Scope", Field, 0, ""},
    +		{"IfAnnounceMsghdr", Type, 1, ""},
    +		{"IfAnnounceMsghdr.Hdrlen", Field, 2, ""},
    +		{"IfAnnounceMsghdr.Index", Field, 1, ""},
    +		{"IfAnnounceMsghdr.Msglen", Field, 1, ""},
    +		{"IfAnnounceMsghdr.Name", Field, 1, ""},
    +		{"IfAnnounceMsghdr.Type", Field, 1, ""},
    +		{"IfAnnounceMsghdr.Version", Field, 1, ""},
    +		{"IfAnnounceMsghdr.What", Field, 1, ""},
    +		{"IfData", Type, 0, ""},
    +		{"IfData.Addrlen", Field, 0, ""},
    +		{"IfData.Baudrate", Field, 0, ""},
    +		{"IfData.Capabilities", Field, 2, ""},
    +		{"IfData.Collisions", Field, 0, ""},
    +		{"IfData.Datalen", Field, 0, ""},
    +		{"IfData.Epoch", Field, 0, ""},
    +		{"IfData.Hdrlen", Field, 0, ""},
    +		{"IfData.Hwassist", Field, 0, ""},
    +		{"IfData.Ibytes", Field, 0, ""},
    +		{"IfData.Ierrors", Field, 0, ""},
    +		{"IfData.Imcasts", Field, 0, ""},
    +		{"IfData.Ipackets", Field, 0, ""},
    +		{"IfData.Iqdrops", Field, 0, ""},
    +		{"IfData.Lastchange", Field, 0, ""},
    +		{"IfData.Link_state", Field, 0, ""},
    +		{"IfData.Mclpool", Field, 2, ""},
    +		{"IfData.Metric", Field, 0, ""},
    +		{"IfData.Mtu", Field, 0, ""},
    +		{"IfData.Noproto", Field, 0, ""},
    +		{"IfData.Obytes", Field, 0, ""},
    +		{"IfData.Oerrors", Field, 0, ""},
    +		{"IfData.Omcasts", Field, 0, ""},
    +		{"IfData.Opackets", Field, 0, ""},
    +		{"IfData.Pad", Field, 2, ""},
    +		{"IfData.Pad_cgo_0", Field, 2, ""},
    +		{"IfData.Pad_cgo_1", Field, 2, ""},
    +		{"IfData.Physical", Field, 0, ""},
    +		{"IfData.Recvquota", Field, 0, ""},
    +		{"IfData.Recvtiming", Field, 0, ""},
    +		{"IfData.Reserved1", Field, 0, ""},
    +		{"IfData.Reserved2", Field, 0, ""},
    +		{"IfData.Spare_char1", Field, 0, ""},
    +		{"IfData.Spare_char2", Field, 0, ""},
    +		{"IfData.Type", Field, 0, ""},
    +		{"IfData.Typelen", Field, 0, ""},
    +		{"IfData.Unused1", Field, 0, ""},
    +		{"IfData.Unused2", Field, 0, ""},
    +		{"IfData.Xmitquota", Field, 0, ""},
    +		{"IfData.Xmittiming", Field, 0, ""},
    +		{"IfInfomsg", Type, 0, ""},
    +		{"IfInfomsg.Change", Field, 0, ""},
    +		{"IfInfomsg.Family", Field, 0, ""},
    +		{"IfInfomsg.Flags", Field, 0, ""},
    +		{"IfInfomsg.Index", Field, 0, ""},
    +		{"IfInfomsg.Type", Field, 0, ""},
    +		{"IfInfomsg.X__ifi_pad", Field, 0, ""},
    +		{"IfMsghdr", Type, 0, ""},
    +		{"IfMsghdr.Addrs", Field, 0, ""},
    +		{"IfMsghdr.Data", Field, 0, ""},
    +		{"IfMsghdr.Flags", Field, 0, ""},
    +		{"IfMsghdr.Hdrlen", Field, 2, ""},
    +		{"IfMsghdr.Index", Field, 0, ""},
    +		{"IfMsghdr.Msglen", Field, 0, ""},
    +		{"IfMsghdr.Pad1", Field, 2, ""},
    +		{"IfMsghdr.Pad2", Field, 2, ""},
    +		{"IfMsghdr.Pad_cgo_0", Field, 0, ""},
    +		{"IfMsghdr.Pad_cgo_1", Field, 2, ""},
    +		{"IfMsghdr.Tableid", Field, 2, ""},
    +		{"IfMsghdr.Type", Field, 0, ""},
    +		{"IfMsghdr.Version", Field, 0, ""},
    +		{"IfMsghdr.Xflags", Field, 2, ""},
    +		{"IfaMsghdr", Type, 0, ""},
    +		{"IfaMsghdr.Addrs", Field, 0, ""},
    +		{"IfaMsghdr.Flags", Field, 0, ""},
    +		{"IfaMsghdr.Hdrlen", Field, 2, ""},
    +		{"IfaMsghdr.Index", Field, 0, ""},
    +		{"IfaMsghdr.Metric", Field, 0, ""},
    +		{"IfaMsghdr.Msglen", Field, 0, ""},
    +		{"IfaMsghdr.Pad1", Field, 2, ""},
    +		{"IfaMsghdr.Pad2", Field, 2, ""},
    +		{"IfaMsghdr.Pad_cgo_0", Field, 0, ""},
    +		{"IfaMsghdr.Tableid", Field, 2, ""},
    +		{"IfaMsghdr.Type", Field, 0, ""},
    +		{"IfaMsghdr.Version", Field, 0, ""},
    +		{"IfmaMsghdr", Type, 0, ""},
    +		{"IfmaMsghdr.Addrs", Field, 0, ""},
    +		{"IfmaMsghdr.Flags", Field, 0, ""},
    +		{"IfmaMsghdr.Index", Field, 0, ""},
    +		{"IfmaMsghdr.Msglen", Field, 0, ""},
    +		{"IfmaMsghdr.Pad_cgo_0", Field, 0, ""},
    +		{"IfmaMsghdr.Type", Field, 0, ""},
    +		{"IfmaMsghdr.Version", Field, 0, ""},
    +		{"IfmaMsghdr2", Type, 0, ""},
    +		{"IfmaMsghdr2.Addrs", Field, 0, ""},
    +		{"IfmaMsghdr2.Flags", Field, 0, ""},
    +		{"IfmaMsghdr2.Index", Field, 0, ""},
    +		{"IfmaMsghdr2.Msglen", Field, 0, ""},
    +		{"IfmaMsghdr2.Pad_cgo_0", Field, 0, ""},
    +		{"IfmaMsghdr2.Refcount", Field, 0, ""},
    +		{"IfmaMsghdr2.Type", Field, 0, ""},
    +		{"IfmaMsghdr2.Version", Field, 0, ""},
    +		{"ImplementsGetwd", Const, 0, ""},
    +		{"Inet4Pktinfo", Type, 0, ""},
    +		{"Inet4Pktinfo.Addr", Field, 0, ""},
    +		{"Inet4Pktinfo.Ifindex", Field, 0, ""},
    +		{"Inet4Pktinfo.Spec_dst", Field, 0, ""},
    +		{"Inet6Pktinfo", Type, 0, ""},
    +		{"Inet6Pktinfo.Addr", Field, 0, ""},
    +		{"Inet6Pktinfo.Ifindex", Field, 0, ""},
    +		{"InotifyAddWatch", Func, 0, "func(fd int, pathname string, mask uint32) (watchdesc int, err error)"},
    +		{"InotifyEvent", Type, 0, ""},
    +		{"InotifyEvent.Cookie", Field, 0, ""},
    +		{"InotifyEvent.Len", Field, 0, ""},
    +		{"InotifyEvent.Mask", Field, 0, ""},
    +		{"InotifyEvent.Name", Field, 0, ""},
    +		{"InotifyEvent.Wd", Field, 0, ""},
    +		{"InotifyInit", Func, 0, "func() (fd int, err error)"},
    +		{"InotifyInit1", Func, 0, "func(flags int) (fd int, err error)"},
    +		{"InotifyRmWatch", Func, 0, "func(fd int, watchdesc uint32) (success int, err error)"},
    +		{"InterfaceAddrMessage", Type, 0, ""},
    +		{"InterfaceAddrMessage.Data", Field, 0, ""},
    +		{"InterfaceAddrMessage.Header", Field, 0, ""},
    +		{"InterfaceAnnounceMessage", Type, 1, ""},
    +		{"InterfaceAnnounceMessage.Header", Field, 1, ""},
    +		{"InterfaceInfo", Type, 0, ""},
    +		{"InterfaceInfo.Address", Field, 0, ""},
    +		{"InterfaceInfo.BroadcastAddress", Field, 0, ""},
    +		{"InterfaceInfo.Flags", Field, 0, ""},
    +		{"InterfaceInfo.Netmask", Field, 0, ""},
    +		{"InterfaceMessage", Type, 0, ""},
    +		{"InterfaceMessage.Data", Field, 0, ""},
    +		{"InterfaceMessage.Header", Field, 0, ""},
    +		{"InterfaceMulticastAddrMessage", Type, 0, ""},
    +		{"InterfaceMulticastAddrMessage.Data", Field, 0, ""},
    +		{"InterfaceMulticastAddrMessage.Header", Field, 0, ""},
    +		{"InvalidHandle", Const, 0, ""},
    +		{"Ioperm", Func, 0, "func(from int, num int, on int) (err error)"},
    +		{"Iopl", Func, 0, "func(level int) (err error)"},
    +		{"Iovec", Type, 0, ""},
    +		{"Iovec.Base", Field, 0, ""},
    +		{"Iovec.Len", Field, 0, ""},
    +		{"IpAdapterInfo", Type, 0, ""},
    +		{"IpAdapterInfo.AdapterName", Field, 0, ""},
    +		{"IpAdapterInfo.Address", Field, 0, ""},
    +		{"IpAdapterInfo.AddressLength", Field, 0, ""},
    +		{"IpAdapterInfo.ComboIndex", Field, 0, ""},
    +		{"IpAdapterInfo.CurrentIpAddress", Field, 0, ""},
    +		{"IpAdapterInfo.Description", Field, 0, ""},
    +		{"IpAdapterInfo.DhcpEnabled", Field, 0, ""},
    +		{"IpAdapterInfo.DhcpServer", Field, 0, ""},
    +		{"IpAdapterInfo.GatewayList", Field, 0, ""},
    +		{"IpAdapterInfo.HaveWins", Field, 0, ""},
    +		{"IpAdapterInfo.Index", Field, 0, ""},
    +		{"IpAdapterInfo.IpAddressList", Field, 0, ""},
    +		{"IpAdapterInfo.LeaseExpires", Field, 0, ""},
    +		{"IpAdapterInfo.LeaseObtained", Field, 0, ""},
    +		{"IpAdapterInfo.Next", Field, 0, ""},
    +		{"IpAdapterInfo.PrimaryWinsServer", Field, 0, ""},
    +		{"IpAdapterInfo.SecondaryWinsServer", Field, 0, ""},
    +		{"IpAdapterInfo.Type", Field, 0, ""},
    +		{"IpAddrString", Type, 0, ""},
    +		{"IpAddrString.Context", Field, 0, ""},
    +		{"IpAddrString.IpAddress", Field, 0, ""},
    +		{"IpAddrString.IpMask", Field, 0, ""},
    +		{"IpAddrString.Next", Field, 0, ""},
    +		{"IpAddressString", Type, 0, ""},
    +		{"IpAddressString.String", Field, 0, ""},
    +		{"IpMaskString", Type, 0, ""},
    +		{"IpMaskString.String", Field, 2, ""},
    +		{"Issetugid", Func, 0, ""},
    +		{"KEY_ALL_ACCESS", Const, 0, ""},
    +		{"KEY_CREATE_LINK", Const, 0, ""},
    +		{"KEY_CREATE_SUB_KEY", Const, 0, ""},
    +		{"KEY_ENUMERATE_SUB_KEYS", Const, 0, ""},
    +		{"KEY_EXECUTE", Const, 0, ""},
    +		{"KEY_NOTIFY", Const, 0, ""},
    +		{"KEY_QUERY_VALUE", Const, 0, ""},
    +		{"KEY_READ", Const, 0, ""},
    +		{"KEY_SET_VALUE", Const, 0, ""},
    +		{"KEY_WOW64_32KEY", Const, 0, ""},
    +		{"KEY_WOW64_64KEY", Const, 0, ""},
    +		{"KEY_WRITE", Const, 0, ""},
    +		{"Kevent", Func, 0, ""},
    +		{"Kevent_t", Type, 0, ""},
    +		{"Kevent_t.Data", Field, 0, ""},
    +		{"Kevent_t.Fflags", Field, 0, ""},
    +		{"Kevent_t.Filter", Field, 0, ""},
    +		{"Kevent_t.Flags", Field, 0, ""},
    +		{"Kevent_t.Ident", Field, 0, ""},
    +		{"Kevent_t.Pad_cgo_0", Field, 2, ""},
    +		{"Kevent_t.Udata", Field, 0, ""},
    +		{"Kill", Func, 0, "func(pid int, sig Signal) (err error)"},
    +		{"Klogctl", Func, 0, "func(typ int, buf []byte) (n int, err error)"},
    +		{"Kqueue", Func, 0, ""},
    +		{"LANG_ENGLISH", Const, 0, ""},
    +		{"LAYERED_PROTOCOL", Const, 2, ""},
    +		{"LCNT_OVERLOAD_FLUSH", Const, 1, ""},
    +		{"LINUX_REBOOT_CMD_CAD_OFF", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_CAD_ON", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_HALT", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_KEXEC", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_POWER_OFF", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_RESTART", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_RESTART2", Const, 0, ""},
    +		{"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0, ""},
    +		{"LINUX_REBOOT_MAGIC1", Const, 0, ""},
    +		{"LINUX_REBOOT_MAGIC2", Const, 0, ""},
    +		{"LOCK_EX", Const, 0, ""},
    +		{"LOCK_NB", Const, 0, ""},
    +		{"LOCK_SH", Const, 0, ""},
    +		{"LOCK_UN", Const, 0, ""},
    +		{"LazyDLL", Type, 0, ""},
    +		{"LazyDLL.Name", Field, 0, ""},
    +		{"LazyProc", Type, 0, ""},
    +		{"LazyProc.Name", Field, 0, ""},
    +		{"Lchown", Func, 0, "func(path string, uid int, gid int) (err error)"},
    +		{"Linger", Type, 0, ""},
    +		{"Linger.Linger", Field, 0, ""},
    +		{"Linger.Onoff", Field, 0, ""},
    +		{"Link", Func, 0, "func(oldpath string, newpath string) (err error)"},
    +		{"Listen", Func, 0, "func(s int, n int) (err error)"},
    +		{"Listxattr", Func, 1, "func(path string, dest []byte) (sz int, err error)"},
    +		{"LoadCancelIoEx", Func, 1, ""},
    +		{"LoadConnectEx", Func, 1, ""},
    +		{"LoadCreateSymbolicLink", Func, 4, ""},
    +		{"LoadDLL", Func, 0, ""},
    +		{"LoadGetAddrInfo", Func, 1, ""},
    +		{"LoadLibrary", Func, 0, ""},
    +		{"LoadSetFileCompletionNotificationModes", Func, 2, ""},
    +		{"LocalFree", Func, 0, ""},
    +		{"Log2phys_t", Type, 0, ""},
    +		{"Log2phys_t.Contigbytes", Field, 0, ""},
    +		{"Log2phys_t.Devoffset", Field, 0, ""},
    +		{"Log2phys_t.Flags", Field, 0, ""},
    +		{"LookupAccountName", Func, 0, ""},
    +		{"LookupAccountSid", Func, 0, ""},
    +		{"LookupSID", Func, 0, ""},
    +		{"LsfJump", Func, 0, "func(code int, k int, jt int, jf int) *SockFilter"},
    +		{"LsfSocket", Func, 0, "func(ifindex int, proto int) (int, error)"},
    +		{"LsfStmt", Func, 0, "func(code int, k int) *SockFilter"},
    +		{"Lstat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
    +		{"MADV_AUTOSYNC", Const, 1, ""},
    +		{"MADV_CAN_REUSE", Const, 0, ""},
    +		{"MADV_CORE", Const, 1, ""},
    +		{"MADV_DOFORK", Const, 0, ""},
    +		{"MADV_DONTFORK", Const, 0, ""},
    +		{"MADV_DONTNEED", Const, 0, ""},
    +		{"MADV_FREE", Const, 0, ""},
    +		{"MADV_FREE_REUSABLE", Const, 0, ""},
    +		{"MADV_FREE_REUSE", Const, 0, ""},
    +		{"MADV_HUGEPAGE", Const, 0, ""},
    +		{"MADV_HWPOISON", Const, 0, ""},
    +		{"MADV_MERGEABLE", Const, 0, ""},
    +		{"MADV_NOCORE", Const, 1, ""},
    +		{"MADV_NOHUGEPAGE", Const, 0, ""},
    +		{"MADV_NORMAL", Const, 0, ""},
    +		{"MADV_NOSYNC", Const, 1, ""},
    +		{"MADV_PROTECT", Const, 1, ""},
    +		{"MADV_RANDOM", Const, 0, ""},
    +		{"MADV_REMOVE", Const, 0, ""},
    +		{"MADV_SEQUENTIAL", Const, 0, ""},
    +		{"MADV_SPACEAVAIL", Const, 3, ""},
    +		{"MADV_UNMERGEABLE", Const, 0, ""},
    +		{"MADV_WILLNEED", Const, 0, ""},
    +		{"MADV_ZERO_WIRED_PAGES", Const, 0, ""},
    +		{"MAP_32BIT", Const, 0, ""},
    +		{"MAP_ALIGNED_SUPER", Const, 3, ""},
    +		{"MAP_ALIGNMENT_16MB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_1TB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_256TB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_4GB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_64KB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_64PB", Const, 3, ""},
    +		{"MAP_ALIGNMENT_MASK", Const, 3, ""},
    +		{"MAP_ALIGNMENT_SHIFT", Const, 3, ""},
    +		{"MAP_ANON", Const, 0, ""},
    +		{"MAP_ANONYMOUS", Const, 0, ""},
    +		{"MAP_COPY", Const, 0, ""},
    +		{"MAP_DENYWRITE", Const, 0, ""},
    +		{"MAP_EXECUTABLE", Const, 0, ""},
    +		{"MAP_FILE", Const, 0, ""},
    +		{"MAP_FIXED", Const, 0, ""},
    +		{"MAP_FLAGMASK", Const, 3, ""},
    +		{"MAP_GROWSDOWN", Const, 0, ""},
    +		{"MAP_HASSEMAPHORE", Const, 0, ""},
    +		{"MAP_HUGETLB", Const, 0, ""},
    +		{"MAP_INHERIT", Const, 3, ""},
    +		{"MAP_INHERIT_COPY", Const, 3, ""},
    +		{"MAP_INHERIT_DEFAULT", Const, 3, ""},
    +		{"MAP_INHERIT_DONATE_COPY", Const, 3, ""},
    +		{"MAP_INHERIT_NONE", Const, 3, ""},
    +		{"MAP_INHERIT_SHARE", Const, 3, ""},
    +		{"MAP_JIT", Const, 0, ""},
    +		{"MAP_LOCKED", Const, 0, ""},
    +		{"MAP_NOCACHE", Const, 0, ""},
    +		{"MAP_NOCORE", Const, 1, ""},
    +		{"MAP_NOEXTEND", Const, 0, ""},
    +		{"MAP_NONBLOCK", Const, 0, ""},
    +		{"MAP_NORESERVE", Const, 0, ""},
    +		{"MAP_NOSYNC", Const, 1, ""},
    +		{"MAP_POPULATE", Const, 0, ""},
    +		{"MAP_PREFAULT_READ", Const, 1, ""},
    +		{"MAP_PRIVATE", Const, 0, ""},
    +		{"MAP_RENAME", Const, 0, ""},
    +		{"MAP_RESERVED0080", Const, 0, ""},
    +		{"MAP_RESERVED0100", Const, 1, ""},
    +		{"MAP_SHARED", Const, 0, ""},
    +		{"MAP_STACK", Const, 0, ""},
    +		{"MAP_TRYFIXED", Const, 3, ""},
    +		{"MAP_TYPE", Const, 0, ""},
    +		{"MAP_WIRED", Const, 3, ""},
    +		{"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4, ""},
    +		{"MAXLEN_IFDESCR", Const, 0, ""},
    +		{"MAXLEN_PHYSADDR", Const, 0, ""},
    +		{"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0, ""},
    +		{"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0, ""},
    +		{"MAX_ADAPTER_NAME_LENGTH", Const, 0, ""},
    +		{"MAX_COMPUTERNAME_LENGTH", Const, 0, ""},
    +		{"MAX_INTERFACE_NAME_LEN", Const, 0, ""},
    +		{"MAX_LONG_PATH", Const, 0, ""},
    +		{"MAX_PATH", Const, 0, ""},
    +		{"MAX_PROTOCOL_CHAIN", Const, 2, ""},
    +		{"MCL_CURRENT", Const, 0, ""},
    +		{"MCL_FUTURE", Const, 0, ""},
    +		{"MNT_DETACH", Const, 0, ""},
    +		{"MNT_EXPIRE", Const, 0, ""},
    +		{"MNT_FORCE", Const, 0, ""},
    +		{"MSG_BCAST", Const, 1, ""},
    +		{"MSG_CMSG_CLOEXEC", Const, 0, ""},
    +		{"MSG_COMPAT", Const, 0, ""},
    +		{"MSG_CONFIRM", Const, 0, ""},
    +		{"MSG_CONTROLMBUF", Const, 1, ""},
    +		{"MSG_CTRUNC", Const, 0, ""},
    +		{"MSG_DONTROUTE", Const, 0, ""},
    +		{"MSG_DONTWAIT", Const, 0, ""},
    +		{"MSG_EOF", Const, 0, ""},
    +		{"MSG_EOR", Const, 0, ""},
    +		{"MSG_ERRQUEUE", Const, 0, ""},
    +		{"MSG_FASTOPEN", Const, 1, ""},
    +		{"MSG_FIN", Const, 0, ""},
    +		{"MSG_FLUSH", Const, 0, ""},
    +		{"MSG_HAVEMORE", Const, 0, ""},
    +		{"MSG_HOLD", Const, 0, ""},
    +		{"MSG_IOVUSRSPACE", Const, 1, ""},
    +		{"MSG_LENUSRSPACE", Const, 1, ""},
    +		{"MSG_MCAST", Const, 1, ""},
    +		{"MSG_MORE", Const, 0, ""},
    +		{"MSG_NAMEMBUF", Const, 1, ""},
    +		{"MSG_NBIO", Const, 0, ""},
    +		{"MSG_NEEDSA", Const, 0, ""},
    +		{"MSG_NOSIGNAL", Const, 0, ""},
    +		{"MSG_NOTIFICATION", Const, 0, ""},
    +		{"MSG_OOB", Const, 0, ""},
    +		{"MSG_PEEK", Const, 0, ""},
    +		{"MSG_PROXY", Const, 0, ""},
    +		{"MSG_RCVMORE", Const, 0, ""},
    +		{"MSG_RST", Const, 0, ""},
    +		{"MSG_SEND", Const, 0, ""},
    +		{"MSG_SYN", Const, 0, ""},
    +		{"MSG_TRUNC", Const, 0, ""},
    +		{"MSG_TRYHARD", Const, 0, ""},
    +		{"MSG_USERFLAGS", Const, 1, ""},
    +		{"MSG_WAITALL", Const, 0, ""},
    +		{"MSG_WAITFORONE", Const, 0, ""},
    +		{"MSG_WAITSTREAM", Const, 0, ""},
    +		{"MS_ACTIVE", Const, 0, ""},
    +		{"MS_ASYNC", Const, 0, ""},
    +		{"MS_BIND", Const, 0, ""},
    +		{"MS_DEACTIVATE", Const, 0, ""},
    +		{"MS_DIRSYNC", Const, 0, ""},
    +		{"MS_INVALIDATE", Const, 0, ""},
    +		{"MS_I_VERSION", Const, 0, ""},
    +		{"MS_KERNMOUNT", Const, 0, ""},
    +		{"MS_KILLPAGES", Const, 0, ""},
    +		{"MS_MANDLOCK", Const, 0, ""},
    +		{"MS_MGC_MSK", Const, 0, ""},
    +		{"MS_MGC_VAL", Const, 0, ""},
    +		{"MS_MOVE", Const, 0, ""},
    +		{"MS_NOATIME", Const, 0, ""},
    +		{"MS_NODEV", Const, 0, ""},
    +		{"MS_NODIRATIME", Const, 0, ""},
    +		{"MS_NOEXEC", Const, 0, ""},
    +		{"MS_NOSUID", Const, 0, ""},
    +		{"MS_NOUSER", Const, 0, ""},
    +		{"MS_POSIXACL", Const, 0, ""},
    +		{"MS_PRIVATE", Const, 0, ""},
    +		{"MS_RDONLY", Const, 0, ""},
    +		{"MS_REC", Const, 0, ""},
    +		{"MS_RELATIME", Const, 0, ""},
    +		{"MS_REMOUNT", Const, 0, ""},
    +		{"MS_RMT_MASK", Const, 0, ""},
    +		{"MS_SHARED", Const, 0, ""},
    +		{"MS_SILENT", Const, 0, ""},
    +		{"MS_SLAVE", Const, 0, ""},
    +		{"MS_STRICTATIME", Const, 0, ""},
    +		{"MS_SYNC", Const, 0, ""},
    +		{"MS_SYNCHRONOUS", Const, 0, ""},
    +		{"MS_UNBINDABLE", Const, 0, ""},
    +		{"Madvise", Func, 0, "func(b []byte, advice int) (err error)"},
    +		{"MapViewOfFile", Func, 0, ""},
    +		{"MaxTokenInfoClass", Const, 0, ""},
    +		{"Mclpool", Type, 2, ""},
    +		{"Mclpool.Alive", Field, 2, ""},
    +		{"Mclpool.Cwm", Field, 2, ""},
    +		{"Mclpool.Grown", Field, 2, ""},
    +		{"Mclpool.Hwm", Field, 2, ""},
    +		{"Mclpool.Lwm", Field, 2, ""},
    +		{"MibIfRow", Type, 0, ""},
    +		{"MibIfRow.AdminStatus", Field, 0, ""},
    +		{"MibIfRow.Descr", Field, 0, ""},
    +		{"MibIfRow.DescrLen", Field, 0, ""},
    +		{"MibIfRow.InDiscards", Field, 0, ""},
    +		{"MibIfRow.InErrors", Field, 0, ""},
    +		{"MibIfRow.InNUcastPkts", Field, 0, ""},
    +		{"MibIfRow.InOctets", Field, 0, ""},
    +		{"MibIfRow.InUcastPkts", Field, 0, ""},
    +		{"MibIfRow.InUnknownProtos", Field, 0, ""},
    +		{"MibIfRow.Index", Field, 0, ""},
    +		{"MibIfRow.LastChange", Field, 0, ""},
    +		{"MibIfRow.Mtu", Field, 0, ""},
    +		{"MibIfRow.Name", Field, 0, ""},
    +		{"MibIfRow.OperStatus", Field, 0, ""},
    +		{"MibIfRow.OutDiscards", Field, 0, ""},
    +		{"MibIfRow.OutErrors", Field, 0, ""},
    +		{"MibIfRow.OutNUcastPkts", Field, 0, ""},
    +		{"MibIfRow.OutOctets", Field, 0, ""},
    +		{"MibIfRow.OutQLen", Field, 0, ""},
    +		{"MibIfRow.OutUcastPkts", Field, 0, ""},
    +		{"MibIfRow.PhysAddr", Field, 0, ""},
    +		{"MibIfRow.PhysAddrLen", Field, 0, ""},
    +		{"MibIfRow.Speed", Field, 0, ""},
    +		{"MibIfRow.Type", Field, 0, ""},
    +		{"Mkdir", Func, 0, "func(path string, mode uint32) (err error)"},
    +		{"Mkdirat", Func, 0, "func(dirfd int, path string, mode uint32) (err error)"},
    +		{"Mkfifo", Func, 0, "func(path string, mode uint32) (err error)"},
    +		{"Mknod", Func, 0, "func(path string, mode uint32, dev int) (err error)"},
    +		{"Mknodat", Func, 0, "func(dirfd int, path string, mode uint32, dev int) (err error)"},
    +		{"Mlock", Func, 0, "func(b []byte) (err error)"},
    +		{"Mlockall", Func, 0, "func(flags int) (err error)"},
    +		{"Mmap", Func, 0, "func(fd int, offset int64, length int, prot int, flags int) (data []byte, err error)"},
    +		{"Mount", Func, 0, "func(source string, target string, fstype string, flags uintptr, data string) (err error)"},
    +		{"MoveFile", Func, 0, ""},
    +		{"Mprotect", Func, 0, "func(b []byte, prot int) (err error)"},
    +		{"Msghdr", Type, 0, ""},
    +		{"Msghdr.Control", Field, 0, ""},
    +		{"Msghdr.Controllen", Field, 0, ""},
    +		{"Msghdr.Flags", Field, 0, ""},
    +		{"Msghdr.Iov", Field, 0, ""},
    +		{"Msghdr.Iovlen", Field, 0, ""},
    +		{"Msghdr.Name", Field, 0, ""},
    +		{"Msghdr.Namelen", Field, 0, ""},
    +		{"Msghdr.Pad_cgo_0", Field, 0, ""},
    +		{"Msghdr.Pad_cgo_1", Field, 0, ""},
    +		{"Munlock", Func, 0, "func(b []byte) (err error)"},
    +		{"Munlockall", Func, 0, "func() (err error)"},
    +		{"Munmap", Func, 0, "func(b []byte) (err error)"},
    +		{"MustLoadDLL", Func, 0, ""},
    +		{"NAME_MAX", Const, 0, ""},
    +		{"NETLINK_ADD_MEMBERSHIP", Const, 0, ""},
    +		{"NETLINK_AUDIT", Const, 0, ""},
    +		{"NETLINK_BROADCAST_ERROR", Const, 0, ""},
    +		{"NETLINK_CONNECTOR", Const, 0, ""},
    +		{"NETLINK_DNRTMSG", Const, 0, ""},
    +		{"NETLINK_DROP_MEMBERSHIP", Const, 0, ""},
    +		{"NETLINK_ECRYPTFS", Const, 0, ""},
    +		{"NETLINK_FIB_LOOKUP", Const, 0, ""},
    +		{"NETLINK_FIREWALL", Const, 0, ""},
    +		{"NETLINK_GENERIC", Const, 0, ""},
    +		{"NETLINK_INET_DIAG", Const, 0, ""},
    +		{"NETLINK_IP6_FW", Const, 0, ""},
    +		{"NETLINK_ISCSI", Const, 0, ""},
    +		{"NETLINK_KOBJECT_UEVENT", Const, 0, ""},
    +		{"NETLINK_NETFILTER", Const, 0, ""},
    +		{"NETLINK_NFLOG", Const, 0, ""},
    +		{"NETLINK_NO_ENOBUFS", Const, 0, ""},
    +		{"NETLINK_PKTINFO", Const, 0, ""},
    +		{"NETLINK_RDMA", Const, 0, ""},
    +		{"NETLINK_ROUTE", Const, 0, ""},
    +		{"NETLINK_SCSITRANSPORT", Const, 0, ""},
    +		{"NETLINK_SELINUX", Const, 0, ""},
    +		{"NETLINK_UNUSED", Const, 0, ""},
    +		{"NETLINK_USERSOCK", Const, 0, ""},
    +		{"NETLINK_XFRM", Const, 0, ""},
    +		{"NET_RT_DUMP", Const, 0, ""},
    +		{"NET_RT_DUMP2", Const, 0, ""},
    +		{"NET_RT_FLAGS", Const, 0, ""},
    +		{"NET_RT_IFLIST", Const, 0, ""},
    +		{"NET_RT_IFLIST2", Const, 0, ""},
    +		{"NET_RT_IFLISTL", Const, 1, ""},
    +		{"NET_RT_IFMALIST", Const, 0, ""},
    +		{"NET_RT_MAXID", Const, 0, ""},
    +		{"NET_RT_OIFLIST", Const, 1, ""},
    +		{"NET_RT_OOIFLIST", Const, 1, ""},
    +		{"NET_RT_STAT", Const, 0, ""},
    +		{"NET_RT_STATS", Const, 1, ""},
    +		{"NET_RT_TABLE", Const, 1, ""},
    +		{"NET_RT_TRASH", Const, 0, ""},
    +		{"NLA_ALIGNTO", Const, 0, ""},
    +		{"NLA_F_NESTED", Const, 0, ""},
    +		{"NLA_F_NET_BYTEORDER", Const, 0, ""},
    +		{"NLA_HDRLEN", Const, 0, ""},
    +		{"NLMSG_ALIGNTO", Const, 0, ""},
    +		{"NLMSG_DONE", Const, 0, ""},
    +		{"NLMSG_ERROR", Const, 0, ""},
    +		{"NLMSG_HDRLEN", Const, 0, ""},
    +		{"NLMSG_MIN_TYPE", Const, 0, ""},
    +		{"NLMSG_NOOP", Const, 0, ""},
    +		{"NLMSG_OVERRUN", Const, 0, ""},
    +		{"NLM_F_ACK", Const, 0, ""},
    +		{"NLM_F_APPEND", Const, 0, ""},
    +		{"NLM_F_ATOMIC", Const, 0, ""},
    +		{"NLM_F_CREATE", Const, 0, ""},
    +		{"NLM_F_DUMP", Const, 0, ""},
    +		{"NLM_F_ECHO", Const, 0, ""},
    +		{"NLM_F_EXCL", Const, 0, ""},
    +		{"NLM_F_MATCH", Const, 0, ""},
    +		{"NLM_F_MULTI", Const, 0, ""},
    +		{"NLM_F_REPLACE", Const, 0, ""},
    +		{"NLM_F_REQUEST", Const, 0, ""},
    +		{"NLM_F_ROOT", Const, 0, ""},
    +		{"NOFLSH", Const, 0, ""},
    +		{"NOTE_ABSOLUTE", Const, 0, ""},
    +		{"NOTE_ATTRIB", Const, 0, ""},
    +		{"NOTE_BACKGROUND", Const, 16, ""},
    +		{"NOTE_CHILD", Const, 0, ""},
    +		{"NOTE_CRITICAL", Const, 16, ""},
    +		{"NOTE_DELETE", Const, 0, ""},
    +		{"NOTE_EOF", Const, 1, ""},
    +		{"NOTE_EXEC", Const, 0, ""},
    +		{"NOTE_EXIT", Const, 0, ""},
    +		{"NOTE_EXITSTATUS", Const, 0, ""},
    +		{"NOTE_EXIT_CSERROR", Const, 16, ""},
    +		{"NOTE_EXIT_DECRYPTFAIL", Const, 16, ""},
    +		{"NOTE_EXIT_DETAIL", Const, 16, ""},
    +		{"NOTE_EXIT_DETAIL_MASK", Const, 16, ""},
    +		{"NOTE_EXIT_MEMORY", Const, 16, ""},
    +		{"NOTE_EXIT_REPARENTED", Const, 16, ""},
    +		{"NOTE_EXTEND", Const, 0, ""},
    +		{"NOTE_FFAND", Const, 0, ""},
    +		{"NOTE_FFCOPY", Const, 0, ""},
    +		{"NOTE_FFCTRLMASK", Const, 0, ""},
    +		{"NOTE_FFLAGSMASK", Const, 0, ""},
    +		{"NOTE_FFNOP", Const, 0, ""},
    +		{"NOTE_FFOR", Const, 0, ""},
    +		{"NOTE_FORK", Const, 0, ""},
    +		{"NOTE_LEEWAY", Const, 16, ""},
    +		{"NOTE_LINK", Const, 0, ""},
    +		{"NOTE_LOWAT", Const, 0, ""},
    +		{"NOTE_NONE", Const, 0, ""},
    +		{"NOTE_NSECONDS", Const, 0, ""},
    +		{"NOTE_PCTRLMASK", Const, 0, ""},
    +		{"NOTE_PDATAMASK", Const, 0, ""},
    +		{"NOTE_REAP", Const, 0, ""},
    +		{"NOTE_RENAME", Const, 0, ""},
    +		{"NOTE_RESOURCEEND", Const, 0, ""},
    +		{"NOTE_REVOKE", Const, 0, ""},
    +		{"NOTE_SECONDS", Const, 0, ""},
    +		{"NOTE_SIGNAL", Const, 0, ""},
    +		{"NOTE_TRACK", Const, 0, ""},
    +		{"NOTE_TRACKERR", Const, 0, ""},
    +		{"NOTE_TRIGGER", Const, 0, ""},
    +		{"NOTE_TRUNCATE", Const, 1, ""},
    +		{"NOTE_USECONDS", Const, 0, ""},
    +		{"NOTE_VM_ERROR", Const, 0, ""},
    +		{"NOTE_VM_PRESSURE", Const, 0, ""},
    +		{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0, ""},
    +		{"NOTE_VM_PRESSURE_TERMINATE", Const, 0, ""},
    +		{"NOTE_WRITE", Const, 0, ""},
    +		{"NameCanonical", Const, 0, ""},
    +		{"NameCanonicalEx", Const, 0, ""},
    +		{"NameDisplay", Const, 0, ""},
    +		{"NameDnsDomain", Const, 0, ""},
    +		{"NameFullyQualifiedDN", Const, 0, ""},
    +		{"NameSamCompatible", Const, 0, ""},
    +		{"NameServicePrincipal", Const, 0, ""},
    +		{"NameUniqueId", Const, 0, ""},
    +		{"NameUnknown", Const, 0, ""},
    +		{"NameUserPrincipal", Const, 0, ""},
    +		{"Nanosleep", Func, 0, "func(time *Timespec, leftover *Timespec) (err error)"},
    +		{"NetApiBufferFree", Func, 0, ""},
    +		{"NetGetJoinInformation", Func, 2, ""},
    +		{"NetSetupDomainName", Const, 2, ""},
    +		{"NetSetupUnjoined", Const, 2, ""},
    +		{"NetSetupUnknownStatus", Const, 2, ""},
    +		{"NetSetupWorkgroupName", Const, 2, ""},
    +		{"NetUserGetInfo", Func, 0, ""},
    +		{"NetlinkMessage", Type, 0, ""},
    +		{"NetlinkMessage.Data", Field, 0, ""},
    +		{"NetlinkMessage.Header", Field, 0, ""},
    +		{"NetlinkRIB", Func, 0, "func(proto int, family int) ([]byte, error)"},
    +		{"NetlinkRouteAttr", Type, 0, ""},
    +		{"NetlinkRouteAttr.Attr", Field, 0, ""},
    +		{"NetlinkRouteAttr.Value", Field, 0, ""},
    +		{"NetlinkRouteRequest", Type, 0, ""},
    +		{"NetlinkRouteRequest.Data", Field, 0, ""},
    +		{"NetlinkRouteRequest.Header", Field, 0, ""},
    +		{"NewCallback", Func, 0, ""},
    +		{"NewCallbackCDecl", Func, 3, ""},
    +		{"NewLazyDLL", Func, 0, ""},
    +		{"NlAttr", Type, 0, ""},
    +		{"NlAttr.Len", Field, 0, ""},
    +		{"NlAttr.Type", Field, 0, ""},
    +		{"NlMsgerr", Type, 0, ""},
    +		{"NlMsgerr.Error", Field, 0, ""},
    +		{"NlMsgerr.Msg", Field, 0, ""},
    +		{"NlMsghdr", Type, 0, ""},
    +		{"NlMsghdr.Flags", Field, 0, ""},
    +		{"NlMsghdr.Len", Field, 0, ""},
    +		{"NlMsghdr.Pid", Field, 0, ""},
    +		{"NlMsghdr.Seq", Field, 0, ""},
    +		{"NlMsghdr.Type", Field, 0, ""},
    +		{"NsecToFiletime", Func, 0, ""},
    +		{"NsecToTimespec", Func, 0, "func(nsec int64) Timespec"},
    +		{"NsecToTimeval", Func, 0, "func(nsec int64) Timeval"},
    +		{"Ntohs", Func, 0, ""},
    +		{"OCRNL", Const, 0, ""},
    +		{"OFDEL", Const, 0, ""},
    +		{"OFILL", Const, 0, ""},
    +		{"OFIOGETBMAP", Const, 1, ""},
    +		{"OID_PKIX_KP_SERVER_AUTH", Var, 0, ""},
    +		{"OID_SERVER_GATED_CRYPTO", Var, 0, ""},
    +		{"OID_SGC_NETSCAPE", Var, 0, ""},
    +		{"OLCUC", Const, 0, ""},
    +		{"ONLCR", Const, 0, ""},
    +		{"ONLRET", Const, 0, ""},
    +		{"ONOCR", Const, 0, ""},
    +		{"ONOEOT", Const, 1, ""},
    +		{"OPEN_ALWAYS", Const, 0, ""},
    +		{"OPEN_EXISTING", Const, 0, ""},
    +		{"OPOST", Const, 0, ""},
    +		{"O_ACCMODE", Const, 0, ""},
    +		{"O_ALERT", Const, 0, ""},
    +		{"O_ALT_IO", Const, 1, ""},
    +		{"O_APPEND", Const, 0, ""},
    +		{"O_ASYNC", Const, 0, ""},
    +		{"O_CLOEXEC", Const, 0, ""},
    +		{"O_CREAT", Const, 0, ""},
    +		{"O_DIRECT", Const, 0, ""},
    +		{"O_DIRECTORY", Const, 0, ""},
    +		{"O_DP_GETRAWENCRYPTED", Const, 16, ""},
    +		{"O_DSYNC", Const, 0, ""},
    +		{"O_EVTONLY", Const, 0, ""},
    +		{"O_EXCL", Const, 0, ""},
    +		{"O_EXEC", Const, 0, ""},
    +		{"O_EXLOCK", Const, 0, ""},
    +		{"O_FSYNC", Const, 0, ""},
    +		{"O_LARGEFILE", Const, 0, ""},
    +		{"O_NDELAY", Const, 0, ""},
    +		{"O_NOATIME", Const, 0, ""},
    +		{"O_NOCTTY", Const, 0, ""},
    +		{"O_NOFOLLOW", Const, 0, ""},
    +		{"O_NONBLOCK", Const, 0, ""},
    +		{"O_NOSIGPIPE", Const, 1, ""},
    +		{"O_POPUP", Const, 0, ""},
    +		{"O_RDONLY", Const, 0, ""},
    +		{"O_RDWR", Const, 0, ""},
    +		{"O_RSYNC", Const, 0, ""},
    +		{"O_SHLOCK", Const, 0, ""},
    +		{"O_SYMLINK", Const, 0, ""},
    +		{"O_SYNC", Const, 0, ""},
    +		{"O_TRUNC", Const, 0, ""},
    +		{"O_TTY_INIT", Const, 0, ""},
    +		{"O_WRONLY", Const, 0, ""},
    +		{"Open", Func, 0, "func(path string, mode int, perm uint32) (fd int, err error)"},
    +		{"OpenCurrentProcessToken", Func, 0, ""},
    +		{"OpenProcess", Func, 0, ""},
    +		{"OpenProcessToken", Func, 0, ""},
    +		{"Openat", Func, 0, "func(dirfd int, path string, flags int, mode uint32) (fd int, err error)"},
    +		{"Overlapped", Type, 0, ""},
    +		{"Overlapped.HEvent", Field, 0, ""},
    +		{"Overlapped.Internal", Field, 0, ""},
    +		{"Overlapped.InternalHigh", Field, 0, ""},
    +		{"Overlapped.Offset", Field, 0, ""},
    +		{"Overlapped.OffsetHigh", Field, 0, ""},
    +		{"PACKET_ADD_MEMBERSHIP", Const, 0, ""},
    +		{"PACKET_BROADCAST", Const, 0, ""},
    +		{"PACKET_DROP_MEMBERSHIP", Const, 0, ""},
    +		{"PACKET_FASTROUTE", Const, 0, ""},
    +		{"PACKET_HOST", Const, 0, ""},
    +		{"PACKET_LOOPBACK", Const, 0, ""},
    +		{"PACKET_MR_ALLMULTI", Const, 0, ""},
    +		{"PACKET_MR_MULTICAST", Const, 0, ""},
    +		{"PACKET_MR_PROMISC", Const, 0, ""},
    +		{"PACKET_MULTICAST", Const, 0, ""},
    +		{"PACKET_OTHERHOST", Const, 0, ""},
    +		{"PACKET_OUTGOING", Const, 0, ""},
    +		{"PACKET_RECV_OUTPUT", Const, 0, ""},
    +		{"PACKET_RX_RING", Const, 0, ""},
    +		{"PACKET_STATISTICS", Const, 0, ""},
    +		{"PAGE_EXECUTE_READ", Const, 0, ""},
    +		{"PAGE_EXECUTE_READWRITE", Const, 0, ""},
    +		{"PAGE_EXECUTE_WRITECOPY", Const, 0, ""},
    +		{"PAGE_READONLY", Const, 0, ""},
    +		{"PAGE_READWRITE", Const, 0, ""},
    +		{"PAGE_WRITECOPY", Const, 0, ""},
    +		{"PARENB", Const, 0, ""},
    +		{"PARMRK", Const, 0, ""},
    +		{"PARODD", Const, 0, ""},
    +		{"PENDIN", Const, 0, ""},
    +		{"PFL_HIDDEN", Const, 2, ""},
    +		{"PFL_MATCHES_PROTOCOL_ZERO", Const, 2, ""},
    +		{"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2, ""},
    +		{"PFL_NETWORKDIRECT_PROVIDER", Const, 2, ""},
    +		{"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2, ""},
    +		{"PF_FLUSH", Const, 1, ""},
    +		{"PKCS_7_ASN_ENCODING", Const, 0, ""},
    +		{"PMC5_PIPELINE_FLUSH", Const, 1, ""},
    +		{"PRIO_PGRP", Const, 2, ""},
    +		{"PRIO_PROCESS", Const, 2, ""},
    +		{"PRIO_USER", Const, 2, ""},
    +		{"PRI_IOFLUSH", Const, 1, ""},
    +		{"PROCESS_QUERY_INFORMATION", Const, 0, ""},
    +		{"PROCESS_TERMINATE", Const, 2, ""},
    +		{"PROT_EXEC", Const, 0, ""},
    +		{"PROT_GROWSDOWN", Const, 0, ""},
    +		{"PROT_GROWSUP", Const, 0, ""},
    +		{"PROT_NONE", Const, 0, ""},
    +		{"PROT_READ", Const, 0, ""},
    +		{"PROT_WRITE", Const, 0, ""},
    +		{"PROV_DH_SCHANNEL", Const, 0, ""},
    +		{"PROV_DSS", Const, 0, ""},
    +		{"PROV_DSS_DH", Const, 0, ""},
    +		{"PROV_EC_ECDSA_FULL", Const, 0, ""},
    +		{"PROV_EC_ECDSA_SIG", Const, 0, ""},
    +		{"PROV_EC_ECNRA_FULL", Const, 0, ""},
    +		{"PROV_EC_ECNRA_SIG", Const, 0, ""},
    +		{"PROV_FORTEZZA", Const, 0, ""},
    +		{"PROV_INTEL_SEC", Const, 0, ""},
    +		{"PROV_MS_EXCHANGE", Const, 0, ""},
    +		{"PROV_REPLACE_OWF", Const, 0, ""},
    +		{"PROV_RNG", Const, 0, ""},
    +		{"PROV_RSA_AES", Const, 0, ""},
    +		{"PROV_RSA_FULL", Const, 0, ""},
    +		{"PROV_RSA_SCHANNEL", Const, 0, ""},
    +		{"PROV_RSA_SIG", Const, 0, ""},
    +		{"PROV_SPYRUS_LYNKS", Const, 0, ""},
    +		{"PROV_SSL", Const, 0, ""},
    +		{"PR_CAPBSET_DROP", Const, 0, ""},
    +		{"PR_CAPBSET_READ", Const, 0, ""},
    +		{"PR_CLEAR_SECCOMP_FILTER", Const, 0, ""},
    +		{"PR_ENDIAN_BIG", Const, 0, ""},
    +		{"PR_ENDIAN_LITTLE", Const, 0, ""},
    +		{"PR_ENDIAN_PPC_LITTLE", Const, 0, ""},
    +		{"PR_FPEMU_NOPRINT", Const, 0, ""},
    +		{"PR_FPEMU_SIGFPE", Const, 0, ""},
    +		{"PR_FP_EXC_ASYNC", Const, 0, ""},
    +		{"PR_FP_EXC_DISABLED", Const, 0, ""},
    +		{"PR_FP_EXC_DIV", Const, 0, ""},
    +		{"PR_FP_EXC_INV", Const, 0, ""},
    +		{"PR_FP_EXC_NONRECOV", Const, 0, ""},
    +		{"PR_FP_EXC_OVF", Const, 0, ""},
    +		{"PR_FP_EXC_PRECISE", Const, 0, ""},
    +		{"PR_FP_EXC_RES", Const, 0, ""},
    +		{"PR_FP_EXC_SW_ENABLE", Const, 0, ""},
    +		{"PR_FP_EXC_UND", Const, 0, ""},
    +		{"PR_GET_DUMPABLE", Const, 0, ""},
    +		{"PR_GET_ENDIAN", Const, 0, ""},
    +		{"PR_GET_FPEMU", Const, 0, ""},
    +		{"PR_GET_FPEXC", Const, 0, ""},
    +		{"PR_GET_KEEPCAPS", Const, 0, ""},
    +		{"PR_GET_NAME", Const, 0, ""},
    +		{"PR_GET_PDEATHSIG", Const, 0, ""},
    +		{"PR_GET_SECCOMP", Const, 0, ""},
    +		{"PR_GET_SECCOMP_FILTER", Const, 0, ""},
    +		{"PR_GET_SECUREBITS", Const, 0, ""},
    +		{"PR_GET_TIMERSLACK", Const, 0, ""},
    +		{"PR_GET_TIMING", Const, 0, ""},
    +		{"PR_GET_TSC", Const, 0, ""},
    +		{"PR_GET_UNALIGN", Const, 0, ""},
    +		{"PR_MCE_KILL", Const, 0, ""},
    +		{"PR_MCE_KILL_CLEAR", Const, 0, ""},
    +		{"PR_MCE_KILL_DEFAULT", Const, 0, ""},
    +		{"PR_MCE_KILL_EARLY", Const, 0, ""},
    +		{"PR_MCE_KILL_GET", Const, 0, ""},
    +		{"PR_MCE_KILL_LATE", Const, 0, ""},
    +		{"PR_MCE_KILL_SET", Const, 0, ""},
    +		{"PR_SECCOMP_FILTER_EVENT", Const, 0, ""},
    +		{"PR_SECCOMP_FILTER_SYSCALL", Const, 0, ""},
    +		{"PR_SET_DUMPABLE", Const, 0, ""},
    +		{"PR_SET_ENDIAN", Const, 0, ""},
    +		{"PR_SET_FPEMU", Const, 0, ""},
    +		{"PR_SET_FPEXC", Const, 0, ""},
    +		{"PR_SET_KEEPCAPS", Const, 0, ""},
    +		{"PR_SET_NAME", Const, 0, ""},
    +		{"PR_SET_PDEATHSIG", Const, 0, ""},
    +		{"PR_SET_PTRACER", Const, 0, ""},
    +		{"PR_SET_SECCOMP", Const, 0, ""},
    +		{"PR_SET_SECCOMP_FILTER", Const, 0, ""},
    +		{"PR_SET_SECUREBITS", Const, 0, ""},
    +		{"PR_SET_TIMERSLACK", Const, 0, ""},
    +		{"PR_SET_TIMING", Const, 0, ""},
    +		{"PR_SET_TSC", Const, 0, ""},
    +		{"PR_SET_UNALIGN", Const, 0, ""},
    +		{"PR_TASK_PERF_EVENTS_DISABLE", Const, 0, ""},
    +		{"PR_TASK_PERF_EVENTS_ENABLE", Const, 0, ""},
    +		{"PR_TIMING_STATISTICAL", Const, 0, ""},
    +		{"PR_TIMING_TIMESTAMP", Const, 0, ""},
    +		{"PR_TSC_ENABLE", Const, 0, ""},
    +		{"PR_TSC_SIGSEGV", Const, 0, ""},
    +		{"PR_UNALIGN_NOPRINT", Const, 0, ""},
    +		{"PR_UNALIGN_SIGBUS", Const, 0, ""},
    +		{"PTRACE_ARCH_PRCTL", Const, 0, ""},
    +		{"PTRACE_ATTACH", Const, 0, ""},
    +		{"PTRACE_CONT", Const, 0, ""},
    +		{"PTRACE_DETACH", Const, 0, ""},
    +		{"PTRACE_EVENT_CLONE", Const, 0, ""},
    +		{"PTRACE_EVENT_EXEC", Const, 0, ""},
    +		{"PTRACE_EVENT_EXIT", Const, 0, ""},
    +		{"PTRACE_EVENT_FORK", Const, 0, ""},
    +		{"PTRACE_EVENT_VFORK", Const, 0, ""},
    +		{"PTRACE_EVENT_VFORK_DONE", Const, 0, ""},
    +		{"PTRACE_GETCRUNCHREGS", Const, 0, ""},
    +		{"PTRACE_GETEVENTMSG", Const, 0, ""},
    +		{"PTRACE_GETFPREGS", Const, 0, ""},
    +		{"PTRACE_GETFPXREGS", Const, 0, ""},
    +		{"PTRACE_GETHBPREGS", Const, 0, ""},
    +		{"PTRACE_GETREGS", Const, 0, ""},
    +		{"PTRACE_GETREGSET", Const, 0, ""},
    +		{"PTRACE_GETSIGINFO", Const, 0, ""},
    +		{"PTRACE_GETVFPREGS", Const, 0, ""},
    +		{"PTRACE_GETWMMXREGS", Const, 0, ""},
    +		{"PTRACE_GET_THREAD_AREA", Const, 0, ""},
    +		{"PTRACE_KILL", Const, 0, ""},
    +		{"PTRACE_OLDSETOPTIONS", Const, 0, ""},
    +		{"PTRACE_O_MASK", Const, 0, ""},
    +		{"PTRACE_O_TRACECLONE", Const, 0, ""},
    +		{"PTRACE_O_TRACEEXEC", Const, 0, ""},
    +		{"PTRACE_O_TRACEEXIT", Const, 0, ""},
    +		{"PTRACE_O_TRACEFORK", Const, 0, ""},
    +		{"PTRACE_O_TRACESYSGOOD", Const, 0, ""},
    +		{"PTRACE_O_TRACEVFORK", Const, 0, ""},
    +		{"PTRACE_O_TRACEVFORKDONE", Const, 0, ""},
    +		{"PTRACE_PEEKDATA", Const, 0, ""},
    +		{"PTRACE_PEEKTEXT", Const, 0, ""},
    +		{"PTRACE_PEEKUSR", Const, 0, ""},
    +		{"PTRACE_POKEDATA", Const, 0, ""},
    +		{"PTRACE_POKETEXT", Const, 0, ""},
    +		{"PTRACE_POKEUSR", Const, 0, ""},
    +		{"PTRACE_SETCRUNCHREGS", Const, 0, ""},
    +		{"PTRACE_SETFPREGS", Const, 0, ""},
    +		{"PTRACE_SETFPXREGS", Const, 0, ""},
    +		{"PTRACE_SETHBPREGS", Const, 0, ""},
    +		{"PTRACE_SETOPTIONS", Const, 0, ""},
    +		{"PTRACE_SETREGS", Const, 0, ""},
    +		{"PTRACE_SETREGSET", Const, 0, ""},
    +		{"PTRACE_SETSIGINFO", Const, 0, ""},
    +		{"PTRACE_SETVFPREGS", Const, 0, ""},
    +		{"PTRACE_SETWMMXREGS", Const, 0, ""},
    +		{"PTRACE_SET_SYSCALL", Const, 0, ""},
    +		{"PTRACE_SET_THREAD_AREA", Const, 0, ""},
    +		{"PTRACE_SINGLEBLOCK", Const, 0, ""},
    +		{"PTRACE_SINGLESTEP", Const, 0, ""},
    +		{"PTRACE_SYSCALL", Const, 0, ""},
    +		{"PTRACE_SYSEMU", Const, 0, ""},
    +		{"PTRACE_SYSEMU_SINGLESTEP", Const, 0, ""},
    +		{"PTRACE_TRACEME", Const, 0, ""},
    +		{"PT_ATTACH", Const, 0, ""},
    +		{"PT_ATTACHEXC", Const, 0, ""},
    +		{"PT_CONTINUE", Const, 0, ""},
    +		{"PT_DATA_ADDR", Const, 0, ""},
    +		{"PT_DENY_ATTACH", Const, 0, ""},
    +		{"PT_DETACH", Const, 0, ""},
    +		{"PT_FIRSTMACH", Const, 0, ""},
    +		{"PT_FORCEQUOTA", Const, 0, ""},
    +		{"PT_KILL", Const, 0, ""},
    +		{"PT_MASK", Const, 1, ""},
    +		{"PT_READ_D", Const, 0, ""},
    +		{"PT_READ_I", Const, 0, ""},
    +		{"PT_READ_U", Const, 0, ""},
    +		{"PT_SIGEXC", Const, 0, ""},
    +		{"PT_STEP", Const, 0, ""},
    +		{"PT_TEXT_ADDR", Const, 0, ""},
    +		{"PT_TEXT_END_ADDR", Const, 0, ""},
    +		{"PT_THUPDATE", Const, 0, ""},
    +		{"PT_TRACE_ME", Const, 0, ""},
    +		{"PT_WRITE_D", Const, 0, ""},
    +		{"PT_WRITE_I", Const, 0, ""},
    +		{"PT_WRITE_U", Const, 0, ""},
    +		{"ParseDirent", Func, 0, "func(buf []byte, max int, names []string) (consumed int, count int, newnames []string)"},
    +		{"ParseNetlinkMessage", Func, 0, "func(b []byte) ([]NetlinkMessage, error)"},
    +		{"ParseNetlinkRouteAttr", Func, 0, "func(m *NetlinkMessage) ([]NetlinkRouteAttr, error)"},
    +		{"ParseRoutingMessage", Func, 0, ""},
    +		{"ParseRoutingSockaddr", Func, 0, ""},
    +		{"ParseSocketControlMessage", Func, 0, "func(b []byte) ([]SocketControlMessage, error)"},
    +		{"ParseUnixCredentials", Func, 0, "func(m *SocketControlMessage) (*Ucred, error)"},
    +		{"ParseUnixRights", Func, 0, "func(m *SocketControlMessage) ([]int, error)"},
    +		{"PathMax", Const, 0, ""},
    +		{"Pathconf", Func, 0, ""},
    +		{"Pause", Func, 0, "func() (err error)"},
    +		{"Pipe", Func, 0, "func(p []int) error"},
    +		{"Pipe2", Func, 1, "func(p []int, flags int) error"},
    +		{"PivotRoot", Func, 0, "func(newroot string, putold string) (err error)"},
    +		{"Pointer", Type, 11, ""},
    +		{"PostQueuedCompletionStatus", Func, 0, ""},
    +		{"Pread", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
    +		{"Proc", Type, 0, ""},
    +		{"Proc.Dll", Field, 0, ""},
    +		{"Proc.Name", Field, 0, ""},
    +		{"ProcAttr", Type, 0, ""},
    +		{"ProcAttr.Dir", Field, 0, ""},
    +		{"ProcAttr.Env", Field, 0, ""},
    +		{"ProcAttr.Files", Field, 0, ""},
    +		{"ProcAttr.Sys", Field, 0, ""},
    +		{"Process32First", Func, 4, ""},
    +		{"Process32Next", Func, 4, ""},
    +		{"ProcessEntry32", Type, 4, ""},
    +		{"ProcessEntry32.DefaultHeapID", Field, 4, ""},
    +		{"ProcessEntry32.ExeFile", Field, 4, ""},
    +		{"ProcessEntry32.Flags", Field, 4, ""},
    +		{"ProcessEntry32.ModuleID", Field, 4, ""},
    +		{"ProcessEntry32.ParentProcessID", Field, 4, ""},
    +		{"ProcessEntry32.PriClassBase", Field, 4, ""},
    +		{"ProcessEntry32.ProcessID", Field, 4, ""},
    +		{"ProcessEntry32.Size", Field, 4, ""},
    +		{"ProcessEntry32.Threads", Field, 4, ""},
    +		{"ProcessEntry32.Usage", Field, 4, ""},
    +		{"ProcessInformation", Type, 0, ""},
    +		{"ProcessInformation.Process", Field, 0, ""},
    +		{"ProcessInformation.ProcessId", Field, 0, ""},
    +		{"ProcessInformation.Thread", Field, 0, ""},
    +		{"ProcessInformation.ThreadId", Field, 0, ""},
    +		{"Protoent", Type, 0, ""},
    +		{"Protoent.Aliases", Field, 0, ""},
    +		{"Protoent.Name", Field, 0, ""},
    +		{"Protoent.Proto", Field, 0, ""},
    +		{"PtraceAttach", Func, 0, "func(pid int) (err error)"},
    +		{"PtraceCont", Func, 0, "func(pid int, signal int) (err error)"},
    +		{"PtraceDetach", Func, 0, "func(pid int) (err error)"},
    +		{"PtraceGetEventMsg", Func, 0, "func(pid int) (msg uint, err error)"},
    +		{"PtraceGetRegs", Func, 0, "func(pid int, regsout *PtraceRegs) (err error)"},
    +		{"PtracePeekData", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
    +		{"PtracePeekText", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
    +		{"PtracePokeData", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
    +		{"PtracePokeText", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
    +		{"PtraceRegs", Type, 0, ""},
    +		{"PtraceRegs.Cs", Field, 0, ""},
    +		{"PtraceRegs.Ds", Field, 0, ""},
    +		{"PtraceRegs.Eax", Field, 0, ""},
    +		{"PtraceRegs.Ebp", Field, 0, ""},
    +		{"PtraceRegs.Ebx", Field, 0, ""},
    +		{"PtraceRegs.Ecx", Field, 0, ""},
    +		{"PtraceRegs.Edi", Field, 0, ""},
    +		{"PtraceRegs.Edx", Field, 0, ""},
    +		{"PtraceRegs.Eflags", Field, 0, ""},
    +		{"PtraceRegs.Eip", Field, 0, ""},
    +		{"PtraceRegs.Es", Field, 0, ""},
    +		{"PtraceRegs.Esi", Field, 0, ""},
    +		{"PtraceRegs.Esp", Field, 0, ""},
    +		{"PtraceRegs.Fs", Field, 0, ""},
    +		{"PtraceRegs.Fs_base", Field, 0, ""},
    +		{"PtraceRegs.Gs", Field, 0, ""},
    +		{"PtraceRegs.Gs_base", Field, 0, ""},
    +		{"PtraceRegs.Orig_eax", Field, 0, ""},
    +		{"PtraceRegs.Orig_rax", Field, 0, ""},
    +		{"PtraceRegs.R10", Field, 0, ""},
    +		{"PtraceRegs.R11", Field, 0, ""},
    +		{"PtraceRegs.R12", Field, 0, ""},
    +		{"PtraceRegs.R13", Field, 0, ""},
    +		{"PtraceRegs.R14", Field, 0, ""},
    +		{"PtraceRegs.R15", Field, 0, ""},
    +		{"PtraceRegs.R8", Field, 0, ""},
    +		{"PtraceRegs.R9", Field, 0, ""},
    +		{"PtraceRegs.Rax", Field, 0, ""},
    +		{"PtraceRegs.Rbp", Field, 0, ""},
    +		{"PtraceRegs.Rbx", Field, 0, ""},
    +		{"PtraceRegs.Rcx", Field, 0, ""},
    +		{"PtraceRegs.Rdi", Field, 0, ""},
    +		{"PtraceRegs.Rdx", Field, 0, ""},
    +		{"PtraceRegs.Rip", Field, 0, ""},
    +		{"PtraceRegs.Rsi", Field, 0, ""},
    +		{"PtraceRegs.Rsp", Field, 0, ""},
    +		{"PtraceRegs.Ss", Field, 0, ""},
    +		{"PtraceRegs.Uregs", Field, 0, ""},
    +		{"PtraceRegs.Xcs", Field, 0, ""},
    +		{"PtraceRegs.Xds", Field, 0, ""},
    +		{"PtraceRegs.Xes", Field, 0, ""},
    +		{"PtraceRegs.Xfs", Field, 0, ""},
    +		{"PtraceRegs.Xgs", Field, 0, ""},
    +		{"PtraceRegs.Xss", Field, 0, ""},
    +		{"PtraceSetOptions", Func, 0, "func(pid int, options int) (err error)"},
    +		{"PtraceSetRegs", Func, 0, "func(pid int, regs *PtraceRegs) (err error)"},
    +		{"PtraceSingleStep", Func, 0, "func(pid int) (err error)"},
    +		{"PtraceSyscall", Func, 1, "func(pid int, signal int) (err error)"},
    +		{"Pwrite", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
    +		{"REG_BINARY", Const, 0, ""},
    +		{"REG_DWORD", Const, 0, ""},
    +		{"REG_DWORD_BIG_ENDIAN", Const, 0, ""},
    +		{"REG_DWORD_LITTLE_ENDIAN", Const, 0, ""},
    +		{"REG_EXPAND_SZ", Const, 0, ""},
    +		{"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0, ""},
    +		{"REG_LINK", Const, 0, ""},
    +		{"REG_MULTI_SZ", Const, 0, ""},
    +		{"REG_NONE", Const, 0, ""},
    +		{"REG_QWORD", Const, 0, ""},
    +		{"REG_QWORD_LITTLE_ENDIAN", Const, 0, ""},
    +		{"REG_RESOURCE_LIST", Const, 0, ""},
    +		{"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0, ""},
    +		{"REG_SZ", Const, 0, ""},
    +		{"RLIMIT_AS", Const, 0, ""},
    +		{"RLIMIT_CORE", Const, 0, ""},
    +		{"RLIMIT_CPU", Const, 0, ""},
    +		{"RLIMIT_CPU_USAGE_MONITOR", Const, 16, ""},
    +		{"RLIMIT_DATA", Const, 0, ""},
    +		{"RLIMIT_FSIZE", Const, 0, ""},
    +		{"RLIMIT_NOFILE", Const, 0, ""},
    +		{"RLIMIT_STACK", Const, 0, ""},
    +		{"RLIM_INFINITY", Const, 0, ""},
    +		{"RTAX_ADVMSS", Const, 0, ""},
    +		{"RTAX_AUTHOR", Const, 0, ""},
    +		{"RTAX_BRD", Const, 0, ""},
    +		{"RTAX_CWND", Const, 0, ""},
    +		{"RTAX_DST", Const, 0, ""},
    +		{"RTAX_FEATURES", Const, 0, ""},
    +		{"RTAX_FEATURE_ALLFRAG", Const, 0, ""},
    +		{"RTAX_FEATURE_ECN", Const, 0, ""},
    +		{"RTAX_FEATURE_SACK", Const, 0, ""},
    +		{"RTAX_FEATURE_TIMESTAMP", Const, 0, ""},
    +		{"RTAX_GATEWAY", Const, 0, ""},
    +		{"RTAX_GENMASK", Const, 0, ""},
    +		{"RTAX_HOPLIMIT", Const, 0, ""},
    +		{"RTAX_IFA", Const, 0, ""},
    +		{"RTAX_IFP", Const, 0, ""},
    +		{"RTAX_INITCWND", Const, 0, ""},
    +		{"RTAX_INITRWND", Const, 0, ""},
    +		{"RTAX_LABEL", Const, 1, ""},
    +		{"RTAX_LOCK", Const, 0, ""},
    +		{"RTAX_MAX", Const, 0, ""},
    +		{"RTAX_MTU", Const, 0, ""},
    +		{"RTAX_NETMASK", Const, 0, ""},
    +		{"RTAX_REORDERING", Const, 0, ""},
    +		{"RTAX_RTO_MIN", Const, 0, ""},
    +		{"RTAX_RTT", Const, 0, ""},
    +		{"RTAX_RTTVAR", Const, 0, ""},
    +		{"RTAX_SRC", Const, 1, ""},
    +		{"RTAX_SRCMASK", Const, 1, ""},
    +		{"RTAX_SSTHRESH", Const, 0, ""},
    +		{"RTAX_TAG", Const, 1, ""},
    +		{"RTAX_UNSPEC", Const, 0, ""},
    +		{"RTAX_WINDOW", Const, 0, ""},
    +		{"RTA_ALIGNTO", Const, 0, ""},
    +		{"RTA_AUTHOR", Const, 0, ""},
    +		{"RTA_BRD", Const, 0, ""},
    +		{"RTA_CACHEINFO", Const, 0, ""},
    +		{"RTA_DST", Const, 0, ""},
    +		{"RTA_FLOW", Const, 0, ""},
    +		{"RTA_GATEWAY", Const, 0, ""},
    +		{"RTA_GENMASK", Const, 0, ""},
    +		{"RTA_IFA", Const, 0, ""},
    +		{"RTA_IFP", Const, 0, ""},
    +		{"RTA_IIF", Const, 0, ""},
    +		{"RTA_LABEL", Const, 1, ""},
    +		{"RTA_MAX", Const, 0, ""},
    +		{"RTA_METRICS", Const, 0, ""},
    +		{"RTA_MULTIPATH", Const, 0, ""},
    +		{"RTA_NETMASK", Const, 0, ""},
    +		{"RTA_OIF", Const, 0, ""},
    +		{"RTA_PREFSRC", Const, 0, ""},
    +		{"RTA_PRIORITY", Const, 0, ""},
    +		{"RTA_SRC", Const, 0, ""},
    +		{"RTA_SRCMASK", Const, 1, ""},
    +		{"RTA_TABLE", Const, 0, ""},
    +		{"RTA_TAG", Const, 1, ""},
    +		{"RTA_UNSPEC", Const, 0, ""},
    +		{"RTCF_DIRECTSRC", Const, 0, ""},
    +		{"RTCF_DOREDIRECT", Const, 0, ""},
    +		{"RTCF_LOG", Const, 0, ""},
    +		{"RTCF_MASQ", Const, 0, ""},
    +		{"RTCF_NAT", Const, 0, ""},
    +		{"RTCF_VALVE", Const, 0, ""},
    +		{"RTF_ADDRCLASSMASK", Const, 0, ""},
    +		{"RTF_ADDRCONF", Const, 0, ""},
    +		{"RTF_ALLONLINK", Const, 0, ""},
    +		{"RTF_ANNOUNCE", Const, 1, ""},
    +		{"RTF_BLACKHOLE", Const, 0, ""},
    +		{"RTF_BROADCAST", Const, 0, ""},
    +		{"RTF_CACHE", Const, 0, ""},
    +		{"RTF_CLONED", Const, 1, ""},
    +		{"RTF_CLONING", Const, 0, ""},
    +		{"RTF_CONDEMNED", Const, 0, ""},
    +		{"RTF_DEFAULT", Const, 0, ""},
    +		{"RTF_DELCLONE", Const, 0, ""},
    +		{"RTF_DONE", Const, 0, ""},
    +		{"RTF_DYNAMIC", Const, 0, ""},
    +		{"RTF_FLOW", Const, 0, ""},
    +		{"RTF_FMASK", Const, 0, ""},
    +		{"RTF_GATEWAY", Const, 0, ""},
    +		{"RTF_GWFLAG_COMPAT", Const, 3, ""},
    +		{"RTF_HOST", Const, 0, ""},
    +		{"RTF_IFREF", Const, 0, ""},
    +		{"RTF_IFSCOPE", Const, 0, ""},
    +		{"RTF_INTERFACE", Const, 0, ""},
    +		{"RTF_IRTT", Const, 0, ""},
    +		{"RTF_LINKRT", Const, 0, ""},
    +		{"RTF_LLDATA", Const, 0, ""},
    +		{"RTF_LLINFO", Const, 0, ""},
    +		{"RTF_LOCAL", Const, 0, ""},
    +		{"RTF_MASK", Const, 1, ""},
    +		{"RTF_MODIFIED", Const, 0, ""},
    +		{"RTF_MPATH", Const, 1, ""},
    +		{"RTF_MPLS", Const, 1, ""},
    +		{"RTF_MSS", Const, 0, ""},
    +		{"RTF_MTU", Const, 0, ""},
    +		{"RTF_MULTICAST", Const, 0, ""},
    +		{"RTF_NAT", Const, 0, ""},
    +		{"RTF_NOFORWARD", Const, 0, ""},
    +		{"RTF_NONEXTHOP", Const, 0, ""},
    +		{"RTF_NOPMTUDISC", Const, 0, ""},
    +		{"RTF_PERMANENT_ARP", Const, 1, ""},
    +		{"RTF_PINNED", Const, 0, ""},
    +		{"RTF_POLICY", Const, 0, ""},
    +		{"RTF_PRCLONING", Const, 0, ""},
    +		{"RTF_PROTO1", Const, 0, ""},
    +		{"RTF_PROTO2", Const, 0, ""},
    +		{"RTF_PROTO3", Const, 0, ""},
    +		{"RTF_PROXY", Const, 16, ""},
    +		{"RTF_REINSTATE", Const, 0, ""},
    +		{"RTF_REJECT", Const, 0, ""},
    +		{"RTF_RNH_LOCKED", Const, 0, ""},
    +		{"RTF_ROUTER", Const, 16, ""},
    +		{"RTF_SOURCE", Const, 1, ""},
    +		{"RTF_SRC", Const, 1, ""},
    +		{"RTF_STATIC", Const, 0, ""},
    +		{"RTF_STICKY", Const, 0, ""},
    +		{"RTF_THROW", Const, 0, ""},
    +		{"RTF_TUNNEL", Const, 1, ""},
    +		{"RTF_UP", Const, 0, ""},
    +		{"RTF_USETRAILERS", Const, 1, ""},
    +		{"RTF_WASCLONED", Const, 0, ""},
    +		{"RTF_WINDOW", Const, 0, ""},
    +		{"RTF_XRESOLVE", Const, 0, ""},
    +		{"RTM_ADD", Const, 0, ""},
    +		{"RTM_BASE", Const, 0, ""},
    +		{"RTM_CHANGE", Const, 0, ""},
    +		{"RTM_CHGADDR", Const, 1, ""},
    +		{"RTM_DELACTION", Const, 0, ""},
    +		{"RTM_DELADDR", Const, 0, ""},
    +		{"RTM_DELADDRLABEL", Const, 0, ""},
    +		{"RTM_DELETE", Const, 0, ""},
    +		{"RTM_DELLINK", Const, 0, ""},
    +		{"RTM_DELMADDR", Const, 0, ""},
    +		{"RTM_DELNEIGH", Const, 0, ""},
    +		{"RTM_DELQDISC", Const, 0, ""},
    +		{"RTM_DELROUTE", Const, 0, ""},
    +		{"RTM_DELRULE", Const, 0, ""},
    +		{"RTM_DELTCLASS", Const, 0, ""},
    +		{"RTM_DELTFILTER", Const, 0, ""},
    +		{"RTM_DESYNC", Const, 1, ""},
    +		{"RTM_F_CLONED", Const, 0, ""},
    +		{"RTM_F_EQUALIZE", Const, 0, ""},
    +		{"RTM_F_NOTIFY", Const, 0, ""},
    +		{"RTM_F_PREFIX", Const, 0, ""},
    +		{"RTM_GET", Const, 0, ""},
    +		{"RTM_GET2", Const, 0, ""},
    +		{"RTM_GETACTION", Const, 0, ""},
    +		{"RTM_GETADDR", Const, 0, ""},
    +		{"RTM_GETADDRLABEL", Const, 0, ""},
    +		{"RTM_GETANYCAST", Const, 0, ""},
    +		{"RTM_GETDCB", Const, 0, ""},
    +		{"RTM_GETLINK", Const, 0, ""},
    +		{"RTM_GETMULTICAST", Const, 0, ""},
    +		{"RTM_GETNEIGH", Const, 0, ""},
    +		{"RTM_GETNEIGHTBL", Const, 0, ""},
    +		{"RTM_GETQDISC", Const, 0, ""},
    +		{"RTM_GETROUTE", Const, 0, ""},
    +		{"RTM_GETRULE", Const, 0, ""},
    +		{"RTM_GETTCLASS", Const, 0, ""},
    +		{"RTM_GETTFILTER", Const, 0, ""},
    +		{"RTM_IEEE80211", Const, 0, ""},
    +		{"RTM_IFANNOUNCE", Const, 0, ""},
    +		{"RTM_IFINFO", Const, 0, ""},
    +		{"RTM_IFINFO2", Const, 0, ""},
    +		{"RTM_LLINFO_UPD", Const, 1, ""},
    +		{"RTM_LOCK", Const, 0, ""},
    +		{"RTM_LOSING", Const, 0, ""},
    +		{"RTM_MAX", Const, 0, ""},
    +		{"RTM_MAXSIZE", Const, 1, ""},
    +		{"RTM_MISS", Const, 0, ""},
    +		{"RTM_NEWACTION", Const, 0, ""},
    +		{"RTM_NEWADDR", Const, 0, ""},
    +		{"RTM_NEWADDRLABEL", Const, 0, ""},
    +		{"RTM_NEWLINK", Const, 0, ""},
    +		{"RTM_NEWMADDR", Const, 0, ""},
    +		{"RTM_NEWMADDR2", Const, 0, ""},
    +		{"RTM_NEWNDUSEROPT", Const, 0, ""},
    +		{"RTM_NEWNEIGH", Const, 0, ""},
    +		{"RTM_NEWNEIGHTBL", Const, 0, ""},
    +		{"RTM_NEWPREFIX", Const, 0, ""},
    +		{"RTM_NEWQDISC", Const, 0, ""},
    +		{"RTM_NEWROUTE", Const, 0, ""},
    +		{"RTM_NEWRULE", Const, 0, ""},
    +		{"RTM_NEWTCLASS", Const, 0, ""},
    +		{"RTM_NEWTFILTER", Const, 0, ""},
    +		{"RTM_NR_FAMILIES", Const, 0, ""},
    +		{"RTM_NR_MSGTYPES", Const, 0, ""},
    +		{"RTM_OIFINFO", Const, 1, ""},
    +		{"RTM_OLDADD", Const, 0, ""},
    +		{"RTM_OLDDEL", Const, 0, ""},
    +		{"RTM_OOIFINFO", Const, 1, ""},
    +		{"RTM_REDIRECT", Const, 0, ""},
    +		{"RTM_RESOLVE", Const, 0, ""},
    +		{"RTM_RTTUNIT", Const, 0, ""},
    +		{"RTM_SETDCB", Const, 0, ""},
    +		{"RTM_SETGATE", Const, 1, ""},
    +		{"RTM_SETLINK", Const, 0, ""},
    +		{"RTM_SETNEIGHTBL", Const, 0, ""},
    +		{"RTM_VERSION", Const, 0, ""},
    +		{"RTNH_ALIGNTO", Const, 0, ""},
    +		{"RTNH_F_DEAD", Const, 0, ""},
    +		{"RTNH_F_ONLINK", Const, 0, ""},
    +		{"RTNH_F_PERVASIVE", Const, 0, ""},
    +		{"RTNLGRP_IPV4_IFADDR", Const, 1, ""},
    +		{"RTNLGRP_IPV4_MROUTE", Const, 1, ""},
    +		{"RTNLGRP_IPV4_ROUTE", Const, 1, ""},
    +		{"RTNLGRP_IPV4_RULE", Const, 1, ""},
    +		{"RTNLGRP_IPV6_IFADDR", Const, 1, ""},
    +		{"RTNLGRP_IPV6_IFINFO", Const, 1, ""},
    +		{"RTNLGRP_IPV6_MROUTE", Const, 1, ""},
    +		{"RTNLGRP_IPV6_PREFIX", Const, 1, ""},
    +		{"RTNLGRP_IPV6_ROUTE", Const, 1, ""},
    +		{"RTNLGRP_IPV6_RULE", Const, 1, ""},
    +		{"RTNLGRP_LINK", Const, 1, ""},
    +		{"RTNLGRP_ND_USEROPT", Const, 1, ""},
    +		{"RTNLGRP_NEIGH", Const, 1, ""},
    +		{"RTNLGRP_NONE", Const, 1, ""},
    +		{"RTNLGRP_NOTIFY", Const, 1, ""},
    +		{"RTNLGRP_TC", Const, 1, ""},
    +		{"RTN_ANYCAST", Const, 0, ""},
    +		{"RTN_BLACKHOLE", Const, 0, ""},
    +		{"RTN_BROADCAST", Const, 0, ""},
    +		{"RTN_LOCAL", Const, 0, ""},
    +		{"RTN_MAX", Const, 0, ""},
    +		{"RTN_MULTICAST", Const, 0, ""},
    +		{"RTN_NAT", Const, 0, ""},
    +		{"RTN_PROHIBIT", Const, 0, ""},
    +		{"RTN_THROW", Const, 0, ""},
    +		{"RTN_UNICAST", Const, 0, ""},
    +		{"RTN_UNREACHABLE", Const, 0, ""},
    +		{"RTN_UNSPEC", Const, 0, ""},
    +		{"RTN_XRESOLVE", Const, 0, ""},
    +		{"RTPROT_BIRD", Const, 0, ""},
    +		{"RTPROT_BOOT", Const, 0, ""},
    +		{"RTPROT_DHCP", Const, 0, ""},
    +		{"RTPROT_DNROUTED", Const, 0, ""},
    +		{"RTPROT_GATED", Const, 0, ""},
    +		{"RTPROT_KERNEL", Const, 0, ""},
    +		{"RTPROT_MRT", Const, 0, ""},
    +		{"RTPROT_NTK", Const, 0, ""},
    +		{"RTPROT_RA", Const, 0, ""},
    +		{"RTPROT_REDIRECT", Const, 0, ""},
    +		{"RTPROT_STATIC", Const, 0, ""},
    +		{"RTPROT_UNSPEC", Const, 0, ""},
    +		{"RTPROT_XORP", Const, 0, ""},
    +		{"RTPROT_ZEBRA", Const, 0, ""},
    +		{"RTV_EXPIRE", Const, 0, ""},
    +		{"RTV_HOPCOUNT", Const, 0, ""},
    +		{"RTV_MTU", Const, 0, ""},
    +		{"RTV_RPIPE", Const, 0, ""},
    +		{"RTV_RTT", Const, 0, ""},
    +		{"RTV_RTTVAR", Const, 0, ""},
    +		{"RTV_SPIPE", Const, 0, ""},
    +		{"RTV_SSTHRESH", Const, 0, ""},
    +		{"RTV_WEIGHT", Const, 0, ""},
    +		{"RT_CACHING_CONTEXT", Const, 1, ""},
    +		{"RT_CLASS_DEFAULT", Const, 0, ""},
    +		{"RT_CLASS_LOCAL", Const, 0, ""},
    +		{"RT_CLASS_MAIN", Const, 0, ""},
    +		{"RT_CLASS_MAX", Const, 0, ""},
    +		{"RT_CLASS_UNSPEC", Const, 0, ""},
    +		{"RT_DEFAULT_FIB", Const, 1, ""},
    +		{"RT_NORTREF", Const, 1, ""},
    +		{"RT_SCOPE_HOST", Const, 0, ""},
    +		{"RT_SCOPE_LINK", Const, 0, ""},
    +		{"RT_SCOPE_NOWHERE", Const, 0, ""},
    +		{"RT_SCOPE_SITE", Const, 0, ""},
    +		{"RT_SCOPE_UNIVERSE", Const, 0, ""},
    +		{"RT_TABLEID_MAX", Const, 1, ""},
    +		{"RT_TABLE_COMPAT", Const, 0, ""},
    +		{"RT_TABLE_DEFAULT", Const, 0, ""},
    +		{"RT_TABLE_LOCAL", Const, 0, ""},
    +		{"RT_TABLE_MAIN", Const, 0, ""},
    +		{"RT_TABLE_MAX", Const, 0, ""},
    +		{"RT_TABLE_UNSPEC", Const, 0, ""},
    +		{"RUSAGE_CHILDREN", Const, 0, ""},
    +		{"RUSAGE_SELF", Const, 0, ""},
    +		{"RUSAGE_THREAD", Const, 0, ""},
    +		{"Radvisory_t", Type, 0, ""},
    +		{"Radvisory_t.Count", Field, 0, ""},
    +		{"Radvisory_t.Offset", Field, 0, ""},
    +		{"Radvisory_t.Pad_cgo_0", Field, 0, ""},
    +		{"RawConn", Type, 9, ""},
    +		{"RawSockaddr", Type, 0, ""},
    +		{"RawSockaddr.Data", Field, 0, ""},
    +		{"RawSockaddr.Family", Field, 0, ""},
    +		{"RawSockaddr.Len", Field, 0, ""},
    +		{"RawSockaddrAny", Type, 0, ""},
    +		{"RawSockaddrAny.Addr", Field, 0, ""},
    +		{"RawSockaddrAny.Pad", Field, 0, ""},
    +		{"RawSockaddrDatalink", Type, 0, ""},
    +		{"RawSockaddrDatalink.Alen", Field, 0, ""},
    +		{"RawSockaddrDatalink.Data", Field, 0, ""},
    +		{"RawSockaddrDatalink.Family", Field, 0, ""},
    +		{"RawSockaddrDatalink.Index", Field, 0, ""},
    +		{"RawSockaddrDatalink.Len", Field, 0, ""},
    +		{"RawSockaddrDatalink.Nlen", Field, 0, ""},
    +		{"RawSockaddrDatalink.Pad_cgo_0", Field, 2, ""},
    +		{"RawSockaddrDatalink.Slen", Field, 0, ""},
    +		{"RawSockaddrDatalink.Type", Field, 0, ""},
    +		{"RawSockaddrInet4", Type, 0, ""},
    +		{"RawSockaddrInet4.Addr", Field, 0, ""},
    +		{"RawSockaddrInet4.Family", Field, 0, ""},
    +		{"RawSockaddrInet4.Len", Field, 0, ""},
    +		{"RawSockaddrInet4.Port", Field, 0, ""},
    +		{"RawSockaddrInet4.Zero", Field, 0, ""},
    +		{"RawSockaddrInet6", Type, 0, ""},
    +		{"RawSockaddrInet6.Addr", Field, 0, ""},
    +		{"RawSockaddrInet6.Family", Field, 0, ""},
    +		{"RawSockaddrInet6.Flowinfo", Field, 0, ""},
    +		{"RawSockaddrInet6.Len", Field, 0, ""},
    +		{"RawSockaddrInet6.Port", Field, 0, ""},
    +		{"RawSockaddrInet6.Scope_id", Field, 0, ""},
    +		{"RawSockaddrLinklayer", Type, 0, ""},
    +		{"RawSockaddrLinklayer.Addr", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Family", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Halen", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Hatype", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Ifindex", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Pkttype", Field, 0, ""},
    +		{"RawSockaddrLinklayer.Protocol", Field, 0, ""},
    +		{"RawSockaddrNetlink", Type, 0, ""},
    +		{"RawSockaddrNetlink.Family", Field, 0, ""},
    +		{"RawSockaddrNetlink.Groups", Field, 0, ""},
    +		{"RawSockaddrNetlink.Pad", Field, 0, ""},
    +		{"RawSockaddrNetlink.Pid", Field, 0, ""},
    +		{"RawSockaddrUnix", Type, 0, ""},
    +		{"RawSockaddrUnix.Family", Field, 0, ""},
    +		{"RawSockaddrUnix.Len", Field, 0, ""},
    +		{"RawSockaddrUnix.Pad_cgo_0", Field, 2, ""},
    +		{"RawSockaddrUnix.Path", Field, 0, ""},
    +		{"RawSyscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"RawSyscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"Read", Func, 0, "func(fd int, p []byte) (n int, err error)"},
    +		{"ReadConsole", Func, 1, ""},
    +		{"ReadDirectoryChanges", Func, 0, ""},
    +		{"ReadDirent", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
    +		{"ReadFile", Func, 0, ""},
    +		{"Readlink", Func, 0, "func(path string, buf []byte) (n int, err error)"},
    +		{"Reboot", Func, 0, "func(cmd int) (err error)"},
    +		{"Recvfrom", Func, 0, "func(fd int, p []byte, flags int) (n int, from Sockaddr, err error)"},
    +		{"Recvmsg", Func, 0, "func(fd int, p []byte, oob []byte, flags int) (n int, oobn int, recvflags int, from Sockaddr, err error)"},
    +		{"RegCloseKey", Func, 0, ""},
    +		{"RegEnumKeyEx", Func, 0, ""},
    +		{"RegOpenKeyEx", Func, 0, ""},
    +		{"RegQueryInfoKey", Func, 0, ""},
    +		{"RegQueryValueEx", Func, 0, ""},
    +		{"RemoveDirectory", Func, 0, ""},
    +		{"Removexattr", Func, 1, "func(path string, attr string) (err error)"},
    +		{"Rename", Func, 0, "func(oldpath string, newpath string) (err error)"},
    +		{"Renameat", Func, 0, "func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)"},
    +		{"Revoke", Func, 0, ""},
    +		{"Rlimit", Type, 0, ""},
    +		{"Rlimit.Cur", Field, 0, ""},
    +		{"Rlimit.Max", Field, 0, ""},
    +		{"Rmdir", Func, 0, "func(path string) error"},
    +		{"RouteMessage", Type, 0, ""},
    +		{"RouteMessage.Data", Field, 0, ""},
    +		{"RouteMessage.Header", Field, 0, ""},
    +		{"RouteRIB", Func, 0, ""},
    +		{"RoutingMessage", Type, 0, ""},
    +		{"RtAttr", Type, 0, ""},
    +		{"RtAttr.Len", Field, 0, ""},
    +		{"RtAttr.Type", Field, 0, ""},
    +		{"RtGenmsg", Type, 0, ""},
    +		{"RtGenmsg.Family", Field, 0, ""},
    +		{"RtMetrics", Type, 0, ""},
    +		{"RtMetrics.Expire", Field, 0, ""},
    +		{"RtMetrics.Filler", Field, 0, ""},
    +		{"RtMetrics.Hopcount", Field, 0, ""},
    +		{"RtMetrics.Locks", Field, 0, ""},
    +		{"RtMetrics.Mtu", Field, 0, ""},
    +		{"RtMetrics.Pad", Field, 3, ""},
    +		{"RtMetrics.Pksent", Field, 0, ""},
    +		{"RtMetrics.Recvpipe", Field, 0, ""},
    +		{"RtMetrics.Refcnt", Field, 2, ""},
    +		{"RtMetrics.Rtt", Field, 0, ""},
    +		{"RtMetrics.Rttvar", Field, 0, ""},
    +		{"RtMetrics.Sendpipe", Field, 0, ""},
    +		{"RtMetrics.Ssthresh", Field, 0, ""},
    +		{"RtMetrics.Weight", Field, 0, ""},
    +		{"RtMsg", Type, 0, ""},
    +		{"RtMsg.Dst_len", Field, 0, ""},
    +		{"RtMsg.Family", Field, 0, ""},
    +		{"RtMsg.Flags", Field, 0, ""},
    +		{"RtMsg.Protocol", Field, 0, ""},
    +		{"RtMsg.Scope", Field, 0, ""},
    +		{"RtMsg.Src_len", Field, 0, ""},
    +		{"RtMsg.Table", Field, 0, ""},
    +		{"RtMsg.Tos", Field, 0, ""},
    +		{"RtMsg.Type", Field, 0, ""},
    +		{"RtMsghdr", Type, 0, ""},
    +		{"RtMsghdr.Addrs", Field, 0, ""},
    +		{"RtMsghdr.Errno", Field, 0, ""},
    +		{"RtMsghdr.Flags", Field, 0, ""},
    +		{"RtMsghdr.Fmask", Field, 0, ""},
    +		{"RtMsghdr.Hdrlen", Field, 2, ""},
    +		{"RtMsghdr.Index", Field, 0, ""},
    +		{"RtMsghdr.Inits", Field, 0, ""},
    +		{"RtMsghdr.Mpls", Field, 2, ""},
    +		{"RtMsghdr.Msglen", Field, 0, ""},
    +		{"RtMsghdr.Pad_cgo_0", Field, 0, ""},
    +		{"RtMsghdr.Pad_cgo_1", Field, 2, ""},
    +		{"RtMsghdr.Pid", Field, 0, ""},
    +		{"RtMsghdr.Priority", Field, 2, ""},
    +		{"RtMsghdr.Rmx", Field, 0, ""},
    +		{"RtMsghdr.Seq", Field, 0, ""},
    +		{"RtMsghdr.Tableid", Field, 2, ""},
    +		{"RtMsghdr.Type", Field, 0, ""},
    +		{"RtMsghdr.Use", Field, 0, ""},
    +		{"RtMsghdr.Version", Field, 0, ""},
    +		{"RtNexthop", Type, 0, ""},
    +		{"RtNexthop.Flags", Field, 0, ""},
    +		{"RtNexthop.Hops", Field, 0, ""},
    +		{"RtNexthop.Ifindex", Field, 0, ""},
    +		{"RtNexthop.Len", Field, 0, ""},
    +		{"Rusage", Type, 0, ""},
    +		{"Rusage.CreationTime", Field, 0, ""},
    +		{"Rusage.ExitTime", Field, 0, ""},
    +		{"Rusage.Idrss", Field, 0, ""},
    +		{"Rusage.Inblock", Field, 0, ""},
    +		{"Rusage.Isrss", Field, 0, ""},
    +		{"Rusage.Ixrss", Field, 0, ""},
    +		{"Rusage.KernelTime", Field, 0, ""},
    +		{"Rusage.Majflt", Field, 0, ""},
    +		{"Rusage.Maxrss", Field, 0, ""},
    +		{"Rusage.Minflt", Field, 0, ""},
    +		{"Rusage.Msgrcv", Field, 0, ""},
    +		{"Rusage.Msgsnd", Field, 0, ""},
    +		{"Rusage.Nivcsw", Field, 0, ""},
    +		{"Rusage.Nsignals", Field, 0, ""},
    +		{"Rusage.Nswap", Field, 0, ""},
    +		{"Rusage.Nvcsw", Field, 0, ""},
    +		{"Rusage.Oublock", Field, 0, ""},
    +		{"Rusage.Stime", Field, 0, ""},
    +		{"Rusage.UserTime", Field, 0, ""},
    +		{"Rusage.Utime", Field, 0, ""},
    +		{"SCM_BINTIME", Const, 0, ""},
    +		{"SCM_CREDENTIALS", Const, 0, ""},
    +		{"SCM_CREDS", Const, 0, ""},
    +		{"SCM_RIGHTS", Const, 0, ""},
    +		{"SCM_TIMESTAMP", Const, 0, ""},
    +		{"SCM_TIMESTAMPING", Const, 0, ""},
    +		{"SCM_TIMESTAMPNS", Const, 0, ""},
    +		{"SCM_TIMESTAMP_MONOTONIC", Const, 0, ""},
    +		{"SHUT_RD", Const, 0, ""},
    +		{"SHUT_RDWR", Const, 0, ""},
    +		{"SHUT_WR", Const, 0, ""},
    +		{"SID", Type, 0, ""},
    +		{"SIDAndAttributes", Type, 0, ""},
    +		{"SIDAndAttributes.Attributes", Field, 0, ""},
    +		{"SIDAndAttributes.Sid", Field, 0, ""},
    +		{"SIGABRT", Const, 0, ""},
    +		{"SIGALRM", Const, 0, ""},
    +		{"SIGBUS", Const, 0, ""},
    +		{"SIGCHLD", Const, 0, ""},
    +		{"SIGCLD", Const, 0, ""},
    +		{"SIGCONT", Const, 0, ""},
    +		{"SIGEMT", Const, 0, ""},
    +		{"SIGFPE", Const, 0, ""},
    +		{"SIGHUP", Const, 0, ""},
    +		{"SIGILL", Const, 0, ""},
    +		{"SIGINFO", Const, 0, ""},
    +		{"SIGINT", Const, 0, ""},
    +		{"SIGIO", Const, 0, ""},
    +		{"SIGIOT", Const, 0, ""},
    +		{"SIGKILL", Const, 0, ""},
    +		{"SIGLIBRT", Const, 1, ""},
    +		{"SIGLWP", Const, 0, ""},
    +		{"SIGPIPE", Const, 0, ""},
    +		{"SIGPOLL", Const, 0, ""},
    +		{"SIGPROF", Const, 0, ""},
    +		{"SIGPWR", Const, 0, ""},
    +		{"SIGQUIT", Const, 0, ""},
    +		{"SIGSEGV", Const, 0, ""},
    +		{"SIGSTKFLT", Const, 0, ""},
    +		{"SIGSTOP", Const, 0, ""},
    +		{"SIGSYS", Const, 0, ""},
    +		{"SIGTERM", Const, 0, ""},
    +		{"SIGTHR", Const, 0, ""},
    +		{"SIGTRAP", Const, 0, ""},
    +		{"SIGTSTP", Const, 0, ""},
    +		{"SIGTTIN", Const, 0, ""},
    +		{"SIGTTOU", Const, 0, ""},
    +		{"SIGUNUSED", Const, 0, ""},
    +		{"SIGURG", Const, 0, ""},
    +		{"SIGUSR1", Const, 0, ""},
    +		{"SIGUSR2", Const, 0, ""},
    +		{"SIGVTALRM", Const, 0, ""},
    +		{"SIGWINCH", Const, 0, ""},
    +		{"SIGXCPU", Const, 0, ""},
    +		{"SIGXFSZ", Const, 0, ""},
    +		{"SIOCADDDLCI", Const, 0, ""},
    +		{"SIOCADDMULTI", Const, 0, ""},
    +		{"SIOCADDRT", Const, 0, ""},
    +		{"SIOCAIFADDR", Const, 0, ""},
    +		{"SIOCAIFGROUP", Const, 0, ""},
    +		{"SIOCALIFADDR", Const, 0, ""},
    +		{"SIOCARPIPLL", Const, 0, ""},
    +		{"SIOCATMARK", Const, 0, ""},
    +		{"SIOCAUTOADDR", Const, 0, ""},
    +		{"SIOCAUTONETMASK", Const, 0, ""},
    +		{"SIOCBRDGADD", Const, 1, ""},
    +		{"SIOCBRDGADDS", Const, 1, ""},
    +		{"SIOCBRDGARL", Const, 1, ""},
    +		{"SIOCBRDGDADDR", Const, 1, ""},
    +		{"SIOCBRDGDEL", Const, 1, ""},
    +		{"SIOCBRDGDELS", Const, 1, ""},
    +		{"SIOCBRDGFLUSH", Const, 1, ""},
    +		{"SIOCBRDGFRL", Const, 1, ""},
    +		{"SIOCBRDGGCACHE", Const, 1, ""},
    +		{"SIOCBRDGGFD", Const, 1, ""},
    +		{"SIOCBRDGGHT", Const, 1, ""},
    +		{"SIOCBRDGGIFFLGS", Const, 1, ""},
    +		{"SIOCBRDGGMA", Const, 1, ""},
    +		{"SIOCBRDGGPARAM", Const, 1, ""},
    +		{"SIOCBRDGGPRI", Const, 1, ""},
    +		{"SIOCBRDGGRL", Const, 1, ""},
    +		{"SIOCBRDGGSIFS", Const, 1, ""},
    +		{"SIOCBRDGGTO", Const, 1, ""},
    +		{"SIOCBRDGIFS", Const, 1, ""},
    +		{"SIOCBRDGRTS", Const, 1, ""},
    +		{"SIOCBRDGSADDR", Const, 1, ""},
    +		{"SIOCBRDGSCACHE", Const, 1, ""},
    +		{"SIOCBRDGSFD", Const, 1, ""},
    +		{"SIOCBRDGSHT", Const, 1, ""},
    +		{"SIOCBRDGSIFCOST", Const, 1, ""},
    +		{"SIOCBRDGSIFFLGS", Const, 1, ""},
    +		{"SIOCBRDGSIFPRIO", Const, 1, ""},
    +		{"SIOCBRDGSMA", Const, 1, ""},
    +		{"SIOCBRDGSPRI", Const, 1, ""},
    +		{"SIOCBRDGSPROTO", Const, 1, ""},
    +		{"SIOCBRDGSTO", Const, 1, ""},
    +		{"SIOCBRDGSTXHC", Const, 1, ""},
    +		{"SIOCDARP", Const, 0, ""},
    +		{"SIOCDELDLCI", Const, 0, ""},
    +		{"SIOCDELMULTI", Const, 0, ""},
    +		{"SIOCDELRT", Const, 0, ""},
    +		{"SIOCDEVPRIVATE", Const, 0, ""},
    +		{"SIOCDIFADDR", Const, 0, ""},
    +		{"SIOCDIFGROUP", Const, 0, ""},
    +		{"SIOCDIFPHYADDR", Const, 0, ""},
    +		{"SIOCDLIFADDR", Const, 0, ""},
    +		{"SIOCDRARP", Const, 0, ""},
    +		{"SIOCGARP", Const, 0, ""},
    +		{"SIOCGDRVSPEC", Const, 0, ""},
    +		{"SIOCGETKALIVE", Const, 1, ""},
    +		{"SIOCGETLABEL", Const, 1, ""},
    +		{"SIOCGETPFLOW", Const, 1, ""},
    +		{"SIOCGETPFSYNC", Const, 1, ""},
    +		{"SIOCGETSGCNT", Const, 0, ""},
    +		{"SIOCGETVIFCNT", Const, 0, ""},
    +		{"SIOCGETVLAN", Const, 0, ""},
    +		{"SIOCGHIWAT", Const, 0, ""},
    +		{"SIOCGIFADDR", Const, 0, ""},
    +		{"SIOCGIFADDRPREF", Const, 1, ""},
    +		{"SIOCGIFALIAS", Const, 1, ""},
    +		{"SIOCGIFALTMTU", Const, 0, ""},
    +		{"SIOCGIFASYNCMAP", Const, 0, ""},
    +		{"SIOCGIFBOND", Const, 0, ""},
    +		{"SIOCGIFBR", Const, 0, ""},
    +		{"SIOCGIFBRDADDR", Const, 0, ""},
    +		{"SIOCGIFCAP", Const, 0, ""},
    +		{"SIOCGIFCONF", Const, 0, ""},
    +		{"SIOCGIFCOUNT", Const, 0, ""},
    +		{"SIOCGIFDATA", Const, 1, ""},
    +		{"SIOCGIFDESCR", Const, 0, ""},
    +		{"SIOCGIFDEVMTU", Const, 0, ""},
    +		{"SIOCGIFDLT", Const, 1, ""},
    +		{"SIOCGIFDSTADDR", Const, 0, ""},
    +		{"SIOCGIFENCAP", Const, 0, ""},
    +		{"SIOCGIFFIB", Const, 1, ""},
    +		{"SIOCGIFFLAGS", Const, 0, ""},
    +		{"SIOCGIFGATTR", Const, 1, ""},
    +		{"SIOCGIFGENERIC", Const, 0, ""},
    +		{"SIOCGIFGMEMB", Const, 0, ""},
    +		{"SIOCGIFGROUP", Const, 0, ""},
    +		{"SIOCGIFHARDMTU", Const, 3, ""},
    +		{"SIOCGIFHWADDR", Const, 0, ""},
    +		{"SIOCGIFINDEX", Const, 0, ""},
    +		{"SIOCGIFKPI", Const, 0, ""},
    +		{"SIOCGIFMAC", Const, 0, ""},
    +		{"SIOCGIFMAP", Const, 0, ""},
    +		{"SIOCGIFMEDIA", Const, 0, ""},
    +		{"SIOCGIFMEM", Const, 0, ""},
    +		{"SIOCGIFMETRIC", Const, 0, ""},
    +		{"SIOCGIFMTU", Const, 0, ""},
    +		{"SIOCGIFNAME", Const, 0, ""},
    +		{"SIOCGIFNETMASK", Const, 0, ""},
    +		{"SIOCGIFPDSTADDR", Const, 0, ""},
    +		{"SIOCGIFPFLAGS", Const, 0, ""},
    +		{"SIOCGIFPHYS", Const, 0, ""},
    +		{"SIOCGIFPRIORITY", Const, 1, ""},
    +		{"SIOCGIFPSRCADDR", Const, 0, ""},
    +		{"SIOCGIFRDOMAIN", Const, 1, ""},
    +		{"SIOCGIFRTLABEL", Const, 1, ""},
    +		{"SIOCGIFSLAVE", Const, 0, ""},
    +		{"SIOCGIFSTATUS", Const, 0, ""},
    +		{"SIOCGIFTIMESLOT", Const, 1, ""},
    +		{"SIOCGIFTXQLEN", Const, 0, ""},
    +		{"SIOCGIFVLAN", Const, 0, ""},
    +		{"SIOCGIFWAKEFLAGS", Const, 0, ""},
    +		{"SIOCGIFXFLAGS", Const, 1, ""},
    +		{"SIOCGLIFADDR", Const, 0, ""},
    +		{"SIOCGLIFPHYADDR", Const, 0, ""},
    +		{"SIOCGLIFPHYRTABLE", Const, 1, ""},
    +		{"SIOCGLIFPHYTTL", Const, 3, ""},
    +		{"SIOCGLINKSTR", Const, 1, ""},
    +		{"SIOCGLOWAT", Const, 0, ""},
    +		{"SIOCGPGRP", Const, 0, ""},
    +		{"SIOCGPRIVATE_0", Const, 0, ""},
    +		{"SIOCGPRIVATE_1", Const, 0, ""},
    +		{"SIOCGRARP", Const, 0, ""},
    +		{"SIOCGSPPPPARAMS", Const, 3, ""},
    +		{"SIOCGSTAMP", Const, 0, ""},
    +		{"SIOCGSTAMPNS", Const, 0, ""},
    +		{"SIOCGVH", Const, 1, ""},
    +		{"SIOCGVNETID", Const, 3, ""},
    +		{"SIOCIFCREATE", Const, 0, ""},
    +		{"SIOCIFCREATE2", Const, 0, ""},
    +		{"SIOCIFDESTROY", Const, 0, ""},
    +		{"SIOCIFGCLONERS", Const, 0, ""},
    +		{"SIOCINITIFADDR", Const, 1, ""},
    +		{"SIOCPROTOPRIVATE", Const, 0, ""},
    +		{"SIOCRSLVMULTI", Const, 0, ""},
    +		{"SIOCRTMSG", Const, 0, ""},
    +		{"SIOCSARP", Const, 0, ""},
    +		{"SIOCSDRVSPEC", Const, 0, ""},
    +		{"SIOCSETKALIVE", Const, 1, ""},
    +		{"SIOCSETLABEL", Const, 1, ""},
    +		{"SIOCSETPFLOW", Const, 1, ""},
    +		{"SIOCSETPFSYNC", Const, 1, ""},
    +		{"SIOCSETVLAN", Const, 0, ""},
    +		{"SIOCSHIWAT", Const, 0, ""},
    +		{"SIOCSIFADDR", Const, 0, ""},
    +		{"SIOCSIFADDRPREF", Const, 1, ""},
    +		{"SIOCSIFALTMTU", Const, 0, ""},
    +		{"SIOCSIFASYNCMAP", Const, 0, ""},
    +		{"SIOCSIFBOND", Const, 0, ""},
    +		{"SIOCSIFBR", Const, 0, ""},
    +		{"SIOCSIFBRDADDR", Const, 0, ""},
    +		{"SIOCSIFCAP", Const, 0, ""},
    +		{"SIOCSIFDESCR", Const, 0, ""},
    +		{"SIOCSIFDSTADDR", Const, 0, ""},
    +		{"SIOCSIFENCAP", Const, 0, ""},
    +		{"SIOCSIFFIB", Const, 1, ""},
    +		{"SIOCSIFFLAGS", Const, 0, ""},
    +		{"SIOCSIFGATTR", Const, 1, ""},
    +		{"SIOCSIFGENERIC", Const, 0, ""},
    +		{"SIOCSIFHWADDR", Const, 0, ""},
    +		{"SIOCSIFHWBROADCAST", Const, 0, ""},
    +		{"SIOCSIFKPI", Const, 0, ""},
    +		{"SIOCSIFLINK", Const, 0, ""},
    +		{"SIOCSIFLLADDR", Const, 0, ""},
    +		{"SIOCSIFMAC", Const, 0, ""},
    +		{"SIOCSIFMAP", Const, 0, ""},
    +		{"SIOCSIFMEDIA", Const, 0, ""},
    +		{"SIOCSIFMEM", Const, 0, ""},
    +		{"SIOCSIFMETRIC", Const, 0, ""},
    +		{"SIOCSIFMTU", Const, 0, ""},
    +		{"SIOCSIFNAME", Const, 0, ""},
    +		{"SIOCSIFNETMASK", Const, 0, ""},
    +		{"SIOCSIFPFLAGS", Const, 0, ""},
    +		{"SIOCSIFPHYADDR", Const, 0, ""},
    +		{"SIOCSIFPHYS", Const, 0, ""},
    +		{"SIOCSIFPRIORITY", Const, 1, ""},
    +		{"SIOCSIFRDOMAIN", Const, 1, ""},
    +		{"SIOCSIFRTLABEL", Const, 1, ""},
    +		{"SIOCSIFRVNET", Const, 0, ""},
    +		{"SIOCSIFSLAVE", Const, 0, ""},
    +		{"SIOCSIFTIMESLOT", Const, 1, ""},
    +		{"SIOCSIFTXQLEN", Const, 0, ""},
    +		{"SIOCSIFVLAN", Const, 0, ""},
    +		{"SIOCSIFVNET", Const, 0, ""},
    +		{"SIOCSIFXFLAGS", Const, 1, ""},
    +		{"SIOCSLIFPHYADDR", Const, 0, ""},
    +		{"SIOCSLIFPHYRTABLE", Const, 1, ""},
    +		{"SIOCSLIFPHYTTL", Const, 3, ""},
    +		{"SIOCSLINKSTR", Const, 1, ""},
    +		{"SIOCSLOWAT", Const, 0, ""},
    +		{"SIOCSPGRP", Const, 0, ""},
    +		{"SIOCSRARP", Const, 0, ""},
    +		{"SIOCSSPPPPARAMS", Const, 3, ""},
    +		{"SIOCSVH", Const, 1, ""},
    +		{"SIOCSVNETID", Const, 3, ""},
    +		{"SIOCZIFDATA", Const, 1, ""},
    +		{"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1, ""},
    +		{"SIO_GET_INTERFACE_LIST", Const, 0, ""},
    +		{"SIO_KEEPALIVE_VALS", Const, 3, ""},
    +		{"SIO_UDP_CONNRESET", Const, 4, ""},
    +		{"SOCK_CLOEXEC", Const, 0, ""},
    +		{"SOCK_DCCP", Const, 0, ""},
    +		{"SOCK_DGRAM", Const, 0, ""},
    +		{"SOCK_FLAGS_MASK", Const, 1, ""},
    +		{"SOCK_MAXADDRLEN", Const, 0, ""},
    +		{"SOCK_NONBLOCK", Const, 0, ""},
    +		{"SOCK_NOSIGPIPE", Const, 1, ""},
    +		{"SOCK_PACKET", Const, 0, ""},
    +		{"SOCK_RAW", Const, 0, ""},
    +		{"SOCK_RDM", Const, 0, ""},
    +		{"SOCK_SEQPACKET", Const, 0, ""},
    +		{"SOCK_STREAM", Const, 0, ""},
    +		{"SOL_AAL", Const, 0, ""},
    +		{"SOL_ATM", Const, 0, ""},
    +		{"SOL_DECNET", Const, 0, ""},
    +		{"SOL_ICMPV6", Const, 0, ""},
    +		{"SOL_IP", Const, 0, ""},
    +		{"SOL_IPV6", Const, 0, ""},
    +		{"SOL_IRDA", Const, 0, ""},
    +		{"SOL_PACKET", Const, 0, ""},
    +		{"SOL_RAW", Const, 0, ""},
    +		{"SOL_SOCKET", Const, 0, ""},
    +		{"SOL_TCP", Const, 0, ""},
    +		{"SOL_X25", Const, 0, ""},
    +		{"SOMAXCONN", Const, 0, ""},
    +		{"SO_ACCEPTCONN", Const, 0, ""},
    +		{"SO_ACCEPTFILTER", Const, 0, ""},
    +		{"SO_ATTACH_FILTER", Const, 0, ""},
    +		{"SO_BINDANY", Const, 1, ""},
    +		{"SO_BINDTODEVICE", Const, 0, ""},
    +		{"SO_BINTIME", Const, 0, ""},
    +		{"SO_BROADCAST", Const, 0, ""},
    +		{"SO_BSDCOMPAT", Const, 0, ""},
    +		{"SO_DEBUG", Const, 0, ""},
    +		{"SO_DETACH_FILTER", Const, 0, ""},
    +		{"SO_DOMAIN", Const, 0, ""},
    +		{"SO_DONTROUTE", Const, 0, ""},
    +		{"SO_DONTTRUNC", Const, 0, ""},
    +		{"SO_ERROR", Const, 0, ""},
    +		{"SO_KEEPALIVE", Const, 0, ""},
    +		{"SO_LABEL", Const, 0, ""},
    +		{"SO_LINGER", Const, 0, ""},
    +		{"SO_LINGER_SEC", Const, 0, ""},
    +		{"SO_LISTENINCQLEN", Const, 0, ""},
    +		{"SO_LISTENQLEN", Const, 0, ""},
    +		{"SO_LISTENQLIMIT", Const, 0, ""},
    +		{"SO_MARK", Const, 0, ""},
    +		{"SO_NETPROC", Const, 1, ""},
    +		{"SO_NKE", Const, 0, ""},
    +		{"SO_NOADDRERR", Const, 0, ""},
    +		{"SO_NOHEADER", Const, 1, ""},
    +		{"SO_NOSIGPIPE", Const, 0, ""},
    +		{"SO_NOTIFYCONFLICT", Const, 0, ""},
    +		{"SO_NO_CHECK", Const, 0, ""},
    +		{"SO_NO_DDP", Const, 0, ""},
    +		{"SO_NO_OFFLOAD", Const, 0, ""},
    +		{"SO_NP_EXTENSIONS", Const, 0, ""},
    +		{"SO_NREAD", Const, 0, ""},
    +		{"SO_NUMRCVPKT", Const, 16, ""},
    +		{"SO_NWRITE", Const, 0, ""},
    +		{"SO_OOBINLINE", Const, 0, ""},
    +		{"SO_OVERFLOWED", Const, 1, ""},
    +		{"SO_PASSCRED", Const, 0, ""},
    +		{"SO_PASSSEC", Const, 0, ""},
    +		{"SO_PEERCRED", Const, 0, ""},
    +		{"SO_PEERLABEL", Const, 0, ""},
    +		{"SO_PEERNAME", Const, 0, ""},
    +		{"SO_PEERSEC", Const, 0, ""},
    +		{"SO_PRIORITY", Const, 0, ""},
    +		{"SO_PROTOCOL", Const, 0, ""},
    +		{"SO_PROTOTYPE", Const, 1, ""},
    +		{"SO_RANDOMPORT", Const, 0, ""},
    +		{"SO_RCVBUF", Const, 0, ""},
    +		{"SO_RCVBUFFORCE", Const, 0, ""},
    +		{"SO_RCVLOWAT", Const, 0, ""},
    +		{"SO_RCVTIMEO", Const, 0, ""},
    +		{"SO_RESTRICTIONS", Const, 0, ""},
    +		{"SO_RESTRICT_DENYIN", Const, 0, ""},
    +		{"SO_RESTRICT_DENYOUT", Const, 0, ""},
    +		{"SO_RESTRICT_DENYSET", Const, 0, ""},
    +		{"SO_REUSEADDR", Const, 0, ""},
    +		{"SO_REUSEPORT", Const, 0, ""},
    +		{"SO_REUSESHAREUID", Const, 0, ""},
    +		{"SO_RTABLE", Const, 1, ""},
    +		{"SO_RXQ_OVFL", Const, 0, ""},
    +		{"SO_SECURITY_AUTHENTICATION", Const, 0, ""},
    +		{"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0, ""},
    +		{"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0, ""},
    +		{"SO_SETFIB", Const, 0, ""},
    +		{"SO_SNDBUF", Const, 0, ""},
    +		{"SO_SNDBUFFORCE", Const, 0, ""},
    +		{"SO_SNDLOWAT", Const, 0, ""},
    +		{"SO_SNDTIMEO", Const, 0, ""},
    +		{"SO_SPLICE", Const, 1, ""},
    +		{"SO_TIMESTAMP", Const, 0, ""},
    +		{"SO_TIMESTAMPING", Const, 0, ""},
    +		{"SO_TIMESTAMPNS", Const, 0, ""},
    +		{"SO_TIMESTAMP_MONOTONIC", Const, 0, ""},
    +		{"SO_TYPE", Const, 0, ""},
    +		{"SO_UPCALLCLOSEWAIT", Const, 0, ""},
    +		{"SO_UPDATE_ACCEPT_CONTEXT", Const, 0, ""},
    +		{"SO_UPDATE_CONNECT_CONTEXT", Const, 1, ""},
    +		{"SO_USELOOPBACK", Const, 0, ""},
    +		{"SO_USER_COOKIE", Const, 1, ""},
    +		{"SO_VENDOR", Const, 3, ""},
    +		{"SO_WANTMORE", Const, 0, ""},
    +		{"SO_WANTOOBFLAG", Const, 0, ""},
    +		{"SSLExtraCertChainPolicyPara", Type, 0, ""},
    +		{"SSLExtraCertChainPolicyPara.AuthType", Field, 0, ""},
    +		{"SSLExtraCertChainPolicyPara.Checks", Field, 0, ""},
    +		{"SSLExtraCertChainPolicyPara.ServerName", Field, 0, ""},
    +		{"SSLExtraCertChainPolicyPara.Size", Field, 0, ""},
    +		{"STANDARD_RIGHTS_ALL", Const, 0, ""},
    +		{"STANDARD_RIGHTS_EXECUTE", Const, 0, ""},
    +		{"STANDARD_RIGHTS_READ", Const, 0, ""},
    +		{"STANDARD_RIGHTS_REQUIRED", Const, 0, ""},
    +		{"STANDARD_RIGHTS_WRITE", Const, 0, ""},
    +		{"STARTF_USESHOWWINDOW", Const, 0, ""},
    +		{"STARTF_USESTDHANDLES", Const, 0, ""},
    +		{"STD_ERROR_HANDLE", Const, 0, ""},
    +		{"STD_INPUT_HANDLE", Const, 0, ""},
    +		{"STD_OUTPUT_HANDLE", Const, 0, ""},
    +		{"SUBLANG_ENGLISH_US", Const, 0, ""},
    +		{"SW_FORCEMINIMIZE", Const, 0, ""},
    +		{"SW_HIDE", Const, 0, ""},
    +		{"SW_MAXIMIZE", Const, 0, ""},
    +		{"SW_MINIMIZE", Const, 0, ""},
    +		{"SW_NORMAL", Const, 0, ""},
    +		{"SW_RESTORE", Const, 0, ""},
    +		{"SW_SHOW", Const, 0, ""},
    +		{"SW_SHOWDEFAULT", Const, 0, ""},
    +		{"SW_SHOWMAXIMIZED", Const, 0, ""},
    +		{"SW_SHOWMINIMIZED", Const, 0, ""},
    +		{"SW_SHOWMINNOACTIVE", Const, 0, ""},
    +		{"SW_SHOWNA", Const, 0, ""},
    +		{"SW_SHOWNOACTIVATE", Const, 0, ""},
    +		{"SW_SHOWNORMAL", Const, 0, ""},
    +		{"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4, ""},
    +		{"SYNCHRONIZE", Const, 0, ""},
    +		{"SYSCTL_VERSION", Const, 1, ""},
    +		{"SYSCTL_VERS_0", Const, 1, ""},
    +		{"SYSCTL_VERS_1", Const, 1, ""},
    +		{"SYSCTL_VERS_MASK", Const, 1, ""},
    +		{"SYS_ABORT2", Const, 0, ""},
    +		{"SYS_ACCEPT", Const, 0, ""},
    +		{"SYS_ACCEPT4", Const, 0, ""},
    +		{"SYS_ACCEPT_NOCANCEL", Const, 0, ""},
    +		{"SYS_ACCESS", Const, 0, ""},
    +		{"SYS_ACCESS_EXTENDED", Const, 0, ""},
    +		{"SYS_ACCT", Const, 0, ""},
    +		{"SYS_ADD_KEY", Const, 0, ""},
    +		{"SYS_ADD_PROFIL", Const, 0, ""},
    +		{"SYS_ADJFREQ", Const, 1, ""},
    +		{"SYS_ADJTIME", Const, 0, ""},
    +		{"SYS_ADJTIMEX", Const, 0, ""},
    +		{"SYS_AFS_SYSCALL", Const, 0, ""},
    +		{"SYS_AIO_CANCEL", Const, 0, ""},
    +		{"SYS_AIO_ERROR", Const, 0, ""},
    +		{"SYS_AIO_FSYNC", Const, 0, ""},
    +		{"SYS_AIO_MLOCK", Const, 14, ""},
    +		{"SYS_AIO_READ", Const, 0, ""},
    +		{"SYS_AIO_RETURN", Const, 0, ""},
    +		{"SYS_AIO_SUSPEND", Const, 0, ""},
    +		{"SYS_AIO_SUSPEND_NOCANCEL", Const, 0, ""},
    +		{"SYS_AIO_WAITCOMPLETE", Const, 14, ""},
    +		{"SYS_AIO_WRITE", Const, 0, ""},
    +		{"SYS_ALARM", Const, 0, ""},
    +		{"SYS_ARCH_PRCTL", Const, 0, ""},
    +		{"SYS_ARM_FADVISE64_64", Const, 0, ""},
    +		{"SYS_ARM_SYNC_FILE_RANGE", Const, 0, ""},
    +		{"SYS_ATGETMSG", Const, 0, ""},
    +		{"SYS_ATPGETREQ", Const, 0, ""},
    +		{"SYS_ATPGETRSP", Const, 0, ""},
    +		{"SYS_ATPSNDREQ", Const, 0, ""},
    +		{"SYS_ATPSNDRSP", Const, 0, ""},
    +		{"SYS_ATPUTMSG", Const, 0, ""},
    +		{"SYS_ATSOCKET", Const, 0, ""},
    +		{"SYS_AUDIT", Const, 0, ""},
    +		{"SYS_AUDITCTL", Const, 0, ""},
    +		{"SYS_AUDITON", Const, 0, ""},
    +		{"SYS_AUDIT_SESSION_JOIN", Const, 0, ""},
    +		{"SYS_AUDIT_SESSION_PORT", Const, 0, ""},
    +		{"SYS_AUDIT_SESSION_SELF", Const, 0, ""},
    +		{"SYS_BDFLUSH", Const, 0, ""},
    +		{"SYS_BIND", Const, 0, ""},
    +		{"SYS_BINDAT", Const, 3, ""},
    +		{"SYS_BREAK", Const, 0, ""},
    +		{"SYS_BRK", Const, 0, ""},
    +		{"SYS_BSDTHREAD_CREATE", Const, 0, ""},
    +		{"SYS_BSDTHREAD_REGISTER", Const, 0, ""},
    +		{"SYS_BSDTHREAD_TERMINATE", Const, 0, ""},
    +		{"SYS_CAPGET", Const, 0, ""},
    +		{"SYS_CAPSET", Const, 0, ""},
    +		{"SYS_CAP_ENTER", Const, 0, ""},
    +		{"SYS_CAP_FCNTLS_GET", Const, 1, ""},
    +		{"SYS_CAP_FCNTLS_LIMIT", Const, 1, ""},
    +		{"SYS_CAP_GETMODE", Const, 0, ""},
    +		{"SYS_CAP_GETRIGHTS", Const, 0, ""},
    +		{"SYS_CAP_IOCTLS_GET", Const, 1, ""},
    +		{"SYS_CAP_IOCTLS_LIMIT", Const, 1, ""},
    +		{"SYS_CAP_NEW", Const, 0, ""},
    +		{"SYS_CAP_RIGHTS_GET", Const, 1, ""},
    +		{"SYS_CAP_RIGHTS_LIMIT", Const, 1, ""},
    +		{"SYS_CHDIR", Const, 0, ""},
    +		{"SYS_CHFLAGS", Const, 0, ""},
    +		{"SYS_CHFLAGSAT", Const, 3, ""},
    +		{"SYS_CHMOD", Const, 0, ""},
    +		{"SYS_CHMOD_EXTENDED", Const, 0, ""},
    +		{"SYS_CHOWN", Const, 0, ""},
    +		{"SYS_CHOWN32", Const, 0, ""},
    +		{"SYS_CHROOT", Const, 0, ""},
    +		{"SYS_CHUD", Const, 0, ""},
    +		{"SYS_CLOCK_ADJTIME", Const, 0, ""},
    +		{"SYS_CLOCK_GETCPUCLOCKID2", Const, 1, ""},
    +		{"SYS_CLOCK_GETRES", Const, 0, ""},
    +		{"SYS_CLOCK_GETTIME", Const, 0, ""},
    +		{"SYS_CLOCK_NANOSLEEP", Const, 0, ""},
    +		{"SYS_CLOCK_SETTIME", Const, 0, ""},
    +		{"SYS_CLONE", Const, 0, ""},
    +		{"SYS_CLOSE", Const, 0, ""},
    +		{"SYS_CLOSEFROM", Const, 0, ""},
    +		{"SYS_CLOSE_NOCANCEL", Const, 0, ""},
    +		{"SYS_CONNECT", Const, 0, ""},
    +		{"SYS_CONNECTAT", Const, 3, ""},
    +		{"SYS_CONNECT_NOCANCEL", Const, 0, ""},
    +		{"SYS_COPYFILE", Const, 0, ""},
    +		{"SYS_CPUSET", Const, 0, ""},
    +		{"SYS_CPUSET_GETAFFINITY", Const, 0, ""},
    +		{"SYS_CPUSET_GETID", Const, 0, ""},
    +		{"SYS_CPUSET_SETAFFINITY", Const, 0, ""},
    +		{"SYS_CPUSET_SETID", Const, 0, ""},
    +		{"SYS_CREAT", Const, 0, ""},
    +		{"SYS_CREATE_MODULE", Const, 0, ""},
    +		{"SYS_CSOPS", Const, 0, ""},
    +		{"SYS_CSOPS_AUDITTOKEN", Const, 16, ""},
    +		{"SYS_DELETE", Const, 0, ""},
    +		{"SYS_DELETE_MODULE", Const, 0, ""},
    +		{"SYS_DUP", Const, 0, ""},
    +		{"SYS_DUP2", Const, 0, ""},
    +		{"SYS_DUP3", Const, 0, ""},
    +		{"SYS_EACCESS", Const, 0, ""},
    +		{"SYS_EPOLL_CREATE", Const, 0, ""},
    +		{"SYS_EPOLL_CREATE1", Const, 0, ""},
    +		{"SYS_EPOLL_CTL", Const, 0, ""},
    +		{"SYS_EPOLL_CTL_OLD", Const, 0, ""},
    +		{"SYS_EPOLL_PWAIT", Const, 0, ""},
    +		{"SYS_EPOLL_WAIT", Const, 0, ""},
    +		{"SYS_EPOLL_WAIT_OLD", Const, 0, ""},
    +		{"SYS_EVENTFD", Const, 0, ""},
    +		{"SYS_EVENTFD2", Const, 0, ""},
    +		{"SYS_EXCHANGEDATA", Const, 0, ""},
    +		{"SYS_EXECVE", Const, 0, ""},
    +		{"SYS_EXIT", Const, 0, ""},
    +		{"SYS_EXIT_GROUP", Const, 0, ""},
    +		{"SYS_EXTATTRCTL", Const, 0, ""},
    +		{"SYS_EXTATTR_DELETE_FD", Const, 0, ""},
    +		{"SYS_EXTATTR_DELETE_FILE", Const, 0, ""},
    +		{"SYS_EXTATTR_DELETE_LINK", Const, 0, ""},
    +		{"SYS_EXTATTR_GET_FD", Const, 0, ""},
    +		{"SYS_EXTATTR_GET_FILE", Const, 0, ""},
    +		{"SYS_EXTATTR_GET_LINK", Const, 0, ""},
    +		{"SYS_EXTATTR_LIST_FD", Const, 0, ""},
    +		{"SYS_EXTATTR_LIST_FILE", Const, 0, ""},
    +		{"SYS_EXTATTR_LIST_LINK", Const, 0, ""},
    +		{"SYS_EXTATTR_SET_FD", Const, 0, ""},
    +		{"SYS_EXTATTR_SET_FILE", Const, 0, ""},
    +		{"SYS_EXTATTR_SET_LINK", Const, 0, ""},
    +		{"SYS_FACCESSAT", Const, 0, ""},
    +		{"SYS_FADVISE64", Const, 0, ""},
    +		{"SYS_FADVISE64_64", Const, 0, ""},
    +		{"SYS_FALLOCATE", Const, 0, ""},
    +		{"SYS_FANOTIFY_INIT", Const, 0, ""},
    +		{"SYS_FANOTIFY_MARK", Const, 0, ""},
    +		{"SYS_FCHDIR", Const, 0, ""},
    +		{"SYS_FCHFLAGS", Const, 0, ""},
    +		{"SYS_FCHMOD", Const, 0, ""},
    +		{"SYS_FCHMODAT", Const, 0, ""},
    +		{"SYS_FCHMOD_EXTENDED", Const, 0, ""},
    +		{"SYS_FCHOWN", Const, 0, ""},
    +		{"SYS_FCHOWN32", Const, 0, ""},
    +		{"SYS_FCHOWNAT", Const, 0, ""},
    +		{"SYS_FCHROOT", Const, 1, ""},
    +		{"SYS_FCNTL", Const, 0, ""},
    +		{"SYS_FCNTL64", Const, 0, ""},
    +		{"SYS_FCNTL_NOCANCEL", Const, 0, ""},
    +		{"SYS_FDATASYNC", Const, 0, ""},
    +		{"SYS_FEXECVE", Const, 0, ""},
    +		{"SYS_FFCLOCK_GETCOUNTER", Const, 0, ""},
    +		{"SYS_FFCLOCK_GETESTIMATE", Const, 0, ""},
    +		{"SYS_FFCLOCK_SETESTIMATE", Const, 0, ""},
    +		{"SYS_FFSCTL", Const, 0, ""},
    +		{"SYS_FGETATTRLIST", Const, 0, ""},
    +		{"SYS_FGETXATTR", Const, 0, ""},
    +		{"SYS_FHOPEN", Const, 0, ""},
    +		{"SYS_FHSTAT", Const, 0, ""},
    +		{"SYS_FHSTATFS", Const, 0, ""},
    +		{"SYS_FILEPORT_MAKEFD", Const, 0, ""},
    +		{"SYS_FILEPORT_MAKEPORT", Const, 0, ""},
    +		{"SYS_FKTRACE", Const, 1, ""},
    +		{"SYS_FLISTXATTR", Const, 0, ""},
    +		{"SYS_FLOCK", Const, 0, ""},
    +		{"SYS_FORK", Const, 0, ""},
    +		{"SYS_FPATHCONF", Const, 0, ""},
    +		{"SYS_FREEBSD6_FTRUNCATE", Const, 0, ""},
    +		{"SYS_FREEBSD6_LSEEK", Const, 0, ""},
    +		{"SYS_FREEBSD6_MMAP", Const, 0, ""},
    +		{"SYS_FREEBSD6_PREAD", Const, 0, ""},
    +		{"SYS_FREEBSD6_PWRITE", Const, 0, ""},
    +		{"SYS_FREEBSD6_TRUNCATE", Const, 0, ""},
    +		{"SYS_FREMOVEXATTR", Const, 0, ""},
    +		{"SYS_FSCTL", Const, 0, ""},
    +		{"SYS_FSETATTRLIST", Const, 0, ""},
    +		{"SYS_FSETXATTR", Const, 0, ""},
    +		{"SYS_FSGETPATH", Const, 0, ""},
    +		{"SYS_FSTAT", Const, 0, ""},
    +		{"SYS_FSTAT64", Const, 0, ""},
    +		{"SYS_FSTAT64_EXTENDED", Const, 0, ""},
    +		{"SYS_FSTATAT", Const, 0, ""},
    +		{"SYS_FSTATAT64", Const, 0, ""},
    +		{"SYS_FSTATFS", Const, 0, ""},
    +		{"SYS_FSTATFS64", Const, 0, ""},
    +		{"SYS_FSTATV", Const, 0, ""},
    +		{"SYS_FSTATVFS1", Const, 1, ""},
    +		{"SYS_FSTAT_EXTENDED", Const, 0, ""},
    +		{"SYS_FSYNC", Const, 0, ""},
    +		{"SYS_FSYNC_NOCANCEL", Const, 0, ""},
    +		{"SYS_FSYNC_RANGE", Const, 1, ""},
    +		{"SYS_FTIME", Const, 0, ""},
    +		{"SYS_FTRUNCATE", Const, 0, ""},
    +		{"SYS_FTRUNCATE64", Const, 0, ""},
    +		{"SYS_FUTEX", Const, 0, ""},
    +		{"SYS_FUTIMENS", Const, 1, ""},
    +		{"SYS_FUTIMES", Const, 0, ""},
    +		{"SYS_FUTIMESAT", Const, 0, ""},
    +		{"SYS_GETATTRLIST", Const, 0, ""},
    +		{"SYS_GETAUDIT", Const, 0, ""},
    +		{"SYS_GETAUDIT_ADDR", Const, 0, ""},
    +		{"SYS_GETAUID", Const, 0, ""},
    +		{"SYS_GETCONTEXT", Const, 0, ""},
    +		{"SYS_GETCPU", Const, 0, ""},
    +		{"SYS_GETCWD", Const, 0, ""},
    +		{"SYS_GETDENTS", Const, 0, ""},
    +		{"SYS_GETDENTS64", Const, 0, ""},
    +		{"SYS_GETDIRENTRIES", Const, 0, ""},
    +		{"SYS_GETDIRENTRIES64", Const, 0, ""},
    +		{"SYS_GETDIRENTRIESATTR", Const, 0, ""},
    +		{"SYS_GETDTABLECOUNT", Const, 1, ""},
    +		{"SYS_GETDTABLESIZE", Const, 0, ""},
    +		{"SYS_GETEGID", Const, 0, ""},
    +		{"SYS_GETEGID32", Const, 0, ""},
    +		{"SYS_GETEUID", Const, 0, ""},
    +		{"SYS_GETEUID32", Const, 0, ""},
    +		{"SYS_GETFH", Const, 0, ""},
    +		{"SYS_GETFSSTAT", Const, 0, ""},
    +		{"SYS_GETFSSTAT64", Const, 0, ""},
    +		{"SYS_GETGID", Const, 0, ""},
    +		{"SYS_GETGID32", Const, 0, ""},
    +		{"SYS_GETGROUPS", Const, 0, ""},
    +		{"SYS_GETGROUPS32", Const, 0, ""},
    +		{"SYS_GETHOSTUUID", Const, 0, ""},
    +		{"SYS_GETITIMER", Const, 0, ""},
    +		{"SYS_GETLCID", Const, 0, ""},
    +		{"SYS_GETLOGIN", Const, 0, ""},
    +		{"SYS_GETLOGINCLASS", Const, 0, ""},
    +		{"SYS_GETPEERNAME", Const, 0, ""},
    +		{"SYS_GETPGID", Const, 0, ""},
    +		{"SYS_GETPGRP", Const, 0, ""},
    +		{"SYS_GETPID", Const, 0, ""},
    +		{"SYS_GETPMSG", Const, 0, ""},
    +		{"SYS_GETPPID", Const, 0, ""},
    +		{"SYS_GETPRIORITY", Const, 0, ""},
    +		{"SYS_GETRESGID", Const, 0, ""},
    +		{"SYS_GETRESGID32", Const, 0, ""},
    +		{"SYS_GETRESUID", Const, 0, ""},
    +		{"SYS_GETRESUID32", Const, 0, ""},
    +		{"SYS_GETRLIMIT", Const, 0, ""},
    +		{"SYS_GETRTABLE", Const, 1, ""},
    +		{"SYS_GETRUSAGE", Const, 0, ""},
    +		{"SYS_GETSGROUPS", Const, 0, ""},
    +		{"SYS_GETSID", Const, 0, ""},
    +		{"SYS_GETSOCKNAME", Const, 0, ""},
    +		{"SYS_GETSOCKOPT", Const, 0, ""},
    +		{"SYS_GETTHRID", Const, 1, ""},
    +		{"SYS_GETTID", Const, 0, ""},
    +		{"SYS_GETTIMEOFDAY", Const, 0, ""},
    +		{"SYS_GETUID", Const, 0, ""},
    +		{"SYS_GETUID32", Const, 0, ""},
    +		{"SYS_GETVFSSTAT", Const, 1, ""},
    +		{"SYS_GETWGROUPS", Const, 0, ""},
    +		{"SYS_GETXATTR", Const, 0, ""},
    +		{"SYS_GET_KERNEL_SYMS", Const, 0, ""},
    +		{"SYS_GET_MEMPOLICY", Const, 0, ""},
    +		{"SYS_GET_ROBUST_LIST", Const, 0, ""},
    +		{"SYS_GET_THREAD_AREA", Const, 0, ""},
    +		{"SYS_GSSD_SYSCALL", Const, 14, ""},
    +		{"SYS_GTTY", Const, 0, ""},
    +		{"SYS_IDENTITYSVC", Const, 0, ""},
    +		{"SYS_IDLE", Const, 0, ""},
    +		{"SYS_INITGROUPS", Const, 0, ""},
    +		{"SYS_INIT_MODULE", Const, 0, ""},
    +		{"SYS_INOTIFY_ADD_WATCH", Const, 0, ""},
    +		{"SYS_INOTIFY_INIT", Const, 0, ""},
    +		{"SYS_INOTIFY_INIT1", Const, 0, ""},
    +		{"SYS_INOTIFY_RM_WATCH", Const, 0, ""},
    +		{"SYS_IOCTL", Const, 0, ""},
    +		{"SYS_IOPERM", Const, 0, ""},
    +		{"SYS_IOPL", Const, 0, ""},
    +		{"SYS_IOPOLICYSYS", Const, 0, ""},
    +		{"SYS_IOPRIO_GET", Const, 0, ""},
    +		{"SYS_IOPRIO_SET", Const, 0, ""},
    +		{"SYS_IO_CANCEL", Const, 0, ""},
    +		{"SYS_IO_DESTROY", Const, 0, ""},
    +		{"SYS_IO_GETEVENTS", Const, 0, ""},
    +		{"SYS_IO_SETUP", Const, 0, ""},
    +		{"SYS_IO_SUBMIT", Const, 0, ""},
    +		{"SYS_IPC", Const, 0, ""},
    +		{"SYS_ISSETUGID", Const, 0, ""},
    +		{"SYS_JAIL", Const, 0, ""},
    +		{"SYS_JAIL_ATTACH", Const, 0, ""},
    +		{"SYS_JAIL_GET", Const, 0, ""},
    +		{"SYS_JAIL_REMOVE", Const, 0, ""},
    +		{"SYS_JAIL_SET", Const, 0, ""},
    +		{"SYS_KAS_INFO", Const, 16, ""},
    +		{"SYS_KDEBUG_TRACE", Const, 0, ""},
    +		{"SYS_KENV", Const, 0, ""},
    +		{"SYS_KEVENT", Const, 0, ""},
    +		{"SYS_KEVENT64", Const, 0, ""},
    +		{"SYS_KEXEC_LOAD", Const, 0, ""},
    +		{"SYS_KEYCTL", Const, 0, ""},
    +		{"SYS_KILL", Const, 0, ""},
    +		{"SYS_KLDFIND", Const, 0, ""},
    +		{"SYS_KLDFIRSTMOD", Const, 0, ""},
    +		{"SYS_KLDLOAD", Const, 0, ""},
    +		{"SYS_KLDNEXT", Const, 0, ""},
    +		{"SYS_KLDSTAT", Const, 0, ""},
    +		{"SYS_KLDSYM", Const, 0, ""},
    +		{"SYS_KLDUNLOAD", Const, 0, ""},
    +		{"SYS_KLDUNLOADF", Const, 0, ""},
    +		{"SYS_KMQ_NOTIFY", Const, 14, ""},
    +		{"SYS_KMQ_OPEN", Const, 14, ""},
    +		{"SYS_KMQ_SETATTR", Const, 14, ""},
    +		{"SYS_KMQ_TIMEDRECEIVE", Const, 14, ""},
    +		{"SYS_KMQ_TIMEDSEND", Const, 14, ""},
    +		{"SYS_KMQ_UNLINK", Const, 14, ""},
    +		{"SYS_KQUEUE", Const, 0, ""},
    +		{"SYS_KQUEUE1", Const, 1, ""},
    +		{"SYS_KSEM_CLOSE", Const, 14, ""},
    +		{"SYS_KSEM_DESTROY", Const, 14, ""},
    +		{"SYS_KSEM_GETVALUE", Const, 14, ""},
    +		{"SYS_KSEM_INIT", Const, 14, ""},
    +		{"SYS_KSEM_OPEN", Const, 14, ""},
    +		{"SYS_KSEM_POST", Const, 14, ""},
    +		{"SYS_KSEM_TIMEDWAIT", Const, 14, ""},
    +		{"SYS_KSEM_TRYWAIT", Const, 14, ""},
    +		{"SYS_KSEM_UNLINK", Const, 14, ""},
    +		{"SYS_KSEM_WAIT", Const, 14, ""},
    +		{"SYS_KTIMER_CREATE", Const, 0, ""},
    +		{"SYS_KTIMER_DELETE", Const, 0, ""},
    +		{"SYS_KTIMER_GETOVERRUN", Const, 0, ""},
    +		{"SYS_KTIMER_GETTIME", Const, 0, ""},
    +		{"SYS_KTIMER_SETTIME", Const, 0, ""},
    +		{"SYS_KTRACE", Const, 0, ""},
    +		{"SYS_LCHFLAGS", Const, 0, ""},
    +		{"SYS_LCHMOD", Const, 0, ""},
    +		{"SYS_LCHOWN", Const, 0, ""},
    +		{"SYS_LCHOWN32", Const, 0, ""},
    +		{"SYS_LEDGER", Const, 16, ""},
    +		{"SYS_LGETFH", Const, 0, ""},
    +		{"SYS_LGETXATTR", Const, 0, ""},
    +		{"SYS_LINK", Const, 0, ""},
    +		{"SYS_LINKAT", Const, 0, ""},
    +		{"SYS_LIO_LISTIO", Const, 0, ""},
    +		{"SYS_LISTEN", Const, 0, ""},
    +		{"SYS_LISTXATTR", Const, 0, ""},
    +		{"SYS_LLISTXATTR", Const, 0, ""},
    +		{"SYS_LOCK", Const, 0, ""},
    +		{"SYS_LOOKUP_DCOOKIE", Const, 0, ""},
    +		{"SYS_LPATHCONF", Const, 0, ""},
    +		{"SYS_LREMOVEXATTR", Const, 0, ""},
    +		{"SYS_LSEEK", Const, 0, ""},
    +		{"SYS_LSETXATTR", Const, 0, ""},
    +		{"SYS_LSTAT", Const, 0, ""},
    +		{"SYS_LSTAT64", Const, 0, ""},
    +		{"SYS_LSTAT64_EXTENDED", Const, 0, ""},
    +		{"SYS_LSTATV", Const, 0, ""},
    +		{"SYS_LSTAT_EXTENDED", Const, 0, ""},
    +		{"SYS_LUTIMES", Const, 0, ""},
    +		{"SYS_MAC_SYSCALL", Const, 0, ""},
    +		{"SYS_MADVISE", Const, 0, ""},
    +		{"SYS_MADVISE1", Const, 0, ""},
    +		{"SYS_MAXSYSCALL", Const, 0, ""},
    +		{"SYS_MBIND", Const, 0, ""},
    +		{"SYS_MIGRATE_PAGES", Const, 0, ""},
    +		{"SYS_MINCORE", Const, 0, ""},
    +		{"SYS_MINHERIT", Const, 0, ""},
    +		{"SYS_MKCOMPLEX", Const, 0, ""},
    +		{"SYS_MKDIR", Const, 0, ""},
    +		{"SYS_MKDIRAT", Const, 0, ""},
    +		{"SYS_MKDIR_EXTENDED", Const, 0, ""},
    +		{"SYS_MKFIFO", Const, 0, ""},
    +		{"SYS_MKFIFOAT", Const, 0, ""},
    +		{"SYS_MKFIFO_EXTENDED", Const, 0, ""},
    +		{"SYS_MKNOD", Const, 0, ""},
    +		{"SYS_MKNODAT", Const, 0, ""},
    +		{"SYS_MLOCK", Const, 0, ""},
    +		{"SYS_MLOCKALL", Const, 0, ""},
    +		{"SYS_MMAP", Const, 0, ""},
    +		{"SYS_MMAP2", Const, 0, ""},
    +		{"SYS_MODCTL", Const, 1, ""},
    +		{"SYS_MODFIND", Const, 0, ""},
    +		{"SYS_MODFNEXT", Const, 0, ""},
    +		{"SYS_MODIFY_LDT", Const, 0, ""},
    +		{"SYS_MODNEXT", Const, 0, ""},
    +		{"SYS_MODSTAT", Const, 0, ""},
    +		{"SYS_MODWATCH", Const, 0, ""},
    +		{"SYS_MOUNT", Const, 0, ""},
    +		{"SYS_MOVE_PAGES", Const, 0, ""},
    +		{"SYS_MPROTECT", Const, 0, ""},
    +		{"SYS_MPX", Const, 0, ""},
    +		{"SYS_MQUERY", Const, 1, ""},
    +		{"SYS_MQ_GETSETATTR", Const, 0, ""},
    +		{"SYS_MQ_NOTIFY", Const, 0, ""},
    +		{"SYS_MQ_OPEN", Const, 0, ""},
    +		{"SYS_MQ_TIMEDRECEIVE", Const, 0, ""},
    +		{"SYS_MQ_TIMEDSEND", Const, 0, ""},
    +		{"SYS_MQ_UNLINK", Const, 0, ""},
    +		{"SYS_MREMAP", Const, 0, ""},
    +		{"SYS_MSGCTL", Const, 0, ""},
    +		{"SYS_MSGGET", Const, 0, ""},
    +		{"SYS_MSGRCV", Const, 0, ""},
    +		{"SYS_MSGRCV_NOCANCEL", Const, 0, ""},
    +		{"SYS_MSGSND", Const, 0, ""},
    +		{"SYS_MSGSND_NOCANCEL", Const, 0, ""},
    +		{"SYS_MSGSYS", Const, 0, ""},
    +		{"SYS_MSYNC", Const, 0, ""},
    +		{"SYS_MSYNC_NOCANCEL", Const, 0, ""},
    +		{"SYS_MUNLOCK", Const, 0, ""},
    +		{"SYS_MUNLOCKALL", Const, 0, ""},
    +		{"SYS_MUNMAP", Const, 0, ""},
    +		{"SYS_NAME_TO_HANDLE_AT", Const, 0, ""},
    +		{"SYS_NANOSLEEP", Const, 0, ""},
    +		{"SYS_NEWFSTATAT", Const, 0, ""},
    +		{"SYS_NFSCLNT", Const, 0, ""},
    +		{"SYS_NFSSERVCTL", Const, 0, ""},
    +		{"SYS_NFSSVC", Const, 0, ""},
    +		{"SYS_NFSTAT", Const, 0, ""},
    +		{"SYS_NICE", Const, 0, ""},
    +		{"SYS_NLM_SYSCALL", Const, 14, ""},
    +		{"SYS_NLSTAT", Const, 0, ""},
    +		{"SYS_NMOUNT", Const, 0, ""},
    +		{"SYS_NSTAT", Const, 0, ""},
    +		{"SYS_NTP_ADJTIME", Const, 0, ""},
    +		{"SYS_NTP_GETTIME", Const, 0, ""},
    +		{"SYS_NUMA_GETAFFINITY", Const, 14, ""},
    +		{"SYS_NUMA_SETAFFINITY", Const, 14, ""},
    +		{"SYS_OABI_SYSCALL_BASE", Const, 0, ""},
    +		{"SYS_OBREAK", Const, 0, ""},
    +		{"SYS_OLDFSTAT", Const, 0, ""},
    +		{"SYS_OLDLSTAT", Const, 0, ""},
    +		{"SYS_OLDOLDUNAME", Const, 0, ""},
    +		{"SYS_OLDSTAT", Const, 0, ""},
    +		{"SYS_OLDUNAME", Const, 0, ""},
    +		{"SYS_OPEN", Const, 0, ""},
    +		{"SYS_OPENAT", Const, 0, ""},
    +		{"SYS_OPENBSD_POLL", Const, 0, ""},
    +		{"SYS_OPEN_BY_HANDLE_AT", Const, 0, ""},
    +		{"SYS_OPEN_DPROTECTED_NP", Const, 16, ""},
    +		{"SYS_OPEN_EXTENDED", Const, 0, ""},
    +		{"SYS_OPEN_NOCANCEL", Const, 0, ""},
    +		{"SYS_OVADVISE", Const, 0, ""},
    +		{"SYS_PACCEPT", Const, 1, ""},
    +		{"SYS_PATHCONF", Const, 0, ""},
    +		{"SYS_PAUSE", Const, 0, ""},
    +		{"SYS_PCICONFIG_IOBASE", Const, 0, ""},
    +		{"SYS_PCICONFIG_READ", Const, 0, ""},
    +		{"SYS_PCICONFIG_WRITE", Const, 0, ""},
    +		{"SYS_PDFORK", Const, 0, ""},
    +		{"SYS_PDGETPID", Const, 0, ""},
    +		{"SYS_PDKILL", Const, 0, ""},
    +		{"SYS_PERF_EVENT_OPEN", Const, 0, ""},
    +		{"SYS_PERSONALITY", Const, 0, ""},
    +		{"SYS_PID_HIBERNATE", Const, 0, ""},
    +		{"SYS_PID_RESUME", Const, 0, ""},
    +		{"SYS_PID_SHUTDOWN_SOCKETS", Const, 0, ""},
    +		{"SYS_PID_SUSPEND", Const, 0, ""},
    +		{"SYS_PIPE", Const, 0, ""},
    +		{"SYS_PIPE2", Const, 0, ""},
    +		{"SYS_PIVOT_ROOT", Const, 0, ""},
    +		{"SYS_PMC_CONTROL", Const, 1, ""},
    +		{"SYS_PMC_GET_INFO", Const, 1, ""},
    +		{"SYS_POLL", Const, 0, ""},
    +		{"SYS_POLLTS", Const, 1, ""},
    +		{"SYS_POLL_NOCANCEL", Const, 0, ""},
    +		{"SYS_POSIX_FADVISE", Const, 0, ""},
    +		{"SYS_POSIX_FALLOCATE", Const, 0, ""},
    +		{"SYS_POSIX_OPENPT", Const, 0, ""},
    +		{"SYS_POSIX_SPAWN", Const, 0, ""},
    +		{"SYS_PPOLL", Const, 0, ""},
    +		{"SYS_PRCTL", Const, 0, ""},
    +		{"SYS_PREAD", Const, 0, ""},
    +		{"SYS_PREAD64", Const, 0, ""},
    +		{"SYS_PREADV", Const, 0, ""},
    +		{"SYS_PREAD_NOCANCEL", Const, 0, ""},
    +		{"SYS_PRLIMIT64", Const, 0, ""},
    +		{"SYS_PROCCTL", Const, 3, ""},
    +		{"SYS_PROCESS_POLICY", Const, 0, ""},
    +		{"SYS_PROCESS_VM_READV", Const, 0, ""},
    +		{"SYS_PROCESS_VM_WRITEV", Const, 0, ""},
    +		{"SYS_PROC_INFO", Const, 0, ""},
    +		{"SYS_PROF", Const, 0, ""},
    +		{"SYS_PROFIL", Const, 0, ""},
    +		{"SYS_PSELECT", Const, 0, ""},
    +		{"SYS_PSELECT6", Const, 0, ""},
    +		{"SYS_PSET_ASSIGN", Const, 1, ""},
    +		{"SYS_PSET_CREATE", Const, 1, ""},
    +		{"SYS_PSET_DESTROY", Const, 1, ""},
    +		{"SYS_PSYNCH_CVBROAD", Const, 0, ""},
    +		{"SYS_PSYNCH_CVCLRPREPOST", Const, 0, ""},
    +		{"SYS_PSYNCH_CVSIGNAL", Const, 0, ""},
    +		{"SYS_PSYNCH_CVWAIT", Const, 0, ""},
    +		{"SYS_PSYNCH_MUTEXDROP", Const, 0, ""},
    +		{"SYS_PSYNCH_MUTEXWAIT", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_DOWNGRADE", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_RDLOCK", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_UNLOCK", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_UNLOCK2", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_UPGRADE", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_WRLOCK", Const, 0, ""},
    +		{"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0, ""},
    +		{"SYS_PTRACE", Const, 0, ""},
    +		{"SYS_PUTPMSG", Const, 0, ""},
    +		{"SYS_PWRITE", Const, 0, ""},
    +		{"SYS_PWRITE64", Const, 0, ""},
    +		{"SYS_PWRITEV", Const, 0, ""},
    +		{"SYS_PWRITE_NOCANCEL", Const, 0, ""},
    +		{"SYS_QUERY_MODULE", Const, 0, ""},
    +		{"SYS_QUOTACTL", Const, 0, ""},
    +		{"SYS_RASCTL", Const, 1, ""},
    +		{"SYS_RCTL_ADD_RULE", Const, 0, ""},
    +		{"SYS_RCTL_GET_LIMITS", Const, 0, ""},
    +		{"SYS_RCTL_GET_RACCT", Const, 0, ""},
    +		{"SYS_RCTL_GET_RULES", Const, 0, ""},
    +		{"SYS_RCTL_REMOVE_RULE", Const, 0, ""},
    +		{"SYS_READ", Const, 0, ""},
    +		{"SYS_READAHEAD", Const, 0, ""},
    +		{"SYS_READDIR", Const, 0, ""},
    +		{"SYS_READLINK", Const, 0, ""},
    +		{"SYS_READLINKAT", Const, 0, ""},
    +		{"SYS_READV", Const, 0, ""},
    +		{"SYS_READV_NOCANCEL", Const, 0, ""},
    +		{"SYS_READ_NOCANCEL", Const, 0, ""},
    +		{"SYS_REBOOT", Const, 0, ""},
    +		{"SYS_RECV", Const, 0, ""},
    +		{"SYS_RECVFROM", Const, 0, ""},
    +		{"SYS_RECVFROM_NOCANCEL", Const, 0, ""},
    +		{"SYS_RECVMMSG", Const, 0, ""},
    +		{"SYS_RECVMSG", Const, 0, ""},
    +		{"SYS_RECVMSG_NOCANCEL", Const, 0, ""},
    +		{"SYS_REMAP_FILE_PAGES", Const, 0, ""},
    +		{"SYS_REMOVEXATTR", Const, 0, ""},
    +		{"SYS_RENAME", Const, 0, ""},
    +		{"SYS_RENAMEAT", Const, 0, ""},
    +		{"SYS_REQUEST_KEY", Const, 0, ""},
    +		{"SYS_RESTART_SYSCALL", Const, 0, ""},
    +		{"SYS_REVOKE", Const, 0, ""},
    +		{"SYS_RFORK", Const, 0, ""},
    +		{"SYS_RMDIR", Const, 0, ""},
    +		{"SYS_RTPRIO", Const, 0, ""},
    +		{"SYS_RTPRIO_THREAD", Const, 0, ""},
    +		{"SYS_RT_SIGACTION", Const, 0, ""},
    +		{"SYS_RT_SIGPENDING", Const, 0, ""},
    +		{"SYS_RT_SIGPROCMASK", Const, 0, ""},
    +		{"SYS_RT_SIGQUEUEINFO", Const, 0, ""},
    +		{"SYS_RT_SIGRETURN", Const, 0, ""},
    +		{"SYS_RT_SIGSUSPEND", Const, 0, ""},
    +		{"SYS_RT_SIGTIMEDWAIT", Const, 0, ""},
    +		{"SYS_RT_TGSIGQUEUEINFO", Const, 0, ""},
    +		{"SYS_SBRK", Const, 0, ""},
    +		{"SYS_SCHED_GETAFFINITY", Const, 0, ""},
    +		{"SYS_SCHED_GETPARAM", Const, 0, ""},
    +		{"SYS_SCHED_GETSCHEDULER", Const, 0, ""},
    +		{"SYS_SCHED_GET_PRIORITY_MAX", Const, 0, ""},
    +		{"SYS_SCHED_GET_PRIORITY_MIN", Const, 0, ""},
    +		{"SYS_SCHED_RR_GET_INTERVAL", Const, 0, ""},
    +		{"SYS_SCHED_SETAFFINITY", Const, 0, ""},
    +		{"SYS_SCHED_SETPARAM", Const, 0, ""},
    +		{"SYS_SCHED_SETSCHEDULER", Const, 0, ""},
    +		{"SYS_SCHED_YIELD", Const, 0, ""},
    +		{"SYS_SCTP_GENERIC_RECVMSG", Const, 0, ""},
    +		{"SYS_SCTP_GENERIC_SENDMSG", Const, 0, ""},
    +		{"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0, ""},
    +		{"SYS_SCTP_PEELOFF", Const, 0, ""},
    +		{"SYS_SEARCHFS", Const, 0, ""},
    +		{"SYS_SECURITY", Const, 0, ""},
    +		{"SYS_SELECT", Const, 0, ""},
    +		{"SYS_SELECT_NOCANCEL", Const, 0, ""},
    +		{"SYS_SEMCONFIG", Const, 1, ""},
    +		{"SYS_SEMCTL", Const, 0, ""},
    +		{"SYS_SEMGET", Const, 0, ""},
    +		{"SYS_SEMOP", Const, 0, ""},
    +		{"SYS_SEMSYS", Const, 0, ""},
    +		{"SYS_SEMTIMEDOP", Const, 0, ""},
    +		{"SYS_SEM_CLOSE", Const, 0, ""},
    +		{"SYS_SEM_DESTROY", Const, 0, ""},
    +		{"SYS_SEM_GETVALUE", Const, 0, ""},
    +		{"SYS_SEM_INIT", Const, 0, ""},
    +		{"SYS_SEM_OPEN", Const, 0, ""},
    +		{"SYS_SEM_POST", Const, 0, ""},
    +		{"SYS_SEM_TRYWAIT", Const, 0, ""},
    +		{"SYS_SEM_UNLINK", Const, 0, ""},
    +		{"SYS_SEM_WAIT", Const, 0, ""},
    +		{"SYS_SEM_WAIT_NOCANCEL", Const, 0, ""},
    +		{"SYS_SEND", Const, 0, ""},
    +		{"SYS_SENDFILE", Const, 0, ""},
    +		{"SYS_SENDFILE64", Const, 0, ""},
    +		{"SYS_SENDMMSG", Const, 0, ""},
    +		{"SYS_SENDMSG", Const, 0, ""},
    +		{"SYS_SENDMSG_NOCANCEL", Const, 0, ""},
    +		{"SYS_SENDTO", Const, 0, ""},
    +		{"SYS_SENDTO_NOCANCEL", Const, 0, ""},
    +		{"SYS_SETATTRLIST", Const, 0, ""},
    +		{"SYS_SETAUDIT", Const, 0, ""},
    +		{"SYS_SETAUDIT_ADDR", Const, 0, ""},
    +		{"SYS_SETAUID", Const, 0, ""},
    +		{"SYS_SETCONTEXT", Const, 0, ""},
    +		{"SYS_SETDOMAINNAME", Const, 0, ""},
    +		{"SYS_SETEGID", Const, 0, ""},
    +		{"SYS_SETEUID", Const, 0, ""},
    +		{"SYS_SETFIB", Const, 0, ""},
    +		{"SYS_SETFSGID", Const, 0, ""},
    +		{"SYS_SETFSGID32", Const, 0, ""},
    +		{"SYS_SETFSUID", Const, 0, ""},
    +		{"SYS_SETFSUID32", Const, 0, ""},
    +		{"SYS_SETGID", Const, 0, ""},
    +		{"SYS_SETGID32", Const, 0, ""},
    +		{"SYS_SETGROUPS", Const, 0, ""},
    +		{"SYS_SETGROUPS32", Const, 0, ""},
    +		{"SYS_SETHOSTNAME", Const, 0, ""},
    +		{"SYS_SETITIMER", Const, 0, ""},
    +		{"SYS_SETLCID", Const, 0, ""},
    +		{"SYS_SETLOGIN", Const, 0, ""},
    +		{"SYS_SETLOGINCLASS", Const, 0, ""},
    +		{"SYS_SETNS", Const, 0, ""},
    +		{"SYS_SETPGID", Const, 0, ""},
    +		{"SYS_SETPRIORITY", Const, 0, ""},
    +		{"SYS_SETPRIVEXEC", Const, 0, ""},
    +		{"SYS_SETREGID", Const, 0, ""},
    +		{"SYS_SETREGID32", Const, 0, ""},
    +		{"SYS_SETRESGID", Const, 0, ""},
    +		{"SYS_SETRESGID32", Const, 0, ""},
    +		{"SYS_SETRESUID", Const, 0, ""},
    +		{"SYS_SETRESUID32", Const, 0, ""},
    +		{"SYS_SETREUID", Const, 0, ""},
    +		{"SYS_SETREUID32", Const, 0, ""},
    +		{"SYS_SETRLIMIT", Const, 0, ""},
    +		{"SYS_SETRTABLE", Const, 1, ""},
    +		{"SYS_SETSGROUPS", Const, 0, ""},
    +		{"SYS_SETSID", Const, 0, ""},
    +		{"SYS_SETSOCKOPT", Const, 0, ""},
    +		{"SYS_SETTID", Const, 0, ""},
    +		{"SYS_SETTID_WITH_PID", Const, 0, ""},
    +		{"SYS_SETTIMEOFDAY", Const, 0, ""},
    +		{"SYS_SETUID", Const, 0, ""},
    +		{"SYS_SETUID32", Const, 0, ""},
    +		{"SYS_SETWGROUPS", Const, 0, ""},
    +		{"SYS_SETXATTR", Const, 0, ""},
    +		{"SYS_SET_MEMPOLICY", Const, 0, ""},
    +		{"SYS_SET_ROBUST_LIST", Const, 0, ""},
    +		{"SYS_SET_THREAD_AREA", Const, 0, ""},
    +		{"SYS_SET_TID_ADDRESS", Const, 0, ""},
    +		{"SYS_SGETMASK", Const, 0, ""},
    +		{"SYS_SHARED_REGION_CHECK_NP", Const, 0, ""},
    +		{"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0, ""},
    +		{"SYS_SHMAT", Const, 0, ""},
    +		{"SYS_SHMCTL", Const, 0, ""},
    +		{"SYS_SHMDT", Const, 0, ""},
    +		{"SYS_SHMGET", Const, 0, ""},
    +		{"SYS_SHMSYS", Const, 0, ""},
    +		{"SYS_SHM_OPEN", Const, 0, ""},
    +		{"SYS_SHM_UNLINK", Const, 0, ""},
    +		{"SYS_SHUTDOWN", Const, 0, ""},
    +		{"SYS_SIGACTION", Const, 0, ""},
    +		{"SYS_SIGALTSTACK", Const, 0, ""},
    +		{"SYS_SIGNAL", Const, 0, ""},
    +		{"SYS_SIGNALFD", Const, 0, ""},
    +		{"SYS_SIGNALFD4", Const, 0, ""},
    +		{"SYS_SIGPENDING", Const, 0, ""},
    +		{"SYS_SIGPROCMASK", Const, 0, ""},
    +		{"SYS_SIGQUEUE", Const, 0, ""},
    +		{"SYS_SIGQUEUEINFO", Const, 1, ""},
    +		{"SYS_SIGRETURN", Const, 0, ""},
    +		{"SYS_SIGSUSPEND", Const, 0, ""},
    +		{"SYS_SIGSUSPEND_NOCANCEL", Const, 0, ""},
    +		{"SYS_SIGTIMEDWAIT", Const, 0, ""},
    +		{"SYS_SIGWAIT", Const, 0, ""},
    +		{"SYS_SIGWAITINFO", Const, 0, ""},
    +		{"SYS_SOCKET", Const, 0, ""},
    +		{"SYS_SOCKETCALL", Const, 0, ""},
    +		{"SYS_SOCKETPAIR", Const, 0, ""},
    +		{"SYS_SPLICE", Const, 0, ""},
    +		{"SYS_SSETMASK", Const, 0, ""},
    +		{"SYS_SSTK", Const, 0, ""},
    +		{"SYS_STACK_SNAPSHOT", Const, 0, ""},
    +		{"SYS_STAT", Const, 0, ""},
    +		{"SYS_STAT64", Const, 0, ""},
    +		{"SYS_STAT64_EXTENDED", Const, 0, ""},
    +		{"SYS_STATFS", Const, 0, ""},
    +		{"SYS_STATFS64", Const, 0, ""},
    +		{"SYS_STATV", Const, 0, ""},
    +		{"SYS_STATVFS1", Const, 1, ""},
    +		{"SYS_STAT_EXTENDED", Const, 0, ""},
    +		{"SYS_STIME", Const, 0, ""},
    +		{"SYS_STTY", Const, 0, ""},
    +		{"SYS_SWAPCONTEXT", Const, 0, ""},
    +		{"SYS_SWAPCTL", Const, 1, ""},
    +		{"SYS_SWAPOFF", Const, 0, ""},
    +		{"SYS_SWAPON", Const, 0, ""},
    +		{"SYS_SYMLINK", Const, 0, ""},
    +		{"SYS_SYMLINKAT", Const, 0, ""},
    +		{"SYS_SYNC", Const, 0, ""},
    +		{"SYS_SYNCFS", Const, 0, ""},
    +		{"SYS_SYNC_FILE_RANGE", Const, 0, ""},
    +		{"SYS_SYSARCH", Const, 0, ""},
    +		{"SYS_SYSCALL", Const, 0, ""},
    +		{"SYS_SYSCALL_BASE", Const, 0, ""},
    +		{"SYS_SYSFS", Const, 0, ""},
    +		{"SYS_SYSINFO", Const, 0, ""},
    +		{"SYS_SYSLOG", Const, 0, ""},
    +		{"SYS_TEE", Const, 0, ""},
    +		{"SYS_TGKILL", Const, 0, ""},
    +		{"SYS_THREAD_SELFID", Const, 0, ""},
    +		{"SYS_THR_CREATE", Const, 0, ""},
    +		{"SYS_THR_EXIT", Const, 0, ""},
    +		{"SYS_THR_KILL", Const, 0, ""},
    +		{"SYS_THR_KILL2", Const, 0, ""},
    +		{"SYS_THR_NEW", Const, 0, ""},
    +		{"SYS_THR_SELF", Const, 0, ""},
    +		{"SYS_THR_SET_NAME", Const, 0, ""},
    +		{"SYS_THR_SUSPEND", Const, 0, ""},
    +		{"SYS_THR_WAKE", Const, 0, ""},
    +		{"SYS_TIME", Const, 0, ""},
    +		{"SYS_TIMERFD_CREATE", Const, 0, ""},
    +		{"SYS_TIMERFD_GETTIME", Const, 0, ""},
    +		{"SYS_TIMERFD_SETTIME", Const, 0, ""},
    +		{"SYS_TIMER_CREATE", Const, 0, ""},
    +		{"SYS_TIMER_DELETE", Const, 0, ""},
    +		{"SYS_TIMER_GETOVERRUN", Const, 0, ""},
    +		{"SYS_TIMER_GETTIME", Const, 0, ""},
    +		{"SYS_TIMER_SETTIME", Const, 0, ""},
    +		{"SYS_TIMES", Const, 0, ""},
    +		{"SYS_TKILL", Const, 0, ""},
    +		{"SYS_TRUNCATE", Const, 0, ""},
    +		{"SYS_TRUNCATE64", Const, 0, ""},
    +		{"SYS_TUXCALL", Const, 0, ""},
    +		{"SYS_UGETRLIMIT", Const, 0, ""},
    +		{"SYS_ULIMIT", Const, 0, ""},
    +		{"SYS_UMASK", Const, 0, ""},
    +		{"SYS_UMASK_EXTENDED", Const, 0, ""},
    +		{"SYS_UMOUNT", Const, 0, ""},
    +		{"SYS_UMOUNT2", Const, 0, ""},
    +		{"SYS_UNAME", Const, 0, ""},
    +		{"SYS_UNDELETE", Const, 0, ""},
    +		{"SYS_UNLINK", Const, 0, ""},
    +		{"SYS_UNLINKAT", Const, 0, ""},
    +		{"SYS_UNMOUNT", Const, 0, ""},
    +		{"SYS_UNSHARE", Const, 0, ""},
    +		{"SYS_USELIB", Const, 0, ""},
    +		{"SYS_USTAT", Const, 0, ""},
    +		{"SYS_UTIME", Const, 0, ""},
    +		{"SYS_UTIMENSAT", Const, 0, ""},
    +		{"SYS_UTIMES", Const, 0, ""},
    +		{"SYS_UTRACE", Const, 0, ""},
    +		{"SYS_UUIDGEN", Const, 0, ""},
    +		{"SYS_VADVISE", Const, 1, ""},
    +		{"SYS_VFORK", Const, 0, ""},
    +		{"SYS_VHANGUP", Const, 0, ""},
    +		{"SYS_VM86", Const, 0, ""},
    +		{"SYS_VM86OLD", Const, 0, ""},
    +		{"SYS_VMSPLICE", Const, 0, ""},
    +		{"SYS_VM_PRESSURE_MONITOR", Const, 0, ""},
    +		{"SYS_VSERVER", Const, 0, ""},
    +		{"SYS_WAIT4", Const, 0, ""},
    +		{"SYS_WAIT4_NOCANCEL", Const, 0, ""},
    +		{"SYS_WAIT6", Const, 1, ""},
    +		{"SYS_WAITEVENT", Const, 0, ""},
    +		{"SYS_WAITID", Const, 0, ""},
    +		{"SYS_WAITID_NOCANCEL", Const, 0, ""},
    +		{"SYS_WAITPID", Const, 0, ""},
    +		{"SYS_WATCHEVENT", Const, 0, ""},
    +		{"SYS_WORKQ_KERNRETURN", Const, 0, ""},
    +		{"SYS_WORKQ_OPEN", Const, 0, ""},
    +		{"SYS_WRITE", Const, 0, ""},
    +		{"SYS_WRITEV", Const, 0, ""},
    +		{"SYS_WRITEV_NOCANCEL", Const, 0, ""},
    +		{"SYS_WRITE_NOCANCEL", Const, 0, ""},
    +		{"SYS_YIELD", Const, 0, ""},
    +		{"SYS__LLSEEK", Const, 0, ""},
    +		{"SYS__LWP_CONTINUE", Const, 1, ""},
    +		{"SYS__LWP_CREATE", Const, 1, ""},
    +		{"SYS__LWP_CTL", Const, 1, ""},
    +		{"SYS__LWP_DETACH", Const, 1, ""},
    +		{"SYS__LWP_EXIT", Const, 1, ""},
    +		{"SYS__LWP_GETNAME", Const, 1, ""},
    +		{"SYS__LWP_GETPRIVATE", Const, 1, ""},
    +		{"SYS__LWP_KILL", Const, 1, ""},
    +		{"SYS__LWP_PARK", Const, 1, ""},
    +		{"SYS__LWP_SELF", Const, 1, ""},
    +		{"SYS__LWP_SETNAME", Const, 1, ""},
    +		{"SYS__LWP_SETPRIVATE", Const, 1, ""},
    +		{"SYS__LWP_SUSPEND", Const, 1, ""},
    +		{"SYS__LWP_UNPARK", Const, 1, ""},
    +		{"SYS__LWP_UNPARK_ALL", Const, 1, ""},
    +		{"SYS__LWP_WAIT", Const, 1, ""},
    +		{"SYS__LWP_WAKEUP", Const, 1, ""},
    +		{"SYS__NEWSELECT", Const, 0, ""},
    +		{"SYS__PSET_BIND", Const, 1, ""},
    +		{"SYS__SCHED_GETAFFINITY", Const, 1, ""},
    +		{"SYS__SCHED_GETPARAM", Const, 1, ""},
    +		{"SYS__SCHED_SETAFFINITY", Const, 1, ""},
    +		{"SYS__SCHED_SETPARAM", Const, 1, ""},
    +		{"SYS__SYSCTL", Const, 0, ""},
    +		{"SYS__UMTX_LOCK", Const, 0, ""},
    +		{"SYS__UMTX_OP", Const, 0, ""},
    +		{"SYS__UMTX_UNLOCK", Const, 0, ""},
    +		{"SYS___ACL_ACLCHECK_FD", Const, 0, ""},
    +		{"SYS___ACL_ACLCHECK_FILE", Const, 0, ""},
    +		{"SYS___ACL_ACLCHECK_LINK", Const, 0, ""},
    +		{"SYS___ACL_DELETE_FD", Const, 0, ""},
    +		{"SYS___ACL_DELETE_FILE", Const, 0, ""},
    +		{"SYS___ACL_DELETE_LINK", Const, 0, ""},
    +		{"SYS___ACL_GET_FD", Const, 0, ""},
    +		{"SYS___ACL_GET_FILE", Const, 0, ""},
    +		{"SYS___ACL_GET_LINK", Const, 0, ""},
    +		{"SYS___ACL_SET_FD", Const, 0, ""},
    +		{"SYS___ACL_SET_FILE", Const, 0, ""},
    +		{"SYS___ACL_SET_LINK", Const, 0, ""},
    +		{"SYS___CAP_RIGHTS_GET", Const, 14, ""},
    +		{"SYS___CLONE", Const, 1, ""},
    +		{"SYS___DISABLE_THREADSIGNAL", Const, 0, ""},
    +		{"SYS___GETCWD", Const, 0, ""},
    +		{"SYS___GETLOGIN", Const, 1, ""},
    +		{"SYS___GET_TCB", Const, 1, ""},
    +		{"SYS___MAC_EXECVE", Const, 0, ""},
    +		{"SYS___MAC_GETFSSTAT", Const, 0, ""},
    +		{"SYS___MAC_GET_FD", Const, 0, ""},
    +		{"SYS___MAC_GET_FILE", Const, 0, ""},
    +		{"SYS___MAC_GET_LCID", Const, 0, ""},
    +		{"SYS___MAC_GET_LCTX", Const, 0, ""},
    +		{"SYS___MAC_GET_LINK", Const, 0, ""},
    +		{"SYS___MAC_GET_MOUNT", Const, 0, ""},
    +		{"SYS___MAC_GET_PID", Const, 0, ""},
    +		{"SYS___MAC_GET_PROC", Const, 0, ""},
    +		{"SYS___MAC_MOUNT", Const, 0, ""},
    +		{"SYS___MAC_SET_FD", Const, 0, ""},
    +		{"SYS___MAC_SET_FILE", Const, 0, ""},
    +		{"SYS___MAC_SET_LCTX", Const, 0, ""},
    +		{"SYS___MAC_SET_LINK", Const, 0, ""},
    +		{"SYS___MAC_SET_PROC", Const, 0, ""},
    +		{"SYS___MAC_SYSCALL", Const, 0, ""},
    +		{"SYS___OLD_SEMWAIT_SIGNAL", Const, 0, ""},
    +		{"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
    +		{"SYS___POSIX_CHOWN", Const, 1, ""},
    +		{"SYS___POSIX_FCHOWN", Const, 1, ""},
    +		{"SYS___POSIX_LCHOWN", Const, 1, ""},
    +		{"SYS___POSIX_RENAME", Const, 1, ""},
    +		{"SYS___PTHREAD_CANCELED", Const, 0, ""},
    +		{"SYS___PTHREAD_CHDIR", Const, 0, ""},
    +		{"SYS___PTHREAD_FCHDIR", Const, 0, ""},
    +		{"SYS___PTHREAD_KILL", Const, 0, ""},
    +		{"SYS___PTHREAD_MARKCANCEL", Const, 0, ""},
    +		{"SYS___PTHREAD_SIGMASK", Const, 0, ""},
    +		{"SYS___QUOTACTL", Const, 1, ""},
    +		{"SYS___SEMCTL", Const, 1, ""},
    +		{"SYS___SEMWAIT_SIGNAL", Const, 0, ""},
    +		{"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
    +		{"SYS___SETLOGIN", Const, 1, ""},
    +		{"SYS___SETUGID", Const, 0, ""},
    +		{"SYS___SET_TCB", Const, 1, ""},
    +		{"SYS___SIGACTION_SIGTRAMP", Const, 1, ""},
    +		{"SYS___SIGTIMEDWAIT", Const, 1, ""},
    +		{"SYS___SIGWAIT", Const, 0, ""},
    +		{"SYS___SIGWAIT_NOCANCEL", Const, 0, ""},
    +		{"SYS___SYSCTL", Const, 0, ""},
    +		{"SYS___TFORK", Const, 1, ""},
    +		{"SYS___THREXIT", Const, 1, ""},
    +		{"SYS___THRSIGDIVERT", Const, 1, ""},
    +		{"SYS___THRSLEEP", Const, 1, ""},
    +		{"SYS___THRWAKEUP", Const, 1, ""},
    +		{"S_ARCH1", Const, 1, ""},
    +		{"S_ARCH2", Const, 1, ""},
    +		{"S_BLKSIZE", Const, 0, ""},
    +		{"S_IEXEC", Const, 0, ""},
    +		{"S_IFBLK", Const, 0, ""},
    +		{"S_IFCHR", Const, 0, ""},
    +		{"S_IFDIR", Const, 0, ""},
    +		{"S_IFIFO", Const, 0, ""},
    +		{"S_IFLNK", Const, 0, ""},
    +		{"S_IFMT", Const, 0, ""},
    +		{"S_IFREG", Const, 0, ""},
    +		{"S_IFSOCK", Const, 0, ""},
    +		{"S_IFWHT", Const, 0, ""},
    +		{"S_IREAD", Const, 0, ""},
    +		{"S_IRGRP", Const, 0, ""},
    +		{"S_IROTH", Const, 0, ""},
    +		{"S_IRUSR", Const, 0, ""},
    +		{"S_IRWXG", Const, 0, ""},
    +		{"S_IRWXO", Const, 0, ""},
    +		{"S_IRWXU", Const, 0, ""},
    +		{"S_ISGID", Const, 0, ""},
    +		{"S_ISTXT", Const, 0, ""},
    +		{"S_ISUID", Const, 0, ""},
    +		{"S_ISVTX", Const, 0, ""},
    +		{"S_IWGRP", Const, 0, ""},
    +		{"S_IWOTH", Const, 0, ""},
    +		{"S_IWRITE", Const, 0, ""},
    +		{"S_IWUSR", Const, 0, ""},
    +		{"S_IXGRP", Const, 0, ""},
    +		{"S_IXOTH", Const, 0, ""},
    +		{"S_IXUSR", Const, 0, ""},
    +		{"S_LOGIN_SET", Const, 1, ""},
    +		{"SecurityAttributes", Type, 0, ""},
    +		{"SecurityAttributes.InheritHandle", Field, 0, ""},
    +		{"SecurityAttributes.Length", Field, 0, ""},
    +		{"SecurityAttributes.SecurityDescriptor", Field, 0, ""},
    +		{"Seek", Func, 0, "func(fd int, offset int64, whence int) (off int64, err error)"},
    +		{"Select", Func, 0, "func(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)"},
    +		{"Sendfile", Func, 0, "func(outfd int, infd int, offset *int64, count int) (written int, err error)"},
    +		{"Sendmsg", Func, 0, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (err error)"},
    +		{"SendmsgN", Func, 3, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (n int, err error)"},
    +		{"Sendto", Func, 0, "func(fd int, p []byte, flags int, to Sockaddr) (err error)"},
    +		{"Servent", Type, 0, ""},
    +		{"Servent.Aliases", Field, 0, ""},
    +		{"Servent.Name", Field, 0, ""},
    +		{"Servent.Port", Field, 0, ""},
    +		{"Servent.Proto", Field, 0, ""},
    +		{"SetBpf", Func, 0, ""},
    +		{"SetBpfBuflen", Func, 0, ""},
    +		{"SetBpfDatalink", Func, 0, ""},
    +		{"SetBpfHeadercmpl", Func, 0, ""},
    +		{"SetBpfImmediate", Func, 0, ""},
    +		{"SetBpfInterface", Func, 0, ""},
    +		{"SetBpfPromisc", Func, 0, ""},
    +		{"SetBpfTimeout", Func, 0, ""},
    +		{"SetCurrentDirectory", Func, 0, ""},
    +		{"SetEndOfFile", Func, 0, ""},
    +		{"SetEnvironmentVariable", Func, 0, ""},
    +		{"SetFileAttributes", Func, 0, ""},
    +		{"SetFileCompletionNotificationModes", Func, 2, ""},
    +		{"SetFilePointer", Func, 0, ""},
    +		{"SetFileTime", Func, 0, ""},
    +		{"SetHandleInformation", Func, 0, ""},
    +		{"SetKevent", Func, 0, ""},
    +		{"SetLsfPromisc", Func, 0, "func(name string, m bool) error"},
    +		{"SetNonblock", Func, 0, "func(fd int, nonblocking bool) (err error)"},
    +		{"Setdomainname", Func, 0, "func(p []byte) (err error)"},
    +		{"Setegid", Func, 0, "func(egid int) (err error)"},
    +		{"Setenv", Func, 0, "func(key string, value string) error"},
    +		{"Seteuid", Func, 0, "func(euid int) (err error)"},
    +		{"Setfsgid", Func, 0, "func(gid int) (err error)"},
    +		{"Setfsuid", Func, 0, "func(uid int) (err error)"},
    +		{"Setgid", Func, 0, "func(gid int) (err error)"},
    +		{"Setgroups", Func, 0, "func(gids []int) (err error)"},
    +		{"Sethostname", Func, 0, "func(p []byte) (err error)"},
    +		{"Setlogin", Func, 0, ""},
    +		{"Setpgid", Func, 0, "func(pid int, pgid int) (err error)"},
    +		{"Setpriority", Func, 0, "func(which int, who int, prio int) (err error)"},
    +		{"Setprivexec", Func, 0, ""},
    +		{"Setregid", Func, 0, "func(rgid int, egid int) (err error)"},
    +		{"Setresgid", Func, 0, "func(rgid int, egid int, sgid int) (err error)"},
    +		{"Setresuid", Func, 0, "func(ruid int, euid int, suid int) (err error)"},
    +		{"Setreuid", Func, 0, "func(ruid int, euid int) (err error)"},
    +		{"Setrlimit", Func, 0, "func(resource int, rlim *Rlimit) error"},
    +		{"Setsid", Func, 0, "func() (pid int, err error)"},
    +		{"Setsockopt", Func, 0, ""},
    +		{"SetsockoptByte", Func, 0, "func(fd int, level int, opt int, value byte) (err error)"},
    +		{"SetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int, filter *ICMPv6Filter) error"},
    +		{"SetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int, mreq *IPMreq) (err error)"},
    +		{"SetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int, mreq *IPMreqn) (err error)"},
    +		{"SetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int, mreq *IPv6Mreq) (err error)"},
    +		{"SetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int, value [4]byte) (err error)"},
    +		{"SetsockoptInt", Func, 0, "func(fd int, level int, opt int, value int) (err error)"},
    +		{"SetsockoptLinger", Func, 0, "func(fd int, level int, opt int, l *Linger) (err error)"},
    +		{"SetsockoptString", Func, 0, "func(fd int, level int, opt int, s string) (err error)"},
    +		{"SetsockoptTimeval", Func, 0, "func(fd int, level int, opt int, tv *Timeval) (err error)"},
    +		{"Settimeofday", Func, 0, "func(tv *Timeval) (err error)"},
    +		{"Setuid", Func, 0, "func(uid int) (err error)"},
    +		{"Setxattr", Func, 1, "func(path string, attr string, data []byte, flags int) (err error)"},
    +		{"Shutdown", Func, 0, "func(fd int, how int) (err error)"},
    +		{"SidTypeAlias", Const, 0, ""},
    +		{"SidTypeComputer", Const, 0, ""},
    +		{"SidTypeDeletedAccount", Const, 0, ""},
    +		{"SidTypeDomain", Const, 0, ""},
    +		{"SidTypeGroup", Const, 0, ""},
    +		{"SidTypeInvalid", Const, 0, ""},
    +		{"SidTypeLabel", Const, 0, ""},
    +		{"SidTypeUnknown", Const, 0, ""},
    +		{"SidTypeUser", Const, 0, ""},
    +		{"SidTypeWellKnownGroup", Const, 0, ""},
    +		{"Signal", Type, 0, ""},
    +		{"SizeofBpfHdr", Const, 0, ""},
    +		{"SizeofBpfInsn", Const, 0, ""},
    +		{"SizeofBpfProgram", Const, 0, ""},
    +		{"SizeofBpfStat", Const, 0, ""},
    +		{"SizeofBpfVersion", Const, 0, ""},
    +		{"SizeofBpfZbuf", Const, 0, ""},
    +		{"SizeofBpfZbufHeader", Const, 0, ""},
    +		{"SizeofCmsghdr", Const, 0, ""},
    +		{"SizeofICMPv6Filter", Const, 2, ""},
    +		{"SizeofIPMreq", Const, 0, ""},
    +		{"SizeofIPMreqn", Const, 0, ""},
    +		{"SizeofIPv6MTUInfo", Const, 2, ""},
    +		{"SizeofIPv6Mreq", Const, 0, ""},
    +		{"SizeofIfAddrmsg", Const, 0, ""},
    +		{"SizeofIfAnnounceMsghdr", Const, 1, ""},
    +		{"SizeofIfData", Const, 0, ""},
    +		{"SizeofIfInfomsg", Const, 0, ""},
    +		{"SizeofIfMsghdr", Const, 0, ""},
    +		{"SizeofIfaMsghdr", Const, 0, ""},
    +		{"SizeofIfmaMsghdr", Const, 0, ""},
    +		{"SizeofIfmaMsghdr2", Const, 0, ""},
    +		{"SizeofInet4Pktinfo", Const, 0, ""},
    +		{"SizeofInet6Pktinfo", Const, 0, ""},
    +		{"SizeofInotifyEvent", Const, 0, ""},
    +		{"SizeofLinger", Const, 0, ""},
    +		{"SizeofMsghdr", Const, 0, ""},
    +		{"SizeofNlAttr", Const, 0, ""},
    +		{"SizeofNlMsgerr", Const, 0, ""},
    +		{"SizeofNlMsghdr", Const, 0, ""},
    +		{"SizeofRtAttr", Const, 0, ""},
    +		{"SizeofRtGenmsg", Const, 0, ""},
    +		{"SizeofRtMetrics", Const, 0, ""},
    +		{"SizeofRtMsg", Const, 0, ""},
    +		{"SizeofRtMsghdr", Const, 0, ""},
    +		{"SizeofRtNexthop", Const, 0, ""},
    +		{"SizeofSockFilter", Const, 0, ""},
    +		{"SizeofSockFprog", Const, 0, ""},
    +		{"SizeofSockaddrAny", Const, 0, ""},
    +		{"SizeofSockaddrDatalink", Const, 0, ""},
    +		{"SizeofSockaddrInet4", Const, 0, ""},
    +		{"SizeofSockaddrInet6", Const, 0, ""},
    +		{"SizeofSockaddrLinklayer", Const, 0, ""},
    +		{"SizeofSockaddrNetlink", Const, 0, ""},
    +		{"SizeofSockaddrUnix", Const, 0, ""},
    +		{"SizeofTCPInfo", Const, 1, ""},
    +		{"SizeofUcred", Const, 0, ""},
    +		{"SlicePtrFromStrings", Func, 1, "func(ss []string) ([]*byte, error)"},
    +		{"SockFilter", Type, 0, ""},
    +		{"SockFilter.Code", Field, 0, ""},
    +		{"SockFilter.Jf", Field, 0, ""},
    +		{"SockFilter.Jt", Field, 0, ""},
    +		{"SockFilter.K", Field, 0, ""},
    +		{"SockFprog", Type, 0, ""},
    +		{"SockFprog.Filter", Field, 0, ""},
    +		{"SockFprog.Len", Field, 0, ""},
    +		{"SockFprog.Pad_cgo_0", Field, 0, ""},
    +		{"Sockaddr", Type, 0, ""},
    +		{"SockaddrDatalink", Type, 0, ""},
    +		{"SockaddrDatalink.Alen", Field, 0, ""},
    +		{"SockaddrDatalink.Data", Field, 0, ""},
    +		{"SockaddrDatalink.Family", Field, 0, ""},
    +		{"SockaddrDatalink.Index", Field, 0, ""},
    +		{"SockaddrDatalink.Len", Field, 0, ""},
    +		{"SockaddrDatalink.Nlen", Field, 0, ""},
    +		{"SockaddrDatalink.Slen", Field, 0, ""},
    +		{"SockaddrDatalink.Type", Field, 0, ""},
    +		{"SockaddrGen", Type, 0, ""},
    +		{"SockaddrInet4", Type, 0, ""},
    +		{"SockaddrInet4.Addr", Field, 0, ""},
    +		{"SockaddrInet4.Port", Field, 0, ""},
    +		{"SockaddrInet6", Type, 0, ""},
    +		{"SockaddrInet6.Addr", Field, 0, ""},
    +		{"SockaddrInet6.Port", Field, 0, ""},
    +		{"SockaddrInet6.ZoneId", Field, 0, ""},
    +		{"SockaddrLinklayer", Type, 0, ""},
    +		{"SockaddrLinklayer.Addr", Field, 0, ""},
    +		{"SockaddrLinklayer.Halen", Field, 0, ""},
    +		{"SockaddrLinklayer.Hatype", Field, 0, ""},
    +		{"SockaddrLinklayer.Ifindex", Field, 0, ""},
    +		{"SockaddrLinklayer.Pkttype", Field, 0, ""},
    +		{"SockaddrLinklayer.Protocol", Field, 0, ""},
    +		{"SockaddrNetlink", Type, 0, ""},
    +		{"SockaddrNetlink.Family", Field, 0, ""},
    +		{"SockaddrNetlink.Groups", Field, 0, ""},
    +		{"SockaddrNetlink.Pad", Field, 0, ""},
    +		{"SockaddrNetlink.Pid", Field, 0, ""},
    +		{"SockaddrUnix", Type, 0, ""},
    +		{"SockaddrUnix.Name", Field, 0, ""},
    +		{"Socket", Func, 0, "func(domain int, typ int, proto int) (fd int, err error)"},
    +		{"SocketControlMessage", Type, 0, ""},
    +		{"SocketControlMessage.Data", Field, 0, ""},
    +		{"SocketControlMessage.Header", Field, 0, ""},
    +		{"SocketDisableIPv6", Var, 0, ""},
    +		{"Socketpair", Func, 0, "func(domain int, typ int, proto int) (fd [2]int, err error)"},
    +		{"Splice", Func, 0, "func(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)"},
    +		{"StartProcess", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error)"},
    +		{"StartupInfo", Type, 0, ""},
    +		{"StartupInfo.Cb", Field, 0, ""},
    +		{"StartupInfo.Desktop", Field, 0, ""},
    +		{"StartupInfo.FillAttribute", Field, 0, ""},
    +		{"StartupInfo.Flags", Field, 0, ""},
    +		{"StartupInfo.ShowWindow", Field, 0, ""},
    +		{"StartupInfo.StdErr", Field, 0, ""},
    +		{"StartupInfo.StdInput", Field, 0, ""},
    +		{"StartupInfo.StdOutput", Field, 0, ""},
    +		{"StartupInfo.Title", Field, 0, ""},
    +		{"StartupInfo.X", Field, 0, ""},
    +		{"StartupInfo.XCountChars", Field, 0, ""},
    +		{"StartupInfo.XSize", Field, 0, ""},
    +		{"StartupInfo.Y", Field, 0, ""},
    +		{"StartupInfo.YCountChars", Field, 0, ""},
    +		{"StartupInfo.YSize", Field, 0, ""},
    +		{"Stat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
    +		{"Stat_t", Type, 0, ""},
    +		{"Stat_t.Atim", Field, 0, ""},
    +		{"Stat_t.Atim_ext", Field, 12, ""},
    +		{"Stat_t.Atimespec", Field, 0, ""},
    +		{"Stat_t.Birthtimespec", Field, 0, ""},
    +		{"Stat_t.Blksize", Field, 0, ""},
    +		{"Stat_t.Blocks", Field, 0, ""},
    +		{"Stat_t.Btim_ext", Field, 12, ""},
    +		{"Stat_t.Ctim", Field, 0, ""},
    +		{"Stat_t.Ctim_ext", Field, 12, ""},
    +		{"Stat_t.Ctimespec", Field, 0, ""},
    +		{"Stat_t.Dev", Field, 0, ""},
    +		{"Stat_t.Flags", Field, 0, ""},
    +		{"Stat_t.Gen", Field, 0, ""},
    +		{"Stat_t.Gid", Field, 0, ""},
    +		{"Stat_t.Ino", Field, 0, ""},
    +		{"Stat_t.Lspare", Field, 0, ""},
    +		{"Stat_t.Lspare0", Field, 2, ""},
    +		{"Stat_t.Lspare1", Field, 2, ""},
    +		{"Stat_t.Mode", Field, 0, ""},
    +		{"Stat_t.Mtim", Field, 0, ""},
    +		{"Stat_t.Mtim_ext", Field, 12, ""},
    +		{"Stat_t.Mtimespec", Field, 0, ""},
    +		{"Stat_t.Nlink", Field, 0, ""},
    +		{"Stat_t.Pad_cgo_0", Field, 0, ""},
    +		{"Stat_t.Pad_cgo_1", Field, 0, ""},
    +		{"Stat_t.Pad_cgo_2", Field, 0, ""},
    +		{"Stat_t.Padding0", Field, 12, ""},
    +		{"Stat_t.Padding1", Field, 12, ""},
    +		{"Stat_t.Qspare", Field, 0, ""},
    +		{"Stat_t.Rdev", Field, 0, ""},
    +		{"Stat_t.Size", Field, 0, ""},
    +		{"Stat_t.Spare", Field, 2, ""},
    +		{"Stat_t.Uid", Field, 0, ""},
    +		{"Stat_t.X__pad0", Field, 0, ""},
    +		{"Stat_t.X__pad1", Field, 0, ""},
    +		{"Stat_t.X__pad2", Field, 0, ""},
    +		{"Stat_t.X__st_birthtim", Field, 2, ""},
    +		{"Stat_t.X__st_ino", Field, 0, ""},
    +		{"Stat_t.X__unused", Field, 0, ""},
    +		{"Statfs", Func, 0, "func(path string, buf *Statfs_t) (err error)"},
    +		{"Statfs_t", Type, 0, ""},
    +		{"Statfs_t.Asyncreads", Field, 0, ""},
    +		{"Statfs_t.Asyncwrites", Field, 0, ""},
    +		{"Statfs_t.Bavail", Field, 0, ""},
    +		{"Statfs_t.Bfree", Field, 0, ""},
    +		{"Statfs_t.Blocks", Field, 0, ""},
    +		{"Statfs_t.Bsize", Field, 0, ""},
    +		{"Statfs_t.Charspare", Field, 0, ""},
    +		{"Statfs_t.F_asyncreads", Field, 2, ""},
    +		{"Statfs_t.F_asyncwrites", Field, 2, ""},
    +		{"Statfs_t.F_bavail", Field, 2, ""},
    +		{"Statfs_t.F_bfree", Field, 2, ""},
    +		{"Statfs_t.F_blocks", Field, 2, ""},
    +		{"Statfs_t.F_bsize", Field, 2, ""},
    +		{"Statfs_t.F_ctime", Field, 2, ""},
    +		{"Statfs_t.F_favail", Field, 2, ""},
    +		{"Statfs_t.F_ffree", Field, 2, ""},
    +		{"Statfs_t.F_files", Field, 2, ""},
    +		{"Statfs_t.F_flags", Field, 2, ""},
    +		{"Statfs_t.F_fsid", Field, 2, ""},
    +		{"Statfs_t.F_fstypename", Field, 2, ""},
    +		{"Statfs_t.F_iosize", Field, 2, ""},
    +		{"Statfs_t.F_mntfromname", Field, 2, ""},
    +		{"Statfs_t.F_mntfromspec", Field, 3, ""},
    +		{"Statfs_t.F_mntonname", Field, 2, ""},
    +		{"Statfs_t.F_namemax", Field, 2, ""},
    +		{"Statfs_t.F_owner", Field, 2, ""},
    +		{"Statfs_t.F_spare", Field, 2, ""},
    +		{"Statfs_t.F_syncreads", Field, 2, ""},
    +		{"Statfs_t.F_syncwrites", Field, 2, ""},
    +		{"Statfs_t.Ffree", Field, 0, ""},
    +		{"Statfs_t.Files", Field, 0, ""},
    +		{"Statfs_t.Flags", Field, 0, ""},
    +		{"Statfs_t.Frsize", Field, 0, ""},
    +		{"Statfs_t.Fsid", Field, 0, ""},
    +		{"Statfs_t.Fssubtype", Field, 0, ""},
    +		{"Statfs_t.Fstypename", Field, 0, ""},
    +		{"Statfs_t.Iosize", Field, 0, ""},
    +		{"Statfs_t.Mntfromname", Field, 0, ""},
    +		{"Statfs_t.Mntonname", Field, 0, ""},
    +		{"Statfs_t.Mount_info", Field, 2, ""},
    +		{"Statfs_t.Namelen", Field, 0, ""},
    +		{"Statfs_t.Namemax", Field, 0, ""},
    +		{"Statfs_t.Owner", Field, 0, ""},
    +		{"Statfs_t.Pad_cgo_0", Field, 0, ""},
    +		{"Statfs_t.Pad_cgo_1", Field, 2, ""},
    +		{"Statfs_t.Reserved", Field, 0, ""},
    +		{"Statfs_t.Spare", Field, 0, ""},
    +		{"Statfs_t.Syncreads", Field, 0, ""},
    +		{"Statfs_t.Syncwrites", Field, 0, ""},
    +		{"Statfs_t.Type", Field, 0, ""},
    +		{"Statfs_t.Version", Field, 0, ""},
    +		{"Stderr", Var, 0, ""},
    +		{"Stdin", Var, 0, ""},
    +		{"Stdout", Var, 0, ""},
    +		{"StringBytePtr", Func, 0, "func(s string) *byte"},
    +		{"StringByteSlice", Func, 0, "func(s string) []byte"},
    +		{"StringSlicePtr", Func, 0, "func(ss []string) []*byte"},
    +		{"StringToSid", Func, 0, ""},
    +		{"StringToUTF16", Func, 0, ""},
    +		{"StringToUTF16Ptr", Func, 0, ""},
    +		{"Symlink", Func, 0, "func(oldpath string, newpath string) (err error)"},
    +		{"Sync", Func, 0, "func()"},
    +		{"SyncFileRange", Func, 0, "func(fd int, off int64, n int64, flags int) (err error)"},
    +		{"SysProcAttr", Type, 0, ""},
    +		{"SysProcAttr.AdditionalInheritedHandles", Field, 17, ""},
    +		{"SysProcAttr.AmbientCaps", Field, 9, ""},
    +		{"SysProcAttr.CgroupFD", Field, 20, ""},
    +		{"SysProcAttr.Chroot", Field, 0, ""},
    +		{"SysProcAttr.Cloneflags", Field, 2, ""},
    +		{"SysProcAttr.CmdLine", Field, 0, ""},
    +		{"SysProcAttr.CreationFlags", Field, 1, ""},
    +		{"SysProcAttr.Credential", Field, 0, ""},
    +		{"SysProcAttr.Ctty", Field, 1, ""},
    +		{"SysProcAttr.Foreground", Field, 5, ""},
    +		{"SysProcAttr.GidMappings", Field, 4, ""},
    +		{"SysProcAttr.GidMappingsEnableSetgroups", Field, 5, ""},
    +		{"SysProcAttr.HideWindow", Field, 0, ""},
    +		{"SysProcAttr.Jail", Field, 21, ""},
    +		{"SysProcAttr.NoInheritHandles", Field, 16, ""},
    +		{"SysProcAttr.Noctty", Field, 0, ""},
    +		{"SysProcAttr.ParentProcess", Field, 17, ""},
    +		{"SysProcAttr.Pdeathsig", Field, 0, ""},
    +		{"SysProcAttr.Pgid", Field, 5, ""},
    +		{"SysProcAttr.PidFD", Field, 22, ""},
    +		{"SysProcAttr.ProcessAttributes", Field, 13, ""},
    +		{"SysProcAttr.Ptrace", Field, 0, ""},
    +		{"SysProcAttr.Setctty", Field, 0, ""},
    +		{"SysProcAttr.Setpgid", Field, 0, ""},
    +		{"SysProcAttr.Setsid", Field, 0, ""},
    +		{"SysProcAttr.ThreadAttributes", Field, 13, ""},
    +		{"SysProcAttr.Token", Field, 10, ""},
    +		{"SysProcAttr.UidMappings", Field, 4, ""},
    +		{"SysProcAttr.Unshareflags", Field, 7, ""},
    +		{"SysProcAttr.UseCgroupFD", Field, 20, ""},
    +		{"SysProcIDMap", Type, 4, ""},
    +		{"SysProcIDMap.ContainerID", Field, 4, ""},
    +		{"SysProcIDMap.HostID", Field, 4, ""},
    +		{"SysProcIDMap.Size", Field, 4, ""},
    +		{"Syscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"Syscall12", Func, 0, ""},
    +		{"Syscall15", Func, 0, ""},
    +		{"Syscall18", Func, 12, ""},
    +		{"Syscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
    +		{"Syscall9", Func, 0, ""},
    +		{"SyscallN", Func, 18, ""},
    +		{"Sysctl", Func, 0, ""},
    +		{"SysctlUint32", Func, 0, ""},
    +		{"Sysctlnode", Type, 2, ""},
    +		{"Sysctlnode.Flags", Field, 2, ""},
    +		{"Sysctlnode.Name", Field, 2, ""},
    +		{"Sysctlnode.Num", Field, 2, ""},
    +		{"Sysctlnode.Un", Field, 2, ""},
    +		{"Sysctlnode.Ver", Field, 2, ""},
    +		{"Sysctlnode.X__rsvd", Field, 2, ""},
    +		{"Sysctlnode.X_sysctl_desc", Field, 2, ""},
    +		{"Sysctlnode.X_sysctl_func", Field, 2, ""},
    +		{"Sysctlnode.X_sysctl_parent", Field, 2, ""},
    +		{"Sysctlnode.X_sysctl_size", Field, 2, ""},
    +		{"Sysinfo", Func, 0, "func(info *Sysinfo_t) (err error)"},
    +		{"Sysinfo_t", Type, 0, ""},
    +		{"Sysinfo_t.Bufferram", Field, 0, ""},
    +		{"Sysinfo_t.Freehigh", Field, 0, ""},
    +		{"Sysinfo_t.Freeram", Field, 0, ""},
    +		{"Sysinfo_t.Freeswap", Field, 0, ""},
    +		{"Sysinfo_t.Loads", Field, 0, ""},
    +		{"Sysinfo_t.Pad", Field, 0, ""},
    +		{"Sysinfo_t.Pad_cgo_0", Field, 0, ""},
    +		{"Sysinfo_t.Pad_cgo_1", Field, 0, ""},
    +		{"Sysinfo_t.Procs", Field, 0, ""},
    +		{"Sysinfo_t.Sharedram", Field, 0, ""},
    +		{"Sysinfo_t.Totalhigh", Field, 0, ""},
    +		{"Sysinfo_t.Totalram", Field, 0, ""},
    +		{"Sysinfo_t.Totalswap", Field, 0, ""},
    +		{"Sysinfo_t.Unit", Field, 0, ""},
    +		{"Sysinfo_t.Uptime", Field, 0, ""},
    +		{"Sysinfo_t.X_f", Field, 0, ""},
    +		{"Systemtime", Type, 0, ""},
    +		{"Systemtime.Day", Field, 0, ""},
    +		{"Systemtime.DayOfWeek", Field, 0, ""},
    +		{"Systemtime.Hour", Field, 0, ""},
    +		{"Systemtime.Milliseconds", Field, 0, ""},
    +		{"Systemtime.Minute", Field, 0, ""},
    +		{"Systemtime.Month", Field, 0, ""},
    +		{"Systemtime.Second", Field, 0, ""},
    +		{"Systemtime.Year", Field, 0, ""},
    +		{"TCGETS", Const, 0, ""},
    +		{"TCIFLUSH", Const, 1, ""},
    +		{"TCIOFLUSH", Const, 1, ""},
    +		{"TCOFLUSH", Const, 1, ""},
    +		{"TCPInfo", Type, 1, ""},
    +		{"TCPInfo.Advmss", Field, 1, ""},
    +		{"TCPInfo.Ato", Field, 1, ""},
    +		{"TCPInfo.Backoff", Field, 1, ""},
    +		{"TCPInfo.Ca_state", Field, 1, ""},
    +		{"TCPInfo.Fackets", Field, 1, ""},
    +		{"TCPInfo.Last_ack_recv", Field, 1, ""},
    +		{"TCPInfo.Last_ack_sent", Field, 1, ""},
    +		{"TCPInfo.Last_data_recv", Field, 1, ""},
    +		{"TCPInfo.Last_data_sent", Field, 1, ""},
    +		{"TCPInfo.Lost", Field, 1, ""},
    +		{"TCPInfo.Options", Field, 1, ""},
    +		{"TCPInfo.Pad_cgo_0", Field, 1, ""},
    +		{"TCPInfo.Pmtu", Field, 1, ""},
    +		{"TCPInfo.Probes", Field, 1, ""},
    +		{"TCPInfo.Rcv_mss", Field, 1, ""},
    +		{"TCPInfo.Rcv_rtt", Field, 1, ""},
    +		{"TCPInfo.Rcv_space", Field, 1, ""},
    +		{"TCPInfo.Rcv_ssthresh", Field, 1, ""},
    +		{"TCPInfo.Reordering", Field, 1, ""},
    +		{"TCPInfo.Retrans", Field, 1, ""},
    +		{"TCPInfo.Retransmits", Field, 1, ""},
    +		{"TCPInfo.Rto", Field, 1, ""},
    +		{"TCPInfo.Rtt", Field, 1, ""},
    +		{"TCPInfo.Rttvar", Field, 1, ""},
    +		{"TCPInfo.Sacked", Field, 1, ""},
    +		{"TCPInfo.Snd_cwnd", Field, 1, ""},
    +		{"TCPInfo.Snd_mss", Field, 1, ""},
    +		{"TCPInfo.Snd_ssthresh", Field, 1, ""},
    +		{"TCPInfo.State", Field, 1, ""},
    +		{"TCPInfo.Total_retrans", Field, 1, ""},
    +		{"TCPInfo.Unacked", Field, 1, ""},
    +		{"TCPKeepalive", Type, 3, ""},
    +		{"TCPKeepalive.Interval", Field, 3, ""},
    +		{"TCPKeepalive.OnOff", Field, 3, ""},
    +		{"TCPKeepalive.Time", Field, 3, ""},
    +		{"TCP_CA_NAME_MAX", Const, 0, ""},
    +		{"TCP_CONGCTL", Const, 1, ""},
    +		{"TCP_CONGESTION", Const, 0, ""},
    +		{"TCP_CONNECTIONTIMEOUT", Const, 0, ""},
    +		{"TCP_CORK", Const, 0, ""},
    +		{"TCP_DEFER_ACCEPT", Const, 0, ""},
    +		{"TCP_ENABLE_ECN", Const, 16, ""},
    +		{"TCP_INFO", Const, 0, ""},
    +		{"TCP_KEEPALIVE", Const, 0, ""},
    +		{"TCP_KEEPCNT", Const, 0, ""},
    +		{"TCP_KEEPIDLE", Const, 0, ""},
    +		{"TCP_KEEPINIT", Const, 1, ""},
    +		{"TCP_KEEPINTVL", Const, 0, ""},
    +		{"TCP_LINGER2", Const, 0, ""},
    +		{"TCP_MAXBURST", Const, 0, ""},
    +		{"TCP_MAXHLEN", Const, 0, ""},
    +		{"TCP_MAXOLEN", Const, 0, ""},
    +		{"TCP_MAXSEG", Const, 0, ""},
    +		{"TCP_MAXWIN", Const, 0, ""},
    +		{"TCP_MAX_SACK", Const, 0, ""},
    +		{"TCP_MAX_WINSHIFT", Const, 0, ""},
    +		{"TCP_MD5SIG", Const, 0, ""},
    +		{"TCP_MD5SIG_MAXKEYLEN", Const, 0, ""},
    +		{"TCP_MINMSS", Const, 0, ""},
    +		{"TCP_MINMSSOVERLOAD", Const, 0, ""},
    +		{"TCP_MSS", Const, 0, ""},
    +		{"TCP_NODELAY", Const, 0, ""},
    +		{"TCP_NOOPT", Const, 0, ""},
    +		{"TCP_NOPUSH", Const, 0, ""},
    +		{"TCP_NOTSENT_LOWAT", Const, 16, ""},
    +		{"TCP_NSTATES", Const, 1, ""},
    +		{"TCP_QUICKACK", Const, 0, ""},
    +		{"TCP_RXT_CONNDROPTIME", Const, 0, ""},
    +		{"TCP_RXT_FINDROP", Const, 0, ""},
    +		{"TCP_SACK_ENABLE", Const, 1, ""},
    +		{"TCP_SENDMOREACKS", Const, 16, ""},
    +		{"TCP_SYNCNT", Const, 0, ""},
    +		{"TCP_VENDOR", Const, 3, ""},
    +		{"TCP_WINDOW_CLAMP", Const, 0, ""},
    +		{"TCSAFLUSH", Const, 1, ""},
    +		{"TCSETS", Const, 0, ""},
    +		{"TF_DISCONNECT", Const, 0, ""},
    +		{"TF_REUSE_SOCKET", Const, 0, ""},
    +		{"TF_USE_DEFAULT_WORKER", Const, 0, ""},
    +		{"TF_USE_KERNEL_APC", Const, 0, ""},
    +		{"TF_USE_SYSTEM_THREAD", Const, 0, ""},
    +		{"TF_WRITE_BEHIND", Const, 0, ""},
    +		{"TH32CS_INHERIT", Const, 4, ""},
    +		{"TH32CS_SNAPALL", Const, 4, ""},
    +		{"TH32CS_SNAPHEAPLIST", Const, 4, ""},
    +		{"TH32CS_SNAPMODULE", Const, 4, ""},
    +		{"TH32CS_SNAPMODULE32", Const, 4, ""},
    +		{"TH32CS_SNAPPROCESS", Const, 4, ""},
    +		{"TH32CS_SNAPTHREAD", Const, 4, ""},
    +		{"TIME_ZONE_ID_DAYLIGHT", Const, 0, ""},
    +		{"TIME_ZONE_ID_STANDARD", Const, 0, ""},
    +		{"TIME_ZONE_ID_UNKNOWN", Const, 0, ""},
    +		{"TIOCCBRK", Const, 0, ""},
    +		{"TIOCCDTR", Const, 0, ""},
    +		{"TIOCCONS", Const, 0, ""},
    +		{"TIOCDCDTIMESTAMP", Const, 0, ""},
    +		{"TIOCDRAIN", Const, 0, ""},
    +		{"TIOCDSIMICROCODE", Const, 0, ""},
    +		{"TIOCEXCL", Const, 0, ""},
    +		{"TIOCEXT", Const, 0, ""},
    +		{"TIOCFLAG_CDTRCTS", Const, 1, ""},
    +		{"TIOCFLAG_CLOCAL", Const, 1, ""},
    +		{"TIOCFLAG_CRTSCTS", Const, 1, ""},
    +		{"TIOCFLAG_MDMBUF", Const, 1, ""},
    +		{"TIOCFLAG_PPS", Const, 1, ""},
    +		{"TIOCFLAG_SOFTCAR", Const, 1, ""},
    +		{"TIOCFLUSH", Const, 0, ""},
    +		{"TIOCGDEV", Const, 0, ""},
    +		{"TIOCGDRAINWAIT", Const, 0, ""},
    +		{"TIOCGETA", Const, 0, ""},
    +		{"TIOCGETD", Const, 0, ""},
    +		{"TIOCGFLAGS", Const, 1, ""},
    +		{"TIOCGICOUNT", Const, 0, ""},
    +		{"TIOCGLCKTRMIOS", Const, 0, ""},
    +		{"TIOCGLINED", Const, 1, ""},
    +		{"TIOCGPGRP", Const, 0, ""},
    +		{"TIOCGPTN", Const, 0, ""},
    +		{"TIOCGQSIZE", Const, 1, ""},
    +		{"TIOCGRANTPT", Const, 1, ""},
    +		{"TIOCGRS485", Const, 0, ""},
    +		{"TIOCGSERIAL", Const, 0, ""},
    +		{"TIOCGSID", Const, 0, ""},
    +		{"TIOCGSIZE", Const, 1, ""},
    +		{"TIOCGSOFTCAR", Const, 0, ""},
    +		{"TIOCGTSTAMP", Const, 1, ""},
    +		{"TIOCGWINSZ", Const, 0, ""},
    +		{"TIOCINQ", Const, 0, ""},
    +		{"TIOCIXOFF", Const, 0, ""},
    +		{"TIOCIXON", Const, 0, ""},
    +		{"TIOCLINUX", Const, 0, ""},
    +		{"TIOCMBIC", Const, 0, ""},
    +		{"TIOCMBIS", Const, 0, ""},
    +		{"TIOCMGDTRWAIT", Const, 0, ""},
    +		{"TIOCMGET", Const, 0, ""},
    +		{"TIOCMIWAIT", Const, 0, ""},
    +		{"TIOCMODG", Const, 0, ""},
    +		{"TIOCMODS", Const, 0, ""},
    +		{"TIOCMSDTRWAIT", Const, 0, ""},
    +		{"TIOCMSET", Const, 0, ""},
    +		{"TIOCM_CAR", Const, 0, ""},
    +		{"TIOCM_CD", Const, 0, ""},
    +		{"TIOCM_CTS", Const, 0, ""},
    +		{"TIOCM_DCD", Const, 0, ""},
    +		{"TIOCM_DSR", Const, 0, ""},
    +		{"TIOCM_DTR", Const, 0, ""},
    +		{"TIOCM_LE", Const, 0, ""},
    +		{"TIOCM_RI", Const, 0, ""},
    +		{"TIOCM_RNG", Const, 0, ""},
    +		{"TIOCM_RTS", Const, 0, ""},
    +		{"TIOCM_SR", Const, 0, ""},
    +		{"TIOCM_ST", Const, 0, ""},
    +		{"TIOCNOTTY", Const, 0, ""},
    +		{"TIOCNXCL", Const, 0, ""},
    +		{"TIOCOUTQ", Const, 0, ""},
    +		{"TIOCPKT", Const, 0, ""},
    +		{"TIOCPKT_DATA", Const, 0, ""},
    +		{"TIOCPKT_DOSTOP", Const, 0, ""},
    +		{"TIOCPKT_FLUSHREAD", Const, 0, ""},
    +		{"TIOCPKT_FLUSHWRITE", Const, 0, ""},
    +		{"TIOCPKT_IOCTL", Const, 0, ""},
    +		{"TIOCPKT_NOSTOP", Const, 0, ""},
    +		{"TIOCPKT_START", Const, 0, ""},
    +		{"TIOCPKT_STOP", Const, 0, ""},
    +		{"TIOCPTMASTER", Const, 0, ""},
    +		{"TIOCPTMGET", Const, 1, ""},
    +		{"TIOCPTSNAME", Const, 1, ""},
    +		{"TIOCPTYGNAME", Const, 0, ""},
    +		{"TIOCPTYGRANT", Const, 0, ""},
    +		{"TIOCPTYUNLK", Const, 0, ""},
    +		{"TIOCRCVFRAME", Const, 1, ""},
    +		{"TIOCREMOTE", Const, 0, ""},
    +		{"TIOCSBRK", Const, 0, ""},
    +		{"TIOCSCONS", Const, 0, ""},
    +		{"TIOCSCTTY", Const, 0, ""},
    +		{"TIOCSDRAINWAIT", Const, 0, ""},
    +		{"TIOCSDTR", Const, 0, ""},
    +		{"TIOCSERCONFIG", Const, 0, ""},
    +		{"TIOCSERGETLSR", Const, 0, ""},
    +		{"TIOCSERGETMULTI", Const, 0, ""},
    +		{"TIOCSERGSTRUCT", Const, 0, ""},
    +		{"TIOCSERGWILD", Const, 0, ""},
    +		{"TIOCSERSETMULTI", Const, 0, ""},
    +		{"TIOCSERSWILD", Const, 0, ""},
    +		{"TIOCSER_TEMT", Const, 0, ""},
    +		{"TIOCSETA", Const, 0, ""},
    +		{"TIOCSETAF", Const, 0, ""},
    +		{"TIOCSETAW", Const, 0, ""},
    +		{"TIOCSETD", Const, 0, ""},
    +		{"TIOCSFLAGS", Const, 1, ""},
    +		{"TIOCSIG", Const, 0, ""},
    +		{"TIOCSLCKTRMIOS", Const, 0, ""},
    +		{"TIOCSLINED", Const, 1, ""},
    +		{"TIOCSPGRP", Const, 0, ""},
    +		{"TIOCSPTLCK", Const, 0, ""},
    +		{"TIOCSQSIZE", Const, 1, ""},
    +		{"TIOCSRS485", Const, 0, ""},
    +		{"TIOCSSERIAL", Const, 0, ""},
    +		{"TIOCSSIZE", Const, 1, ""},
    +		{"TIOCSSOFTCAR", Const, 0, ""},
    +		{"TIOCSTART", Const, 0, ""},
    +		{"TIOCSTAT", Const, 0, ""},
    +		{"TIOCSTI", Const, 0, ""},
    +		{"TIOCSTOP", Const, 0, ""},
    +		{"TIOCSTSTAMP", Const, 1, ""},
    +		{"TIOCSWINSZ", Const, 0, ""},
    +		{"TIOCTIMESTAMP", Const, 0, ""},
    +		{"TIOCUCNTL", Const, 0, ""},
    +		{"TIOCVHANGUP", Const, 0, ""},
    +		{"TIOCXMTFRAME", Const, 1, ""},
    +		{"TOKEN_ADJUST_DEFAULT", Const, 0, ""},
    +		{"TOKEN_ADJUST_GROUPS", Const, 0, ""},
    +		{"TOKEN_ADJUST_PRIVILEGES", Const, 0, ""},
    +		{"TOKEN_ADJUST_SESSIONID", Const, 11, ""},
    +		{"TOKEN_ALL_ACCESS", Const, 0, ""},
    +		{"TOKEN_ASSIGN_PRIMARY", Const, 0, ""},
    +		{"TOKEN_DUPLICATE", Const, 0, ""},
    +		{"TOKEN_EXECUTE", Const, 0, ""},
    +		{"TOKEN_IMPERSONATE", Const, 0, ""},
    +		{"TOKEN_QUERY", Const, 0, ""},
    +		{"TOKEN_QUERY_SOURCE", Const, 0, ""},
    +		{"TOKEN_READ", Const, 0, ""},
    +		{"TOKEN_WRITE", Const, 0, ""},
    +		{"TOSTOP", Const, 0, ""},
    +		{"TRUNCATE_EXISTING", Const, 0, ""},
    +		{"TUNATTACHFILTER", Const, 0, ""},
    +		{"TUNDETACHFILTER", Const, 0, ""},
    +		{"TUNGETFEATURES", Const, 0, ""},
    +		{"TUNGETIFF", Const, 0, ""},
    +		{"TUNGETSNDBUF", Const, 0, ""},
    +		{"TUNGETVNETHDRSZ", Const, 0, ""},
    +		{"TUNSETDEBUG", Const, 0, ""},
    +		{"TUNSETGROUP", Const, 0, ""},
    +		{"TUNSETIFF", Const, 0, ""},
    +		{"TUNSETLINK", Const, 0, ""},
    +		{"TUNSETNOCSUM", Const, 0, ""},
    +		{"TUNSETOFFLOAD", Const, 0, ""},
    +		{"TUNSETOWNER", Const, 0, ""},
    +		{"TUNSETPERSIST", Const, 0, ""},
    +		{"TUNSETSNDBUF", Const, 0, ""},
    +		{"TUNSETTXFILTER", Const, 0, ""},
    +		{"TUNSETVNETHDRSZ", Const, 0, ""},
    +		{"Tee", Func, 0, "func(rfd int, wfd int, len int, flags int) (n int64, err error)"},
    +		{"TerminateProcess", Func, 0, ""},
    +		{"Termios", Type, 0, ""},
    +		{"Termios.Cc", Field, 0, ""},
    +		{"Termios.Cflag", Field, 0, ""},
    +		{"Termios.Iflag", Field, 0, ""},
    +		{"Termios.Ispeed", Field, 0, ""},
    +		{"Termios.Lflag", Field, 0, ""},
    +		{"Termios.Line", Field, 0, ""},
    +		{"Termios.Oflag", Field, 0, ""},
    +		{"Termios.Ospeed", Field, 0, ""},
    +		{"Termios.Pad_cgo_0", Field, 0, ""},
    +		{"Tgkill", Func, 0, "func(tgid int, tid int, sig Signal) (err error)"},
    +		{"Time", Func, 0, "func(t *Time_t) (tt Time_t, err error)"},
    +		{"Time_t", Type, 0, ""},
    +		{"Times", Func, 0, "func(tms *Tms) (ticks uintptr, err error)"},
    +		{"Timespec", Type, 0, ""},
    +		{"Timespec.Nsec", Field, 0, ""},
    +		{"Timespec.Pad_cgo_0", Field, 2, ""},
    +		{"Timespec.Sec", Field, 0, ""},
    +		{"TimespecToNsec", Func, 0, "func(ts Timespec) int64"},
    +		{"Timeval", Type, 0, ""},
    +		{"Timeval.Pad_cgo_0", Field, 0, ""},
    +		{"Timeval.Sec", Field, 0, ""},
    +		{"Timeval.Usec", Field, 0, ""},
    +		{"Timeval32", Type, 0, ""},
    +		{"Timeval32.Sec", Field, 0, ""},
    +		{"Timeval32.Usec", Field, 0, ""},
    +		{"TimevalToNsec", Func, 0, "func(tv Timeval) int64"},
    +		{"Timex", Type, 0, ""},
    +		{"Timex.Calcnt", Field, 0, ""},
    +		{"Timex.Constant", Field, 0, ""},
    +		{"Timex.Errcnt", Field, 0, ""},
    +		{"Timex.Esterror", Field, 0, ""},
    +		{"Timex.Freq", Field, 0, ""},
    +		{"Timex.Jitcnt", Field, 0, ""},
    +		{"Timex.Jitter", Field, 0, ""},
    +		{"Timex.Maxerror", Field, 0, ""},
    +		{"Timex.Modes", Field, 0, ""},
    +		{"Timex.Offset", Field, 0, ""},
    +		{"Timex.Pad_cgo_0", Field, 0, ""},
    +		{"Timex.Pad_cgo_1", Field, 0, ""},
    +		{"Timex.Pad_cgo_2", Field, 0, ""},
    +		{"Timex.Pad_cgo_3", Field, 0, ""},
    +		{"Timex.Ppsfreq", Field, 0, ""},
    +		{"Timex.Precision", Field, 0, ""},
    +		{"Timex.Shift", Field, 0, ""},
    +		{"Timex.Stabil", Field, 0, ""},
    +		{"Timex.Status", Field, 0, ""},
    +		{"Timex.Stbcnt", Field, 0, ""},
    +		{"Timex.Tai", Field, 0, ""},
    +		{"Timex.Tick", Field, 0, ""},
    +		{"Timex.Time", Field, 0, ""},
    +		{"Timex.Tolerance", Field, 0, ""},
    +		{"Timezoneinformation", Type, 0, ""},
    +		{"Timezoneinformation.Bias", Field, 0, ""},
    +		{"Timezoneinformation.DaylightBias", Field, 0, ""},
    +		{"Timezoneinformation.DaylightDate", Field, 0, ""},
    +		{"Timezoneinformation.DaylightName", Field, 0, ""},
    +		{"Timezoneinformation.StandardBias", Field, 0, ""},
    +		{"Timezoneinformation.StandardDate", Field, 0, ""},
    +		{"Timezoneinformation.StandardName", Field, 0, ""},
    +		{"Tms", Type, 0, ""},
    +		{"Tms.Cstime", Field, 0, ""},
    +		{"Tms.Cutime", Field, 0, ""},
    +		{"Tms.Stime", Field, 0, ""},
    +		{"Tms.Utime", Field, 0, ""},
    +		{"Token", Type, 0, ""},
    +		{"TokenAccessInformation", Const, 0, ""},
    +		{"TokenAuditPolicy", Const, 0, ""},
    +		{"TokenDefaultDacl", Const, 0, ""},
    +		{"TokenElevation", Const, 0, ""},
    +		{"TokenElevationType", Const, 0, ""},
    +		{"TokenGroups", Const, 0, ""},
    +		{"TokenGroupsAndPrivileges", Const, 0, ""},
    +		{"TokenHasRestrictions", Const, 0, ""},
    +		{"TokenImpersonationLevel", Const, 0, ""},
    +		{"TokenIntegrityLevel", Const, 0, ""},
    +		{"TokenLinkedToken", Const, 0, ""},
    +		{"TokenLogonSid", Const, 0, ""},
    +		{"TokenMandatoryPolicy", Const, 0, ""},
    +		{"TokenOrigin", Const, 0, ""},
    +		{"TokenOwner", Const, 0, ""},
    +		{"TokenPrimaryGroup", Const, 0, ""},
    +		{"TokenPrivileges", Const, 0, ""},
    +		{"TokenRestrictedSids", Const, 0, ""},
    +		{"TokenSandBoxInert", Const, 0, ""},
    +		{"TokenSessionId", Const, 0, ""},
    +		{"TokenSessionReference", Const, 0, ""},
    +		{"TokenSource", Const, 0, ""},
    +		{"TokenStatistics", Const, 0, ""},
    +		{"TokenType", Const, 0, ""},
    +		{"TokenUIAccess", Const, 0, ""},
    +		{"TokenUser", Const, 0, ""},
    +		{"TokenVirtualizationAllowed", Const, 0, ""},
    +		{"TokenVirtualizationEnabled", Const, 0, ""},
    +		{"Tokenprimarygroup", Type, 0, ""},
    +		{"Tokenprimarygroup.PrimaryGroup", Field, 0, ""},
    +		{"Tokenuser", Type, 0, ""},
    +		{"Tokenuser.User", Field, 0, ""},
    +		{"TranslateAccountName", Func, 0, ""},
    +		{"TranslateName", Func, 0, ""},
    +		{"TransmitFile", Func, 0, ""},
    +		{"TransmitFileBuffers", Type, 0, ""},
    +		{"TransmitFileBuffers.Head", Field, 0, ""},
    +		{"TransmitFileBuffers.HeadLength", Field, 0, ""},
    +		{"TransmitFileBuffers.Tail", Field, 0, ""},
    +		{"TransmitFileBuffers.TailLength", Field, 0, ""},
    +		{"Truncate", Func, 0, "func(path string, length int64) (err error)"},
    +		{"UNIX_PATH_MAX", Const, 12, ""},
    +		{"USAGE_MATCH_TYPE_AND", Const, 0, ""},
    +		{"USAGE_MATCH_TYPE_OR", Const, 0, ""},
    +		{"UTF16FromString", Func, 1, ""},
    +		{"UTF16PtrFromString", Func, 1, ""},
    +		{"UTF16ToString", Func, 0, ""},
    +		{"Ucred", Type, 0, ""},
    +		{"Ucred.Gid", Field, 0, ""},
    +		{"Ucred.Pid", Field, 0, ""},
    +		{"Ucred.Uid", Field, 0, ""},
    +		{"Umask", Func, 0, "func(mask int) (oldmask int)"},
    +		{"Uname", Func, 0, "func(buf *Utsname) (err error)"},
    +		{"Undelete", Func, 0, ""},
    +		{"UnixCredentials", Func, 0, "func(ucred *Ucred) []byte"},
    +		{"UnixRights", Func, 0, "func(fds ...int) []byte"},
    +		{"Unlink", Func, 0, "func(path string) error"},
    +		{"Unlinkat", Func, 0, "func(dirfd int, path string) error"},
    +		{"UnmapViewOfFile", Func, 0, ""},
    +		{"Unmount", Func, 0, "func(target string, flags int) (err error)"},
    +		{"Unsetenv", Func, 4, "func(key string) error"},
    +		{"Unshare", Func, 0, "func(flags int) (err error)"},
    +		{"UserInfo10", Type, 0, ""},
    +		{"UserInfo10.Comment", Field, 0, ""},
    +		{"UserInfo10.FullName", Field, 0, ""},
    +		{"UserInfo10.Name", Field, 0, ""},
    +		{"UserInfo10.UsrComment", Field, 0, ""},
    +		{"Ustat", Func, 0, "func(dev int, ubuf *Ustat_t) (err error)"},
    +		{"Ustat_t", Type, 0, ""},
    +		{"Ustat_t.Fname", Field, 0, ""},
    +		{"Ustat_t.Fpack", Field, 0, ""},
    +		{"Ustat_t.Pad_cgo_0", Field, 0, ""},
    +		{"Ustat_t.Pad_cgo_1", Field, 0, ""},
    +		{"Ustat_t.Tfree", Field, 0, ""},
    +		{"Ustat_t.Tinode", Field, 0, ""},
    +		{"Utimbuf", Type, 0, ""},
    +		{"Utimbuf.Actime", Field, 0, ""},
    +		{"Utimbuf.Modtime", Field, 0, ""},
    +		{"Utime", Func, 0, "func(path string, buf *Utimbuf) (err error)"},
    +		{"Utimes", Func, 0, "func(path string, tv []Timeval) (err error)"},
    +		{"UtimesNano", Func, 1, "func(path string, ts []Timespec) (err error)"},
    +		{"Utsname", Type, 0, ""},
    +		{"Utsname.Domainname", Field, 0, ""},
    +		{"Utsname.Machine", Field, 0, ""},
    +		{"Utsname.Nodename", Field, 0, ""},
    +		{"Utsname.Release", Field, 0, ""},
    +		{"Utsname.Sysname", Field, 0, ""},
    +		{"Utsname.Version", Field, 0, ""},
    +		{"VDISCARD", Const, 0, ""},
    +		{"VDSUSP", Const, 1, ""},
    +		{"VEOF", Const, 0, ""},
    +		{"VEOL", Const, 0, ""},
    +		{"VEOL2", Const, 0, ""},
    +		{"VERASE", Const, 0, ""},
    +		{"VERASE2", Const, 1, ""},
    +		{"VINTR", Const, 0, ""},
    +		{"VKILL", Const, 0, ""},
    +		{"VLNEXT", Const, 0, ""},
    +		{"VMIN", Const, 0, ""},
    +		{"VQUIT", Const, 0, ""},
    +		{"VREPRINT", Const, 0, ""},
    +		{"VSTART", Const, 0, ""},
    +		{"VSTATUS", Const, 1, ""},
    +		{"VSTOP", Const, 0, ""},
    +		{"VSUSP", Const, 0, ""},
    +		{"VSWTC", Const, 0, ""},
    +		{"VT0", Const, 1, ""},
    +		{"VT1", Const, 1, ""},
    +		{"VTDLY", Const, 1, ""},
    +		{"VTIME", Const, 0, ""},
    +		{"VWERASE", Const, 0, ""},
    +		{"VirtualLock", Func, 0, ""},
    +		{"VirtualUnlock", Func, 0, ""},
    +		{"WAIT_ABANDONED", Const, 0, ""},
    +		{"WAIT_FAILED", Const, 0, ""},
    +		{"WAIT_OBJECT_0", Const, 0, ""},
    +		{"WAIT_TIMEOUT", Const, 0, ""},
    +		{"WALL", Const, 0, ""},
    +		{"WALLSIG", Const, 1, ""},
    +		{"WALTSIG", Const, 1, ""},
    +		{"WCLONE", Const, 0, ""},
    +		{"WCONTINUED", Const, 0, ""},
    +		{"WCOREFLAG", Const, 0, ""},
    +		{"WEXITED", Const, 0, ""},
    +		{"WLINUXCLONE", Const, 0, ""},
    +		{"WNOHANG", Const, 0, ""},
    +		{"WNOTHREAD", Const, 0, ""},
    +		{"WNOWAIT", Const, 0, ""},
    +		{"WNOZOMBIE", Const, 1, ""},
    +		{"WOPTSCHECKED", Const, 1, ""},
    +		{"WORDSIZE", Const, 0, ""},
    +		{"WSABuf", Type, 0, ""},
    +		{"WSABuf.Buf", Field, 0, ""},
    +		{"WSABuf.Len", Field, 0, ""},
    +		{"WSACleanup", Func, 0, ""},
    +		{"WSADESCRIPTION_LEN", Const, 0, ""},
    +		{"WSAData", Type, 0, ""},
    +		{"WSAData.Description", Field, 0, ""},
    +		{"WSAData.HighVersion", Field, 0, ""},
    +		{"WSAData.MaxSockets", Field, 0, ""},
    +		{"WSAData.MaxUdpDg", Field, 0, ""},
    +		{"WSAData.SystemStatus", Field, 0, ""},
    +		{"WSAData.VendorInfo", Field, 0, ""},
    +		{"WSAData.Version", Field, 0, ""},
    +		{"WSAEACCES", Const, 2, ""},
    +		{"WSAECONNABORTED", Const, 9, ""},
    +		{"WSAECONNRESET", Const, 3, ""},
    +		{"WSAENOPROTOOPT", Const, 23, ""},
    +		{"WSAEnumProtocols", Func, 2, ""},
    +		{"WSAID_CONNECTEX", Var, 1, ""},
    +		{"WSAIoctl", Func, 0, ""},
    +		{"WSAPROTOCOL_LEN", Const, 2, ""},
    +		{"WSAProtocolChain", Type, 2, ""},
    +		{"WSAProtocolChain.ChainEntries", Field, 2, ""},
    +		{"WSAProtocolChain.ChainLen", Field, 2, ""},
    +		{"WSAProtocolInfo", Type, 2, ""},
    +		{"WSAProtocolInfo.AddressFamily", Field, 2, ""},
    +		{"WSAProtocolInfo.CatalogEntryId", Field, 2, ""},
    +		{"WSAProtocolInfo.MaxSockAddr", Field, 2, ""},
    +		{"WSAProtocolInfo.MessageSize", Field, 2, ""},
    +		{"WSAProtocolInfo.MinSockAddr", Field, 2, ""},
    +		{"WSAProtocolInfo.NetworkByteOrder", Field, 2, ""},
    +		{"WSAProtocolInfo.Protocol", Field, 2, ""},
    +		{"WSAProtocolInfo.ProtocolChain", Field, 2, ""},
    +		{"WSAProtocolInfo.ProtocolMaxOffset", Field, 2, ""},
    +		{"WSAProtocolInfo.ProtocolName", Field, 2, ""},
    +		{"WSAProtocolInfo.ProviderFlags", Field, 2, ""},
    +		{"WSAProtocolInfo.ProviderId", Field, 2, ""},
    +		{"WSAProtocolInfo.ProviderReserved", Field, 2, ""},
    +		{"WSAProtocolInfo.SecurityScheme", Field, 2, ""},
    +		{"WSAProtocolInfo.ServiceFlags1", Field, 2, ""},
    +		{"WSAProtocolInfo.ServiceFlags2", Field, 2, ""},
    +		{"WSAProtocolInfo.ServiceFlags3", Field, 2, ""},
    +		{"WSAProtocolInfo.ServiceFlags4", Field, 2, ""},
    +		{"WSAProtocolInfo.SocketType", Field, 2, ""},
    +		{"WSAProtocolInfo.Version", Field, 2, ""},
    +		{"WSARecv", Func, 0, ""},
    +		{"WSARecvFrom", Func, 0, ""},
    +		{"WSASYS_STATUS_LEN", Const, 0, ""},
    +		{"WSASend", Func, 0, ""},
    +		{"WSASendTo", Func, 0, ""},
    +		{"WSASendto", Func, 0, ""},
    +		{"WSAStartup", Func, 0, ""},
    +		{"WSTOPPED", Const, 0, ""},
    +		{"WTRAPPED", Const, 1, ""},
    +		{"WUNTRACED", Const, 0, ""},
    +		{"Wait4", Func, 0, "func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)"},
    +		{"WaitForSingleObject", Func, 0, ""},
    +		{"WaitStatus", Type, 0, ""},
    +		{"WaitStatus.ExitCode", Field, 0, ""},
    +		{"Win32FileAttributeData", Type, 0, ""},
    +		{"Win32FileAttributeData.CreationTime", Field, 0, ""},
    +		{"Win32FileAttributeData.FileAttributes", Field, 0, ""},
    +		{"Win32FileAttributeData.FileSizeHigh", Field, 0, ""},
    +		{"Win32FileAttributeData.FileSizeLow", Field, 0, ""},
    +		{"Win32FileAttributeData.LastAccessTime", Field, 0, ""},
    +		{"Win32FileAttributeData.LastWriteTime", Field, 0, ""},
    +		{"Win32finddata", Type, 0, ""},
    +		{"Win32finddata.AlternateFileName", Field, 0, ""},
    +		{"Win32finddata.CreationTime", Field, 0, ""},
    +		{"Win32finddata.FileAttributes", Field, 0, ""},
    +		{"Win32finddata.FileName", Field, 0, ""},
    +		{"Win32finddata.FileSizeHigh", Field, 0, ""},
    +		{"Win32finddata.FileSizeLow", Field, 0, ""},
    +		{"Win32finddata.LastAccessTime", Field, 0, ""},
    +		{"Win32finddata.LastWriteTime", Field, 0, ""},
    +		{"Win32finddata.Reserved0", Field, 0, ""},
    +		{"Win32finddata.Reserved1", Field, 0, ""},
    +		{"Write", Func, 0, "func(fd int, p []byte) (n int, err error)"},
    +		{"WriteConsole", Func, 1, ""},
    +		{"WriteFile", Func, 0, ""},
    +		{"X509_ASN_ENCODING", Const, 0, ""},
    +		{"XCASE", Const, 0, ""},
    +		{"XP1_CONNECTIONLESS", Const, 2, ""},
    +		{"XP1_CONNECT_DATA", Const, 2, ""},
    +		{"XP1_DISCONNECT_DATA", Const, 2, ""},
    +		{"XP1_EXPEDITED_DATA", Const, 2, ""},
    +		{"XP1_GRACEFUL_CLOSE", Const, 2, ""},
    +		{"XP1_GUARANTEED_DELIVERY", Const, 2, ""},
    +		{"XP1_GUARANTEED_ORDER", Const, 2, ""},
    +		{"XP1_IFS_HANDLES", Const, 2, ""},
    +		{"XP1_MESSAGE_ORIENTED", Const, 2, ""},
    +		{"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2, ""},
    +		{"XP1_MULTIPOINT_DATA_PLANE", Const, 2, ""},
    +		{"XP1_PARTIAL_MESSAGE", Const, 2, ""},
    +		{"XP1_PSEUDO_STREAM", Const, 2, ""},
    +		{"XP1_QOS_SUPPORTED", Const, 2, ""},
    +		{"XP1_SAN_SUPPORT_SDP", Const, 2, ""},
    +		{"XP1_SUPPORT_BROADCAST", Const, 2, ""},
    +		{"XP1_SUPPORT_MULTIPOINT", Const, 2, ""},
    +		{"XP1_UNI_RECV", Const, 2, ""},
    +		{"XP1_UNI_SEND", Const, 2, ""},
     	},
     	"syscall/js": {
    -		{"CopyBytesToGo", Func, 0},
    -		{"CopyBytesToJS", Func, 0},
    -		{"Error", Type, 0},
    -		{"Func", Type, 0},
    -		{"FuncOf", Func, 0},
    -		{"Global", Func, 0},
    -		{"Null", Func, 0},
    -		{"Type", Type, 0},
    -		{"TypeBoolean", Const, 0},
    -		{"TypeFunction", Const, 0},
    -		{"TypeNull", Const, 0},
    -		{"TypeNumber", Const, 0},
    -		{"TypeObject", Const, 0},
    -		{"TypeString", Const, 0},
    -		{"TypeSymbol", Const, 0},
    -		{"TypeUndefined", Const, 0},
    -		{"Undefined", Func, 0},
    -		{"Value", Type, 0},
    -		{"ValueError", Type, 0},
    -		{"ValueOf", Func, 0},
    +		{"CopyBytesToGo", Func, 0, ""},
    +		{"CopyBytesToJS", Func, 0, ""},
    +		{"Error", Type, 0, ""},
    +		{"Func", Type, 0, ""},
    +		{"FuncOf", Func, 0, ""},
    +		{"Global", Func, 0, ""},
    +		{"Null", Func, 0, ""},
    +		{"Type", Type, 0, ""},
    +		{"TypeBoolean", Const, 0, ""},
    +		{"TypeFunction", Const, 0, ""},
    +		{"TypeNull", Const, 0, ""},
    +		{"TypeNumber", Const, 0, ""},
    +		{"TypeObject", Const, 0, ""},
    +		{"TypeString", Const, 0, ""},
    +		{"TypeSymbol", Const, 0, ""},
    +		{"TypeUndefined", Const, 0, ""},
    +		{"Undefined", Func, 0, ""},
    +		{"Value", Type, 0, ""},
    +		{"ValueError", Type, 0, ""},
    +		{"ValueOf", Func, 0, ""},
     	},
     	"testing": {
    -		{"(*B).Chdir", Method, 24},
    -		{"(*B).Cleanup", Method, 14},
    -		{"(*B).Context", Method, 24},
    -		{"(*B).Elapsed", Method, 20},
    -		{"(*B).Error", Method, 0},
    -		{"(*B).Errorf", Method, 0},
    -		{"(*B).Fail", Method, 0},
    -		{"(*B).FailNow", Method, 0},
    -		{"(*B).Failed", Method, 0},
    -		{"(*B).Fatal", Method, 0},
    -		{"(*B).Fatalf", Method, 0},
    -		{"(*B).Helper", Method, 9},
    -		{"(*B).Log", Method, 0},
    -		{"(*B).Logf", Method, 0},
    -		{"(*B).Loop", Method, 24},
    -		{"(*B).Name", Method, 8},
    -		{"(*B).ReportAllocs", Method, 1},
    -		{"(*B).ReportMetric", Method, 13},
    -		{"(*B).ResetTimer", Method, 0},
    -		{"(*B).Run", Method, 7},
    -		{"(*B).RunParallel", Method, 3},
    -		{"(*B).SetBytes", Method, 0},
    -		{"(*B).SetParallelism", Method, 3},
    -		{"(*B).Setenv", Method, 17},
    -		{"(*B).Skip", Method, 1},
    -		{"(*B).SkipNow", Method, 1},
    -		{"(*B).Skipf", Method, 1},
    -		{"(*B).Skipped", Method, 1},
    -		{"(*B).StartTimer", Method, 0},
    -		{"(*B).StopTimer", Method, 0},
    -		{"(*B).TempDir", Method, 15},
    -		{"(*F).Add", Method, 18},
    -		{"(*F).Chdir", Method, 24},
    -		{"(*F).Cleanup", Method, 18},
    -		{"(*F).Context", Method, 24},
    -		{"(*F).Error", Method, 18},
    -		{"(*F).Errorf", Method, 18},
    -		{"(*F).Fail", Method, 18},
    -		{"(*F).FailNow", Method, 18},
    -		{"(*F).Failed", Method, 18},
    -		{"(*F).Fatal", Method, 18},
    -		{"(*F).Fatalf", Method, 18},
    -		{"(*F).Fuzz", Method, 18},
    -		{"(*F).Helper", Method, 18},
    -		{"(*F).Log", Method, 18},
    -		{"(*F).Logf", Method, 18},
    -		{"(*F).Name", Method, 18},
    -		{"(*F).Setenv", Method, 18},
    -		{"(*F).Skip", Method, 18},
    -		{"(*F).SkipNow", Method, 18},
    -		{"(*F).Skipf", Method, 18},
    -		{"(*F).Skipped", Method, 18},
    -		{"(*F).TempDir", Method, 18},
    -		{"(*M).Run", Method, 4},
    -		{"(*PB).Next", Method, 3},
    -		{"(*T).Chdir", Method, 24},
    -		{"(*T).Cleanup", Method, 14},
    -		{"(*T).Context", Method, 24},
    -		{"(*T).Deadline", Method, 15},
    -		{"(*T).Error", Method, 0},
    -		{"(*T).Errorf", Method, 0},
    -		{"(*T).Fail", Method, 0},
    -		{"(*T).FailNow", Method, 0},
    -		{"(*T).Failed", Method, 0},
    -		{"(*T).Fatal", Method, 0},
    -		{"(*T).Fatalf", Method, 0},
    -		{"(*T).Helper", Method, 9},
    -		{"(*T).Log", Method, 0},
    -		{"(*T).Logf", Method, 0},
    -		{"(*T).Name", Method, 8},
    -		{"(*T).Parallel", Method, 0},
    -		{"(*T).Run", Method, 7},
    -		{"(*T).Setenv", Method, 17},
    -		{"(*T).Skip", Method, 1},
    -		{"(*T).SkipNow", Method, 1},
    -		{"(*T).Skipf", Method, 1},
    -		{"(*T).Skipped", Method, 1},
    -		{"(*T).TempDir", Method, 15},
    -		{"(BenchmarkResult).AllocedBytesPerOp", Method, 1},
    -		{"(BenchmarkResult).AllocsPerOp", Method, 1},
    -		{"(BenchmarkResult).MemString", Method, 1},
    -		{"(BenchmarkResult).NsPerOp", Method, 0},
    -		{"(BenchmarkResult).String", Method, 0},
    -		{"AllocsPerRun", Func, 1},
    -		{"B", Type, 0},
    -		{"B.N", Field, 0},
    -		{"Benchmark", Func, 0},
    -		{"BenchmarkResult", Type, 0},
    -		{"BenchmarkResult.Bytes", Field, 0},
    -		{"BenchmarkResult.Extra", Field, 13},
    -		{"BenchmarkResult.MemAllocs", Field, 1},
    -		{"BenchmarkResult.MemBytes", Field, 1},
    -		{"BenchmarkResult.N", Field, 0},
    -		{"BenchmarkResult.T", Field, 0},
    -		{"Cover", Type, 2},
    -		{"Cover.Blocks", Field, 2},
    -		{"Cover.Counters", Field, 2},
    -		{"Cover.CoveredPackages", Field, 2},
    -		{"Cover.Mode", Field, 2},
    -		{"CoverBlock", Type, 2},
    -		{"CoverBlock.Col0", Field, 2},
    -		{"CoverBlock.Col1", Field, 2},
    -		{"CoverBlock.Line0", Field, 2},
    -		{"CoverBlock.Line1", Field, 2},
    -		{"CoverBlock.Stmts", Field, 2},
    -		{"CoverMode", Func, 8},
    -		{"Coverage", Func, 4},
    -		{"F", Type, 18},
    -		{"Init", Func, 13},
    -		{"InternalBenchmark", Type, 0},
    -		{"InternalBenchmark.F", Field, 0},
    -		{"InternalBenchmark.Name", Field, 0},
    -		{"InternalExample", Type, 0},
    -		{"InternalExample.F", Field, 0},
    -		{"InternalExample.Name", Field, 0},
    -		{"InternalExample.Output", Field, 0},
    -		{"InternalExample.Unordered", Field, 7},
    -		{"InternalFuzzTarget", Type, 18},
    -		{"InternalFuzzTarget.Fn", Field, 18},
    -		{"InternalFuzzTarget.Name", Field, 18},
    -		{"InternalTest", Type, 0},
    -		{"InternalTest.F", Field, 0},
    -		{"InternalTest.Name", Field, 0},
    -		{"M", Type, 4},
    -		{"Main", Func, 0},
    -		{"MainStart", Func, 4},
    -		{"PB", Type, 3},
    -		{"RegisterCover", Func, 2},
    -		{"RunBenchmarks", Func, 0},
    -		{"RunExamples", Func, 0},
    -		{"RunTests", Func, 0},
    -		{"Short", Func, 0},
    -		{"T", Type, 0},
    -		{"TB", Type, 2},
    -		{"Testing", Func, 21},
    -		{"Verbose", Func, 1},
    +		{"(*B).Attr", Method, 25, ""},
    +		{"(*B).Chdir", Method, 24, ""},
    +		{"(*B).Cleanup", Method, 14, ""},
    +		{"(*B).Context", Method, 24, ""},
    +		{"(*B).Elapsed", Method, 20, ""},
    +		{"(*B).Error", Method, 0, ""},
    +		{"(*B).Errorf", Method, 0, ""},
    +		{"(*B).Fail", Method, 0, ""},
    +		{"(*B).FailNow", Method, 0, ""},
    +		{"(*B).Failed", Method, 0, ""},
    +		{"(*B).Fatal", Method, 0, ""},
    +		{"(*B).Fatalf", Method, 0, ""},
    +		{"(*B).Helper", Method, 9, ""},
    +		{"(*B).Log", Method, 0, ""},
    +		{"(*B).Logf", Method, 0, ""},
    +		{"(*B).Loop", Method, 24, ""},
    +		{"(*B).Name", Method, 8, ""},
    +		{"(*B).Output", Method, 25, ""},
    +		{"(*B).ReportAllocs", Method, 1, ""},
    +		{"(*B).ReportMetric", Method, 13, ""},
    +		{"(*B).ResetTimer", Method, 0, ""},
    +		{"(*B).Run", Method, 7, ""},
    +		{"(*B).RunParallel", Method, 3, ""},
    +		{"(*B).SetBytes", Method, 0, ""},
    +		{"(*B).SetParallelism", Method, 3, ""},
    +		{"(*B).Setenv", Method, 17, ""},
    +		{"(*B).Skip", Method, 1, ""},
    +		{"(*B).SkipNow", Method, 1, ""},
    +		{"(*B).Skipf", Method, 1, ""},
    +		{"(*B).Skipped", Method, 1, ""},
    +		{"(*B).StartTimer", Method, 0, ""},
    +		{"(*B).StopTimer", Method, 0, ""},
    +		{"(*B).TempDir", Method, 15, ""},
    +		{"(*F).Add", Method, 18, ""},
    +		{"(*F).Attr", Method, 25, ""},
    +		{"(*F).Chdir", Method, 24, ""},
    +		{"(*F).Cleanup", Method, 18, ""},
    +		{"(*F).Context", Method, 24, ""},
    +		{"(*F).Error", Method, 18, ""},
    +		{"(*F).Errorf", Method, 18, ""},
    +		{"(*F).Fail", Method, 18, ""},
    +		{"(*F).FailNow", Method, 18, ""},
    +		{"(*F).Failed", Method, 18, ""},
    +		{"(*F).Fatal", Method, 18, ""},
    +		{"(*F).Fatalf", Method, 18, ""},
    +		{"(*F).Fuzz", Method, 18, ""},
    +		{"(*F).Helper", Method, 18, ""},
    +		{"(*F).Log", Method, 18, ""},
    +		{"(*F).Logf", Method, 18, ""},
    +		{"(*F).Name", Method, 18, ""},
    +		{"(*F).Output", Method, 25, ""},
    +		{"(*F).Setenv", Method, 18, ""},
    +		{"(*F).Skip", Method, 18, ""},
    +		{"(*F).SkipNow", Method, 18, ""},
    +		{"(*F).Skipf", Method, 18, ""},
    +		{"(*F).Skipped", Method, 18, ""},
    +		{"(*F).TempDir", Method, 18, ""},
    +		{"(*M).Run", Method, 4, ""},
    +		{"(*PB).Next", Method, 3, ""},
    +		{"(*T).Attr", Method, 25, ""},
    +		{"(*T).Chdir", Method, 24, ""},
    +		{"(*T).Cleanup", Method, 14, ""},
    +		{"(*T).Context", Method, 24, ""},
    +		{"(*T).Deadline", Method, 15, ""},
    +		{"(*T).Error", Method, 0, ""},
    +		{"(*T).Errorf", Method, 0, ""},
    +		{"(*T).Fail", Method, 0, ""},
    +		{"(*T).FailNow", Method, 0, ""},
    +		{"(*T).Failed", Method, 0, ""},
    +		{"(*T).Fatal", Method, 0, ""},
    +		{"(*T).Fatalf", Method, 0, ""},
    +		{"(*T).Helper", Method, 9, ""},
    +		{"(*T).Log", Method, 0, ""},
    +		{"(*T).Logf", Method, 0, ""},
    +		{"(*T).Name", Method, 8, ""},
    +		{"(*T).Output", Method, 25, ""},
    +		{"(*T).Parallel", Method, 0, ""},
    +		{"(*T).Run", Method, 7, ""},
    +		{"(*T).Setenv", Method, 17, ""},
    +		{"(*T).Skip", Method, 1, ""},
    +		{"(*T).SkipNow", Method, 1, ""},
    +		{"(*T).Skipf", Method, 1, ""},
    +		{"(*T).Skipped", Method, 1, ""},
    +		{"(*T).TempDir", Method, 15, ""},
    +		{"(BenchmarkResult).AllocedBytesPerOp", Method, 1, ""},
    +		{"(BenchmarkResult).AllocsPerOp", Method, 1, ""},
    +		{"(BenchmarkResult).MemString", Method, 1, ""},
    +		{"(BenchmarkResult).NsPerOp", Method, 0, ""},
    +		{"(BenchmarkResult).String", Method, 0, ""},
    +		{"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"},
    +		{"B", Type, 0, ""},
    +		{"B.N", Field, 0, ""},
    +		{"Benchmark", Func, 0, "func(f func(b *B)) BenchmarkResult"},
    +		{"BenchmarkResult", Type, 0, ""},
    +		{"BenchmarkResult.Bytes", Field, 0, ""},
    +		{"BenchmarkResult.Extra", Field, 13, ""},
    +		{"BenchmarkResult.MemAllocs", Field, 1, ""},
    +		{"BenchmarkResult.MemBytes", Field, 1, ""},
    +		{"BenchmarkResult.N", Field, 0, ""},
    +		{"BenchmarkResult.T", Field, 0, ""},
    +		{"Cover", Type, 2, ""},
    +		{"Cover.Blocks", Field, 2, ""},
    +		{"Cover.Counters", Field, 2, ""},
    +		{"Cover.CoveredPackages", Field, 2, ""},
    +		{"Cover.Mode", Field, 2, ""},
    +		{"CoverBlock", Type, 2, ""},
    +		{"CoverBlock.Col0", Field, 2, ""},
    +		{"CoverBlock.Col1", Field, 2, ""},
    +		{"CoverBlock.Line0", Field, 2, ""},
    +		{"CoverBlock.Line1", Field, 2, ""},
    +		{"CoverBlock.Stmts", Field, 2, ""},
    +		{"CoverMode", Func, 8, "func() string"},
    +		{"Coverage", Func, 4, "func() float64"},
    +		{"F", Type, 18, ""},
    +		{"Init", Func, 13, "func()"},
    +		{"InternalBenchmark", Type, 0, ""},
    +		{"InternalBenchmark.F", Field, 0, ""},
    +		{"InternalBenchmark.Name", Field, 0, ""},
    +		{"InternalExample", Type, 0, ""},
    +		{"InternalExample.F", Field, 0, ""},
    +		{"InternalExample.Name", Field, 0, ""},
    +		{"InternalExample.Output", Field, 0, ""},
    +		{"InternalExample.Unordered", Field, 7, ""},
    +		{"InternalFuzzTarget", Type, 18, ""},
    +		{"InternalFuzzTarget.Fn", Field, 18, ""},
    +		{"InternalFuzzTarget.Name", Field, 18, ""},
    +		{"InternalTest", Type, 0, ""},
    +		{"InternalTest.F", Field, 0, ""},
    +		{"InternalTest.Name", Field, 0, ""},
    +		{"M", Type, 4, ""},
    +		{"Main", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)"},
    +		{"MainStart", Func, 4, "func(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) *M"},
    +		{"PB", Type, 3, ""},
    +		{"RegisterCover", Func, 2, "func(c Cover)"},
    +		{"RunBenchmarks", Func, 0, "func(matchString func(pat string, str string) (bool, error), benchmarks []InternalBenchmark)"},
    +		{"RunExamples", Func, 0, "func(matchString func(pat string, str string) (bool, error), examples []InternalExample) (ok bool)"},
    +		{"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"},
    +		{"Short", Func, 0, "func() bool"},
    +		{"T", Type, 0, ""},
    +		{"TB", Type, 2, ""},
    +		{"Testing", Func, 21, "func() bool"},
    +		{"Verbose", Func, 1, "func() bool"},
     	},
     	"testing/fstest": {
    -		{"(MapFS).Glob", Method, 16},
    -		{"(MapFS).Lstat", Method, 25},
    -		{"(MapFS).Open", Method, 16},
    -		{"(MapFS).ReadDir", Method, 16},
    -		{"(MapFS).ReadFile", Method, 16},
    -		{"(MapFS).ReadLink", Method, 25},
    -		{"(MapFS).Stat", Method, 16},
    -		{"(MapFS).Sub", Method, 16},
    -		{"MapFS", Type, 16},
    -		{"MapFile", Type, 16},
    -		{"MapFile.Data", Field, 16},
    -		{"MapFile.ModTime", Field, 16},
    -		{"MapFile.Mode", Field, 16},
    -		{"MapFile.Sys", Field, 16},
    -		{"TestFS", Func, 16},
    +		{"(MapFS).Glob", Method, 16, ""},
    +		{"(MapFS).Lstat", Method, 25, ""},
    +		{"(MapFS).Open", Method, 16, ""},
    +		{"(MapFS).ReadDir", Method, 16, ""},
    +		{"(MapFS).ReadFile", Method, 16, ""},
    +		{"(MapFS).ReadLink", Method, 25, ""},
    +		{"(MapFS).Stat", Method, 16, ""},
    +		{"(MapFS).Sub", Method, 16, ""},
    +		{"MapFS", Type, 16, ""},
    +		{"MapFile", Type, 16, ""},
    +		{"MapFile.Data", Field, 16, ""},
    +		{"MapFile.ModTime", Field, 16, ""},
    +		{"MapFile.Mode", Field, 16, ""},
    +		{"MapFile.Sys", Field, 16, ""},
    +		{"TestFS", Func, 16, "func(fsys fs.FS, expected ...string) error"},
     	},
     	"testing/iotest": {
    -		{"DataErrReader", Func, 0},
    -		{"ErrReader", Func, 16},
    -		{"ErrTimeout", Var, 0},
    -		{"HalfReader", Func, 0},
    -		{"NewReadLogger", Func, 0},
    -		{"NewWriteLogger", Func, 0},
    -		{"OneByteReader", Func, 0},
    -		{"TestReader", Func, 16},
    -		{"TimeoutReader", Func, 0},
    -		{"TruncateWriter", Func, 0},
    +		{"DataErrReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"ErrReader", Func, 16, "func(err error) io.Reader"},
    +		{"ErrTimeout", Var, 0, ""},
    +		{"HalfReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"NewReadLogger", Func, 0, "func(prefix string, r io.Reader) io.Reader"},
    +		{"NewWriteLogger", Func, 0, "func(prefix string, w io.Writer) io.Writer"},
    +		{"OneByteReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"TestReader", Func, 16, "func(r io.Reader, content []byte) error"},
    +		{"TimeoutReader", Func, 0, "func(r io.Reader) io.Reader"},
    +		{"TruncateWriter", Func, 0, "func(w io.Writer, n int64) io.Writer"},
     	},
     	"testing/quick": {
    -		{"(*CheckEqualError).Error", Method, 0},
    -		{"(*CheckError).Error", Method, 0},
    -		{"(SetupError).Error", Method, 0},
    -		{"Check", Func, 0},
    -		{"CheckEqual", Func, 0},
    -		{"CheckEqualError", Type, 0},
    -		{"CheckEqualError.CheckError", Field, 0},
    -		{"CheckEqualError.Out1", Field, 0},
    -		{"CheckEqualError.Out2", Field, 0},
    -		{"CheckError", Type, 0},
    -		{"CheckError.Count", Field, 0},
    -		{"CheckError.In", Field, 0},
    -		{"Config", Type, 0},
    -		{"Config.MaxCount", Field, 0},
    -		{"Config.MaxCountScale", Field, 0},
    -		{"Config.Rand", Field, 0},
    -		{"Config.Values", Field, 0},
    -		{"Generator", Type, 0},
    -		{"SetupError", Type, 0},
    -		{"Value", Func, 0},
    +		{"(*CheckEqualError).Error", Method, 0, ""},
    +		{"(*CheckError).Error", Method, 0, ""},
    +		{"(SetupError).Error", Method, 0, ""},
    +		{"Check", Func, 0, "func(f any, config *Config) error"},
    +		{"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"},
    +		{"CheckEqualError", Type, 0, ""},
    +		{"CheckEqualError.CheckError", Field, 0, ""},
    +		{"CheckEqualError.Out1", Field, 0, ""},
    +		{"CheckEqualError.Out2", Field, 0, ""},
    +		{"CheckError", Type, 0, ""},
    +		{"CheckError.Count", Field, 0, ""},
    +		{"CheckError.In", Field, 0, ""},
    +		{"Config", Type, 0, ""},
    +		{"Config.MaxCount", Field, 0, ""},
    +		{"Config.MaxCountScale", Field, 0, ""},
    +		{"Config.Rand", Field, 0, ""},
    +		{"Config.Values", Field, 0, ""},
    +		{"Generator", Type, 0, ""},
    +		{"SetupError", Type, 0, ""},
    +		{"Value", Func, 0, "func(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool)"},
     	},
     	"testing/slogtest": {
    -		{"Run", Func, 22},
    -		{"TestHandler", Func, 21},
    +		{"Run", Func, 22, "func(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any)"},
    +		{"TestHandler", Func, 21, "func(h slog.Handler, results func() []map[string]any) error"},
    +	},
    +	"testing/synctest": {
    +		{"Test", Func, 25, "func(t *testing.T, f func(*testing.T))"},
    +		{"Wait", Func, 25, "func()"},
     	},
     	"text/scanner": {
    -		{"(*Position).IsValid", Method, 0},
    -		{"(*Scanner).Init", Method, 0},
    -		{"(*Scanner).IsValid", Method, 0},
    -		{"(*Scanner).Next", Method, 0},
    -		{"(*Scanner).Peek", Method, 0},
    -		{"(*Scanner).Pos", Method, 0},
    -		{"(*Scanner).Scan", Method, 0},
    -		{"(*Scanner).TokenText", Method, 0},
    -		{"(Position).String", Method, 0},
    -		{"(Scanner).String", Method, 0},
    -		{"Char", Const, 0},
    -		{"Comment", Const, 0},
    -		{"EOF", Const, 0},
    -		{"Float", Const, 0},
    -		{"GoTokens", Const, 0},
    -		{"GoWhitespace", Const, 0},
    -		{"Ident", Const, 0},
    -		{"Int", Const, 0},
    -		{"Position", Type, 0},
    -		{"Position.Column", Field, 0},
    -		{"Position.Filename", Field, 0},
    -		{"Position.Line", Field, 0},
    -		{"Position.Offset", Field, 0},
    -		{"RawString", Const, 0},
    -		{"ScanChars", Const, 0},
    -		{"ScanComments", Const, 0},
    -		{"ScanFloats", Const, 0},
    -		{"ScanIdents", Const, 0},
    -		{"ScanInts", Const, 0},
    -		{"ScanRawStrings", Const, 0},
    -		{"ScanStrings", Const, 0},
    -		{"Scanner", Type, 0},
    -		{"Scanner.Error", Field, 0},
    -		{"Scanner.ErrorCount", Field, 0},
    -		{"Scanner.IsIdentRune", Field, 4},
    -		{"Scanner.Mode", Field, 0},
    -		{"Scanner.Position", Field, 0},
    -		{"Scanner.Whitespace", Field, 0},
    -		{"SkipComments", Const, 0},
    -		{"String", Const, 0},
    -		{"TokenString", Func, 0},
    +		{"(*Position).IsValid", Method, 0, ""},
    +		{"(*Scanner).Init", Method, 0, ""},
    +		{"(*Scanner).IsValid", Method, 0, ""},
    +		{"(*Scanner).Next", Method, 0, ""},
    +		{"(*Scanner).Peek", Method, 0, ""},
    +		{"(*Scanner).Pos", Method, 0, ""},
    +		{"(*Scanner).Scan", Method, 0, ""},
    +		{"(*Scanner).TokenText", Method, 0, ""},
    +		{"(Position).String", Method, 0, ""},
    +		{"(Scanner).String", Method, 0, ""},
    +		{"Char", Const, 0, ""},
    +		{"Comment", Const, 0, ""},
    +		{"EOF", Const, 0, ""},
    +		{"Float", Const, 0, ""},
    +		{"GoTokens", Const, 0, ""},
    +		{"GoWhitespace", Const, 0, ""},
    +		{"Ident", Const, 0, ""},
    +		{"Int", Const, 0, ""},
    +		{"Position", Type, 0, ""},
    +		{"Position.Column", Field, 0, ""},
    +		{"Position.Filename", Field, 0, ""},
    +		{"Position.Line", Field, 0, ""},
    +		{"Position.Offset", Field, 0, ""},
    +		{"RawString", Const, 0, ""},
    +		{"ScanChars", Const, 0, ""},
    +		{"ScanComments", Const, 0, ""},
    +		{"ScanFloats", Const, 0, ""},
    +		{"ScanIdents", Const, 0, ""},
    +		{"ScanInts", Const, 0, ""},
    +		{"ScanRawStrings", Const, 0, ""},
    +		{"ScanStrings", Const, 0, ""},
    +		{"Scanner", Type, 0, ""},
    +		{"Scanner.Error", Field, 0, ""},
    +		{"Scanner.ErrorCount", Field, 0, ""},
    +		{"Scanner.IsIdentRune", Field, 4, ""},
    +		{"Scanner.Mode", Field, 0, ""},
    +		{"Scanner.Position", Field, 0, ""},
    +		{"Scanner.Whitespace", Field, 0, ""},
    +		{"SkipComments", Const, 0, ""},
    +		{"String", Const, 0, ""},
    +		{"TokenString", Func, 0, "func(tok rune) string"},
     	},
     	"text/tabwriter": {
    -		{"(*Writer).Flush", Method, 0},
    -		{"(*Writer).Init", Method, 0},
    -		{"(*Writer).Write", Method, 0},
    -		{"AlignRight", Const, 0},
    -		{"Debug", Const, 0},
    -		{"DiscardEmptyColumns", Const, 0},
    -		{"Escape", Const, 0},
    -		{"FilterHTML", Const, 0},
    -		{"NewWriter", Func, 0},
    -		{"StripEscape", Const, 0},
    -		{"TabIndent", Const, 0},
    -		{"Writer", Type, 0},
    +		{"(*Writer).Flush", Method, 0, ""},
    +		{"(*Writer).Init", Method, 0, ""},
    +		{"(*Writer).Write", Method, 0, ""},
    +		{"AlignRight", Const, 0, ""},
    +		{"Debug", Const, 0, ""},
    +		{"DiscardEmptyColumns", Const, 0, ""},
    +		{"Escape", Const, 0, ""},
    +		{"FilterHTML", Const, 0, ""},
    +		{"NewWriter", Func, 0, "func(output io.Writer, minwidth int, tabwidth int, padding int, padchar byte, flags uint) *Writer"},
    +		{"StripEscape", Const, 0, ""},
    +		{"TabIndent", Const, 0, ""},
    +		{"Writer", Type, 0, ""},
     	},
     	"text/template": {
    -		{"(*Template).AddParseTree", Method, 0},
    -		{"(*Template).Clone", Method, 0},
    -		{"(*Template).DefinedTemplates", Method, 5},
    -		{"(*Template).Delims", Method, 0},
    -		{"(*Template).Execute", Method, 0},
    -		{"(*Template).ExecuteTemplate", Method, 0},
    -		{"(*Template).Funcs", Method, 0},
    -		{"(*Template).Lookup", Method, 0},
    -		{"(*Template).Name", Method, 0},
    -		{"(*Template).New", Method, 0},
    -		{"(*Template).Option", Method, 5},
    -		{"(*Template).Parse", Method, 0},
    -		{"(*Template).ParseFS", Method, 16},
    -		{"(*Template).ParseFiles", Method, 0},
    -		{"(*Template).ParseGlob", Method, 0},
    -		{"(*Template).Templates", Method, 0},
    -		{"(ExecError).Error", Method, 6},
    -		{"(ExecError).Unwrap", Method, 13},
    -		{"(Template).Copy", Method, 2},
    -		{"(Template).ErrorContext", Method, 1},
    -		{"ExecError", Type, 6},
    -		{"ExecError.Err", Field, 6},
    -		{"ExecError.Name", Field, 6},
    -		{"FuncMap", Type, 0},
    -		{"HTMLEscape", Func, 0},
    -		{"HTMLEscapeString", Func, 0},
    -		{"HTMLEscaper", Func, 0},
    -		{"IsTrue", Func, 6},
    -		{"JSEscape", Func, 0},
    -		{"JSEscapeString", Func, 0},
    -		{"JSEscaper", Func, 0},
    -		{"Must", Func, 0},
    -		{"New", Func, 0},
    -		{"ParseFS", Func, 16},
    -		{"ParseFiles", Func, 0},
    -		{"ParseGlob", Func, 0},
    -		{"Template", Type, 0},
    -		{"Template.Tree", Field, 0},
    -		{"URLQueryEscaper", Func, 0},
    +		{"(*Template).AddParseTree", Method, 0, ""},
    +		{"(*Template).Clone", Method, 0, ""},
    +		{"(*Template).DefinedTemplates", Method, 5, ""},
    +		{"(*Template).Delims", Method, 0, ""},
    +		{"(*Template).Execute", Method, 0, ""},
    +		{"(*Template).ExecuteTemplate", Method, 0, ""},
    +		{"(*Template).Funcs", Method, 0, ""},
    +		{"(*Template).Lookup", Method, 0, ""},
    +		{"(*Template).Name", Method, 0, ""},
    +		{"(*Template).New", Method, 0, ""},
    +		{"(*Template).Option", Method, 5, ""},
    +		{"(*Template).Parse", Method, 0, ""},
    +		{"(*Template).ParseFS", Method, 16, ""},
    +		{"(*Template).ParseFiles", Method, 0, ""},
    +		{"(*Template).ParseGlob", Method, 0, ""},
    +		{"(*Template).Templates", Method, 0, ""},
    +		{"(ExecError).Error", Method, 6, ""},
    +		{"(ExecError).Unwrap", Method, 13, ""},
    +		{"(Template).Copy", Method, 2, ""},
    +		{"(Template).ErrorContext", Method, 1, ""},
    +		{"ExecError", Type, 6, ""},
    +		{"ExecError.Err", Field, 6, ""},
    +		{"ExecError.Name", Field, 6, ""},
    +		{"FuncMap", Type, 0, ""},
    +		{"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
    +		{"HTMLEscapeString", Func, 0, "func(s string) string"},
    +		{"HTMLEscaper", Func, 0, "func(args ...any) string"},
    +		{"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
    +		{"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
    +		{"JSEscapeString", Func, 0, "func(s string) string"},
    +		{"JSEscaper", Func, 0, "func(args ...any) string"},
    +		{"Must", Func, 0, "func(t *Template, err error) *Template"},
    +		{"New", Func, 0, "func(name string) *Template"},
    +		{"ParseFS", Func, 16, "func(fsys fs.FS, patterns ...string) (*Template, error)"},
    +		{"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
    +		{"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
    +		{"Template", Type, 0, ""},
    +		{"Template.Tree", Field, 0, ""},
    +		{"URLQueryEscaper", Func, 0, "func(args ...any) string"},
     	},
     	"text/template/parse": {
    -		{"(*ActionNode).Copy", Method, 0},
    -		{"(*ActionNode).String", Method, 0},
    -		{"(*BoolNode).Copy", Method, 0},
    -		{"(*BoolNode).String", Method, 0},
    -		{"(*BranchNode).Copy", Method, 4},
    -		{"(*BranchNode).String", Method, 0},
    -		{"(*BreakNode).Copy", Method, 18},
    -		{"(*BreakNode).String", Method, 18},
    -		{"(*ChainNode).Add", Method, 1},
    -		{"(*ChainNode).Copy", Method, 1},
    -		{"(*ChainNode).String", Method, 1},
    -		{"(*CommandNode).Copy", Method, 0},
    -		{"(*CommandNode).String", Method, 0},
    -		{"(*CommentNode).Copy", Method, 16},
    -		{"(*CommentNode).String", Method, 16},
    -		{"(*ContinueNode).Copy", Method, 18},
    -		{"(*ContinueNode).String", Method, 18},
    -		{"(*DotNode).Copy", Method, 0},
    -		{"(*DotNode).String", Method, 0},
    -		{"(*DotNode).Type", Method, 0},
    -		{"(*FieldNode).Copy", Method, 0},
    -		{"(*FieldNode).String", Method, 0},
    -		{"(*IdentifierNode).Copy", Method, 0},
    -		{"(*IdentifierNode).SetPos", Method, 1},
    -		{"(*IdentifierNode).SetTree", Method, 4},
    -		{"(*IdentifierNode).String", Method, 0},
    -		{"(*IfNode).Copy", Method, 0},
    -		{"(*IfNode).String", Method, 0},
    -		{"(*ListNode).Copy", Method, 0},
    -		{"(*ListNode).CopyList", Method, 0},
    -		{"(*ListNode).String", Method, 0},
    -		{"(*NilNode).Copy", Method, 1},
    -		{"(*NilNode).String", Method, 1},
    -		{"(*NilNode).Type", Method, 1},
    -		{"(*NumberNode).Copy", Method, 0},
    -		{"(*NumberNode).String", Method, 0},
    -		{"(*PipeNode).Copy", Method, 0},
    -		{"(*PipeNode).CopyPipe", Method, 0},
    -		{"(*PipeNode).String", Method, 0},
    -		{"(*RangeNode).Copy", Method, 0},
    -		{"(*RangeNode).String", Method, 0},
    -		{"(*StringNode).Copy", Method, 0},
    -		{"(*StringNode).String", Method, 0},
    -		{"(*TemplateNode).Copy", Method, 0},
    -		{"(*TemplateNode).String", Method, 0},
    -		{"(*TextNode).Copy", Method, 0},
    -		{"(*TextNode).String", Method, 0},
    -		{"(*Tree).Copy", Method, 2},
    -		{"(*Tree).ErrorContext", Method, 1},
    -		{"(*Tree).Parse", Method, 0},
    -		{"(*VariableNode).Copy", Method, 0},
    -		{"(*VariableNode).String", Method, 0},
    -		{"(*WithNode).Copy", Method, 0},
    -		{"(*WithNode).String", Method, 0},
    -		{"(ActionNode).Position", Method, 1},
    -		{"(ActionNode).Type", Method, 0},
    -		{"(BoolNode).Position", Method, 1},
    -		{"(BoolNode).Type", Method, 0},
    -		{"(BranchNode).Position", Method, 1},
    -		{"(BranchNode).Type", Method, 0},
    -		{"(BreakNode).Position", Method, 18},
    -		{"(BreakNode).Type", Method, 18},
    -		{"(ChainNode).Position", Method, 1},
    -		{"(ChainNode).Type", Method, 1},
    -		{"(CommandNode).Position", Method, 1},
    -		{"(CommandNode).Type", Method, 0},
    -		{"(CommentNode).Position", Method, 16},
    -		{"(CommentNode).Type", Method, 16},
    -		{"(ContinueNode).Position", Method, 18},
    -		{"(ContinueNode).Type", Method, 18},
    -		{"(DotNode).Position", Method, 1},
    -		{"(FieldNode).Position", Method, 1},
    -		{"(FieldNode).Type", Method, 0},
    -		{"(IdentifierNode).Position", Method, 1},
    -		{"(IdentifierNode).Type", Method, 0},
    -		{"(IfNode).Position", Method, 1},
    -		{"(IfNode).Type", Method, 0},
    -		{"(ListNode).Position", Method, 1},
    -		{"(ListNode).Type", Method, 0},
    -		{"(NilNode).Position", Method, 1},
    -		{"(NodeType).Type", Method, 0},
    -		{"(NumberNode).Position", Method, 1},
    -		{"(NumberNode).Type", Method, 0},
    -		{"(PipeNode).Position", Method, 1},
    -		{"(PipeNode).Type", Method, 0},
    -		{"(Pos).Position", Method, 1},
    -		{"(RangeNode).Position", Method, 1},
    -		{"(RangeNode).Type", Method, 0},
    -		{"(StringNode).Position", Method, 1},
    -		{"(StringNode).Type", Method, 0},
    -		{"(TemplateNode).Position", Method, 1},
    -		{"(TemplateNode).Type", Method, 0},
    -		{"(TextNode).Position", Method, 1},
    -		{"(TextNode).Type", Method, 0},
    -		{"(VariableNode).Position", Method, 1},
    -		{"(VariableNode).Type", Method, 0},
    -		{"(WithNode).Position", Method, 1},
    -		{"(WithNode).Type", Method, 0},
    -		{"ActionNode", Type, 0},
    -		{"ActionNode.Line", Field, 0},
    -		{"ActionNode.NodeType", Field, 0},
    -		{"ActionNode.Pipe", Field, 0},
    -		{"ActionNode.Pos", Field, 1},
    -		{"BoolNode", Type, 0},
    -		{"BoolNode.NodeType", Field, 0},
    -		{"BoolNode.Pos", Field, 1},
    -		{"BoolNode.True", Field, 0},
    -		{"BranchNode", Type, 0},
    -		{"BranchNode.ElseList", Field, 0},
    -		{"BranchNode.Line", Field, 0},
    -		{"BranchNode.List", Field, 0},
    -		{"BranchNode.NodeType", Field, 0},
    -		{"BranchNode.Pipe", Field, 0},
    -		{"BranchNode.Pos", Field, 1},
    -		{"BreakNode", Type, 18},
    -		{"BreakNode.Line", Field, 18},
    -		{"BreakNode.NodeType", Field, 18},
    -		{"BreakNode.Pos", Field, 18},
    -		{"ChainNode", Type, 1},
    -		{"ChainNode.Field", Field, 1},
    -		{"ChainNode.Node", Field, 1},
    -		{"ChainNode.NodeType", Field, 1},
    -		{"ChainNode.Pos", Field, 1},
    -		{"CommandNode", Type, 0},
    -		{"CommandNode.Args", Field, 0},
    -		{"CommandNode.NodeType", Field, 0},
    -		{"CommandNode.Pos", Field, 1},
    -		{"CommentNode", Type, 16},
    -		{"CommentNode.NodeType", Field, 16},
    -		{"CommentNode.Pos", Field, 16},
    -		{"CommentNode.Text", Field, 16},
    -		{"ContinueNode", Type, 18},
    -		{"ContinueNode.Line", Field, 18},
    -		{"ContinueNode.NodeType", Field, 18},
    -		{"ContinueNode.Pos", Field, 18},
    -		{"DotNode", Type, 0},
    -		{"DotNode.NodeType", Field, 4},
    -		{"DotNode.Pos", Field, 1},
    -		{"FieldNode", Type, 0},
    -		{"FieldNode.Ident", Field, 0},
    -		{"FieldNode.NodeType", Field, 0},
    -		{"FieldNode.Pos", Field, 1},
    -		{"IdentifierNode", Type, 0},
    -		{"IdentifierNode.Ident", Field, 0},
    -		{"IdentifierNode.NodeType", Field, 0},
    -		{"IdentifierNode.Pos", Field, 1},
    -		{"IfNode", Type, 0},
    -		{"IfNode.BranchNode", Field, 0},
    -		{"IsEmptyTree", Func, 0},
    -		{"ListNode", Type, 0},
    -		{"ListNode.NodeType", Field, 0},
    -		{"ListNode.Nodes", Field, 0},
    -		{"ListNode.Pos", Field, 1},
    -		{"Mode", Type, 16},
    -		{"New", Func, 0},
    -		{"NewIdentifier", Func, 0},
    -		{"NilNode", Type, 1},
    -		{"NilNode.NodeType", Field, 4},
    -		{"NilNode.Pos", Field, 1},
    -		{"Node", Type, 0},
    -		{"NodeAction", Const, 0},
    -		{"NodeBool", Const, 0},
    -		{"NodeBreak", Const, 18},
    -		{"NodeChain", Const, 1},
    -		{"NodeCommand", Const, 0},
    -		{"NodeComment", Const, 16},
    -		{"NodeContinue", Const, 18},
    -		{"NodeDot", Const, 0},
    -		{"NodeField", Const, 0},
    -		{"NodeIdentifier", Const, 0},
    -		{"NodeIf", Const, 0},
    -		{"NodeList", Const, 0},
    -		{"NodeNil", Const, 1},
    -		{"NodeNumber", Const, 0},
    -		{"NodePipe", Const, 0},
    -		{"NodeRange", Const, 0},
    -		{"NodeString", Const, 0},
    -		{"NodeTemplate", Const, 0},
    -		{"NodeText", Const, 0},
    -		{"NodeType", Type, 0},
    -		{"NodeVariable", Const, 0},
    -		{"NodeWith", Const, 0},
    -		{"NumberNode", Type, 0},
    -		{"NumberNode.Complex128", Field, 0},
    -		{"NumberNode.Float64", Field, 0},
    -		{"NumberNode.Int64", Field, 0},
    -		{"NumberNode.IsComplex", Field, 0},
    -		{"NumberNode.IsFloat", Field, 0},
    -		{"NumberNode.IsInt", Field, 0},
    -		{"NumberNode.IsUint", Field, 0},
    -		{"NumberNode.NodeType", Field, 0},
    -		{"NumberNode.Pos", Field, 1},
    -		{"NumberNode.Text", Field, 0},
    -		{"NumberNode.Uint64", Field, 0},
    -		{"Parse", Func, 0},
    -		{"ParseComments", Const, 16},
    -		{"PipeNode", Type, 0},
    -		{"PipeNode.Cmds", Field, 0},
    -		{"PipeNode.Decl", Field, 0},
    -		{"PipeNode.IsAssign", Field, 11},
    -		{"PipeNode.Line", Field, 0},
    -		{"PipeNode.NodeType", Field, 0},
    -		{"PipeNode.Pos", Field, 1},
    -		{"Pos", Type, 1},
    -		{"RangeNode", Type, 0},
    -		{"RangeNode.BranchNode", Field, 0},
    -		{"SkipFuncCheck", Const, 17},
    -		{"StringNode", Type, 0},
    -		{"StringNode.NodeType", Field, 0},
    -		{"StringNode.Pos", Field, 1},
    -		{"StringNode.Quoted", Field, 0},
    -		{"StringNode.Text", Field, 0},
    -		{"TemplateNode", Type, 0},
    -		{"TemplateNode.Line", Field, 0},
    -		{"TemplateNode.Name", Field, 0},
    -		{"TemplateNode.NodeType", Field, 0},
    -		{"TemplateNode.Pipe", Field, 0},
    -		{"TemplateNode.Pos", Field, 1},
    -		{"TextNode", Type, 0},
    -		{"TextNode.NodeType", Field, 0},
    -		{"TextNode.Pos", Field, 1},
    -		{"TextNode.Text", Field, 0},
    -		{"Tree", Type, 0},
    -		{"Tree.Mode", Field, 16},
    -		{"Tree.Name", Field, 0},
    -		{"Tree.ParseName", Field, 1},
    -		{"Tree.Root", Field, 0},
    -		{"VariableNode", Type, 0},
    -		{"VariableNode.Ident", Field, 0},
    -		{"VariableNode.NodeType", Field, 0},
    -		{"VariableNode.Pos", Field, 1},
    -		{"WithNode", Type, 0},
    -		{"WithNode.BranchNode", Field, 0},
    +		{"(*ActionNode).Copy", Method, 0, ""},
    +		{"(*ActionNode).String", Method, 0, ""},
    +		{"(*BoolNode).Copy", Method, 0, ""},
    +		{"(*BoolNode).String", Method, 0, ""},
    +		{"(*BranchNode).Copy", Method, 4, ""},
    +		{"(*BranchNode).String", Method, 0, ""},
    +		{"(*BreakNode).Copy", Method, 18, ""},
    +		{"(*BreakNode).String", Method, 18, ""},
    +		{"(*ChainNode).Add", Method, 1, ""},
    +		{"(*ChainNode).Copy", Method, 1, ""},
    +		{"(*ChainNode).String", Method, 1, ""},
    +		{"(*CommandNode).Copy", Method, 0, ""},
    +		{"(*CommandNode).String", Method, 0, ""},
    +		{"(*CommentNode).Copy", Method, 16, ""},
    +		{"(*CommentNode).String", Method, 16, ""},
    +		{"(*ContinueNode).Copy", Method, 18, ""},
    +		{"(*ContinueNode).String", Method, 18, ""},
    +		{"(*DotNode).Copy", Method, 0, ""},
    +		{"(*DotNode).String", Method, 0, ""},
    +		{"(*DotNode).Type", Method, 0, ""},
    +		{"(*FieldNode).Copy", Method, 0, ""},
    +		{"(*FieldNode).String", Method, 0, ""},
    +		{"(*IdentifierNode).Copy", Method, 0, ""},
    +		{"(*IdentifierNode).SetPos", Method, 1, ""},
    +		{"(*IdentifierNode).SetTree", Method, 4, ""},
    +		{"(*IdentifierNode).String", Method, 0, ""},
    +		{"(*IfNode).Copy", Method, 0, ""},
    +		{"(*IfNode).String", Method, 0, ""},
    +		{"(*ListNode).Copy", Method, 0, ""},
    +		{"(*ListNode).CopyList", Method, 0, ""},
    +		{"(*ListNode).String", Method, 0, ""},
    +		{"(*NilNode).Copy", Method, 1, ""},
    +		{"(*NilNode).String", Method, 1, ""},
    +		{"(*NilNode).Type", Method, 1, ""},
    +		{"(*NumberNode).Copy", Method, 0, ""},
    +		{"(*NumberNode).String", Method, 0, ""},
    +		{"(*PipeNode).Copy", Method, 0, ""},
    +		{"(*PipeNode).CopyPipe", Method, 0, ""},
    +		{"(*PipeNode).String", Method, 0, ""},
    +		{"(*RangeNode).Copy", Method, 0, ""},
    +		{"(*RangeNode).String", Method, 0, ""},
    +		{"(*StringNode).Copy", Method, 0, ""},
    +		{"(*StringNode).String", Method, 0, ""},
    +		{"(*TemplateNode).Copy", Method, 0, ""},
    +		{"(*TemplateNode).String", Method, 0, ""},
    +		{"(*TextNode).Copy", Method, 0, ""},
    +		{"(*TextNode).String", Method, 0, ""},
    +		{"(*Tree).Copy", Method, 2, ""},
    +		{"(*Tree).ErrorContext", Method, 1, ""},
    +		{"(*Tree).Parse", Method, 0, ""},
    +		{"(*VariableNode).Copy", Method, 0, ""},
    +		{"(*VariableNode).String", Method, 0, ""},
    +		{"(*WithNode).Copy", Method, 0, ""},
    +		{"(*WithNode).String", Method, 0, ""},
    +		{"(ActionNode).Position", Method, 1, ""},
    +		{"(ActionNode).Type", Method, 0, ""},
    +		{"(BoolNode).Position", Method, 1, ""},
    +		{"(BoolNode).Type", Method, 0, ""},
    +		{"(BranchNode).Position", Method, 1, ""},
    +		{"(BranchNode).Type", Method, 0, ""},
    +		{"(BreakNode).Position", Method, 18, ""},
    +		{"(BreakNode).Type", Method, 18, ""},
    +		{"(ChainNode).Position", Method, 1, ""},
    +		{"(ChainNode).Type", Method, 1, ""},
    +		{"(CommandNode).Position", Method, 1, ""},
    +		{"(CommandNode).Type", Method, 0, ""},
    +		{"(CommentNode).Position", Method, 16, ""},
    +		{"(CommentNode).Type", Method, 16, ""},
    +		{"(ContinueNode).Position", Method, 18, ""},
    +		{"(ContinueNode).Type", Method, 18, ""},
    +		{"(DotNode).Position", Method, 1, ""},
    +		{"(FieldNode).Position", Method, 1, ""},
    +		{"(FieldNode).Type", Method, 0, ""},
    +		{"(IdentifierNode).Position", Method, 1, ""},
    +		{"(IdentifierNode).Type", Method, 0, ""},
    +		{"(IfNode).Position", Method, 1, ""},
    +		{"(IfNode).Type", Method, 0, ""},
    +		{"(ListNode).Position", Method, 1, ""},
    +		{"(ListNode).Type", Method, 0, ""},
    +		{"(NilNode).Position", Method, 1, ""},
    +		{"(NodeType).Type", Method, 0, ""},
    +		{"(NumberNode).Position", Method, 1, ""},
    +		{"(NumberNode).Type", Method, 0, ""},
    +		{"(PipeNode).Position", Method, 1, ""},
    +		{"(PipeNode).Type", Method, 0, ""},
    +		{"(Pos).Position", Method, 1, ""},
    +		{"(RangeNode).Position", Method, 1, ""},
    +		{"(RangeNode).Type", Method, 0, ""},
    +		{"(StringNode).Position", Method, 1, ""},
    +		{"(StringNode).Type", Method, 0, ""},
    +		{"(TemplateNode).Position", Method, 1, ""},
    +		{"(TemplateNode).Type", Method, 0, ""},
    +		{"(TextNode).Position", Method, 1, ""},
    +		{"(TextNode).Type", Method, 0, ""},
    +		{"(VariableNode).Position", Method, 1, ""},
    +		{"(VariableNode).Type", Method, 0, ""},
    +		{"(WithNode).Position", Method, 1, ""},
    +		{"(WithNode).Type", Method, 0, ""},
    +		{"ActionNode", Type, 0, ""},
    +		{"ActionNode.Line", Field, 0, ""},
    +		{"ActionNode.NodeType", Field, 0, ""},
    +		{"ActionNode.Pipe", Field, 0, ""},
    +		{"ActionNode.Pos", Field, 1, ""},
    +		{"BoolNode", Type, 0, ""},
    +		{"BoolNode.NodeType", Field, 0, ""},
    +		{"BoolNode.Pos", Field, 1, ""},
    +		{"BoolNode.True", Field, 0, ""},
    +		{"BranchNode", Type, 0, ""},
    +		{"BranchNode.ElseList", Field, 0, ""},
    +		{"BranchNode.Line", Field, 0, ""},
    +		{"BranchNode.List", Field, 0, ""},
    +		{"BranchNode.NodeType", Field, 0, ""},
    +		{"BranchNode.Pipe", Field, 0, ""},
    +		{"BranchNode.Pos", Field, 1, ""},
    +		{"BreakNode", Type, 18, ""},
    +		{"BreakNode.Line", Field, 18, ""},
    +		{"BreakNode.NodeType", Field, 18, ""},
    +		{"BreakNode.Pos", Field, 18, ""},
    +		{"ChainNode", Type, 1, ""},
    +		{"ChainNode.Field", Field, 1, ""},
    +		{"ChainNode.Node", Field, 1, ""},
    +		{"ChainNode.NodeType", Field, 1, ""},
    +		{"ChainNode.Pos", Field, 1, ""},
    +		{"CommandNode", Type, 0, ""},
    +		{"CommandNode.Args", Field, 0, ""},
    +		{"CommandNode.NodeType", Field, 0, ""},
    +		{"CommandNode.Pos", Field, 1, ""},
    +		{"CommentNode", Type, 16, ""},
    +		{"CommentNode.NodeType", Field, 16, ""},
    +		{"CommentNode.Pos", Field, 16, ""},
    +		{"CommentNode.Text", Field, 16, ""},
    +		{"ContinueNode", Type, 18, ""},
    +		{"ContinueNode.Line", Field, 18, ""},
    +		{"ContinueNode.NodeType", Field, 18, ""},
    +		{"ContinueNode.Pos", Field, 18, ""},
    +		{"DotNode", Type, 0, ""},
    +		{"DotNode.NodeType", Field, 4, ""},
    +		{"DotNode.Pos", Field, 1, ""},
    +		{"FieldNode", Type, 0, ""},
    +		{"FieldNode.Ident", Field, 0, ""},
    +		{"FieldNode.NodeType", Field, 0, ""},
    +		{"FieldNode.Pos", Field, 1, ""},
    +		{"IdentifierNode", Type, 0, ""},
    +		{"IdentifierNode.Ident", Field, 0, ""},
    +		{"IdentifierNode.NodeType", Field, 0, ""},
    +		{"IdentifierNode.Pos", Field, 1, ""},
    +		{"IfNode", Type, 0, ""},
    +		{"IfNode.BranchNode", Field, 0, ""},
    +		{"IsEmptyTree", Func, 0, "func(n Node) bool"},
    +		{"ListNode", Type, 0, ""},
    +		{"ListNode.NodeType", Field, 0, ""},
    +		{"ListNode.Nodes", Field, 0, ""},
    +		{"ListNode.Pos", Field, 1, ""},
    +		{"Mode", Type, 16, ""},
    +		{"New", Func, 0, "func(name string, funcs ...map[string]any) *Tree"},
    +		{"NewIdentifier", Func, 0, "func(ident string) *IdentifierNode"},
    +		{"NilNode", Type, 1, ""},
    +		{"NilNode.NodeType", Field, 4, ""},
    +		{"NilNode.Pos", Field, 1, ""},
    +		{"Node", Type, 0, ""},
    +		{"NodeAction", Const, 0, ""},
    +		{"NodeBool", Const, 0, ""},
    +		{"NodeBreak", Const, 18, ""},
    +		{"NodeChain", Const, 1, ""},
    +		{"NodeCommand", Const, 0, ""},
    +		{"NodeComment", Const, 16, ""},
    +		{"NodeContinue", Const, 18, ""},
    +		{"NodeDot", Const, 0, ""},
    +		{"NodeField", Const, 0, ""},
    +		{"NodeIdentifier", Const, 0, ""},
    +		{"NodeIf", Const, 0, ""},
    +		{"NodeList", Const, 0, ""},
    +		{"NodeNil", Const, 1, ""},
    +		{"NodeNumber", Const, 0, ""},
    +		{"NodePipe", Const, 0, ""},
    +		{"NodeRange", Const, 0, ""},
    +		{"NodeString", Const, 0, ""},
    +		{"NodeTemplate", Const, 0, ""},
    +		{"NodeText", Const, 0, ""},
    +		{"NodeType", Type, 0, ""},
    +		{"NodeVariable", Const, 0, ""},
    +		{"NodeWith", Const, 0, ""},
    +		{"NumberNode", Type, 0, ""},
    +		{"NumberNode.Complex128", Field, 0, ""},
    +		{"NumberNode.Float64", Field, 0, ""},
    +		{"NumberNode.Int64", Field, 0, ""},
    +		{"NumberNode.IsComplex", Field, 0, ""},
    +		{"NumberNode.IsFloat", Field, 0, ""},
    +		{"NumberNode.IsInt", Field, 0, ""},
    +		{"NumberNode.IsUint", Field, 0, ""},
    +		{"NumberNode.NodeType", Field, 0, ""},
    +		{"NumberNode.Pos", Field, 1, ""},
    +		{"NumberNode.Text", Field, 0, ""},
    +		{"NumberNode.Uint64", Field, 0, ""},
    +		{"Parse", Func, 0, "func(name string, text string, leftDelim string, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error)"},
    +		{"ParseComments", Const, 16, ""},
    +		{"PipeNode", Type, 0, ""},
    +		{"PipeNode.Cmds", Field, 0, ""},
    +		{"PipeNode.Decl", Field, 0, ""},
    +		{"PipeNode.IsAssign", Field, 11, ""},
    +		{"PipeNode.Line", Field, 0, ""},
    +		{"PipeNode.NodeType", Field, 0, ""},
    +		{"PipeNode.Pos", Field, 1, ""},
    +		{"Pos", Type, 1, ""},
    +		{"RangeNode", Type, 0, ""},
    +		{"RangeNode.BranchNode", Field, 0, ""},
    +		{"SkipFuncCheck", Const, 17, ""},
    +		{"StringNode", Type, 0, ""},
    +		{"StringNode.NodeType", Field, 0, ""},
    +		{"StringNode.Pos", Field, 1, ""},
    +		{"StringNode.Quoted", Field, 0, ""},
    +		{"StringNode.Text", Field, 0, ""},
    +		{"TemplateNode", Type, 0, ""},
    +		{"TemplateNode.Line", Field, 0, ""},
    +		{"TemplateNode.Name", Field, 0, ""},
    +		{"TemplateNode.NodeType", Field, 0, ""},
    +		{"TemplateNode.Pipe", Field, 0, ""},
    +		{"TemplateNode.Pos", Field, 1, ""},
    +		{"TextNode", Type, 0, ""},
    +		{"TextNode.NodeType", Field, 0, ""},
    +		{"TextNode.Pos", Field, 1, ""},
    +		{"TextNode.Text", Field, 0, ""},
    +		{"Tree", Type, 0, ""},
    +		{"Tree.Mode", Field, 16, ""},
    +		{"Tree.Name", Field, 0, ""},
    +		{"Tree.ParseName", Field, 1, ""},
    +		{"Tree.Root", Field, 0, ""},
    +		{"VariableNode", Type, 0, ""},
    +		{"VariableNode.Ident", Field, 0, ""},
    +		{"VariableNode.NodeType", Field, 0, ""},
    +		{"VariableNode.Pos", Field, 1, ""},
    +		{"WithNode", Type, 0, ""},
    +		{"WithNode.BranchNode", Field, 0, ""},
     	},
     	"time": {
    -		{"(*Location).String", Method, 0},
    -		{"(*ParseError).Error", Method, 0},
    -		{"(*Ticker).Reset", Method, 15},
    -		{"(*Ticker).Stop", Method, 0},
    -		{"(*Time).GobDecode", Method, 0},
    -		{"(*Time).UnmarshalBinary", Method, 2},
    -		{"(*Time).UnmarshalJSON", Method, 0},
    -		{"(*Time).UnmarshalText", Method, 2},
    -		{"(*Timer).Reset", Method, 1},
    -		{"(*Timer).Stop", Method, 0},
    -		{"(Duration).Abs", Method, 19},
    -		{"(Duration).Hours", Method, 0},
    -		{"(Duration).Microseconds", Method, 13},
    -		{"(Duration).Milliseconds", Method, 13},
    -		{"(Duration).Minutes", Method, 0},
    -		{"(Duration).Nanoseconds", Method, 0},
    -		{"(Duration).Round", Method, 9},
    -		{"(Duration).Seconds", Method, 0},
    -		{"(Duration).String", Method, 0},
    -		{"(Duration).Truncate", Method, 9},
    -		{"(Month).String", Method, 0},
    -		{"(Time).Add", Method, 0},
    -		{"(Time).AddDate", Method, 0},
    -		{"(Time).After", Method, 0},
    -		{"(Time).AppendBinary", Method, 24},
    -		{"(Time).AppendFormat", Method, 5},
    -		{"(Time).AppendText", Method, 24},
    -		{"(Time).Before", Method, 0},
    -		{"(Time).Clock", Method, 0},
    -		{"(Time).Compare", Method, 20},
    -		{"(Time).Date", Method, 0},
    -		{"(Time).Day", Method, 0},
    -		{"(Time).Equal", Method, 0},
    -		{"(Time).Format", Method, 0},
    -		{"(Time).GoString", Method, 17},
    -		{"(Time).GobEncode", Method, 0},
    -		{"(Time).Hour", Method, 0},
    -		{"(Time).ISOWeek", Method, 0},
    -		{"(Time).In", Method, 0},
    -		{"(Time).IsDST", Method, 17},
    -		{"(Time).IsZero", Method, 0},
    -		{"(Time).Local", Method, 0},
    -		{"(Time).Location", Method, 0},
    -		{"(Time).MarshalBinary", Method, 2},
    -		{"(Time).MarshalJSON", Method, 0},
    -		{"(Time).MarshalText", Method, 2},
    -		{"(Time).Minute", Method, 0},
    -		{"(Time).Month", Method, 0},
    -		{"(Time).Nanosecond", Method, 0},
    -		{"(Time).Round", Method, 1},
    -		{"(Time).Second", Method, 0},
    -		{"(Time).String", Method, 0},
    -		{"(Time).Sub", Method, 0},
    -		{"(Time).Truncate", Method, 1},
    -		{"(Time).UTC", Method, 0},
    -		{"(Time).Unix", Method, 0},
    -		{"(Time).UnixMicro", Method, 17},
    -		{"(Time).UnixMilli", Method, 17},
    -		{"(Time).UnixNano", Method, 0},
    -		{"(Time).Weekday", Method, 0},
    -		{"(Time).Year", Method, 0},
    -		{"(Time).YearDay", Method, 1},
    -		{"(Time).Zone", Method, 0},
    -		{"(Time).ZoneBounds", Method, 19},
    -		{"(Weekday).String", Method, 0},
    -		{"ANSIC", Const, 0},
    -		{"After", Func, 0},
    -		{"AfterFunc", Func, 0},
    -		{"April", Const, 0},
    -		{"August", Const, 0},
    -		{"Date", Func, 0},
    -		{"DateOnly", Const, 20},
    -		{"DateTime", Const, 20},
    -		{"December", Const, 0},
    -		{"Duration", Type, 0},
    -		{"February", Const, 0},
    -		{"FixedZone", Func, 0},
    -		{"Friday", Const, 0},
    -		{"Hour", Const, 0},
    -		{"January", Const, 0},
    -		{"July", Const, 0},
    -		{"June", Const, 0},
    -		{"Kitchen", Const, 0},
    -		{"Layout", Const, 17},
    -		{"LoadLocation", Func, 0},
    -		{"LoadLocationFromTZData", Func, 10},
    -		{"Local", Var, 0},
    -		{"Location", Type, 0},
    -		{"March", Const, 0},
    -		{"May", Const, 0},
    -		{"Microsecond", Const, 0},
    -		{"Millisecond", Const, 0},
    -		{"Minute", Const, 0},
    -		{"Monday", Const, 0},
    -		{"Month", Type, 0},
    -		{"Nanosecond", Const, 0},
    -		{"NewTicker", Func, 0},
    -		{"NewTimer", Func, 0},
    -		{"November", Const, 0},
    -		{"Now", Func, 0},
    -		{"October", Const, 0},
    -		{"Parse", Func, 0},
    -		{"ParseDuration", Func, 0},
    -		{"ParseError", Type, 0},
    -		{"ParseError.Layout", Field, 0},
    -		{"ParseError.LayoutElem", Field, 0},
    -		{"ParseError.Message", Field, 0},
    -		{"ParseError.Value", Field, 0},
    -		{"ParseError.ValueElem", Field, 0},
    -		{"ParseInLocation", Func, 1},
    -		{"RFC1123", Const, 0},
    -		{"RFC1123Z", Const, 0},
    -		{"RFC3339", Const, 0},
    -		{"RFC3339Nano", Const, 0},
    -		{"RFC822", Const, 0},
    -		{"RFC822Z", Const, 0},
    -		{"RFC850", Const, 0},
    -		{"RubyDate", Const, 0},
    -		{"Saturday", Const, 0},
    -		{"Second", Const, 0},
    -		{"September", Const, 0},
    -		{"Since", Func, 0},
    -		{"Sleep", Func, 0},
    -		{"Stamp", Const, 0},
    -		{"StampMicro", Const, 0},
    -		{"StampMilli", Const, 0},
    -		{"StampNano", Const, 0},
    -		{"Sunday", Const, 0},
    -		{"Thursday", Const, 0},
    -		{"Tick", Func, 0},
    -		{"Ticker", Type, 0},
    -		{"Ticker.C", Field, 0},
    -		{"Time", Type, 0},
    -		{"TimeOnly", Const, 20},
    -		{"Timer", Type, 0},
    -		{"Timer.C", Field, 0},
    -		{"Tuesday", Const, 0},
    -		{"UTC", Var, 0},
    -		{"Unix", Func, 0},
    -		{"UnixDate", Const, 0},
    -		{"UnixMicro", Func, 17},
    -		{"UnixMilli", Func, 17},
    -		{"Until", Func, 8},
    -		{"Wednesday", Const, 0},
    -		{"Weekday", Type, 0},
    +		{"(*Location).String", Method, 0, ""},
    +		{"(*ParseError).Error", Method, 0, ""},
    +		{"(*Ticker).Reset", Method, 15, ""},
    +		{"(*Ticker).Stop", Method, 0, ""},
    +		{"(*Time).GobDecode", Method, 0, ""},
    +		{"(*Time).UnmarshalBinary", Method, 2, ""},
    +		{"(*Time).UnmarshalJSON", Method, 0, ""},
    +		{"(*Time).UnmarshalText", Method, 2, ""},
    +		{"(*Timer).Reset", Method, 1, ""},
    +		{"(*Timer).Stop", Method, 0, ""},
    +		{"(Duration).Abs", Method, 19, ""},
    +		{"(Duration).Hours", Method, 0, ""},
    +		{"(Duration).Microseconds", Method, 13, ""},
    +		{"(Duration).Milliseconds", Method, 13, ""},
    +		{"(Duration).Minutes", Method, 0, ""},
    +		{"(Duration).Nanoseconds", Method, 0, ""},
    +		{"(Duration).Round", Method, 9, ""},
    +		{"(Duration).Seconds", Method, 0, ""},
    +		{"(Duration).String", Method, 0, ""},
    +		{"(Duration).Truncate", Method, 9, ""},
    +		{"(Month).String", Method, 0, ""},
    +		{"(Time).Add", Method, 0, ""},
    +		{"(Time).AddDate", Method, 0, ""},
    +		{"(Time).After", Method, 0, ""},
    +		{"(Time).AppendBinary", Method, 24, ""},
    +		{"(Time).AppendFormat", Method, 5, ""},
    +		{"(Time).AppendText", Method, 24, ""},
    +		{"(Time).Before", Method, 0, ""},
    +		{"(Time).Clock", Method, 0, ""},
    +		{"(Time).Compare", Method, 20, ""},
    +		{"(Time).Date", Method, 0, ""},
    +		{"(Time).Day", Method, 0, ""},
    +		{"(Time).Equal", Method, 0, ""},
    +		{"(Time).Format", Method, 0, ""},
    +		{"(Time).GoString", Method, 17, ""},
    +		{"(Time).GobEncode", Method, 0, ""},
    +		{"(Time).Hour", Method, 0, ""},
    +		{"(Time).ISOWeek", Method, 0, ""},
    +		{"(Time).In", Method, 0, ""},
    +		{"(Time).IsDST", Method, 17, ""},
    +		{"(Time).IsZero", Method, 0, ""},
    +		{"(Time).Local", Method, 0, ""},
    +		{"(Time).Location", Method, 0, ""},
    +		{"(Time).MarshalBinary", Method, 2, ""},
    +		{"(Time).MarshalJSON", Method, 0, ""},
    +		{"(Time).MarshalText", Method, 2, ""},
    +		{"(Time).Minute", Method, 0, ""},
    +		{"(Time).Month", Method, 0, ""},
    +		{"(Time).Nanosecond", Method, 0, ""},
    +		{"(Time).Round", Method, 1, ""},
    +		{"(Time).Second", Method, 0, ""},
    +		{"(Time).String", Method, 0, ""},
    +		{"(Time).Sub", Method, 0, ""},
    +		{"(Time).Truncate", Method, 1, ""},
    +		{"(Time).UTC", Method, 0, ""},
    +		{"(Time).Unix", Method, 0, ""},
    +		{"(Time).UnixMicro", Method, 17, ""},
    +		{"(Time).UnixMilli", Method, 17, ""},
    +		{"(Time).UnixNano", Method, 0, ""},
    +		{"(Time).Weekday", Method, 0, ""},
    +		{"(Time).Year", Method, 0, ""},
    +		{"(Time).YearDay", Method, 1, ""},
    +		{"(Time).Zone", Method, 0, ""},
    +		{"(Time).ZoneBounds", Method, 19, ""},
    +		{"(Weekday).String", Method, 0, ""},
    +		{"ANSIC", Const, 0, ""},
    +		{"After", Func, 0, "func(d Duration) <-chan Time"},
    +		{"AfterFunc", Func, 0, "func(d Duration, f func()) *Timer"},
    +		{"April", Const, 0, ""},
    +		{"August", Const, 0, ""},
    +		{"Date", Func, 0, "func(year int, month Month, day int, hour int, min int, sec int, nsec int, loc *Location) Time"},
    +		{"DateOnly", Const, 20, ""},
    +		{"DateTime", Const, 20, ""},
    +		{"December", Const, 0, ""},
    +		{"Duration", Type, 0, ""},
    +		{"February", Const, 0, ""},
    +		{"FixedZone", Func, 0, "func(name string, offset int) *Location"},
    +		{"Friday", Const, 0, ""},
    +		{"Hour", Const, 0, ""},
    +		{"January", Const, 0, ""},
    +		{"July", Const, 0, ""},
    +		{"June", Const, 0, ""},
    +		{"Kitchen", Const, 0, ""},
    +		{"Layout", Const, 17, ""},
    +		{"LoadLocation", Func, 0, "func(name string) (*Location, error)"},
    +		{"LoadLocationFromTZData", Func, 10, "func(name string, data []byte) (*Location, error)"},
    +		{"Local", Var, 0, ""},
    +		{"Location", Type, 0, ""},
    +		{"March", Const, 0, ""},
    +		{"May", Const, 0, ""},
    +		{"Microsecond", Const, 0, ""},
    +		{"Millisecond", Const, 0, ""},
    +		{"Minute", Const, 0, ""},
    +		{"Monday", Const, 0, ""},
    +		{"Month", Type, 0, ""},
    +		{"Nanosecond", Const, 0, ""},
    +		{"NewTicker", Func, 0, "func(d Duration) *Ticker"},
    +		{"NewTimer", Func, 0, "func(d Duration) *Timer"},
    +		{"November", Const, 0, ""},
    +		{"Now", Func, 0, "func() Time"},
    +		{"October", Const, 0, ""},
    +		{"Parse", Func, 0, "func(layout string, value string) (Time, error)"},
    +		{"ParseDuration", Func, 0, "func(s string) (Duration, error)"},
    +		{"ParseError", Type, 0, ""},
    +		{"ParseError.Layout", Field, 0, ""},
    +		{"ParseError.LayoutElem", Field, 0, ""},
    +		{"ParseError.Message", Field, 0, ""},
    +		{"ParseError.Value", Field, 0, ""},
    +		{"ParseError.ValueElem", Field, 0, ""},
    +		{"ParseInLocation", Func, 1, "func(layout string, value string, loc *Location) (Time, error)"},
    +		{"RFC1123", Const, 0, ""},
    +		{"RFC1123Z", Const, 0, ""},
    +		{"RFC3339", Const, 0, ""},
    +		{"RFC3339Nano", Const, 0, ""},
    +		{"RFC822", Const, 0, ""},
    +		{"RFC822Z", Const, 0, ""},
    +		{"RFC850", Const, 0, ""},
    +		{"RubyDate", Const, 0, ""},
    +		{"Saturday", Const, 0, ""},
    +		{"Second", Const, 0, ""},
    +		{"September", Const, 0, ""},
    +		{"Since", Func, 0, "func(t Time) Duration"},
    +		{"Sleep", Func, 0, "func(d Duration)"},
    +		{"Stamp", Const, 0, ""},
    +		{"StampMicro", Const, 0, ""},
    +		{"StampMilli", Const, 0, ""},
    +		{"StampNano", Const, 0, ""},
    +		{"Sunday", Const, 0, ""},
    +		{"Thursday", Const, 0, ""},
    +		{"Tick", Func, 0, "func(d Duration) <-chan Time"},
    +		{"Ticker", Type, 0, ""},
    +		{"Ticker.C", Field, 0, ""},
    +		{"Time", Type, 0, ""},
    +		{"TimeOnly", Const, 20, ""},
    +		{"Timer", Type, 0, ""},
    +		{"Timer.C", Field, 0, ""},
    +		{"Tuesday", Const, 0, ""},
    +		{"UTC", Var, 0, ""},
    +		{"Unix", Func, 0, "func(sec int64, nsec int64) Time"},
    +		{"UnixDate", Const, 0, ""},
    +		{"UnixMicro", Func, 17, "func(usec int64) Time"},
    +		{"UnixMilli", Func, 17, "func(msec int64) Time"},
    +		{"Until", Func, 8, "func(t Time) Duration"},
    +		{"Wednesday", Const, 0, ""},
    +		{"Weekday", Type, 0, ""},
     	},
     	"unicode": {
    -		{"(SpecialCase).ToLower", Method, 0},
    -		{"(SpecialCase).ToTitle", Method, 0},
    -		{"(SpecialCase).ToUpper", Method, 0},
    -		{"ASCII_Hex_Digit", Var, 0},
    -		{"Adlam", Var, 7},
    -		{"Ahom", Var, 5},
    -		{"Anatolian_Hieroglyphs", Var, 5},
    -		{"Arabic", Var, 0},
    -		{"Armenian", Var, 0},
    -		{"Avestan", Var, 0},
    -		{"AzeriCase", Var, 0},
    -		{"Balinese", Var, 0},
    -		{"Bamum", Var, 0},
    -		{"Bassa_Vah", Var, 4},
    -		{"Batak", Var, 0},
    -		{"Bengali", Var, 0},
    -		{"Bhaiksuki", Var, 7},
    -		{"Bidi_Control", Var, 0},
    -		{"Bopomofo", Var, 0},
    -		{"Brahmi", Var, 0},
    -		{"Braille", Var, 0},
    -		{"Buginese", Var, 0},
    -		{"Buhid", Var, 0},
    -		{"C", Var, 0},
    -		{"Canadian_Aboriginal", Var, 0},
    -		{"Carian", Var, 0},
    -		{"CaseRange", Type, 0},
    -		{"CaseRange.Delta", Field, 0},
    -		{"CaseRange.Hi", Field, 0},
    -		{"CaseRange.Lo", Field, 0},
    -		{"CaseRanges", Var, 0},
    -		{"Categories", Var, 0},
    -		{"Caucasian_Albanian", Var, 4},
    -		{"Cc", Var, 0},
    -		{"Cf", Var, 0},
    -		{"Chakma", Var, 1},
    -		{"Cham", Var, 0},
    -		{"Cherokee", Var, 0},
    -		{"Chorasmian", Var, 16},
    -		{"Co", Var, 0},
    -		{"Common", Var, 0},
    -		{"Coptic", Var, 0},
    -		{"Cs", Var, 0},
    -		{"Cuneiform", Var, 0},
    -		{"Cypriot", Var, 0},
    -		{"Cypro_Minoan", Var, 21},
    -		{"Cyrillic", Var, 0},
    -		{"Dash", Var, 0},
    -		{"Deprecated", Var, 0},
    -		{"Deseret", Var, 0},
    -		{"Devanagari", Var, 0},
    -		{"Diacritic", Var, 0},
    -		{"Digit", Var, 0},
    -		{"Dives_Akuru", Var, 16},
    -		{"Dogra", Var, 13},
    -		{"Duployan", Var, 4},
    -		{"Egyptian_Hieroglyphs", Var, 0},
    -		{"Elbasan", Var, 4},
    -		{"Elymaic", Var, 14},
    -		{"Ethiopic", Var, 0},
    -		{"Extender", Var, 0},
    -		{"FoldCategory", Var, 0},
    -		{"FoldScript", Var, 0},
    -		{"Georgian", Var, 0},
    -		{"Glagolitic", Var, 0},
    -		{"Gothic", Var, 0},
    -		{"Grantha", Var, 4},
    -		{"GraphicRanges", Var, 0},
    -		{"Greek", Var, 0},
    -		{"Gujarati", Var, 0},
    -		{"Gunjala_Gondi", Var, 13},
    -		{"Gurmukhi", Var, 0},
    -		{"Han", Var, 0},
    -		{"Hangul", Var, 0},
    -		{"Hanifi_Rohingya", Var, 13},
    -		{"Hanunoo", Var, 0},
    -		{"Hatran", Var, 5},
    -		{"Hebrew", Var, 0},
    -		{"Hex_Digit", Var, 0},
    -		{"Hiragana", Var, 0},
    -		{"Hyphen", Var, 0},
    -		{"IDS_Binary_Operator", Var, 0},
    -		{"IDS_Trinary_Operator", Var, 0},
    -		{"Ideographic", Var, 0},
    -		{"Imperial_Aramaic", Var, 0},
    -		{"In", Func, 2},
    -		{"Inherited", Var, 0},
    -		{"Inscriptional_Pahlavi", Var, 0},
    -		{"Inscriptional_Parthian", Var, 0},
    -		{"Is", Func, 0},
    -		{"IsControl", Func, 0},
    -		{"IsDigit", Func, 0},
    -		{"IsGraphic", Func, 0},
    -		{"IsLetter", Func, 0},
    -		{"IsLower", Func, 0},
    -		{"IsMark", Func, 0},
    -		{"IsNumber", Func, 0},
    -		{"IsOneOf", Func, 0},
    -		{"IsPrint", Func, 0},
    -		{"IsPunct", Func, 0},
    -		{"IsSpace", Func, 0},
    -		{"IsSymbol", Func, 0},
    -		{"IsTitle", Func, 0},
    -		{"IsUpper", Func, 0},
    -		{"Javanese", Var, 0},
    -		{"Join_Control", Var, 0},
    -		{"Kaithi", Var, 0},
    -		{"Kannada", Var, 0},
    -		{"Katakana", Var, 0},
    -		{"Kawi", Var, 21},
    -		{"Kayah_Li", Var, 0},
    -		{"Kharoshthi", Var, 0},
    -		{"Khitan_Small_Script", Var, 16},
    -		{"Khmer", Var, 0},
    -		{"Khojki", Var, 4},
    -		{"Khudawadi", Var, 4},
    -		{"L", Var, 0},
    -		{"Lao", Var, 0},
    -		{"Latin", Var, 0},
    -		{"Lepcha", Var, 0},
    -		{"Letter", Var, 0},
    -		{"Limbu", Var, 0},
    -		{"Linear_A", Var, 4},
    -		{"Linear_B", Var, 0},
    -		{"Lisu", Var, 0},
    -		{"Ll", Var, 0},
    -		{"Lm", Var, 0},
    -		{"Lo", Var, 0},
    -		{"Logical_Order_Exception", Var, 0},
    -		{"Lower", Var, 0},
    -		{"LowerCase", Const, 0},
    -		{"Lt", Var, 0},
    -		{"Lu", Var, 0},
    -		{"Lycian", Var, 0},
    -		{"Lydian", Var, 0},
    -		{"M", Var, 0},
    -		{"Mahajani", Var, 4},
    -		{"Makasar", Var, 13},
    -		{"Malayalam", Var, 0},
    -		{"Mandaic", Var, 0},
    -		{"Manichaean", Var, 4},
    -		{"Marchen", Var, 7},
    -		{"Mark", Var, 0},
    -		{"Masaram_Gondi", Var, 10},
    -		{"MaxASCII", Const, 0},
    -		{"MaxCase", Const, 0},
    -		{"MaxLatin1", Const, 0},
    -		{"MaxRune", Const, 0},
    -		{"Mc", Var, 0},
    -		{"Me", Var, 0},
    -		{"Medefaidrin", Var, 13},
    -		{"Meetei_Mayek", Var, 0},
    -		{"Mende_Kikakui", Var, 4},
    -		{"Meroitic_Cursive", Var, 1},
    -		{"Meroitic_Hieroglyphs", Var, 1},
    -		{"Miao", Var, 1},
    -		{"Mn", Var, 0},
    -		{"Modi", Var, 4},
    -		{"Mongolian", Var, 0},
    -		{"Mro", Var, 4},
    -		{"Multani", Var, 5},
    -		{"Myanmar", Var, 0},
    -		{"N", Var, 0},
    -		{"Nabataean", Var, 4},
    -		{"Nag_Mundari", Var, 21},
    -		{"Nandinagari", Var, 14},
    -		{"Nd", Var, 0},
    -		{"New_Tai_Lue", Var, 0},
    -		{"Newa", Var, 7},
    -		{"Nko", Var, 0},
    -		{"Nl", Var, 0},
    -		{"No", Var, 0},
    -		{"Noncharacter_Code_Point", Var, 0},
    -		{"Number", Var, 0},
    -		{"Nushu", Var, 10},
    -		{"Nyiakeng_Puachue_Hmong", Var, 14},
    -		{"Ogham", Var, 0},
    -		{"Ol_Chiki", Var, 0},
    -		{"Old_Hungarian", Var, 5},
    -		{"Old_Italic", Var, 0},
    -		{"Old_North_Arabian", Var, 4},
    -		{"Old_Permic", Var, 4},
    -		{"Old_Persian", Var, 0},
    -		{"Old_Sogdian", Var, 13},
    -		{"Old_South_Arabian", Var, 0},
    -		{"Old_Turkic", Var, 0},
    -		{"Old_Uyghur", Var, 21},
    -		{"Oriya", Var, 0},
    -		{"Osage", Var, 7},
    -		{"Osmanya", Var, 0},
    -		{"Other", Var, 0},
    -		{"Other_Alphabetic", Var, 0},
    -		{"Other_Default_Ignorable_Code_Point", Var, 0},
    -		{"Other_Grapheme_Extend", Var, 0},
    -		{"Other_ID_Continue", Var, 0},
    -		{"Other_ID_Start", Var, 0},
    -		{"Other_Lowercase", Var, 0},
    -		{"Other_Math", Var, 0},
    -		{"Other_Uppercase", Var, 0},
    -		{"P", Var, 0},
    -		{"Pahawh_Hmong", Var, 4},
    -		{"Palmyrene", Var, 4},
    -		{"Pattern_Syntax", Var, 0},
    -		{"Pattern_White_Space", Var, 0},
    -		{"Pau_Cin_Hau", Var, 4},
    -		{"Pc", Var, 0},
    -		{"Pd", Var, 0},
    -		{"Pe", Var, 0},
    -		{"Pf", Var, 0},
    -		{"Phags_Pa", Var, 0},
    -		{"Phoenician", Var, 0},
    -		{"Pi", Var, 0},
    -		{"Po", Var, 0},
    -		{"Prepended_Concatenation_Mark", Var, 7},
    -		{"PrintRanges", Var, 0},
    -		{"Properties", Var, 0},
    -		{"Ps", Var, 0},
    -		{"Psalter_Pahlavi", Var, 4},
    -		{"Punct", Var, 0},
    -		{"Quotation_Mark", Var, 0},
    -		{"Radical", Var, 0},
    -		{"Range16", Type, 0},
    -		{"Range16.Hi", Field, 0},
    -		{"Range16.Lo", Field, 0},
    -		{"Range16.Stride", Field, 0},
    -		{"Range32", Type, 0},
    -		{"Range32.Hi", Field, 0},
    -		{"Range32.Lo", Field, 0},
    -		{"Range32.Stride", Field, 0},
    -		{"RangeTable", Type, 0},
    -		{"RangeTable.LatinOffset", Field, 1},
    -		{"RangeTable.R16", Field, 0},
    -		{"RangeTable.R32", Field, 0},
    -		{"Regional_Indicator", Var, 10},
    -		{"Rejang", Var, 0},
    -		{"ReplacementChar", Const, 0},
    -		{"Runic", Var, 0},
    -		{"S", Var, 0},
    -		{"STerm", Var, 0},
    -		{"Samaritan", Var, 0},
    -		{"Saurashtra", Var, 0},
    -		{"Sc", Var, 0},
    -		{"Scripts", Var, 0},
    -		{"Sentence_Terminal", Var, 7},
    -		{"Sharada", Var, 1},
    -		{"Shavian", Var, 0},
    -		{"Siddham", Var, 4},
    -		{"SignWriting", Var, 5},
    -		{"SimpleFold", Func, 0},
    -		{"Sinhala", Var, 0},
    -		{"Sk", Var, 0},
    -		{"Sm", Var, 0},
    -		{"So", Var, 0},
    -		{"Soft_Dotted", Var, 0},
    -		{"Sogdian", Var, 13},
    -		{"Sora_Sompeng", Var, 1},
    -		{"Soyombo", Var, 10},
    -		{"Space", Var, 0},
    -		{"SpecialCase", Type, 0},
    -		{"Sundanese", Var, 0},
    -		{"Syloti_Nagri", Var, 0},
    -		{"Symbol", Var, 0},
    -		{"Syriac", Var, 0},
    -		{"Tagalog", Var, 0},
    -		{"Tagbanwa", Var, 0},
    -		{"Tai_Le", Var, 0},
    -		{"Tai_Tham", Var, 0},
    -		{"Tai_Viet", Var, 0},
    -		{"Takri", Var, 1},
    -		{"Tamil", Var, 0},
    -		{"Tangsa", Var, 21},
    -		{"Tangut", Var, 7},
    -		{"Telugu", Var, 0},
    -		{"Terminal_Punctuation", Var, 0},
    -		{"Thaana", Var, 0},
    -		{"Thai", Var, 0},
    -		{"Tibetan", Var, 0},
    -		{"Tifinagh", Var, 0},
    -		{"Tirhuta", Var, 4},
    -		{"Title", Var, 0},
    -		{"TitleCase", Const, 0},
    -		{"To", Func, 0},
    -		{"ToLower", Func, 0},
    -		{"ToTitle", Func, 0},
    -		{"ToUpper", Func, 0},
    -		{"Toto", Var, 21},
    -		{"TurkishCase", Var, 0},
    -		{"Ugaritic", Var, 0},
    -		{"Unified_Ideograph", Var, 0},
    -		{"Upper", Var, 0},
    -		{"UpperCase", Const, 0},
    -		{"UpperLower", Const, 0},
    -		{"Vai", Var, 0},
    -		{"Variation_Selector", Var, 0},
    -		{"Version", Const, 0},
    -		{"Vithkuqi", Var, 21},
    -		{"Wancho", Var, 14},
    -		{"Warang_Citi", Var, 4},
    -		{"White_Space", Var, 0},
    -		{"Yezidi", Var, 16},
    -		{"Yi", Var, 0},
    -		{"Z", Var, 0},
    -		{"Zanabazar_Square", Var, 10},
    -		{"Zl", Var, 0},
    -		{"Zp", Var, 0},
    -		{"Zs", Var, 0},
    +		{"(SpecialCase).ToLower", Method, 0, ""},
    +		{"(SpecialCase).ToTitle", Method, 0, ""},
    +		{"(SpecialCase).ToUpper", Method, 0, ""},
    +		{"ASCII_Hex_Digit", Var, 0, ""},
    +		{"Adlam", Var, 7, ""},
    +		{"Ahom", Var, 5, ""},
    +		{"Anatolian_Hieroglyphs", Var, 5, ""},
    +		{"Arabic", Var, 0, ""},
    +		{"Armenian", Var, 0, ""},
    +		{"Avestan", Var, 0, ""},
    +		{"AzeriCase", Var, 0, ""},
    +		{"Balinese", Var, 0, ""},
    +		{"Bamum", Var, 0, ""},
    +		{"Bassa_Vah", Var, 4, ""},
    +		{"Batak", Var, 0, ""},
    +		{"Bengali", Var, 0, ""},
    +		{"Bhaiksuki", Var, 7, ""},
    +		{"Bidi_Control", Var, 0, ""},
    +		{"Bopomofo", Var, 0, ""},
    +		{"Brahmi", Var, 0, ""},
    +		{"Braille", Var, 0, ""},
    +		{"Buginese", Var, 0, ""},
    +		{"Buhid", Var, 0, ""},
    +		{"C", Var, 0, ""},
    +		{"Canadian_Aboriginal", Var, 0, ""},
    +		{"Carian", Var, 0, ""},
    +		{"CaseRange", Type, 0, ""},
    +		{"CaseRange.Delta", Field, 0, ""},
    +		{"CaseRange.Hi", Field, 0, ""},
    +		{"CaseRange.Lo", Field, 0, ""},
    +		{"CaseRanges", Var, 0, ""},
    +		{"Categories", Var, 0, ""},
    +		{"CategoryAliases", Var, 25, ""},
    +		{"Caucasian_Albanian", Var, 4, ""},
    +		{"Cc", Var, 0, ""},
    +		{"Cf", Var, 0, ""},
    +		{"Chakma", Var, 1, ""},
    +		{"Cham", Var, 0, ""},
    +		{"Cherokee", Var, 0, ""},
    +		{"Chorasmian", Var, 16, ""},
    +		{"Cn", Var, 25, ""},
    +		{"Co", Var, 0, ""},
    +		{"Common", Var, 0, ""},
    +		{"Coptic", Var, 0, ""},
    +		{"Cs", Var, 0, ""},
    +		{"Cuneiform", Var, 0, ""},
    +		{"Cypriot", Var, 0, ""},
    +		{"Cypro_Minoan", Var, 21, ""},
    +		{"Cyrillic", Var, 0, ""},
    +		{"Dash", Var, 0, ""},
    +		{"Deprecated", Var, 0, ""},
    +		{"Deseret", Var, 0, ""},
    +		{"Devanagari", Var, 0, ""},
    +		{"Diacritic", Var, 0, ""},
    +		{"Digit", Var, 0, ""},
    +		{"Dives_Akuru", Var, 16, ""},
    +		{"Dogra", Var, 13, ""},
    +		{"Duployan", Var, 4, ""},
    +		{"Egyptian_Hieroglyphs", Var, 0, ""},
    +		{"Elbasan", Var, 4, ""},
    +		{"Elymaic", Var, 14, ""},
    +		{"Ethiopic", Var, 0, ""},
    +		{"Extender", Var, 0, ""},
    +		{"FoldCategory", Var, 0, ""},
    +		{"FoldScript", Var, 0, ""},
    +		{"Georgian", Var, 0, ""},
    +		{"Glagolitic", Var, 0, ""},
    +		{"Gothic", Var, 0, ""},
    +		{"Grantha", Var, 4, ""},
    +		{"GraphicRanges", Var, 0, ""},
    +		{"Greek", Var, 0, ""},
    +		{"Gujarati", Var, 0, ""},
    +		{"Gunjala_Gondi", Var, 13, ""},
    +		{"Gurmukhi", Var, 0, ""},
    +		{"Han", Var, 0, ""},
    +		{"Hangul", Var, 0, ""},
    +		{"Hanifi_Rohingya", Var, 13, ""},
    +		{"Hanunoo", Var, 0, ""},
    +		{"Hatran", Var, 5, ""},
    +		{"Hebrew", Var, 0, ""},
    +		{"Hex_Digit", Var, 0, ""},
    +		{"Hiragana", Var, 0, ""},
    +		{"Hyphen", Var, 0, ""},
    +		{"IDS_Binary_Operator", Var, 0, ""},
    +		{"IDS_Trinary_Operator", Var, 0, ""},
    +		{"Ideographic", Var, 0, ""},
    +		{"Imperial_Aramaic", Var, 0, ""},
    +		{"In", Func, 2, "func(r rune, ranges ...*RangeTable) bool"},
    +		{"Inherited", Var, 0, ""},
    +		{"Inscriptional_Pahlavi", Var, 0, ""},
    +		{"Inscriptional_Parthian", Var, 0, ""},
    +		{"Is", Func, 0, "func(rangeTab *RangeTable, r rune) bool"},
    +		{"IsControl", Func, 0, "func(r rune) bool"},
    +		{"IsDigit", Func, 0, "func(r rune) bool"},
    +		{"IsGraphic", Func, 0, "func(r rune) bool"},
    +		{"IsLetter", Func, 0, "func(r rune) bool"},
    +		{"IsLower", Func, 0, "func(r rune) bool"},
    +		{"IsMark", Func, 0, "func(r rune) bool"},
    +		{"IsNumber", Func, 0, "func(r rune) bool"},
    +		{"IsOneOf", Func, 0, "func(ranges []*RangeTable, r rune) bool"},
    +		{"IsPrint", Func, 0, "func(r rune) bool"},
    +		{"IsPunct", Func, 0, "func(r rune) bool"},
    +		{"IsSpace", Func, 0, "func(r rune) bool"},
    +		{"IsSymbol", Func, 0, "func(r rune) bool"},
    +		{"IsTitle", Func, 0, "func(r rune) bool"},
    +		{"IsUpper", Func, 0, "func(r rune) bool"},
    +		{"Javanese", Var, 0, ""},
    +		{"Join_Control", Var, 0, ""},
    +		{"Kaithi", Var, 0, ""},
    +		{"Kannada", Var, 0, ""},
    +		{"Katakana", Var, 0, ""},
    +		{"Kawi", Var, 21, ""},
    +		{"Kayah_Li", Var, 0, ""},
    +		{"Kharoshthi", Var, 0, ""},
    +		{"Khitan_Small_Script", Var, 16, ""},
    +		{"Khmer", Var, 0, ""},
    +		{"Khojki", Var, 4, ""},
    +		{"Khudawadi", Var, 4, ""},
    +		{"L", Var, 0, ""},
    +		{"LC", Var, 25, ""},
    +		{"Lao", Var, 0, ""},
    +		{"Latin", Var, 0, ""},
    +		{"Lepcha", Var, 0, ""},
    +		{"Letter", Var, 0, ""},
    +		{"Limbu", Var, 0, ""},
    +		{"Linear_A", Var, 4, ""},
    +		{"Linear_B", Var, 0, ""},
    +		{"Lisu", Var, 0, ""},
    +		{"Ll", Var, 0, ""},
    +		{"Lm", Var, 0, ""},
    +		{"Lo", Var, 0, ""},
    +		{"Logical_Order_Exception", Var, 0, ""},
    +		{"Lower", Var, 0, ""},
    +		{"LowerCase", Const, 0, ""},
    +		{"Lt", Var, 0, ""},
    +		{"Lu", Var, 0, ""},
    +		{"Lycian", Var, 0, ""},
    +		{"Lydian", Var, 0, ""},
    +		{"M", Var, 0, ""},
    +		{"Mahajani", Var, 4, ""},
    +		{"Makasar", Var, 13, ""},
    +		{"Malayalam", Var, 0, ""},
    +		{"Mandaic", Var, 0, ""},
    +		{"Manichaean", Var, 4, ""},
    +		{"Marchen", Var, 7, ""},
    +		{"Mark", Var, 0, ""},
    +		{"Masaram_Gondi", Var, 10, ""},
    +		{"MaxASCII", Const, 0, ""},
    +		{"MaxCase", Const, 0, ""},
    +		{"MaxLatin1", Const, 0, ""},
    +		{"MaxRune", Const, 0, ""},
    +		{"Mc", Var, 0, ""},
    +		{"Me", Var, 0, ""},
    +		{"Medefaidrin", Var, 13, ""},
    +		{"Meetei_Mayek", Var, 0, ""},
    +		{"Mende_Kikakui", Var, 4, ""},
    +		{"Meroitic_Cursive", Var, 1, ""},
    +		{"Meroitic_Hieroglyphs", Var, 1, ""},
    +		{"Miao", Var, 1, ""},
    +		{"Mn", Var, 0, ""},
    +		{"Modi", Var, 4, ""},
    +		{"Mongolian", Var, 0, ""},
    +		{"Mro", Var, 4, ""},
    +		{"Multani", Var, 5, ""},
    +		{"Myanmar", Var, 0, ""},
    +		{"N", Var, 0, ""},
    +		{"Nabataean", Var, 4, ""},
    +		{"Nag_Mundari", Var, 21, ""},
    +		{"Nandinagari", Var, 14, ""},
    +		{"Nd", Var, 0, ""},
    +		{"New_Tai_Lue", Var, 0, ""},
    +		{"Newa", Var, 7, ""},
    +		{"Nko", Var, 0, ""},
    +		{"Nl", Var, 0, ""},
    +		{"No", Var, 0, ""},
    +		{"Noncharacter_Code_Point", Var, 0, ""},
    +		{"Number", Var, 0, ""},
    +		{"Nushu", Var, 10, ""},
    +		{"Nyiakeng_Puachue_Hmong", Var, 14, ""},
    +		{"Ogham", Var, 0, ""},
    +		{"Ol_Chiki", Var, 0, ""},
    +		{"Old_Hungarian", Var, 5, ""},
    +		{"Old_Italic", Var, 0, ""},
    +		{"Old_North_Arabian", Var, 4, ""},
    +		{"Old_Permic", Var, 4, ""},
    +		{"Old_Persian", Var, 0, ""},
    +		{"Old_Sogdian", Var, 13, ""},
    +		{"Old_South_Arabian", Var, 0, ""},
    +		{"Old_Turkic", Var, 0, ""},
    +		{"Old_Uyghur", Var, 21, ""},
    +		{"Oriya", Var, 0, ""},
    +		{"Osage", Var, 7, ""},
    +		{"Osmanya", Var, 0, ""},
    +		{"Other", Var, 0, ""},
    +		{"Other_Alphabetic", Var, 0, ""},
    +		{"Other_Default_Ignorable_Code_Point", Var, 0, ""},
    +		{"Other_Grapheme_Extend", Var, 0, ""},
    +		{"Other_ID_Continue", Var, 0, ""},
    +		{"Other_ID_Start", Var, 0, ""},
    +		{"Other_Lowercase", Var, 0, ""},
    +		{"Other_Math", Var, 0, ""},
    +		{"Other_Uppercase", Var, 0, ""},
    +		{"P", Var, 0, ""},
    +		{"Pahawh_Hmong", Var, 4, ""},
    +		{"Palmyrene", Var, 4, ""},
    +		{"Pattern_Syntax", Var, 0, ""},
    +		{"Pattern_White_Space", Var, 0, ""},
    +		{"Pau_Cin_Hau", Var, 4, ""},
    +		{"Pc", Var, 0, ""},
    +		{"Pd", Var, 0, ""},
    +		{"Pe", Var, 0, ""},
    +		{"Pf", Var, 0, ""},
    +		{"Phags_Pa", Var, 0, ""},
    +		{"Phoenician", Var, 0, ""},
    +		{"Pi", Var, 0, ""},
    +		{"Po", Var, 0, ""},
    +		{"Prepended_Concatenation_Mark", Var, 7, ""},
    +		{"PrintRanges", Var, 0, ""},
    +		{"Properties", Var, 0, ""},
    +		{"Ps", Var, 0, ""},
    +		{"Psalter_Pahlavi", Var, 4, ""},
    +		{"Punct", Var, 0, ""},
    +		{"Quotation_Mark", Var, 0, ""},
    +		{"Radical", Var, 0, ""},
    +		{"Range16", Type, 0, ""},
    +		{"Range16.Hi", Field, 0, ""},
    +		{"Range16.Lo", Field, 0, ""},
    +		{"Range16.Stride", Field, 0, ""},
    +		{"Range32", Type, 0, ""},
    +		{"Range32.Hi", Field, 0, ""},
    +		{"Range32.Lo", Field, 0, ""},
    +		{"Range32.Stride", Field, 0, ""},
    +		{"RangeTable", Type, 0, ""},
    +		{"RangeTable.LatinOffset", Field, 1, ""},
    +		{"RangeTable.R16", Field, 0, ""},
    +		{"RangeTable.R32", Field, 0, ""},
    +		{"Regional_Indicator", Var, 10, ""},
    +		{"Rejang", Var, 0, ""},
    +		{"ReplacementChar", Const, 0, ""},
    +		{"Runic", Var, 0, ""},
    +		{"S", Var, 0, ""},
    +		{"STerm", Var, 0, ""},
    +		{"Samaritan", Var, 0, ""},
    +		{"Saurashtra", Var, 0, ""},
    +		{"Sc", Var, 0, ""},
    +		{"Scripts", Var, 0, ""},
    +		{"Sentence_Terminal", Var, 7, ""},
    +		{"Sharada", Var, 1, ""},
    +		{"Shavian", Var, 0, ""},
    +		{"Siddham", Var, 4, ""},
    +		{"SignWriting", Var, 5, ""},
    +		{"SimpleFold", Func, 0, "func(r rune) rune"},
    +		{"Sinhala", Var, 0, ""},
    +		{"Sk", Var, 0, ""},
    +		{"Sm", Var, 0, ""},
    +		{"So", Var, 0, ""},
    +		{"Soft_Dotted", Var, 0, ""},
    +		{"Sogdian", Var, 13, ""},
    +		{"Sora_Sompeng", Var, 1, ""},
    +		{"Soyombo", Var, 10, ""},
    +		{"Space", Var, 0, ""},
    +		{"SpecialCase", Type, 0, ""},
    +		{"Sundanese", Var, 0, ""},
    +		{"Syloti_Nagri", Var, 0, ""},
    +		{"Symbol", Var, 0, ""},
    +		{"Syriac", Var, 0, ""},
    +		{"Tagalog", Var, 0, ""},
    +		{"Tagbanwa", Var, 0, ""},
    +		{"Tai_Le", Var, 0, ""},
    +		{"Tai_Tham", Var, 0, ""},
    +		{"Tai_Viet", Var, 0, ""},
    +		{"Takri", Var, 1, ""},
    +		{"Tamil", Var, 0, ""},
    +		{"Tangsa", Var, 21, ""},
    +		{"Tangut", Var, 7, ""},
    +		{"Telugu", Var, 0, ""},
    +		{"Terminal_Punctuation", Var, 0, ""},
    +		{"Thaana", Var, 0, ""},
    +		{"Thai", Var, 0, ""},
    +		{"Tibetan", Var, 0, ""},
    +		{"Tifinagh", Var, 0, ""},
    +		{"Tirhuta", Var, 4, ""},
    +		{"Title", Var, 0, ""},
    +		{"TitleCase", Const, 0, ""},
    +		{"To", Func, 0, "func(_case int, r rune) rune"},
    +		{"ToLower", Func, 0, "func(r rune) rune"},
    +		{"ToTitle", Func, 0, "func(r rune) rune"},
    +		{"ToUpper", Func, 0, "func(r rune) rune"},
    +		{"Toto", Var, 21, ""},
    +		{"TurkishCase", Var, 0, ""},
    +		{"Ugaritic", Var, 0, ""},
    +		{"Unified_Ideograph", Var, 0, ""},
    +		{"Upper", Var, 0, ""},
    +		{"UpperCase", Const, 0, ""},
    +		{"UpperLower", Const, 0, ""},
    +		{"Vai", Var, 0, ""},
    +		{"Variation_Selector", Var, 0, ""},
    +		{"Version", Const, 0, ""},
    +		{"Vithkuqi", Var, 21, ""},
    +		{"Wancho", Var, 14, ""},
    +		{"Warang_Citi", Var, 4, ""},
    +		{"White_Space", Var, 0, ""},
    +		{"Yezidi", Var, 16, ""},
    +		{"Yi", Var, 0, ""},
    +		{"Z", Var, 0, ""},
    +		{"Zanabazar_Square", Var, 10, ""},
    +		{"Zl", Var, 0, ""},
    +		{"Zp", Var, 0, ""},
    +		{"Zs", Var, 0, ""},
     	},
     	"unicode/utf16": {
    -		{"AppendRune", Func, 20},
    -		{"Decode", Func, 0},
    -		{"DecodeRune", Func, 0},
    -		{"Encode", Func, 0},
    -		{"EncodeRune", Func, 0},
    -		{"IsSurrogate", Func, 0},
    -		{"RuneLen", Func, 23},
    +		{"AppendRune", Func, 20, "func(a []uint16, r rune) []uint16"},
    +		{"Decode", Func, 0, "func(s []uint16) []rune"},
    +		{"DecodeRune", Func, 0, "func(r1 rune, r2 rune) rune"},
    +		{"Encode", Func, 0, "func(s []rune) []uint16"},
    +		{"EncodeRune", Func, 0, "func(r rune) (r1 rune, r2 rune)"},
    +		{"IsSurrogate", Func, 0, "func(r rune) bool"},
    +		{"RuneLen", Func, 23, "func(r rune) int"},
     	},
     	"unicode/utf8": {
    -		{"AppendRune", Func, 18},
    -		{"DecodeLastRune", Func, 0},
    -		{"DecodeLastRuneInString", Func, 0},
    -		{"DecodeRune", Func, 0},
    -		{"DecodeRuneInString", Func, 0},
    -		{"EncodeRune", Func, 0},
    -		{"FullRune", Func, 0},
    -		{"FullRuneInString", Func, 0},
    -		{"MaxRune", Const, 0},
    -		{"RuneCount", Func, 0},
    -		{"RuneCountInString", Func, 0},
    -		{"RuneError", Const, 0},
    -		{"RuneLen", Func, 0},
    -		{"RuneSelf", Const, 0},
    -		{"RuneStart", Func, 0},
    -		{"UTFMax", Const, 0},
    -		{"Valid", Func, 0},
    -		{"ValidRune", Func, 1},
    -		{"ValidString", Func, 0},
    +		{"AppendRune", Func, 18, "func(p []byte, r rune) []byte"},
    +		{"DecodeLastRune", Func, 0, "func(p []byte) (r rune, size int)"},
    +		{"DecodeLastRuneInString", Func, 0, "func(s string) (r rune, size int)"},
    +		{"DecodeRune", Func, 0, "func(p []byte) (r rune, size int)"},
    +		{"DecodeRuneInString", Func, 0, "func(s string) (r rune, size int)"},
    +		{"EncodeRune", Func, 0, "func(p []byte, r rune) int"},
    +		{"FullRune", Func, 0, "func(p []byte) bool"},
    +		{"FullRuneInString", Func, 0, "func(s string) bool"},
    +		{"MaxRune", Const, 0, ""},
    +		{"RuneCount", Func, 0, "func(p []byte) int"},
    +		{"RuneCountInString", Func, 0, "func(s string) (n int)"},
    +		{"RuneError", Const, 0, ""},
    +		{"RuneLen", Func, 0, "func(r rune) int"},
    +		{"RuneSelf", Const, 0, ""},
    +		{"RuneStart", Func, 0, "func(b byte) bool"},
    +		{"UTFMax", Const, 0, ""},
    +		{"Valid", Func, 0, "func(p []byte) bool"},
    +		{"ValidRune", Func, 1, "func(r rune) bool"},
    +		{"ValidString", Func, 0, "func(s string) bool"},
     	},
     	"unique": {
    -		{"(Handle).Value", Method, 23},
    -		{"Handle", Type, 23},
    -		{"Make", Func, 23},
    +		{"(Handle).Value", Method, 23, ""},
    +		{"Handle", Type, 23, ""},
    +		{"Make", Func, 23, "func[T comparable](value T) Handle[T]"},
     	},
     	"unsafe": {
    -		{"Add", Func, 0},
    -		{"Alignof", Func, 0},
    -		{"Offsetof", Func, 0},
    -		{"Pointer", Type, 0},
    -		{"Sizeof", Func, 0},
    -		{"Slice", Func, 0},
    -		{"SliceData", Func, 0},
    -		{"String", Func, 0},
    -		{"StringData", Func, 0},
    +		{"Add", Func, 0, ""},
    +		{"Alignof", Func, 0, ""},
    +		{"Offsetof", Func, 0, ""},
    +		{"Pointer", Type, 0, ""},
    +		{"Sizeof", Func, 0, ""},
    +		{"Slice", Func, 0, ""},
    +		{"SliceData", Func, 0, ""},
    +		{"String", Func, 0, ""},
    +		{"StringData", Func, 0, ""},
     	},
     	"weak": {
    -		{"(Pointer).Value", Method, 24},
    -		{"Make", Func, 24},
    -		{"Pointer", Type, 24},
    +		{"(Pointer).Value", Method, 24, ""},
    +		{"Make", Func, 24, "func[T any](ptr *T) Pointer[T]"},
    +		{"Pointer", Type, 24, ""},
     	},
     }
    diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    index 3d96d3bf6..e223e0f34 100644
    --- a/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    +++ b/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
    @@ -18,6 +18,14 @@ type Symbol struct {
     	Name    string
     	Kind    Kind
     	Version Version // Go version that first included the symbol
    +	// Signature provides the type of a function (defined only for Kind=Func).
    +	// Imported types are denoted as pkg.T; pkg is not fully qualified.
    +	// TODO(adonovan): use an unambiguous encoding that is parseable.
    +	//
    +	// Examples:
    +	//    func[M ~map[K]V, K comparable, V any](m M) M
    +	//    func(fi fs.FileInfo, link string) (*Header, error)
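    +	//
    +	// For instance, the manifest in this package records utf8.AppendRune
    +	// with Signature "func(p []byte, r rune) []byte".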
    +	Signature string // if Kind == stdlib.Func
     }
     
     // A Kind indicates the kind of a symbol:
    diff --git a/tools/vendor/golang.org/x/tools/internal/typeparams/free.go b/tools/vendor/golang.org/x/tools/internal/typeparams/free.go
    index 0ade5c294..709d2fc14 100644
    --- a/tools/vendor/golang.org/x/tools/internal/typeparams/free.go
    +++ b/tools/vendor/golang.org/x/tools/internal/typeparams/free.go
    @@ -70,7 +70,7 @@ func (w *Free) Has(typ types.Type) (res bool) {
     
     	case *types.Tuple:
     		n := t.Len()
    -		for i := 0; i < n; i++ {
    +		for i := range n {
     			if w.Has(t.At(i).Type()) {
     				return true
     			}
    diff --git a/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go
    index cbd12f801..9bc29143f 100644
    --- a/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go
    +++ b/tools/vendor/golang.org/x/tools/internal/typeparams/termlist.go
    @@ -1,3 +1,6 @@
    +// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
    +// Source: ../../cmd/compile/internal/types2/termlist.go
    +
     // Copyright 2021 The Go Authors. All rights reserved.
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
    @@ -7,8 +10,8 @@
     package typeparams
     
     import (
    -	"bytes"
     	"go/types"
    +	"strings"
     )
     
     // A termlist represents the type set represented by the union
    @@ -22,15 +25,18 @@ type termlist []*term
     // It is in normal form.
     var allTermlist = termlist{new(term)}
     
    +// termSep is the separator used between individual terms.
    +const termSep = " | "
    +
     // String prints the termlist exactly (without normalization).
     func (xl termlist) String() string {
     	if len(xl) == 0 {
     		return "∅"
     	}
    -	var buf bytes.Buffer
    +	var buf strings.Builder
     	for i, x := range xl {
     		if i > 0 {
    -			buf.WriteString(" | ")
    +			buf.WriteString(termSep)
     		}
     		buf.WriteString(x.String())
     	}
    diff --git a/tools/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/tools/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
    index 7350bb702..fa758cdc9 100644
    --- a/tools/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
    +++ b/tools/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
    @@ -1,3 +1,6 @@
    +// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
    +// Source: ../../cmd/compile/internal/types2/typeterm.go
    +
     // Copyright 2021 The Go Authors. All rights reserved.
     // Use of this source code is governed by a BSD-style
     // license that can be found in the LICENSE file.
    diff --git a/tools/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/tools/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
    new file mode 100644
    index 000000000..3db2a135b
    --- /dev/null
    +++ b/tools/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
    @@ -0,0 +1,137 @@
    +// Copyright 2018 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package typesinternal
    +
    +import (
    +	"fmt"
    +	"go/ast"
    +	"go/types"
    +	_ "unsafe"
    +)
    +
    +// CallKind describes the function position of an [*ast.CallExpr].
    +type CallKind int
    +
    +const (
    +	CallStatic     CallKind = iota // static call to known function
    +	CallInterface                  // dynamic call through an interface method
    +	CallDynamic                    // dynamic call of a func value
    +	CallBuiltin                    // call to a builtin function
    +	CallConversion                 // a conversion (not a call)
    +)
    +
    +var callKindNames = []string{
    +	"CallStatic",
    +	"CallInterface",
    +	"CallDynamic",
    +	"CallBuiltin",
    +	"CallConversion",
    +}
    +
    +func (k CallKind) String() string {
    +	if i := int(k); i >= 0 && i < len(callKindNames) {
    +		return callKindNames[i]
    +	}
    +	return fmt.Sprintf("typeutil.CallKind(%d)", k)
    +}
    +
    +// ClassifyCall classifies the function position of a call expression ([*ast.CallExpr]).
    +// It distinguishes among true function calls, calls to builtins, and type conversions,
    +// and further classifies function calls as static calls (where the function is known),
    +// dynamic interface calls, and other dynamic calls.
    +//
    +// For the declarations:
    +//
    +//	func f() {}
    +//	func g[T any]() {}
    +//	var v func()
    +//	var s []func()
    +//	type I interface { M() }
    +//	var i I
    +//
    +// ClassifyCall returns the following:
    +//
    +//	f()           CallStatic
    +//	g[int]()      CallStatic
    +//	i.M()         CallInterface
    +//	min(1, 2)     CallBuiltin
    +//	v()           CallDynamic
    +//	s[0]()        CallDynamic
    +//	int(x)        CallConversion
    +//	[]byte("")    CallConversion
    +func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
    +	if info.Types == nil {
    +		panic("ClassifyCall: info.Types is nil")
    +	}
    +	tv := info.Types[call.Fun]
    +	if tv.IsType() {
    +		return CallConversion
    +	}
    +	if tv.IsBuiltin() {
    +		return CallBuiltin
    +	}
    +	obj := info.Uses[UsedIdent(info, call.Fun)]
    +	// Classify the call by the type of the object, if any.
    +	switch obj := obj.(type) {
    +	case *types.Func:
    +		if interfaceMethod(obj) {
    +			return CallInterface
    +		}
    +		return CallStatic
    +	default:
    +		return CallDynamic
    +	}
    +}
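    +
    +// A minimal usage sketch, assuming src holds a self-contained package
    +// (the identifiers src, fset, and the path "p" are illustrative):
    +//
    +//	fset := token.NewFileSet()
    +//	f, err := parser.ParseFile(fset, "p.go", src, 0)
    +//	if err != nil {
    +//		log.Fatal(err)
    +//	}
    +//	info := NewTypesInfo() // all Info maps populated
    +//	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, info); err != nil {
    +//		log.Fatal(err)
    +//	}
    +//	ast.Inspect(f, func(n ast.Node) bool {
    +//		if call, ok := n.(*ast.CallExpr); ok {
    +//			fmt.Println(ClassifyCall(info, call)) // e.g. CallStatic
    +//		}
    +//		return true
    +//	})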
    +
    +// UsedIdent returns the identifier such that info.Uses[UsedIdent(info, e)]
    +// is the [types.Object] used by e, if any.
    +//
    +// If e is one of various forms of reference:
    +//
    +//	f, c, v, T           lexical reference
    +//	pkg.X                qualified identifier
    +//	f[T] or pkg.F[K,V]   instantiations of the above kinds
    +//	expr.f               field or method value selector
    +//	T.f                  method expression selector
    +//
    +// UsedIdent returns the identifier whose associated value in
    +// [types.Info.Uses] is the object to which e refers.
    +//
    +// For the declarations:
    +//
    +//	func F[T any]() {...}
    +//	type I interface { M() }
    +//	var (
    +//	  x int
    +//	  s struct { f  int }
    +//	  a []int
    +//	  i I
    +//	)
    +//
    +// UsedIdent returns the following:
    +//
    +//	Expr          UsedIdent
    +//	x             x
    +//	s.f           f
    +//	F[int]        F
    +//	i.M           M
    +//	I.M           M
    +//	min           min
    +//	int           int
    +//	1             nil
    +//	a[0]          nil
    +//	[]byte        nil
    +//
    +// Note: if e is an instantiated function or method, UsedIdent returns
    +// the corresponding generic function or method on the generic type.
    +func UsedIdent(info *types.Info, e ast.Expr) *ast.Ident {
    +	return usedIdent(info, e)
    +}
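    +
    +// A short sketch, assuming e is an already type-checked expression
    +// (for instance the Fun of an *ast.CallExpr):
    +//
    +//	if id := UsedIdent(info, e); id != nil {
    +//		obj := info.Uses[id] // e.g. the *types.Func for fmt.Println
    +//		fmt.Println(obj.Name())
    +//	}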
    +
    +//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
    +func usedIdent(info *types.Info, e ast.Expr) *ast.Ident
    +
    +//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
    +func interfaceMethod(f *types.Func) bool
    diff --git a/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go b/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
    index edf0347ec..a5cd7e8db 100644
    --- a/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
    +++ b/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go
    @@ -7,6 +7,7 @@
     package typesinternal
     
     import (
    +	"go/ast"
     	"go/token"
     	"go/types"
     	"reflect"
    @@ -68,6 +69,34 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
     	}
     }
     
    +// TypeNameFor returns the type name symbol for the specified type, if
    +// it is a [*types.Alias], [*types.Named], [*types.TypeParam], or a
    +// [*types.Basic] representing a type.
    +//
    +// For all other types, and for Basic types representing a builtin,
    +// constant, or nil, it returns nil. Be careful not to convert the
    +// resulting nil pointer to a [types.Object]!
    +//
    +// If t is the type of a constant, it may be an "untyped" type, which
    +// has no TypeName. To access the name of such types (e.g. "untyped
    +// int"), use [types.Basic.Name].
    +func TypeNameFor(t types.Type) *types.TypeName {
    +	switch t := t.(type) {
    +	case *types.Alias:
    +		return t.Obj()
    +	case *types.Named:
    +		return t.Obj()
    +	case *types.TypeParam:
    +		return t.Obj()
    +	case *types.Basic:
    +		// See issues #71886 and #66890 for some history.
    +		if tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName); ok {
    +			return tname
    +		}
    +	}
    +	return nil
    +}
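    +
    +// For instance:
    +//
    +//	TypeNameFor(types.Typ[types.Int])        // TypeName for "int" (universe scope)
    +//	TypeNameFor(types.Typ[types.UntypedInt]) // nil: untyped types have no TypeName
    +//	TypeNameFor(types.NewPointer(types.Typ[types.Int])) // nil: not a named type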
    +
     // A NamedOrAlias is a [types.Type] that is named (as
     // defined by the spec) and capable of bearing type parameters: it
     // abstracts aliases ([types.Alias]) and defined types
    @@ -76,7 +105,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
     // Every type declared by an explicit "type" declaration is a
     // NamedOrAlias. (Built-in type symbols may additionally
     // have type [types.Basic], which is not a NamedOrAlias,
    -// though the spec regards them as "named".)
    +// though the spec regards them as "named"; see [TypeNameFor].)
     //
     // NamedOrAlias cannot expose the Origin method, because
     // [types.Alias.Origin] and [types.Named.Origin] have different
    @@ -84,32 +113,15 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
     type NamedOrAlias interface {
     	types.Type
     	Obj() *types.TypeName
    -	// TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22.
    +	TypeArgs() *types.TypeList
    +	TypeParams() *types.TypeParamList
    +	SetTypeParams(tparams []*types.TypeParam)
     }
     
    -// TypeParams is a light shim around t.TypeParams().
    -// (go/types.Alias).TypeParams requires >= 1.23.
    -func TypeParams(t NamedOrAlias) *types.TypeParamList {
    -	switch t := t.(type) {
    -	case *types.Alias:
    -		return aliases.TypeParams(t)
    -	case *types.Named:
    -		return t.TypeParams()
    -	}
    -	return nil
    -}
    -
    -// TypeArgs is a light shim around t.TypeArgs().
    -// (go/types.Alias).TypeArgs requires >= 1.23.
    -func TypeArgs(t NamedOrAlias) *types.TypeList {
    -	switch t := t.(type) {
    -	case *types.Alias:
    -		return aliases.TypeArgs(t)
    -	case *types.Named:
    -		return t.TypeArgs()
    -	}
    -	return nil
    -}
    +var (
    +	_ NamedOrAlias = (*types.Alias)(nil)
    +	_ NamedOrAlias = (*types.Named)(nil)
    +)
     
     // Origin returns the generic type of the Named or Alias type t if it
     // is instantiated, otherwise it returns t.
    @@ -127,3 +139,17 @@ func Origin(t NamedOrAlias) NamedOrAlias {
     func IsPackageLevel(obj types.Object) bool {
     	return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
     }
    +
    +// NewTypesInfo returns a *types.Info with all maps populated.
    +func NewTypesInfo() *types.Info {
    +	return &types.Info{
    +		Types:        map[ast.Expr]types.TypeAndValue{},
    +		Instances:    map[*ast.Ident]types.Instance{},
    +		Defs:         map[*ast.Ident]types.Object{},
    +		Uses:         map[*ast.Ident]types.Object{},
    +		Implicits:    map[ast.Node]types.Object{},
    +		Selections:   map[*ast.SelectorExpr]*types.Selection{},
    +		Scopes:       map[ast.Node]*types.Scope{},
    +		FileVersions: map[*ast.File]string{},
    +	}
    +}
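    +
    +// The result can be passed directly to the type checker, e.g.
    +// (fset and files are assumed to exist):
    +//
    +//	info := NewTypesInfo()
    +//	pkg, err := (&types.Config{}).Check("p", fset, files, info)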
    diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
    new file mode 100644
    index 000000000..0b789e2c5
    --- /dev/null
    +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
    @@ -0,0 +1,119 @@
    +// Copyright 2025 Google LLC
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Code generated by protoc-gen-go. DO NOT EDIT.
    +// versions:
    +// 	protoc-gen-go v1.26.0
    +// 	protoc        v4.24.4
    +// source: google/api/annotations.proto
    +
    +package annotations
    +
    +import (
    +	reflect "reflect"
    +
    +	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    +	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
    +)
    +
    +const (
    +	// Verify that this generated code is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    +	// Verify that runtime/protoimpl is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
    +)
    +
    +var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
    +	{
    +		ExtendedType:  (*descriptorpb.MethodOptions)(nil),
    +		ExtensionType: (*HttpRule)(nil),
    +		Field:         72295728,
    +		Name:          "google.api.http",
    +		Tag:           "bytes,72295728,opt,name=http",
    +		Filename:      "google/api/annotations.proto",
    +	},
    +}
    +
    +// Extension fields to descriptorpb.MethodOptions.
    +var (
    +	// See `HttpRule`.
    +	//
    +	// optional google.api.HttpRule http = 72295728;
    +	E_Http = &file_google_api_annotations_proto_extTypes[0]
    +)
    +
    +var File_google_api_annotations_proto protoreflect.FileDescriptor
    +
    +var file_google_api_annotations_proto_rawDesc = []byte{
    +	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
    +	0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
    +	0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65,
    +	0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22,
    +	0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    +	0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70,
    +	0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
    +	0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50,
    +	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
    +	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
    +	0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
    +	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
    +	0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    +}
    +
    +var file_google_api_annotations_proto_goTypes = []interface{}{
    +	(*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions
    +	(*HttpRule)(nil),                   // 1: google.api.HttpRule
    +}
    +var file_google_api_annotations_proto_depIdxs = []int32{
    +	0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions
    +	1, // 1: google.api.http:type_name -> google.api.HttpRule
    +	2, // [2:2] is the sub-list for method output_type
    +	2, // [2:2] is the sub-list for method input_type
    +	1, // [1:2] is the sub-list for extension type_name
    +	0, // [0:1] is the sub-list for extension extendee
    +	0, // [0:0] is the sub-list for field type_name
    +}
    +
    +func init() { file_google_api_annotations_proto_init() }
    +func file_google_api_annotations_proto_init() {
    +	if File_google_api_annotations_proto != nil {
    +		return
    +	}
    +	file_google_api_http_proto_init()
    +	type x struct{}
    +	out := protoimpl.TypeBuilder{
    +		File: protoimpl.DescBuilder{
    +			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
    +			RawDescriptor: file_google_api_annotations_proto_rawDesc,
    +			NumEnums:      0,
    +			NumMessages:   0,
    +			NumExtensions: 1,
    +			NumServices:   0,
    +		},
    +		GoTypes:           file_google_api_annotations_proto_goTypes,
    +		DependencyIndexes: file_google_api_annotations_proto_depIdxs,
    +		ExtensionInfos:    file_google_api_annotations_proto_extTypes,
    +	}.Build()
    +	File_google_api_annotations_proto = out.File
    +	file_google_api_annotations_proto_rawDesc = nil
    +	file_google_api_annotations_proto_goTypes = nil
    +	file_google_api_annotations_proto_depIdxs = nil
    +}
    diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
    new file mode 100644
    index 000000000..f84048172
    --- /dev/null
    +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
    @@ -0,0 +1,2103 @@
    +// Copyright 2025 Google LLC
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Code generated by protoc-gen-go. DO NOT EDIT.
    +// versions:
    +// 	protoc-gen-go v1.26.0
    +// 	protoc        v4.24.4
    +// source: google/api/client.proto
    +
    +package annotations
    +
    +import (
    +	reflect "reflect"
    +	sync "sync"
    +
    +	api "google.golang.org/genproto/googleapis/api"
    +	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    +	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
    +	durationpb "google.golang.org/protobuf/types/known/durationpb"
    +)
    +
    +const (
    +	// Verify that this generated code is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    +	// Verify that runtime/protoimpl is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
    +)
    +
    +// The organization for which the client libraries are being published.
    +// Affects the url where generated docs are published, etc.
    +type ClientLibraryOrganization int32
    +
    +const (
    +	// The unspecified default; not a meaningful organization.
    +	ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0
    +	// Google Cloud Platform Org.
    +	ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1
    +	// Ads (Advertising) Org.
    +	ClientLibraryOrganization_ADS ClientLibraryOrganization = 2
    +	// Photos Org.
    +	ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3
    +	// Street View Org.
    +	ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4
    +	// Shopping Org.
    +	ClientLibraryOrganization_SHOPPING ClientLibraryOrganization = 5
    +	// Geo Org.
    +	ClientLibraryOrganization_GEO ClientLibraryOrganization = 6
    +	// Generative AI - https://developers.generativeai.google
    +	ClientLibraryOrganization_GENERATIVE_AI ClientLibraryOrganization = 7
    +)
    +
    +// Enum value maps for ClientLibraryOrganization.
    +var (
    +	ClientLibraryOrganization_name = map[int32]string{
    +		0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED",
    +		1: "CLOUD",
    +		2: "ADS",
    +		3: "PHOTOS",
    +		4: "STREET_VIEW",
    +		5: "SHOPPING",
    +		6: "GEO",
    +		7: "GENERATIVE_AI",
    +	}
    +	ClientLibraryOrganization_value = map[string]int32{
    +		"CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0,
    +		"CLOUD":         1,
    +		"ADS":           2,
    +		"PHOTOS":        3,
    +		"STREET_VIEW":   4,
    +		"SHOPPING":      5,
    +		"GEO":           6,
    +		"GENERATIVE_AI": 7,
    +	}
    +)
    +
    +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization {
    +	p := new(ClientLibraryOrganization)
    +	*p = x
    +	return p
    +}
    +
    +func (x ClientLibraryOrganization) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_api_client_proto_enumTypes[0].Descriptor()
    +}
    +
    +func (ClientLibraryOrganization) Type() protoreflect.EnumType {
    +	return &file_google_api_client_proto_enumTypes[0]
    +}
    +
    +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Use ClientLibraryOrganization.Descriptor instead.
    +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{0}
    +}
    +
    +// Where client libraries should be published.
    +type ClientLibraryDestination int32
    +
    +const (
    +	// Client libraries will neither be generated nor published to package
    +	// managers.
    +	ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0
    +	// Generate the client library in a repo under github.com/googleapis,
    +	// but don't publish it to package managers.
    +	ClientLibraryDestination_GITHUB ClientLibraryDestination = 10
    +	// Publish the library to package managers like nuget.org and npmjs.com.
    +	ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20
    +)
    +
    +// Enum value maps for ClientLibraryDestination.
    +var (
    +	ClientLibraryDestination_name = map[int32]string{
    +		0:  "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED",
    +		10: "GITHUB",
    +		20: "PACKAGE_MANAGER",
    +	}
    +	ClientLibraryDestination_value = map[string]int32{
    +		"CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0,
    +		"GITHUB":                                 10,
    +		"PACKAGE_MANAGER":                        20,
    +	}
    +)
    +
    +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination {
    +	p := new(ClientLibraryDestination)
    +	*p = x
    +	return p
    +}
    +
    +func (x ClientLibraryDestination) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_api_client_proto_enumTypes[1].Descriptor()
    +}
    +
    +func (ClientLibraryDestination) Type() protoreflect.EnumType {
    +	return &file_google_api_client_proto_enumTypes[1]
    +}
    +
    +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Use ClientLibraryDestination.Descriptor instead.
    +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{1}
    +}
    +
    +// Required information for every language.
    +type CommonLanguageSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Link to automatically generated reference documentation.  Example:
    +	// https://cloud.google.com/nodejs/docs/reference/asset/latest
    +	//
    +	// Deprecated: Do not use.
    +	ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"`
    +	// The destination where API teams want this client library to be published.
    +	Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"`
    +	// Configuration for which RPCs should be generated in the GAPIC client.
    +	SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"`
    +}
    +
    +func (x *CommonLanguageSettings) Reset() {
    +	*x = CommonLanguageSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[0]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *CommonLanguageSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*CommonLanguageSettings) ProtoMessage() {}
    +
    +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[0]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead.
    +func (*CommonLanguageSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{0}
    +}
    +
    +// Deprecated: Do not use.
    +func (x *CommonLanguageSettings) GetReferenceDocsUri() string {
    +	if x != nil {
    +		return x.ReferenceDocsUri
    +	}
    +	return ""
    +}
    +
    +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination {
    +	if x != nil {
    +		return x.Destinations
    +	}
    +	return nil
    +}
    +
    +func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration {
    +	if x != nil {
    +		return x.SelectiveGapicGeneration
    +	}
    +	return nil
    +}
    +
    +// Details about how and where to publish client libraries.
    +type ClientLibrarySettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Version of the API to apply these settings to. This is the full protobuf
    +	// package for the API, ending in the version element.
    +	// Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1".
    +	Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
    +	// Launch stage of this version of the API.
    +	LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"`
    +	// When using transport=rest, the client request will encode enums as
    +	// numbers rather than strings.
    +	RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"`
    +	// Settings for legacy Java features, supported in the Service YAML.
    +	JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"`
    +	// Settings for C++ client libraries.
    +	CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"`
    +	// Settings for PHP client libraries.
    +	PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"`
    +	// Settings for Python client libraries.
    +	PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"`
    +	// Settings for Node client libraries.
    +	NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"`
    +	// Settings for .NET client libraries.
    +	DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"`
    +	// Settings for Ruby client libraries.
    +	RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"`
    +	// Settings for Go client libraries.
    +	GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"`
    +}
    +
    +func (x *ClientLibrarySettings) Reset() {
    +	*x = ClientLibrarySettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[1]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *ClientLibrarySettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*ClientLibrarySettings) ProtoMessage() {}
    +
    +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[1]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead.
    +func (*ClientLibrarySettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{1}
    +}
    +
    +func (x *ClientLibrarySettings) GetVersion() string {
    +	if x != nil {
    +		return x.Version
    +	}
    +	return ""
    +}
    +
    +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage {
    +	if x != nil {
    +		return x.LaunchStage
    +	}
    +	return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED
    +}
    +
    +func (x *ClientLibrarySettings) GetRestNumericEnums() bool {
    +	if x != nil {
    +		return x.RestNumericEnums
    +	}
    +	return false
    +}
    +
    +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings {
    +	if x != nil {
    +		return x.JavaSettings
    +	}
    +	return nil
    +}
    +
    +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings {
    +	if x != nil {
    +		return x.CppSettings
    +	}
    +	return nil
    +}
    +
    +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings {
    +	if x != nil {
    +		return x.PhpSettings
    +	}
    +	return nil
    +}
    +
    +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings {
    +	if x != nil {
    +		return x.PythonSettings
    +	}
    +	return nil
    +}
    +
    +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings {
    +	if x != nil {
    +		return x.NodeSettings
    +	}
    +	return nil
    +}
    +
    +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings {
    +	if x != nil {
    +		return x.DotnetSettings
    +	}
    +	return nil
    +}
    +
    +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings {
    +	if x != nil {
    +		return x.RubySettings
    +	}
    +	return nil
    +}
    +
    +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings {
    +	if x != nil {
    +		return x.GoSettings
    +	}
    +	return nil
    +}
    +
    +// This message configures the settings for publishing [Google Cloud Client
    +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries)
    +// generated from the service config.
    +type Publishing struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// A list of API method settings, e.g. the behavior for methods that use the
    +	// long-running operation pattern.
    +	MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"`
    +	// Link to a *public* URI where users can report issues.  Example:
    +	// https://issuetracker.google.com/issues/new?component=190865&template=1161103
    +	NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"`
    +	// Link to product home page.  Example:
    +	// https://cloud.google.com/asset-inventory/docs/overview
    +	DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"`
    +	// Used as a tracking tag when collecting data about the API's developer
    +	// relations artifacts, such as docs and packages delivered to package
    +	// managers.  Example: "speech".
    +	ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"`
    +	// GitHub label to apply to issues and pull requests opened for this API.
    +	GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"`
    +	// GitHub teams to be added to CODEOWNERS in the directory in GitHub
    +	// containing source code for the client libraries for this API.
    +	CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"`
    +	// A prefix used in sample code when demarcating regions to be included
    +	// in documentation.
    +	DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"`
    +	// For whom the client library is being published.
    +	Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"`
    +	// Client library settings.  If the same version string appears multiple
    +	// times in this list, then the last one wins.  Earlier entries with the
    +	// same version string are discarded.
    +	LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"`
    +	// Optional link to proto reference documentation.  Example:
    +	// https://cloud.google.com/pubsub/lite/docs/reference/rpc
    +	ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"`
    +	// Optional link to REST reference documentation.  Example:
    +	// https://cloud.google.com/pubsub/lite/docs/reference/rest
    +	RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"`
    +}
    +
    +func (x *Publishing) Reset() {
    +	*x = Publishing{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[2]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *Publishing) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*Publishing) ProtoMessage() {}
    +
    +func (x *Publishing) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[2]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead.
    +func (*Publishing) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{2}
    +}
    +
    +func (x *Publishing) GetMethodSettings() []*MethodSettings {
    +	if x != nil {
    +		return x.MethodSettings
    +	}
    +	return nil
    +}
    +
    +func (x *Publishing) GetNewIssueUri() string {
    +	if x != nil {
    +		return x.NewIssueUri
    +	}
    +	return ""
    +}
    +
    +func (x *Publishing) GetDocumentationUri() string {
    +	if x != nil {
    +		return x.DocumentationUri
    +	}
    +	return ""
    +}
    +
    +func (x *Publishing) GetApiShortName() string {
    +	if x != nil {
    +		return x.ApiShortName
    +	}
    +	return ""
    +}
    +
    +func (x *Publishing) GetGithubLabel() string {
    +	if x != nil {
    +		return x.GithubLabel
    +	}
    +	return ""
    +}
    +
    +func (x *Publishing) GetCodeownerGithubTeams() []string {
    +	if x != nil {
    +		return x.CodeownerGithubTeams
    +	}
    +	return nil
    +}
    +
    +func (x *Publishing) GetDocTagPrefix() string {
    +	if x != nil {
    +		return x.DocTagPrefix
    +	}
    +	return ""
    +}
    +
    +func (x *Publishing) GetOrganization() ClientLibraryOrganization {
    +	if x != nil {
    +		return x.Organization
    +	}
    +	return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED
    +}
    +
    +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings {
    +	if x != nil {
    +		return x.LibrarySettings
    +	}
    +	return nil
    +}
    +
    +func (x *Publishing) GetProtoReferenceDocumentationUri() string {
    +	if x != nil {
    +		return x.ProtoReferenceDocumentationUri
    +	}
    +	return ""
    +}
    +
    +func (x *Publishing) GetRestReferenceDocumentationUri() string {
    +	if x != nil {
    +		return x.RestReferenceDocumentationUri
    +	}
    +	return ""
    +}
    +
    +// Settings for Java client libraries.
    +type JavaSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// The package name to use in Java. Clobbers the java_package option
    +	// set in the protobuf. This should be used **only** by APIs
    +	// that have already set the language_settings.java.package_name field
    +	// in gapic.yaml. API teams should use the protobuf java_package option
    +	// where possible.
    +	//
    +	// Example of a YAML configuration::
    +	//
    +	//	publishing:
    +	//	  java_settings:
    +	//	    library_package: com.google.cloud.pubsub.v1
    +	LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"`
    +	// Configure the Java class name to use instead of the service's for its
    +	// corresponding generated GAPIC client. Keys are fully-qualified
    +	// service names as they appear in the protobuf (including the full
    +	// package name). This should be used **only** by APIs that have already
    +	// set the language_settings.java.interface_names field in gapic.yaml.
    +	// API teams should otherwise use the service name as it appears in the
    +	// protobuf.
    +	//
    +	// Example of a YAML configuration::
    +	//
    +	//	publishing:
    +	//	  java_settings:
    +	//	    service_class_names:
    +	//	      - google.pubsub.v1.Publisher: TopicAdmin
    +	//	      - google.pubsub.v1.Subscriber: SubscriptionAdmin
    +	ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    +	// Settings common to all languages.
    +	Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"`
    +}
    +
    +func (x *JavaSettings) Reset() {
    +	*x = JavaSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[3]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *JavaSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*JavaSettings) ProtoMessage() {}
    +
    +func (x *JavaSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[3]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead.
    +func (*JavaSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{3}
    +}
    +
    +func (x *JavaSettings) GetLibraryPackage() string {
    +	if x != nil {
    +		return x.LibraryPackage
    +	}
    +	return ""
    +}
    +
    +func (x *JavaSettings) GetServiceClassNames() map[string]string {
    +	if x != nil {
    +		return x.ServiceClassNames
    +	}
    +	return nil
    +}
    +
    +func (x *JavaSettings) GetCommon() *CommonLanguageSettings {
    +	if x != nil {
    +		return x.Common
    +	}
    +	return nil
    +}
    +
    +// Settings for C++ client libraries.
    +type CppSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Settings common to all languages.
    +	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +}
    +
    +func (x *CppSettings) Reset() {
    +	*x = CppSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[4]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *CppSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*CppSettings) ProtoMessage() {}
    +
    +func (x *CppSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[4]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead.
    +func (*CppSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{4}
    +}
    +
    +func (x *CppSettings) GetCommon() *CommonLanguageSettings {
    +	if x != nil {
    +		return x.Common
    +	}
    +	return nil
    +}
    +
    +// Settings for PHP client libraries.
    +type PhpSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Settings common to all languages.
    +	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +}
    +
    +func (x *PhpSettings) Reset() {
    +	*x = PhpSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[5]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *PhpSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*PhpSettings) ProtoMessage() {}
    +
    +func (x *PhpSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[5]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead.
    +func (*PhpSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{5}
    +}
    +
    +func (x *PhpSettings) GetCommon() *CommonLanguageSettings {
    +	if x != nil {
    +		return x.Common
    +	}
    +	return nil
    +}
    +
    +// Settings for Python client libraries.
    +type PythonSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Settings common to all languages.
    +	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +	// Experimental features to be included during client library generation.
    +	ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"`
    +}
    +
    +func (x *PythonSettings) Reset() {
    +	*x = PythonSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[6]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *PythonSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*PythonSettings) ProtoMessage() {}
    +
    +func (x *PythonSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[6]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead.
    +func (*PythonSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{6}
    +}
    +
    +func (x *PythonSettings) GetCommon() *CommonLanguageSettings {
    +	if x != nil {
    +		return x.Common
    +	}
    +	return nil
    +}
    +
    +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures {
    +	if x != nil {
    +		return x.ExperimentalFeatures
    +	}
    +	return nil
    +}
    +
    +// Settings for Node client libraries.
    +type NodeSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Settings common to all languages.
    +	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +}
    +
    +func (x *NodeSettings) Reset() {
    +	*x = NodeSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[7]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *NodeSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*NodeSettings) ProtoMessage() {}
    +
    +func (x *NodeSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[7]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead.
    +func (*NodeSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{7}
    +}
    +
    +func (x *NodeSettings) GetCommon() *CommonLanguageSettings {
    +	if x != nil {
    +		return x.Common
    +	}
    +	return nil
    +}
    +
    +// Settings for .NET client libraries.
    +type DotnetSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Settings common to all languages.
    +	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +	// Map from original service names to renamed versions.
    +	// This is used when the default generated types
    +	// would cause a naming conflict. (Neither name is
    +	// fully-qualified.)
    +	// Example: Subscriber to SubscriberServiceApi.
    +	RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    +	// Map from full resource types to the effective short name
    +	// for the resource. This is used when identically named resources
    +	// from different services would otherwise cause naming collisions.
    +	// Example entry:
    +	// "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
    +	RenamedResources map[string]string `protobuf:"bytes,3,rep,name=renamed_resources,json=renamedResources,proto3" json:"renamed_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    +	// List of full resource types to ignore during generation.
    +	// This is typically used for API-specific Location resources,
    +	// which should be handled by the generator as if they were actually
    +	// the common Location resources.
    +	// Example entry: "documentai.googleapis.com/Location"
    +	IgnoredResources []string `protobuf:"bytes,4,rep,name=ignored_resources,json=ignoredResources,proto3" json:"ignored_resources,omitempty"`
    +	// Namespaces that must be aliased in snippets due to
    +	// a known (but non-generator-predictable) naming collision.
    +	ForcedNamespaceAliases []string `protobuf:"bytes,5,rep,name=forced_namespace_aliases,json=forcedNamespaceAliases,proto3" json:"forced_namespace_aliases,omitempty"`
    +	// Method signatures (in the form "service.method(signature)")
+	// which are provided separately and so should not be generated.
    +	// Snippets *calling* these methods are still generated, however.
    +	HandwrittenSignatures []string `protobuf:"bytes,6,rep,name=handwritten_signatures,json=handwrittenSignatures,proto3" json:"handwritten_signatures,omitempty"`
    +}
    +
    +func (x *DotnetSettings) Reset() {
    +	*x = DotnetSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[8]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *DotnetSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*DotnetSettings) ProtoMessage() {}
    +
    +func (x *DotnetSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[8]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead.
    +func (*DotnetSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{8}
    +}
    +
    +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings {
    +	if x != nil {
    +		return x.Common
    +	}
    +	return nil
    +}
    +
    +func (x *DotnetSettings) GetRenamedServices() map[string]string {
    +	if x != nil {
    +		return x.RenamedServices
    +	}
    +	return nil
    +}
    +
    +func (x *DotnetSettings) GetRenamedResources() map[string]string {
    +	if x != nil {
    +		return x.RenamedResources
    +	}
    +	return nil
    +}
    +
    +func (x *DotnetSettings) GetIgnoredResources() []string {
    +	if x != nil {
    +		return x.IgnoredResources
    +	}
    +	return nil
    +}
    +
    +func (x *DotnetSettings) GetForcedNamespaceAliases() []string {
    +	if x != nil {
    +		return x.ForcedNamespaceAliases
    +	}
    +	return nil
    +}
    +
    +func (x *DotnetSettings) GetHandwrittenSignatures() []string {
    +	if x != nil {
    +		return x.HandwrittenSignatures
    +	}
    +	return nil
    +}
    +
    +// Settings for Ruby client libraries.
    +type RubySettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
+	// Settings common to all client library languages.
    +	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +}
    +
    +func (x *RubySettings) Reset() {
    +	*x = RubySettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[9]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *RubySettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*RubySettings) ProtoMessage() {}
    +
    +func (x *RubySettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[9]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead.
    +func (*RubySettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{9}
    +}
    +
    +func (x *RubySettings) GetCommon() *CommonLanguageSettings {
    +	if x != nil {
    +		return x.Common
    +	}
    +	return nil
    +}
    +
    +// Settings for Go client libraries.
    +type GoSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
+	// Settings common to all client library languages.
    +	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
    +	// Map of service names to renamed services. Keys are the package relative
    +	// service names and values are the name to be used for the service client
    +	// and call options.
    +	//
    +	// publishing:
    +	//
    +	//	go_settings:
    +	//	  renamed_services:
    +	//	    Publisher: TopicAdmin
    +	RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    +}
    +
    +func (x *GoSettings) Reset() {
    +	*x = GoSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[10]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *GoSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*GoSettings) ProtoMessage() {}
    +
    +func (x *GoSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[10]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead.
    +func (*GoSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{10}
    +}
    +
    +func (x *GoSettings) GetCommon() *CommonLanguageSettings {
    +	if x != nil {
    +		return x.Common
    +	}
    +	return nil
    +}
    +
    +func (x *GoSettings) GetRenamedServices() map[string]string {
    +	if x != nil {
    +		return x.RenamedServices
    +	}
    +	return nil
    +}
    +
    +// Describes the generator configuration for a method.
    +type MethodSettings struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
+	// The fully qualified name of the method to which the options below apply.
+	// It is used to locate the method these options are applied to.
    +	//
    +	// Example:
    +	//
    +	//	publishing:
    +	//	  method_settings:
    +	//	  - selector: google.storage.control.v2.StorageControl.CreateFolder
    +	//	    # method settings for CreateFolder...
    +	Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
    +	// Describes settings to use for long-running operations when generating
    +	// API methods for RPCs. Complements RPCs that use the annotations in
    +	// google/longrunning/operations.proto.
    +	//
+	// Example of a YAML configuration:
    +	//
    +	//	publishing:
    +	//	  method_settings:
    +	//	  - selector: google.cloud.speech.v2.Speech.BatchRecognize
    +	//	    long_running:
    +	//	      initial_poll_delay: 60s # 1 minute
    +	//	      poll_delay_multiplier: 1.5
    +	//	      max_poll_delay: 360s # 6 minutes
    +	//	      total_poll_timeout: 54000s # 90 minutes
    +	LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"`
+	// List of top-level fields of the request message that should be
    +	// automatically populated by the client libraries based on their
    +	// (google.api.field_info).format. Currently supported format: UUID4.
    +	//
    +	// Example of a YAML configuration:
    +	//
    +	//	publishing:
    +	//	  method_settings:
    +	//	  - selector: google.example.v1.ExampleService.CreateExample
    +	//	    auto_populated_fields:
    +	//	    - request_id
    +	AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"`
    +}
    +
    +func (x *MethodSettings) Reset() {
    +	*x = MethodSettings{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[11]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *MethodSettings) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*MethodSettings) ProtoMessage() {}
    +
    +func (x *MethodSettings) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[11]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead.
    +func (*MethodSettings) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{11}
    +}
    +
    +func (x *MethodSettings) GetSelector() string {
    +	if x != nil {
    +		return x.Selector
    +	}
    +	return ""
    +}
    +
    +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning {
    +	if x != nil {
    +		return x.LongRunning
    +	}
    +	return nil
    +}
    +
    +func (x *MethodSettings) GetAutoPopulatedFields() []string {
    +	if x != nil {
    +		return x.AutoPopulatedFields
    +	}
    +	return nil
    +}
    +
    +// This message is used to configure the generation of a subset of the RPCs in
    +// a service for client libraries.
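+//
+// A hypothetical YAML sketch (this message hangs off
+// CommonLanguageSettings.selective_gapic_generation; the exact
+// publishing-level nesting is assumed, not taken from the proto):
+//
+//	selective_gapic_generation:
+//	  methods:
+//	  - google.example.v1.ExampleService.CreateExample
+//	  generate_omitted_as_internal: true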
    +type SelectiveGapicGeneration struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// An allowlist of the fully qualified names of RPCs that should be included
    +	// on public client surfaces.
    +	Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"`
+	// Setting this to true tells the client generators that methods which
+	// would otherwise be excluded from generation should instead be generated
+	// in a way that marks them as not intended for consumption by end users.
+	// How this is expressed is up to each language implementation; examples
+	// include added annotations, obfuscated identifiers, or other
+	// language-idiomatic patterns.
    +	GenerateOmittedAsInternal bool `protobuf:"varint,2,opt,name=generate_omitted_as_internal,json=generateOmittedAsInternal,proto3" json:"generate_omitted_as_internal,omitempty"`
    +}
    +
    +func (x *SelectiveGapicGeneration) Reset() {
    +	*x = SelectiveGapicGeneration{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[12]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *SelectiveGapicGeneration) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*SelectiveGapicGeneration) ProtoMessage() {}
    +
    +func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[12]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead.
    +func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{12}
    +}
    +
    +func (x *SelectiveGapicGeneration) GetMethods() []string {
    +	if x != nil {
    +		return x.Methods
    +	}
    +	return nil
    +}
    +
    +func (x *SelectiveGapicGeneration) GetGenerateOmittedAsInternal() bool {
    +	if x != nil {
    +		return x.GenerateOmittedAsInternal
    +	}
    +	return false
    +}
    +
    +// Experimental features to be included during client library generation.
    +// These fields will be deprecated once the feature graduates and is enabled
    +// by default.
    +type PythonSettings_ExperimentalFeatures struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Enables generation of asynchronous REST clients if `rest` transport is
    +	// enabled. By default, asynchronous REST clients will not be generated.
    +	// This feature will be enabled by default 1 month after launching the
    +	// feature in preview packages.
    +	RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
    +	// Enables generation of protobuf code using new types that are more
+	// Pythonic, which are included in `protobuf>=5.29.x`. This feature will be
    +	// enabled by default 1 month after launching the feature in preview
    +	// packages.
    +	ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"`
    +	// Disables generation of an unversioned Python package for this client
    +	// library. This means that the module names will need to be versioned in
    +	// import statements. For example `import google.cloud.library_v2` instead
    +	// of `import google.cloud.library`.
    +	UnversionedPackageDisabled bool `protobuf:"varint,3,opt,name=unversioned_package_disabled,json=unversionedPackageDisabled,proto3" json:"unversioned_package_disabled,omitempty"`
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) Reset() {
    +	*x = PythonSettings_ExperimentalFeatures{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[14]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
    +
    +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[14]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead.
    +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{6, 0}
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
    +	if x != nil {
    +		return x.RestAsyncIoEnabled
    +	}
    +	return false
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool {
    +	if x != nil {
    +		return x.ProtobufPythonicTypesEnabled
    +	}
    +	return false
    +}
    +
    +func (x *PythonSettings_ExperimentalFeatures) GetUnversionedPackageDisabled() bool {
    +	if x != nil {
    +		return x.UnversionedPackageDisabled
    +	}
    +	return false
    +}
    +
    +// Describes settings to use when generating API methods that use the
    +// long-running operation pattern.
+// All default values below match those used by the client library
+// generators (e.g.
    +// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)).
    +type MethodSettings_LongRunning struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Initial delay after which the first poll request will be made.
    +	// Default value: 5 seconds.
    +	InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"`
    +	// Multiplier to gradually increase delay between subsequent polls until it
    +	// reaches max_poll_delay.
    +	// Default value: 1.5.
    +	PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"`
    +	// Maximum time between two subsequent poll requests.
    +	// Default value: 45 seconds.
    +	MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"`
    +	// Total polling timeout.
    +	// Default value: 5 minutes.
    +	TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"`
    +}
    +
    +func (x *MethodSettings_LongRunning) Reset() {
    +	*x = MethodSettings_LongRunning{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_client_proto_msgTypes[18]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *MethodSettings_LongRunning) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*MethodSettings_LongRunning) ProtoMessage() {}
    +
    +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_client_proto_msgTypes[18]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead.
    +func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) {
    +	return file_google_api_client_proto_rawDescGZIP(), []int{11, 0}
    +}
    +
    +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration {
    +	if x != nil {
    +		return x.InitialPollDelay
    +	}
    +	return nil
    +}
    +
    +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 {
    +	if x != nil {
    +		return x.PollDelayMultiplier
    +	}
    +	return 0
    +}
    +
    +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration {
    +	if x != nil {
    +		return x.MaxPollDelay
    +	}
    +	return nil
    +}
    +
    +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration {
    +	if x != nil {
    +		return x.TotalPollTimeout
    +	}
    +	return nil
    +}
    +
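+// file_google_api_client_proto_extTypes describes the extensions this file
+// declares: method_signature (1051) on MethodOptions, and default_host
+// (1049), oauth_scopes (1050), and api_version (525000001) on
+// ServiceOptions.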
    +var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{
    +	{
    +		ExtendedType:  (*descriptorpb.MethodOptions)(nil),
    +		ExtensionType: ([]string)(nil),
    +		Field:         1051,
    +		Name:          "google.api.method_signature",
    +		Tag:           "bytes,1051,rep,name=method_signature",
    +		Filename:      "google/api/client.proto",
    +	},
    +	{
    +		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
    +		ExtensionType: (*string)(nil),
    +		Field:         1049,
    +		Name:          "google.api.default_host",
    +		Tag:           "bytes,1049,opt,name=default_host",
    +		Filename:      "google/api/client.proto",
    +	},
    +	{
    +		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
    +		ExtensionType: (*string)(nil),
    +		Field:         1050,
    +		Name:          "google.api.oauth_scopes",
    +		Tag:           "bytes,1050,opt,name=oauth_scopes",
    +		Filename:      "google/api/client.proto",
    +	},
    +	{
    +		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
    +		ExtensionType: (*string)(nil),
    +		Field:         525000001,
    +		Name:          "google.api.api_version",
    +		Tag:           "bytes,525000001,opt,name=api_version",
    +		Filename:      "google/api/client.proto",
    +	},
    +}
    +
    +// Extension fields to descriptorpb.MethodOptions.
    +var (
    +	// A definition of a client library method signature.
    +	//
+	// In client libraries, each proto RPC corresponds to one or more methods
+	// that the end user can call; each such method invokes the underlying RPC.
    +	// Normally, this method receives a single argument (a struct or instance
    +	// corresponding to the RPC request object). Defining this field will
    +	// add one or more overloads providing flattened or simpler method signatures
    +	// in some languages.
    +	//
    +	// The fields on the method signature are provided as a comma-separated
    +	// string.
    +	//
    +	// For example, the proto RPC and annotation:
    +	//
    +	//	rpc CreateSubscription(CreateSubscriptionRequest)
    +	//	    returns (Subscription) {
    +	//	  option (google.api.method_signature) = "name,topic";
    +	//	}
    +	//
    +	// Would add the following Java overload (in addition to the method accepting
    +	// the request object):
    +	//
    +	//	public final Subscription createSubscription(String name, String topic)
    +	//
    +	// The following backwards-compatibility guidelines apply:
    +	//
    +	//   - Adding this annotation to an unannotated method is backwards
    +	//     compatible.
    +	//   - Adding this annotation to a method which already has existing
    +	//     method signature annotations is backwards compatible if and only if
    +	//     the new method signature annotation is last in the sequence.
    +	//   - Modifying or removing an existing method signature annotation is
    +	//     a breaking change.
    +	//   - Re-ordering existing method signature annotations is a breaking
    +	//     change.
    +	//
    +	// repeated string method_signature = 1051;
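+	//
+	// A minimal sketch (not part of the generated API) of reading this
+	// extension from a resolved method descriptor, assuming the caller
+	// imports this package as annotations together with
+	// google.golang.org/protobuf/proto:
+	//
+	//	opts := method.Options().(*descriptorpb.MethodOptions)
+	//	sigs := proto.GetExtension(opts, annotations.E_MethodSignature).([]string)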
    +	E_MethodSignature = &file_google_api_client_proto_extTypes[0]
    +)
    +
    +// Extension fields to descriptorpb.ServiceOptions.
    +var (
    +	// The hostname for this service.
    +	// This should be specified with no prefix or protocol.
    +	//
    +	// Example:
    +	//
    +	//	service Foo {
    +	//	  option (google.api.default_host) = "foo.googleapi.com";
    +	//	  ...
    +	//	}
    +	//
    +	// optional string default_host = 1049;
    +	E_DefaultHost = &file_google_api_client_proto_extTypes[1]
    +	// OAuth scopes needed for the client.
    +	//
    +	// Example:
    +	//
    +	//	service Foo {
    +	//	  option (google.api.oauth_scopes) = \
    +	//	    "https://www.googleapis.com/auth/cloud-platform";
    +	//	  ...
    +	//	}
    +	//
    +	// If there is more than one scope, use a comma-separated string:
    +	//
    +	// Example:
    +	//
    +	//	service Foo {
    +	//	  option (google.api.oauth_scopes) = \
    +	//	    "https://www.googleapis.com/auth/cloud-platform,"
    +	//	    "https://www.googleapis.com/auth/monitoring";
    +	//	  ...
    +	//	}
    +	//
    +	// optional string oauth_scopes = 1050;
    +	E_OauthScopes = &file_google_api_client_proto_extTypes[2]
    +	// The API version of this service, which should be sent by version-aware
    +	// clients to the service. This allows services to abide by the schema and
    +	// behavior of the service at the time this API version was deployed.
    +	// The format of the API version must be treated as opaque by clients.
    +	// Services may use a format with an apparent structure, but clients must
    +	// not rely on this to determine components within an API version, or attempt
    +	// to construct other valid API versions. Note that this is for upcoming
    +	// functionality and may not be implemented for all services.
    +	//
    +	// Example:
    +	//
    +	//	service Foo {
    +	//	  option (google.api.api_version) = "v1_20230821_preview";
    +	//	}
    +	//
    +	// optional string api_version = 525000001;
    +	E_ApiVersion = &file_google_api_client_proto_extTypes[3]
    +)
    +
    +var File_google_api_client_proto protoreflect.FileDescriptor
    +
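+// file_google_api_client_proto_rawDesc holds the wire-format
+// FileDescriptorProto for google/api/client.proto; the runtime parses it to
+// build File_google_api_client_proto and the descriptors above.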
    +var file_google_api_client_proto_rawDesc = []byte{
    +	0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69,
    +	0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
    +	0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
    +	0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
    +	0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64,
    +	0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18,
    +	0x01, 0x52, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73,
    +	0x55, 0x72, 0x69, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
    +	0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62,
    +	0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
    +	0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a,
    +	0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63,
    +	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
    +	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53,
    +	0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e,
    +	0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
    +	0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
    +	0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72,
    +	0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76,
    +	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65,
    +	0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
    +	0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
    +	0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
    +	0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69,
    +	0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72,
    +	0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12,
    +	0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a,
    +	0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16,
    +	0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    +	0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63,
    +	0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68,
    +	0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b,
    +	0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68,
    +	0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65,
    +	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e,
    +	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32,
    +	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
    +	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74,
    +	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e,
    +	0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01,
    +	0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
    +	0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f,
    +	0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f,
    +	0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
    +	0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
    +	0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
    +	0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37,
    +	0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
    +	0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53,
    +	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c,
    +	0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
    +	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
    +	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74,
    +	0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74,
    +	0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e,
    +	0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01,
    +	0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12,
    +	0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    +	0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75,
    +	0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e,
    +	0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67,
    +	0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61,
    +	0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62,
    +	0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
    +	0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e,
    +	0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18,
    +	0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72,
    +	0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64,
    +	0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20,
    +	0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69,
    +	0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
    +	0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61,
    +	0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
    +	0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10,
    +	0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
    +	0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
    +	0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61,
    +	0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f,
    +	0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18,
    +	0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65,
    +	0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
    +	0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65,
    +	0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
    +	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f,
    +	0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a,
    +	0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
    +	0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
    +	0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72,
    +	0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76,
    +	0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
    +	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
    +	0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
    +	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65,
    +	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43,
    +	0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
    +	0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
    +	0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
    +	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
    +	0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
    +	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
    +	0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43,
    +	0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
    +	0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
    +	0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
    +	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
    +	0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18,
    +	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
    +	0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67,
    +	0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
    +	0x6e, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74,
    +	0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    +	0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
    +	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
    +	0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
    +	0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
    +	0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
    +	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65,
    +	0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
    +	0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65,
    +	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72,
    +	0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
    +	0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f,
    +	0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
    +	0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c,
    +	0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70,
    +	0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e,
    +	0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70,
    +	0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76,
    +	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
    +	0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
    +	0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 0x6b,
    +	0x61, 0x67, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e,
    +	0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63,
    +	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c,
    +	0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
    +	0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e,
    +	0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
    +	0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
    +	0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
    +	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
    +	0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
    +	0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f,
    +	0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e,
    +	0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
    +	0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
    +	0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65,
    +	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65,
    +	0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
    +	0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
    +	0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
    +	0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73,
    +	0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67,
    +	0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38,
    +	0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
    +	0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09,
    +	0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
    +	0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64,
    +	0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
    +	0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
    +	0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a,
    +	0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
    +	0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
    +	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
    +	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
    +	0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65,
    +	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
    +	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
    +	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
    +	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79,
    +	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
    +	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
    +	0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
    +	0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69,
    +	0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
    +	0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53,
    +	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12,
    +	0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
    +	0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
    +	0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
    +	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53,
    +	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d,
    +	0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
    +	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
    +	0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e,
    +	0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a,
    +	0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
    +	0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f,
    +	0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
    +	0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65,
    +	0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e,
    +	0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75,
    +	0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f,
    +	0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03,
    +	0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61,
    +	0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f,
    +	0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69,
    +	0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18,
    +	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    +	0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c,
    +	0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79,
    +	0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
    +	0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74,
    +	0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f,
    +	0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
    +	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    +	0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f,
    +	0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c,
    +	0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20,
    +	0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10,
    +	0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
    +	0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70,
    +	0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07,
    +	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d,
    +	0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
    +	0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e,
    +	0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65,
    +	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x41, 0x73, 0x49,
    +	0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65,
    +	0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
    +	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f,
    +	0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41,
    +	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
    +	0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a,
    +	0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53,
    +	0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45,
    +	0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10,
    +	0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45,
    +	0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a,
    +	0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65,
    +	0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49,
    +	0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54,
    +	0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
    +	0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10,
    +	0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e,
    +	0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
    +	0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
    +	0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28,
    +	0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
    +	0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f,
    +	0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61,
    +	0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68,
    +	0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
    +	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b,
    +	0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f,
    +	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
    +	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab,
    +	0xfa, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69,
    +	0x6f, 0x6e, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74,
    +	0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
    +	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
    +	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
    +	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x33,
    +}
    +
    +var (
    +	file_google_api_client_proto_rawDescOnce sync.Once
    +	file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc
    +)
    +
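+// file_google_api_client_proto_rawDescGZIP gzip-compresses the raw descriptor
+// exactly once, caches the result, and returns it; the deprecated
+// Descriptor methods above serve legacy callers from this cache.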
    +func file_google_api_client_proto_rawDescGZIP() []byte {
    +	file_google_api_client_proto_rawDescOnce.Do(func() {
    +		file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData)
    +	})
    +	return file_google_api_client_proto_rawDescData
    +}
    +
    +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
    +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
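+
+// file_google_api_client_proto_goTypes lists every enum and message type this
+// file declares or references; file_google_api_client_proto_depIdxs below
+// indexes into it to record which type each field, extension, and method
+// depends on.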
    +var file_google_api_client_proto_goTypes = []interface{}{
    +	(ClientLibraryOrganization)(0),              // 0: google.api.ClientLibraryOrganization
    +	(ClientLibraryDestination)(0),               // 1: google.api.ClientLibraryDestination
    +	(*CommonLanguageSettings)(nil),              // 2: google.api.CommonLanguageSettings
    +	(*ClientLibrarySettings)(nil),               // 3: google.api.ClientLibrarySettings
    +	(*Publishing)(nil),                          // 4: google.api.Publishing
    +	(*JavaSettings)(nil),                        // 5: google.api.JavaSettings
    +	(*CppSettings)(nil),                         // 6: google.api.CppSettings
    +	(*PhpSettings)(nil),                         // 7: google.api.PhpSettings
    +	(*PythonSettings)(nil),                      // 8: google.api.PythonSettings
    +	(*NodeSettings)(nil),                        // 9: google.api.NodeSettings
    +	(*DotnetSettings)(nil),                      // 10: google.api.DotnetSettings
    +	(*RubySettings)(nil),                        // 11: google.api.RubySettings
    +	(*GoSettings)(nil),                          // 12: google.api.GoSettings
    +	(*MethodSettings)(nil),                      // 13: google.api.MethodSettings
    +	(*SelectiveGapicGeneration)(nil),            // 14: google.api.SelectiveGapicGeneration
    +	nil,                                         // 15: google.api.JavaSettings.ServiceClassNamesEntry
    +	(*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures
    +	nil,                                 // 17: google.api.DotnetSettings.RenamedServicesEntry
    +	nil,                                 // 18: google.api.DotnetSettings.RenamedResourcesEntry
    +	nil,                                 // 19: google.api.GoSettings.RenamedServicesEntry
    +	(*MethodSettings_LongRunning)(nil),  // 20: google.api.MethodSettings.LongRunning
    +	(api.LaunchStage)(0),                // 21: google.api.LaunchStage
    +	(*durationpb.Duration)(nil),         // 22: google.protobuf.Duration
    +	(*descriptorpb.MethodOptions)(nil),  // 23: google.protobuf.MethodOptions
    +	(*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions
    +}
    +var file_google_api_client_proto_depIdxs = []int32{
    +	1,  // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
    +	14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration
    +	21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
    +	5,  // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
    +	6,  // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
    +	7,  // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
    +	8,  // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
    +	9,  // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
    +	10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
    +	11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
    +	12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
    +	13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
    +	0,  // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
    +	3,  // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
    +	15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
    +	2,  // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
    +	16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures
    +	2,  // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
    +	17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
    +	18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
    +	2,  // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
    +	2,  // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
    +	19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry
    +	20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
    +	22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
    +	22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
    +	22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
    +	23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions
    +	24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions
    +	24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
    +	24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions
    +	35, // [35:35] is the sub-list for method output_type
    +	35, // [35:35] is the sub-list for method input_type
    +	35, // [35:35] is the sub-list for extension type_name
    +	31, // [31:35] is the sub-list for extension extendee
    +	0,  // [0:31] is the sub-list for field type_name
    +}
    +
    +func init() { file_google_api_client_proto_init() }
    +func file_google_api_client_proto_init() {
    +	if File_google_api_client_proto != nil {
    +		return
    +	}
    +	if !protoimpl.UnsafeEnabled {
    +		file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*CommonLanguageSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*ClientLibrarySettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*Publishing); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*JavaSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*CppSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*PhpSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*PythonSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*NodeSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*DotnetSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*RubySettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*GoSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*MethodSettings); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*SelectiveGapicGeneration); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*PythonSettings_ExperimentalFeatures); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*MethodSettings_LongRunning); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +	}
    +	type x struct{}
    +	out := protoimpl.TypeBuilder{
    +		File: protoimpl.DescBuilder{
    +			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
    +			RawDescriptor: file_google_api_client_proto_rawDesc,
    +			NumEnums:      2,
    +			NumMessages:   19,
    +			NumExtensions: 4,
    +			NumServices:   0,
    +		},
    +		GoTypes:           file_google_api_client_proto_goTypes,
    +		DependencyIndexes: file_google_api_client_proto_depIdxs,
    +		EnumInfos:         file_google_api_client_proto_enumTypes,
    +		MessageInfos:      file_google_api_client_proto_msgTypes,
    +		ExtensionInfos:    file_google_api_client_proto_extTypes,
    +	}.Build()
    +	File_google_api_client_proto = out.File
    +	file_google_api_client_proto_rawDesc = nil
    +	file_google_api_client_proto_goTypes = nil
    +	file_google_api_client_proto_depIdxs = nil
    +}
    diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
    new file mode 100644
    index 000000000..5d583b866
    --- /dev/null
    +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
    @@ -0,0 +1,266 @@
    +// Copyright 2025 Google LLC
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Code generated by protoc-gen-go. DO NOT EDIT.
    +// versions:
    +// 	protoc-gen-go v1.26.0
    +// 	protoc        v4.24.4
    +// source: google/api/field_behavior.proto
    +
    +package annotations
    +
    +import (
    +	reflect "reflect"
    +	sync "sync"
    +
    +	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    +	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
    +)
    +
    +const (
    +	// Verify that this generated code is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    +	// Verify that runtime/protoimpl is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
    +)
    +
    +// An indicator of the behavior of a given field (for example, that a field
    +// is required in requests, or given as output but ignored as input).
    +// This **does not** change the behavior in protocol buffers itself; it only
    +// denotes the behavior and may affect how API tooling handles the field.
    +//
    +// Note: This enum **may** receive new values in the future.
    +type FieldBehavior int32
    +
    +const (
    +	// Conventional default for enums. Do not use this.
    +	FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0
    +	// Specifically denotes a field as optional.
    +	// While all fields in protocol buffers are optional, this may be specified
    +	// for emphasis if appropriate.
    +	FieldBehavior_OPTIONAL FieldBehavior = 1
    +	// Denotes a field as required.
    +	// This indicates that the field **must** be provided as part of the request,
    +	// and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
    +	FieldBehavior_REQUIRED FieldBehavior = 2
    +	// Denotes a field as output only.
    +	// This indicates that the field is provided in responses, but including the
    +	// field in a request does nothing (the server *must* ignore it and
    +	// *must not* throw an error as a result of the field's presence).
    +	FieldBehavior_OUTPUT_ONLY FieldBehavior = 3
    +	// Denotes a field as input only.
    +	// This indicates that the field is provided in requests, and the
    +	// corresponding field is not included in output.
    +	FieldBehavior_INPUT_ONLY FieldBehavior = 4
    +	// Denotes a field as immutable.
    +	// This indicates that the field may be set once in a request to create a
    +	// resource, but may not be changed thereafter.
    +	FieldBehavior_IMMUTABLE FieldBehavior = 5
    +	// Denotes that a (repeated) field is an unordered list.
    +	// This indicates that the service may provide the elements of the list
+	// in any arbitrary order, rather than the order the user originally
    +	// provided. Additionally, the list's order may or may not be stable.
    +	FieldBehavior_UNORDERED_LIST FieldBehavior = 6
    +	// Denotes that this field returns a non-empty default value if not set.
    +	// This indicates that if the user provides the empty value in a request,
    +	// a non-empty value will be returned. The user will not be aware of what
    +	// non-empty value to expect.
    +	FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7
    +	// Denotes that the field in a resource (a message annotated with
    +	// google.api.resource) is used in the resource name to uniquely identify the
    +	// resource. For AIP-compliant APIs, this should only be applied to the
    +	// `name` field on the resource.
    +	//
    +	// This behavior should not be applied to references to other resources within
    +	// the message.
    +	//
+	// The identifier field of resources often has different field behavior
    +	// depending on the request it is embedded in (e.g. for Create methods name
    +	// is optional and unused, while for Update methods it is required). Instead
    +	// of method-specific annotations, only `IDENTIFIER` is required.
    +	FieldBehavior_IDENTIFIER FieldBehavior = 8
    +)
    +
    +// Enum value maps for FieldBehavior.
    +var (
    +	FieldBehavior_name = map[int32]string{
    +		0: "FIELD_BEHAVIOR_UNSPECIFIED",
    +		1: "OPTIONAL",
    +		2: "REQUIRED",
    +		3: "OUTPUT_ONLY",
    +		4: "INPUT_ONLY",
    +		5: "IMMUTABLE",
    +		6: "UNORDERED_LIST",
    +		7: "NON_EMPTY_DEFAULT",
    +		8: "IDENTIFIER",
    +	}
    +	FieldBehavior_value = map[string]int32{
    +		"FIELD_BEHAVIOR_UNSPECIFIED": 0,
    +		"OPTIONAL":                   1,
    +		"REQUIRED":                   2,
    +		"OUTPUT_ONLY":                3,
    +		"INPUT_ONLY":                 4,
    +		"IMMUTABLE":                  5,
    +		"UNORDERED_LIST":             6,
    +		"NON_EMPTY_DEFAULT":          7,
    +		"IDENTIFIER":                 8,
    +	}
    +)
    +
    +func (x FieldBehavior) Enum() *FieldBehavior {
    +	p := new(FieldBehavior)
    +	*p = x
    +	return p
    +}
    +
    +func (x FieldBehavior) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_api_field_behavior_proto_enumTypes[0].Descriptor()
    +}
    +
    +func (FieldBehavior) Type() protoreflect.EnumType {
    +	return &file_google_api_field_behavior_proto_enumTypes[0]
    +}
    +
    +func (x FieldBehavior) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Use FieldBehavior.Descriptor instead.
    +func (FieldBehavior) EnumDescriptor() ([]byte, []int) {
    +	return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0}
    +}
    +
    +var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{
    +	{
    +		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
    +		ExtensionType: ([]FieldBehavior)(nil),
    +		Field:         1052,
    +		Name:          "google.api.field_behavior",
    +		Tag:           "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior",
    +		Filename:      "google/api/field_behavior.proto",
    +	},
    +}
    +
    +// Extension fields to descriptorpb.FieldOptions.
    +var (
    +	// A designation of a specific field behavior (required, output only, etc.)
    +	// in protobuf messages.
    +	//
    +	// Examples:
    +	//
    +	//	string name = 1 [(google.api.field_behavior) = REQUIRED];
    +	//	State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
    +	//	google.protobuf.Duration ttl = 1
    +	//	  [(google.api.field_behavior) = INPUT_ONLY];
    +	//	google.protobuf.Timestamp expire_time = 1
    +	//	  [(google.api.field_behavior) = OUTPUT_ONLY,
    +	//	   (google.api.field_behavior) = IMMUTABLE];
    +	//
    +	// repeated google.api.FieldBehavior field_behavior = 1052;
    +	E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0]
    +)
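+
+// A minimal usage sketch (illustrative only, not emitted by protoc-gen-go):
+// assuming fd is a protoreflect.FieldDescriptor whose options carry this
+// annotation, the declared behaviors can be read with the
+// google.golang.org/protobuf/proto package:
+//
+//	opts := fd.Options().(*descriptorpb.FieldOptions)
+//	if proto.HasExtension(opts, E_FieldBehavior) {
+//		behaviors := proto.GetExtension(opts, E_FieldBehavior).([]FieldBehavior)
+//		for _, b := range behaviors {
+//			if b == FieldBehavior_REQUIRED {
+//				// e.g. have a request validator enforce presence of the field
+//			}
+//		}
+//	}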
    +
    +var File_google_api_field_behavior_proto protoreflect.FileDescriptor
    +
    +var file_google_api_field_behavior_proto_rawDesc = []byte{
    +	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65,
    +	0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    +	0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67,
    +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
    +	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a,
    +	0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
    +	0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56,
    +	0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
    +	0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
    +	0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a,
    +	0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e,
    +	0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d,
    +	0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a,
    +	0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
    +	0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
    +	0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e,
    +	0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
    +	0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
    +	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
    +	0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69,
    +	0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52,
    +	0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70,
    +	0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
    +	0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50,
    +	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
    +	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
    +	0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
    +	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
    +	0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    +}
    +
    +var (
    +	file_google_api_field_behavior_proto_rawDescOnce sync.Once
    +	file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc
    +)
    +
    +func file_google_api_field_behavior_proto_rawDescGZIP() []byte {
    +	file_google_api_field_behavior_proto_rawDescOnce.Do(func() {
    +		file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData)
    +	})
    +	return file_google_api_field_behavior_proto_rawDescData
    +}
    +
    +var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
    +var file_google_api_field_behavior_proto_goTypes = []interface{}{
    +	(FieldBehavior)(0),                // 0: google.api.FieldBehavior
    +	(*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions
    +}
    +var file_google_api_field_behavior_proto_depIdxs = []int32{
    +	1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions
    +	0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior
    +	2, // [2:2] is the sub-list for method output_type
    +	2, // [2:2] is the sub-list for method input_type
    +	1, // [1:2] is the sub-list for extension type_name
    +	0, // [0:1] is the sub-list for extension extendee
    +	0, // [0:0] is the sub-list for field type_name
    +}
    +
    +func init() { file_google_api_field_behavior_proto_init() }
    +func file_google_api_field_behavior_proto_init() {
    +	if File_google_api_field_behavior_proto != nil {
    +		return
    +	}
    +	type x struct{}
    +	out := protoimpl.TypeBuilder{
    +		File: protoimpl.DescBuilder{
    +			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
    +			RawDescriptor: file_google_api_field_behavior_proto_rawDesc,
    +			NumEnums:      1,
    +			NumMessages:   0,
    +			NumExtensions: 1,
    +			NumServices:   0,
    +		},
    +		GoTypes:           file_google_api_field_behavior_proto_goTypes,
    +		DependencyIndexes: file_google_api_field_behavior_proto_depIdxs,
    +		EnumInfos:         file_google_api_field_behavior_proto_enumTypes,
    +		ExtensionInfos:    file_google_api_field_behavior_proto_extTypes,
    +	}.Build()
    +	File_google_api_field_behavior_proto = out.File
    +	file_google_api_field_behavior_proto_rawDesc = nil
    +	file_google_api_field_behavior_proto_goTypes = nil
    +	file_google_api_field_behavior_proto_depIdxs = nil
    +}
    diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
    new file mode 100644
    index 000000000..53e9dd1e9
    --- /dev/null
    +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
    @@ -0,0 +1,392 @@
    +// Copyright 2025 Google LLC
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Code generated by protoc-gen-go. DO NOT EDIT.
    +// versions:
    +// 	protoc-gen-go v1.26.0
    +// 	protoc        v4.24.4
    +// source: google/api/field_info.proto
    +
    +package annotations
    +
    +import (
    +	reflect "reflect"
    +	sync "sync"
    +
    +	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    +	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
    +)
    +
    +const (
    +	// Verify that this generated code is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    +	// Verify that runtime/protoimpl is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
    +)
    +
    +// The standard format of a field value. The supported formats are all backed
    +// by either an RFC defined by the IETF or a Google-defined AIP.
    +type FieldInfo_Format int32
    +
    +const (
    +	// Default, unspecified value.
    +	FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0
    +	// Universally Unique Identifier, version 4, value as defined by
    +	// https://datatracker.ietf.org/doc/html/rfc4122. The value may be
    +	// normalized to entirely lowercase letters. For example, the value
    +	// `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to
    +	// `f47ac10b-58cc-0372-8567-0e02b2c3d479`.
    +	FieldInfo_UUID4 FieldInfo_Format = 1
    +	// Internet Protocol v4 value as defined by [RFC
    +	// 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be
    +	// condensed, with leading zeros in each octet stripped. For example,
    +	// `001.022.233.040` would be condensed to `1.22.233.40`.
    +	FieldInfo_IPV4 FieldInfo_Format = 2
    +	// Internet Protocol v6 value as defined by [RFC
    +	// 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be
    +	// normalized to entirely lowercase letters with zeros compressed, following
    +	// [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example,
    +	// the value `2001:0DB8:0::0` would be normalized to `2001:db8::`.
    +	FieldInfo_IPV6 FieldInfo_Format = 3
    +	// An IP address in either v4 or v6 format as described by the individual
    +	// values defined herein. See the comments on the IPV4 and IPV6 types for
    +	// allowed normalizations of each.
    +	FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4
    +)
    +
    +// Enum value maps for FieldInfo_Format.
    +var (
    +	FieldInfo_Format_name = map[int32]string{
    +		0: "FORMAT_UNSPECIFIED",
    +		1: "UUID4",
    +		2: "IPV4",
    +		3: "IPV6",
    +		4: "IPV4_OR_IPV6",
    +	}
    +	FieldInfo_Format_value = map[string]int32{
    +		"FORMAT_UNSPECIFIED": 0,
    +		"UUID4":              1,
    +		"IPV4":               2,
    +		"IPV6":               3,
    +		"IPV4_OR_IPV6":       4,
    +	}
    +)
    +
    +func (x FieldInfo_Format) Enum() *FieldInfo_Format {
    +	p := new(FieldInfo_Format)
    +	*p = x
    +	return p
    +}
    +
    +func (x FieldInfo_Format) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_api_field_info_proto_enumTypes[0].Descriptor()
    +}
    +
    +func (FieldInfo_Format) Type() protoreflect.EnumType {
    +	return &file_google_api_field_info_proto_enumTypes[0]
    +}
    +
    +func (x FieldInfo_Format) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Use FieldInfo_Format.Descriptor instead.
    +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) {
    +	return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0}
    +}
    +
    +// Rich semantic information of an API field beyond basic typing.
    +type FieldInfo struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// The standard format of a field value. This does not explicitly configure
    +	// any API consumer, just documents the API's format for the field it is
    +	// applied to.
    +	Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"`
    +	// The type(s) that the annotated, generic field may represent.
    +	//
    +	// Currently, this must only be used on fields of type `google.protobuf.Any`.
    +	// Supporting other generic types may be considered in the future.
    +	ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"`
    +}
    +
    +func (x *FieldInfo) Reset() {
    +	*x = FieldInfo{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_field_info_proto_msgTypes[0]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *FieldInfo) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*FieldInfo) ProtoMessage() {}
    +
    +func (x *FieldInfo) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_field_info_proto_msgTypes[0]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead.
    +func (*FieldInfo) Descriptor() ([]byte, []int) {
    +	return file_google_api_field_info_proto_rawDescGZIP(), []int{0}
    +}
    +
    +func (x *FieldInfo) GetFormat() FieldInfo_Format {
    +	if x != nil {
    +		return x.Format
    +	}
    +	return FieldInfo_FORMAT_UNSPECIFIED
    +}
    +
    +func (x *FieldInfo) GetReferencedTypes() []*TypeReference {
    +	if x != nil {
    +		return x.ReferencedTypes
    +	}
    +	return nil
    +}
    +
    +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo].
    +type TypeReference struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// The name of the type that the annotated, generic field may represent.
    +	// If the type is in the same protobuf package, the value can be the simple
+	// message name, e.g., `"MyMessage"`. Otherwise, the value must be the
+	// fully-qualified message name, e.g., `"google.library.v1.Book"`.
    +	//
    +	// If the type(s) are unknown to the service (e.g. the field accepts generic
    +	// user input), use the wildcard `"*"` to denote this behavior.
    +	//
    +	// See [AIP-202](https://google.aip.dev/202#type-references) for more details.
    +	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
    +}
    +
    +func (x *TypeReference) Reset() {
    +	*x = TypeReference{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_field_info_proto_msgTypes[1]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *TypeReference) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*TypeReference) ProtoMessage() {}
    +
    +func (x *TypeReference) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_field_info_proto_msgTypes[1]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead.
    +func (*TypeReference) Descriptor() ([]byte, []int) {
    +	return file_google_api_field_info_proto_rawDescGZIP(), []int{1}
    +}
    +
    +func (x *TypeReference) GetTypeName() string {
    +	if x != nil {
    +		return x.TypeName
    +	}
    +	return ""
    +}
    +
    +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{
    +	{
    +		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
    +		ExtensionType: (*FieldInfo)(nil),
    +		Field:         291403980,
    +		Name:          "google.api.field_info",
    +		Tag:           "bytes,291403980,opt,name=field_info",
    +		Filename:      "google/api/field_info.proto",
    +	},
    +}
    +
    +// Extension fields to descriptorpb.FieldOptions.
    +var (
    +	// Rich semantic descriptor of an API field beyond the basic typing.
    +	//
    +	// Examples:
    +	//
    +	//	string request_id = 1 [(google.api.field_info).format = UUID4];
    +	//	string old_ip_address = 2 [(google.api.field_info).format = IPV4];
    +	//	string new_ip_address = 3 [(google.api.field_info).format = IPV6];
    +	//	string actual_ip_address = 4 [
    +	//	  (google.api.field_info).format = IPV4_OR_IPV6
    +	//	];
    +	//	google.protobuf.Any generic_field = 5 [
    +	//	  (google.api.field_info).referenced_types = {type_name: "ActualType"},
    +	//	  (google.api.field_info).referenced_types = {type_name: "OtherType"},
    +	//	];
    +	//	google.protobuf.Any generic_user_input = 5 [
    +	//	  (google.api.field_info).referenced_types = {type_name: "*"},
    +	//	];
    +	//
    +	// optional google.api.FieldInfo field_info = 291403980;
    +	E_FieldInfo = &file_google_api_field_info_proto_extTypes[0]
    +)
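+
+// A minimal usage sketch (illustrative only, not emitted by protoc-gen-go):
+// assuming opts is the *descriptorpb.FieldOptions of an annotated field, the
+// declared format can be inspected with the google.golang.org/protobuf/proto
+// package:
+//
+//	if proto.HasExtension(opts, E_FieldInfo) {
+//		info := proto.GetExtension(opts, E_FieldInfo).(*FieldInfo)
+//		if info.GetFormat() == FieldInfo_UUID4 {
+//			// e.g. auto-populate the field with a fresh UUID when it is unset
+//		}
+//	}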
    +
    +var File_google_api_field_info_proto protoreflect.FileDescriptor
    +
    +var file_google_api_field_info_proto_rawDesc = []byte{
    +	0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65,
    +	0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67,
    +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
    +	0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09,
    +	0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72,
    +	0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f,
    +	0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
    +	0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79,
    +	0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
    +	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72,
    +	0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64,
    +	0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
    +	0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
    +	0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34,
    +	0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04,
    +	0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f,
    +	0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65,
    +	0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70,
    +	0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79,
    +	0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
    +	0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
    +	0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42,
    +	0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
    +	0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74,
    +	0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
    +	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
    +	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
    +	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
    +	0x72, 0x6f, 0x74, 0x6f, 0x33,
    +}
    +
    +var (
    +	file_google_api_field_info_proto_rawDescOnce sync.Once
    +	file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc
    +)
    +
    +func file_google_api_field_info_proto_rawDescGZIP() []byte {
    +	file_google_api_field_info_proto_rawDescOnce.Do(func() {
    +		file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData)
    +	})
    +	return file_google_api_field_info_proto_rawDescData
    +}
    +
    +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
    +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
    +var file_google_api_field_info_proto_goTypes = []interface{}{
    +	(FieldInfo_Format)(0),             // 0: google.api.FieldInfo.Format
    +	(*FieldInfo)(nil),                 // 1: google.api.FieldInfo
    +	(*TypeReference)(nil),             // 2: google.api.TypeReference
    +	(*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
    +}
    +var file_google_api_field_info_proto_depIdxs = []int32{
    +	0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format
    +	2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference
    +	3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions
    +	1, // 3: google.api.field_info:type_name -> google.api.FieldInfo
    +	4, // [4:4] is the sub-list for method output_type
    +	4, // [4:4] is the sub-list for method input_type
    +	3, // [3:4] is the sub-list for extension type_name
    +	2, // [2:3] is the sub-list for extension extendee
    +	0, // [0:2] is the sub-list for field type_name
    +}
    +
    +func init() { file_google_api_field_info_proto_init() }
    +func file_google_api_field_info_proto_init() {
    +	if File_google_api_field_info_proto != nil {
    +		return
    +	}
    +	if !protoimpl.UnsafeEnabled {
    +		file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*FieldInfo); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*TypeReference); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +	}
    +	type x struct{}
    +	out := protoimpl.TypeBuilder{
    +		File: protoimpl.DescBuilder{
    +			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
    +			RawDescriptor: file_google_api_field_info_proto_rawDesc,
    +			NumEnums:      1,
    +			NumMessages:   2,
    +			NumExtensions: 1,
    +			NumServices:   0,
    +		},
    +		GoTypes:           file_google_api_field_info_proto_goTypes,
    +		DependencyIndexes: file_google_api_field_info_proto_depIdxs,
    +		EnumInfos:         file_google_api_field_info_proto_enumTypes,
    +		MessageInfos:      file_google_api_field_info_proto_msgTypes,
    +		ExtensionInfos:    file_google_api_field_info_proto_extTypes,
    +	}.Build()
    +	File_google_api_field_info_proto = out.File
    +	file_google_api_field_info_proto_rawDesc = nil
    +	file_google_api_field_info_proto_goTypes = nil
    +	file_google_api_field_info_proto_depIdxs = nil
    +}
    diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
    new file mode 100644
    index 000000000..d30fcee4c
    --- /dev/null
    +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
    @@ -0,0 +1,774 @@
    +// Copyright 2025 Google LLC
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Code generated by protoc-gen-go. DO NOT EDIT.
    +// versions:
    +// 	protoc-gen-go v1.26.0
    +// 	protoc        v4.24.4
    +// source: google/api/http.proto
    +
    +package annotations
    +
    +import (
    +	reflect "reflect"
    +	sync "sync"
    +
    +	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    +)
    +
    +const (
    +	// Verify that this generated code is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    +	// Verify that runtime/protoimpl is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
    +)
    +
    +// Defines the HTTP configuration for an API service. It contains a list of
    +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
    +// to one or more HTTP REST API methods.
    +type Http struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// A list of HTTP configuration rules that apply to individual API methods.
    +	//
    +	// **NOTE:** All service configuration rules follow "last one wins" order.
    +	Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
    +	// When set to true, URL path parameters will be fully URI-decoded except in
    +	// cases of single segment matches in reserved expansion, where "%2F" will be
    +	// left encoded.
    +	//
    +	// The default behavior is to not decode RFC 6570 reserved characters in multi
    +	// segment matches.
    +	FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
    +}
    +
    +func (x *Http) Reset() {
    +	*x = Http{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_http_proto_msgTypes[0]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *Http) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*Http) ProtoMessage() {}
    +
    +func (x *Http) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_http_proto_msgTypes[0]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use Http.ProtoReflect.Descriptor instead.
    +func (*Http) Descriptor() ([]byte, []int) {
    +	return file_google_api_http_proto_rawDescGZIP(), []int{0}
    +}
    +
    +func (x *Http) GetRules() []*HttpRule {
    +	if x != nil {
    +		return x.Rules
    +	}
    +	return nil
    +}
    +
    +func (x *Http) GetFullyDecodeReservedExpansion() bool {
    +	if x != nil {
    +		return x.FullyDecodeReservedExpansion
    +	}
    +	return false
    +}
    +
    +// gRPC Transcoding
    +//
    +// gRPC Transcoding is a feature for mapping between a gRPC method and one or
    +// more HTTP REST endpoints. It allows developers to build a single API service
    +// that supports both gRPC APIs and REST APIs. Many systems, including [Google
    +// APIs](https://github.com/googleapis/googleapis),
    +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
    +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
    +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature
    +// and use it for large scale production services.
    +//
    +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
    +// how different portions of the gRPC request message are mapped to the URL
    +// path, URL query parameters, and HTTP request body. It also controls how the
    +// gRPC response message is mapped to the HTTP response body. `HttpRule` is
    +// typically specified as an `google.api.http` annotation on the gRPC method.
    +//
    +// Each mapping specifies a URL path template and an HTTP method. The path
    +// template may refer to one or more fields in the gRPC request message, as long
    +// as each field is a non-repeated field with a primitive (non-message) type.
    +// The path template controls how fields of the request message are mapped to
    +// the URL path.
    +//
    +// Example:
    +//
    +//	service Messaging {
    +//	  rpc GetMessage(GetMessageRequest) returns (Message) {
    +//	    option (google.api.http) = {
    +//	        get: "/v1/{name=messages/*}"
    +//	    };
    +//	  }
    +//	}
    +//	message GetMessageRequest {
    +//	  string name = 1; // Mapped to URL path.
    +//	}
    +//	message Message {
    +//	  string text = 1; // The resource content.
    +//	}
    +//
    +// This enables an HTTP REST to gRPC mapping as below:
    +//
    +// - HTTP: `GET /v1/messages/123456`
    +// - gRPC: `GetMessage(name: "messages/123456")`
    +//
    +// Any fields in the request message which are not bound by the path template
    +// automatically become HTTP query parameters if there is no HTTP request body.
    +// For example:
    +//
    +//	service Messaging {
    +//	  rpc GetMessage(GetMessageRequest) returns (Message) {
    +//	    option (google.api.http) = {
    +//	        get:"/v1/messages/{message_id}"
    +//	    };
    +//	  }
    +//	}
    +//	message GetMessageRequest {
    +//	  message SubMessage {
    +//	    string subfield = 1;
    +//	  }
    +//	  string message_id = 1; // Mapped to URL path.
    +//	  int64 revision = 2;    // Mapped to URL query parameter `revision`.
    +//	  SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
    +//	}
    +//
+// This enables an HTTP JSON to RPC mapping as below:
    +//
    +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
    +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
    +// SubMessage(subfield: "foo"))`
    +//
    +// Note that fields which are mapped to URL query parameters must have a
    +// primitive type or a repeated primitive type or a non-repeated message type.
    +// In the case of a repeated type, the parameter can be repeated in the URL
+// as `...?param=A&param=B`. In the case of a message type, each field of the
    +// message is mapped to a separate parameter, such as
    +// `...?foo.a=A&foo.b=B&foo.c=C`.
    +//
    +// For HTTP methods that allow a request body, the `body` field
    +// specifies the mapping. Consider a REST update method on the
    +// message resource collection:
    +//
    +//	service Messaging {
    +//	  rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
    +//	    option (google.api.http) = {
    +//	      patch: "/v1/messages/{message_id}"
    +//	      body: "message"
    +//	    };
    +//	  }
    +//	}
    +//	message UpdateMessageRequest {
    +//	  string message_id = 1; // mapped to the URL
    +//	  Message message = 2;   // mapped to the body
    +//	}
    +//
    +// The following HTTP JSON to RPC mapping is enabled, where the
    +// representation of the JSON in the request body is determined by
    +// protos JSON encoding:
    +//
    +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
    +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
    +//
    +// The special name `*` can be used in the body mapping to define that
    +// every field not bound by the path template should be mapped to the
    +// request body.  This enables the following alternative definition of
    +// the update method:
    +//
    +//	service Messaging {
    +//	  rpc UpdateMessage(Message) returns (Message) {
    +//	    option (google.api.http) = {
    +//	      patch: "/v1/messages/{message_id}"
    +//	      body: "*"
    +//	    };
    +//	  }
    +//	}
    +//	message Message {
    +//	  string message_id = 1;
    +//	  string text = 2;
    +//	}
    +//
    +// The following HTTP JSON to RPC mapping is enabled:
    +//
    +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
    +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")`
    +//
    +// Note that when using `*` in the body mapping, it is not possible to
    +// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option rarely used in practice when
    +// defining REST APIs. The common usage of `*` is in custom methods
    +// which don't use the URL at all for transferring data.
    +//
    +// It is possible to define multiple HTTP methods for one RPC by using
    +// the `additional_bindings` option. Example:
    +//
    +//	service Messaging {
    +//	  rpc GetMessage(GetMessageRequest) returns (Message) {
    +//	    option (google.api.http) = {
    +//	      get: "/v1/messages/{message_id}"
    +//	      additional_bindings {
    +//	        get: "/v1/users/{user_id}/messages/{message_id}"
    +//	      }
    +//	    };
    +//	  }
    +//	}
    +//	message GetMessageRequest {
    +//	  string message_id = 1;
    +//	  string user_id = 2;
    +//	}
    +//
    +// This enables the following two alternative HTTP JSON to RPC mappings:
    +//
    +// - HTTP: `GET /v1/messages/123456`
    +// - gRPC: `GetMessage(message_id: "123456")`
    +//
    +// - HTTP: `GET /v1/users/me/messages/123456`
    +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")`
    +//
    +// # Rules for HTTP mapping
    +//
+//  1. Leaf request fields (recursively expanding nested messages in the request
    +//     message) are classified into three categories:
    +//     - Fields referred by the path template. They are passed via the URL path.
    +//     - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They
    +//     are passed via the HTTP
    +//     request body.
    +//     - All other fields are passed via the URL query parameters, and the
    +//     parameter name is the field path in the request message. A repeated
    +//     field can be represented as multiple query parameters under the same
    +//     name.
+//  2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL
+//     query parameter; all fields
+//     are passed via the URL path and HTTP request body.
+//  3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP
+//     request body; all
+//     fields are passed via the URL path and URL query parameters.
    +//
    +// Path template syntax
    +//
    +//	Template = "/" Segments [ Verb ] ;
    +//	Segments = Segment { "/" Segment } ;
    +//	Segment  = "*" | "**" | LITERAL | Variable ;
    +//	Variable = "{" FieldPath [ "=" Segments ] "}" ;
    +//	FieldPath = IDENT { "." IDENT } ;
    +//	Verb     = ":" LITERAL ;
    +//
    +// The syntax `*` matches a single URL path segment. The syntax `**` matches
    +// zero or more URL path segments, which must be the last part of the URL path
    +// except the `Verb`.
    +//
    +// The syntax `Variable` matches part of the URL path as specified by its
    +// template. A variable template must not contain other variables. If a variable
    +// matches a single path segment, its template may be omitted, e.g. `{var}`
    +// is equivalent to `{var=*}`.
    +//
    +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
    +// contains any reserved character, such characters should be percent-encoded
    +// before the matching.
    +//
    +// If a variable contains exactly one path segment, such as `"{var}"` or
    +// `"{var=*}"`, when such a variable is expanded into a URL path on the client
    +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
    +// server side does the reverse decoding. Such variables show up in the
    +// [Discovery
    +// Document](https://developers.google.com/discovery/v1/reference/apis) as
    +// `{var}`.
    +//
    +// If a variable contains multiple path segments, such as `"{var=foo/*}"`
    +// or `"{var=**}"`, when such a variable is expanded into a URL path on the
    +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
    +// The server side does the reverse decoding, except "%2F" and "%2f" are left
    +// unchanged. Such variables show up in the
    +// [Discovery
    +// Document](https://developers.google.com/discovery/v1/reference/apis) as
    +// `{+var}`.
    +//
    +// # Using gRPC API Service Configuration
    +//
    +// gRPC API Service Configuration (service config) is a configuration language
    +// for configuring a gRPC service to become a user-facing product. The
    +// service config is simply the YAML representation of the `google.api.Service`
    +// proto message.
    +//
    +// As an alternative to annotating your proto file, you can configure gRPC
    +// transcoding in your service config YAML files. You do this by specifying a
    +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
    +// effect as the proto annotation. This can be particularly useful if you
    +// have a proto that is reused in multiple services. Note that any transcoding
    +// specified in the service config will override any matching transcoding
    +// configuration in the proto.
    +//
    +// The following example selects a gRPC method and applies an `HttpRule` to it:
    +//
    +//	http:
    +//	  rules:
    +//	    - selector: example.v1.Messaging.GetMessage
    +//	      get: /v1/messages/{message_id}/{sub.subfield}
    +//
    +// # Special notes
    +//
    +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
    +// proto to JSON conversion must follow the [proto3
    +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
    +//
    +// While the single segment variable follows the semantics of
    +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
    +// Expansion, the multi segment variable **does not** follow RFC 6570 Section
    +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
    +// does not expand special characters like `?` and `#`, which would lead
     +// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
    +// for multi segment variables.
    +//
    +// The path variables **must not** refer to any repeated or mapped field,
    +// because client libraries are not capable of handling such variable expansion.
    +//
    +// The path variables **must not** capture the leading "/" character. The reason
    +// is that the most common use case "{var}" does not capture the leading "/"
    +// character. For consistency, all path variables must share the same behavior.
    +//
    +// Repeated message fields must not be mapped to URL query parameters, because
    +// no client library can support such complicated mapping.
    +//
    +// If an API needs to use a JSON array for request or response body, it can map
    +// the request or response body to a repeated field. However, some gRPC
    +// Transcoding implementations may not support this feature.
    +type HttpRule struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// Selects a method to which this rule applies.
    +	//
    +	// Refer to [selector][google.api.DocumentationRule.selector] for syntax
    +	// details.
    +	Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
     +	// Determines the URL pattern that is matched by this rule. This pattern can be
    +	// used with any of the {get|put|post|delete|patch} methods. A custom method
    +	// can be defined using the 'custom' field.
    +	//
    +	// Types that are assignable to Pattern:
    +	//
    +	//	*HttpRule_Get
    +	//	*HttpRule_Put
    +	//	*HttpRule_Post
    +	//	*HttpRule_Delete
    +	//	*HttpRule_Patch
    +	//	*HttpRule_Custom
    +	Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
    +	// The name of the request field whose value is mapped to the HTTP request
    +	// body, or `*` for mapping all request fields not captured by the path
    +	// pattern to the HTTP body, or omitted for not having any HTTP request body.
    +	//
    +	// NOTE: the referred field must be present at the top-level of the request
    +	// message type.
    +	Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
    +	// Optional. The name of the response field whose value is mapped to the HTTP
    +	// response body. When omitted, the entire response message will be used
    +	// as the HTTP response body.
    +	//
    +	// NOTE: The referred field must be present at the top-level of the response
    +	// message type.
    +	ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
    +	// Additional HTTP bindings for the selector. Nested bindings must
    +	// not contain an `additional_bindings` field themselves (that is,
    +	// the nesting may only be one level deep).
    +	AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
    +}
    +
    +func (x *HttpRule) Reset() {
    +	*x = HttpRule{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_http_proto_msgTypes[1]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *HttpRule) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*HttpRule) ProtoMessage() {}
    +
    +func (x *HttpRule) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_http_proto_msgTypes[1]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead.
    +func (*HttpRule) Descriptor() ([]byte, []int) {
    +	return file_google_api_http_proto_rawDescGZIP(), []int{1}
    +}
    +
    +func (x *HttpRule) GetSelector() string {
    +	if x != nil {
    +		return x.Selector
    +	}
    +	return ""
    +}
    +
    +func (m *HttpRule) GetPattern() isHttpRule_Pattern {
    +	if m != nil {
    +		return m.Pattern
    +	}
    +	return nil
    +}
    +
    +func (x *HttpRule) GetGet() string {
    +	if x, ok := x.GetPattern().(*HttpRule_Get); ok {
    +		return x.Get
    +	}
    +	return ""
    +}
    +
    +func (x *HttpRule) GetPut() string {
    +	if x, ok := x.GetPattern().(*HttpRule_Put); ok {
    +		return x.Put
    +	}
    +	return ""
    +}
    +
    +func (x *HttpRule) GetPost() string {
    +	if x, ok := x.GetPattern().(*HttpRule_Post); ok {
    +		return x.Post
    +	}
    +	return ""
    +}
    +
    +func (x *HttpRule) GetDelete() string {
    +	if x, ok := x.GetPattern().(*HttpRule_Delete); ok {
    +		return x.Delete
    +	}
    +	return ""
    +}
    +
    +func (x *HttpRule) GetPatch() string {
    +	if x, ok := x.GetPattern().(*HttpRule_Patch); ok {
    +		return x.Patch
    +	}
    +	return ""
    +}
    +
    +func (x *HttpRule) GetCustom() *CustomHttpPattern {
    +	if x, ok := x.GetPattern().(*HttpRule_Custom); ok {
    +		return x.Custom
    +	}
    +	return nil
    +}
    +
    +func (x *HttpRule) GetBody() string {
    +	if x != nil {
    +		return x.Body
    +	}
    +	return ""
    +}
    +
    +func (x *HttpRule) GetResponseBody() string {
    +	if x != nil {
    +		return x.ResponseBody
    +	}
    +	return ""
    +}
    +
    +func (x *HttpRule) GetAdditionalBindings() []*HttpRule {
    +	if x != nil {
    +		return x.AdditionalBindings
    +	}
    +	return nil
    +}
    +
    +type isHttpRule_Pattern interface {
    +	isHttpRule_Pattern()
    +}
    +
    +type HttpRule_Get struct {
    +	// Maps to HTTP GET. Used for listing and getting information about
    +	// resources.
    +	Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"`
    +}
    +
    +type HttpRule_Put struct {
    +	// Maps to HTTP PUT. Used for replacing a resource.
    +	Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"`
    +}
    +
    +type HttpRule_Post struct {
    +	// Maps to HTTP POST. Used for creating a resource or performing an action.
    +	Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"`
    +}
    +
    +type HttpRule_Delete struct {
    +	// Maps to HTTP DELETE. Used for deleting a resource.
    +	Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
    +}
    +
    +type HttpRule_Patch struct {
    +	// Maps to HTTP PATCH. Used for updating a resource.
    +	Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"`
    +}
    +
    +type HttpRule_Custom struct {
    +	// The custom pattern is used for specifying an HTTP method that is not
    +	// included in the `pattern` field, such as HEAD, or "*" to leave the
    +	// HTTP method unspecified for this rule. The wild-card rule is useful
    +	// for services that provide content to Web (HTML) clients.
    +	Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
    +}
    +
    +func (*HttpRule_Get) isHttpRule_Pattern() {}
    +
    +func (*HttpRule_Put) isHttpRule_Pattern() {}
    +
    +func (*HttpRule_Post) isHttpRule_Pattern() {}
    +
    +func (*HttpRule_Delete) isHttpRule_Pattern() {}
    +
    +func (*HttpRule_Patch) isHttpRule_Pattern() {}
    +
    +func (*HttpRule_Custom) isHttpRule_Pattern() {}
    +
     +// A custom pattern is used for defining a custom HTTP verb.
    +type CustomHttpPattern struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// The name of this custom HTTP verb.
    +	Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
    +	// The path matched by this custom verb.
    +	Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
    +}
    +
    +func (x *CustomHttpPattern) Reset() {
    +	*x = CustomHttpPattern{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_http_proto_msgTypes[2]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *CustomHttpPattern) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*CustomHttpPattern) ProtoMessage() {}
    +
    +func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_http_proto_msgTypes[2]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead.
    +func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
    +	return file_google_api_http_proto_rawDescGZIP(), []int{2}
    +}
    +
    +func (x *CustomHttpPattern) GetKind() string {
    +	if x != nil {
    +		return x.Kind
    +	}
    +	return ""
    +}
    +
    +func (x *CustomHttpPattern) GetPath() string {
    +	if x != nil {
    +		return x.Path
    +	}
    +	return ""
    +}
    +
    +var File_google_api_http_proto protoreflect.FileDescriptor
    +
    +var file_google_api_http_proto_rawDesc = []byte{
    +	0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
    +	0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72,
    +	0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65,
    +	0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79,
    +	0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
    +	0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
    +	0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
    +	0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda,
    +	0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73,
    +	0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73,
    +	0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02,
    +	0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70,
    +	0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12,
    +	0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
    +	0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18,
    +	0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12,
    +	0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
    +	0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f,
    +	0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50,
    +	0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
    +	0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
    +	0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
    +	0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73,
    +	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64,
    +	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
    +	0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64,
    +	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
    +	0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43,
    +	0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
    +	0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
    +	0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01,
    +	0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x67, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
    +	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70,
    +	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
    +	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61,
    +	0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61,
    +	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50,
    +	0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    +}
    +
    +var (
    +	file_google_api_http_proto_rawDescOnce sync.Once
    +	file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc
    +)
    +
    +func file_google_api_http_proto_rawDescGZIP() []byte {
    +	file_google_api_http_proto_rawDescOnce.Do(func() {
    +		file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData)
    +	})
    +	return file_google_api_http_proto_rawDescData
    +}
    +
    +var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
    +var file_google_api_http_proto_goTypes = []interface{}{
    +	(*Http)(nil),              // 0: google.api.Http
    +	(*HttpRule)(nil),          // 1: google.api.HttpRule
    +	(*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern
    +}
    +var file_google_api_http_proto_depIdxs = []int32{
    +	1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule
    +	2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern
    +	1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule
    +	3, // [3:3] is the sub-list for method output_type
    +	3, // [3:3] is the sub-list for method input_type
    +	3, // [3:3] is the sub-list for extension type_name
    +	3, // [3:3] is the sub-list for extension extendee
    +	0, // [0:3] is the sub-list for field type_name
    +}
    +
    +func init() { file_google_api_http_proto_init() }
    +func file_google_api_http_proto_init() {
    +	if File_google_api_http_proto != nil {
    +		return
    +	}
    +	if !protoimpl.UnsafeEnabled {
    +		file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*Http); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*HttpRule); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*CustomHttpPattern); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +	}
    +	file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{
    +		(*HttpRule_Get)(nil),
    +		(*HttpRule_Put)(nil),
    +		(*HttpRule_Post)(nil),
    +		(*HttpRule_Delete)(nil),
    +		(*HttpRule_Patch)(nil),
    +		(*HttpRule_Custom)(nil),
    +	}
    +	type x struct{}
    +	out := protoimpl.TypeBuilder{
    +		File: protoimpl.DescBuilder{
    +			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
    +			RawDescriptor: file_google_api_http_proto_rawDesc,
    +			NumEnums:      0,
    +			NumMessages:   3,
    +			NumExtensions: 0,
    +			NumServices:   0,
    +		},
    +		GoTypes:           file_google_api_http_proto_goTypes,
    +		DependencyIndexes: file_google_api_http_proto_depIdxs,
    +		MessageInfos:      file_google_api_http_proto_msgTypes,
    +	}.Build()
    +	File_google_api_http_proto = out.File
    +	file_google_api_http_proto_rawDesc = nil
    +	file_google_api_http_proto_goTypes = nil
    +	file_google_api_http_proto_depIdxs = nil
    +}
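     
     As an aside for reviewers: the `HttpRule` message added above is the value that transcoding layers (grpc-gateway-style proxies) consume. The following is a minimal sketch, not part of the vendored file, assuming only the `annotations` package whose import path appears in this diff; it builds the rule from the service-config example in the doc comment and reads it back through the generated oneof accessors:
     
     	package main
     
     	import (
     		"fmt"
     
     		"google.golang.org/genproto/googleapis/api/annotations"
     	)
     
     	func main() {
     		// Equivalent to the YAML example in the HttpRule doc comment:
     		//   selector: example.v1.Messaging.GetMessage
     		//   get: /v1/messages/{message_id}/{sub.subfield}
     		rule := &annotations.HttpRule{
     			Selector: "example.v1.Messaging.GetMessage",
     			Pattern:  &annotations.HttpRule_Get{Get: "/v1/messages/{message_id}/{sub.subfield}"},
     		}
     
     		// The oneof accessors return the zero value unless the matching
     		// variant is set, so only GetGet yields the pattern here.
     		fmt.Println(rule.GetGet())  // "/v1/messages/{message_id}/{sub.subfield}"
     		fmt.Println(rule.GetPost()) // ""
     	}
     
     The wrapper types (`HttpRule_Get`, `HttpRule_Put`, ...) are how protoc-gen-go represents the `pattern` oneof: exactly one variant may be set on a rule at a time.
     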
    diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
    new file mode 100644
    index 000000000..175974a86
    --- /dev/null
    +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
    @@ -0,0 +1,659 @@
    +// Copyright 2025 Google LLC
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Code generated by protoc-gen-go. DO NOT EDIT.
    +// versions:
    +// 	protoc-gen-go v1.26.0
    +// 	protoc        v4.24.4
    +// source: google/api/resource.proto
    +
    +package annotations
    +
    +import (
    +	reflect "reflect"
    +	sync "sync"
    +
    +	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    +	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
    +)
    +
    +const (
    +	// Verify that this generated code is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    +	// Verify that runtime/protoimpl is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
    +)
    +
    +// A description of the historical or future-looking state of the
    +// resource pattern.
    +type ResourceDescriptor_History int32
    +
    +const (
    +	// The "unset" value.
    +	ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0
    +	// The resource originally had one pattern and launched as such, and
    +	// additional patterns were added later.
    +	ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1
    +	// The resource has one pattern, but the API owner expects to add more
    +	// later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents
    +	// that from being necessary once there are multiple patterns.)
    +	ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2
    +)
    +
    +// Enum value maps for ResourceDescriptor_History.
    +var (
    +	ResourceDescriptor_History_name = map[int32]string{
    +		0: "HISTORY_UNSPECIFIED",
    +		1: "ORIGINALLY_SINGLE_PATTERN",
    +		2: "FUTURE_MULTI_PATTERN",
    +	}
    +	ResourceDescriptor_History_value = map[string]int32{
    +		"HISTORY_UNSPECIFIED":       0,
    +		"ORIGINALLY_SINGLE_PATTERN": 1,
    +		"FUTURE_MULTI_PATTERN":      2,
    +	}
    +)
    +
    +func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History {
    +	p := new(ResourceDescriptor_History)
    +	*p = x
    +	return p
    +}
    +
    +func (x ResourceDescriptor_History) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_api_resource_proto_enumTypes[0].Descriptor()
    +}
    +
    +func (ResourceDescriptor_History) Type() protoreflect.EnumType {
    +	return &file_google_api_resource_proto_enumTypes[0]
    +}
    +
    +func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Use ResourceDescriptor_History.Descriptor instead.
    +func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) {
    +	return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0}
    +}
    +
    +// A flag representing a specific style that a resource claims to conform to.
    +type ResourceDescriptor_Style int32
    +
    +const (
    +	// The unspecified value. Do not use.
    +	ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0
    +	// This resource is intended to be "declarative-friendly".
    +	//
    +	// Declarative-friendly resources must be more strictly consistent, and
    +	// setting this to true communicates to tools that this resource should
    +	// adhere to declarative-friendly expectations.
    +	//
    +	// Note: This is used by the API linter (linter.aip.dev) to enable
    +	// additional checks.
    +	ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1
    +)
    +
    +// Enum value maps for ResourceDescriptor_Style.
    +var (
    +	ResourceDescriptor_Style_name = map[int32]string{
    +		0: "STYLE_UNSPECIFIED",
    +		1: "DECLARATIVE_FRIENDLY",
    +	}
    +	ResourceDescriptor_Style_value = map[string]int32{
    +		"STYLE_UNSPECIFIED":    0,
    +		"DECLARATIVE_FRIENDLY": 1,
    +	}
    +)
    +
    +func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style {
    +	p := new(ResourceDescriptor_Style)
    +	*p = x
    +	return p
    +}
    +
    +func (x ResourceDescriptor_Style) String() string {
    +	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
    +}
    +
    +func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor {
    +	return file_google_api_resource_proto_enumTypes[1].Descriptor()
    +}
    +
    +func (ResourceDescriptor_Style) Type() protoreflect.EnumType {
    +	return &file_google_api_resource_proto_enumTypes[1]
    +}
    +
    +func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber {
    +	return protoreflect.EnumNumber(x)
    +}
    +
    +// Deprecated: Use ResourceDescriptor_Style.Descriptor instead.
    +func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) {
    +	return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1}
    +}
    +
    +// A simple descriptor of a resource type.
    +//
    +// ResourceDescriptor annotates a resource message (either by means of a
    +// protobuf annotation or use in the service config), and associates the
    +// resource's schema, the resource type, and the pattern of the resource name.
    +//
    +// Example:
    +//
    +//	message Topic {
    +//	  // Indicates this message defines a resource schema.
    +//	  // Declares the resource type in the format of {service}/{kind}.
    +//	  // For Kubernetes resources, the format is {api group}/{kind}.
    +//	  option (google.api.resource) = {
    +//	    type: "pubsub.googleapis.com/Topic"
    +//	    pattern: "projects/{project}/topics/{topic}"
    +//	  };
    +//	}
    +//
    +// The ResourceDescriptor Yaml config will look like:
    +//
    +//	resources:
    +//	- type: "pubsub.googleapis.com/Topic"
    +//	  pattern: "projects/{project}/topics/{topic}"
    +//
    +// Sometimes, resources have multiple patterns, typically because they can
    +// live under multiple parents.
    +//
    +// Example:
    +//
    +//	message LogEntry {
    +//	  option (google.api.resource) = {
    +//	    type: "logging.googleapis.com/LogEntry"
    +//	    pattern: "projects/{project}/logs/{log}"
    +//	    pattern: "folders/{folder}/logs/{log}"
    +//	    pattern: "organizations/{organization}/logs/{log}"
    +//	    pattern: "billingAccounts/{billing_account}/logs/{log}"
    +//	  };
    +//	}
    +//
    +// The ResourceDescriptor Yaml config will look like:
    +//
    +//	resources:
    +//	- type: 'logging.googleapis.com/LogEntry'
    +//	  pattern: "projects/{project}/logs/{log}"
    +//	  pattern: "folders/{folder}/logs/{log}"
    +//	  pattern: "organizations/{organization}/logs/{log}"
    +//	  pattern: "billingAccounts/{billing_account}/logs/{log}"
    +type ResourceDescriptor struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// The resource type. It must be in the format of
    +	// {service_name}/{resource_type_kind}. The `resource_type_kind` must be
    +	// singular and must not include version numbers.
    +	//
    +	// Example: `storage.googleapis.com/Bucket`
    +	//
    +	// The value of the resource_type_kind must follow the regular expression
    +	// /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and
    +	// should use PascalCase (UpperCamelCase). The maximum number of
    +	// characters allowed for the `resource_type_kind` is 100.
    +	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
    +	// Optional. The relative resource name pattern associated with this resource
    +	// type. The DNS prefix of the full resource name shouldn't be specified here.
    +	//
    +	// The path pattern must follow the syntax, which aligns with HTTP binding
    +	// syntax:
    +	//
    +	//	Template = Segment { "/" Segment } ;
    +	//	Segment = LITERAL | Variable ;
    +	//	Variable = "{" LITERAL "}" ;
    +	//
    +	// Examples:
    +	//
    +	//   - "projects/{project}/topics/{topic}"
    +	//   - "projects/{project}/knowledgeBases/{knowledge_base}"
    +	//
    +	// The components in braces correspond to the IDs for each resource in the
    +	// hierarchy. It is expected that, if multiple patterns are provided,
    +	// the same component name (e.g. "project") refers to IDs of the same
    +	// type of resource.
    +	Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"`
    +	// Optional. The field on the resource that designates the resource name
    +	// field. If omitted, this is assumed to be "name".
    +	NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"`
    +	// Optional. The historical or future-looking state of the resource pattern.
    +	//
    +	// Example:
    +	//
    +	//	// The InspectTemplate message originally only supported resource
    +	//	// names with organization, and project was added later.
    +	//	message InspectTemplate {
    +	//	  option (google.api.resource) = {
    +	//	    type: "dlp.googleapis.com/InspectTemplate"
    +	//	    pattern:
    +	//	    "organizations/{organization}/inspectTemplates/{inspect_template}"
    +	//	    pattern: "projects/{project}/inspectTemplates/{inspect_template}"
    +	//	    history: ORIGINALLY_SINGLE_PATTERN
    +	//	  };
    +	//	}
    +	History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"`
    +	// The plural name used in the resource name and permission names, such as
    +	// 'projects' for the resource name of 'projects/{project}' and the permission
    +	// name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception
    +	// to this is for Nested Collections that have stuttering names, as defined
    +	// in [AIP-122](https://google.aip.dev/122#nested-collections), where the
    +	// collection ID in the resource name pattern does not necessarily directly
    +	// match the `plural` value.
    +	//
    +	// It is the same concept of the `plural` field in k8s CRD spec
    +	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
    +	//
    +	// Note: The plural form is required even for singleton resources. See
    +	// https://aip.dev/156
    +	Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"`
    +	// The same concept of the `singular` field in k8s CRD spec
    +	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
    +	// Such as "project" for the `resourcemanager.googleapis.com/Project` type.
    +	Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"`
    +	// Style flag(s) for this resource.
    +	// These indicate that a resource is expected to conform to a given
    +	// style. See the specific style flags for additional information.
    +	Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"`
    +}
    +
    +func (x *ResourceDescriptor) Reset() {
    +	*x = ResourceDescriptor{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_resource_proto_msgTypes[0]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *ResourceDescriptor) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*ResourceDescriptor) ProtoMessage() {}
    +
    +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_resource_proto_msgTypes[0]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead.
    +func (*ResourceDescriptor) Descriptor() ([]byte, []int) {
    +	return file_google_api_resource_proto_rawDescGZIP(), []int{0}
    +}
    +
    +func (x *ResourceDescriptor) GetType() string {
    +	if x != nil {
    +		return x.Type
    +	}
    +	return ""
    +}
    +
    +func (x *ResourceDescriptor) GetPattern() []string {
    +	if x != nil {
    +		return x.Pattern
    +	}
    +	return nil
    +}
    +
    +func (x *ResourceDescriptor) GetNameField() string {
    +	if x != nil {
    +		return x.NameField
    +	}
    +	return ""
    +}
    +
    +func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History {
    +	if x != nil {
    +		return x.History
    +	}
    +	return ResourceDescriptor_HISTORY_UNSPECIFIED
    +}
    +
    +func (x *ResourceDescriptor) GetPlural() string {
    +	if x != nil {
    +		return x.Plural
    +	}
    +	return ""
    +}
    +
    +func (x *ResourceDescriptor) GetSingular() string {
    +	if x != nil {
    +		return x.Singular
    +	}
    +	return ""
    +}
    +
    +func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style {
    +	if x != nil {
    +		return x.Style
    +	}
    +	return nil
    +}
    +
    +// Defines a proto annotation that describes a string field that refers to
    +// an API resource.
    +type ResourceReference struct {
    +	state         protoimpl.MessageState
    +	sizeCache     protoimpl.SizeCache
    +	unknownFields protoimpl.UnknownFields
    +
    +	// The resource type that the annotated field references.
    +	//
    +	// Example:
    +	//
    +	//	message Subscription {
    +	//	  string topic = 2 [(google.api.resource_reference) = {
    +	//	    type: "pubsub.googleapis.com/Topic"
    +	//	  }];
    +	//	}
    +	//
    +	// Occasionally, a field may reference an arbitrary resource. In this case,
    +	// APIs use the special value * in their resource reference.
    +	//
    +	// Example:
    +	//
    +	//	message GetIamPolicyRequest {
    +	//	  string resource = 2 [(google.api.resource_reference) = {
    +	//	    type: "*"
    +	//	  }];
    +	//	}
    +	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
    +	// The resource type of a child collection that the annotated field
    +	// references. This is useful for annotating the `parent` field that
    +	// doesn't have a fixed resource type.
    +	//
    +	// Example:
    +	//
    +	//	message ListLogEntriesRequest {
    +	//	  string parent = 1 [(google.api.resource_reference) = {
    +	//	    child_type: "logging.googleapis.com/LogEntry"
     +	//	  }];
    +	//	}
    +	ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"`
    +}
    +
    +func (x *ResourceReference) Reset() {
    +	*x = ResourceReference{}
    +	if protoimpl.UnsafeEnabled {
    +		mi := &file_google_api_resource_proto_msgTypes[1]
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		ms.StoreMessageInfo(mi)
    +	}
    +}
    +
    +func (x *ResourceReference) String() string {
    +	return protoimpl.X.MessageStringOf(x)
    +}
    +
    +func (*ResourceReference) ProtoMessage() {}
    +
    +func (x *ResourceReference) ProtoReflect() protoreflect.Message {
    +	mi := &file_google_api_resource_proto_msgTypes[1]
    +	if protoimpl.UnsafeEnabled && x != nil {
    +		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
    +		if ms.LoadMessageInfo() == nil {
    +			ms.StoreMessageInfo(mi)
    +		}
    +		return ms
    +	}
    +	return mi.MessageOf(x)
    +}
    +
    +// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead.
    +func (*ResourceReference) Descriptor() ([]byte, []int) {
    +	return file_google_api_resource_proto_rawDescGZIP(), []int{1}
    +}
    +
    +func (x *ResourceReference) GetType() string {
    +	if x != nil {
    +		return x.Type
    +	}
    +	return ""
    +}
    +
    +func (x *ResourceReference) GetChildType() string {
    +	if x != nil {
    +		return x.ChildType
    +	}
    +	return ""
    +}
    +
    +var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{
    +	{
    +		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
    +		ExtensionType: (*ResourceReference)(nil),
    +		Field:         1055,
    +		Name:          "google.api.resource_reference",
    +		Tag:           "bytes,1055,opt,name=resource_reference",
    +		Filename:      "google/api/resource.proto",
    +	},
    +	{
    +		ExtendedType:  (*descriptorpb.FileOptions)(nil),
    +		ExtensionType: ([]*ResourceDescriptor)(nil),
    +		Field:         1053,
    +		Name:          "google.api.resource_definition",
    +		Tag:           "bytes,1053,rep,name=resource_definition",
    +		Filename:      "google/api/resource.proto",
    +	},
    +	{
    +		ExtendedType:  (*descriptorpb.MessageOptions)(nil),
    +		ExtensionType: (*ResourceDescriptor)(nil),
    +		Field:         1053,
    +		Name:          "google.api.resource",
    +		Tag:           "bytes,1053,opt,name=resource",
    +		Filename:      "google/api/resource.proto",
    +	},
    +}
    +
    +// Extension fields to descriptorpb.FieldOptions.
    +var (
    +	// An annotation that describes a resource reference, see
    +	// [ResourceReference][].
    +	//
    +	// optional google.api.ResourceReference resource_reference = 1055;
    +	E_ResourceReference = &file_google_api_resource_proto_extTypes[0]
    +)
    +
    +// Extension fields to descriptorpb.FileOptions.
    +var (
    +	// An annotation that describes a resource definition without a corresponding
    +	// message; see [ResourceDescriptor][].
    +	//
    +	// repeated google.api.ResourceDescriptor resource_definition = 1053;
    +	E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1]
    +)
    +
    +// Extension fields to descriptorpb.MessageOptions.
    +var (
    +	// An annotation that describes a resource definition, see
    +	// [ResourceDescriptor][].
    +	//
    +	// optional google.api.ResourceDescriptor resource = 1053;
    +	E_Resource = &file_google_api_resource_proto_extTypes[2]
    +)
    +
    +var File_google_api_resource_proto protoreflect.FileDescriptor
    +
    +var file_google_api_resource_proto_rawDesc = []byte{
    +	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73,
    +	0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
    +	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
    +	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65,
    +	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
    +	0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
    +	0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18,
    +	0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d,
    +	0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01,
    +	0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a,
    +	0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
    +	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f,
    +	0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48,
    +	0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12,
    +	0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
    +	0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75,
    +	0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75,
    +	0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03,
    +	0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
    +	0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
    +	0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22,
    +	0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49,
    +	0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
    +	0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c,
    +	0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e,
    +	0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c,
    +	0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05,
    +	0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55,
    +	0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14,
    +	0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45,
    +	0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
    +	0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74,
    +	0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
    +	0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20,
    +	0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c,
    +	0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72,
    +	0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
    +	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
    +	0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
    +	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
    +	0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75,
    +	0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13,
    +	0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74,
    +	0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
    +	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
    +	0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
    +	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
    +	0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08,
    +	0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    +	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
    +	0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b,
    +	0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65,
    +	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
    +	0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6b, 0x0a, 0x0e, 0x63, 0x6f,
    +	0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65,
    +	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67,
    +	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
    +	0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    +	0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
    +	0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
    +	0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    +}
    +
    +var (
    +	file_google_api_resource_proto_rawDescOnce sync.Once
    +	file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc
    +)
    +
    +func file_google_api_resource_proto_rawDescGZIP() []byte {
    +	file_google_api_resource_proto_rawDescOnce.Do(func() {
    +		file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData)
    +	})
    +	return file_google_api_resource_proto_rawDescData
    +}
    +
    +var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
    +var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
    +var file_google_api_resource_proto_goTypes = []interface{}{
    +	(ResourceDescriptor_History)(0),     // 0: google.api.ResourceDescriptor.History
    +	(ResourceDescriptor_Style)(0),       // 1: google.api.ResourceDescriptor.Style
    +	(*ResourceDescriptor)(nil),          // 2: google.api.ResourceDescriptor
    +	(*ResourceReference)(nil),           // 3: google.api.ResourceReference
    +	(*descriptorpb.FieldOptions)(nil),   // 4: google.protobuf.FieldOptions
    +	(*descriptorpb.FileOptions)(nil),    // 5: google.protobuf.FileOptions
    +	(*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions
    +}
    +var file_google_api_resource_proto_depIdxs = []int32{
    +	0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History
    +	1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style
    +	4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions
    +	5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions
    +	6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions
    +	3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference
    +	2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor
    +	2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor
    +	8, // [8:8] is the sub-list for method output_type
    +	8, // [8:8] is the sub-list for method input_type
    +	5, // [5:8] is the sub-list for extension type_name
    +	2, // [2:5] is the sub-list for extension extendee
    +	0, // [0:2] is the sub-list for field type_name
    +}
    +
    +func init() { file_google_api_resource_proto_init() }
    +func file_google_api_resource_proto_init() {
    +	if File_google_api_resource_proto != nil {
    +		return
    +	}
    +	if !protoimpl.UnsafeEnabled {
    +		file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*ResourceDescriptor); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +		file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
    +			switch v := v.(*ResourceReference); i {
    +			case 0:
    +				return &v.state
    +			case 1:
    +				return &v.sizeCache
    +			case 2:
    +				return &v.unknownFields
    +			default:
    +				return nil
    +			}
    +		}
    +	}
    +	type x struct{}
    +	out := protoimpl.TypeBuilder{
    +		File: protoimpl.DescBuilder{
    +			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
    +			RawDescriptor: file_google_api_resource_proto_rawDesc,
    +			NumEnums:      2,
    +			NumMessages:   2,
    +			NumExtensions: 3,
    +			NumServices:   0,
    +		},
    +		GoTypes:           file_google_api_resource_proto_goTypes,
    +		DependencyIndexes: file_google_api_resource_proto_depIdxs,
    +		EnumInfos:         file_google_api_resource_proto_enumTypes,
    +		MessageInfos:      file_google_api_resource_proto_msgTypes,
    +		ExtensionInfos:    file_google_api_resource_proto_extTypes,
    +	}.Build()
    +	File_google_api_resource_proto = out.File
    +	file_google_api_resource_proto_rawDesc = nil
    +	file_google_api_resource_proto_goTypes = nil
    +	file_google_api_resource_proto_depIdxs = nil
    +}
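     
     For reviewers who want to see these extensions in action: a minimal sketch, not part of the vendored file, assuming the standard `google.golang.org/protobuf/proto` extension API alongside the packages this file already imports. It attaches a `google.api.resource` annotation to a `MessageOptions` value and reads it back through `E_Resource`:
     
     	package main
     
     	import (
     		"fmt"
     
     		"google.golang.org/genproto/googleapis/api/annotations"
     		"google.golang.org/protobuf/proto"
     		"google.golang.org/protobuf/types/descriptorpb"
     	)
     
     	// describeResource prints the resource type and name patterns carried by
     	// a message's options when the google.api.resource annotation is present.
     	func describeResource(opts *descriptorpb.MessageOptions) {
     		if opts == nil || !proto.HasExtension(opts, annotations.E_Resource) {
     			return
     		}
     		rd := proto.GetExtension(opts, annotations.E_Resource).(*annotations.ResourceDescriptor)
     		fmt.Println("type:", rd.GetType())
     		for _, p := range rd.GetPattern() {
     			fmt.Println("pattern:", p)
     		}
     	}
     
     	func main() {
     		// A hand-built options value standing in for one parsed from a
     		// real descriptor.
     		opts := &descriptorpb.MessageOptions{}
     		proto.SetExtension(opts, annotations.E_Resource, &annotations.ResourceDescriptor{
     			Type:    "pubsub.googleapis.com/Topic",
     			Pattern: []string{"projects/{project}/topics/{topic}"},
     		})
     		describeResource(opts)
     	}
     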
    diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
    new file mode 100644
    index 000000000..b8c4aa71f
    --- /dev/null
    +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
    @@ -0,0 +1,693 @@
    +// Copyright 2025 Google LLC
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Code generated by protoc-gen-go. DO NOT EDIT.
    +// versions:
    +// 	protoc-gen-go v1.26.0
    +// 	protoc        v4.24.4
    +// source: google/api/routing.proto
    +
    +package annotations
    +
    +import (
    +	reflect "reflect"
    +	sync "sync"
    +
    +	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    +	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
    +	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
    +)
    +
    +const (
    +	// Verify that this generated code is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    +	// Verify that runtime/protoimpl is sufficiently up-to-date.
    +	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
    +)
    +
    +// Specifies the routing information that should be sent along with the request
     +// in the form of a routing header.
    +// **NOTE:** All service configuration rules follow the "last one wins" order.
    +//
    +// The examples below will apply to an RPC which has the following request type:
    +//
    +// Message Definition:
    +//
    +//	message Request {
    +//	  // The name of the Table
    +//	  // Values can be of the following formats:
    +//	  // - `projects//tables/`
    +//	  // - `projects//instances//tables/
    ` +// // - `region//zones//tables/
    ` +// string table_name = 1; +// +// // This value specifies routing for replication. +// // It can be in the following formats: +// // - `profiles/` +// // - a legacy `profile_id` that can be any string +// string app_profile_id = 2; +// } +// +// Example message: +// +// { +// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, +// app_profile_id: profiles/prof_qux +// } +// +// The routing header consists of one or multiple key-value pairs. Every key +// and value must be percent-encoded, and joined together in the format of +// `key1=value1&key2=value2`. +// The examples below skip the percent-encoding for readability. +// +// # Example 1 +// +// Extracting a field from the request to put into the routing header +// unchanged, with the key equal to the field name. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `app_profile_id`. +// routing_parameters { +// field: "app_profile_id" +// } +// }; +// +// result: +// +// x-goog-request-params: app_profile_id=profiles/prof_qux +// +// # Example 2 +// +// Extracting a field from the request to put into the routing header +// unchanged, with the key different from the field name. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `app_profile_id`, but name it `routing_id` in the header. +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; +// +// result: +// +// x-goog-request-params: routing_id=profiles/prof_qux +// +// # Example 3 +// +// Extracting a field from the request to put into the routing +// header, while matching a path template syntax on the field's value. +// +// NB: it is more useful to send nothing than to send garbage for the purpose +// of dynamic routing, since garbage pollutes cache. Thus the matching. +// +// # Sub-example 3a +// +// The field matches the template. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with project-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; +// +// result: +// +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// +// # Sub-example 3b +// +// The field does not match the template. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with region-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// }; +// +// result: +// +// +// +// # Sub-example 3c +// +// Multiple alternative conflictingly named path templates are +// specified. The one that matches is used to construct the header. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed, whether +// // using the region- or projects-based syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; +// +// result: +// +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// +// # Example 4 +// +// Extracting a single routing header key-value pair by matching a +// template syntax on (a part of) a single request field. 
+// +// annotation: +// +// option (google.api.routing) = { +// // Take just the project id from the `table_name` field. +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: routing_id=projects/proj_foo +// +// # Example 5 +// +// Extracting a single routing header key-value pair by matching +// several conflictingly named path templates on (parts of) a single request +// field. The last template to match "wins" the conflict. +// +// annotation: +// +// option (google.api.routing) = { +// // If the `table_name` does not have instances information, +// // take just the project id for routing. +// // Otherwise take project + instance. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*/instances/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: +// routing_id=projects/proj_foo/instances/instance_bar +// +// # Example 6 +// +// Extracting multiple routing header key-value pairs by matching +// several non-conflicting path templates on (parts of) a single request field. +// +// # Sub-example 6a +// +// Make the templates strict, so that if the `table_name` does not +// have an instance information, nothing is sent. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing code needs two keys instead of one composite +// // but works only for the tables with the "project-instance" name +// // syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/instances/*/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar +// +// # Sub-example 6b +// +// Make the templates loose, so that if the `table_name` does not +// have an instance information, just the project id part is sent. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing code wants two keys instead of one composite +// // but will work with just the `project_id` for tables without +// // an instance in the `table_name`. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; +// +// result (is the same as 6a for our example message because it has the instance +// information): +// +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar +// +// # Example 7 +// +// Extracting multiple routing header key-value pairs by matching +// several path templates on multiple request fields. +// +// NB: note that here there is no way to specify sending nothing if one of the +// fields does not match its template. E.g. if the `table_name` is in the wrong +// format, the `project_id` will not be sent, but the `routing_id` will be. +// The backend routing code has to be aware of that and be prepared to not +// receive a full complement of keys if it expects multiple. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing needs both `project_id` and `routing_id` +// // (from the `app_profile_id` field) for routing. 
+// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; +// +// result: +// +// x-goog-request-params: +// project_id=projects/proj_foo&routing_id=profiles/prof_qux +// +// # Example 8 +// +// Extracting a single routing header key-value pair by matching +// several conflictingly named path templates on several request fields. The +// last template to match "wins" the conflict. +// +// annotation: +// +// option (google.api.routing) = { +// // The `routing_id` can be a project id or a region id depending on +// // the table name format, but only if the `app_profile_id` is not set. +// // If `app_profile_id` is set it should be used instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=regions/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; +// +// result: +// +// x-goog-request-params: routing_id=profiles/prof_qux +// +// # Example 9 +// +// Bringing it all together. +// +// annotation: +// +// option (google.api.routing) = { +// // For routing both `table_location` and a `routing_id` are needed. +// // +// // table_location can be either an instance id or a region+zone id. +// // +// // For `routing_id`, take the value of `app_profile_id` +// // - If it's in the format `profiles/`, send +// // just the `` part. +// // - If it's any other literal, send it as is. +// // If the `app_profile_id` is empty, and the `table_name` starts with +// // the project_id, send that instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{table_location=instances/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_location=regions/*/zones/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "profiles/{routing_id=*}" +// } +// }; +// +// result: +// +// x-goog-request-params: +// table_location=instances/instance_bar&routing_id=prof_qux +type RoutingRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A collection of Routing Parameter specifications. + // **NOTE:** If multiple Routing Parameters describe the same key + // (via the `path_template` field or via the `field` field when + // `path_template` is not provided), "last one wins" rule + // determines which Parameter gets used. + // See the examples for more details. 
+ RoutingParameters []*RoutingParameter `protobuf:"bytes,2,rep,name=routing_parameters,json=routingParameters,proto3" json:"routing_parameters,omitempty"` +} + +func (x *RoutingRule) Reset() { + *x = RoutingRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_routing_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingRule) ProtoMessage() {} + +func (x *RoutingRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_routing_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingRule.ProtoReflect.Descriptor instead. +func (*RoutingRule) Descriptor() ([]byte, []int) { + return file_google_api_routing_proto_rawDescGZIP(), []int{0} +} + +func (x *RoutingRule) GetRoutingParameters() []*RoutingParameter { + if x != nil { + return x.RoutingParameters + } + return nil +} + +// A projection from an input message to the GRPC or REST header. +type RoutingParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A request field to extract the header key-value pair from. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A pattern matching the key-value field. Optional. + // If not specified, the whole field specified in the `field` field will be + // taken as value, and its name used as key. If specified, it MUST contain + // exactly one named segment (along with any number of unnamed segments) The + // pattern will be matched over the field specified in the `field` field, then + // if the match is successful: + // - the name of the single named segment will be used as a header name, + // - the match value of the segment will be used as a header value; + // if the match is NOT successful, nothing will be sent. + // + // Example: + // + // -- This is a field in the request message + // | that the header value will be extracted from. + // | + // | -- This is the key name in the + // | | routing header. + // V | + // field: "table_name" v + // path_template: "projects/*/{table_location=instances/*}/tables/*" + // ^ ^ + // | | + // In the {} brackets is the pattern that -- | + // specifies what to extract from the | + // field as a value to be sent. | + // | + // The string in the field must match the whole pattern -- + // before brackets, inside brackets, after brackets. + // + // When looking at this specific example, we can see that: + // - A key-value pair with the key `table_location` + // and the value matching `instances/*` should be added + // to the x-goog-request-params routing header. + // - The value is extracted from the request message's `table_name` field + // if it matches the full pattern specified: + // `projects/*/instances/*/tables/*`. + // + // **NB:** If the `path_template` field is not provided, the key name is + // equal to the field name, and the whole field should be sent as a value. 
+ // This makes the pattern for the field and the value functionally equivalent + // to `**`, and the configuration + // + // { + // field: "table_name" + // } + // + // is a functionally equivalent shorthand to: + // + // { + // field: "table_name" + // path_template: "{table_name=**}" + // } + // + // See Example 1 for more details. + PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"` +} + +func (x *RoutingParameter) Reset() { + *x = RoutingParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_routing_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingParameter) ProtoMessage() {} + +func (x *RoutingParameter) ProtoReflect() protoreflect.Message { + mi := &file_google_api_routing_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingParameter.ProtoReflect.Descriptor instead. +func (*RoutingParameter) Descriptor() ([]byte, []int) { + return file_google_api_routing_proto_rawDescGZIP(), []int{1} +} + +func (x *RoutingParameter) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *RoutingParameter) GetPathTemplate() string { + if x != nil { + return x.PathTemplate + } + return "" +} + +var file_google_api_routing_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*RoutingRule)(nil), + Field: 72295729, + Name: "google.api.routing", + Tag: "bytes,72295729,opt,name=routing", + Filename: "google/api/routing.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // See RoutingRule. 
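+ //
+ // Illustrative only (an assumption, not part of the generated API): a code
+ // generator can read this annotation from a method descriptor using
+ // google.golang.org/protobuf/proto, where `method` is some
+ // protoreflect.MethodDescriptor:
+ //
+ //   opts := method.Options().(*descriptorpb.MethodOptions)
+ //   if proto.HasExtension(opts, E_Routing) {
+ //           rule := proto.GetExtension(opts, E_Routing).(*RoutingRule)
+ //           _ = rule.GetRoutingParameters()
+ //   }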
+ // + // optional google.api.RoutingRule routing = 72295729; + E_Routing = &file_google_api_routing_proto_extTypes[0] +) + +var File_google_api_routing_proto protoreflect.FileDescriptor + +var file_google_api_routing_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x22, 0x4d, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x3a, 0x54, 0x0a, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb1, + 0xca, 0xbc, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0c, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_routing_proto_rawDescOnce sync.Once + file_google_api_routing_proto_rawDescData = file_google_api_routing_proto_rawDesc +) + +func file_google_api_routing_proto_rawDescGZIP() []byte { + file_google_api_routing_proto_rawDescOnce.Do(func() { + file_google_api_routing_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_routing_proto_rawDescData) + }) + return file_google_api_routing_proto_rawDescData +} + +var file_google_api_routing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_routing_proto_goTypes = []interface{}{ + 
(*RoutingRule)(nil), // 0: google.api.RoutingRule + (*RoutingParameter)(nil), // 1: google.api.RoutingParameter + (*descriptorpb.MethodOptions)(nil), // 2: google.protobuf.MethodOptions +} +var file_google_api_routing_proto_depIdxs = []int32{ + 1, // 0: google.api.RoutingRule.routing_parameters:type_name -> google.api.RoutingParameter + 2, // 1: google.api.routing:extendee -> google.protobuf.MethodOptions + 0, // 2: google.api.routing:type_name -> google.api.RoutingRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_routing_proto_init() } +func file_google_api_routing_proto_init() { + if File_google_api_routing_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_routing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_routing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_routing_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_routing_proto_goTypes, + DependencyIndexes: file_google_api_routing_proto_depIdxs, + MessageInfos: file_google_api_routing_proto_msgTypes, + ExtensionInfos: file_google_api_routing_proto_extTypes, + }.Build() + File_google_api_routing_proto = out.File + file_google_api_routing_proto_rawDesc = nil + file_google_api_routing_proto_goTypes = nil + file_google_api_routing_proto_depIdxs = nil +} diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go index 9f81dbcd8..af9c44d93 100644 --- a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go index 0a2ffb595..4b4f15477 100644 --- a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go index 57aaa2c9f..ef27e878b 100644 --- a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go index c90c6015d..7b973217e 100644 --- a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go index 0a5ca6a1b..4ba3c7b2a 100644 --- a/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e3..d083dde3e 100644 --- a/tools/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/tools/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 000000000..a69c1d473 --- /dev/null +++ b/tools/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. + LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 + // Early Access features are limited to a closed group of testers. 
To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +// Enum value maps for LaunchStage. +var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. 
+func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: 
file_google_api_launch_stage_proto_depIdxs,
+ EnumInfos: file_google_api_launch_stage_proto_enumTypes,
+ }.Build()
+ File_google_api_launch_stage_proto = out.File
+ file_google_api_launch_stage_proto_rawDesc = nil
+ file_google_api_launch_stage_proto_goTypes = nil
+ file_google_api_launch_stage_proto_depIdxs = nil
+}
diff --git a/tools/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/tools/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
index 3e5621827..e017ef071 100644
--- a/tools/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
+++ b/tools/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -80,11 +80,12 @@ type ErrorInfo struct {
Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"`
// Additional structured details about this error.
//
- // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in
+ // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should
+ // ideally be lowerCamelCase. Also, they must be limited to 64 characters in
// length. When identifying the current value of an exceeded limit, the units
// should be contained in the key, not the value. For example, rather than
- // {"instanceLimit": "100/request"}, should be returned as,
- // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of
+ // `{"instanceLimit": "100/request"}`, it should be returned as
+ // `{"instanceLimitPerRequest": "100"}` if the client exceeds the number of
// instances that can be created in a single (batch) request.
Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
@@ -702,6 +703,65 @@ type QuotaFailure_Violation struct {
// For example: "Service disabled" or "Daily Limit for read operations
// exceeded".
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The API Service from which the `QuotaFailure.Violation` originates. In
+ // some cases, Quota issues originate from an API Service other than the one
+ // that was called. In other words, a dependency of the called API Service
+ // could be the cause of the `QuotaFailure`, and this field would have the
+ // dependency API service name.
+ //
+ // For example, if the called API is Kubernetes Engine API
+ // (container.googleapis.com), and a quota violation occurs in the
+ // Kubernetes Engine API itself, this field would be
+ // "container.googleapis.com". On the other hand, if the quota violation
+ // occurs when the Kubernetes Engine API creates VMs in the Compute Engine
+ // API (compute.googleapis.com), this field would be
+ // "compute.googleapis.com".
+ ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"`
+ // The metric of the violated quota. A quota metric is a named counter to
+ // measure usage, such as API requests or CPUs. When an activity occurs in a
+ // service, such as Virtual Machine allocation, one or more quota metrics
+ // may be affected.
+ //
+ // For example, "compute.googleapis.com/cpus_per_vm_family",
+ // "storage.googleapis.com/internet_egress_bandwidth".
+ QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"`
+ // The id of the violated quota. Also known as "limit name", this is the
+ // unique identifier of a quota in the context of an API service.
+ //
+ // For example, "CPUS-PER-VM-FAMILY-per-project-region".
+ QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"`
+ // The dimensions of the violated quota. Every non-global quota is enforced
+ // on a set of dimensions. While the quota metric defines what to count, the
+ // dimensions specify for what aspects the counter should be increased.
+ //
+ // For example, the quota "CPUs per region per VM family" enforces a limit
+ // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions
+ // "region" and "vm_family". And if the violation occurred in region
+ // "us-central1" and for VM family "n1", the quota_dimensions would be,
+ //
+ // {
+ // "region": "us-central1",
+ // "vm_family": "n1",
+ // }
+ //
+ // When a quota is enforced globally, the quota_dimensions would always be
+ // empty.
+ QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The enforced quota value at the time of the `QuotaFailure`.
+ //
+ // For example, if the enforced quota value at the time of the
+ // `QuotaFailure` on the number of CPUs is "10", then the value of this
+ // field would reflect this quantity.
+ QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"`
+ // The new quota value being rolled out at the time of the violation. At the
+ // completion of the rollout, this value will be enforced in place of
+ // quota_value. If no rollout is in progress at the time of the violation,
+ // this field is not set.
+ //
+ // For example, if at the time of the violation a rollout is in progress
+ // changing the number of CPUs quota from 10 to 20, 20 would be the value of
+ // this field.
+ FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"`
}

func (x *QuotaFailure_Violation) Reset() {
@@ -750,6 +810,48 @@ func (x *QuotaFailure_Violation) GetDescription() string {
return ""
}

+func (x *QuotaFailure_Violation) GetApiService() string {
+ if x != nil {
+ return x.ApiService
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaMetric() string {
+ if x != nil {
+ return x.QuotaMetric
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaId() string {
+ if x != nil {
+ return x.QuotaId
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string {
+ if x != nil {
+ return x.QuotaDimensions
+ }
+ return nil
+}
+
+func (x *QuotaFailure_Violation) GetQuotaValue() int64 {
+ if x != nil {
+ return x.QuotaValue
+ }
+ return 0
+}
+
+func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 {
+ if x != nil && x.FutureQuotaValue != nil {
+ return *x.FutureQuotaValue
+ }
+ return 0
+}
+
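+// Illustrative sketch (an assumption, not part of the generated file): the
+// new Violation fields can be read from a gRPC error's status details via
+// google.golang.org/grpc/status, where `err` is some hypothetical RPC error:
+//
+//   if st, ok := status.FromError(err); ok {
+//           for _, d := range st.Details() {
+//                   if qf, ok := d.(*QuotaFailure); ok {
+//                           for _, v := range qf.GetViolations() {
+//                                   _ = v.GetQuotaId()         // e.g. "CPUS-PER-VM-FAMILY-per-project-region"
+//                                   _ = v.GetQuotaDimensions() // e.g. {"region": "us-central1", "vm_family": "n1"}
+//                           }
+//                   }
+//           }
+//   }
+
+// A message type used to describe a single precondition failure.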
type PreconditionFailure_Violation struct { state protoimpl.MessageState @@ -774,7 +876,7 @@ type PreconditionFailure_Violation struct { func (x *PreconditionFailure_Violation) Reset() { *x = PreconditionFailure_Violation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -787,7 +889,7 @@ func (x *PreconditionFailure_Violation) String() string { func (*PreconditionFailure_Violation) ProtoMessage() {} func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -870,12 +972,22 @@ type BadRequest_FieldViolation struct { Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The reason of the field-level error. This is a constant value that + // identifies the proximate cause of the field-level error. It should + // uniquely identify the type of the FieldViolation within the scope of the + // google.rpc.ErrorInfo.domain. This should be at most 63 + // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, + // which represents UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Provides a localized error message for field-level errors that is safe to + // return to the API consumer. + LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"` } func (x *BadRequest_FieldViolation) Reset() { *x = BadRequest_FieldViolation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -888,7 +1000,7 @@ func (x *BadRequest_FieldViolation) String() string { func (*BadRequest_FieldViolation) ProtoMessage() {} func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -918,6 +1030,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string { return "" } +func (x *BadRequest_FieldViolation) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage { + if x != nil { + return x.LocalizedMessage + } + return nil +} + // Describes a URL link. 
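+
+// For the new BadRequest.FieldViolation fields above, a sketch of how a server
+// might populate them (an assumption, not part of the generated file; the
+// field names come from this message, the values are invented):
+//
+//   v := &BadRequest_FieldViolation{
+//           Field:       "display_name",
+//           Description: "must be non-empty",
+//           Reason:      "FIELD_REQUIRED", // UPPER_SNAKE_CASE, per the Reason comment
+//           LocalizedMessage: &LocalizedMessage{
+//                   Locale:  "en-US",
+//                   Message: "Display name is required.",
+//           },
+//   }
+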
type Help_Link struct { state protoimpl.MessageState @@ -933,7 +1059,7 @@ type Help_Link struct { func (x *Help_Link) Reset() { *x = Help_Link{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -946,7 +1072,7 @@ func (x *Help_Link) String() string { func (*Help_Link) ProtoMessage() {} func (x *Help_Link) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1004,73 +1130,102 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 
0x10, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 
0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x71, + 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19, + 0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f, + 0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, + 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75, + 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, + 0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, + 0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, + 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a, + 0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 
0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, + 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, + 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, + 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, + 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1085,7 +1240,7 @@ func file_google_rpc_error_details_proto_rawDescGZIP() []byte { return file_google_rpc_error_details_proto_rawDescData } -var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo (*RetryInfo)(nil), // 1: google.rpc.RetryInfo @@ -1099,23 +1254,26 @@ var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage nil, // 10: google.rpc.ErrorInfo.MetadataEntry (*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation - (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation - (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation - (*Help_Link)(nil), // 14: google.rpc.Help.Link - (*durationpb.Duration)(nil), // 15: google.protobuf.Duration + nil, // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + (*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 14: 
google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 15: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 16: google.protobuf.Duration } var file_google_rpc_error_details_proto_depIdxs = []int32{ 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry - 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation - 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation - 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 13, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + 9, // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } @@ -1256,7 +1414,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PreconditionFailure_Violation); i { case 0: return &v.state @@ -1268,7 +1426,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BadRequest_FieldViolation); i { case 0: return &v.state @@ -1280,7 +1438,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Help_Link); i { case 0: return &v.state @@ -1293,13 +1451,14 @@ func file_google_rpc_error_details_proto_init() { } } } + file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_rpc_error_details_proto_rawDesc, NumEnums: 0, - NumMessages: 15, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, diff --git a/tools/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go 
b/tools/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 6ad1b1c1d..06a3f7106 100644 --- a/tools/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/tools/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md b/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md index d9bfa6e1e..1de0ce666 100644 --- a/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,73 +1,102 @@ # How to contribute -We definitely welcome your patches and contributions to gRPC! Please read the gRPC -organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) -and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. +We welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance +rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before +proceeding. If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). +[Contributor License +Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create +your first PR, a link will be added as a comment that contains the steps needed +to complete this process. -## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly. +## Getting Started -- Create **small PRs** that are narrowly focused on **addressing a single - concern**. We often times receive PRs that are trying to fix several things at - a time, but only one fix is considered acceptable, nothing gets merged and - both author's & review's time is wasted. Create more PRs to address different - concerns and everyone will be happy. +A great way to start is by searching through our open issues. [Unassigned issues +labeled as "help +wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee) +are especially nice for first-time contributors, as they should be well-defined +problems that already have agreed-upon solutions. -- If you are searching for features to work on, issues labeled [Status: Help - Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) - is a great place to start. These issues are well-documented and usually can be - resolved with a single pull request. +## Code Style -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file - and update the year. +We follow [Google's published Go style +guide](https://google.github.io/styleguide/go/). Note that there are three +primary documents that make up this style guide; please follow them as closely +as possible. If a reviewer recommends something that contradicts those +guidelines, there may be valid reasons to do so, but it should be rare. 
-- The grpc package should only depend on standard Go packages and a small number - of exceptions. If your contribution introduces new dependencies which are NOT - in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a - discussion with gRPC-Go authors and consultants. +## Guidelines for Pull Requests -- For speculative changes, consider opening an issue and discussing it first. If - you are suggesting a behavioral or API change, consider starting with a [gRFC - proposal](https://github.com/grpc/proposal). +How to get your contributions merged smoothly and quickly: + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often receive PRs that attempt to fix several things at the same + time, and if one part of the PR has a problem, that will hold up the entire + PR. + +- For **speculative changes**, consider opening an issue and discussing it + first. If you are suggesting a behavioral or API change, consider starting + with a [gRFC proposal](https://github.com/grpc/proposal). Many new features + that are not bug fixes will require cross-language agreement. + +- If you want to fix **formatting or style**, consider whether your changes are + an obvious improvement or might be considered a personal preference. If a + style change is based on preference, it likely will not be accepted. If it + corrects widely agreed-upon anti-patterns, then please do create a PR and + explain the benefits of the change. + +- For correcting **misspellings**, please be aware that we use some terms that + are sometimes flagged by spell checkers. As an example, "if and only if" is + often written as "iff". Please do not make spelling correction changes unless + you are certain they are misspellings. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the - benefits of the change. + +- Maintain a **clean commit history** and use **meaningful commit messages**. + PRs with messy commit histories are difficult to review and won't be merged. + Before sending your PR, ensure your changes are based on top of the latest + `upstream/master` commits, and avoid rebasing in the middle of a code review. + You should **never use `git push -f`** unless absolutely necessary during a + review, as it can interfere with GitHub's tracking of comments. -- Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We'll mark it as `Status: Requires - Reporter Clarification` if we expect you to respond to these comments in a - timely manner. If the PR remains inactive for 6 days, it will be marked as - `stale` and automatically close 7 days after that if we don't hear back from - you. + +- **All tests need to be passing** before your change can be merged. We + recommend you run tests locally before creating your PR to catch breakages + early on: -- Maintain **clean commit history** and use **meaningful commit messages**. PRs - with messy commit history are difficult to review and won't be merged.
Use - `rebase -i upstream/master` to curate your commit history and/or to bring in - latest changes from master (but avoid rebasing in the middle of a code - review). + - `./scripts/vet.sh` to catch vet errors. + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests. + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode. -- Keep your PR up to date with upstream/master (if there are merge conflicts, we - can't really merge your change). + Note that we have a multi-module repo, so `go test` commands may need to be + run from the root of each module in order to cause all tests to run. -- **All tests need to be passing** before your change can be merged. We - recommend you **run tests locally** before creating your PR to catch breakages - early on. - - `./scripts/vet.sh` to catch vet errors - - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode + *Alternatively*, you may find it easier to push your changes to your fork on + GitHub, which will trigger a GitHub Actions run that you can use to verify + everything is passing. + +- If you are adding a new file, make sure it has the **copyright message** + template at the top as a comment. You can copy the message from an existing + file and update the year. + +- The grpc package should only depend on standard Go packages and a small number + of exceptions. **If your contribution introduces new dependencies**, you will + need a discussion with gRPC-Go maintainers. A GitHub action check will run on + every PR, and will flag any transitive dependency changes from any public + package. + +- Unless your PR is trivial, you should **expect reviewer comments** that you + will need to address before merging. We'll label the PR as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale`, and we will automatically close it after 7 days if we don't hear back + from you. Please feel free to ping issues or bugs if you do not get a response + within a week. -- Exceptions to the rules can be made if there's a compelling reason for doing so. +- Exceptions to the rules can be made if there's a compelling reason to do so. diff --git a/tools/vendor/google.golang.org/grpc/MAINTAINERS.md b/tools/vendor/google.golang.org/grpc/MAINTAINERS.md index 5d4096d46..df35bb9a8 100644 --- a/tools/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/tools/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,19 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) -- [aranjans](https://github.com/aranjans), Google LLC - [arjan-bal](https://github.com/arjan-bal), Google LLC - [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. 
- [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC - [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez) +- [aranjans](https://github.com/aranjans) - [canguler](https://github.com/canguler) - [cesarghali](https://github.com/cesarghali) +- [erm-g](https://github.com/erm-g) - [iamqizhao](https://github.com/iamqizhao) - [jeanbza](https://github.com/jeanbza) - [jtattermusch](https://github.com/jtattermusch) @@ -32,5 +30,7 @@ for general contribution guidelines. - [matt-kwong](https://github.com/matt-kwong) - [menghanl](https://github.com/menghanl) - [nicolasnoble](https://github.com/nicolasnoble) +- [purnesh42h](https://github.com/purnesh42h) - [srini100](https://github.com/srini100) - [yongni](https://github.com/yongni) +- [zasweq](https://github.com/zasweq) diff --git a/tools/vendor/google.golang.org/grpc/README.md b/tools/vendor/google.golang.org/grpc/README.md index b572707c6..f9a88d597 100644 --- a/tools/vendor/google.golang.org/grpc/README.md +++ b/tools/vendor/google.golang.org/grpc/README.md @@ -32,6 +32,7 @@ import "google.golang.org/grpc" - [Low-level technical docs](Documentation) from this repository - [Performance benchmark][] - [Examples](examples) +- [Contribution guidelines](CONTRIBUTING.md) ## FAQ diff --git a/tools/vendor/google.golang.org/grpc/balancer/balancer.go b/tools/vendor/google.golang.org/grpc/balancer/balancer.go index 3a2092f10..b1264017d 100644 --- a/tools/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/tools/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,17 +73,6 @@ func unregisterForTesting(name string) { delete(m, name) } -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -106,57 +95,6 @@ func Get(name string) Builder { return nil } -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. 
-type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully close, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which may be - // called when the Producer is no longer needed. Otherwise the producer - // will automatically be closed upon connection loss or subchannel close. - // Should only be called on a SubConn in state Ready. Otherwise the - // producer will be unable to create streams. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -191,6 +129,13 @@ type State struct { // brand new implementation of this interface. For the situations like // testing, the new implementation should embed this interface. This allows // gRPC to add new methods to this interface. +// +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted +// by custom load balancing policies. Users should not need their own complete +// implementation of this interface -- they should always delegate to a +// ClientConn passed to Builder.Build() by embedding it in their +// implementations. An embedded ClientConn must never be nil, or runtime panics +// will occur. type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. @@ -229,6 +174,17 @@ type ClientConn interface { // // Deprecated: Use the Target field in the BuildOptions instead. Target() string + + // MetricsRecorder provides the metrics recorder that balancers can use to + // record metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this method. The returned + // MetricsRecorder is guaranteed to never be nil. + MetricsRecorder() estats.MetricsRecorder + + // EnforceClientConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceClientConnEmbedding } // BuildOptions contains additional information for Build. @@ -260,10 +216,6 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains.
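The NOTICE added above implies a concrete pattern for tests and interceptors: wrap a real ClientConn and embed it, rather than implementing the interface from scratch. A minimal sketch under that reading (the loggingClientConn type and its log output are illustrative, not part of this patch):

package example

import (
	"log"

	"google.golang.org/grpc/balancer"
)

// loggingClientConn embeds a real balancer.ClientConn (which must never be
// nil, per the NOTICE) so gRPC can add interface methods without breaking it,
// and intercepts only UpdateState.
type loggingClientConn struct {
	balancer.ClientConn
}

func (c *loggingClientConn) UpdateState(s balancer.State) {
	log.Printf("picker update: %v", s.ConnectivityState)
	c.ClientConn.UpdateState(s) // delegate everything else to the embedded impl
}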
Target resolver.Target - // MetricsRecorder is the metrics recorder that balancers can use to record - // metrics. Balancer implementations which do not register metrics on - // metrics registry and record on them can ignore this field. - MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -408,6 +360,10 @@ type Balancer interface { // call SubConn.Shutdown for its existing SubConns; however, this will be // required in a future release, so it is recommended. Close() + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() } // ExitIdler is an optional interface for balancers to implement. If @@ -415,8 +371,8 @@ type Balancer interface { // the ClientConn is idle. If unimplemented, ClientConn.Connect will cause // all SubConns to connect. // -// Notice: it will be required for all balancers to implement this in a future -// release. +// Deprecated: All balancers must implement this interface. This interface will +// be removed in a future release. type ExitIdler interface { // ExitIdle instructs the LB policy to reconnect to backends / exit the // IDLE state, if appropriate and possible. Note that SubConns that enter @@ -424,18 +380,6 @@ type ExitIdler interface { ExitIdle() } -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address -} - // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -448,22 +392,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Build also returns a close function that will be called when all - // references to the Producer have been given up for a SubConn, or when a - // connectivity state change occurs on the SubConn. The close function - // should always block until all asynchronous cleanup work is completed. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. 
-type Producer any diff --git a/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go b/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go index d5ed172ae..4d576876d 100644 --- a/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: resolver.NewAddressMap(), + subConns: resolver.NewAddressMapV2[balancer.SubConn](), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -65,7 +65,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns *resolver.AddressMap + subConns *resolver.AddressMapV2[balancer.SubConn] scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := resolver.NewAddressMap() + addrsSet := resolver.NewAddressMapV2[any]() for _, a := range s.ResolverState.Addresses { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { @@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } for _, a := range b.subConns.Keys() { - sci, _ := b.subConns.Get(a) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(a) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { sc.Shutdown() @@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() { // Filter out all ready SCs from full subConn map. for _, addr := range b.subConns.Keys() { - sci, _ := b.subConns.Get(addr) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(addr) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } diff --git a/tools/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/tools/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go new file mode 100644 index 000000000..360db08eb --- /dev/null +++ b/tools/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -0,0 +1,389 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package endpointsharding implements a load balancing policy that manages +// homogeneous child policies each owning a single endpoint. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. 
+package endpointsharding + +import ( + "errors" + rand "math/rand/v2" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var randIntN = rand.IntN + +// ChildState is the balancer state of a child along with the endpoint which +// identifies the child balancer. +type ChildState struct { + Endpoint resolver.Endpoint + State balancer.State + + // Balancer exposes only the ExitIdler interface of the child LB policy. + // Other methods of the child policy are called only by endpointsharding. + Balancer ExitIdler +} + +// ExitIdler provides access to only the ExitIdle method of the child balancer. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + +// Options are the options to configure the behaviour of the +// endpointsharding balancer. +type Options struct { + // DisableAutoReconnect allows the balancer to keep child balancers in the + // IDLE state until they are explicitly triggered to exit using the + // ChildState obtained from the endpointsharding picker. When set to false, + // the endpointsharding balancer will automatically call ExitIdle on child + // connections that report IDLE. + DisableAutoReconnect bool +} + +// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same +// type as the balancer.Builder.Build method. +type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer + +// NewBalancer returns a load balancing policy that manages homogeneous child +// policies each owning a single endpoint. The endpointsharding balancer +// forwards the LoadBalancingConfig in ClientConn state updates to its children. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer { + es := &endpointSharding{ + cc: cc, + bOpts: opts, + esOpts: esOpts, + childBuilder: childBuilder, + } + es.children.Store(resolver.NewEndpointMap[*balancerWrapper]()) + return es +} + +// endpointSharding is a balancer that wraps child balancers. It creates a child +// balancer with child config for every unique Endpoint received. It updates the +// child states on any update from parent or child. +type endpointSharding struct { + cc balancer.ClientConn + bOpts balancer.BuildOptions + esOpts Options + childBuilder ChildBuilderFunc + + // childMu synchronizes calls to any single child. It must be held for all + // calls into a child. To avoid deadlocks, do not acquire childMu while + // holding mu. + childMu sync.Mutex + children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]] + + // inhibitChildUpdates is set during UpdateClientConnState/ResolverError + // calls (calls to children will each produce an update, only want one + // update). + inhibitChildUpdates atomic.Bool + + // mu synchronizes access to the state stored in balancerWrappers in the + // children field. mu must not be held during calls into a child since + // synchronous calls back from the child may require taking mu, causing a + // deadlock. To avoid deadlocks, do not acquire childMu while holding mu. + mu sync.Mutex +} + +// rotateEndpoints returns a slice of all the input endpoints rotated a random +// amount.
+func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint { + les := len(es) + if les == 0 { + return es + } + r := randIntN(les) + // Make a copy to avoid mutating data beyond the end of es. + ret := make([]resolver.Endpoint, les) + copy(ret, es[r:]) + copy(ret[les-r:], es[:r]) + return ret +} + +// UpdateClientConnState creates a child for new endpoints and deletes children +// for endpoints that are no longer present. It also updates all the children, +// and sends a single synchronous update of the children's aggregated state at +// the end of the UpdateClientConnState operation. If any endpoint has no +// addresses, it will ignore that endpoint. Otherwise, returns first error found +// from a child, but fully processes the new update. +func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error { + es.childMu.Lock() + defer es.childMu.Unlock() + + es.inhibitChildUpdates.Store(true) + defer func() { + es.inhibitChildUpdates.Store(false) + es.updateState() + }() + var ret error + + children := es.children.Load() + newChildren := resolver.NewEndpointMap[*balancerWrapper]() + + // Update/Create new children. + for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) { + if _, ok := newChildren.Get(endpoint); ok { + // Endpoint child was already created, continue to avoid duplicate + // update. + continue + } + childBalancer, ok := children.Get(endpoint) + if ok { + // Endpoint attributes may have changed, update the stored endpoint. + es.mu.Lock() + childBalancer.childState.Endpoint = endpoint + es.mu.Unlock() + } else { + childBalancer = &balancerWrapper{ + childState: ChildState{Endpoint: endpoint}, + ClientConn: es.cc, + es: es, + } + childBalancer.childState.Balancer = childBalancer + childBalancer.child = es.childBuilder(childBalancer, es.bOpts) + } + newChildren.Set(endpoint, childBalancer) + if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{ + BalancerConfig: state.BalancerConfig, + ResolverState: resolver.State{ + Endpoints: []resolver.Endpoint{endpoint}, + Attributes: state.ResolverState.Attributes, + }, + }); err != nil && ret == nil { + // Return first error found, and always commit full processing of + // updating children. If desired to process more specific errors + // across all endpoints, caller should make these specific + // validations, this is a current limitation for simplicity's sake. + ret = err + } + } + // Delete old children that are no longer present. + for _, e := range children.Keys() { + child, _ := children.Get(e) + if _, ok := newChildren.Get(e); !ok { + child.closeLocked() + } + } + es.children.Store(newChildren) + if newChildren.Len() == 0 { + return balancer.ErrBadResolverState + } + return ret +} + +// ResolverError forwards the resolver error to all of the endpointSharding's +// children and sends a single synchronous update of the childStates at the end +// of the ResolverError operation. +func (es *endpointSharding) ResolverError(err error) { + es.childMu.Lock() + defer es.childMu.Unlock() + es.inhibitChildUpdates.Store(true) + defer func() { + es.inhibitChildUpdates.Store(false) + es.updateState() + }() + children := es.children.Load() + for _, child := range children.Values() { + child.resolverErrorLocked(err) + } +} + +func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { + // UpdateSubConnState is deprecated.
+} + +func (es *endpointSharding) Close() { + es.childMu.Lock() + defer es.childMu.Unlock() + children := es.children.Load() + for _, child := range children.Values() { + child.closeLocked() + } +} + +func (es *endpointSharding) ExitIdle() { + es.childMu.Lock() + defer es.childMu.Unlock() + for _, bw := range es.children.Load().Values() { + if !bw.isClosed { + bw.child.ExitIdle() + } + } +} + +// updateState updates this component's state. It sends the aggregated state, +// and a picker with round robin behavior with all the child states present if +// needed. +func (es *endpointSharding) updateState() { + if es.inhibitChildUpdates.Load() { + return + } + var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker + + es.mu.Lock() + defer es.mu.Unlock() + + children := es.children.Load() + childStates := make([]ChildState, 0, children.Len()) + + for _, child := range children.Values() { + childState := child.childState + childStates = append(childStates, childState) + childPicker := childState.State.Picker + switch childState.State.ConnectivityState { + case connectivity.Ready: + readyPickers = append(readyPickers, childPicker) + case connectivity.Connecting: + connectingPickers = append(connectingPickers, childPicker) + case connectivity.Idle: + idlePickers = append(idlePickers, childPicker) + case connectivity.TransientFailure: + transientFailurePickers = append(transientFailurePickers, childPicker) + // connectivity.Shutdown shouldn't appear. + } + } + + // Construct the round robin picker based on the aggregated state. Whatever + // the aggregated state is, use only the pickers that are currently in that + // state. + var aggState connectivity.State + var pickers []balancer.Picker + if len(readyPickers) >= 1 { + aggState = connectivity.Ready + pickers = readyPickers + } else if len(connectingPickers) >= 1 { + aggState = connectivity.Connecting + pickers = connectingPickers + } else if len(idlePickers) >= 1 { + aggState = connectivity.Idle + pickers = idlePickers + } else if len(transientFailurePickers) >= 1 { + aggState = connectivity.TransientFailure + pickers = transientFailurePickers + } else { + aggState = connectivity.TransientFailure + pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))} + } // No children (resolver error before valid update). + p := &pickerWithChildStates{ + pickers: pickers, + childStates: childStates, + next: uint32(randIntN(len(pickers))), + } + es.cc.UpdateState(balancer.State{ + ConnectivityState: aggState, + Picker: p, + }) +} + +// pickerWithChildStates delegates to the pickers it holds in a round robin +// fashion. It also contains the childStates of all the endpointSharding's +// children. +type pickerWithChildStates struct { + pickers []balancer.Picker + childStates []ChildState + next uint32 +} + +func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + nextIndex := atomic.AddUint32(&p.next, 1) + picker := p.pickers[nextIndex%uint32(len(p.pickers))] + return picker.Pick(info) +} + +// ChildStatesFromPicker returns the state of all the children managed by the +// endpoint sharding balancer that created this picker. +func ChildStatesFromPicker(picker balancer.Picker) []ChildState { + p, ok := picker.(*pickerWithChildStates) + if !ok { + return nil + } + return p.childStates +} + +// balancerWrapper is a wrapper of a balancer. It identifies a child balancer by +// endpoint, and persists recent child balancer state.
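The exported ChildStatesFromPicker above is the hook parent policies use to observe per-endpoint state. A minimal sketch, assuming a hypothetical parent wrapper (parentCC is illustrative, not part of this patch) that intercepts UpdateState from endpointsharding and kicks idle children, e.g. when Options.DisableAutoReconnect is set:

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/endpointsharding"
	"google.golang.org/grpc/connectivity"
)

// parentCC is a hypothetical ClientConn wrapper that a parent policy passes
// to endpointsharding so it can inspect aggregated child states.
type parentCC struct {
	balancer.ClientConn
}

func (p *parentCC) UpdateState(state balancer.State) {
	for _, cs := range endpointsharding.ChildStatesFromPicker(state.Picker) {
		if cs.State.ConnectivityState == connectivity.Idle {
			cs.Balancer.ExitIdle() // reconnect children left in IDLE
		}
	}
	p.ClientConn.UpdateState(state) // forward the aggregate upward
}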
+type balancerWrapper struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + + // child contains the wrapped balancer. Access its methods only through + // methods on balancerWrapper to ensure proper synchronization. + child balancer.Balancer + balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns + + es *endpointSharding + + // Access to the following fields is guarded by es.mu. + + childState ChildState + isClosed bool +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + bw.es.mu.Lock() + bw.childState.State = state + bw.es.mu.Unlock() + if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect { + bw.ExitIdle() + } + bw.es.updateState() +} + +// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to +// avoid deadlocks due to synchronous balancer state updates. +func (bw *balancerWrapper) ExitIdle() { + go func() { + bw.es.childMu.Lock() + if !bw.isClosed { + bw.child.ExitIdle() + } + bw.es.childMu.Unlock() + }() +} + +// updateClientConnStateLocked delivers the ClientConnState to the child +// balancer. Callers must hold the child mutex of the parent endpointsharding +// balancer. +func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error { + return bw.child.UpdateClientConnState(ccs) +} + +// closeLocked closes the child balancer. Callers must hold the child mutex of +// the parent endpointsharding balancer. +func (bw *balancerWrapper) closeLocked() { + bw.child.Close() + bw.isClosed = true +} + +func (bw *balancerWrapper) resolverErrorLocked(err error) { + bw.child.ResolverError(err) +} diff --git a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go index c51978945..7d66cb491 100644 --- a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go +++ b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -18,7 +18,18 @@ // Package internal contains code internal to the pickfirst package. package internal -import "math/rand" +import ( + rand "math/rand/v2" + "time" +) -// RandShuffle pseudo-randomizes the order of addresses. -var RandShuffle = rand.Shuffle +var ( + // RandShuffle pseudo-randomizes the order of addresses. + RandShuffle = rand.Shuffle + // TimeAfterFunc allows mocking the timer for testing connection delay + // related functionality.
+ TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } + } +) diff --git a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index e069346a7..ea8899818 100644 --- a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,7 +23,7 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" diff --git a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 985b6edc7..67f315a0d 100644 --- a/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -29,11 +29,15 @@ import ( "encoding/json" "errors" "fmt" + "net" + "net/netip" "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -50,26 +54,68 @@ func init() { balancer.Register(pickfirstBuilder{}) } +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + var ( logger = grpclog.Component("pick-first-leaf-lb") // Name is the name of the pick_first_leaf balancer. // It is changed to "pick_first" in init() if this balancer is to be // registered as the default pickfirst. - Name = "pick_first_leaf" + Name = "pick_first_leaf" + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "{disconnection}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) ) -// TODO: change to pick-first when this becomes the default pick_first policy. -const logPrefix = "[pick-first-leaf-lb %p] " +const ( + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. 
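The TimeAfterFunc hook introduced in this hunk is what pickfirstleaf uses to schedule the 250ms connectionDelayInterval between happy-eyeballs attempts. A sketch of how a test inside grpc-go could stub it for determinism (stubTimer is an illustrative helper; the internal package is only importable from within the grpc-go module):

package pickfirstleaf_test

import (
	"time"

	"google.golang.org/grpc/balancer/pickfirst/internal"
)

// stubTimer makes the happy-eyeballs delay elapse immediately and returns a
// restore function for test cleanup.
func stubTimer() (restore func()) {
	orig := internal.TimeAfterFunc
	internal.TimeAfterFunc = func(_ time.Duration, f func()) func() {
		f()               // fire the callback synchronously
		return func() {} // the returned stop function becomes a no-op
	}
	return func() { internal.TimeAfterFunc = orig }
}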
+ ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 +) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{ - cc: cc, - addressList: addressList{}, - subConns: resolver.NewAddressMap(), - state: connectivity.Connecting, - mu: sync.Mutex{}, + cc: cc, + target: bo.Target.String(), + metricsRecorder: cc.MetricsRecorder(), + + subConns: resolver.NewAddressMapV2[*scData](), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -87,6 +133,13 @@ func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalan return cfg, nil } +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -104,14 +157,19 @@ type scData struct { subConn balancer.SubConn addr resolver.Address - state connectivity.State - lastErr error + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. + effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { sd := &scData{ - state: connectivity.Idle, - addr: addr, + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ StateListener: func(state balancer.SubConnState) { @@ -128,19 +186,25 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { type pickfirstBalancer struct { // The following fields are initialized at build time and read-only after // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non-nil // The mutex is used to ensure synchronization of updates triggered // from the idle picker and the already serialized resolver, // SubConn state updates. - mu sync.Mutex + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. state connectivity.State // scData for active subconns mapped by address.
- subConns *resolver.AddressMap - addressList addressList - firstPass bool - numTF int + subConns *resolver.AddressMapV2[*scData] + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } // ResolverError is called by the ClientConn when the name resolver produces @@ -166,7 +230,7 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { return } - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) @@ -175,15 +239,16 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { b.mu.Lock() defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // Cleanup state pertaining to the previous resolver state. // Treat an empty address list like an error by calling b.ResolverError. - b.state = connectivity.TransientFailure b.closeSubConnsLocked() b.addressList.updateAddrs(nil) b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) @@ -206,9 +271,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // "Flatten the list by concatenating the ordered list of addresses for // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8305 section 4." - A61 - // TODO: support the above language. newAddrs = append(newAddrs, endpoint.Addresses...) } } else { @@ -231,16 +293,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Not de-duplicating would result in attempting to connect to the same // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) - // Since we have a new set of addresses, we are again at first pass. - b.firstPass = true - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready b.addressList.updateAddrs(newAddrs) - if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } @@ -252,18 +315,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // we should still enter CONNECTING because the sticky TF behaviour // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported // due to connectivity failures. 
- if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { // Start connection attempt at first address. - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ + b.forceUpdateConcludedStateLocked(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - b.requestConnectionLocked() + b.startFirstPassLocked() } else if b.state == connectivity.TransientFailure { // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until // we're READY. See A62. - b.requestConnectionLocked() + b.startFirstPassLocked() } return nil } @@ -278,6 +340,7 @@ func (b *pickfirstBalancer) Close() { b.mu.Lock() defer b.mu.Unlock() b.closeSubConnsLocked() + b.cancelConnectionTimer() b.state = connectivity.Shutdown } @@ -287,22 +350,31 @@ func (b *pickfirstBalancer) Close() { func (b *pickfirstBalancer) ExitIdle() { b.mu.Lock() defer b.mu.Unlock() - if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { - b.firstPass = true - b.requestConnectionLocked() + if b.state == connectivity.Idle { + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.connectionFailedInFirstPass = false } + b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { for _, sd := range b.subConns.Values() { - sd.(*scData).subConn.Shutdown() + sd.subConn.Shutdown() } - b.subConns = resolver.NewAddressMap() + b.subConns = resolver.NewAddressMapV2[*scData]() } // deDupAddresses ensures that each address appears only once in the slice. func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMap() + seenAddrs := resolver.NewAddressMapV2[*scData]() retAddrs := []resolver.Address{} for _, addr := range addrs { @@ -314,6 +386,70 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. 
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + // reconcileSubConnsLocked updates the active subchannels based on a new address // list from the resolver. It does this by: // - closing subchannels: any existing subchannels associated with addresses @@ -324,7 +460,7 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { // This ensures that the subchannel map accurately reflects the current set of // addresses received from the name resolver. func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMap() + newAddrsMap := resolver.NewAddressMapV2[bool]() for _, addr := range newAddrs { newAddrsMap.Set(addr, true) } @@ -334,7 +470,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) continue } val, _ := b.subConns.Get(oldAddr) - val.(*scData).subConn.Shutdown() + val.subConn.Shutdown() b.subConns.Delete(oldAddr) } } @@ -342,13 +478,13 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn // becomes ready, which means that all other SubConns must be shut down.
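To make the address classification above concrete, here is a standalone sketch; the family helper re-derives what the unexported addressFamily does, since it cannot be imported from outside the package:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

// family mirrors addressFamily: strip the port, then classify the host as
// IPv4, IPv6, or unknown (e.g. a hostname under the passthrough resolver).
func family(address string) string {
	host, _, err := net.SplitHostPort(address)
	if err != nil {
		return "unknown"
	}
	ip, err := netip.ParseAddr(host)
	if err != nil {
		return "unknown"
	}
	switch {
	case ip.Is4() || ip.Is4In6():
		return "v4"
	case ip.Is6():
		return "v6"
	}
	return "unknown"
}

func main() {
	for _, a := range []string{"10.0.0.1:443", "[2001:db8::1]:443", "example.com:443"} {
		fmt.Println(a, "->", family(a)) // v4, v6, unknown
	}
}

With this grouping, an input of [v6a, v6b, v4a] interleaves to [v6a, v4a, v6b]: one member of the first-seen family, then the families alternate until each is exhausted.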
func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { - for _, v := range b.subConns.Values() { - sd := v.(*scData) + b.cancelConnectionTimer() + for _, sd := range b.subConns.Values() { if sd.subConn != selected.subConn { sd.subConn.Shutdown() } } - b.subConns = resolver.NewAddressMap() + b.subConns = resolver.NewAddressMapV2[*scData]() b.subConns.Set(selected.addr, selected) } @@ -381,47 +517,89 @@ func (b *pickfirstBalancer) requestConnectionLocked() { b.subConns.Set(curAddr, sd) } - scd := sd.(*scData) - switch scd.state { + switch sd.rawConnectivityState { case connectivity.Idle: - scd.subConn.Connect() + sd.subConn.Connect() + b.scheduleNextConnectionLocked() + return case connectivity.TransientFailure: - // Try the next address. - lastErr = scd.lastErr + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr continue - case connectivity.Ready: - // Should never happen. - b.logger.Errorf("Requesting a connection even though we have a READY SubConn") - case connectivity.Shutdown: - // Should never happen. - b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") case connectivity.Connecting: - // Wait for the SubConn to report success or failure. + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) + return + } - return } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass. - b.endFirstPassLocked(lastErr) + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) } func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() - oldState := sd.state - sd.state = newState.ConnectivityState + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState // Previously relevant SubConns can still callback with state updates. // To prevent pickers from returning these obsolete SubConns, this logic // is included to check if the current list of active SubConns includes this // SubConn. 
- if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + if !b.isActiveSCData(sd) { return } if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown return } + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) b.shutdownRemainingLocked(sd) if !b.addressList.seekTo(sd.addr) { // This should not fail as we should have only one SubConn after @@ -429,10 +607,30 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses) return } - b.state = connectivity.Ready - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) }) return } @@ -443,13 +641,24 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // a transport is successfully created, but the connection fails // before the SubConn can send the notification for READY. We treat // this as a successful connection and transition to IDLE. - if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { // Once a transport fails, the balancer enters IDLE and starts from // the first address when the picker is used. b.shutdownRemainingLocked(sd) - b.state = connectivity.Idle + sd.effectiveState = newState.ConnectivityState + // A READY SubConn was interleaved between CONNECTING and IDLE; we need + // to account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it.
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) b.addressList.reset() - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.Idle, Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, }) @@ -459,32 +668,35 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if b.firstPass { switch newState.ConnectivityState { case connectivity.Connecting: - // The balancer can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in // TRANSIENT_FAILURE until it's READY. See A62. - // If the balancer is already in CONNECTING, no update is needed. - if b.state == connectivity.Idle { - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) } case connectivity.TransientFailure: sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. We ignore such updates. - - if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - return - } - if b.addressList.increment() { - b.requestConnectionLocked() - return + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } } - // End of the first pass. - b.endFirstPassLocked(newState.ConnectionError) + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) } return } @@ -495,7 +707,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.numTF = (b.numTF + 1) % b.subConns.Len() sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: newState.ConnectionError}, }) @@ -509,24 +721,93 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub } } -func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. 
+    for _, sd := range b.subConns.Values() {
+        if !sd.connectionFailedInFirstPass {
+            return
+        }
+    }
     b.firstPass = false
-    b.numTF = 0
-    b.state = connectivity.TransientFailure
-
-    b.cc.UpdateState(balancer.State{
+    b.updateBalancerState(balancer.State{
         ConnectivityState: connectivity.TransientFailure,
         Picker:            &picker{err: lastErr},
     })
     // Start re-connecting all the SubConns that are already in IDLE.
-    for _, v := range b.subConns.Values() {
-        sd := v.(*scData)
-        if sd.state == connectivity.Idle {
+    for _, sd := range b.subConns.Values() {
+        if sd.rawConnectivityState == connectivity.Idle {
             sd.subConn.Connect()
         }
     }
 }
 
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+    activeSD, found := b.subConns.Get(sd.addr)
+    return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+    b.mu.Lock()
+    defer b.mu.Unlock()
+    // Previously relevant SubConns can still callback with state updates.
+    // To prevent pickers from returning these obsolete SubConns, this logic
+    // is included to check if the current list of active SubConns includes
+    // this SubConn.
+    if !b.isActiveSCData(sd) {
+        return
+    }
+    sd.effectiveState = state.ConnectivityState
+    switch state.ConnectivityState {
+    case connectivity.Ready:
+        b.updateBalancerState(balancer.State{
+            ConnectivityState: connectivity.Ready,
+            Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+        })
+    case connectivity.TransientFailure:
+        b.updateBalancerState(balancer.State{
+            ConnectivityState: connectivity.TransientFailure,
+            Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+        })
+    case connectivity.Connecting:
+        b.updateBalancerState(balancer.State{
+            ConnectivityState: connectivity.Connecting,
+            Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+        })
+    default:
+        b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
+    }
+}
+
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+    // In the case of TRANSIENT_FAILURE, allow the picker to be updated so
+    // that the latest connectivity error is surfaced; in all other cases,
+    // don't send duplicate state updates.
+    if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+        return
+    }
+    b.forceUpdateConcludedStateLocked(newState)
+}
+
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force-update the ClientConn state since
+// the channel doesn't correctly assume that LB policies start in CONNECTING
+// and instead relies on the LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+    b.state = newState.ConnectivityState
+    b.cc.UpdateState(newState)
+}
+
 type picker struct {
     result balancer.PickResult
     err    error
@@ -583,15 +864,6 @@ func (al *addressList) currentAddress() resolver.Address {
     return al.addresses[al.idx]
 }
 
-// first returns the first address in the list. If the list is empty, it returns
-// an empty address instead.
-func (al *addressList) first() resolver.Address {
-    if len(al.addresses) == 0 {
-        return resolver.Address{}
-    }
-    return al.addresses[0]
-}
-
 func (al *addressList) reset() {
     al.idx = 0
 }
@@ -614,12 +886,21 @@ func (al *addressList) seekTo(needle resolver.Address) bool {
     return false
 }
 
+// hasNext returns whether incrementing the addressList will result in moving
+// past the end of the list. If the list has already moved past the end, it
+// returns false.
+func (al *addressList) hasNext() bool {
+    if !al.isValid() {
+        return false
+    }
+    return al.idx+1 < len(al.addresses)
+}
+
 // equalAddressIgnoringBalAttributes returns true if a and b are considered
 // equal. This is different from the Equal method on the resolver.Address type
 // which considers all fields to determine equality. Here, we only consider
 // fields that are meaningful to the SubConn.
 func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
     return a.Addr == b.Addr && a.ServerName == b.ServerName &&
-        a.Attributes.Equal(b.Attributes) &&
-        a.Metadata == b.Metadata
+        a.Attributes.Equal(b.Attributes)
 }
diff --git a/tools/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/tools/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 260255d31..22045bf39 100644
--- a/tools/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/tools/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,12 +22,13 @@ package roundrobin
 
 import (
-    "math/rand"
-    "sync/atomic"
+    "fmt"
 
     "google.golang.org/grpc/balancer"
-    "google.golang.org/grpc/balancer/base"
+    "google.golang.org/grpc/balancer/endpointsharding"
+    "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
     "google.golang.org/grpc/grpclog"
+    internalgrpclog "google.golang.org/grpc/internal/grpclog"
 )
 
 // Name is the name of round_robin balancer.
@@ -35,47 +36,37 @@ const Name = "round_robin"
 
 var logger = grpclog.Component("roundrobin")
 
-// newBuilder creates a new roundrobin balancer builder.
-func newBuilder() balancer.Builder {
-    return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
-}
-
 func init() {
-    balancer.Register(newBuilder())
+    balancer.Register(builder{})
 }
 
-type rrPickerBuilder struct{}
+type builder struct{}
 
-func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
-    logger.Infof("roundrobinPicker: Build called with info: %v", info)
-    if len(info.ReadySCs) == 0 {
-        return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
-    }
-    scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
-    for sc := range info.ReadySCs {
-        scs = append(scs, sc)
-    }
-    return &rrPicker{
-        subConns: scs,
-        // Start at a random index, as the same RR balancer rebuilds a new
-        // picker when SubConn states change, and we don't want to apply excess
-        // load to the first server in the list.
-        next: uint32(rand.Intn(len(scs))),
-    }
+func (bb builder) Name() string {
+    return Name
 }
 
-type rrPicker struct {
-    // subConns is the snapshot of the roundrobin balancer when this picker was
-    // created. The slice is immutable. Each Get() will do a round robin
-    // selection from it and return the selected SubConn.
-    subConns []balancer.SubConn
-    next     uint32
+func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+    childBuilder := balancer.Get(pickfirstleaf.Name).Build
+    bal := &rrBalancer{
+        cc:       cc,
+        Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
+    }
+    bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal))
+    bal.logger.Infof("Created")
+    return bal
 }
 
-func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
-    subConnsLen := uint32(len(p.subConns))
-    nextIndex := atomic.AddUint32(&p.next, 1)
+type rrBalancer struct {
+    balancer.Balancer
+    cc     balancer.ClientConn
+    logger *internalgrpclog.PrefixLogger
+}
 
-    sc := p.subConns[nextIndex%subConnsLen]
-    return balancer.PickResult{SubConn: sc}, nil
+func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
+    return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
+        // Enable the health listener in pickfirst children for client side health
+        // checks and outlier detection, if configured.
+        ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+    })
 }
diff --git a/tools/vendor/google.golang.org/grpc/balancer/subconn.go b/tools/vendor/google.golang.org/grpc/balancer/subconn.go
new file mode 100644
index 000000000..9ee44d4af
--- /dev/null
+++ b/tools/vendor/google.golang.org/grpc/balancer/subconn.go
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import (
+    "google.golang.org/grpc/connectivity"
+    "google.golang.org/grpc/internal"
+    "google.golang.org/grpc/resolver"
+)
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger a
+// connection attempt, Balancers must call Connect.
+//
+// If the connection attempt fails, the SubConn will transition to
+// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the
+// connection attempt succeeds, it will transition to READY.
+//
+// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
+//
+// If a connection re-enters IDLE, Balancers must call Connect again to trigger
+// a new connection attempt.
+//
+// Each SubConn contains a list of addresses. gRPC will try to connect to the
+// addresses in sequence, and stop trying the remainder once the first
+// connection is successful. However, this behavior is deprecated. SubConns
+// should only use a single address.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a SubConn
+// returned by ClientConn.NewSubConn() by embedding it in their implementations.
+// An embedded SubConn must never be nil, or runtime panics will occur.
+type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully close, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() + // RegisterHealthListener registers a health listener that receives health + // updates for a Ready SubConn. Only one health listener can be registered + // at a time. A health listener should be registered each time the SubConn's + // connectivity state changes to READY. Registering a health listener when + // the connectivity state is not READY may result in undefined behaviour. + // This method must not be called synchronously while handling an update + // from a previously registered health listener. + RegisterHealthListener(func(SubConnState)) + // EnforceSubConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceSubConnEmbedding +} + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. 
+    ConnectionError error
+    // connectedAddress contains the connected address when ConnectivityState
+    // is Ready. Otherwise, it is indeterminate.
+    connectedAddress resolver.Address
+}
+
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+    return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+    scs.connectedAddress = addr
+}
+
+// A Producer is a type shared among potentially many consumers. It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer any
diff --git a/tools/vendor/google.golang.org/grpc/balancer_wrapper.go b/tools/vendor/google.golang.org/grpc/balancer_wrapper.go
index 2a4f2878a..948a21ef6 100644
--- a/tools/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/tools/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -26,6 +26,7 @@ import (
     "google.golang.org/grpc/balancer"
     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/connectivity"
+    "google.golang.org/grpc/experimental/stats"
     "google.golang.org/grpc/internal"
     "google.golang.org/grpc/internal/balancer/gracefulswitch"
     "google.golang.org/grpc/internal/channelz"
@@ -34,7 +35,15 @@ import (
     "google.golang.org/grpc/status"
 )
 
-var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+var (
+    setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+    // noOpRegisterHealthListenerFn is used when client side health checking is
+    // disabled. It sends a single READY update on the registered listener.
+    noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
+        listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
+        return func() {}
+    }
+)
 
 // ccBalancerWrapper sits between the ClientConn and the Balancer.
 //
@@ -51,6 +60,7 @@ var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnSt
 // It uses the gracefulswitch.Balancer internally to ensure that balancer
 // switches happen in a graceful manner.
 type ccBalancerWrapper struct {
+    internal.EnforceClientConnEmbedding
     // The following fields are initialized when the wrapper is created and are
     // read-only afterwards, and therefore can be accessed without a mutex.
     cc               *ClientConn
@@ -84,7 +94,6 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
             CustomUserAgent: cc.dopts.copts.UserAgent,
             ChannelzParent:  cc.channelz,
             Target:          cc.parsedTarget,
-            MetricsRecorder: cc.metricsRecorderList,
         },
         serializer:       grpcsync.NewCallbackSerializer(ctx),
         serializerCancel: cancel,
@@ -93,6 +102,10 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
     return ccb
 }
 
+func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder {
+    return ccb.cc.metricsRecorderList
+}
+
 // updateClientConnState is invoked by grpc to push a ClientConnState update to
 // the underlying balancer. This is always executed from the serializer, so
 // it is safe to call into the balancer here.
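
To make the health-listener contract documented in subconn.go above concrete, here is a minimal sketch, outside this patch, of how a custom LB policy might consume it. The myBalancer type and its wiring are hypothetical; only SubConn.RegisterHealthListener and the connectivity states come from the API shown above.

// Hypothetical example (not part of this patch): a custom LB policy following
// the RegisterHealthListener contract documented in subconn.go above.
package lbexample

import (
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/connectivity"
)

type myBalancer struct {
    cc balancer.ClientConn // hypothetical wiring; other fields elided
}

// onSubConnState would be installed as the SubConn's StateListener.
func (b *myBalancer) onSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
    if s.ConnectivityState != connectivity.Ready {
        // Health updates stop on their own once the raw connectivity state
        // leaves READY, so there is nothing to unregister here.
        return
    }
    // Per the contract: register a fresh listener on every transition into
    // READY, and never from inside a previously registered listener.
    sc.RegisterHealthListener(func(hs balancer.SubConnState) {
        switch hs.ConnectivityState {
        case connectivity.Ready:
            // Connected and healthy: the SubConn may be picked for RPCs.
        case connectivity.TransientFailure:
            // Health checking failed; hs.ConnectionError explains why.
        }
    })
}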
@@ -189,6 +202,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, + healthData: newHealthData(connectivity.Idle), } ac.acbw = acbw return acbw, nil @@ -254,12 +268,39 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) producersMu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer + + // Access to healthData is protected by healthMu. + healthMu sync.Mutex + // healthData is stored as a pointer to detect when the health listener is + // dropped or updated. This is required as closures can't be compared for + // equality. + healthData *healthData +} + +// healthData holds data related to health state reporting. +type healthData struct { + // connectivityState stores the most recent connectivity state delivered + // to the LB policy. This is stored to avoid sending updates when the + // SubConn has already exited connectivity state READY. + connectivityState connectivity.State + // closeHealthProducer stores function to close the ref counted health + // producer. The health producer is automatically closed when the SubConn + // state changes. + closeHealthProducer func() +} + +func newHealthData(s connectivity.State) *healthData { + return &healthData{ + connectivityState: s, + closeHealthProducer: func() {}, + } } // updateState is invoked by grpc to push a subConn state update to the @@ -279,6 +320,24 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } + // Invalidate the health listener by updating the healthData. + acbw.healthMu.Lock() + // A race may occur if a health listener is registered soon after the + // connectivity state is set but before the stateListener is called. + // Two cases may arise: + // 1. The new state is not READY: RegisterHealthListener has checks to + // ensure no updates are sent when the connectivity state is not + // READY. + // 2. The new state is READY: This means that the old state wasn't Ready. + // The RegisterHealthListener API mentions that a health listener + // must not be registered when a SubConn is not ready to avoid such + // races. When this happens, the LB policy would get health updates + // on the old listener. When the LB policy registers a new listener + // on receiving the connectivity update, the health updates will be + // sent to the new health listener. + acbw.healthData = newHealthData(scs.ConnectivityState) + acbw.healthMu.Unlock() + acbw.stateListener(scs) }) } @@ -361,7 +420,7 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( } acbw.producersMu.Unlock() } - return pData.producer, grpcsync.OnceFunc(unref) + return pData.producer, sync.OnceFunc(unref) } func (acbw *acBalancerWrapper) closeProducers() { @@ -373,3 +432,89 @@ func (acbw *acBalancerWrapper) closeProducers() { delete(acbw.producers, pb) } } + +// healthProducerRegisterFn is a type alias for the health producer's function +// for registering listeners. 
+type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func() + +// healthListenerRegFn returns a function to register a listener for health +// updates. If client side health checks are disabled, the registered listener +// will get a single READY (raw connectivity state) update. +// +// Client side health checking is enabled when all the following +// conditions are satisfied: +// 1. Health checking is not disabled using the dial option. +// 2. The health package is imported. +// 3. The health check config is present in the service config. +func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() { + if acbw.ccb.cc.dopts.disableHealthCheck { + return noOpRegisterHealthListenerFn + } + regHealthLisFn := internal.RegisterClientHealthCheckListener + if regHealthLisFn == nil { + // The health package is not imported. + return noOpRegisterHealthListenerFn + } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } + return func(ctx context.Context, listener func(balancer.SubConnState)) func() { + return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener) + } +} + +// RegisterHealthListener accepts a health listener from the LB policy. It sends +// updates to the health listener as long as the SubConn's connectivity state +// doesn't change and a new health listener is not registered. To invalidate +// the currently registered health listener, acbw updates the healthData. If a +// nil listener is registered, the active health listener is dropped. +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + acbw.healthData.closeHealthProducer() + // listeners should not be registered when the connectivity state + // isn't Ready. This may happen when the balancer registers a listener + // after the connectivityState is updated, but before it is notified + // of the update. + if acbw.healthData.connectivityState != connectivity.Ready { + return + } + // Replace the health data to stop sending updates to any previously + // registered health listeners. + hd := newHealthData(connectivity.Ready) + acbw.healthData = hd + if listener == nil { + return + } + + registerFn := acbw.healthListenerRegFn() + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Don't send updates if a new listener is registered. + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + // Serialize the health updates from the health producer with + // other calls into the LB policy. 
+ listenerWrapper := func(scs balancer.SubConnState) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + listener(scs) + }) + } + + hd.closeHealthProducer = registerFn(ctx, listenerWrapper) + }) +} diff --git a/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 55bffaa77..b1364a032 100644 --- a/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.6 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -31,6 +31,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -233,10 +234,7 @@ func (Address_Type) EnumDescriptor() ([]byte, []int) { // Log entry we store in binary logs type GrpcLogEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The timestamp of the binary log message Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate @@ -255,7 +253,7 @@ type GrpcLogEntry struct { // The logger uses one of the following fields to record the payload, // according to the type of the log entry. // - // Types that are assignable to Payload: + // Types that are valid to be assigned to Payload: // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader @@ -269,16 +267,16 @@ type GrpcLogEntry struct { // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in // the case of trailers-only. On server side, peer is always // logged on EVENT_TYPE_CLIENT_HEADER. 
-    Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+    Peer          *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache     protoimpl.SizeCache
 }
 
 func (x *GrpcLogEntry) Reset() {
     *x = GrpcLogEntry{}
-    if protoimpl.UnsafeEnabled {
-        mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
-        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-        ms.StoreMessageInfo(mi)
-    }
+    mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+    ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+    ms.StoreMessageInfo(mi)
 }
 
 func (x *GrpcLogEntry) String() string {
@@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {}
 
 func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
     mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
-    if protoimpl.UnsafeEnabled && x != nil {
+    if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
         if ms.LoadMessageInfo() == nil {
             ms.StoreMessageInfo(mi)
@@ -339,37 +337,45 @@ func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
     return GrpcLogEntry_LOGGER_UNKNOWN
 }
 
-func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
-    if m != nil {
-        return m.Payload
+func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+    if x != nil {
+        return x.Payload
     }
     return nil
 }
 
 func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
-    if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
-        return x.ClientHeader
+    if x != nil {
+        if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok {
+            return x.ClientHeader
+        }
     }
     return nil
 }
 
 func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
-    if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
-        return x.ServerHeader
+    if x != nil {
+        if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok {
+            return x.ServerHeader
+        }
     }
     return nil
 }
 
 func (x *GrpcLogEntry) GetMessage() *Message {
-    if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
-        return x.Message
+    if x != nil {
+        if x, ok := x.Payload.(*GrpcLogEntry_Message); ok {
+            return x.Message
+        }
     }
     return nil
 }
 
 func (x *GrpcLogEntry) GetTrailer() *Trailer {
-    if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
-        return x.Trailer
+    if x != nil {
+        if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok {
+            return x.Trailer
+        }
     }
     return nil
 }
@@ -418,10 +424,7 @@ func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
 func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
 
 type ClientHeader struct {
-    state         protoimpl.MessageState
-    sizeCache     protoimpl.SizeCache
-    unknownFields protoimpl.UnknownFields
-
+    state protoimpl.MessageState `protogen:"open.v1"`
     // This contains only the metadata from the application.
     Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
     // The name of the RPC method, which looks something like:
@@ -435,16 +438,16 @@ type ClientHeader struct {
     // <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +458,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -499,21 +502,18 @@ func (x *ClientHeader) GetTimeout() *durationpb.Duration { } type ServerHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +524,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -547,10 +547,7 @@ func (x *ServerHeader) GetMetadata() *Metadata { } type Trailer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The gRPC status code. @@ -561,15 +558,15 @@ type Trailer struct { // The value of the 'grpc-status-details-bin' metadata key. If // present, this is always an encoded 'google.rpc.Status' message. 
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +577,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -625,24 +622,21 @@ func (x *Trailer) GetStatusDetails() []byte { // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE type Message struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Length of the message. It may not be the same as the length of the // data field, as the logging payload can be truncated or omitted. Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +647,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -704,20 +698,17 @@ func (x *Message) GetData() []byte { // header is just a normal metadata key. // The pair will not count towards the size limit. 
type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` unknownFields protoimpl.UnknownFields - - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +719,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -752,21 +743,18 @@ func (x *Metadata) GetEntry() []*MetadataEntry { // A metadata key value pair type MetadataEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +765,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -808,23 +796,20 @@ func (x *MetadataEntry) GetValue() []byte { // Address information type Address struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Address) Reset() { *x = 
Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +820,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -873,142 +858,77 @@ func (x *Address) GetIpPort() uint32 { var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor -var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, - 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, - 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, - 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 
0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, - 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, - 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, - 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, - 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, - 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, - 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, - 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 
0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, - 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, - 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, - 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, - 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, - 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 
0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, - 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, - 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, - 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, - 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, - 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} +const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" + + "\n" + + "\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" + + "\fGrpcLogEntry\x128\n" + + "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" + + "\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" + + "\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" + + "\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" + + "\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" + + "\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" + + "\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" + + "\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" + + "\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" + + "\x11payload_truncated\x18\n" + + " \x01(\bR\x10payloadTruncated\x12.\n" + + "\x04peer\x18\v 
\x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" + + "\tEventType\x12\x16\n" + + "\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" + + "\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" + + "\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" + + "\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" + + "\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" + + "\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" + + "\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" + + "\x11EVENT_TYPE_CANCEL\x10\a\"B\n" + + "\x06Logger\x12\x12\n" + + "\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" + + "\rLOGGER_CLIENT\x10\x01\x12\x11\n" + + "\rLOGGER_SERVER\x10\x02B\t\n" + + "\apayload\"\xbb\x01\n" + + "\fClientHeader\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" + + "\vmethod_name\x18\x02 \x01(\tR\n" + + "methodName\x12\x1c\n" + + "\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" + + "\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" + + "\fServerHeader\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" + + "\aTrailer\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" + + "\vstatus_code\x18\x02 \x01(\rR\n" + + "statusCode\x12%\n" + + "\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" + + "\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" + + "\aMessage\x12\x16\n" + + "\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" + + "\x04data\x18\x02 \x01(\fR\x04data\"B\n" + + "\bMetadata\x126\n" + + "\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" + + "\aAddress\x123\n" + + "\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" + + "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" + + "\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" + + "\x04Type\x12\x10\n" + + "\fTYPE_UNKNOWN\x10\x00\x12\r\n" + + "\tTYPE_IPV4\x10\x01\x12\r\n" + + "\tTYPE_IPV6\x10\x02\x12\r\n" + + "\tTYPE_UNIX\x10\x03B\\\n" + + "\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3" var ( file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once - file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc + file_grpc_binlog_v1_binarylog_proto_rawDescData []byte ) func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { - file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc))) }) return file_grpc_binlog_v1_binarylog_proto_rawDescData } @@ -1057,104 +977,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 
1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), @@ -1165,7 +987,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)), NumEnums: 3, NumMessages: 8, NumExtensions: 0, @@ -1177,7 +999,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, }.Build() File_grpc_binlog_v1_binarylog_proto = out.File - file_grpc_binlog_v1_binarylog_proto_rawDesc = nil file_grpc_binlog_v1_binarylog_proto_goTypes = nil file_grpc_binlog_v1_binarylog_proto_depIdxs = nil } diff --git a/tools/vendor/google.golang.org/grpc/clientconn.go b/tools/vendor/google.golang.org/grpc/clientconn.go index 19763f8ed..3f762285d 100644 --- a/tools/vendor/google.golang.org/grpc/clientconn.go +++ b/tools/vendor/google.golang.org/grpc/clientconn.go @@ -118,12 +118,26 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // NewClient creates a new gRPC "channel" for the target URI provided. No I/O // is performed. Use of the ClientConn for RPCs will automatically cause it to -// connect. Connect may be used to manually create a connection, but for most -// users this is unnecessary. +// connect. The Connect method may be called to manually create a connection, +// but for most users this should be unnecessary. // // The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns -// resolver, a "dns:///" prefix should be applied to the target. +// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. 
to use the dns +// name resolver, a "dns:///" prefix may be applied to the target. The default +// name resolver will be used if no scheme is detected, or if the parsed scheme +// is not a registered name resolver. The default resolver is "dns" but can be +// overridden using the resolver package's SetDefaultScheme. +// +// Examples: +// +// - "foo.googleapis.com:8080" +// - "dns:///foo.googleapis.com:8080" +// - "dns:///foo.googleapis.com" +// - "dns:///10.0.0.213:8080" +// - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443" +// - "dns://8.8.8.8/foo.googleapis.com:8080" +// - "dns://8.8.8.8/foo.googleapis.com" +// - "zookeeper://zk.example.com:9900/example_service" // // The DialOptions returned by WithBlock, WithTimeout, // WithReturnConnectionError, and FailOnNonTempDialError are ignored by this @@ -181,7 +195,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) } - cc.mkp = cc.dopts.copts.KeepaliveParams + cc.keepaliveParams = cc.dopts.copts.KeepaliveParams if err = cc.initAuthority(); err != nil { return nil, err @@ -194,7 +208,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.pickerWrapper = newPickerWrapper() cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) @@ -225,7 +239,12 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { // At the end of this method, we kick the channel out of idle, rather than // waiting for the first rpc. - opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + // + // The WithLocalDNSResolution dial option added in `grpc.Dial` preserves the + // old behavior: when the default "passthrough" scheme is used, hostname + // resolution is skipped; when "dns" is used, resolution is performed on the + // client. + opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...) cc, err := NewClient(target, opts...) if err != nil { return nil, err @@ -618,7 +637,7 @@ type ClientConn struct { balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. - mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway. // firstResolveEvent is used to track whether the name resolver sent us at // least one update. RPCs block on this event. May be accessed without mu // if we know we cannot be asked to enter idle mode while accessing it (e.g. @@ -670,22 +689,31 @@ func (cc *ClientConn) Connect() { cc.mu.Unlock() } -// waitForResolvedAddrs blocks until the resolver has provided addresses or the -// context expires. Returns nil unless the context expires first; otherwise -// returns a status error based on the context. -func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { +// waitForResolvedAddrs blocks until the resolver provides addresses or the +// context expires, whichever happens first.
+// +// Error is nil unless the context expires first; otherwise returns a status +// error based on the context. +// +// The returned boolean indicates whether it did block or not. If the +// resolution has already happened once before, it returns false without +// blocking. Otherwise, it waits for the resolution and returns true if +// resolution succeeded, or returns false along with an error if resolution +// failed. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) { // This is on the RPC path, so we use a fast path to avoid the // more-expensive "select" below after the resolver has returned once. if cc.firstResolveEvent.HasFired() { - return nil + return false, nil } + internal.NewStreamWaitingForResolver() select { case <-cc.firstResolveEvent.Done(): - return nil + return true, nil case <-ctx.Done(): - return status.FromContextError(ctx.Err()).Err() + return false, status.FromContextError(ctx.Err()).Err() case <-cc.ctx.Done(): - return ErrClientConnClosing + return false, ErrClientConnClosing } } @@ -775,10 +803,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() @@ -870,7 +895,13 @@ func (cc *ClientConn) Target() string { return cc.target } -// CanonicalTarget returns the canonical target string of the ClientConn. +// CanonicalTarget returns the canonical target string used when creating cc. +// +// This always has the form "<scheme>://[authority]/<endpoint>". For example: +// +// - "dns:///example.com:42" +// - "dns://8.8.8.8/example.com:42" +// - "unix:///path/to/socket" func (cc *ClientConn) CanonicalTarget() string { return cc.parsedTarget.String() } @@ -1045,13 +1076,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. @@ -1209,12 +1233,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) // adjustParams updates parameters used to create transports upon // receiving a GoAway.
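A minimal usage sketch of the NewClient and CanonicalTarget behavior documented above; it assumes only the public grpc-go API (grpc.NewClient, insecure credentials), and the endpoint name is hypothetical:

    package main

    import (
        "fmt"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // "dns" is the default scheme, so this target is equivalent to
        // "dns:///foo.googleapis.com:8080".
        conn, err := grpc.NewClient("foo.googleapis.com:8080",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            panic(err)
        }
        defer conn.Close()
        // Prints the canonical "<scheme>://[authority]/<endpoint>" form,
        // here "dns:///foo.googleapis.com:8080". No I/O happens until the
        // first RPC or an explicit conn.Connect().
        fmt.Println(conn.CanonicalTarget())
    }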
func (ac *addrConn) adjustParams(r transport.GoAwayReason) { - switch r { - case transport.GoAwayTooManyPings: + if r == transport.GoAwayTooManyPings { v := 2 * ac.dopts.copts.KeepaliveParams.Time ac.cc.mu.Lock() - if v > ac.cc.mkp.Time { - ac.cc.mkp.Time = v + if v > ac.cc.keepaliveParams.Time { + ac.cc.keepaliveParams.Time = v } ac.cc.mu.Unlock() } @@ -1310,7 +1333,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c ac.mu.Lock() ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams ac.cc.mu.RUnlock() copts := ac.dopts.copts @@ -1374,7 +1397,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1448,7 +1471,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1480,7 +1503,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") @@ -1801,7 +1824,7 @@ func (cc *ClientConn) initAuthority() error { } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { cc.authority = auth.OverrideAuthority(cc.parsedTarget) } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint + cc.authority = "localhost" + encodeAuthority(endpoint) } else { cc.authority = encodeAuthority(endpoint) } diff --git a/tools/vendor/google.golang.org/grpc/codec.go b/tools/vendor/google.golang.org/grpc/codec.go index e840858b7..959c2f99d 100644 --- a/tools/vendor/google.golang.org/grpc/codec.go +++ b/tools/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } - return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil + return mem.BufferSlice{mem.SliceBuffer(data)}, nil } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/tools/vendor/google.golang.org/grpc/credentials/credentials.go b/tools/vendor/google.golang.org/grpc/credentials/credentials.go index 665e790bb..c8e337cdd 100644 --- a/tools/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/tools/vendor/google.golang.org/grpc/credentials/credentials.go @@ -96,10 +96,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. +// ProtocolInfo provides static information regarding transport credentials. 
type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. + // + // Deprecated: this is unused by gRPC. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string @@ -109,7 +110,16 @@ type ProtocolInfo struct { // // Deprecated: please use Peer.AuthInfo. SecurityVersion string - // ServerName is the user-configured server name. + // ServerName is the user-configured server name. If set, this overrides + // the default :authority header used for all RPCs on the channel using the + // containing credentials, unless grpc.WithAuthority is set on the channel, + // in which case that setting will take precedence. + // + // This must be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: Users should use grpc.WithAuthority to override the authority + // on a channel instead of configuring the credentials. ServerName string } @@ -120,6 +130,20 @@ type AuthInfo interface { AuthType() string } +// AuthorityValidator validates the authority used to override the `:authority` +// header. This is an optional interface that implementations of AuthInfo can +// implement if they support per-RPC authority overrides. It is invoked when the +// application attempts to override the HTTP/2 `:authority` header using the +// CallAuthority call option. +type AuthorityValidator interface { + // ValidateAuthority checks the authority value used to override the + // `:authority` header. The authority parameter is the override value + // provided by the application via the CallAuthority option. This value + // typically corresponds to the server hostname or endpoint the RPC is + // targeting. It returns non-nil error if the validation fails. + ValidateAuthority(authority string) error +} + // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC // and the caller should not close rawConn. var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") @@ -159,12 +183,17 @@ type TransportCredentials interface { // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials // OverrideServerName specifies the value used for the following: + // // - verifying the hostname on the returned certificates // - as SNI in the client's handshake to support virtual hosting // - as the value for `:authority` header at stream creation time // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. + // The provided string should be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: this method is unused by gRPC. Users should use + // grpc.WithAuthority to override the authority on a channel instead of + // configuring the credentials. OverrideServerName(string) error } @@ -207,14 +236,32 @@ type RequestInfo struct { AuthInfo AuthInfo } +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) return ri, ok } +// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it. 
+// +// This RequestInfo will be accessible via RequestInfoFromContext. +// +// Intended to be used from tests for PerRPCCredentials implementations (that +// often need to check connection's SecurityLevel). Should not be used from +// non-test code: the gRPC client already prepares a context with the correct +// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata. +// +// This API is experimental. +func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes // it possible to pass arbitrary data to the handshaker from gRPC, resolver, // balancer etc. Individual credential implementations control the actual diff --git a/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 4c805c644..93156c0f3 100644 --- a/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -30,7 +30,7 @@ import ( // NewCredentials returns a credentials which disables transport security. // // Note that using this credentials with per-RPC credentials which require -// transport security is incompatible and will cause grpc.Dial() to fail. +// transport security is incompatible and will cause RPCs to fail. func NewCredentials() credentials.TransportCredentials { return insecureTC{} } @@ -71,6 +71,12 @@ func (info) AuthType() string { return "insecure" } +// ValidateAuthority allows any value to be overridden for the :authority +// header. +func (info) ValidateAuthority(string) error { + return nil +} + // insecureBundle implements an insecure bundle. // An insecure bundle provides a thin wrapper around insecureTC to support // the credentials.Bundle interface. diff --git a/tools/vendor/google.golang.org/grpc/credentials/tls.go b/tools/vendor/google.golang.org/grpc/credentials/tls.go index e163a473d..8277be7d6 100644 --- a/tools/vendor/google.golang.org/grpc/credentials/tls.go +++ b/tools/vendor/google.golang.org/grpc/credentials/tls.go @@ -22,6 +22,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "net" "net/url" @@ -32,6 +33,8 @@ import ( "google.golang.org/grpc/internal/envconfig" ) +const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434" + var logger = grpclog.Component("credentials") // TLSInfo contains the auth information for a TLS authenticated connection. @@ -48,6 +51,21 @@ func (t TLSInfo) AuthType() string { return "tls" } +// ValidateAuthority validates the provided authority being used to override the +// :authority header by verifying it against the peer certificates. It returns a +// non-nil error if the validation fails. +func (t TLSInfo) ValidateAuthority(authority string) error { + var errs []error + for _, cert := range t.State.PeerCertificates { + var err error + if err = cert.VerifyHostname(authority); err == nil { + return nil + } + errs = append(errs, err) + } + return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...)) +} + // cipherSuiteLookup returns the string version of a TLS cipher suite ID. 
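A sketch of how the AuthorityValidator hook introduced above is exercised from application code; grpc.CallAuthority is the call option named in the comments, while the method name and authority value below are hypothetical:

    import (
        "context"

        "google.golang.org/grpc"
    )

    // callWithAuthority overrides the HTTP/2 :authority header for one RPC.
    // With TLS transport credentials, TLSInfo.ValidateAuthority (above) checks
    // the override against the peer certificates before the RPC proceeds.
    func callWithAuthority(ctx context.Context, conn *grpc.ClientConn, req, reply any) error {
        return conn.Invoke(ctx, "/example.Service/Method", req, reply,
            grpc.CallAuthority("api.example.com"))
    }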
func cipherSuiteLookup(cipherSuiteID uint16) string { for _, s := range tls.CipherSuites() { @@ -92,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. - serverName = authority - } - cfg.ServerName = serverName + + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority } + cfg.ServerName = serverName + conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { @@ -128,7 +146,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon if np == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) } @@ -158,7 +176,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if cs.NegotiatedProtocol == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } else if logger.V(2) { logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") } @@ -241,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config { // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } @@ -253,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. 
If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := os.ReadFile(certFile) if err != nil { diff --git a/tools/vendor/google.golang.org/grpc/dialoptions.go b/tools/vendor/google.golang.org/grpc/dialoptions.go index 518692c3a..7a5ac2e7c 100644 --- a/tools/vendor/google.golang.org/grpc/dialoptions.go +++ b/tools/vendor/google.golang.org/grpc/dialoptions.go @@ -73,7 +73,7 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor + compressorV0 Compressor dc Decompressor bs internalbackoff.Strategy block bool @@ -87,7 +87,6 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -95,6 +94,8 @@ type dialOptions struct { idleTimeout time.Duration defaultScheme string maxCallAttempts int + enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled. + useProxy bool // Specifies if a server should be connected via proxy. } // DialOption configures how we set up the connection. @@ -212,6 +213,7 @@ func WithReadBufferSize(s int) DialOption { func WithInitialWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -221,6 +223,26 @@ func WithInitialWindowSize(s int32) DialOption { func WithInitialConnWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticStreamWindowSize returns a DialOption which sets the initial +// stream window size to the value provided and disables dynamic flow control. +func WithStaticStreamWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticConnWindowSize returns a DialOption which sets the initial +// connection window size to the value provided and disables dynamic flow +// control. +func WithStaticConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -257,7 +279,7 @@ func WithCodec(c Codec) DialOption { // Deprecated: use UseCompressor instead. Will be supported throughout 1.x. func WithCompressor(cp Compressor) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.cp = cp + o.compressorV0 = cp }) } @@ -359,7 +381,7 @@ func WithReturnConnectionError() DialOption { // // Note that using this DialOption with per-RPC credentials (through // WithCredentialsBundle or WithPerRPCCredentials) which require transport -// security is incompatible and will cause grpc.Dial() to fail. +// security is incompatible and will cause RPCs to fail. // // Deprecated: use WithTransportCredentials and insecure.NewCredentials() // instead. Will be supported throughout 1.x. @@ -378,7 +400,22 @@ func WithInsecure() DialOption { // later release. 
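A short sketch of the static window-size options defined above; the target and window sizes are illustrative, and creds stands in for any existing TransportCredentials:

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
    )

    // newClientWithStaticFlowControl pins both HTTP/2 windows and disables
    // BDP-based dynamic flow control, per WithStaticStreamWindowSize and
    // WithStaticConnWindowSize.
    func newClientWithStaticFlowControl(creds credentials.TransportCredentials) (*grpc.ClientConn, error) {
        return grpc.NewClient("dns:///svc.example.com:443",
            grpc.WithStaticStreamWindowSize(1<<20), // 1 MiB per-stream window
            grpc.WithStaticConnWindowSize(4<<20),   // 4 MiB per-connection window
            grpc.WithTransportCredentials(creds),
        )
    }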
func WithNoProxy() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UseProxy = false + o.useProxy = false + }) +} + +// WithLocalDNSResolution forces local DNS name resolution even when a proxy is +// specified in the environment. By default, the server name is provided +// directly to the proxy as part of the CONNECT handshake. This is ignored if +// WithNoProxy is used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithLocalDNSResolution() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.enableLocalDNSResolution = true }) } @@ -429,6 +466,11 @@ func WithTimeout(d time.Duration) DialOption { // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. // +// Note that gRPC by default performs name resolution on the target passed to +// NewClient. To bypass name resolution and cause the target string to be +// passed directly to the dialer here instead, use the "passthrough" resolver +// by specifying it in the target string, e.g. "passthrough:target". +// // Note: All supported releases of Go (as of December 2023) override the OS // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive // with OS defaults for keepalive time and interval, use a net.Dialer that sets @@ -445,10 +487,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -570,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header and as the server name in authentication handshake. +// This overrides all other ways of setting authority on the channel, but can be +// overridden per-call by using grpc.CallAuthority. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -662,30 +702,20 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. 
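The WithContextDialer note above points at the "passthrough" resolver; a sketch of combining the two so the raw target string reaches the dialer unresolved (the "passthrough:target" form is taken from that note; the address itself is hypothetical):

    import (
        "context"
        "net"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    // newDirectDialClient bypasses client-side name resolution: with the
    // "passthrough:" scheme, addr arrives at the dialer verbatim.
    func newDirectDialClient() (*grpc.ClientConn, error) {
        dial := func(ctx context.Context, addr string) (net.Conn, error) {
            var d net.Dialer
            return d.DialContext(ctx, "tcp", addr) // addr == "10.0.0.1:50051"
        }
        return grpc.NewClient("passthrough:10.0.0.1:50051",
            grpc.WithContextDialer(dial),
            grpc.WithTransportCredentials(insecure.NewCredentials()))
    }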
-func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ ReadBufferSize: defaultReadBufSize, WriteBufferSize: defaultWriteBufSize, - UseProxy: true, UserAgent: grpcUA, BufferPool: mem.DefaultBufferPool(), }, - bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, - idleTimeout: 30 * time.Minute, - defaultScheme: "dns", - maxCallAttempts: defaultMaxCallAttempts, + bs: internalbackoff.DefaultExponential, + idleTimeout: 30 * time.Minute, + defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, + useProxy: true, + enableLocalDNSResolution: false, } } diff --git a/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index 1d827dd5d..ad75313a1 100644 --- a/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" ) func init() { @@ -34,7 +35,7 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. -var DefaultMetrics = NewMetrics() +var DefaultMetrics = stats.NewMetricSet() // MetricDescriptor is the data for a registered metric. type MetricDescriptor struct { @@ -42,7 +43,7 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. - Name Metric + Name string // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. @@ -154,27 +155,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. -var registeredMetrics = make(map[Metric]bool) +var registeredMetrics = make(map[string]bool) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. -var metricsRegistry = make(map[Metric]*MetricDescriptor) +var metricsRegistry = make(map[string]*MetricDescriptor) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. 
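Since the registry above now keys metrics by plain strings and DefaultMetrics is a stats.MetricSet, a small sketch of the lookup and set operations; the metric names are hypothetical, and the nil check guards against unregistered names:

    import (
        estats "google.golang.org/grpc/experimental/stats"
        "google.golang.org/grpc/stats"
    )

    func metricSetSketch() {
        // DescriptorForMetric returns nil for names that were never registered.
        if d := estats.DescriptorForMetric("example.request.count"); d != nil {
            _ = d.Description
        }
        // MetricSet is immutable; Add returns a copy containing the extra name.
        set := stats.NewMetricSet("example.request.count")
        set = set.Add("example.request.latency")
        _ = set
    }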
-func DescriptorForMetric(metric Metric) *MetricDescriptor { - return metricsRegistry[metric] +func DescriptorForMetric(metricName string) *MetricDescriptor { + return metricsRegistry[metricName] } -func registerMetric(name Metric, def bool) { - if registeredMetrics[name] { - logger.Fatalf("metric %v already registered", name) +func registerMetric(metricName string, def bool) { + if registeredMetrics[metricName] { + logger.Fatalf("metric %v already registered", metricName) } - registeredMetrics[name] = true + registeredMetrics[metricName] = true if def { - DefaultMetrics = DefaultMetrics.Add(name) + DefaultMetrics = DefaultMetrics.Add(metricName) } } @@ -256,8 +257,8 @@ func snapshotMetricsRegistryForTesting() func() { oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry - registeredMetrics = make(map[Metric]bool) - metricsRegistry = make(map[Metric]*MetricDescriptor) + registeredMetrics = make(map[string]bool) + metricsRegistry = make(map[string]*MetricDescriptor) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) diff --git a/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go index 3221f7a63..ee1423605 100644 --- a/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,7 +19,7 @@ // Package stats contains experimental metrics/stats API's. package stats -import "maps" +import "google.golang.org/grpc/stats" // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { @@ -40,75 +40,15 @@ type MetricsRecorder interface { RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } -// Metric is an identifier for a metric. -type Metric string +// Metrics is an experimental legacy alias of the now-stable stats.MetricSet. +// Metrics will be deleted in a future release. +type Metrics = stats.MetricSet -// Metrics is a set of metrics to record. Once created, Metrics is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetrics instead. -type Metrics struct { - // metrics are the set of metrics to initialize. - metrics map[Metric]bool -} +// Metric was replaced by direct usage of strings. +type Metric = string -// NewMetrics returns a Metrics containing Metrics. +// NewMetrics is an experimental legacy alias of the now-stable +// stats.NewMetricSet. NewMetrics will be deleted in a future release. func NewMetrics(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. -func (m *Metrics) Metrics() map[Metric]bool { - return m.metrics -} - -// Add adds the metrics to the metrics set and returns a new copy with the -// additional metrics. -func (m *Metrics) Add(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. 
-func (m *Metrics) Join(metrics *Metrics) *Metrics { - newMetrics := make(map[Metric]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &Metrics{ - metrics: newMetrics, - } -} - -// Remove removes the metrics from the metrics set and returns a new copy with -// the metrics removed. -func (m *Metrics) Remove(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - delete(newMetrics, metric) - } - return &Metrics{ - metrics: newMetrics, - } + return stats.NewMetricSet(metrics...) } diff --git a/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index 07df71e98..ed90060c3 100644 --- a/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,6 +101,22 @@ var severityName = []string{ fatalLog: "FATAL", } +// sprintf is fmt.Sprintf. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintf = fmt.Sprintf + +// sprint is fmt.Sprint. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprint = fmt.Sprint + +// sprintln is fmt.Sprintln. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintln = fmt.Sprintln + +// exit is os.Exit. +// This var exists to make it possible to test functions calling os.Exit. +var exit = os.Exit + // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -111,7 +127,7 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + g.m[severity].Output(2, sevStr+": "+s) return } // TODO: we can also include the logging component, but that needs more @@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } +func (g *loggerT) printf(severity int, format string, args ...any) { + // Note the discard check is duplicated in each print func, rather than in + // output, to avoid the expensive Sprint calls. + // De-duplicating this by moving to output would be a significant performance regression! + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintf(format, args...)) +} + +func (g *loggerT) print(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprint(v...)) +} + +func (g *loggerT) println(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintln(v...)) +} + func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) + g.print(infoLog, args...) } func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) + g.println(infoLog, args...) } func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) + g.printf(infoLog, format, args...) } func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) + g.print(warningLog, args...) } func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) + g.println(warningLog, args...) 
} func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) + g.printf(warningLog, format, args...) } func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) + g.print(errorLog, args...) } func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) + g.println(errorLog, args...) } func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) + g.printf(errorLog, format, args...) } func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) + g.print(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) + g.println(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) + g.printf(fatalLog, format, args...) + exit(1) } func (g *loggerT) V(l int) bool { @@ -186,19 +226,42 @@ type LoggerV2Config struct { FormatJSON bool } +// combineLoggers returns a combined logger for both higher & lower severity logs, +// or only one if the other is io.Discard. +// +// This uses io.Discard instead of io.MultiWriter when all loggers +// are set to io.Discard. Both this package and the standard log package have +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of +// this writing). +func combineLoggers(lower, higher io.Writer) io.Writer { + if lower == io.Discard { + return higher + } + if higher == io.Discard { + return lower + } + return io.MultiWriter(lower, higher) +} + // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { - var m []*log.Logger flag := log.LstdFlags if c.FormatJSON { flag = 0 } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) + + warningW = combineLoggers(infoW, warningW) + errorW = combineLoggers(errorW, warningW) + + fatalW := errorW + + m := []*log.Logger{ + log.New(infoW, "", flag), + log.New(warningW, "", flag), + log.New(errorW, "", flag), + log.New(fatalW, "", flag), + } return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index d92335445..22d263fb9 100644 --- a/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.6 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -28,6 +28,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -90,20 +91,17 @@ func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { } type HealthCheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckRequest) String() string { @@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -137,20 +135,17 @@ func (x *HealthCheckRequest) GetService() string { } type HealthCheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` unknownFields protoimpl.UnknownFields - - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckResponse) String() string { @@ -161,7 +156,7 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -183,76 +178,150 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { return HealthCheckResponse_UNKNOWN } -var File_grpc_health_v1_health_proto protoreflect.FileDescriptor +type HealthListRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListRequest) Reset() { + *x = HealthListRequest{} + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListRequest) 
String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListRequest) ProtoMessage() {} + +func (x *HealthListRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead. +func (*HealthListRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2} +} -var file_grpc_health_v1_health_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, - 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, - 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, - 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, - 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, - 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, - 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 
0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +type HealthListResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // statuses contains all the services and their respective status. + Statuses map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } +func (x *HealthListResponse) Reset() { + *x = HealthListResponse{} + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListResponse) ProtoMessage() {} + +func (x *HealthListResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead. +func (*HealthListResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3} +} + +func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse { + if x != nil { + return x.Statuses + } + return nil +} + +var File_grpc_health_v1_health_proto protoreflect.FileDescriptor + +const file_grpc_health_v1_health_proto_rawDesc = "" + + "\n" + + "\x1bgrpc/health/v1/health.proto\x12\x0egrpc.health.v1\".\n" + + "\x12HealthCheckRequest\x12\x18\n" + + "\aservice\x18\x01 \x01(\tR\aservice\"\xb1\x01\n" + + "\x13HealthCheckResponse\x12I\n" + + "\x06status\x18\x01 \x01(\x0e21.grpc.health.v1.HealthCheckResponse.ServingStatusR\x06status\"O\n" + + "\rServingStatus\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\v\n" + + "\aSERVING\x10\x01\x12\x0f\n" + + "\vNOT_SERVING\x10\x02\x12\x13\n" + + "\x0fSERVICE_UNKNOWN\x10\x03\"\x13\n" + + "\x11HealthListRequest\"\xc4\x01\n" + + "\x12HealthListResponse\x12L\n" + + "\bstatuses\x18\x01 \x03(\v20.grpc.health.v1.HealthListResponse.StatusesEntryR\bstatuses\x1a`\n" + + "\rStatusesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x129\n" + + "\x05value\x18\x02 \x01(\v2#.grpc.health.v1.HealthCheckResponseR\x05value:\x028\x012\xfd\x01\n" + + "\x06Health\x12P\n" + + "\x05Check\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse\x12M\n" + + "\x04List\x12!.grpc.health.v1.HealthListRequest\x1a\".grpc.health.v1.HealthListResponse\x12R\n" + + "\x05Watch\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse0\x01Bp\n" + + "\x11io.grpc.health.v1B\vHealthProtoP\x01Z,google.golang.org/grpc/health/grpc_health_v1\xa2\x02\fGrpcHealthV1\xaa\x02\x0eGrpc.Health.V1b\x06proto3" + var ( file_grpc_health_v1_health_proto_rawDescOnce sync.Once - file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc + 
file_grpc_health_v1_health_proto_rawDescData []byte ) func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { - file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc))) }) return file_grpc_health_v1_health_proto_rawDescData } var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse + (*HealthListRequest)(nil), // 3: grpc.health.v1.HealthListRequest + (*HealthListResponse)(nil), // 4: grpc.health.v1.HealthListResponse + nil, // 5: grpc.health.v1.HealthListResponse.StatusesEntry } var file_grpc_health_v1_health_proto_depIdxs = []int32{ 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus - 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest - 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest - 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse - 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry + 2, // 2: grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse + 1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest + 1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse + 2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_grpc_health_v1_health_proto_init() } @@ -260,39 +329,13 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)), NumEnums: 1, - NumMessages: 2, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, @@ -302,7 +345,6 @@ func file_grpc_health_v1_health_proto_init() { MessageInfos: file_grpc_health_v1_health_proto_msgTypes, }.Build() File_grpc_health_v1_health_proto = out.File - file_grpc_health_v1_health_proto_rawDesc = nil file_grpc_health_v1_health_proto_goTypes = nil file_grpc_health_v1_health_proto_depIdxs = nil } diff --git a/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index f96b8ab49..f2c01f296 100644 --- a/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -37,6 +37,7 @@ const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_List_FullMethodName = "/grpc.health.v1.Health/List" Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" ) @@ -55,9 +56,19 @@ type HealthClient interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever @@ -94,6 +105,16 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } +func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthListResponse) + err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) @@ -128,9 +149,19 @@ type HealthServer interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(context.Context, *HealthListRequest) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever @@ -157,10 +188,13 @@ type HealthServer interface { type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") + return nil, status.Error(codes.Unimplemented, "method Check not implemented") +} +func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) { + return nil, status.Error(codes.Unimplemented, "method List not implemented") } func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") + return status.Error(codes.Unimplemented, "method Watch not implemented") } func (UnimplementedHealthServer) testEmbeddedByValue() {} @@ -200,6 +234,24 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Health_List_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).List(ctx, req.(*HealthListRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(HealthCheckRequest) if err := stream.RecvMsg(m); err != nil { @@ -222,6 +274,10 @@ var Health_ServiceDesc = grpc.ServiceDesc{ MethodName: "Check", Handler: _Health_Check_Handler, }, + { + MethodName: "List", + Handler: _Health_List_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/tools/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/tools/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b15cf482d..b6ae7f258 100644 --- a/tools/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/tools/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ 
package backoff import ( "context" "errors" - "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 73bb4c4ee..ba25b8988 100644 --- a/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -109,8 +109,9 @@ func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error return nil, errBalancerClosed } bw := &balancerWrapper{ - builder: builder, - gsb: gsb, + ClientConn: gsb.cc, + builder: builder, + gsb: gsb, lastState: balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), @@ -222,15 +223,7 @@ func (gsb *Balancer) ExitIdle() { // There is no need to protect this read with a mutex, as the write to the // Balancer field happens in SwitchTo, which completes before this can be // called. - if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - return - } - gsb.mu.Lock() - defer gsb.mu.Unlock() - for sc := range balToUpdate.subconns { - sc.Connect() - } + balToUpdate.ExitIdle() } // updateSubConnState forwards the update to the appropriate child. @@ -293,6 +286,7 @@ func (gsb *Balancer) Close() { // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. type balancerWrapper struct { + balancer.ClientConn balancer.Balancer gsb *Balancer builder balancer.Builder @@ -413,7 +407,3 @@ func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver bw.gsb.mu.Unlock() bw.gsb.cc.UpdateAddresses(sc, addrs) } - -func (bw *balancerWrapper) Target() string { - return bw.gsb.cc.Target() -} diff --git a/tools/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/tools/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 9deee7f65..48b22d9cf 100644 --- a/tools/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/tools/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -20,20 +20,6 @@ import ( "context" ) -// requestInfoKey is a struct to be used as the key to store RequestInfo in a -// context. -type requestInfoKey struct{} - -// NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri any) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) -} - -// RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) any { - return ctx.Value(requestInfoKey{}) -} - // clientHandshakeInfoKey is a struct used as the key to store // ClientHandshakeInfo in a context. type clientHandshakeInfoKey struct{} diff --git a/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6e7dd6b77..7e060f5ed 100644 --- a/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,35 +26,55 @@ import ( ) var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + // EnableTXTServiceConfig is set if the DNS resolver should perform TXT + // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not + // "false"). 
+ EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true) + + // TXTErrIgnore is set if TXT errors should be ignored + // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // LeastRequestLB is set if we should support the least_request_experimental - // LB policy, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". - LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // XDSFallbackSupport is the env variable that controls whether support for - // xDS fallback is turned on. If this is unset or is false, only the first - // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be enabled by + // instead of the existing pickfirst implementation. This can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "true". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) + // to "false". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true) + + // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash + // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by + // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the + // implementation of A76 is stable, we will flip the default value to false + // in a subsequent release. A final release will remove this environment + // variable, enabling the new behavior unconditionally. + XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true) + + // RingHashSetRequestHashKey is set if the ring hash balancer can get the + // request hash header by setting the "requestHashHeader" field, according + // to gRFC A76. It can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". + RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) + + // ALTSHandshakerKeepaliveParams is set if we should add the + // KeepaliveParams when dialing the ALTS handshaker service.
+ ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/tools/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/tools/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 29f234acb..e87551552 100644 --- a/tools/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/tools/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -53,4 +53,19 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + + // XDSDualstackEndpointsEnabled is true if gRPC should read the + // "additional addresses" in the xDS endpoint resource. + XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true) + + // XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use + // the system's default root certificates for TLS certificate validation. + // For more details, see: + // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md. + XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false) + + // XDSSPIFFEEnabled controls if SPIFFE Bundle Maps can be used as roots of + // trust. For more details, see: + // https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md + XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false) ) diff --git a/tools/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/tools/vendor/google.golang.org/grpc/internal/grpcsync/event.go index fbe697c37..d788c2493 100644 --- a/tools/vendor/google.golang.org/grpc/internal/grpcsync/event.go +++ b/tools/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -21,28 +21,25 @@ package grpcsync import ( - "sync" "sync/atomic" ) // Event represents a one-time event that may occur in the future. type Event struct { - fired int32 + fired atomic.Bool c chan struct{} - o sync.Once } // Fire causes e to complete. It is safe to call multiple times, and // concurrently. It returns true iff this call to Fire caused the signaling -// channel returned by Done to close. +// channel returned by Done to close. If Fire returns false, it is possible +// the Done channel has not been closed yet. func (e *Event) Fire() bool { - ret := false - e.o.Do(func() { - atomic.StoreInt32(&e.fired, 1) + if e.fired.CompareAndSwap(false, true) { close(e.c) - ret = true - }) - return ret + return true + } + return false } // Done returns a channel that will be closed when Fire is called. @@ -52,7 +49,7 @@ func (e *Event) Done() <-chan struct{} { // HasFired returns true if Fire has been called. func (e *Event) HasFired() bool { - return atomic.LoadInt32(&e.fired) == 1 + return e.fired.Load() } // NewEvent returns a new, ready-to-use Event. 
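The event.go hunk above replaces a sync.Once paired with a manually maintained int32 by a single atomic.Bool: CompareAndSwap guarantees that exactly one Fire call closes the channel, eliminating one field and one synchronization primitive. Below is a minimal, self-contained sketch of that pattern; it mirrors the semantics of the internal grpcsync.Event shown in the hunk rather than importing the internal package, so all names are local to the example.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// event mirrors the reworked grpcsync.Event: a CAS on an atomic.Bool decides
// the single winner, which then closes the channel and returns true.
type event struct {
	fired atomic.Bool
	c     chan struct{}
}

func newEvent() *event { return &event{c: make(chan struct{})} }

// Fire is safe to call concurrently; only the caller that wins the CAS
// closes c and returns true.
func (e *event) Fire() bool {
	if e.fired.CompareAndSwap(false, true) {
		close(e.c)
		return true
	}
	return false
}

// Done returns the channel closed by the winning Fire call.
func (e *event) Done() <-chan struct{} { return e.c }

// HasFired reports whether Fire has been called, via a single atomic load.
func (e *event) HasFired() bool { return e.fired.Load() }

func main() {
	e := newEvent()
	var wins atomic.Int32
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if e.Fire() {
				wins.Add(1)
			}
		}()
	}
	wg.Wait()
	<-e.Done() // returns immediately: the winning Fire already closed it
	fmt.Printf("wins=%d hasFired=%v\n", wins.Load(), e.HasFired()) // wins=1 hasFired=true
}

Note the caveat added to Fire's doc comment in the hunk: a call that returns false may run before the winner has executed close, so a false return does not imply the Done channel is already closed. The sketch only reads Done after all goroutines have finished, by which point the winner is guaranteed to have closed it.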
diff --git a/tools/vendor/google.golang.org/grpc/internal/internal.go b/tools/vendor/google.golang.org/grpc/internal/internal.go index 20b4dc3d3..2699223a2 100644 --- a/tools/vendor/google.golang.org/grpc/internal/internal.go +++ b/tools/vendor/google.golang.org/grpc/internal/internal.go @@ -29,10 +29,12 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker + // RegisterClientHealthCheckListener is used to provide a listener for + // updates from the client-side health checking service. It returns a + // function that can be called to stop the health producer. + RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by @@ -62,6 +64,9 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // MetricsRecorderForServer returns the MetricsRecorderList derived from a + // server's stats handlers. + MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // @@ -149,34 +154,33 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster - // Specifier Plugin for testing purposes, regardless of the XDSRLS environment - // variable. + // NewXDSResolverWithPoolForTesting creates a new xDS resolver builder + // using the provided xDS pool instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. // - // TODO: Remove this function once the RLS env var is removed. - RegisterRLSClusterSpecifierPluginForTesting func() - - // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster - // Specifier Plugin for testing purposes. This is needed because there is no way - // to unregister the RLS Cluster Specifier Plugin after registering it solely - // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // Testing Only // - // TODO: Remove this function once the RLS env var is removed. - UnregisterRLSClusterSpecifierPluginForTesting func() + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error) - // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing - // purposes, regardless of the RBAC environment variable. 
+ // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. // - // TODO: Remove this function once the RBAC env var is removed. - RegisterRBACHTTPFilterForTesting func() - - // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for - // testing purposes. This is needed because there is no way to unregister the - // HTTP Filter after registering it solely for testing purposes using - // RegisterRBACHTTPFilterForTesting(). + // Testing Only // - // TODO: Remove this function once the RBAC env var is removed. - UnregisterRBACHTTPFilterForTesting func() + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) @@ -226,6 +230,20 @@ var ( // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for // testing purposes. SetBufferPoolingThresholdForTesting any // func(int) + + // TimeAfterFunc is used to create timers. During tests the function is + // replaced to track allocated timers and fail the test if a timer isn't + // cancelled. + TimeAfterFunc = func(d time.Duration, f func()) Timer { + return time.AfterFunc(d, f) + } + + // NewStreamWaitingForResolver is a test hook that is triggered when a + // new stream blocks while waiting for name resolution. This can be + // used in tests to synchronize resolver updates and avoid race conditions. + // When set, the function will be called before the stream enters + // the blocking state. + NewStreamWaitingForResolver = func() {} ) // HealthChecker defines the signature of the client-side LB channel health @@ -255,3 +273,21 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" + +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation +// embedding. +type EnforceSubConnEmbedding interface { + enforceSubConnEmbedding() +} + +// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation +// embedding. +type EnforceClientConnEmbedding interface { + enforceClientConnEmbedding() +} + +// Timer is an interface to allow injecting different time.Timer implementations +// during tests. +type Timer interface { + Stop() bool +} diff --git a/tools/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/tools/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 900bfb716..c4055bc00 100644 --- a/tools/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/tools/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -97,13 +97,11 @@ func hasNotPrintable(msg string) bool { return false } -// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : -// -// - key must contain one or more characters. 
-// - the characters in the key must be contained in [0-9 a-z _ - .]. -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. -// - the characters in the every value must be printable (in [%x20-%x7E]). -func ValidatePair(key string, vals ...string) error { +// ValidateKey validates a key with the following rules (pseudo-headers are +// skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +func ValidateKey(key string) error { // key should not be empty if key == "" { return fmt.Errorf("there is an empty key in the header") } @@ -119,6 +117,20 @@ func ValidatePair(key string, vals ...string) error { return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) } } + return nil +} + +// ValidatePair validates a key-value pair with the following rules +// (pseudo-headers are skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding +// value is performed. +// - the characters in every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + if err := ValidateKey(key); err != nil { + return err + } if strings.HasSuffix(key, "-bin") { return nil } diff --git a/tools/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/tools/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go new file mode 100644 index 000000000..1f61f1a49 --- /dev/null +++ b/tools/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go @@ -0,0 +1,54 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proxyattributes contains functions for getting and setting proxy +// attributes like the CONNECT address and user info. +package proxyattributes + +import ( + "net/url" + + "google.golang.org/grpc/resolver" +) + +type keyType string + +const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions") + +// Options holds the proxy connection details needed during the CONNECT +// handshake. +type Options struct { + User *url.Userinfo + ConnectAddr string +} + +// Set returns a copy of addr with opts set in its attributes. +func Set(addr resolver.Address, opts Options) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts) + return addr +} + +// Get returns the Options for the proxy [resolver.Address] and a boolean +// value indicating whether the attribute is present. The returned data +// should not be mutated.
+func Get(addr resolver.Address) (Options, bool) { + if a := addr.Attributes.Value(proxyOptionsKey); a != nil { + return a.(Options), true + } + return Options{}, false +} diff --git a/tools/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/tools/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go new file mode 100644 index 000000000..20b8fb098 --- /dev/null +++ b/tools/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -0,0 +1,427 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package delegatingresolver implements a resolver capable of resolving both +// target URIs and proxy addresses. +package delegatingresolver + +import ( + "fmt" + "net/http" + "net/url" + "sync" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/proxyattributes" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + logger = grpclog.Component("delegating-resolver") + // HTTPSProxyFromEnvironment will be overwritten in the tests + HTTPSProxyFromEnvironment = http.ProxyFromEnvironment +) + +// delegatingResolver manages both target URI and proxy address resolution by +// delegating these tasks to separate child resolvers. Essentially, it acts as +// an intermediary between the gRPC ClientConn and the child resolvers. +// +// It implements the [resolver.Resolver] interface. +type delegatingResolver struct { + target resolver.Target // parsed target URI to be resolved + cc resolver.ClientConn // gRPC ClientConn + proxyURL *url.URL // proxy URL, derived from proxy environment and target + + // We do not hold both mu and childMu in the same goroutine. Avoid holding + // both locks when calling into the child, as the child resolver may + // synchronously callback into the channel. + mu sync.Mutex // protects all the fields below + targetResolverState *resolver.State // state of the target resolver + proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured + + // childMu serializes calls into child resolvers. It also protects access to + // the following fields. + childMu sync.Mutex + targetResolver resolver.Resolver // resolver for the target URI, based on its scheme + proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured +} + +// nopResolver is a resolver that does nothing. +type nopResolver struct{} + +func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (nopResolver) Close() {} + +// proxyURLForTarget determines the proxy URL for the given address based on the +// environment. 
It can return the following: // - nil URL, nil error: No proxy is configured or the address is excluded // using the `NO_PROXY` environment variable or if req.URL.Host is // "localhost" (with or without a port number) // - nil URL, non-nil error: An error occurred while retrieving the proxy URL. // - non-nil URL, nil error: A proxy is configured, and the proxy URL was // retrieved successfully without any errors. +func proxyURLForTarget(address string) (*url.URL, error) { + req := &http.Request{URL: &url.URL{ + Scheme: "https", + Host: address, + }} + return HTTPSProxyFromEnvironment(req) +} + +// New creates a new delegating resolver that can create up to two child +// resolvers: +// - one to resolve the proxy address specified using the supported +// environment variables. This uses the registered resolver for the "dns" +// scheme. It is lazily built when a target resolver update contains at least +// one TCP address. +// - one to resolve the target URI using the resolver specified by the scheme +// in the target URI or specified by the user using the WithResolvers dial +// option. As a special case, if the target URI's scheme is "dns" and a +// proxy is specified using the supported environment variables, the target +// URI's path portion is used as the resolved address unless target +// resolution is enabled using the dial option. +func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) { + r := &delegatingResolver{ + target: target, + cc: cc, + proxyResolver: nopResolver{}, + targetResolver: nopResolver{}, + } + + var err error + r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + } + + // proxy is not configured or proxy address excluded using `NO_PROXY` env + // var, so only target resolver is used. + if r.proxyURL == nil { + return targetResolverBuilder.Build(target, cc, opts) + } + + if logger.V(2) { + logger.Infof("Proxy URL detected: %s", r.proxyURL) + } + + // Resolver updates from one child may trigger calls into the other. Block + // updates until the children are initialized. + r.childMu.Lock() + defer r.childMu.Unlock() + // When the scheme is 'dns' and target resolution on client is not enabled, + // resolution should be handled by the proxy, not the client. Therefore, we + // bypass the target resolver and store the unresolved target address. + if target.URL.Scheme == "dns" && !targetResolutionEnabled { + r.targetResolverState = &resolver.State{ + Addresses: []resolver.Address{{Addr: target.Endpoint()}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, + } + r.updateTargetResolverState(*r.targetResolverState) + return r, nil + } + wcc := &wrappingClientConn{ + stateListener: r.updateTargetResolverState, + parent: r, + } + if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil { + return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err) + } + return r, nil +} + +// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns" +// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds +// a resolver with a wrappingClientConn to capture resolved addresses.
+func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) { + proxyBuilder := resolver.Get("dns") + if proxyBuilder == nil { + panic("delegating_resolver: resolver for proxy not found for scheme dns") + } + url := *r.proxyURL + url.Scheme = "dns" + url.Path = "/" + r.proxyURL.Host + url.Host = "" // Clear the Host field to conform to the "dns:///" format + + proxyTarget := resolver.Target{URL: url} + wcc := &wrappingClientConn{ + stateListener: r.updateProxyResolverState, + parent: r, + } + return proxyBuilder.Build(proxyTarget, wcc, opts) +} + +func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) { + r.childMu.Lock() + defer r.childMu.Unlock() + r.targetResolver.ResolveNow(o) + r.proxyResolver.ResolveNow(o) +} + +func (r *delegatingResolver) Close() { + r.childMu.Lock() + defer r.childMu.Unlock() + r.targetResolver.Close() + r.targetResolver = nil + + r.proxyResolver.Close() + r.proxyResolver = nil +} + +func needsProxyResolver(state *resolver.State) bool { + for _, addr := range state.Addresses { + if !skipProxy(addr) { + return true + } + } + for _, endpoint := range state.Endpoints { + for _, addr := range endpoint.Addresses { + if !skipProxy(addr) { + return true + } + } + } + return false +} + +func skipProxy(address resolver.Address) bool { + // Avoid proxy when network is not tcp. + networkType, ok := networktype.Get(address) + if !ok { + networkType, _ = transport.ParseDialTarget(address.Addr) + } + if networkType != "tcp" { + return true + } + + req := &http.Request{URL: &url.URL{ + Scheme: "https", + Host: address.Addr, + }} + // Avoid proxy when address included in `NO_PROXY` environment variable or + // fails to get the proxy address. + url, err := HTTPSProxyFromEnvironment(req) + if err != nil || url == nil { + return true + } + return false +} + +// updateClientConnStateLocked constructs a combined list of addresses by +// pairing each proxy address with every target address of type TCP. For each +// pair, it creates a new [resolver.Address] using the proxy address and +// attaches the corresponding target address and user info as attributes. Target +// addresses that are not of type TCP are appended to the list as-is. The +// function returns nil if either resolver has not yet provided an update, and +// returns the result of ClientConn.UpdateState once both resolvers have +// provided at least one update. +func (r *delegatingResolver) updateClientConnStateLocked() error { + if r.targetResolverState == nil || r.proxyAddrs == nil { + return nil + } + + // If multiple resolved proxy addresses are present, we send only the + // unresolved proxy host and let net.Dial handle the proxy host name + // resolution when creating the transport. Sending all resolved addresses + // would increase the number of addresses passed to the ClientConn and + // subsequently to load balancing (LB) policies like Round Robin, leading + // to additional TCP connections. However, if there's only one resolved + // proxy address, we send it directly, as it doesn't affect the address + // count returned by the target resolver and the address count sent to the + // ClientConn. 
+ var proxyAddr resolver.Address + if len(r.proxyAddrs) == 1 { + proxyAddr = r.proxyAddrs[0] + } else { + proxyAddr = resolver.Address{Addr: r.proxyURL.Host} + } + var addresses []resolver.Address + for _, targetAddr := range (*r.targetResolverState).Addresses { + if skipProxy(targetAddr) { + addresses = append(addresses, targetAddr) + continue + } + addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{ + User: r.proxyURL.User, + ConnectAddr: targetAddr.Addr, + })) + } + + // For each target endpoint, construct a new [resolver.Endpoint] that + // includes all addresses from all proxy endpoints and the addresses from + // that target endpoint, preserving the number of target endpoints. + var endpoints []resolver.Endpoint + for _, endpt := range (*r.targetResolverState).Endpoints { + var addrs []resolver.Address + for _, targetAddr := range endpt.Addresses { + // Avoid proxy when network is not tcp. + if skipProxy(targetAddr) { + addrs = append(addrs, targetAddr) + continue + } + for _, proxyAddr := range r.proxyAddrs { + addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{ + User: r.proxyURL.User, + ConnectAddr: targetAddr.Addr, + })) + } + } + endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs}) + } + // Use the targetResolverState for its service config and attributes + // contents. The state update is only sent after both the target and proxy + // resolvers have sent their updates, and curState has been updated with the + // combined addresses. + curState := *r.targetResolverState + curState.Addresses = addresses + curState.Endpoints = endpoints + return r.cc.UpdateState(curState) +} + +// updateProxyResolverState updates the proxy resolver state by storing proxy +// addresses and endpoints, marking the resolver as ready, and triggering a +// state update if both proxy and target resolvers are ready. If the ClientConn +// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It +// is a StateListener function of wrappingClientConn passed to the proxy +// resolver. +func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error { + r.mu.Lock() + defer r.mu.Unlock() + if logger.V(2) { + logger.Infof("Addresses received from proxy resolver: %s", state.Addresses) + } + if len(state.Endpoints) > 0 { + // We expect exactly one address per endpoint because the proxy resolver + // uses "dns" resolution. + r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints)) + for _, endpoint := range state.Endpoints { + r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...) + } + } else if state.Addresses != nil { + r.proxyAddrs = state.Addresses + } else { + r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received + } + err := r.updateClientConnStateLocked() + // Another possible approach was to block until updates are received from + // both resolvers. But this is not used because calling `New()` triggers + // `Build()` for the first resolver, which calls `UpdateState()`. And the + // second resolver hasn't sent an update yet, so it would cause `New()` to + // block indefinitely. + if err != nil { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.targetResolver != nil { + r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() + } + return err +} + +// updateTargetResolverState is the StateListener function provided to the +// target resolver via wrappingClientConn. 
It updates the resolver state and +// marks the target resolver as ready. If the update includes at least one TCP +// address and the proxy resolver has not yet been constructed, it initializes +// the proxy resolver. A combined state update is triggered once both resolvers +// are ready. If all addresses are non-TCP, it proceeds without waiting for the +// proxy resolver. If ClientConn.UpdateState returns a non-nil error, +// ResolveNow() is called on the proxy resolver. +func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error { + r.mu.Lock() + defer r.mu.Unlock() + + if logger.V(2) { + logger.Infof("Addresses received from target resolver: %v", state.Addresses) + } + r.targetResolverState = &state + // If all addresses returned by the target resolver have a non-TCP network + // type, or are listed in the `NO_PROXY` environment variable, do not wait + // for proxy update. + if !needsProxyResolver(r.targetResolverState) { + return r.cc.UpdateState(*r.targetResolverState) + } + + // The proxy resolver may be rebuilt multiple times, specifically each time + // the target resolver sends an update, even if the target resolver is built + // successfully but building the proxy resolver fails. + if len(r.proxyAddrs) == 0 { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if _, ok := r.proxyResolver.(nopResolver); !ok { + return + } + proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{}) + if err != nil { + r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err)) + return + } + r.proxyResolver = proxyResolver + }() + } + + err := r.updateClientConnStateLocked() + if err != nil { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.proxyResolver != nil { + r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() + } + return nil +} + +// wrappingClientConn serves as an intermediary between the parent ClientConn +// and the child resolvers created here. It implements the resolver.ClientConn +// interface and is passed in that capacity to the child resolvers. +type wrappingClientConn struct { + // Callback to deliver resolver state updates + stateListener func(state resolver.State) error + parent *delegatingResolver +} + +// UpdateState receives resolver state updates and forwards them to the +// appropriate listener function (either for the proxy or target resolver). +func (wcc *wrappingClientConn) UpdateState(state resolver.State) error { + return wcc.stateListener(state) +} + +// ReportError intercepts errors from the child resolvers and passes them to +// ClientConn. +func (wcc *wrappingClientConn) ReportError(err error) { + wcc.parent.cc.ReportError(err) +} + +// NewAddress intercepts the new resolved address from the child resolvers and +// passes them to ClientConn. +func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) { + wcc.UpdateState(resolver.State{Addresses: addrs}) +} + +// ParseServiceConfig parses the provided service config and returns an object +// that provides the parsed config. 
+func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult { + return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON) +} diff --git a/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 374c12fb7..ada5251cf 100644 --- a/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,8 +24,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" + "net/netip" "os" "strconv" "strings" @@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. - if ipAddr, ok := formatIP(host); ok { + if ipAddr, err := formatIP(host); err == nil { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -131,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig, } d.resolver, err = internal.NewNetResolver(target.URL.Host) @@ -180,8 +181,8 @@ type dnsResolver struct { // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). - wg sync.WaitGroup - disableServiceConfig bool + wg sync.WaitGroup + enableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this @@ -260,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -322,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -345,25 +346,25 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if len(srv) > 0 { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } - if !d.disableServiceConfig { + if d.enableServiceConfig { state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. 
+// formatIP returns an error if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and error = nil. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false +// error = nil. +func formatIP(addr string) (string, error) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", err } - if ip.To4() != nil { - return addr, true + if ip.Is4() { + return addr, nil } - return "[" + addr + "]", true + return "[" + addr + "]", nil } // parseTarget takes the user input target string and default port, returns @@ -379,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if ip := net.ParseIP(target); ip != nil { + if _, err := netip.ParseAddr(target); err == nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -427,7 +428,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/tools/vendor/google.golang.org/grpc/internal/status/status.go b/tools/vendor/google.golang.org/grpc/internal/status/status.go index 1186f1e9a..aad171cd0 100644 --- a/tools/vendor/google.golang.org/grpc/internal/status/status.go +++ b/tools/vendor/google.golang.org/grpc/internal/status/status.go @@ -236,3 +236,11 @@ func IsRestrictedControlPlaneCode(s *Status) bool { } return false } + +// RawStatusProto returns the internal protobuf message for use by gRPC itself. +func RawStatusProto(s *Status) *spb.Status { + if s == nil { + return nil + } + return s.s +} diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/tools/vendor/google.golang.org/grpc/internal/transport/client_stream.go new file mode 100644 index 000000000..ccc0e017e --- /dev/null +++ b/tools/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync/atomic" + + "golang.org/x/net/http2" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ClientStream implements streaming functionality for a gRPC client. +type ClientStream struct { + *Stream // Embed for common stream functionality. + + ct *http2Client + done chan struct{} // closed at the end of stream to unblock writers. + doneFunc func() // invoked at the end of stream. + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. 
Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). + headerValid bool + header metadata.MD // the received header metadata + noHeaders bool // set if the client never received headers (set only after the stream is done). + + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream + + status *status.Status // the status error received from the server +} + +// Read reads an n byte message from the input stream. +func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.ct.incrMsgRecv() + } + return b, err +} + +// Close closes the stream and propagates err to any readers. +func (s *ClientStream) Close(err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.ct.write(s, hdr, data, opts) +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *ClientStream) BytesReceived() bool { + return s.bytesReceived.Load() +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *ClientStream) Unprocessed() bool { + return s.unprocessed.Load() +} + +func (s *ClientStream) waitOnHeader() { + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.Close(ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ClientStream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *ClientStream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. Acquires the key-value +// pairs of header metadata once it is available. It blocks until i) the +// metadata is ready or ii) there is no header metadata or iii) the stream is +// canceled/expired. +func (s *ClientStream) Header() (metadata.MD, error) { + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. +func (s *ClientStream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. 
+func (s *ClientStream) Status() *status.Status { + return s.status +} diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ef72fbb3a..a2831e5d0 100644 --- a/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,6 +40,13 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { e.SetMaxDynamicTableSizeLimit(v) } +// itemNodePool is used to reduce heap allocations. +var itemNodePool = sync.Pool{ + New: func() any { + return &itemNode{} + }, +} + type itemNode struct { it any next *itemNode @@ -51,7 +58,9 @@ type itemList struct { } func (il *itemList) enqueue(i any) { - n := &itemNode{it: i} + n := itemNodePool.Get().(*itemNode) + n.next = nil + n.it = i if il.tail == nil { il.head, il.tail = n, n return @@ -71,7 +80,9 @@ func (il *itemList) dequeue() any { return nil } i := il.head.it + temp := il.head il.head = il.head.next + itemNodePool.Put(temp) if il.head == nil { il.tail = nil } @@ -146,10 +157,11 @@ type earlyAbortStream struct { func (*earlyAbortStream) isTransportResponseFrame() bool { return false } type dataFrame struct { - streamID uint32 - endStream bool - h []byte - reader mem.Reader + streamID uint32 + endStream bool + h []byte + data mem.BufferSlice + processing bool // onEachWrite is called every time // a part of data is written out. onEachWrite func() @@ -234,6 +246,7 @@ type outStream struct { itl *itemList bytesOutStanding int wq *writeQuota + reader mem.Reader next *outStream prev *outStream @@ -461,7 +474,9 @@ func (c *controlBuffer) finish() { v.onOrphaned(ErrConnClosing) } case *dataFrame: - _ = v.reader.Close() + if !v.processing { + v.data.Free() + } } } @@ -650,10 +665,11 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + reader: mem.BufferSlice{}.Reader(), } l.estdStreams[h.streamID] = str } @@ -685,10 +701,11 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { } // Case 2: Client wants to originate stream. str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + reader: mem.BufferSlice{}.Reader(), } return l.originateStream(str, h) } @@ -790,10 +807,13 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // a RST_STREAM before stream initialization thus the stream might // not be established yet. delete(l.estdStreams, c.streamID) + str.reader.Close() str.deleteSelf() for head := str.itl.dequeueAll(); head != nil; head = head.next { if df, ok := head.it.(*dataFrame); ok { - _ = df.reader.Close() + if !df.processing { + df.data.Free() + } } } } @@ -928,7 +948,13 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } + reader := str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + if !dataItem.processing { + dataItem.processing = true + str.reader.Reset(dataItem.data) + dataItem.data.Free() + } // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. 
// Every dataFrame has two buffers; h that keeps grpc-message header and data @@ -936,13 +962,13 @@ func (l *loopyWriter) processData() (bool, error) { // from data is copied to h to make as big as the maximum possible HTTP2 frame // size. - if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame + if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = dataItem.reader.Close() + _ = reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -971,8 +997,8 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, dataItem.reader.Remaining()) - remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + dSize := min(maxSize-hSize, reader.Remaining()) + remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize var buf *[]byte @@ -993,7 +1019,7 @@ func (l *loopyWriter) processData() (bool, error) { defer pool.Put(buf) copy((*buf)[:hSize], dataItem.h) - _, _ = dataItem.reader.Read((*buf)[hSize:]) + _, _ = reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1014,7 +1040,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = dataItem.reader.Close() + _ = reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/tools/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index 97198c515..dfc0f224e 100644 --- a/tools/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/tools/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 { func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 + if f.unacked < f.limit/4 { f.updateEffectiveWindowSize() - return w + return 0 } - f.updateEffectiveWindowSize() - return 0 + return f.reset() } func (f *trInFlow) reset() uint32 { diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go index ce878693b..d954a64c3 100644 --- a/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error { } } -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { +func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() @@ -277,11 +277,13 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if err == nil { // transport has not been closed // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. 
+ s.hdrMu.Lock() for _, sh := range ht.stats { sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } + s.hdrMu.Unlock() } ht.Close(errors.New("finished writing status")) return err @@ -289,14 +291,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { +func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +319,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +335,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -357,7 +359,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +387,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -408,16 +410,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ @@ -471,9 +475,7 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) IncrMsgSent() {} - -func (ht *serverHandlerTransport) IncrMsgRecv() {} +func (ht *serverHandlerTransport) incrMsgRecv() {} func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") @@ -498,5 +500,5 @@ func mapRecvMsgError(err error) error { if strings.Contains(err.Error(), "body closed by handler") { return status.Error(codes.Canceled, err.Error()) } - return connectionErrorf(true, err, err.Error()) + return connectionErrorf(true, err, "%s", err.Error()) } diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 62b81885d..5467fe971 100644 --- a/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -43,6 +43,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/proxyattributes" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -123,7 +124,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -153,7 +154,7 @@ type http2Client struct { logger *grpclog.PrefixLogger } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) { address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { @@ -175,10 +176,10 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error return fn(ctx, address) } if !ok { - networkType, address = parseDialTarget(address) + networkType, address = ParseDialTarget(address) } - if networkType == "tcp" && useProxy { - return proxyDial(ctx, address, grpcUA) + if opts, present := proxyattributes.Get(addr); present { + return proxyDial(ctx, addr, grpcUA, opts) } return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } @@ -199,10 +200,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -217,7 +218,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // address specific arbitrary data to reach custom dialers and credential handshakers. connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) @@ -308,11 +309,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts scheme = "https" } } - dynamicWindow := true icwz := int32(initialWindowSize) if opts.InitialConnWindowSize >= defaultWindowSize { icwz = opts.InitialConnWindowSize - dynamicWindow = false } writeBufSize := opts.WriteBufferSize readBufSize := opts.ReadBufferSize @@ -339,7 +338,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -380,9 +379,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize - dynamicWindow = false } - if dynamicWindow { + if !opts.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, @@ -480,17 +478,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
- s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -506,7 +506,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - t.CloseStream(s, err) + s.Close(err) }, }, windowHandler: func(n int) { @@ -542,7 +542,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + ctxWithRequestInfo := credentials.NewContextWithRequestInfo(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -589,6 +589,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := time.Until(dl) + if timeout <= 0 { + return nil, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) } for k, v := range authData { @@ -597,12 +600,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -738,7 +735,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -752,6 +749,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, callHdr = &newCallHdr } + // The authority specified via the `CallAuthority` CallOption takes the + // highest precedence when determining the `:authority` header. It overrides + // any value present in the Host field of CallHdr. Before applying this + // override, the authority string is validated. If the credentials do not + // implement the AuthorityValidator interface, or if validation fails, the + // RPC is failed with a status code of `UNAVAILABLE`. 
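// The authority-override block that follows relies on an optional
// interface on the connection's AuthInfo. A hedged, self-contained
// sketch of what a credentials implementation might provide (the
// allowlist policy and type names are invented for illustration; only
// the credentials.AuthorityValidator assertion and the
// ValidateAuthority call mirror the hunk):
package main

import (
	"fmt"

	"google.golang.org/grpc/credentials"
)

type allowlistAuthInfo struct {
	allowed map[string]bool
}

func (allowlistAuthInfo) AuthType() string { return "allowlist" }

// ValidateAuthority is what the transport invokes, via a type assertion
// to credentials.AuthorityValidator, before honoring CallHdr.Authority;
// a non-nil error fails the RPC with codes.Unavailable.
func (a allowlistAuthInfo) ValidateAuthority(authority string) error {
	if !a.allowed[authority] {
		return fmt.Errorf("authority %q not permitted", authority)
	}
	return nil
}

func main() {
	var ai credentials.AuthInfo = allowlistAuthInfo{allowed: map[string]bool{"svc.internal": true}}
	if v, ok := ai.(credentials.AuthorityValidator); ok {
		fmt.Println(v.ValidateAuthority("svc.internal"))  // <nil>
		fmt.Println(v.ValidateAuthority("other.example")) // rejected
	}
}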
+ if callHdr.Authority != "" { + auth, ok := t.authInfo.(credentials.AuthorityValidator) + if !ok { + return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "credentials type %q does not implement the AuthorityValidator interface, but authority override specified with CallAuthority call option", t.authInfo.AuthType())} + } + if err := auth.ValidateAuthority(callHdr.Authority); err != nil { + return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "failed to validate authority %q : %v", callHdr.Authority, err)} + } + newCallHdr := *callHdr + newCallHdr.Host = callHdr.Authority + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -763,7 +779,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return } // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. @@ -908,21 +924,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return s, nil } -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1085,39 +1087,37 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { - reader := data.Reader() - +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { - _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { - _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - reader: reader, + data: data, } - if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota. 
+ if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return err } } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } + t.incrMsgSent() return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1127,7 +1127,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1136,7 +1136,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1242,7 +1242,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1258,7 +1258,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode = codes.DeadlineExceeded } } - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) + st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode) + t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false) } func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { @@ -1383,11 +1384,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) + stream.unprocessed.Store(true) streamsToClose = append(streamsToClose, stream) } } @@ -1406,8 +1407,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { // the caller. 
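// The unprocessed/bytesReceived edits above are a mechanical migration
// from uint32 flags to the typed sync/atomic.Bool introduced in Go 1.19.
// A before/after sketch (toy struct names, for illustration):
package main

import (
	"fmt"
	"sync/atomic"
)

type oldStream struct{ unprocessed uint32 }      // 0 or 1, by convention
type newStream struct{ unprocessed atomic.Bool } // type-enforced

func main() {
	o := &oldStream{}
	atomic.StoreUint32(&o.unprocessed, 1)
	fmt.Println(atomic.LoadUint32(&o.unprocessed) == 1) // true

	n := &newStream{}
	n.unprocessed.Store(true)         // same memory ordering, clearer intent
	fmt.Println(n.unprocessed.Load()) // true
}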
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason - switch f.ErrCode { - case http2.ErrCodeEnhanceYourCalm: + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { if string(f.DebugData()) == "too_many_pings" { t.goAwayReason = GoAwayTooManyPings } @@ -1439,7 +1439,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) + s.bytesReceived.Store(true) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1809,14 +1809,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) + } } -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) + } } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 584b50fe5..83cee314c 100644 --- a/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -35,9 +35,11 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" @@ -111,7 +113,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -130,6 +132,10 @@ type http2Server struct { maxStreamID uint32 // max stream ID ever seen logger *grpclog.PrefixLogger + // setResetPingStrikes is stored as a closure instead of making this a + // method on http2Server to avoid a heap allocation when converting a method + // to a closure for passing to frames objects. 
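// The setResetPingStrikes comment above refers to Go's method-value
// semantics: every evaluation of t.method as a func() allocates a fresh
// closure over t, while a func-typed field is built once and then only
// copied. A toy illustration (hypothetical types, not the real server):
package main

type server struct {
	pingStrikes      uint32
	resetPingStrikes func() // constructed once, reused for every frame
}

func newServer() *server {
	s := &server{}
	s.resetPingStrikes = func() { s.pingStrikes = 0 } // one allocation, ever
	return s
}

func (s *server) reset() { s.pingStrikes = 0 }

type frame struct{ onEachWrite func() }

func main() {
	s := newServer()
	_ = frame{onEachWrite: s.resetPingStrikes} // copies an existing func value
	_ = frame{onEachWrite: s.reset}            // method value: a new closure, heap-allocated once it escapes
}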
+ setResetPingStrikes func() } // NewServerTransport creates a http2 transport with conn and configuration @@ -174,16 +180,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, Val: config.MaxStreams, }) } - dynamicWindow := true iwz := int32(initialWindowSize) if config.InitialWindowSize >= defaultWindowSize { iwz = config.InitialWindowSize - dynamicWindow = false } icwz := int32(initialWindowSize) if config.InitialConnWindowSize >= defaultWindowSize { icwz = config.InitialConnWindowSize - dynamicWindow = false } if iwz != defaultWindowSize { isettings = append(isettings, http2.Setting{ @@ -256,7 +259,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -264,6 +267,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, initialWindowSize: iwz, bufferPool: config.BufferPool, } + t.setResetPingStrikes = func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { czSecurity = au.GetSecurityValue() @@ -283,7 +289,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.logger = prefixLoggerForServerTransport(t) t.controlBuf = newControlBuffer(t.done) - if dynamicWindow { + if !config.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, @@ -359,7 +365,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,11 +391,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -537,12 +545,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. 
if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } } t.mu.Lock() if t.state != reachable { @@ -568,7 +570,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 405, + httpStatus: http.StatusMethodNotAllowed, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -589,7 +591,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 200, + httpStatus: http.StatusOK, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, @@ -598,10 +600,41 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade return nil } } + + if s.ctx.Err() != nil { + t.mu.Unlock() + // Early abort in case the timeout was zero or so low it already fired. + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusOK, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()), + rst: !frame.StreamEnded(), + }) + return nil + } + t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } + + // Start a timer to close the stream on reaching the deadline. + if timeoutSet { + // We need to wait for s.cancel to be updated before calling + // t.closeStream to avoid data races. + cancelUpdated := make(chan struct{}) + timer := internal.TimeAfterFunc(timeout, func() { + <-cancelUpdated + t.closeStream(s, true, http2.ErrCodeCancel, false) + }) + oldCancel := s.cancel + s.cancel = func() { + oldCancel() + timer.Stop() + } + close(cancelUpdated) + } t.mu.Unlock() if channelz.IsOn() { t.channelz.SocketMetrics.StreamsStarted.Add(1) @@ -634,7 +667,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +731,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +749,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. 
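// The cancelUpdated channel in the operateHeaders hunk above is a
// publish-before-use handshake: internal.TimeAfterFunc may fire its
// callback on another goroutine immediately, so the callback blocks
// until the wrapped cancel has been installed, avoiding a data race on
// s.cancel. The same idiom with the standard library's timer:
package main

import (
	"fmt"
	"time"
)

func main() {
	cancel := func() { fmt.Println("stream torn down") }

	ready := make(chan struct{})
	timer := time.AfterFunc(10*time.Millisecond, func() {
		<-ready  // wait until the writer below has finished
		cancel() // safe: the wrapped value is now visible
	})
	old := cancel
	cancel = func() { old(); timer.Stop() } // also stops the deadline timer
	close(ready)                            // publish the update to the callback

	time.Sleep(50 * time.Millisecond) // let the timer fire for the demo
}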
-func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +759,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -963,7 +996,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -973,7 +1006,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1002,11 +1035,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { return nil } -func (t *http2Server) setResetPingStrikes() { - atomic.StoreUint32(&t.resetPingStrikes, 1) -} - -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1042,11 +1071,11 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { return nil } -// WriteStatus sends stream status to the client and terminates the stream. +// writeStatus sends stream status to the client and terminates the stream. // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1070,7 +1099,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := st.Proto(); p != nil && len(p.Details) > 0 { + if p := istatus.RawStatusProto(st); len(p.GetDetails()) > 0 { // Do not use the user's grpc-status-details-bin (if present) if we are // even attempting to set our own. delete(s.trailer, grpcStatusDetailsBinHeader) @@ -1117,18 +1146,14 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). 
-func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { - reader := data.Reader() - +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { - _ = reader.Close() + if err := t.writeHeader(s, nil); err != nil { return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - _ = reader.Close() return t.streamContextErr(s) } } @@ -1136,17 +1161,19 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Opti df := &dataFrame{ streamID: s.id, h: hdr, - reader: reader, + data: data, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return t.streamContextErr(s) } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } + t.incrMsgSent() return nil } @@ -1276,8 +1303,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { delete(t.activeStreams, s.id) @@ -1297,7 +1323,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1321,12 +1347,15 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. s.cancel() + // We can't return early even if the stream's state is "done" as the state + // might have been set by the `finishStream` method. Deleting the stream via + // `finishStream` can get blocked on flow control. 
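// The data.Ref()/data.Free() pairing in the write path above is a
// reference-count hand-off: the caller keeps its own reference to the
// message, write takes one for the queued frame, and releases it only on
// the path where loopy will never consume the frame. A toy model of that
// contract (invented queue type; the real counterpart is controlBuf.put):
package main

import (
	"errors"
	"fmt"
)

type refCounted struct{ refs int }

func (r *refCounted) Ref()  { r.refs++ }
func (r *refCounted) Free() { r.refs-- }

func enqueue(q chan *refCounted, data *refCounted) error {
	data.Ref() // the queue now co-owns data
	select {
	case q <- data:
		return nil // consumer will Free after writing the frame
	default:
		data.Free() // consumer will never see it: drop our reference
		return errors.New("queue full or closed")
	}
}

func main() {
	q := make(chan *refCounted, 1)
	d := &refCounted{refs: 1}          // caller's reference
	fmt.Println(enqueue(q, d), d.refs) // <nil> 2
	(<-q).Free()                       // consumer done with the frame
	d.Free()                           // caller done; memory can be reused
	fmt.Println(d.refs)                // 0
}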
s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1415,14 +1444,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Server) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +func (t *http2Server) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) + } } -func (t *http2Server) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +func (t *http2Server) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) + } } func (t *http2Server) getOutFlowWindow() int64 { @@ -1455,7 +1488,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rand.Int63n(2*r) - r + j := rand.Int64N(2*r) - r return time.Duration(j) } diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go b/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go index 3613d7b64..e3663f87f 100644 --- a/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -196,11 +196,11 @@ func decodeTimeout(s string) (time.Duration, error) { if !ok { return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) } - t, err := strconv.ParseInt(s[:size-1], 10, 64) + t, err := strconv.ParseUint(s[:size-1], 10, 64) if err != nil { return 0, err } - const maxHours = math.MaxInt64 / int64(time.Hour) + const maxHours = math.MaxInt64 / uint64(time.Hour) if d == time.Hour && t > maxHours { // This timeout would overflow math.MaxInt64; clamp it. return time.Duration(math.MaxInt64), nil @@ -439,8 +439,8 @@ func getWriteBufferPool(size int) *sync.Pool { return pool } -// parseDialTarget returns the network and address to pass to dialer. -func parseDialTarget(target string) (string, string) { +// ParseDialTarget returns the network and address to pass to dialer. +func ParseDialTarget(target string) (string, string) { net := "tcp" m1 := strings.Index(target, ":") m2 := strings.Index(target, ":/") diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go b/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go index 54b224436..d77384595 100644 --- a/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -30,34 +30,16 @@ import ( "net/url" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/proxyattributes" + "google.golang.org/grpc/resolver" ) const proxyAuthHeaderKey = "Proxy-Authorization" -var ( - // The following variable will be overwritten in the tests. - httpProxyFromEnvironment = http.ProxyFromEnvironment -) - -func mapAddress(address string) (*url.URL, error) { - req := &http.Request{ - URL: &url.URL{ - Scheme: "https", - Host: address, - }, - } - url, err := httpProxyFromEnvironment(req) - if err != nil { - return nil, err - } - return url, nil -} - // To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. -// It's possible that this reader reads more than what's need for the response and stores -// those bytes in the buffer. 
-// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the -// bytes in the buffer. +// It's possible that this reader reads more than what's need for the response +// and stores those bytes in the buffer. bufConn wraps the original net.Conn +// and the bufio.Reader to make sure we don't lose the bytes in the buffer. type bufConn struct { net.Conn r io.Reader @@ -72,7 +54,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -81,15 +63,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri req := &http.Request{ Method: http.MethodConnect, - URL: &url.URL{Host: backendAddr}, + URL: &url.URL{Host: opts.ConnectAddr}, Header: map[string][]string{"User-Agent": {grpcUA}}, } - if t := proxyURL.User; t != nil { - u := t.Username() - p, _ := t.Password() + if user := opts.User; user != nil { + u := user.Username() + p, _ := user.Password() req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) } - if err := sendHTTPRequest(ctx, req, conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } @@ -117,28 +98,13 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri return conn, nil } -// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy -// is necessary, dials, does the HTTP CONNECT handshake, and returns the -// connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { - newAddr := addr - proxyURL, err := mapAddress(addr) - if err != nil { - return nil, err - } - if proxyURL != nil { - newAddr = proxyURL.Host - } - - conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) +// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake. +func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) { + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr) if err != nil { return nil, err } - if proxyURL == nil { - // proxy is disabled if proxyURL is nil. - return conn, err - } - return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return doHTTPConnectHandshake(ctx, conn, grpcUA, opts) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/tools/vendor/google.golang.org/grpc/internal/transport/server_stream.go new file mode 100644 index 000000000..cf8da0b52 --- /dev/null +++ b/tools/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st internalServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + // cancel is invoked at the end of stream to cancel ctx. It also stops the + // timer for monitoring the rpc deadline if configured. + cancel func() + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. + clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent atomic.Bool // atomically set when the headers are sent out. +} + +// Read reads an n byte message from the input stream. +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.st.incrMsgRecv() + } + return b, err +} + +// SendHeader sends the header metadata for the given stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.writeHeader(s, md) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.st.write(s, hdr, data, opts) +} + +// WriteStatus sends the status of a stream to the client. WriteStatus is +// the final call made on a stream and always occurs. +func (s *ServerStream) WriteStatus(st *status.Status) error { + return s.st.writeStatus(s, st) +} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return s.headerSent.Load() +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. +func (s *ServerStream) updateHeaderSent() bool { + return s.headerSent.Swap(true) +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. 
+func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. +func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. 
+func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/tools/vendor/google.golang.org/grpc/internal/transport/transport.go b/tools/vendor/google.golang.org/grpc/internal/transport/transport.go index e12cb0bc9..7dd53e80a 100644 --- a/tools/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/tools/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -39,7 +38,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -133,7 +131,7 @@ type recvBufferReader struct { err error } -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } @@ -142,9 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { - n, r.err = r.readHeaderClient(header) + n, r.err = r.readMessageHeaderClient(header) } else { - n, r.err = r.readHeader(header) + n, r.err = r.readMessageHeader(header) } return n, r.err } @@ -174,12 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -192,7 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -213,9 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -246,7 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { if m.buffer != nil { @@ -288,14 +286,8 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -303,58 +295,17 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -369,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. 
- return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. 
@@ -480,90 +333,31 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// ReadHeader reads data into the provided header slice from the stream. It -// first checks if there was an error during a previous read operation and +// ReadMessageHeader reads data into the provided header slice from the stream. +// It first checks if there was an error during a previous read operation and // returns it if present. It then requests a read operation for the length of // the header. It continues to read from the stream until the entire header -// slice is filled or an error occurs. If an `io.EOF` error is encountered -// with partially read data, it is converted to `io.ErrUnexpectedEOF` to -// indicate an unexpected end of the stream. The method returns any error -// encountered during the read process or nil if the header was successfully -// read. -func (s *Stream) ReadHeader(header []byte) (err error) { +// slice is filled or an error occurs. If an `io.EOF` error is encountered with +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an +// unexpected end of the stream. The method returns any error encountered during +// the read process or nil if the header was successfully read. 
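// For orientation, the renamed ReadMessageHeader below serves gRPC's
// length-prefixed message framing: a 1-byte compressed flag plus a
// 4-byte big-endian length precede every payload. A standalone sketch of
// that parse over a plain io.Reader (illustrative only; the transport
// does this against the stream's flow-controlled buffers):
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func readMessage(r io.Reader) (compressed bool, payload []byte, err error) {
	var hdr [5]byte
	if _, err = io.ReadFull(r, hdr[:]); err != nil { // cf. ReadMessageHeader
		return false, nil, err
	}
	length := binary.BigEndian.Uint32(hdr[1:])
	payload = make([]byte, length)
	_, err = io.ReadFull(r, payload) // cf. Stream.read(n)
	return hdr[0] == 1, payload, err
}

func main() {
	var buf bytes.Buffer
	buf.WriteByte(0)                                    // uncompressed
	_ = binary.Write(&buf, binary.BigEndian, uint32(5)) // message length
	buf.WriteString("hello")
	fmt.Println(readMessage(&buf)) // false [104 101 108 108 111] <nil>
}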
+func (s *Stream) ReadMessageHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { - n, err := s.trReader.ReadHeader(header) + n, err := s.trReader.ReadMessageHeader(header) header = header[n:] if len(header) == 0 { err = nil @@ -579,7 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. -func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { +func (s *Stream) read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -619,8 +413,8 @@ type transportReader struct { er error } -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { + n, err := t.reader.ReadMessageHeader(header) if err != nil { t.er = err return 0, err @@ -639,17 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -683,6 +466,7 @@ type ServerConfig struct { MaxHeaderListSize *uint32 HeaderTableSize *uint32 BufferPool mem.BufferPool + StaticWindowSize bool } // ConnectOptions covers all relevant options for communicating with the server. @@ -719,21 +503,15 @@ type ConnectOptions struct { ChannelzParent *channelz.SubChannel // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 - // UseProxy specifies if a proxy should be used. - UseProxy bool // The mem.BufferPool to use when reading/writing to the wire. BufferPool mem.BufferPool + // StaticWindowSize controls whether dynamic window sizing is enabled. + StaticWindowSize bool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message +// WriteOptions provides additional hints and information for message // transmission. -type Options struct { +type WriteOptions struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -765,6 +543,11 @@ type CallHdr struct { PreviousAttempts int // value of grpc-previous-rpc-attempts header to set DoneFunc func() // called when the stream is finished + + // Authority is used to explicitly override the `:authority` header. If set, + // this value takes precedence over the Host field and will be used as the + // value for the `:authority` header. 
+ Authority string } // ClientTransport is the common interface for all gRPC client-side transport @@ -782,18 +565,8 @@ type ClientTransport interface { // It does not block. GracefulClose() - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -813,12 +586,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -828,19 +595,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error + HandleStreams(context.Context, func(*ServerStream)) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -852,12 +607,14 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +} - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() +type internalServerTransport interface { + ServerTransport + writeHeader(s *ServerStream, md metadata.MD) error + write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error + writeStatus(s *ServerStream, st *status.Status) error + incrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go b/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go index 228e9c2f2..af510d20c 100644 --- a/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -22,6 +22,11 @@ import ( "io" ) +const ( + // 32 KiB is what io.Copy uses. 
+ readAllBufSize = 32 * 1024 +) + // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -132,6 +137,9 @@ type Reader interface { Close() error // Remaining returns the number of unread bytes remaining in the slice. Remaining() int + // Reset frees the currently held buffer slice and starts reading from the + // provided slice. This allows reusing the reader object. + Reset(s BufferSlice) } type sliceReader struct { @@ -145,6 +153,14 @@ func (r *sliceReader) Remaining() int { return r.len } +func (r *sliceReader) Reset(s BufferSlice) { + r.data.Free() + s.Ref() + r.data = s + r.len = s.Len() + r.bufferIdx = 0 +} + func (r *sliceReader) Close() error { r.data.Free() r.data = nil @@ -219,8 +235,58 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. +func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. + // Use all of it if that's the case. 
+ *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/tools/vendor/google.golang.org/grpc/picker_wrapper.go b/tools/vendor/google.golang.org/grpc/picker_wrapper.go index bdaa2130e..aa52bfe95 100644 --- a/tools/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/tools/vendor/google.golang.org/grpc/picker_wrapper.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -48,14 +47,11 @@ type pickerGeneration struct { // actions and unblock when there's a picker update. type pickerWrapper struct { // If pickerGen holds a nil pointer, the pickerWrapper is closed. - pickerGen atomic.Pointer[pickerGeneration] - statsHandlers []stats.Handler // to record blocking picker calls + pickerGen atomic.Pointer[pickerGeneration] } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - pw := &pickerWrapper{ - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + pw := &pickerWrapper{} pw.pickerGen.Store(&pickerGeneration{ blockingCh: make(chan struct{}), }) @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { } } +type pick struct { + transport transport.ClientTransport // the selected transport + result balancer.PickResult // the contents of the pick from the LB policy + blocked bool // set if a picker call queued for a new picker +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) { var ch chan struct{} var lastPickErr error + pickBlocked := false for { pg := pw.pickerGen.Load() if pg == nil { - return nil, balancer.PickResult{}, ErrClientConnClosing + return pick{}, ErrClientConnClosing } if pg.picker == nil { ch = pg.blockingCh @@ -123,13 +126,13 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else { - errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) + errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err()) } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return pick{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return pick{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // In the second case, the only way it will get to this conditional is // if there is a new picker. if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } + pickBlocked = true } ch = pg.blockingCh @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return pick{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return pick{}, status.Error(codes.Unavailable, err.Error()) } acbw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil } - return t, pickResult, nil + return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. 
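With this change the picker no longer calls stats handlers itself; it reports through the new pick.blocked field whether the RPC had to wait for a picker update. A minimal sketch of how a caller inside the grpc package might surface that, assuming a statsHandlers slice is in scope; it pairs with the DelayedPickComplete event introduced in stats/stats.go later in this patch:

	p, err := pw.pick(ctx, failFast, balancer.PickInfo{FullMethodName: method, Ctx: ctx})
	if err != nil {
		return err
	}
	if p.blocked {
		// The pick queued for a new picker; record the delayed-pick event.
		for _, sh := range statsHandlers {
			sh.HandleRPC(ctx, &stats.DelayedPickComplete{})
		}
	}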
diff --git a/tools/vendor/google.golang.org/grpc/preloader.go b/tools/vendor/google.golang.org/grpc/preloader.go index e87a17f36..ee0ff969a 100644 --- a/tools/vendor/google.golang.org/grpc/preloader.go +++ b/tools/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/tools/vendor/google.golang.org/grpc/resolver/map.go b/tools/vendor/google.golang.org/grpc/resolver/map.go index ada5b9bb7..c3c15ac96 100644 --- a/tools/vendor/google.golang.org/grpc/resolver/map.go +++ b/tools/vendor/google.golang.org/grpc/resolver/map.go @@ -18,16 +18,28 @@ package resolver -type addressMapEntry struct { +import ( + "encoding/base64" + "sort" + "strings" +) + +type addressMapEntry[T any] struct { addr Address - value any + value T } -// AddressMap is a map of addresses to arbitrary values taking into account +// AddressMap is an AddressMapV2[any]. It will be deleted in an upcoming +// release of grpc-go. +// +// Deprecated: use the generic AddressMapV2 type instead. +type AddressMap = AddressMapV2[any] + +// AddressMapV2 is a map of addresses to arbitrary values taking into account // Attributes. BalancerAttributes are ignored, as are Metadata and Type. // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. -type AddressMap struct { +type AddressMapV2[T any] struct { // The underlying map is keyed by an Address with fields that we don't care // about being set to their zero values. The only fields that we care about // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to @@ -41,23 +53,30 @@ type AddressMap struct { // The value type of the map contains a slice of addresses which match the key // in their `Addr` and `ServerName` fields and contain the corresponding value // associated with them. - m map[Address]addressMapEntryList + m map[Address]addressMapEntryList[T] } func toMapKey(addr *Address) Address { return Address{Addr: addr.Addr, ServerName: addr.ServerName} } -type addressMapEntryList []*addressMapEntry +type addressMapEntryList[T any] []*addressMapEntry[T] -// NewAddressMap creates a new AddressMap. +// NewAddressMap creates a new AddressMapV2[any]. +// +// Deprecated: use the generic NewAddressMapV2 constructor instead. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[Address]addressMapEntryList)} + return NewAddressMapV2[any]() +} + +// NewAddressMapV2 creates a new AddressMapV2. +func NewAddressMapV2[T any]() *AddressMapV2[T] { + return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. 
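A usage sketch for the generic AddressMapV2 replacement, assuming the illustrative value type int; note that Get returns the zero value of T when the address is absent:

	m := resolver.NewAddressMapV2[int]()
	addr := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend"}
	m.Set(addr, 42)
	v, ok := m.Get(addr) // v == 42, ok == true
	m.Delete(addr)
	_, ok = m.Get(addr) // ok == false; the zero value of T is returned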
-func (l addressMapEntryList) find(addr Address) int { +func (l addressMapEntryList[T]) find(addr Address) int { for i, entry := range l { // Attributes are the only thing to match on here, since `Addr` and // `ServerName` are already equal. @@ -69,28 +88,28 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value any, ok bool) { +func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value any) { +func (a *AddressMapV2[T]) Set(addr Address, value T) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { entryList[entry].value = value return } - a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value}) } // Delete removes addr from the map. -func (a *AddressMap) Delete(addr Address) { +func (a *AddressMapV2[T]) Delete(addr Address) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] entry := entryList.find(addr) @@ -107,7 +126,7 @@ func (a *AddressMap) Delete(addr Address) { } // Len returns the number of entries in the map. -func (a *AddressMap) Len() int { +func (a *AddressMapV2[T]) Len() int { ret := 0 for _, entryList := range a.m { ret += len(entryList) @@ -116,7 +135,7 @@ func (a *AddressMap) Len() int { } // Keys returns a slice of all current map keys. -func (a *AddressMap) Keys() []Address { +func (a *AddressMapV2[T]) Keys() []Address { ret := make([]Address, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { @@ -127,8 +146,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []any { - ret := make([]any, 0, a.Len()) +func (a *AddressMapV2[T]) Values() []T { + ret := make([]T, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -137,70 +156,65 @@ func (a *AddressMap) Values() []any { return ret } -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. -func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} +type endpointMapKey string // EndpointMap is a map of endpoints to arbitrary values keyed on only the // unordered set of address strings within an endpoint. This map is not thread // safe, thus it is unsafe to access concurrently. Must be created via // NewEndpointMap; do not construct directly. -type EndpointMap struct { - endpoints map[*endpointNode]any +type EndpointMap[T any] struct { + endpoints map[endpointMapKey]endpointData[T] +} + +type endpointData[T any] struct { + // decodedKey stores the original key to avoid decoding when iterating on + // EndpointMap keys. 
+ decodedKey Endpoint + value T } // NewEndpointMap creates a new EndpointMap. -func NewEndpointMap() *EndpointMap { - return &EndpointMap{ - endpoints: make(map[*endpointNode]any), +func NewEndpointMap[T any]() *EndpointMap[T] { + return &EndpointMap[T]{ + endpoints: make(map[endpointMapKey]endpointData[T]), } } +// encodeEndpoint returns a string that uniquely identifies the unordered set of +// addresses within an endpoint. +func encodeEndpoint(e Endpoint) endpointMapKey { + addrs := make([]string, 0, len(e.Addresses)) + // base64 encoding the address strings restricts the characters present + // within the strings. This allows us to use a delimiter without the need of + // escape characters. + for _, addr := range e.Addresses { + addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr))) + } + sort.Strings(addrs) + // " " should not appear in base64 encoded strings. + return endpointMapKey(strings.Join(addrs, " ")) +} + // Get returns the value for the address in the map, if present. -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true +func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) { + val, found := em.endpoints[encodeEndpoint(e)] + if found { + return val.value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return +func (em *EndpointMap[T]) Set(e Endpoint, value T) { + en := encodeEndpoint(e) + em.endpoints[en] = endpointData[T]{ + decodedKey: Endpoint{Addresses: e.Addresses}, + value: value, } - em.endpoints[&en] = value } // Len returns the number of entries in the map. -func (em *EndpointMap) Len() int { +func (em *EndpointMap[T]) Len() int { return len(em.endpoints) } @@ -209,43 +223,25 @@ func (em *EndpointMap) Len() int { // the unordered set of addresses. Thus, endpoint information returned is not // the full endpoint data (drops duplicated addresses and attributes) but can be // used for EndpointMap accesses. -func (em *EndpointMap) Keys() []Endpoint { +func (em *EndpointMap[T]) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) + for _, en := range em.endpoints { + ret = append(ret, en.decodedKey) } return ret } // Values returns a slice of all current map values. -func (em *EndpointMap) Values() []any { - ret := make([]any, 0, len(em.endpoints)) +func (em *EndpointMap[T]) Values() []T { + ret := make([]T, 0, len(em.endpoints)) for _, val := range em.endpoints { - ret = append(ret, val) + ret = append(ret, val.value) } return ret } -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - // Delete removes the specified endpoint from the map. 
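Because the key is the base64-encoded, sorted, unordered set of address strings, two endpoints that list the same addresses in different orders map to the same entry. A brief sketch with illustrative addresses:

	em := resolver.NewEndpointMap[string]()
	e1 := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "a:1"}, {Addr: "b:2"}}}
	e2 := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "b:2"}, {Addr: "a:1"}}}
	em.Set(e1, "v1")
	em.Set(e2, "v2") // same unordered address set: overwrites, Len() stays 1
	v, _ := em.Get(e1) // v == "v2"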
-func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } +func (em *EndpointMap[T]) Delete(e Endpoint) { + en := encodeEndpoint(e) + delete(em.endpoints, en) } diff --git a/tools/vendor/google.golang.org/grpc/resolver/resolver.go b/tools/vendor/google.golang.org/grpc/resolver/resolver.go index 202854511..8e6af9514 100644 --- a/tools/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/tools/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -29,6 +30,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -174,6 +176,8 @@ type BuildOptions struct { // Authority is the effective authority of the clientconn for which the // resolver is built. Authority string + // MetricsRecorder is the metrics recorder to do recording. + MetricsRecorder stats.MetricsRecorder } // An Endpoint is one network endpoint, or server, which may have multiple @@ -237,8 +241,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -328,5 +332,27 @@ type AuthorityOverrider interface { // OverrideAuthority returns the authority to use for a ClientConn with the // given target. The implementation must generate it without blocking, // typically in line, and must keep it unchanged. + // + // The returned string must be a valid ":authority" header value, i.e. be + // encoded according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as + // necessary. OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. 
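A petiole policy would typically run this validation at the top of UpdateClientConnState before fanning endpoints out to children. A minimal sketch, assuming a hypothetical wrrBalancer type:

	func (b *wrrBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
		if err := resolver.ValidateEndpoints(s.ResolverState.Endpoints); err != nil {
			return balancer.ErrBadResolverState
		}
		// ...forward the endpoints to the child policies...
		return nil
	}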
+func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/tools/vendor/google.golang.org/grpc/resolver_wrapper.go b/tools/vendor/google.golang.org/grpc/resolver_wrapper.go index 23bb3fb25..80e16a327 100644 --- a/tools/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/tools/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/resolver/delegatingresolver" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -76,9 +77,19 @@ func (ccr *ccResolverWrapper) start() error { CredsBundle: ccr.cc.dopts.copts.CredsBundle, Dialer: ccr.cc.dopts.copts.Dialer, Authority: ccr.cc.authority, + MetricsRecorder: ccr.cc.metricsRecorderList, } var err error - ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + // The delegating resolver is used unless: + // - A custom dialer is provided via WithContextDialer dialoption or + // - Proxy usage is disabled through WithNoProxy dialoption. + // In these cases, the resolver is built based on the scheme of target, + // using the appropriate resolver builder. + if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy { + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + } else { + ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution) + } errCh <- err }) return <-errCh @@ -123,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { return nil } if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } + s.Endpoints = addressesToEndpoints(s.Addresses) } ccr.addChannelzTraceEvent(s) ccr.curState = s @@ -161,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.cc.mu.Unlock() return } - s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + s := resolver.State{ + Addresses: addrs, + ServiceConfig: ccr.curState.ServiceConfig, + Endpoints: addressesToEndpoints(addrs), + } ccr.addChannelzTraceEvent(s) ccr.curState = s ccr.mu.Unlock() @@ -199,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } + +func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint { + endpoints := make([]resolver.Endpoint, 0, len(addrs)) + for _, a := range addrs { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + endpoints = append(endpoints, ep) + } + return endpoints +} diff --git a/tools/vendor/google.golang.org/grpc/rpc_util.go b/tools/vendor/google.golang.org/grpc/rpc_util.go index aba1ae3e6..47ea09f5c 100644 --- a/tools/vendor/google.golang.org/grpc/rpc_util.go +++ b/tools/vendor/google.golang.org/grpc/rpc_util.go @@ 
-151,7 +151,7 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - compressorType string + compressorName string failFast bool maxReceiveMessageSize *int maxSendMessageSize *int @@ -160,6 +160,7 @@ type callInfo struct { codec baseCodec maxRetryRPCBufferSize int onFinish []func(err error) + authority string } func defaultCallInfo() *callInfo { @@ -222,7 +223,7 @@ type HeaderCallOption struct { func (o HeaderCallOption) before(*callInfo) error { return nil } func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { - *o.HeaderAddr, _ = attempt.s.Header() + *o.HeaderAddr, _ = attempt.transportStream.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -244,7 +245,7 @@ type TrailerCallOption struct { func (o TrailerCallOption) before(*callInfo) error { return nil } func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { - *o.TrailerAddr = attempt.s.Trailer() + *o.TrailerAddr = attempt.transportStream.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. @@ -266,7 +267,7 @@ type PeerCallOption struct { func (o PeerCallOption) before(*callInfo) error { return nil } func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { - if x, ok := peer.FromContext(attempt.s.Context()); ok { + if x, ok := peer.FromContext(attempt.transportStream.Context()); ok { *o.PeerAddr = *x } } @@ -365,6 +366,36 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { } func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} +// CallAuthority returns a CallOption that sets the HTTP/2 :authority header of +// an RPC to the specified value. When using CallAuthority, the credentials in +// use must implement the AuthorityValidator interface. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func CallAuthority(authority string) CallOption { + return AuthorityOverrideCallOption{Authority: authority} +} + +// AuthorityOverrideCallOption is a CallOption that indicates the HTTP/2 +// :authority header value to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a later +// release. +type AuthorityOverrideCallOption struct { + Authority string +} + +func (o AuthorityOverrideCallOption) before(c *callInfo) error { + c.authority = o.Authority + return nil +} + +func (o AuthorityOverrideCallOption) after(*callInfo, *csAttempt) {} + // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default // `math.MaxInt32`. @@ -435,7 +466,7 @@ type CompressorCallOption struct { } func (o CompressorCallOption) before(c *callInfo) error { - c.compressorType = o.CompressorType + c.compressorName = o.CompressorType return nil } func (o CompressorCallOption) after(*callInfo, *csAttempt) {} @@ -622,7 +653,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { - ReadHeader(header []byte) error + ReadMessageHeader(header []byte) error Read(n int) (mem.BufferSlice, error) } @@ -656,7 +687,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. 
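The CallAuthority option added above in rpc_util.go applies per RPC. A hedged sketch against a hypothetical generated stub; client, pb.EchoRequest, and the target authority are illustrative, and the credentials in use must implement AuthorityValidator for the override to be accepted:

	resp, err := client.Echo(ctx, &pb.EchoRequest{Message: "hi"},
		grpc.CallAuthority("internal.example.com"))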
func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadHeader(p.header[:]) + err := p.r.ReadMessageHeader(p.header[:]) if err != nil { return 0, nil, err } @@ -664,9 +695,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) - if length == 0 { - return pf, nil, nil - } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -695,9 +723,9 @@ func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(b.Len()) > math.MaxUint32 { + if bufSize := uint(b.Len()); bufSize > math.MaxUint32 { b.Free() - return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize) } return b, nil } @@ -817,7 +845,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -831,30 +859,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, st.Err() } - var size int if pf.isCompressed() { defer compressed.Free() - // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. - if dc != nil { - var uncompressedBuf []byte - uncompressedBuf, err = dc.Do(compressed.Reader()) - if err == nil { - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} - } - size = len(uncompressedBuf) - } else { - out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) - } + out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool) if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) - } - if size > maxReceiveMessageSize { - out.Free() - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, err } } else { out = compressed @@ -869,49 +880,56 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return out, nil } -// Using compressor, decompress d, returning data and size. -// Optionally, if data will be over maxReceiveMessageSize, just return the size. 
-func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { - dcReader, err := compressor.Decompress(d.Reader()) - if err != nil { - return nil, 0, err +// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor. +// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data +// does not exceed the specified maximum size and returns an error if this limit is exceeded. +// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit. +func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) { + if dc != nil { + uncompressed, err := dc.Do(d.Reader()) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if len(uncompressed) > maxReceiveMessageSize { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize) + } + return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil } + if compressor != nil { + dcReader, err := compressor.Decompress(d.Reader()) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) + } - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? - //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. - // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} + // Read at most one byte more than the limit from the decompressor. + // Unless the limit is MaxInt64, in which case, that's impossible, so + // apply no limit. 
+ if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 { + dcReader = io.LimitReader(dcReader, limit+1) + } + out, err := mem.ReadAll(dcReader, pool) + if err != nil { + out.Free() + return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) + } - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - if err != nil { - out.Free() - return nil, 0, err + if out.Len() > maxReceiveMessageSize { + out.Free() + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) + } + return out, nil } - return out, out.Len(), nil + return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload") +} + +type recvCompressor interface { + RecvCompress() string } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/tools/vendor/google.golang.org/grpc/server.go b/tools/vendor/google.golang.org/grpc/server.go index d1e1415a4..1da2a542a 100644 --- a/tools/vendor/google.golang.org/grpc/server.go +++ b/tools/vendor/google.golang.org/grpc/server.go @@ -37,12 +37,14 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/mem" @@ -82,17 +84,21 @@ func init() { internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption internal.BufferPool = bufferPool + internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder { + return istats.NewMetricsRecorderList(srv.opts.statsHandlers) + } } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. 
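Exporting MethodHandler lets code outside generated stubs build method descriptors by hand. A sketch under stated assumptions: pb.EchoRequest and pb.EchoServer are hypothetical generated types, and the interceptor is ignored for brevity:

	var echoMethod = grpc.MethodDesc{
		MethodName: "Echo",
		Handler: func(srv any, ctx context.Context, dec func(any) error, _ grpc.UnaryServerInterceptor) (any, error) {
			var req pb.EchoRequest // hypothetical message type
			if err := dec(&req); err != nil {
				return nil, err
			}
			return srv.(pb.EchoServer).Echo(ctx, &req) // hypothetical service interface
		},
	}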
@@ -173,6 +179,7 @@ type serverOptions struct { numServerWorkers uint32 bufferPool mem.BufferPool waitForHandlers bool + staticWindowSize bool } var defaultServerOptions = serverOptions{ @@ -273,6 +280,7 @@ func ReadBufferSize(s int) ServerOption { func InitialWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialWindowSize = s + o.staticWindowSize = true }) } @@ -281,6 +289,29 @@ func InitialWindowSize(s int32) ServerOption { func InitialConnWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialConnWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticStreamWindowSize returns a ServerOption to set the initial stream +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. +func StaticStreamWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticConnWindowSize returns a ServerOption to set the initial connection +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. +func StaticConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + o.staticWindowSize = true }) } @@ -621,8 +652,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // @@ -642,7 +673,7 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. 
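The new Static*WindowSize server options pin the HTTP/2 flow-control windows and disable BDP-based dynamic sizing, which the older Initial*WindowSize options now also do via the staticWindowSize flag. A short usage sketch:

	srv := grpc.NewServer(
		// Pin stream and connection windows to 1 MiB; values below 64 KiB are ignored.
		grpc.StaticStreamWindowSize(1<<20),
		grpc.StaticConnWindowSize(1<<20),
	)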
func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) - s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + s.serverWorkerChannelClose = sync.OnceFunc(func() { close(s.serverWorkerChannel) }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { @@ -980,6 +1011,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, BufferPool: s.opts.bufferPool, + StaticWindowSize: s.opts.staticWindowSize, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1020,7 +1052,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1136,7 +1168,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1165,7 +1197,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, payload, opts) + err = stream.Write(hdr, payload, opts) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1212,7 +1244,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1320,7 +1352,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) + stream.WriteStatus(st) return st.Err() } } @@ -1354,16 +1386,21 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + if e := stream.WriteStatus(status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } - defer d.Free() - if channelz.IsOn() { - t.IncrMsgRecv() + freed := false + dataFree := func() { + if !freed { + d.Free() + freed = true + } } + defer dataFree() df := func(v any) error { + defer dataFree() if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1404,7 +1441,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := t.WriteStatus(stream, appStatus); e != nil { + if e := stream.WriteStatus(appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1431,20 +1468,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.WriteOptions{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). 
return err } if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { + if e := stream.WriteStatus(sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1484,9 +1521,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } - if channelz.IsOn() { - t.IncrMsgSent() - } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1502,7 +1536,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } - return t.WriteStatus(stream, statusOK) + return stream.WriteStatus(statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1541,7 +1575,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1561,10 +1595,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, - t: t, s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), + desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, @@ -1643,12 +1677,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { - ss.dc = s.opts.dc + ss.decompressorV0 = s.opts.dc } else if rc != "" && rc != encoding.Identity { - ss.decomp = encoding.GetCompressor(rc) - if ss.decomp == nil { + ss.decompressorV1 = encoding.GetCompressor(rc) + if ss.decompressorV1 == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) return st.Err() } } @@ -1658,12 +1692,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran // // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { - ss.cp = s.opts.cp + ss.compressorV0 = s.opts.cp ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. 
- ss.comp = encoding.GetCompressor(rc) - if ss.comp != nil { + ss.compressorV1 = encoding.GetCompressor(rc) + if ss.compressorV1 != nil { ss.sendCompressorName = rc } } @@ -1674,7 +1708,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran } } - ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1) if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1717,7 +1751,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - t.WriteStatus(ss.s, appStatus) + ss.s.WriteStatus(appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1735,10 +1769,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - return t.WriteStatus(ss.s, statusOK) + return ss.s.WriteStatus(statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1768,7 +1802,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1783,17 +1817,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1802,17 +1839,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(ctx, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. 
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1825,7 +1862,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1925,7 +1962,7 @@ func (s *Server) stop(graceful bool) { s.conns = nil if s.opts.numServerWorkers > 0 { - // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // Closing the channel (only once, via sync.OnceFunc) after all the // connections have been closed above ensures that there are no // goroutines executing the callback passed to st.HandleStreams (where // the channel is written to). @@ -2100,7 +2137,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2122,7 +2159,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/tools/vendor/google.golang.org/grpc/service_config.go b/tools/vendor/google.golang.org/grpc/service_config.go index 2671c5ef6..8d451e07c 100644 --- a/tools/vendor/google.golang.org/grpc/service_config.go +++ b/tools/vendor/google.golang.org/grpc/service_config.go @@ -168,6 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } + func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -267,18 +268,21 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } +func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { + return jrp.MaxAttempts > 1 && + jrp.InitialBackoff > 0 && + jrp.MaxBackoff > 0 && + jrp.BackoffMultiplier > 0 && + len(jrp.RetryableStatusCodes) > 0 +} + func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } - if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil + if !isValidRetryPolicy(jrp) { + return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp) } if jrp.MaxAttempts < maxAttempts { @@ -297,7 +301,7 @@ func 
convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func min(a, b *int) *int { +func minPointers(a, b *int) *int { if *a < *b { return a } @@ -309,7 +313,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) + return minPointers(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/tools/vendor/google.golang.org/grpc/stats/handlers.go b/tools/vendor/google.golang.org/grpc/stats/handlers.go index dc03731e4..67194a592 100644 --- a/tools/vendor/google.golang.org/grpc/stats/handlers.go +++ b/tools/vendor/google.golang.org/grpc/stats/handlers.go @@ -38,6 +38,15 @@ type RPCTagInfo struct { // FailFast indicates if this RPC is failfast. // This field is only valid on client side, it's always false on server side. FailFast bool + // NameResolutionDelay indicates if the RPC needed to wait for the + // initial name resolver update before it could begin. This should only + // happen if the channel is IDLE when the RPC is started. Note that + // all retry or hedging attempts for an RPC that experienced a delay + // will have it set. + // + // This field is only valid on the client side; it is always false on + // the server side. + NameResolutionDelay bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). diff --git a/tools/vendor/google.golang.org/grpc/stats/metrics.go b/tools/vendor/google.golang.org/grpc/stats/metrics.go new file mode 100644 index 000000000..641c8e979 --- /dev/null +++ b/tools/vendor/google.golang.org/grpc/stats/metrics.go @@ -0,0 +1,81 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import "maps" + +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetricSet instead. +type MetricSet struct { + // metrics are the set of metrics to initialize. + metrics map[string]bool +} + +// NewMetricSet returns a MetricSet containing metricNames. +func NewMetricSet(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *MetricSet) Metrics() map[string]bool { + return m.metrics +} + +// Add adds the metricNames to the metrics set and returns a new copy with the +// additional metrics. 
+func (m *MetricSet) Add(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { + newMetrics := make(map[string]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &MetricSet{metrics: newMetrics} +} + +// Remove removes the metricNames from the metrics set and returns a new copy +// with the metrics removed. +func (m *MetricSet) Remove(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + delete(newMetrics, metric) + } + return &MetricSet{metrics: newMetrics} +} diff --git a/tools/vendor/google.golang.org/grpc/stats/stats.go b/tools/vendor/google.golang.org/grpc/stats/stats.go index 71195c494..10bf998aa 100644 --- a/tools/vendor/google.golang.org/grpc/stats/stats.go +++ b/tools/vendor/google.golang.org/grpc/stats/stats.go @@ -36,7 +36,12 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC attempt begins. +// Begin contains stats for the start of an RPC attempt. +// +// - Server-side: Triggered after `InHeader`, as headers are processed +// before the RPC lifecycle begins. +// - Client-side: The first stats event recorded. +// // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. @@ -59,17 +64,23 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} +// DelayedPickComplete indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +type DelayedPickComplete struct{} -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } +// IsClient indicates DelayedPickComplete is available on the client. +func (*DelayedPickComplete) IsClient() bool { return true } -func (*PickerUpdated) isRPCStats() {} +func (*DelayedPickComplete) isRPCStats() {} -// InPayload contains the information for an incoming payload. +// PickerUpdated indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +// +// Deprecated: will be removed in a future release; use DelayedPickComplete +// instead. +type PickerUpdated = DelayedPickComplete + +// InPayload contains stats about an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool @@ -98,7 +109,9 @@ func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} -// InHeader contains stats when a header is received. +// InHeader contains stats about header reception. +// +// - Server-side: The first stats event after the RPC request is received. type InHeader struct { // Client is true if this InHeader is from client side. 
Client bool @@ -123,7 +136,7 @@ func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} -// InTrailer contains stats when a trailer is received. +// InTrailer contains stats about trailer reception. type InTrailer struct { // Client is true if this InTrailer is from client side. Client bool @@ -139,7 +152,7 @@ func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} -// OutPayload contains the information for an outgoing payload. +// OutPayload contains stats about an outgoing payload. type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool @@ -166,7 +179,10 @@ func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} -// OutHeader contains stats when a header is sent. +// OutHeader contains stats about header transmission. +// +// - Client-side: Only occurs after 'Begin', as headers are always the first +// thing sent on a stream. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool @@ -189,14 +205,15 @@ func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} -// OutTrailer contains stats when a trailer is sent. +// OutTrailer contains stats about trailer transmission. type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. // - // Deprecated: This field is never set. The length is not known when this message is - // emitted because the trailer fields are compressed with hpack after that. + // Deprecated: This field is never set. The length is not known when this + // message is emitted because the trailer fields are compressed with hpack + // after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. @@ -208,7 +225,7 @@ func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} -// End contains stats when an RPC ends. +// End contains stats about RPC completion. type End struct { // Client is true if this End is from client side. Client bool @@ -238,7 +255,7 @@ type ConnStats interface { IsClient() bool } -// ConnBegin contains the stats of a connection when it is established. +// ConnBegin contains stats about connection establishment. type ConnBegin struct { // Client is true if this ConnBegin is from client side. Client bool @@ -249,7 +266,7 @@ func (s *ConnBegin) IsClient() bool { return s.Client } func (s *ConnBegin) isConnStats() {} -// ConnEnd contains the stats of a connection when it ends. +// ConnEnd contains stats about connection termination. type ConnEnd struct { // Client is true if this ConnEnd is from client side. Client bool @@ -260,84 +277,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. 
+// Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) } // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b -} - -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-trace-bin` header in the metadata instead. func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) } // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. 
-func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } diff --git a/tools/vendor/google.golang.org/grpc/stream.go b/tools/vendor/google.golang.org/grpc/stream.go index bb2b2a216..d9bbd4c57 100644 --- a/tools/vendor/google.golang.org/grpc/stream.go +++ b/tools/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -101,9 +101,9 @@ type ClientStream interface { // It must only be called after stream.CloseAndRecv has returned, or // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. It is also not safe to call CloseSend - // concurrently with SendMsg. + // CloseSend closes the send direction of the stream. This method always + // returns a nil error. The status of the stream may be discovered using + // RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg. CloseSend() error // Context returns the context for this stream. // @@ -113,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. // // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -210,14 +212,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. - if err := cc.waitForResolvedAddrs(ctx); err != nil { + nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx) + if err != nil { return nil, err } var mc serviceconfig.MethodConfig var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { - return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...) 
} rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} @@ -255,10 +258,10 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return newStream(ctx, func() {}) } -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { - c := defaultCallInfo() +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { + callInfo := defaultCallInfo() if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady + callInfo.failFast = !*mc.WaitForReady } // Possible context leak: @@ -279,59 +282,61 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client }() for _, o := range opts { - if err := o.before(c); err != nil { + if err := o.before(callInfo); err != nil { return nil, toRPCErr(err) } } - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - if err := setCallInfoCodec(c); err != nil { + callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize) + callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(callInfo); err != nil { return nil, err } callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - ContentSubtype: c.contentSubtype, + ContentSubtype: callInfo.contentSubtype, DoneFunc: doneFunc, + Authority: callInfo.authority, } // Set our outgoing compression according to the UseCompressor CallOption, if // set. In that case, also find the compressor from the encoding package. // Otherwise, use the compressor configured by the WithCompressor DialOption, // if set. 
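For context on the compressor selection described above: the per-call UseCompressor option wins over a WithCompressor dial option. A small client sketch follows; the address and method name are placeholders, while grpc.UseCompressor and the gzip encoding package are real APIs.

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/encoding/gzip" // importing registers the "gzip" compressor
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", // placeholder address
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	var in, out emptypb.Empty
	// Per-call option: takes precedence over any WithCompressor dial option.
	_ = conn.Invoke(context.Background(), "/demo.Demo/Ping", // placeholder method
		&in, &out, grpc.UseCompressor(gzip.Name))
}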
- var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { + var compressorV0 Compressor + var compressorV1 encoding.Compressor + if ct := callInfo.compressorName; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { + compressorV1 = encoding.GetCompressor(ct) + if compressorV1 == nil { return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } - } else if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - cp = cc.dopts.cp + } else if cc.dopts.compressorV0 != nil { + callHdr.SendCompress = cc.dopts.compressorV0.Type() + compressorV0 = cc.dopts.compressorV0 } - if c.creds != nil { - callHdr.Creds = c.creds + if callInfo.creds != nil { + callHdr.Creds = callInfo.creds } cs := &clientStream{ - callHdr: callHdr, - ctx: ctx, - methodConfig: &mc, - opts: opts, - callInfo: c, - cc: cc, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - cancel: cancel, - firstAttempt: true, - onCommit: onCommit, + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: callInfo, + cc: cc, + desc: desc, + codec: callInfo.codec, + compressorV0: compressorV0, + compressorV1: compressorV1, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + nameResolutionDelay: nameResolutionDelayed, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) @@ -410,12 +415,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) return nil, ErrClientConnClosing } - ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time shs := cs.cc.dopts.copts.StatsHandlers for _, sh := range shs { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay}) beginTime = time.Now() begin := &stats.Begin{ Client: true, @@ -452,20 +457,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) } return &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandlers: shs, - trInfo: trInfo, + ctx: ctx, + beginTime: beginTime, + cs: cs, + decompressorV0: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, }, nil } func (a *csAttempt) getTransport() error { cs := a.cs - var err error - a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method} + pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo) + a.transport, a.pickResult = pick.transport, pick.result if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -474,7 +480,12 @@ func (a *csAttempt) getTransport() error { return err } if a.trInfo != nil { - a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) + a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) + } + if pick.blocked { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) + } } return nil } @@ -501,7 +512,7 @@ func (a *csAttempt) newStream() error { a.ctx = metadata.NewOutgoingContext(a.ctx, md) } - s, err := a.t.NewStream(a.ctx, cs.callHdr) 
+ s, err := a.transport.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) if !ok { @@ -516,9 +527,9 @@ func (a *csAttempt) newStream() error { // Unwrap and convert error. return toRPCErr(nse.Err) } - a.s = s + a.transportStream = s a.ctx = s.Context() - a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -530,9 +541,9 @@ type clientStream struct { cc *ClientConn desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor + codec baseCodec + compressorV0 Compressor + compressorV1 encoding.Compressor cancel context.CancelFunc // cancels all attempts @@ -571,6 +582,9 @@ type clientStream struct { onCommit func() replayBuffer []replayOp // operations to replay on retry replayBufferSize int // current size of replayBuffer + // nameResolutionDelay indicates if there was a delay in the name resolution. + // This field is only valid on client side, it's always false on server side. + nameResolutionDelay bool } type replayOp struct { @@ -581,17 +595,17 @@ type replayOp struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - pickResult balancer.PickResult - - finished bool - dc Decompressor - decomp encoding.Compressor - decompSet bool + ctx context.Context + cs *clientStream + transport transport.ClientTransport + transportStream *transport.ClientStream + parser *parser + pickResult balancer.PickResult + + finished bool + decompressorV0 Decompressor + decompressorV1 encoding.Compressor + decompressorSet bool mu sync.Mutex // guards trInfo.tr // trInfo may be nil (if EnableTracing is false). @@ -637,14 +651,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } - if a.s == nil && a.allowTransparentRetry { + if a.transportStream == nil && a.allowTransparentRetry { return true, nil } // Wait for the trailers. unprocessed := false - if a.s != nil { - <-a.s.Done() - unprocessed = a.s.Unprocessed() + if a.transportStream != nil { + <-a.transportStream.Done() + unprocessed = a.transportStream.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -656,14 +670,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if a.s != nil { - if !a.s.TrailersOnly() { + if a.transportStream != nil { + if !a.transportStream.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. 
- sps := a.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -680,8 +694,8 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { } var code codes.Code - if a.s != nil { - code = a.s.Status().Code() + if a.transportStream != nil { + code = a.transportStream.Status().Code() } else { code = status.Code(err) } @@ -706,11 +720,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) + cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) + // Apply jitter by multiplying with a random factor between 0.8 and 1.2 + cur *= 0.8 + 0.4*rand.Float64() + dur = time.Duration(int64(cur)) cs.numRetriesSincePushback++ } @@ -755,8 +768,8 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - if cs.attempt.s != nil { - return cs.attempt.s.Context() + if cs.attempt.transportStream != nil { + return cs.attempt.transportStream.Context() } return cs.ctx } @@ -793,9 +806,9 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) continue } if err == io.EOF { - <-a.s.Done() + <-a.transportStream.Done() } - if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) { onSuccess() cs.mu.Unlock() return err @@ -811,7 +824,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD err := cs.withRetry(func(a *csAttempt) error { var err error - m, err = a.s.Header() + m, err = a.transportStream.Header() return toRPCErr(err) }, cs.commitAttemptLocked) @@ -855,10 +868,10 @@ func (cs *clientStream) Trailer() metadata.MD { // directions -- it will prevent races and should not meaningfully impact // performance. cs.commitAttempt() - if cs.attempt.s == nil { + if cs.attempt.transportStream == nil { return nil } - return cs.attempt.s.Trailer() + return cs.attempt.transportStream.Trailer() } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { @@ -903,7 +916,7 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool) if err != nil { return err } @@ -986,12 +999,12 @@ func (cs *clientStream) RecvMsg(m any) error { func (cs *clientStream) CloseSend() error { if cs.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? + // Return a nil error on repeated calls to this method. 
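The retry backoff hunk above replaces the uniform rand.Int63n draw over [0, cur) with the capped exponential delay scaled by a jitter factor in [0.8, 1.2). A standalone sketch of that arithmetic; the function name is illustrative, not the package's internals.

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// jitteredBackoff mirrors the new computation: exponential growth capped at
// maxBackoff, then multiplied by a random factor between 0.8 and 1.2.
func jitteredBackoff(initial, maxBackoff time.Duration, multiplier float64, retries int) time.Duration {
	cur := min(float64(initial)*math.Pow(multiplier, float64(retries)), float64(maxBackoff))
	cur *= 0.8 + 0.4*rand.Float64()
	return time.Duration(int64(cur))
}

func main() {
	for i := range 4 {
		fmt.Println(jitteredBackoff(100*time.Millisecond, 2*time.Second, 2, i))
	}
}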
return nil } cs.sentLast = true op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1007,7 +1020,10 @@ func (cs *clientStream) CloseSend() error { binlog.Log(cs.ctx, chc) } } - // We never returned an error here for reasons. + // We don't return an error here as we expect users to read all messages + // from the stream and get the RPC status from RecvMsg(). Note that + // SendMsg() must return an error when one occurs so the application + // knows to stop sending messages, but that does not apply here. return nil } @@ -1029,7 +1045,7 @@ func (cs *clientStream) finish(err error) { if cs.attempt != nil { cs.attempt.finish(err) // after functions all rely upon having a stream. - if cs.attempt.s != nil { + if cs.attempt.transportStream != nil { for _, o := range cs.opts { o.after(cs.callInfo, cs.attempt) } @@ -1083,7 +1099,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1097,9 +1113,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } - if channelz.IsOn() { - a.t.IncrMsgSent() - } return nil } @@ -1110,25 +1123,25 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { defer payInfo.free() } - if !a.decompSet { + if !a.decompressorSet { // Block until we receive headers containing received message encoding. - if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if a.dc == nil || a.dc.Type() != ct { + if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.decompressorV0 == nil || a.decompressorV0.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. - a.dc = nil - a.decomp = encoding.GetCompressor(ct) + a.decompressorV0 = nil + a.decompressorV1 = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. - a.dc = nil + a.decompressorV0 = nil } // Only initialize this state once per stream. - a.decompSet = true + a.decompressorSet = true } - if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { + if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { if err == io.EOF { - if statusErr := a.s.Status().Err(); statusErr != nil { + if statusErr := a.transportStream.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. 
@@ -1153,21 +1166,18 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 			Length: payInfo.uncompressedBytes.Len(),
 		})
 	}
-	if channelz.IsOn() {
-		a.t.IncrMsgRecv()
-	}
 	if cs.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
 	}
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
-		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+		return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
 	} else if err != nil {
 		return toRPCErr(err)
 	}
-	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }
 
 func (a *csAttempt) finish(err error) {
@@ -1182,20 +1192,20 @@
 		err = nil
 	}
 	var tr metadata.MD
-	if a.s != nil {
-		a.t.CloseStream(a.s, err)
-		tr = a.s.Trailer()
+	if a.transportStream != nil {
+		a.transportStream.Close(err)
+		tr = a.transportStream.Trailer()
 	}
 	if a.pickResult.Done != nil {
 		br := false
-		if a.s != nil {
-			br = a.s.BytesReceived()
+		if a.transportStream != nil {
+			br = a.transportStream.BytesReceived()
 		}
 		a.pickResult.Done(balancer.DoneInfo{
 			Err:           err,
 			Trailer:       tr,
-			BytesSent:     a.s != nil,
+			BytesSent:     a.transportStream != nil,
 			BytesReceived: br,
 			ServerLoad:    balancerload.Parse(tr),
 		})
@@ -1277,7 +1287,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 	// if set.
 	var cp Compressor
 	var comp encoding.Compressor
-	if ct := c.compressorType; ct != "" {
+	if ct := c.compressorName; ct != "" {
 		callHdr.SendCompress = ct
 		if ct != encoding.Identity {
 			comp = encoding.GetCompressor(ct)
@@ -1285,9 +1295,9 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
 			}
 		}
-	} else if ac.cc.dopts.cp != nil {
-		callHdr.SendCompress = ac.cc.dopts.cp.Type()
-		cp = ac.cc.dopts.cp
+	} else if ac.cc.dopts.compressorV0 != nil {
+		callHdr.SendCompress = ac.cc.dopts.compressorV0.Type()
+		cp = ac.cc.dopts.compressorV0
 	}
 	if c.creds != nil {
 		callHdr.Creds = c.creds
@@ -1295,26 +1305,26 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 	// Use a special addrConnStream to avoid retry.
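With the hunk above, a unary-response stream that receives a second message now fails with a proper gRPC status (codes.Internal, "cardinality violation: ...") instead of the old opaque errors.New value, so callers can branch on the code. A self-contained sketch of that inspection; the error is constructed locally purely for illustration.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Locally constructed stand-in for the error returned above.
	err := status.Errorf(codes.Internal,
		"cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")

	// Branch on the status code rather than matching the old error string.
	if st, ok := status.FromError(err); ok && st.Code() == codes.Internal {
		fmt.Println("unary contract broken:", st.Message())
	}
}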
as := &addrConnStream{ - callHdr: callHdr, - ac: ac, - ctx: ctx, - cancel: cancel, - opts: opts, - callInfo: c, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - t: t, - } - - s, err := as.t.NewStream(as.ctx, as.callHdr) + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + sendCompressorV0: cp, + sendCompressorV1: comp, + transport: t, + } + + s, err := as.transport.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) return nil, err } - as.s = s - as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + as.transportStream = s + as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1340,29 +1350,31 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream - ac *addrConn - callHdr *transport.CallHdr - cancel context.CancelFunc - opts []CallOption - callInfo *callInfo - t transport.ClientTransport - ctx context.Context - sentLast bool - desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor - decompSet bool - dc Decompressor - decomp encoding.Compressor - p *parser - mu sync.Mutex - finished bool + transportStream *transport.ClientStream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + transport transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + sendCompressorV0 Compressor + sendCompressorV1 encoding.Compressor + decompressorSet bool + decompressorV0 Decompressor + decompressorV1 encoding.Compressor + parser *parser + + // mu guards finished and is held for the entire finish method. + mu sync.Mutex + finished bool } func (as *addrConnStream) Header() (metadata.MD, error) { - m, err := as.s.Header() + m, err := as.transportStream.Header() if err != nil { as.finish(toRPCErr(err)) } @@ -1370,17 +1382,17 @@ func (as *addrConnStream) Header() (metadata.MD, error) { } func (as *addrConnStream) Trailer() metadata.MD { - return as.s.Trailer() + return as.transportStream.Trailer() } func (as *addrConnStream) CloseSend() error { if as.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? + // Return a nil error on repeated calls to this method. 
return nil } as.sentLast = true - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1389,7 +1401,7 @@ func (as *addrConnStream) CloseSend() error { } func (as *addrConnStream) Context() context.Context { - return as.s.Context() + return as.transportStream.Context() } func (as *addrConnStream) SendMsg(m any) (err error) { @@ -1411,7 +1423,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool) if err != nil { return err } @@ -1430,7 +1442,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1440,9 +1452,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } - if channelz.IsOn() { - as.t.IncrMsgSent() - } return nil } @@ -1454,25 +1463,25 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { } }() - if !as.decompSet { + if !as.decompressorSet { // Block until we receive headers containing received message encoding. - if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if as.dc == nil || as.dc.Type() != ct { + if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.decompressorV0 == nil || as.decompressorV0.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. - as.dc = nil - as.decomp = encoding.GetCompressor(ct) + as.decompressorV0 = nil + as.decompressorV1 = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. - as.dc = nil + as.decompressorV0 = nil } // Only initialize this state once per stream. - as.decompSet = true + as.decompressorSet = true } - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { + if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil { if err == io.EOF { - if statusErr := as.s.Status().Err(); statusErr != nil { + if statusErr := as.transportStream.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. @@ -1480,9 +1489,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } - if channelz.IsOn() { - as.t.IncrMsgRecv() - } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. 
 		return nil
@@ -1490,12 +1496,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
-		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+		return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
 	} else if err != nil {
 		return toRPCErr(err)
 	}
-	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }
 
 func (as *addrConnStream) finish(err error) {
@@ -1509,8 +1515,8 @@ func (as *addrConnStream) finish(err error) {
 		// Ending a stream with EOF indicates a success.
 		err = nil
 	}
-	if as.s != nil {
-		as.t.CloseStream(as.s, err)
+	if as.transportStream != nil {
+		as.transportStream.Close(err)
 	}
 
 	if err != nil {
@@ -1577,18 +1583,20 @@ type ServerStream interface {
 
 // serverStream implements a server side Stream.
 type serverStream struct {
 	ctx context.Context
-	t   transport.ServerTransport
-	s   *transport.Stream
+	s *transport.ServerStream
 	p     *parser
 	codec baseCodec
+	desc  *StreamDesc
 
-	cp     Compressor
-	dc     Decompressor
-	comp   encoding.Compressor
-	decomp encoding.Compressor
+	compressorV0   Compressor
+	compressorV1   encoding.Compressor
+	decompressorV0 Decompressor
+	decompressorV1 encoding.Compressor
 
 	sendCompressorName string
 
+	recvFirstMsg bool // set after the first message is received
+
 	maxReceiveMessageSize int
 	maxSendMessageSize    int
 	trInfo                *traceInfo
@@ -1628,7 +1636,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
 		return status.Error(codes.Internal, err.Error())
 	}
 
-	err = ss.t.WriteHeader(ss.s, md)
+	err = ss.s.SendHeader(md)
 	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
 		h, _ := ss.s.Header()
 		sh := &binarylog.ServerHeader{
@@ -1668,7 +1676,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 		}
 		if err != nil && err != io.EOF {
 			st, _ := status.FromError(toRPCErr(err))
-			ss.t.WriteStatus(ss.s, st)
+			ss.s.WriteStatus(st)
 			// Non-user specified status was sent out. This should be an error
 			// case (as a server side Cancel maybe).
 			//
@@ -1676,20 +1684,17 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 			// status from the service handler, we will log that error instead.
 			// This behavior is similar to an interceptor.
 		}
-		if channelz.IsOn() && err == nil {
-			ss.t.IncrMsgSent()
-		}
 	}()
 
 	// The server handler could have set a new compressor by calling SetSendCompressor.
 	// If it is set, we need to use it for compressing the outbound message.
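The comment closing the hunk above refers to grpc.SetSendCompressor, the experimental server-side API that triggers this compressor swap. A minimal handler sketch follows; the message types and service shape are stand-ins for generated code, while grpc.SetSendCompressor and gzip.Name are the real identifiers.

package demo

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip"
)

// Request and Reply are stand-ins for protoc-generated message types.
type Request struct{}
type Reply struct{}

type pingServer struct{}

// Ping switches this RPC's outbound compression to gzip; serverStream.SendMsg
// then picks the new compressor up via ss.s.SendCompress(), as shown above.
func (pingServer) Ping(ctx context.Context, _ *Request) (*Reply, error) {
	if err := grpc.SetSendCompressor(ctx, gzip.Name); err != nil {
		return nil, err
	}
	return &Reply{}, nil
}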
if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { - ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.compressorV1 = encoding.GetCompressor(sendCompressorsName) ss.sendCompressorName = sendCompressorsName } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool) if err != nil { return err } @@ -1710,7 +1715,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { return toRPCErr(err) } @@ -1756,7 +1761,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1764,16 +1769,13 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1781,13 +1783,18 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, chc) } } + // Received no request msg for non-client streaming rpcs. + if !ss.desc.ClientStreams && !ss.recvFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC") + } return err } if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error()) } return toRPCErr(err) } + ss.recvFirstMsg = true if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ @@ -1807,7 +1814,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, cm) } } - return nil + + if ss.desc.ClientStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-client-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + return nil + } else if err != nil { + return err + } + return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC") } // MethodFromServerStream returns the method string for the input stream. 
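Looking back at the stats.MetricSet type added earlier in this patch (stats/metrics.go): every operation returns a fresh copy, so sets chain freely without mutating the receiver. A small usage sketch with illustrative metric names.

package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	base := stats.NewMetricSet("grpc.client.attempt.duration") // illustrative name
	larger := base.Add("grpc.client.call.retries")             // base is unchanged
	merged := larger.Join(stats.NewMetricSet("grpc.server.call.duration"))
	trimmed := merged.Remove("grpc.client.call.retries")

	for name := range trimmed.Metrics() { // read-only view
		fmt.Println(name)
	}
}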
diff --git a/tools/vendor/google.golang.org/grpc/version.go b/tools/vendor/google.golang.org/grpc/version.go
index 5a47094ae..468f11065 100644
--- a/tools/vendor/google.golang.org/grpc/version.go
+++ b/tools/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@ package grpc
 
 // Version is the current grpc version.
-const Version = "1.68.1"
+const Version = "1.75.1"
diff --git a/tools/vendor/helm.sh/helm/v3/internal/version/version.go b/tools/vendor/helm.sh/helm/v3/internal/version/version.go
index 539659f5e..6f6f319b0 100644
--- a/tools/vendor/helm.sh/helm/v3/internal/version/version.go
+++ b/tools/vendor/helm.sh/helm/v3/internal/version/version.go
@@ -29,7 +29,7 @@ var (
 	//
 	// Increment major number for new feature additions and behavioral changes.
 	// Increment minor number for bug fixes and performance enhancements.
-	version = "v3.16"
+	version = "v3.18"
 
 	// metadata is extra build time data
 	metadata = ""
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/action.go b/tools/vendor/helm.sh/helm/v3/pkg/action/action.go
index fe91de048..9aaf64ca4 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/action.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/action.go
@@ -19,6 +19,7 @@ package action
 import (
 	"bytes"
 	"fmt"
+	"io"
 	"os"
 	"path"
 	"path/filepath"
@@ -95,6 +96,9 @@ type Configuration struct {
 	Capabilities *chartutil.Capabilities
 
 	Log func(string, ...interface{})
+
+	// HookOutputFunc is called with the namespace, pod, and container name, and returns a writer that receives the log output.
+	HookOutputFunc func(namespace, pod, container string) io.Writer
 }
 
 // renderResources renders the templates in a chart
@@ -122,7 +126,7 @@ func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Valu
 	var err2 error
 
 	// A `helm template` should not talk to the remote cluster. However, commands with the flag
-	//`--dry-run` with the value of `false`, `none`, or `server` should try to interact with the cluster.
+	// `--dry-run` with the value of `false`, `none`, or `server` should try to interact with the cluster.
 	// It may break in interesting and exotic ways because other data (e.g. discovery) is mocked.
 	if interactWithRemote && cfg.RESTClientGetter != nil {
 		restConfig, err := cfg.RESTClientGetter.ToRESTConfig()
@@ -422,6 +426,12 @@ func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namesp
 	cfg.KubeClient = kc
 	cfg.Releases = store
 	cfg.Log = log
+	cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard }
 
 	return nil
 }
+
+// SetHookOutputFunc sets the HookOutputFunc on the Configuration.
+func (cfg *Configuration) SetHookOutputFunc(hookOutputFunc func(_, _, _ string) io.Writer) {
+	cfg.HookOutputFunc = hookOutputFunc
+}
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/dependency.go b/tools/vendor/helm.sh/helm/v3/pkg/action/dependency.go
index 3265f1f17..19305fee8 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/action/dependency.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/action/dependency.go
@@ -34,10 +34,17 @@ import (
 //
 // It provides the implementation of 'helm dependency' and its respective subcommands.
 type Dependency struct {
-	Verify      bool
-	Keyring     string
-	SkipRefresh bool
-	ColumnWidth uint
+	Verify                bool
+	Keyring               string
+	SkipRefresh           bool
+	ColumnWidth           uint
+	Username              string
+	Password              string
+	CertFile              string
+	KeyFile               string
+	CaFile                string
+	InsecureSkipTLSverify bool
+	PlainHTTP             bool
 }
 
 // NewDependency creates a new Dependency object with the given configuration.
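The new HookOutputFunc field on action.Configuration defaults to io.Discard after Init, and SetHookOutputFunc lets a caller stream hook logs somewhere useful. A sketch of wiring it up, with the usual Init plumbing elided.

package main

import (
	"io"
	"os"

	"helm.sh/helm/v3/pkg/action"
)

func main() {
	cfg := new(action.Configuration)
	// cfg.Init(...) elided; Init installs an io.Discard default writer.

	// Route every hook container's logs to stdout.
	cfg.SetHookOutputFunc(func(namespace, pod, container string) io.Writer {
		_, _, _ = namespace, pod, container // available for per-pod routing
		return os.Stdout
	})
}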
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go b/tools/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go index ec096ae16..f79788c3b 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go @@ -16,7 +16,13 @@ limitations under the License. package action -import "time" +import ( + "sort" + "strings" + "time" + + "helm.sh/helm/v3/pkg/chart" +) // GetMetadata is the action for checking a given release's metadata. // @@ -28,14 +34,16 @@ type GetMetadata struct { } type Metadata struct { - Name string `json:"name" yaml:"name"` - Chart string `json:"chart" yaml:"chart"` - Version string `json:"version" yaml:"version"` - AppVersion string `json:"appVersion" yaml:"appVersion"` - Namespace string `json:"namespace" yaml:"namespace"` - Revision int `json:"revision" yaml:"revision"` - Status string `json:"status" yaml:"status"` - DeployedAt string `json:"deployedAt" yaml:"deployedAt"` + Name string `json:"name" yaml:"name"` + Chart string `json:"chart" yaml:"chart"` + Version string `json:"version" yaml:"version"` + AppVersion string `json:"appVersion" yaml:"appVersion"` + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + Dependencies []*chart.Dependency `json:"dependencies,omitempty" yaml:"dependencies,omitempty"` + Namespace string `json:"namespace" yaml:"namespace"` + Revision int `json:"revision" yaml:"revision"` + Status string `json:"status" yaml:"status"` + DeployedAt string `json:"deployedAt" yaml:"deployedAt"` } // NewGetMetadata creates a new GetMetadata object with the given configuration. @@ -57,13 +65,26 @@ func (g *GetMetadata) Run(name string) (*Metadata, error) { } return &Metadata{ - Name: rel.Name, - Chart: rel.Chart.Metadata.Name, - Version: rel.Chart.Metadata.Version, - AppVersion: rel.Chart.Metadata.AppVersion, - Namespace: rel.Namespace, - Revision: rel.Version, - Status: rel.Info.Status.String(), - DeployedAt: rel.Info.LastDeployed.Format(time.RFC3339), + Name: rel.Name, + Chart: rel.Chart.Metadata.Name, + Version: rel.Chart.Metadata.Version, + AppVersion: rel.Chart.Metadata.AppVersion, + Dependencies: rel.Chart.Metadata.Dependencies, + Annotations: rel.Chart.Metadata.Annotations, + Namespace: rel.Namespace, + Revision: rel.Version, + Status: rel.Info.Status.String(), + DeployedAt: rel.Info.LastDeployed.Format(time.RFC3339), }, nil } + +// FormattedDepNames formats metadata.dependencies names into a comma-separated list. 
+func (m *Metadata) FormattedDepNames() string { + depsNames := make([]string, 0, len(m.Dependencies)) + for _, dep := range m.Dependencies { + depsNames = append(depsNames, dep.Name) + } + sort.StringSlice(depsNames).Sort() + + return strings.Join(depsNames, ",") +} diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/hooks.go b/tools/vendor/helm.sh/helm/v3/pkg/action/hooks.go index 4bffb6ae0..16cc13bdd 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/action/hooks.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/action/hooks.go @@ -17,10 +17,16 @@ package action import ( "bytes" + "fmt" + "log" + "slices" "sort" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/pkg/errors" + "gopkg.in/yaml.v3" "helm.sh/helm/v3/pkg/kube" "helm.sh/helm/v3/pkg/release" @@ -44,7 +50,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, for _, h := range executingHooks { // Set default delete policy to before-hook-creation - if h.DeletePolicies == nil || len(h.DeletePolicies) == 0 { + if len(h.DeletePolicies) == 0 { // TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion // resources. For all other resource types update in place if a // resource with the same name already exists and is owned by the @@ -87,10 +93,16 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // Mark hook as succeeded or failed if err != nil { h.LastRun.Phase = release.HookPhaseFailed + // If a hook is failed, check the annotation of the hook to determine if we should copy the logs client side + if errOutputting := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnFailed); errOutputting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error outputting logs for hook failure: %v", errOutputting) + } // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if err := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); err != nil { - return err + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) } return err } @@ -98,9 +110,13 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, } // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted - // under succeeded condition. If so, then clear the corresponding resource object in each hook + // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook for i := len(executingHooks) - 1; i >= 0; i-- { h := executingHooks[i] + if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { + // We log here as we still want to attempt hook resource deletion even if output logging fails. 
+ log.Printf("error outputting logs for hook failure: %v", err) + } if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { return err } @@ -138,7 +154,7 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo return errors.New(joinErrors(errs)) } - //wait for resources until they are deleted to avoid conflicts + // wait for resources until they are deleted to avoid conflicts if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { if err := kubeClient.WaitForDelete(resources, timeout); err != nil { return err @@ -158,3 +174,57 @@ func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool } return false } + +// outputLogsByPolicy outputs a pods logs if the hook policy instructs it to +func (cfg *Configuration) outputLogsByPolicy(h *release.Hook, releaseNamespace string, policy release.HookOutputLogPolicy) error { + if !hookHasOutputLogPolicy(h, policy) { + return nil + } + namespace, err := cfg.deriveNamespace(h, releaseNamespace) + if err != nil { + return err + } + switch h.Kind { + case "Job": + return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", h.Name)}) + case "Pod": + return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{FieldSelector: fmt.Sprintf("metadata.name=%s", h.Name)}) + default: + return nil + } +} + +func (cfg *Configuration) outputContainerLogsForListOptions(namespace string, listOptions metav1.ListOptions) error { + // TODO Helm 4: Remove this check when GetPodList and OutputContainerLogsForPodList are moved from InterfaceLogs to Interface + if kubeClient, ok := cfg.KubeClient.(kube.InterfaceLogs); ok { + podList, err := kubeClient.GetPodList(namespace, listOptions) + if err != nil { + return err + } + err = kubeClient.OutputContainerLogsForPodList(podList, namespace, cfg.HookOutputFunc) + return err + } + return nil +} + +func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (string, error) { + tmp := struct { + Metadata struct { + Namespace string + } + }{} + err := yaml.Unmarshal([]byte(h.Manifest), &tmp) + if err != nil { + return "", errors.Wrapf(err, "unable to parse metadata.namespace from kubernetes manifest for output logs hook %s", h.Path) + } + if tmp.Metadata.Namespace == "" { + return namespace, nil + } + return tmp.Metadata.Namespace, nil +} + +// hookHasOutputLogPolicy determines whether the defined hook output log policy matches the hook output log policies +// supported by helm. 
+func hookHasOutputLogPolicy(h *release.Hook, policy release.HookOutputLogPolicy) bool { + return slices.Contains(h.OutputLogPolicies, policy) +} diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/install.go b/tools/vendor/helm.sh/helm/v3/pkg/action/install.go index 7ca40c88a..7bdfc2ab5 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/action/install.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/action/install.go @@ -455,7 +455,11 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource if len(toBeAdopted) == 0 && len(resources) > 0 { _, err = i.cfg.KubeClient.Create(resources) } else if len(resources) > 0 { - _, err = i.cfg.KubeClient.Update(toBeAdopted, resources, i.Force) + if i.TakeOwnership { + _, err = i.cfg.KubeClient.(kube.InterfaceThreeWayMerge).UpdateThreeWayMerge(toBeAdopted, resources, i.Force) + } else { + _, err = i.cfg.KubeClient.Update(toBeAdopted, resources, i.Force) + } } if err != nil { return rel, err @@ -770,6 +774,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) ( getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile), getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSverify), getter.WithPlainHTTP(c.PlainHTTP), + getter.WithBasicAuth(c.Username, c.Password), }, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/package.go b/tools/vendor/helm.sh/helm/v3/pkg/action/package.go index 013b32f55..2357e3882 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/action/package.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/action/package.go @@ -44,8 +44,15 @@ type Package struct { Destination string DependencyUpdate bool - RepositoryConfig string - RepositoryCache string + RepositoryConfig string + RepositoryCache string + PlainHTTP bool + Username string + Password string + CertFile string + KeyFile string + CaFile string + InsecureSkipTLSverify bool } // NewPackage creates a new Package object with the given configuration. diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/push.go b/tools/vendor/helm.sh/helm/v3/pkg/action/push.go index 68d2ba42d..70e5c1582 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/action/push.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/action/push.go @@ -73,7 +73,7 @@ func WithPlainHTTP(plainHTTP bool) PushOpt { } } -// WithOptWriter sets the registryOut field on the push configuration object. +// WithPushOptWriter sets the registryOut field on the push configuration object. func WithPushOptWriter(out io.Writer) PushOpt { return func(p *Push) { p.out = out diff --git a/tools/vendor/helm.sh/helm/v3/pkg/action/registry_login.go b/tools/vendor/helm.sh/helm/v3/pkg/action/registry_login.go index cd144e1e7..b4e038123 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/action/registry_login.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/action/registry_login.go @@ -24,11 +24,12 @@ import ( // RegistryLogin performs a registry login operation. type RegistryLogin struct { - cfg *Configuration - certFile string - keyFile string - caFile string - insecure bool + cfg *Configuration + certFile string + keyFile string + caFile string + insecure bool + plainHTTP bool } type RegistryLoginOpt func(*RegistryLogin) error @@ -41,7 +42,7 @@ func WithCertFile(certFile string) RegistryLoginOpt { } } -// WithKeyFile specifies whether to very certificates when communicating. +// WithInsecure specifies whether to verify certificates. 
func WithInsecure(insecure bool) RegistryLoginOpt { return func(r *RegistryLogin) error { r.insecure = insecure @@ -65,6 +66,14 @@ func WithCAFile(caFile string) RegistryLoginOpt { } } +// WithPlainHTTPLogin use http rather than https for login. +func WithPlainHTTPLogin(isPlain bool) RegistryLoginOpt { + return func(r *RegistryLogin) error { + r.plainHTTP = isPlain + return nil + } +} + // NewRegistryLogin creates a new RegistryLogin object with the given configuration. func NewRegistryLogin(cfg *Configuration) *RegistryLogin { return &RegistryLogin{ @@ -84,5 +93,7 @@ func (a *RegistryLogin) Run(_ io.Writer, hostname string, username string, passw hostname, registry.LoginOptBasicAuth(username, password), registry.LoginOptInsecure(a.insecure), - registry.LoginOptTLSClientConfig(a.certFile, a.keyFile, a.caFile)) + registry.LoginOptTLSClientConfig(a.certFile, a.keyFile, a.caFile), + registry.LoginOptPlainText(a.plainHTTP), + ) } diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chart/dependency.go b/tools/vendor/helm.sh/helm/v3/pkg/chart/dependency.go index 4ef5eeb32..eda0f5a89 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chart/dependency.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chart/dependency.go @@ -25,28 +25,28 @@ type Dependency struct { // Name is the name of the dependency. // // This must mach the name in the dependency's Chart.yaml. - Name string `json:"name"` + Name string `json:"name" yaml:"name"` // Version is the version (range) of this chart. // // A lock file will always produce a single version, while a dependency // may contain a semantic version range. - Version string `json:"version,omitempty"` + Version string `json:"version,omitempty" yaml:"version,omitempty"` // The URL to the repository. // // Appending `index.yaml` to this string should result in a URL that can be // used to fetch the repository index. - Repository string `json:"repository"` + Repository string `json:"repository" yaml:"repository"` // A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled ) - Condition string `json:"condition,omitempty"` + Condition string `json:"condition,omitempty" yaml:"condition,omitempty"` // Tags can be used to group charts for enabling/disabling together - Tags []string `json:"tags,omitempty"` + Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"` // Enabled bool determines if chart should be loaded - Enabled bool `json:"enabled,omitempty"` + Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` // ImportValues holds the mapping of source values to parent key to be imported. Each item can be a // string or pair of child/parent sublist items. - ImportValues []interface{} `json:"import-values,omitempty"` + ImportValues []interface{} `json:"import-values,omitempty" yaml:"import-values,omitempty"` // Alias usable alias to be used for the chart - Alias string `json:"alias,omitempty"` + Alias string `json:"alias,omitempty" yaml:"alias,omitempty"` } // Validate checks for common problems with the dependency datastructure in diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go b/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go index 8bb549346..6272a564f 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go @@ -33,6 +33,15 @@ import ( "helm.sh/helm/v3/pkg/chart" ) +// MaxDecompressedChartSize is the maximum size of a chart archive that will be +// decompressed. This is the decompressed size of all the files. 
+// The default value is 100 MiB. +var MaxDecompressedChartSize int64 = 100 * 1024 * 1024 // Default 100 MiB + +// MaxDecompressedFileSize is the size of the largest file that Helm will attempt to load. +// The size of the file is the decompressed version of it when it is stored in an archive. +var MaxDecompressedFileSize int64 = 5 * 1024 * 1024 // Default 5 MiB + var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`) // FileLoader loads a chart from a file @@ -119,6 +128,7 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) { files := []*BufferedFile{} tr := tar.NewReader(unzipped) + remainingSize := MaxDecompressedChartSize for { b := bytes.NewBuffer(nil) hd, err := tr.Next() @@ -178,10 +188,30 @@ func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) { return nil, errors.New("chart yaml not in base directory") } - if _, err := io.Copy(b, tr); err != nil { + if hd.Size > remainingSize { + return nil, fmt.Errorf("decompressed chart is larger than the maximum file size %d", MaxDecompressedChartSize) + } + + if hd.Size > MaxDecompressedFileSize { + return nil, fmt.Errorf("decompressed chart file %q is larger than the maximum file size %d", hd.Name, MaxDecompressedFileSize) + } + + limitedReader := io.LimitReader(tr, remainingSize) + + bytesWritten, err := io.Copy(b, limitedReader) + if err != nil { return nil, err } + remainingSize -= bytesWritten + // When the bytesWritten are less than the file size it means the limit reader ended + // copying early. Here we report that error. This is important if the last file extracted + // is the one that goes over the limit. It assumes the Size stored in the tar header + // is correct, something many applications do. + if bytesWritten < hd.Size || remainingSize <= 0 { + return nil, fmt.Errorf("decompressed chart is larger than the maximum file size %d", MaxDecompressedChartSize) + } + data := bytes.TrimPrefix(b.Bytes(), utf8bom) files = append(files, &BufferedFile{Name: n, Data: data}) diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go b/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go index 9bcbee60c..fd8e02e1a 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go @@ -101,6 +101,10 @@ func LoadDir(dir string) (*chart.Chart, error) { return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name) } + if fi.Size() > MaxDecompressedFileSize { + return fmt.Errorf("chart file %q is larger than the maximum file size %d", fi.Name(), MaxDecompressedFileSize) + } + data, err := os.ReadFile(name) if err != nil { return errors.Wrapf(err, "error reading %s", n) diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go b/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go index f59c35a5e..a68a05aa9 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go @@ -174,7 +174,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { case filepath.Ext(n) == ".tgz": file := files[0] if file.Name != n { - return c, errors.Errorf("error unpacking tar in %s: expected %s, got %s", c.Name(), n, file.Name) + return c, errors.Errorf("error unpacking subchart tar in %s: expected %s, got %s", c.Name(), n, file.Name) } // Untar the chart and add to c.Dependencies sc, err = LoadArchive(bytes.NewBuffer(file.Data)) @@ -194,7 +194,7 @@ func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { } if err != nil { - return c, 
errors.Wrapf(err, "error unpacking %s in %s", n, c.Name()) + return c, errors.Wrapf(err, "error unpacking subchart %s in %s", n, c.Name()) } c.AddDependency(sc) } diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go index f0272fd6a..40bce2a68 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go @@ -237,6 +237,9 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr printf("warning: skipped value for %s.%s: Not a table.", subPrefix, key) } } else { + // If the key is a child chart, coalesce tables with Merge set to true + merge := childChartMergeTrue(c, key, merge) + // Because v has higher precedence than nv, dest values override src // values. coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key), merge) @@ -249,6 +252,15 @@ func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, pr } } +func childChartMergeTrue(chrt *chart.Chart, key string, merge bool) bool { + for _, subchart := range chrt.Dependencies() { + if subchart.Name() == key { + return true + } + } + return merge +} + // CoalesceTables merges a source map into a destination map. // // dest is considered authoritative. diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/create.go index e9769932f..321d3d2c0 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/create.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/create.go @@ -117,13 +117,13 @@ image: # Overrides the image tag whose default is the chart appVersion. tag: "" -# This is for the secretes for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ imagePullSecrets: [] # This is to override the chart name. nameOverride: "" fullnameOverride: "" -#This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ +# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ serviceAccount: # Specifies whether a service account should be created create: true @@ -136,7 +136,7 @@ serviceAccount: name: "" # This is for setting Kubernetes Annotations to a Pod. -# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ podAnnotations: {} # This is for setting Kubernetes Labels to a Pod. # For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ @@ -199,7 +199,7 @@ readinessProbe: path: / port: http -#This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ +# This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ autoscaling: enabled: false minReplicas: 1 @@ -327,24 +327,34 @@ spec: {{- toYaml . 
| nindent 8 }} {{- end }} serviceAccountName: {{ include ".serviceAccountName" . }} + {{- with .Values.podSecurityContext }} securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- toYaml . | nindent 8 }} + {{- end }} containers: - name: {{ .Chart.Name }} + {{- with .Values.securityContext }} securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} + {{- toYaml . | nindent 12 }} + {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http containerPort: {{ .Values.service.port }} protocol: TCP + {{- with .Values.livenessProbe }} livenessProbe: - {{- toYaml .Values.livenessProbe | nindent 12 }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} readinessProbe: - {{- toYaml .Values.readinessProbe | nindent 12 }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.resources }} resources: - {{- toYaml .Values.resources | nindent 12 }} + {{- toYaml . | nindent 12 }} + {{- end }} {{- with .Values.volumeMounts }} volumeMounts: {{- toYaml . | nindent 12 }} @@ -620,6 +630,10 @@ func Create(name, dir string) (string, error) { return cdir, errors.Errorf("file %s already exists and is not a directory", cdir) } + // Note: If adding a new template below (i.e., to `helm create`) which is disabled by default (similar to hpa and + // ingress below); or making an existing template disabled by default, add the enabling condition in + // `TestHelmCreateChart_CheckDeprecatedWarnings` in `pkg/lint/lint_test.go` to make it run through deprecation checks + // with latest Kubernetes version. files := []struct { path string content []byte diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go index 36a341927..37452cec7 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go @@ -16,6 +16,7 @@ limitations under the License. package chartutil import ( + "fmt" "log" "strings" @@ -255,8 +256,8 @@ func processImportValues(c *chart.Chart, merge bool) error { for _, riv := range r.ImportValues { switch iv := riv.(type) { case map[string]interface{}: - child := iv["child"].(string) - parent := iv["parent"].(string) + child := fmt.Sprintf("%v", iv["child"]) + parent := fmt.Sprintf("%v", iv["parent"]) outiv = append(outiv, map[string]string{ "child": child, diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go index 7ae1ae6fa..ac59f2575 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go @@ -52,6 +52,9 @@ func Expand(dir string, r io.Reader) error { } // Find the base directory + // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up + // being wrong or returning an error. This was introduced in v0.4.0. 
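A minimal sketch of the behaviour the comment above describes, assuming github.com/cyphar/filepath-securejoin (the library Expand uses); the Clean call added on the next line is what keeps the root normalized before the join:

package main

import (
	"fmt"
	"path/filepath"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// Normalize the root first, as the Clean call added below does; an
	// uncleaned root (trailing separator, "." segments) can skew the join.
	dir := filepath.Clean("./charts/")

	// Even a hostile top-level name stays rooted under dir.
	chartdir, err := securejoin.SecureJoin(dir, "../../etc/mychart")
	if err != nil {
		panic(err)
	}
	fmt.Println(chartdir) // charts/etc/mychart
}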
+ dir = filepath.Clean(dir) chartdir, err := securejoin.SecureJoin(dir, chartName) if err != nil { return err diff --git a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go index 7b9768fd3..d712316c5 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go @@ -18,20 +18,62 @@ package chartutil import ( "bytes" + "crypto/tls" + "errors" "fmt" "strings" + "time" - "github.com/pkg/errors" - "github.com/xeipuuv/gojsonschema" - "sigs.k8s.io/yaml" + "github.com/santhosh-tekuri/jsonschema/v6" + "net/http" + + "helm.sh/helm/v3/internal/version" "helm.sh/helm/v3/pkg/chart" ) +// HTTPURLLoader implements a loader for HTTP/HTTPS URLs +type HTTPURLLoader http.Client + +func (l *HTTPURLLoader) Load(urlStr string) (any, error) { + client := (*http.Client)(l) + + req, err := http.NewRequest(http.MethodGet, urlStr, nil) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP request for %s: %w", urlStr, err) + } + req.Header.Set("User-Agent", version.GetUserAgent()) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("HTTP request failed for %s: %w", urlStr, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP request to %s returned status %d (%s)", urlStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return jsonschema.UnmarshalJSON(resp.Body) +} + +// newHTTPURLLoader creates a HTTP URL loader with proxy support. +func newHTTPURLLoader() *HTTPURLLoader { + httpLoader := HTTPURLLoader(http.Client{ + Timeout: 15 * time.Second, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{}, + }, + }) + return &httpLoader +} + // ValidateAgainstSchema checks that values does not violate the structure laid out in schema func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { var sb strings.Builder if chrt.Schema != nil { + err := ValidateAgainstSingleSchema(values, chrt.Schema) if err != nil { sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name())) @@ -39,7 +81,6 @@ func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) err } } - // For each dependency, recursively call this function with the coalesced values for _, subchart := range chrt.Dependencies() { subchartValues := values[subchart.Name()].(map[string]interface{}) if err := ValidateAgainstSchema(subchart, subchartValues); err != nil { @@ -62,32 +103,48 @@ func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error } }() - valuesData, err := yaml.Marshal(values) + // This unmarshal function leverages UseNumber() for number precision. The parser + // used for values does this as well. 
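For orientation, a self-contained sketch of the santhosh-tekuri/jsonschema/v6 flow that the patched ValidateAgainstSingleSchema follows below (the schema body and instance here are illustrative; the resource URL matches the one the patch registers):

package main

import (
	"fmt"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v6"
)

func main() {
	schemaJSON := `{"type":"object","required":["replicas"],"properties":{"replicas":{"type":"integer","minimum":1}}}`

	// UnmarshalJSON keeps numbers as json.Number, matching how the patched
	// code parses both the schema and the values.
	doc, err := jsonschema.UnmarshalJSON(strings.NewReader(schemaJSON))
	if err != nil {
		panic(err)
	}

	c := jsonschema.NewCompiler()
	if err := c.AddResource("file:///values.schema.json", doc); err != nil {
		panic(err)
	}
	sch, err := c.Compile("file:///values.schema.json")
	if err != nil {
		panic(err)
	}

	instance, err := jsonschema.UnmarshalJSON(strings.NewReader(`{"replicas": 0}`))
	if err != nil {
		panic(err)
	}
	if err := sch.Validate(instance); err != nil {
		fmt.Println(err) // reports the minimum violation at /replicas
	}
}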
+ schema, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON)) if err != nil { return err } - valuesJSON, err := yaml.YAMLToJSON(valuesData) + + // Configure compiler with loaders for different URL schemes + loader := jsonschema.SchemeURLLoader{ + "file": jsonschema.FileLoader{}, + "http": newHTTPURLLoader(), + "https": newHTTPURLLoader(), + } + + compiler := jsonschema.NewCompiler() + compiler.UseLoader(loader) + err = compiler.AddResource("file:///values.schema.json", schema) if err != nil { return err } - if bytes.Equal(valuesJSON, []byte("null")) { - valuesJSON = []byte("{}") - } - schemaLoader := gojsonschema.NewBytesLoader(schemaJSON) - valuesLoader := gojsonschema.NewBytesLoader(valuesJSON) - result, err := gojsonschema.Validate(schemaLoader, valuesLoader) + validator, err := compiler.Compile("file:///values.schema.json") if err != nil { return err } - if !result.Valid() { - var sb strings.Builder - for _, desc := range result.Errors() { - sb.WriteString(fmt.Sprintf("- %s\n", desc)) - } - return errors.New(sb.String()) + err = validator.Validate(values.AsMap()) + if err != nil { + return JSONSchemaValidationError{err} } return nil } + +type JSONSchemaValidationError struct { + embeddedErr error +} + +func (e JSONSchemaValidationError) Error() string { + errStr := e.embeddedErr.Error() + + errStr = strings.TrimPrefix(errStr, "jsonschema validation failed with 'file:///values.schema.json#'\n") + + return errStr + "\n" +} diff --git a/tools/vendor/helm.sh/helm/v3/pkg/cli/environment.go b/tools/vendor/helm.sh/helm/v3/pkg/cli/environment.go index 0f28c61fd..635806344 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/cli/environment.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/cli/environment.go @@ -36,6 +36,7 @@ import ( "helm.sh/helm/v3/internal/version" "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/kube" ) // defaultMaxHistory sets the maximum number of releases to 0: unlimited @@ -127,7 +128,7 @@ func New() *EnvSettings { config.Burst = env.BurstLimit config.QPS = env.QPS config.Wrap(func(rt http.RoundTripper) http.RoundTripper { - return &retryingRoundTripper{wrapped: rt} + return &kube.RetryingRoundTripper{Wrapped: rt} }) config.UserAgent = version.GetUserAgent() return config diff --git a/tools/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go b/tools/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go index a95894e00..9e8e243b8 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go @@ -23,7 +23,6 @@ import ( "path/filepath" "strings" - "github.com/Masterminds/semver/v3" "github.com/pkg/errors" "helm.sh/helm/v3/internal/fileutil" @@ -97,6 +96,8 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven return "", nil, err } + c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream")) + data, err := g.Get(u.String(), c.Options...) if err != nil { return "", nil, err @@ -141,39 +142,6 @@ func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *proven return destfile, ver, nil } -func (c *ChartDownloader) getOciURI(ref, version string, u *url.URL) (*url.URL, error) { - var tag string - var err error - - // Evaluate whether an explicit version has been provided. 
Otherwise, determine version to use - _, errSemVer := semver.NewVersion(version) - if errSemVer == nil { - tag = version - } else { - // Retrieve list of repository tags - tags, err := c.RegistryClient.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", registry.OCIScheme))) - if err != nil { - return nil, err - } - if len(tags) == 0 { - return nil, errors.Errorf("Unable to locate any tags in provided repository: %s", ref) - } - - // Determine if version provided - // If empty, try to get the highest available tag - // If exact version, try to find it - // If semver constraint string, try to find a match - tag, err = registry.GetTagMatchingVersionOrConstraint(tags, version) - if err != nil { - return nil, err - } - } - - u.Path = fmt.Sprintf("%s:%s", u.Path, tag) - - return u, err -} - // ResolveChartVersion resolves a chart reference to a URL. // // It returns the URL and sets the ChartDownloader's Options that can fetch @@ -196,7 +164,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er } if registry.IsOCI(u.String()) { - return c.getOciURI(ref, version, u) + return c.RegistryClient.ValidateReference(ref, version, u) } rf, err := loadRepoConfig(c.RepositoryConfig) diff --git a/tools/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/tools/vendor/helm.sh/helm/v3/pkg/downloader/manager.go index ec4056d27..cc7850aae 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/downloader/manager.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/downloader/manager.go @@ -852,6 +852,20 @@ func writeLock(chartpath string, lock *chart.Lock, legacyLockfile bool) error { lockfileName = "requirements.lock" } dest := filepath.Join(chartpath, lockfileName) + + info, err := os.Lstat(dest) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error getting info for %q: %w", dest, err) + } else if err == nil { + if info.Mode()&os.ModeSymlink != 0 { + link, err := os.Readlink(dest) + if err != nil { + return fmt.Errorf("error reading symlink for %q: %w", dest, err) + } + return fmt.Errorf("the %s file is a symlink to %q", lockfileName, link) + } + } + return os.WriteFile(dest, data, 0644) } diff --git a/tools/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/tools/vendor/helm.sh/helm/v3/pkg/engine/engine.go index df3a600a3..d8ee313e1 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/engine/engine.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/engine/engine.go @@ -206,7 +206,7 @@ func (e Engine) initFunMap(t *template.Template) { log.Printf("[INFO] Missing required value: %s", warn) return "", nil } - return val, errors.Errorf(warnWrap(warn)) + return val, errors.New(warnWrap(warn)) } else if _, ok := val.(string); ok { if val == "" { if e.LintMode { @@ -214,7 +214,7 @@ func (e Engine) initFunMap(t *template.Template) { log.Printf("[INFO] Missing required value: %s", warn) return "", nil } - return val, errors.Errorf(warnWrap(warn)) + return val, errors.New(warnWrap(warn)) } } return val, nil diff --git a/tools/vendor/helm.sh/helm/v3/pkg/engine/funcs.go b/tools/vendor/helm.sh/helm/v3/pkg/engine/funcs.go index 8f05a3a1d..d03a818c2 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/engine/funcs.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/engine/funcs.go @@ -25,6 +25,7 @@ import ( "github.com/BurntSushi/toml" "github.com/Masterminds/sprig/v3" "sigs.k8s.io/yaml" + goYaml "sigs.k8s.io/yaml/goyaml.v3" ) // funcMap returns a mapping of all of the functions that Engine has. 
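The hunk that follows registers two new template helpers, toYamlPretty and fromToml. A rough, standalone sketch of what toYamlPretty does, using gopkg.in/yaml.v3 as a stand-in for the vendored sigs.k8s.io/yaml/goyaml.v3 fork:

package main

import (
	"bytes"
	"os"
	"strings"
	"text/template"

	yaml "gopkg.in/yaml.v3" // stand-in for the vendored sigs.k8s.io/yaml/goyaml.v3
)

// toYamlPretty mirrors the helper added below: two-space indent,
// trailing newline trimmed, errors swallowed inside templates.
func toYamlPretty(v interface{}) string {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2)
	if err := enc.Encode(v); err != nil {
		return ""
	}
	return strings.TrimSuffix(buf.String(), "\n")
}

func main() {
	tpl := template.Must(template.New("demo").
		Funcs(template.FuncMap{"toYamlPretty": toYamlPretty}).
		Parse("{{ toYamlPretty . }}\n"))

	values := map[string]interface{}{
		"resources": map[string]interface{}{
			"limits": map[string]interface{}{"cpu": "100m", "memory": "128Mi"},
		},
	}
	_ = tpl.Execute(os.Stdout, values)
}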
@@ -48,7 +49,9 @@ func funcMap() template.FuncMap { // Add some extra functionality extra := template.FuncMap{ "toToml": toTOML, + "fromToml": fromTOML, "toYaml": toYAML, + "toYamlPretty": toYAMLPretty, "fromYaml": fromYAML, "fromYamlArray": fromYAMLArray, "toJson": toJSON, @@ -88,6 +91,19 @@ func toYAML(v interface{}) string { return strings.TrimSuffix(string(data), "\n") } +func toYAMLPretty(v interface{}) string { + var data bytes.Buffer + encoder := goYaml.NewEncoder(&data) + encoder.SetIndent(2) + err := encoder.Encode(v) + + if err != nil { + // Swallow errors inside of a template. + return "" + } + return strings.TrimSuffix(data.String(), "\n") +} + // fromYAML converts a YAML document into a map[string]interface{}. // // This is not a general-purpose YAML parser, and will not parse all valid @@ -132,6 +148,21 @@ func toTOML(v interface{}) string { return b.String() } +// fromTOML converts a TOML document into a map[string]interface{}. +// +// This is not a general-purpose TOML parser, and will not parse all valid +// TOML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string into +// m["Error"] in the returned map. +func fromTOML(str string) map[string]interface{} { + m := make(map[string]interface{}) + + if err := toml.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + // toJSON takes an interface, marshals it to json, and returns a string. It will // always return a string, even on marshal error (empty string). // diff --git a/tools/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/tools/vendor/helm.sh/helm/v3/pkg/getter/getter.go index 45ab4da7e..1acb2093d 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/getter/getter.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/getter/getter.go @@ -38,6 +38,7 @@ type options struct { unTar bool insecureSkipVerifyTLS bool plainHTTP bool + acceptHeader string username string password string passCredentialsAll bool @@ -60,6 +61,13 @@ func WithURL(url string) Option { } } +// WithAcceptHeader sets the request's Accept header as some REST APIs serve multiple content types +func WithAcceptHeader(header string) Option { + return func(opts *options) { + opts.acceptHeader = header + } +} + // WithBasicAuth sets the request's Authorization header to use the provided credentials func WithBasicAuth(username, password string) Option { return func(opts *options) { diff --git a/tools/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go b/tools/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go index b53e558e3..df3dcd910 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go @@ -53,6 +53,10 @@ func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { return nil, err } + if g.opts.acceptHeader != "" { + req.Header.Set("Accept", g.opts.acceptHeader) + } + req.Header.Set("User-Agent", version.GetUserAgent()) if g.opts.userAgent != "" { req.Header.Set("User-Agent", g.opts.userAgent) diff --git a/tools/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go b/tools/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go index 0547cdcbb..5b0522395 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go @@ -20,6 +20,7 @@ import ( "fmt" "net" "net/http" + "path" "strings" "sync" "time" @@ -58,6 +59,9 @@ func (g *OCIGetter) get(href string) (*bytes.Buffer, error) { ref := strings.TrimPrefix(href, fmt.Sprintf("%s://", 
registry.OCIScheme)) + if version := g.opts.version; version != "" && !strings.Contains(path.Base(ref), ":") { + ref = fmt.Sprintf("%s:%s", ref, version) + } var pullOpts []registry.PullOption requestingProv := strings.HasSuffix(ref, ".prov") if requestingProv { diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/client.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/client.go index d979fd22c..6e7142119 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/kube/client.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/client.go @@ -46,6 +46,8 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/jsonmergepatch" + "k8s.io/apimachinery/pkg/util/mergepatch" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" @@ -55,6 +57,7 @@ import ( "k8s.io/client-go/rest" cachetools "k8s.io/client-go/tools/cache" watchtools "k8s.io/client-go/tools/watch" + "k8s.io/client-go/util/retry" cmdutil "k8s.io/kubectl/pkg/cmd/util" ) @@ -82,26 +85,25 @@ type Client struct { // Namespace allows to bypass the kubeconfig file for the choice of the namespace Namespace string - kubeClient *kubernetes.Clientset + kubeClient kubernetes.Interface } -var addToScheme sync.Once +func init() { + // Add CRDs to the scheme. They are missing by default. + if err := apiextv1.AddToScheme(scheme.Scheme); err != nil { + // This should never happen. + panic(err) + } + if err := apiextv1beta1.AddToScheme(scheme.Scheme); err != nil { + panic(err) + } +} // New creates a new Client. func New(getter genericclioptions.RESTClientGetter) *Client { if getter == nil { getter = genericclioptions.NewConfigFlags(true) } - // Add CRDs to the scheme. They are missing by default. - addToScheme.Do(func() { - if err := apiextv1.AddToScheme(scheme.Scheme); err != nil { - // This should never happen. - panic(err) - } - if err := apiextv1beta1.AddToScheme(scheme.Scheme); err != nil { - panic(err) - } - }) return &Client{ Factory: cmdutil.NewFactory(getter), Log: nopLogger, @@ -111,7 +113,7 @@ func New(getter genericclioptions.RESTClientGetter) *Client { var nopLogger = func(_ string, _ ...interface{}) {} // getKubeClient get or create a new KubernetesClientSet -func (c *Client) getKubeClient() (*kubernetes.Clientset, error) { +func (c *Client) getKubeClient() (kubernetes.Interface, error) { var err error if c.kubeClient == nil { c.kubeClient, err = c.Factory.KubernetesClientSet() @@ -131,7 +133,7 @@ func (c *Client) IsReachable() error { if err != nil { return errors.Wrap(err, "Kubernetes cluster unreachable") } - if _, err := client.ServerVersion(); err != nil { + if _, err := client.Discovery().ServerVersion(); err != nil { return errors.Wrap(err, "Kubernetes cluster unreachable") } return nil @@ -379,14 +381,7 @@ func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, erro return result, scrubValidationError(err) } -// Update takes the current list of objects and target list of objects and -// creates resources that don't already exist, updates resources that have been -// modified in the target configuration, and deletes resources from the current -// configuration that are not present in the target configuration. If an error -// occurs, a Result will still be returned with the error, containing all -// resource updates, creations, and deletions that were attempted. These can be -// used for cleanup or other logging purposes. 
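The client.go rewrite below splits Update into a shared update helper and adds an opt-in three-way JSON merge for unstructured objects. A small sketch of what CreateThreeWayJSONMergePatch produces (the manifests are illustrative): fields the user added to the live object survive, because the patch is computed against the last-applied state rather than the live one.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/jsonmergepatch"
	"k8s.io/apimachinery/pkg/util/mergepatch"
)

func main() {
	// original = last applied, modified = new chart render, current = live object.
	original := []byte(`{"spec":{"replicas":1}}`)
	modified := []byte(`{"spec":{"replicas":3}}`)
	current := []byte(`{"spec":{"replicas":1,"addedByUser":"kept"}}`)

	preconditions := []mergepatch.PreconditionFunc{
		mergepatch.RequireKeyUnchanged("apiVersion"),
		mergepatch.RequireKeyUnchanged("kind"),
	}
	patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(original, modified, current, preconditions...)
	if err != nil {
		panic(err)
	}
	// Only the replicas change ships; "addedByUser" is not deleted because
	// it never appeared in the last-applied or rendered manifests.
	fmt.Println(string(patch)) // {"spec":{"replicas":3}}
}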
-func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) { +func (c *Client) update(original, target ResourceList, force, threeWayMerge bool) (*Result, error) { updateErrors := []string{} res := &Result{} @@ -421,7 +416,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err return errors.Errorf("no %s with the name %q found", kind, info.Name) } - if err := updateResource(c, info, originalInfo.Object, force); err != nil { + if err := updateResource(c, info, originalInfo.Object, force, threeWayMerge); err != nil { c.Log("error updating the resource %q:\n\t %v", info.Name, err) updateErrors = append(updateErrors, err.Error()) } @@ -435,7 +430,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err case err != nil: return res, err case len(updateErrors) != 0: - return res, errors.Errorf(strings.Join(updateErrors, " && ")) + return res, errors.New(strings.Join(updateErrors, " && ")) } for _, info := range original.Difference(target) { @@ -462,6 +457,31 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err return res, nil } +// UpdateThreeWayMerge takes the current list of objects and target list of objects and +// creates resources that don't already exist, updates resources that have been +// modified in the target configuration, and deletes resources from the current +// configuration that are not present in the target configuration. If an error +// occurs, a Result will still be returned with the error, containing all +// resource updates, creations, and deletions that were attempted. These can be +// used for cleanup or other logging purposes. +// +// The difference from Update is that UpdateThreeWayMerge does a three-way merge +// for unstructured objects. +func (c *Client) UpdateThreeWayMerge(original, target ResourceList, force bool) (*Result, error) { + return c.update(original, target, force, true) +} + +// Update takes the current list of objects and target list of objects and +// creates resources that don't already exist, updates resources that have been +// modified in the target configuration, and deletes resources from the current +// configuration that are not present in the target configuration. If an error +// occurs, a Result will still be returned with the error, containing all +// resource updates, creations, and deletions that were attempted. These can be +// used for cleanup or other logging purposes. +func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) { + return c.update(original, target, force, false) +} + // Delete deletes Kubernetes resources specified in the resources list with // background cascade deletion. It will attempt to delete all resources even // if one or more fail and collect any errors.
All successfully deleted items @@ -596,20 +616,28 @@ func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- } func createResource(info *resource.Info) error { - obj, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).Create(info.Namespace, true, info.Object) - if err != nil { - return err - } - return info.Refresh(obj, true) + return retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + obj, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).Create(info.Namespace, true, info.Object) + if err != nil { + return err + } + return info.Refresh(obj, true) + }) } func deleteResource(info *resource.Info, policy metav1.DeletionPropagation) error { - opts := &metav1.DeleteOptions{PropagationPolicy: &policy} - _, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).DeleteWithOptions(info.Namespace, info.Name, opts) - return err + return retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + opts := &metav1.DeleteOptions{PropagationPolicy: &policy} + _, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).DeleteWithOptions(info.Namespace, info.Name, opts) + return err + }) } -func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.PatchType, error) { +func createPatch(target *resource.Info, current runtime.Object, threeWayMergeForUnstructured bool) ([]byte, types.PatchType, error) { oldData, err := json.Marshal(current) if err != nil { return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing current configuration") @@ -637,7 +665,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P // Unstructured objects, such as CRDs, may not have a not registered error // returned from ConvertToVersion. Anything that's unstructured should - // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported + // use generic JSON merge patch. Strategic Merge Patch is not supported // on objects like CRDs. _, isUnstructured := versionedObject.(runtime.Unstructured) @@ -645,6 +673,19 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P _, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition) if isUnstructured || isCRD { + if threeWayMergeForUnstructured { + // from https://github.com/kubernetes/kubectl/blob/b83b2ec7d15f286720bccf7872b5c72372cb8e80/pkg/cmd/apply/patcher.go#L129 + preconditions := []mergepatch.PreconditionFunc{ + mergepatch.RequireKeyUnchanged("apiVersion"), + mergepatch.RequireKeyUnchanged("kind"), + mergepatch.RequireMetadataKeyUnchanged("name"), + } + patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(oldData, newData, currentData, preconditions...) 
+ if err != nil && mergepatch.IsPreconditionFailed(err) { + err = fmt.Errorf("%w: at least one field was changed: apiVersion, kind or name", err) + } + return patch, types.MergePatchType, err + } // fall back to generic JSON merge patch patch, err := jsonpatch.CreateMergePatch(oldData, newData) return patch, types.MergePatchType, err @@ -659,7 +700,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P return patch, types.StrategicMergePatchType, err } -func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error { +func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force, threeWayMergeForUnstructured bool) error { var ( obj runtime.Object helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) @@ -675,7 +716,7 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, } c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind) } else { - patch, patchType, err := createPatch(target, currentObj) + patch, patchType, err := createPatch(target, currentObj, threeWayMergeForUnstructured) if err != nil { return errors.Wrap(err, "failed to create patch") } @@ -804,6 +845,48 @@ func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error return false, nil } +// GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions +func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) { + podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions) + if err != nil { + return nil, fmt.Errorf("failed to get pod list with options: %+v with error: %w", listOptions, err) + } + return podList, nil +} + +// OutputContainerLogsForPodList is a helper that outputs logs for a list of pods +func (c *Client) OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error { + for _, pod := range podList.Items { + for _, container := range pod.Spec.Containers { + options := &v1.PodLogOptions{ + Container: container.Name, + } + request := c.kubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, options) + err2 := copyRequestStreamToWriter(request, pod.Name, container.Name, writerFunc(namespace, pod.Name, container.Name)) + if err2 != nil { + return err2 + } + } + } + return nil +} + +func copyRequestStreamToWriter(request *rest.Request, podName, containerName string, writer io.Writer) error { + readCloser, err := request.Stream(context.Background()) + if err != nil { + return errors.Wrapf(err, "failed to stream pod logs for pod: %s, container: %s", podName, containerName) + } + defer readCloser.Close() + _, err = io.Copy(writer, readCloser) + if err != nil { + return errors.Wrapf(err, "failed to copy IO from logs for pod: %s, container: %s", podName, containerName) + } + return nil +} + // scrubValidationError removes kubectl info from the message.
func scrubValidationError(err error) error { if err == nil { diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go index 267020d57..852a3e015 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go @@ -44,6 +44,7 @@ type FailingKubeClient struct { BuildError error BuildTableError error BuildDummy bool + DummyResources kube.ResourceList BuildUnstructuredError error WaitAndGetCompletedPodPhaseError error WaitDuration time.Duration @@ -114,11 +115,22 @@ func (f *FailingKubeClient) Update(r, modified kube.ResourceList, ignoreMe bool) return f.PrintingKubeClient.Update(r, modified, ignoreMe) } +// UpdateThreeWayMerge returns the configured error if set or prints +func (f *FailingKubeClient) UpdateThreeWayMerge(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) { + if f.UpdateError != nil { + return &kube.Result{}, f.UpdateError + } + return f.PrintingKubeClient.Update(r, modified, ignoreMe) +} + // Build returns the configured error if set or prints func (f *FailingKubeClient) Build(r io.Reader, _ bool) (kube.ResourceList, error) { if f.BuildError != nil { return []*resource.Info{}, f.BuildError } + if f.DummyResources != nil { + return f.DummyResources, nil + } if f.BuildDummy { return createDummyResourceList(), nil } diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go index cc2c84b40..95c89e0fd 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go @@ -17,6 +17,7 @@ limitations under the License. package fake import ( + "fmt" "io" "strings" "time" @@ -32,7 +33,8 @@ import ( // PrintingKubeClient implements KubeClient, but simply prints the reader to // the given output. type PrintingKubeClient struct { - Out io.Writer + Out io.Writer + LogOutput io.Writer } // IsReachable checks if the cluster is reachable @@ -116,6 +118,17 @@ func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Durati return v1.PodSucceeded, nil } +// GetPodList implements KubeClient GetPodList. +func (p *PrintingKubeClient) GetPodList(_ string, _ metav1.ListOptions) (*v1.PodList, error) { + return &v1.PodList{}, nil +} + +// OutputContainerLogsForPodList implements KubeClient OutputContainerLogsForPodList. +func (p *PrintingKubeClient) OutputContainerLogsForPodList(_ *v1.PodList, someNamespace string, _ func(namespace, pod, container string) io.Writer) error { + _, err := io.Copy(p.LogOutput, strings.NewReader(fmt.Sprintf("attempted to output logs for namespace: %s", someNamespace))) + return err +} + // DeleteWithPropagationPolicy implements KubeClient delete. // // It only prints out the content to be deleted. diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/interface.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/interface.go index ce42ed950..db7591f65 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/kube/interface.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/interface.go @@ -72,7 +72,7 @@ type Interface interface { IsReachable() error } -// InterfaceExt is introduced to avoid breaking backwards compatibility for Interface implementers. +// InterfaceExt was introduced to avoid breaking backwards compatibility for Interface implementers. // // TODO Helm 4: Remove InterfaceExt and integrate its method(s) into the Interface.
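InterfaceExt here, and InterfaceThreeWayMerge and InterfaceLogs just below, all use the same extension-interface pattern: a new capability lives in a small optional interface, and callers feature-detect with a type assertion so existing Interface implementers keep compiling. A condensed sketch with simplified stand-in types (not the real signatures):

package main

import "fmt"

// Interface stands in for kube.Interface; Logs stands in for the optional
// InterfaceLogs extension (the real one carries GetPodList and
// OutputContainerLogsForPodList with richer signatures).
type Interface interface{ IsReachable() error }

type Logs interface{ OutputLogs(namespace string) error }

type client struct{}

func (client) IsReachable() error { return nil }
func (client) OutputLogs(ns string) error {
	fmt.Println("streaming hook logs for", ns)
	return nil
}

// outputHookLogs has the same shape as outputContainerLogsForListOptions
// in hooks.go: a silent no-op when the client lacks the capability.
func outputHookLogs(c Interface, ns string) error {
	if lc, ok := c.(Logs); ok {
		return lc.OutputLogs(ns)
	}
	return nil
}

func main() { _ = outputHookLogs(client{}, "default") }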
type InterfaceExt interface { @@ -80,11 +80,29 @@ type InterfaceExt interface { WaitForDelete(resources ResourceList, timeout time.Duration) error } +// InterfaceThreeWayMerge was introduced to avoid breaking backwards compatibility for Interface implementers. +// +// TODO Helm 4: Remove InterfaceThreeWayMerge and integrate its method(s) into the Interface. +type InterfaceThreeWayMerge interface { + UpdateThreeWayMerge(original, target ResourceList, force bool) (*Result, error) +} + +// InterfaceLogs was introduced to avoid breaking backwards compatibility for Interface implementers. +// +// TODO Helm 4: Remove InterfaceLogs and integrate its method(s) into the Interface. +type InterfaceLogs interface { + // GetPodList lists all pods that match the specified listOptions + GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) + + // OutputContainerLogsForPodList outputs the logs for a pod list + OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error +} + // InterfaceDeletionPropagation is introduced to avoid breaking backwards compatibility for Interface implementers. // // TODO Helm 4: Remove InterfaceDeletionPropagation and integrate its method(s) into the Interface. type InterfaceDeletionPropagation interface { - // Delete destroys one or more resources. The deletion propagation is handled as per the given deletion propagation value. + // DeleteWithPropagationPolicy destroys one or more resources. The deletion propagation is handled as per the given deletion propagation value. DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error) } @@ -112,5 +130,7 @@ type InterfaceResources interface { var _ Interface = (*Client)(nil) var _ InterfaceExt = (*Client)(nil) +var _ InterfaceThreeWayMerge = (*Client)(nil) +var _ InterfaceLogs = (*Client)(nil) var _ InterfaceDeletionPropagation = (*Client)(nil) var _ InterfaceResources = (*Client)(nil) diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/resource.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/resource.go index d441db8a7..db8e9178e 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/kube/resource.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/resource.go @@ -81,5 +81,5 @@ func (r ResourceList) Intersect(rs ResourceList) ResourceList { // isMatchingInfo returns true if infos match on Name and GroupVersionKind. func isMatchingInfo(a, b *resource.Info) bool { - return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.Kind == b.Mapping.GroupVersionKind.Kind + return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.Kind == b.Mapping.GroupVersionKind.Kind && a.Mapping.GroupVersionKind.Group == b.Mapping.GroupVersionKind.Group } diff --git a/tools/vendor/helm.sh/helm/v3/pkg/cli/roundtripper.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/roundtripper.go similarity index 87% rename from tools/vendor/helm.sh/helm/v3/pkg/cli/roundtripper.go rename to tools/vendor/helm.sh/helm/v3/pkg/kube/roundtripper.go index 9cd4eacba..fdb103529 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/cli/roundtripper.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/roundtripper.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License.
*/ -package cli +package kube import ( "bytes" @@ -24,19 +24,19 @@ import ( "strings" ) -type retryingRoundTripper struct { - wrapped http.RoundTripper +type RetryingRoundTripper struct { + Wrapped http.RoundTripper } -func (rt *retryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { +func (rt *RetryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return rt.roundTrip(req, 1, nil) } -func (rt *retryingRoundTripper) roundTrip(req *http.Request, retry int, prevResp *http.Response) (*http.Response, error) { +func (rt *RetryingRoundTripper) roundTrip(req *http.Request, retry int, prevResp *http.Response) (*http.Response, error) { if retry < 0 { return prevResp, nil } - resp, rtErr := rt.wrapped.RoundTrip(req) + resp, rtErr := rt.Wrapped.RoundTrip(req) if rtErr != nil { return resp, rtErr } diff --git a/tools/vendor/helm.sh/helm/v3/pkg/kube/wait.go b/tools/vendor/helm.sh/helm/v3/pkg/kube/wait.go index 36110d0de..c602004ad 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/kube/wait.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/kube/wait.go @@ -49,6 +49,7 @@ type waiter struct { func (w *waiter) waitForResources(created ResourceList) error { w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout) + startTime := time.Now() ctx, cancel := context.WithTimeout(context.Background(), w.timeout) defer cancel() @@ -57,7 +58,7 @@ func (w *waiter) waitForResources(created ResourceList) error { numberOfErrors[i] = 0 } - return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { waitRetries := 30 for i, v := range created { ready, err := w.c.IsReady(ctx, v) @@ -78,6 +79,15 @@ func (w *waiter) waitForResources(created ResourceList) error { } return true, nil }) + + elapsed := time.Since(startTime).Round(time.Second) + if err != nil { + w.log("wait for resources failed after %v: %v", elapsed, err) + } else { + w.log("wait for resources succeeded within %v", elapsed) + } + + return err } func (w *waiter) isRetryableError(err error, resource *resource.Info) bool { @@ -153,7 +163,7 @@ func SelectorsForObject(object runtime.Object) (selector labels.Selector, err er case *batchv1.Job: selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) case *corev1.Service: - if t.Spec.Selector == nil || len(t.Spec.Selector) == 0 { + if len(t.Spec.Selector) == 0 { return nil, fmt.Errorf("invalid service '%s': Service is defined without a selector", t.Name) } selector = labels.SelectorFromSet(t.Spec.Selector) diff --git a/tools/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go b/tools/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go index 910602b7d..555ec71ba 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go @@ -151,6 +151,9 @@ func validateChartVersion(cf *chart.Metadata) error { func validateChartMaintainer(cf *chart.Metadata) error { for _, maintainer := range cf.Maintainers { + if maintainer == nil { + return errors.New("a maintainer entry is empty") + } if maintainer.Name == "" { return errors.New("each maintainer requires a name") } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) { diff --git a/tools/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go b/tools/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go index e3481515f..34d3163a4 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go +++ 
b/tools/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go @@ -25,5 +25,8 @@ const ( Update = "update" ) +// PlatformHooks is a map of events to a command for a particular operating system and architecture. +type PlatformHooks map[string][]PlatformCommand + // Hooks is a map of events to commands. type Hooks map[string]string diff --git a/tools/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go b/tools/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go index 5bb743481..e2fb78672 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go @@ -44,9 +44,10 @@ type Downloaders struct { // PlatformCommand represents a command for a particular operating system and architecture type PlatformCommand struct { - OperatingSystem string `json:"os"` - Architecture string `json:"arch"` - Command string `json:"command"` + OperatingSystem string `json:"os"` + Architecture string `json:"arch"` + Command string `json:"command"` + Args []string `json:"args"` } // Metadata describes a plugin. @@ -65,23 +66,35 @@ type Metadata struct { // Description is a long description shown in places like `helm help` Description string `json:"description"` - // Command is the command, as a single string. + // PlatformCommand is the plugin command, with a platform selector and support for args. // - // The command will be passed through environment expansion, so env vars can + // The command and args will be passed through environment expansion, so env vars can // be present in this command. Unless IgnoreFlags is set, this will // also merge the flags passed from Helm. // - // Note that command is not executed in a shell. To do so, we suggest + // Note that the command is not executed in a shell. To do so, we suggest // pointing the command to a shell script. // - // The following rules will apply to processing commands: - // - If platformCommand is present, it will be searched first + // The following rules will apply to processing platform commands: + // - If PlatformCommand is present, it will be used // - If both OS and Arch match the current platform, search will stop and the command will be executed - // - If OS matches and there is no more specific match, the command will be executed + // - If OS matches and Arch is empty, the command will be executed // - If no OS/Arch match is found, the default command will be executed - // - If no command is present and no matches are found in platformCommand, Helm will exit with an error + // - If no matches are found in platformCommand, Helm will exit with an error PlatformCommand []PlatformCommand `json:"platformCommand"` - Command string `json:"command"` + + // Command is the plugin command, as a single string. + // Providing a command will result in a deprecation warning if PlatformCommand is also set. + // + // The command will be passed through environment expansion, so env vars can + // be present in this command. Unless IgnoreFlags is set, this will + // also merge the flags passed from Helm. + // + // Note that command is not executed in a shell. To do so, we suggest + // pointing the command to a shell script. + // + // DEPRECATED: Use PlatformCommand instead. Remove in Helm 4. + Command string `json:"command"` // IgnoreFlags ignores any flags passed in from Helm // @@ -90,7 +103,31 @@ type Metadata struct { // the `--debug` flag will be discarded. IgnoreFlags bool `json:"ignoreFlags"` - // Hooks are commands that will run on events.
+ // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args. + // + // The command and args will be passed through environment expansion, so env vars can + // be present in the command. + // + // Note that the command is not executed in a shell. To do so, we suggest + // pointing the command to a shell script. + // + // The following rules will apply to processing platform hooks: + // - If PlatformHooks is present, it will be used + // - If both OS and Arch match the current platform, search will stop and the command will be executed + // - If OS matches and Arch is empty, the command will be executed + // - If no OS/Arch match is found, the default command will be executed + // - If no matches are found in platformHooks, Helm will skip the event + PlatformHooks PlatformHooks `json:"platformHooks"` + + // Hooks are commands that will run on plugin events, as a single string. + // Providing hooks will result in an error if PlatformHooks is also set. + // + // The command will be passed through environment expansion, so env vars can + // be present in this command. + // + // Note that the command is executed in the sh shell. + // + // DEPRECATED: Use PlatformHooks instead. Remove in Helm 4. Hooks Hooks // Downloaders field is used if the plugin supply downloader mechanism @@ -112,62 +149,106 @@ type Plugin struct { Dir string } -// The following rules will apply to processing the Plugin.PlatformCommand.Command: -// - If both OS and Arch match the current platform, search will stop and the command will be prepared for execution -// - If OS matches and there is no more specific match, the command will be prepared for execution -// - If no OS/Arch match is found, return nil -func getPlatformCommand(cmds []PlatformCommand) []string { - var command []string +// Returns command and args strings based on the following rules in priority order: +// - From the PlatformCommand where OS and Arch match the current platform +// - From the PlatformCommand where OS matches the current platform and Arch is empty/unspecified +// - From the PlatformCommand where OS and Arch are both empty/unspecified +// - Return nil, nil +func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) { + var command, args []string + found := false + foundOs := false + eq := strings.EqualFold for _, c := range cmds { - if eq(c.OperatingSystem, runtime.GOOS) { - command = strings.Split(c.Command, " ") - } if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) { - return strings.Split(c.Command, " ") + // Return early for an exact match + return strings.Split(c.Command, " "), c.Args + } + + if (len(c.OperatingSystem) > 0 && !eq(c.OperatingSystem, runtime.GOOS)) || len(c.Architecture) > 0 { + // Skip if OS is set but does not match, or if Arch is set; a set Arch only counts toward the exact match above + continue + } + + if !foundOs && len(c.OperatingSystem) > 0 && eq(c.OperatingSystem, runtime.GOOS) { + // First OS match with empty arch, can only be overridden by a direct match + command = strings.Split(c.Command, " ") + args = c.Args + found = true + foundOs = true + } else if !found { + // First empty match, can be overridden by a direct match or an OS match + command = strings.Split(c.Command, " ") + args = c.Args + found = true } } - return command + + return command, args } -// PrepareCommand takes a Plugin.PlatformCommand.Command, a Plugin.Command and will
applying the following processing: -// - If platformCommand is present, it will be searched first -// - If both OS and Arch match the current platform, search will stop and the command will be prepared for execution -// - If OS matches and there is no more specific match, the command will be prepared for execution -// - If no OS/Arch match is found, the default command will be prepared for execution -// - If no command is present and no matches are found in platformCommand, will exit with an error +// PrepareCommands takes a []Plugin.PlatformCommand +// and prepares the command and arguments for execution. // // It merges extraArgs into any arguments supplied in the plugin. It -// returns the name of the command and an args array. +// returns the main command and an args array. // // The result is suitable to pass to exec.Command. -func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) { - var parts []string - platCmdLen := len(p.Metadata.PlatformCommand) - if platCmdLen > 0 { - parts = getPlatformCommand(p.Metadata.PlatformCommand) - } - if platCmdLen == 0 || parts == nil { - parts = strings.Split(p.Metadata.Command, " ") - } - if len(parts) == 0 || parts[0] == "" { +func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string) (string, []string, error) { + cmdParts, args := getPlatformCommand(cmds) + if len(cmdParts) == 0 || cmdParts[0] == "" { return "", nil, fmt.Errorf("no plugin command is applicable") } - main := os.ExpandEnv(parts[0]) + main := os.ExpandEnv(cmdParts[0]) baseArgs := []string{} - if len(parts) > 1 { - for _, cmdpart := range parts[1:] { - cmdexp := os.ExpandEnv(cmdpart) - baseArgs = append(baseArgs, cmdexp) + if len(cmdParts) > 1 { + for _, cmdPart := range cmdParts[1:] { + if expandArgs { + baseArgs = append(baseArgs, os.ExpandEnv(cmdPart)) + } else { + baseArgs = append(baseArgs, cmdPart) + } } } - if !p.Metadata.IgnoreFlags { + + for _, arg := range args { + if expandArgs { + baseArgs = append(baseArgs, os.ExpandEnv(arg)) + } else { + baseArgs = append(baseArgs, arg) + } + } + + if len(extraArgs) > 0 { baseArgs = append(baseArgs, extraArgs...) } + return main, baseArgs, nil } +// PrepareCommand gets the correct command and arguments for a plugin. +// +// It merges extraArgs into any arguments supplied in the plugin. It returns the name of the command and an args array. +// +// The result is suitable to pass to exec.Command. +func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) { + var extraArgsIn []string + + if !p.Metadata.IgnoreFlags { + extraArgsIn = extraArgs + } + + cmds := p.Metadata.PlatformCommand + if len(cmds) == 0 && len(p.Metadata.Command) > 0 { + cmds = []PlatformCommand{{Command: p.Metadata.Command}} + } + + return PrepareCommands(cmds, true, extraArgsIn) +} + // validPluginName is a regular expression that validates plugin names. // // Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, ​_​ and ​-. 
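For orientation, here is a minimal, self-contained sketch of driving the new exported PrepareCommands entry point shown above. The plugin entries and binary paths are hypothetical, and the import path assumes the vendored package's upstream identity (helm.sh/helm/v3/pkg/plugin):

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/plugin"
)

func main() {
	// Hypothetical command table: on linux/amd64 the exact OS+Arch entry wins;
	// on any other linux platform the OS-only entry wins; everywhere else the
	// bare default entry applies.
	cmds := []plugin.PlatformCommand{
		{OperatingSystem: "linux", Architecture: "amd64", Command: "${HELM_PLUGIN_DIR}/bin/example", Args: []string{"serve"}},
		{OperatingSystem: "linux", Command: "${HELM_PLUGIN_DIR}/bin/example-generic"},
		{Command: "sh ./fallback.sh"},
	}

	// expandArgs=true applies os.ExpandEnv to each command part and arg;
	// extraArgs are appended last, mirroring what Plugin.PrepareCommand does
	// when IgnoreFlags is not set.
	cmd, args, err := plugin.PrepareCommands(cmds, true, []string{"--debug"})
	if err != nil {
		panic(err)
	}
	fmt.Println(cmd, args)
}

As the doc comment above notes, the returned command and args are suitable to hand directly to exec.Command.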
@@ -184,6 +265,14 @@ func validatePluginData(plug *Plugin, filepath string) error {
 }
 plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage)
+ if len(plug.Metadata.PlatformCommand) > 0 && len(plug.Metadata.Command) > 0 {
+ fmt.Printf("WARNING: both 'platformCommand' and 'command' are set in %q (this will become an error in a future Helm version)\n", filepath)
+ }
+
+ if len(plug.Metadata.PlatformHooks) > 0 && len(plug.Metadata.Hooks) > 0 {
+ fmt.Printf("WARNING: both 'platformHooks' and 'hooks' are set in %q (this will become an error in a future Helm version)\n", filepath)
+ }
+
 // We could also validate SemVer, executable, and other fields should we so choose.
 return nil
 }
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go b/tools/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go
index b37a0c605..33296aadd 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go
@@ -90,8 +90,9 @@ func (pusher *OCIPusher) push(chartRef, href string) error {
 path.Join(strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)),
 meta.Metadata.Name),
 meta.Metadata.Version)
- chartCreationTime := ctime.Created(stat)
- pushOpts = append(pushOpts, registry.PushOptCreationTime(chartCreationTime.Format(time.RFC3339)))
+ // The time the chart was "created" is semantically the time the chart archive file was last written (modified)
+ chartArchiveFileCreatedTime := ctime.Modified(stat)
+ pushOpts = append(pushOpts, registry.PushOptCreationTime(chartArchiveFileCreatedTime.Format(time.RFC3339)))
 _, err = client.Push(chartBytes, ref, pushOpts...)
 return err
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/registry/client.go b/tools/vendor/helm.sh/helm/v3/pkg/registry/client.go
index 42f736816..8818f5763 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/registry/client.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/registry/client.go
@@ -18,24 +18,31 @@ package registry // import "helm.sh/helm/v3/pkg/registry"
 import (
 "context"
+ "crypto/tls"
+ "crypto/x509"
 "encoding/json"
 "fmt"
 "io"
 "net/http"
+ "net/url"
+ "os"
 "sort"
 "strings"
+ "sync"
 "github.com/Masterminds/semver/v3"
 "github.com/containerd/containerd/remotes"
+ "github.com/opencontainers/image-spec/specs-go"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 "github.com/pkg/errors"
- "oras.land/oras-go/pkg/auth"
- dockerauth "oras.land/oras-go/pkg/auth/docker"
- "oras.land/oras-go/pkg/content"
- "oras.land/oras-go/pkg/oras"
- "oras.land/oras-go/pkg/registry"
- registryremote "oras.land/oras-go/pkg/registry/remote"
- registryauth "oras.land/oras-go/pkg/registry/remote/auth"
+ "oras.land/oras-go/v2"
+ "oras.land/oras-go/v2/content"
+ "oras.land/oras-go/v2/content/memory"
+ "oras.land/oras-go/v2/registry"
+ "oras.land/oras-go/v2/registry/remote"
+ "oras.land/oras-go/v2/registry/remote/auth"
+ "oras.land/oras-go/v2/registry/remote/credentials"
+ "oras.land/oras-go/v2/registry/remote/retry"
 "helm.sh/helm/v3/internal/version"
 "helm.sh/helm/v3/pkg/chart"
@@ -49,19 +56,34 @@ storing semantic versions, Helm adopts the convention of changing plus (+) to
an underscore (_) in chart version tags when pushing to a registry and back to
a plus (+) when pulling from a registry.`
+var errDeprecatedRemote = errors.New("providing github.com/containerd/containerd/remotes.Resolver via ClientOptResolver is no longer supported")
+
 type (
+ // RemoteClient shadows the ORAS remote.Client interface
+ // (hiding the ORAS type from Helm client visibility)
+ // https://pkg.go.dev/oras.land/oras-go/pkg/registry/remote#Client
+ RemoteClient interface {
+ Do(req *http.Request) (*http.Response, error)
+ }
+
 // Client works with OCI-compliant registries
 Client struct {
 debug bool
 enableCache bool
 // path to repository config file e.g. ~/.docker/config.json
 credentialsFile string
+ username string
+ password string
 out io.Writer
- authorizer auth.Client
- registryAuthorizer *registryauth.Client
- resolver func(ref registry.Reference) (remotes.Resolver, error)
+ authorizer *auth.Client
+ registryAuthorizer RemoteClient
+ credentialsStore credentials.Store
 httpClient *http.Client
 plainHTTP bool
+ err error // pass any errors from the ClientOption functions
+
+ // credentialsFileTemp captures if the empty file / EOF workaround is being used.
+ credentialsFileTemp bool
 }
 // ClientOption allows specifying various settings configurable by the user for overriding the defaults
@@ -76,81 +98,67 @@ func NewClient(options ...ClientOption) (*Client, error) {
 }
 for _, option := range options {
 option(client)
+ if client.err != nil {
+ return nil, client.err
+ }
 }
 if client.credentialsFile == "" {
 client.credentialsFile = helmpath.ConfigPath(CredentialsFileBasename)
 }
- if client.authorizer == nil {
- authClient, err := dockerauth.NewClientWithDockerFallback(client.credentialsFile)
- if err != nil {
- return nil, err
+ if client.httpClient == nil {
+ transport := newTransport(client.debug)
+ client.httpClient = &http.Client{
+ Transport: transport,
 }
- client.authorizer = authClient
 }
- resolverFn := client.resolver // copy for avoiding recursive call
- client.resolver = func(ref registry.Reference) (remotes.Resolver, error) {
- if resolverFn != nil {
- // validate if the resolverFn returns a valid resolver
- if resolver, err := resolverFn(ref); resolver != nil && err == nil {
- return resolver, nil
+ storeOptions := credentials.StoreOptions{
+ AllowPlaintextPut: true,
+ DetectDefaultNativeStore: true,
+ }
+ store, err := credentials.NewStore(client.credentialsFile, storeOptions)
+ if err != nil {
+ // If the file exists and is empty there will be an EOF error. This error is not wrapped so
+ // a check with errors.Is will not work. The only way to detect that it is an EOF error is
+ // with string parsing.
+ // This handling passes no file location which will cause NewStore to invoke its
+ // fault tolerance for a file not existing. A bool records this bypass so that, if the
+ // credential store needs to be written to, this workaround can be handled. See the
+ // Login method for more details.
+ if strings.Contains(err.Error(), "invalid config format: EOF") {
+ var err2 error
+ store, err2 = credentials.NewStore("", storeOptions)
+ if err2 != nil {
+ return nil, err
 }
- }
- headers := http.Header{}
- headers.Set("User-Agent", version.GetUserAgent())
- opts := []auth.ResolverOption{auth.WithResolverHeaders(headers)}
- if client.httpClient != nil {
- opts = append(opts, auth.WithResolverClient(client.httpClient))
- }
- if client.plainHTTP {
- opts = append(opts, auth.WithResolverPlainHTTP())
- }
- resolver, err := client.authorizer.ResolverWithOpts(opts...)
- if err != nil { + client.credentialsFileTemp = true + } else { return nil, err } - return resolver, nil } - - // allocate a cache if option is set - var cache registryauth.Cache - if client.enableCache { - cache = registryauth.DefaultCache + dockerStore, err := credentials.NewStoreFromDocker(storeOptions) + if err != nil { + client.credentialsStore = store + } else { + // use Helm credentials with fallback to Docker + client.credentialsStore = credentials.NewStoreWithFallbacks(store, dockerStore) } - if client.registryAuthorizer == nil { - client.registryAuthorizer = ®istryauth.Client{ - Client: client.httpClient, - Header: http.Header{ - "User-Agent": {version.GetUserAgent()}, - }, - Cache: cache, - Credential: func(_ context.Context, reg string) (registryauth.Credential, error) { - dockerClient, ok := client.authorizer.(*dockerauth.Client) - if !ok { - return registryauth.EmptyCredential, errors.New("unable to obtain docker client") - } - username, password, err := dockerClient.Credential(reg) - if err != nil { - return registryauth.EmptyCredential, errors.New("unable to retrieve credentials") - } - - // A blank returned username and password value is a bearer token - if username == "" && password != "" { - return registryauth.Credential{ - RefreshToken: password, - }, nil - } + if client.authorizer == nil { + authorizer := auth.Client{ + Client: client.httpClient, + } + authorizer.SetUserAgent(version.GetUserAgent()) - return registryauth.Credential{ - Username: username, - Password: password, - }, nil + authorizer.Credential = credentials.Credential(client.credentialsStore) - }, + if client.enableCache { + authorizer.Cache = auth.NewCache() } + client.authorizer = &authorizer } + return client, nil } @@ -168,6 +176,14 @@ func ClientOptEnableCache(enableCache bool) ClientOption { } } +// ClientOptBasicAuth returns a function that sets the username and password setting on client options set +func ClientOptBasicAuth(username, password string) ClientOption { + return func(client *Client) { + client.username = username + client.password = password + } +} + // ClientOptWriter returns a function that sets the writer setting on client options set func ClientOptWriter(out io.Writer) ClientOption { return func(client *Client) { @@ -175,6 +191,26 @@ func ClientOptWriter(out io.Writer) ClientOption { } } +// ClientOptAuthorizer returns a function that sets the authorizer setting on a client options set. This +// can be used to override the default authorization mechanism. +// +// Depending on the use-case you may need to set both ClientOptAuthorizer and ClientOptRegistryAuthorizer. +func ClientOptAuthorizer(authorizer auth.Client) ClientOption { + return func(client *Client) { + client.authorizer = &authorizer + } +} + +// ClientOptRegistryAuthorizer returns a function that sets the registry authorizer setting on a client options set. This +// can be used to override the default authorization mechanism. +// +// Depending on the use-case you may need to set both ClientOptAuthorizer and ClientOptRegistryAuthorizer. 
+func ClientOptRegistryAuthorizer(registryAuthorizer RemoteClient) ClientOption { + return func(client *Client) { + client.registryAuthorizer = registryAuthorizer + } +} + // ClientOptCredentialsFile returns a function that sets the credentialsFile setting on a client options set func ClientOptCredentialsFile(credentialsFile string) ClientOption { return func(client *Client) { @@ -195,12 +231,9 @@ func ClientOptPlainHTTP() ClientOption { } } -// ClientOptResolver returns a function that sets the resolver setting on a client options set -func ClientOptResolver(resolver remotes.Resolver) ClientOption { - return func(client *Client) { - client.resolver = func(_ registry.Reference) (remotes.Resolver, error) { - return resolver, nil - } +func ClientOptResolver(_ remotes.Resolver) ClientOption { + return func(c *Client) { + c.err = errDeprecatedRemote } } @@ -209,60 +242,194 @@ type ( LoginOption func(*loginOperation) loginOperation struct { - username string - password string - insecure bool - certFile string - keyFile string - caFile string + host string + client *Client } ) +// Deprecated: will be removed in Helm 4 +// Added for backwards compatibility for Helm < 3.18.0 after moving to ORAS v2 +// ref: https://github.com/helm/helm/issues/30873 +// TODO: document that Helm 4 `registry login` does accept full URLs +func (c *Client) stripURL(host string) string { + // strip scheme from host in URL + for _, s := range []string{"oci://", "http://", "https://"} { + if strings.HasPrefix(host, s) { + plain := strings.TrimPrefix(host, s) + if c.debug { + fmt.Fprintf(c.out, "[WARNING] Invalid registry passed: registries should NOT be prefixed with a URL scheme. Use %q instead\n", plain) + } + host = plain + break + } + } + // strip repo from registry in URL + if idx := strings.Index(host, "/"); idx != -1 { + host = host[:idx] + if c.debug { + fmt.Fprintf(c.out, "[WARNING] Invalid registry passed: registries should NOT include a repository. Use %q instead\n", host) + } + return host + } + + return host +} + // Login logs into a registry func (c *Client) Login(host string, options ...LoginOption) error { - operation := &loginOperation{} + // This is the lowest available point to strip incorrect URL parts + host = c.stripURL(host) + for _, option := range options { - option(operation) + option(&loginOperation{host, c}) } - authorizerLoginOpts := []auth.LoginOption{ - auth.WithLoginContext(ctx(c.out, c.debug)), - auth.WithLoginHostname(host), - auth.WithLoginUsername(operation.username), - auth.WithLoginSecret(operation.password), - auth.WithLoginUserAgent(version.GetUserAgent()), - auth.WithLoginTLS(operation.certFile, operation.keyFile, operation.caFile), + + reg, err := remote.NewRegistry(host) + if err != nil { + return err } - if operation.insecure { - authorizerLoginOpts = append(authorizerLoginOpts, auth.WithLoginInsecure()) + reg.PlainHTTP = c.plainHTTP + cred := auth.Credential{Username: c.username, Password: c.password} + c.authorizer.ForceAttemptOAuth2 = true + reg.Client = c.authorizer + + ctx := context.Background() + if err := reg.Ping(ctx); err != nil { + c.authorizer.ForceAttemptOAuth2 = false + if err := reg.Ping(ctx); err != nil { + return fmt.Errorf("authenticating to %q: %w", host, err) + } } - if err := c.authorizer.LoginWithOpts(authorizerLoginOpts...); err != nil { + + // The credentialsStore loader does not handle empty files. So, there is a workaround. + // This can be removed when the credentials loader can handle empty files. 
+ // When Helm catches an empty file error it causes the loader to trigger its fault + // tolerance for a file not existing and records it with a bool. If that bool is set and the + // file needs to be written, the file needs to be put into a usable state and loaded + // properly. + // See the NewClient function for the bypass setup. + if c.credentialsFileTemp { + err = os.WriteFile(c.credentialsFile, []byte("{}"), 0600) + if err != nil { + return err + } + storeOptions := credentials.StoreOptions{ + AllowPlaintextPut: true, + DetectDefaultNativeStore: true, + } + store, err := credentials.NewStore(c.credentialsFile, storeOptions) + if err != nil { + return err + } + c.credentialsStore = store + c.credentialsFileTemp = false + } + + key := credentials.ServerAddressFromRegistry(host) + key = credentials.ServerAddressFromHostname(key) + if err := c.credentialsStore.Put(ctx, key, cred); err != nil { return err } + fmt.Fprintln(c.out, "Login Succeeded") return nil } // LoginOptBasicAuth returns a function that sets the username/password settings on login func LoginOptBasicAuth(username string, password string) LoginOption { - return func(operation *loginOperation) { - operation.username = username - operation.password = password + return func(o *loginOperation) { + o.client.username = username + o.client.password = password + o.client.authorizer.Credential = auth.StaticCredential(o.host, auth.Credential{Username: username, Password: password}) + } +} + +// LoginOptPlainText returns a function that allows plaintext (HTTP) login +func LoginOptPlainText(isPlainText bool) LoginOption { + return func(o *loginOperation) { + o.client.plainHTTP = isPlainText + } +} + +func ensureTLSConfig(client *auth.Client) (*tls.Config, error) { + var transport *http.Transport + + switch t := client.Client.Transport.(type) { + case *http.Transport: + transport = t + case *fallbackTransport: + switch t := t.Base.(type) { + case *http.Transport: + transport = t + case *retry.Transport: + switch t := t.Base.(type) { + case *http.Transport: + transport = t + case *LoggingTransport: + switch t := t.RoundTripper.(type) { + case *http.Transport: + transport = t + } + } + } + } + + if transport == nil { + // we don't know how to access the http.Transport, most likely the + // auth.Client.Client was provided by API user + return nil, fmt.Errorf("unable to access TLS client configuration, the provided HTTP Transport is not supported, given: %T", client.Client.Transport) } + + if transport.TLSClientConfig == nil { + transport.TLSClientConfig = &tls.Config{} + } + + return transport.TLSClientConfig, nil } // LoginOptInsecure returns a function that sets the insecure setting on login func LoginOptInsecure(insecure bool) LoginOption { - return func(operation *loginOperation) { - operation.insecure = insecure + return func(o *loginOperation) { + tlsConfig, err := ensureTLSConfig(o.client.authorizer) + + if err != nil { + panic(err) + } + + tlsConfig.InsecureSkipVerify = insecure } } // LoginOptTLSClientConfig returns a function that sets the TLS settings on login. 
func LoginOptTLSClientConfig(certFile, keyFile, caFile string) LoginOption { - return func(operation *loginOperation) { - operation.certFile = certFile - operation.keyFile = keyFile - operation.caFile = caFile + return func(o *loginOperation) { + if (certFile == "" || keyFile == "") && caFile == "" { + return + } + tlsConfig, err := ensureTLSConfig(o.client.authorizer) + if err != nil { + panic(err) + } + + if certFile != "" && keyFile != "" { + authCert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + panic(err) + } + tlsConfig.Certificates = []tls.Certificate{authCert} + } + + if caFile != "" { + certPool := x509.NewCertPool() + ca, err := os.ReadFile(caFile) + if err != nil { + panic(err) + } + if !certPool.AppendCertsFromPEM(ca) { + panic(fmt.Errorf("unable to parse CA file: %q", caFile)) + } + tlsConfig.RootCAs = certPool + } } } @@ -279,7 +446,8 @@ func (c *Client) Logout(host string, opts ...LogoutOption) error { for _, opt := range opts { opt(operation) } - if err := c.authorizer.Logout(ctx(c.out, c.debug), host); err != nil { + + if err := credentials.Logout(context.Background(), c.credentialsStore, host); err != nil { return err } fmt.Fprintf(c.out, "Removing login credentials for %s\n", host) @@ -319,7 +487,7 @@ type ( // Pull downloads a chart from a registry func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { - parsedRef, err := parseReference(ref) + parsedRef, err := newReference(ref) if err != nil { return nil, err } @@ -334,8 +502,9 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { return nil, errors.New( "must specify at least one layer to pull (chart/prov)") } - memoryStore := content.NewMemory() + memoryStore := memory.New() allowedMediaTypes := []string{ + ocispec.MediaTypeImageManifest, ConfigMediaType, } minNumDescriptors := 1 // 1 for the config @@ -351,23 +520,38 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { } var descriptors, layers []ocispec.Descriptor - remotesResolver, err := c.resolver(parsedRef) + + repository, err := remote.NewRepository(parsedRef.String()) if err != nil { return nil, err } - registryStore := content.Registry{Resolver: remotesResolver} + repository.PlainHTTP = c.plainHTTP + repository.Client = c.authorizer + + ctx := context.Background() - manifest, err := oras.Copy(ctx(c.out, c.debug), registryStore, parsedRef.String(), memoryStore, "", - oras.WithPullEmptyNameAllowed(), - oras.WithAllowedMediaTypes(allowedMediaTypes), - oras.WithLayerDescriptors(func(l []ocispec.Descriptor) { - layers = l - })) + sort.Strings(allowedMediaTypes) + + var mu sync.Mutex + manifest, err := oras.Copy(ctx, repository, parsedRef.String(), memoryStore, "", oras.CopyOptions{ + CopyGraphOptions: oras.CopyGraphOptions{ + PreCopy: func(_ context.Context, desc ocispec.Descriptor) error { + mediaType := desc.MediaType + if i := sort.SearchStrings(allowedMediaTypes, mediaType); i >= len(allowedMediaTypes) || allowedMediaTypes[i] != mediaType { + return oras.SkipNode + } + + mu.Lock() + layers = append(layers, desc) + mu.Unlock() + return nil + }, + }, + }) if err != nil { return nil, err } - descriptors = append(descriptors, manifest) descriptors = append(descriptors, layers...) 
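An aside on the Pull filtering above: because allowedMediaTypes is sorted once before oras.Copy runs, the PreCopy callback can use sort.SearchStrings as a binary-search set-membership probe and return oras.SkipNode for anything outside the allow-list. A minimal sketch of that membership test in isolation (the candidate media-type strings here are only illustrative):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Sort the allow-list once, as the Pull code does with allowedMediaTypes.
	allowed := []string{
		"application/vnd.cncf.helm.config.v1+json",
		"application/vnd.oci.image.manifest.v1+json",
	}
	sort.Strings(allowed)

	// Probe each candidate media type with a binary search; a miss is what
	// the PreCopy hook translates into oras.SkipNode.
	for _, mt := range []string{
		"application/vnd.oci.image.manifest.v1+json",
		"application/example.unrelated.layer.v1",
	} {
		i := sort.SearchStrings(allowed, mt)
		ok := i < len(allowed) && allowed[i] == mt
		fmt.Println(mt, ok)
	}
}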
numDescriptors := len(descriptors) @@ -421,54 +605,37 @@ func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { Prov: &DescriptorPullSummary{}, Ref: parsedRef.String(), } - var getManifestErr error - if _, manifestData, ok := memoryStore.Get(manifest); !ok { - getManifestErr = errors.Errorf("Unable to retrieve blob with digest %s", manifest.Digest) - } else { - result.Manifest.Data = manifestData - } - if getManifestErr != nil { - return nil, getManifestErr + + result.Manifest.Data, err = content.FetchAll(ctx, memoryStore, manifest) + if err != nil { + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", manifest.Digest, err) } - var getConfigDescriptorErr error - if _, configData, ok := memoryStore.Get(*configDescriptor); !ok { - getConfigDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", configDescriptor.Digest) - } else { - result.Config.Data = configData - var meta *chart.Metadata - if err := json.Unmarshal(configData, &meta); err != nil { - return nil, err - } - result.Chart.Meta = meta + + result.Config.Data, err = content.FetchAll(ctx, memoryStore, *configDescriptor) + if err != nil { + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", configDescriptor.Digest, err) } - if getConfigDescriptorErr != nil { - return nil, getConfigDescriptorErr + + if err := json.Unmarshal(result.Config.Data, &result.Chart.Meta); err != nil { + return nil, err } + if operation.withChart { - var getChartDescriptorErr error - if _, chartData, ok := memoryStore.Get(*chartDescriptor); !ok { - getChartDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", chartDescriptor.Digest) - } else { - result.Chart.Data = chartData - result.Chart.Digest = chartDescriptor.Digest.String() - result.Chart.Size = chartDescriptor.Size - } - if getChartDescriptorErr != nil { - return nil, getChartDescriptorErr + result.Chart.Data, err = content.FetchAll(ctx, memoryStore, *chartDescriptor) + if err != nil { + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", chartDescriptor.Digest, err) } + result.Chart.Digest = chartDescriptor.Digest.String() + result.Chart.Size = chartDescriptor.Size } + if operation.withProv && !provMissing { - var getProvDescriptorErr error - if _, provData, ok := memoryStore.Get(*provDescriptor); !ok { - getProvDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", provDescriptor.Digest) - } else { - result.Prov.Data = provData - result.Prov.Digest = provDescriptor.Digest.String() - result.Prov.Size = provDescriptor.Size - } - if getProvDescriptorErr != nil { - return nil, getProvDescriptorErr + result.Prov.Data, err = content.FetchAll(ctx, memoryStore, *provDescriptor) + if err != nil { + return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", provDescriptor.Digest, err) } + result.Prov.Digest = provDescriptor.Digest.String() + result.Prov.Size = provDescriptor.Size } fmt.Fprintf(c.out, "Pulled: %s\n", result.Ref) @@ -535,7 +702,7 @@ type ( // Push uploads a chart to a registry. 
func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResult, error) { - parsedRef, err := parseReference(ref) + parsedRef, err := newReference(ref) if err != nil { return nil, err } @@ -556,8 +723,11 @@ func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResu "strict mode enabled, ref basename and tag must match the chart name and version") } } - memoryStore := content.NewMemory() - chartDescriptor, err := memoryStore.Add("", ChartLayerMediaType, data) + + ctx := context.Background() + + memoryStore := memory.New() + chartDescriptor, err := oras.PushBytes(ctx, memoryStore, ChartLayerMediaType, data) if err != nil { return nil, err } @@ -567,43 +737,47 @@ func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResu return nil, err } - configDescriptor, err := memoryStore.Add("", ConfigMediaType, configData) + configDescriptor, err := oras.PushBytes(ctx, memoryStore, ConfigMediaType, configData) if err != nil { return nil, err } - descriptors := []ocispec.Descriptor{chartDescriptor} + layers := []ocispec.Descriptor{chartDescriptor} var provDescriptor ocispec.Descriptor if operation.provData != nil { - provDescriptor, err = memoryStore.Add("", ProvLayerMediaType, operation.provData) + provDescriptor, err = oras.PushBytes(ctx, memoryStore, ProvLayerMediaType, operation.provData) if err != nil { return nil, err } - descriptors = append(descriptors, provDescriptor) + layers = append(layers, provDescriptor) } + // sort layers for determinism, similar to how ORAS v1 does it + sort.Slice(layers, func(i, j int) bool { + return layers[i].Digest < layers[j].Digest + }) + ociAnnotations := generateOCIAnnotations(meta, operation.creationTime) - manifestData, manifest, err := content.GenerateManifest(&configDescriptor, ociAnnotations, descriptors...) 
+ manifestDescriptor, err := c.tagManifest(ctx, memoryStore, configDescriptor, + layers, ociAnnotations, parsedRef) if err != nil { return nil, err } - if err := memoryStore.StoreManifest(parsedRef.String(), manifest, manifestData); err != nil { - return nil, err - } - - remotesResolver, err := c.resolver(parsedRef) + repository, err := remote.NewRepository(parsedRef.String()) if err != nil { return nil, err } - registryStore := content.Registry{Resolver: remotesResolver} - _, err = oras.Copy(ctx(c.out, c.debug), memoryStore, parsedRef.String(), registryStore, "", - oras.WithNameValidation(nil)) + repository.PlainHTTP = c.plainHTTP + repository.Client = c.authorizer + + manifestDescriptor, err = oras.ExtendedCopy(ctx, memoryStore, parsedRef.String(), repository, parsedRef.String(), oras.DefaultExtendedCopyOptions) if err != nil { return nil, err } + chartSummary := &descriptorPushSummaryWithMeta{ Meta: meta, } @@ -611,8 +785,8 @@ func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResu chartSummary.Size = chartDescriptor.Size result := &PushResult{ Manifest: &descriptorPushSummary{ - Digest: manifest.Digest.String(), - Size: manifest.Size, + Digest: manifestDescriptor.Digest.String(), + Size: manifestDescriptor.Size, }, Config: &descriptorPushSummary{ Digest: configDescriptor.Digest.String(), @@ -630,7 +804,7 @@ func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResu } fmt.Fprintf(c.out, "Pushed: %s\n", result.Ref) fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest) - if strings.Contains(parsedRef.Reference, "_") { + if strings.Contains(parsedRef.orasReference.Reference, "_") { fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref) fmt.Fprint(c.out, registryUnderscoreMessage+"\n") } @@ -666,27 +840,29 @@ func (c *Client) Tags(ref string) ([]string, error) { return nil, err } - repository := registryremote.Repository{ - Reference: parsedReference, - Client: c.registryAuthorizer, - PlainHTTP: c.plainHTTP, - } - - var registryTags []string - - registryTags, err = registry.Tags(ctx(c.out, c.debug), &repository) + ctx := context.Background() + repository, err := remote.NewRepository(parsedReference.String()) if err != nil { return nil, err } + repository.PlainHTTP = c.plainHTTP + repository.Client = c.authorizer var tagVersions []*semver.Version - for _, tag := range registryTags { - // Change underscore (_) back to plus (+) for Helm - // See https://github.com/helm/helm/issues/10166 - tagVersion, err := semver.StrictNewVersion(strings.ReplaceAll(tag, "_", "+")) - if err == nil { - tagVersions = append(tagVersions, tagVersion) + err = repository.Tags(ctx, "", func(tags []string) error { + for _, tag := range tags { + // Change underscore (_) back to plus (+) for Helm + // See https://github.com/helm/helm/issues/10166 + tagVersion, err := semver.StrictNewVersion(strings.ReplaceAll(tag, "_", "+")) + if err == nil { + tagVersions = append(tagVersions, tagVersion) + } } + + return nil + }) + if err != nil { + return nil, err } // Sort the collection @@ -701,3 +877,109 @@ func (c *Client) Tags(ref string) ([]string, error) { return tags, nil } + +// Resolve a reference to a descriptor. 
+func (c *Client) Resolve(ref string) (desc ocispec.Descriptor, err error) { + remoteRepository, err := remote.NewRepository(ref) + if err != nil { + return desc, err + } + remoteRepository.PlainHTTP = c.plainHTTP + + parsedReference, err := newReference(ref) + if err != nil { + return desc, err + } + + ctx := context.Background() + parsedString := parsedReference.String() + return remoteRepository.Resolve(ctx, parsedString) +} + +// ValidateReference for path and version +func (c *Client) ValidateReference(ref, version string, u *url.URL) (*url.URL, error) { + var tag string + + registryReference, err := newReference(u.Host + u.Path) + if err != nil { + return nil, err + } + + if version == "" { + // Use OCI URI tag as default + version = registryReference.Tag + } else { + if registryReference.Tag != "" && registryReference.Tag != version { + return nil, errors.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag) + } + } + + if registryReference.Digest != "" { + if version == "" { + // Install by digest only + return u, nil + } + u.Path = fmt.Sprintf("%s@%s", registryReference.Repository, registryReference.Digest) + + // Validate the tag if it was specified + path := registryReference.Registry + "/" + registryReference.Repository + ":" + version + desc, err := c.Resolve(path) + if err != nil { + // The resource does not have to be tagged when digest is specified + return u, nil + } + if desc.Digest.String() != registryReference.Digest { + return nil, errors.Errorf("chart reference digest mismatch: %s is not %s", desc.Digest.String(), registryReference.Digest) + } + return u, nil + } + + // Evaluate whether an explicit version has been provided. Otherwise, determine version to use + _, errSemVer := semver.NewVersion(version) + if errSemVer == nil { + tag = version + } else { + // Retrieve list of repository tags + tags, err := c.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", OCIScheme))) + if err != nil { + return nil, err + } + if len(tags) == 0 { + return nil, errors.Errorf("Unable to locate any tags in provided repository: %s", ref) + } + + // Determine if version provided + // If empty, try to get the highest available tag + // If exact version, try to find it + // If semver constraint string, try to find a match + tag, err = GetTagMatchingVersionOrConstraint(tags, version) + if err != nil { + return nil, err + } + } + + u.Path = fmt.Sprintf("%s:%s", registryReference.Repository, tag) + + return u, err +} + +// tagManifest prepares and tags a manifest in memory storage +func (c *Client) tagManifest(ctx context.Context, memoryStore *memory.Store, + configDescriptor ocispec.Descriptor, layers []ocispec.Descriptor, + ociAnnotations map[string]string, parsedRef reference) (ocispec.Descriptor, error) { + + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: configDescriptor, + Layers: layers, + Annotations: ociAnnotations, + } + + manifestData, err := json.Marshal(manifest) + if err != nil { + return ocispec.Descriptor{}, err + } + + return oras.TagBytes(ctx, memoryStore, ocispec.MediaTypeImageManifest, + manifestData, parsedRef.String()) +} diff --git a/tools/vendor/helm.sh/helm/v3/pkg/registry/fallback.go b/tools/vendor/helm.sh/helm/v3/pkg/registry/fallback.go new file mode 100644 index 000000000..1db729576 --- /dev/null +++ b/tools/vendor/helm.sh/helm/v3/pkg/registry/fallback.go @@ -0,0 +1,60 @@ +/* +Copyright The Helm Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "crypto/tls"
+ "net/http"
+ "sync/atomic"
+)
+
+// NOTE(terryhowe): This fallback feature is only provided in v3 for backward
+// compatibility. ORAS v1 had this feature and this code was added when helm
+// updated to ORAS v2. This will not be supported in helm v4.
+
+type fallbackTransport struct {
+ Base http.RoundTripper
+ forceHTTP atomic.Bool
+}
+
+func newTransport(debug bool) *fallbackTransport {
+ baseTransport := NewTransport(debug)
+ return &fallbackTransport{
+ Base: baseTransport,
+ }
+}
+
+// RoundTrip wraps base round trip with conditional insecure retry.
+func (t *fallbackTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if ok := t.forceHTTP.Load(); ok {
+ req.URL.Scheme = "http"
+ return t.Base.RoundTrip(req)
+ }
+ resp, err := t.Base.RoundTrip(req)
+ // We are falling back to http here for backward compatibility with Helm v3.
+ // ORAS v1 provided fallback automatically, but ORAS v2 does not.
+ if err != nil && req.URL.Scheme == "https" {
+ if tlsErr, ok := err.(tls.RecordHeaderError); ok {
+ if string(tlsErr.RecordHeader[:]) == "HTTP/" {
+ t.forceHTTP.Store(true)
+ req.URL.Scheme = "http"
+ return t.Base.RoundTrip(req)
+ }
+ }
+ }
+ return resp, err
+}
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/registry/reference.go b/tools/vendor/helm.sh/helm/v3/pkg/registry/reference.go
new file mode 100644
index 000000000..b5677761d
--- /dev/null
+++ b/tools/vendor/helm.sh/helm/v3/pkg/registry/reference.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "strings"
+
+ "oras.land/oras-go/v2/registry"
+)
+
+type reference struct {
+ orasReference registry.Reference
+ Registry string
+ Repository string
+ Tag string
+ Digest string
+}
+
+// newReference will parse and validate the reference, and clean tags when
+// applicable. Tags are only cleaned when plus (+) signs are present, and are
+// converted to underscores (_) before pushing.
+// See https://github.com/helm/helm/issues/10166
+func newReference(raw string) (result reference, err error) {
+ // Remove oci:// prefix if it is there
+ raw = strings.TrimPrefix(raw, OCIScheme+"://")
+
+ // The sole possible reference modification is replacing plus (+) signs
+ // present in tags with underscores (_). To do this properly, we first
+ // need to identify a tag, and then pass it on to the reference parser
+ // NOTE: Passing immediately to the reference parser will fail since (+)
+ // signs are an invalid tag character, and simply replacing all plus (+)
+ // occurrences could invalidate other portions of the URI
+ lastIndex := strings.LastIndex(raw, "@")
+ if lastIndex >= 0 {
+ result.Digest = raw[(lastIndex + 1):]
+ raw = raw[:lastIndex]
+ }
+ parts := strings.Split(raw, ":")
+ if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") {
+ tag := parts[len(parts)-1]
+
+ if tag != "" {
+ // Replace any plus (+) signs with known underscore (_) conversion
+ newTag := strings.ReplaceAll(tag, "+", "_")
+ raw = strings.ReplaceAll(raw, tag, newTag)
+ }
+ }
+
+ result.orasReference, err = registry.ParseReference(raw)
+ if err != nil {
+ return result, err
+ }
+ result.Registry = result.orasReference.Registry
+ result.Repository = result.orasReference.Repository
+ result.Tag = result.orasReference.Reference
+ return result, nil
+}
+
+func (r *reference) String() string {
+ if r.Tag == "" {
+ return r.orasReference.String() + "@" + r.Digest
+ }
+ return r.orasReference.String()
+}
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/registry/transport.go b/tools/vendor/helm.sh/helm/v3/pkg/registry/transport.go
new file mode 100644
index 000000000..4d8a59ac6
--- /dev/null
+++ b/tools/vendor/helm.sh/helm/v3/pkg/registry/transport.go
@@ -0,0 +1,187 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log/slog"
+ "mime"
+ "net/http"
+ "os"
+ "strings"
+ "sync/atomic"
+
+ "oras.land/oras-go/v2/registry/remote/retry"
+)
+
+var (
+ // requestCount records the number of logged request-response pairs and will
+ // be used as the unique id for the next pair.
+ requestCount uint64
+
+ // toScrub is a set of headers that should be scrubbed from the log.
+ toScrub = []string{
+ "Authorization",
+ "Set-Cookie",
+ }
+)
+
+// payloadSizeLimit limits the maximum size of the response body to be printed.
+const payloadSizeLimit int64 = 16 * 1024 // 16 KiB
+
+// LoggingTransport is an http.RoundTripper that keeps track of the in-flight
+// request and adds hooks to report HTTP tracing events.
+type LoggingTransport struct {
+ http.RoundTripper
+ logger *slog.Logger
+}
+
+// NewTransport creates and returns a new instance of LoggingTransport
+func NewTransport(debug bool) *retry.Transport {
+ type cloner[T any] interface {
+ Clone() T
+ }
+
+ // try to copy (clone) the http.DefaultTransport so any mutations we
+ // perform on it (e.g.
TLS config) are not reflected globally + // follow https://github.com/golang/go/issues/39299 for a more elegant + // solution in the future + transport := http.DefaultTransport + if t, ok := transport.(cloner[*http.Transport]); ok { + transport = t.Clone() + } else if t, ok := transport.(cloner[http.RoundTripper]); ok { + // this branch will not be used with go 1.20, it was added + // optimistically to try to clone if the http.DefaultTransport + // implementation changes, still the Clone method in that case + // might not return http.RoundTripper... + transport = t.Clone() + } + if debug { + replace := func(groups []string, a slog.Attr) slog.Attr { + // Remove time. + if a.Key == slog.TimeKey && len(groups) == 0 { + return slog.Attr{} + } + return a + } + logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + ReplaceAttr: replace, + Level: slog.LevelDebug})) + transport = &LoggingTransport{RoundTripper: transport, logger: logger} + } + + return retry.NewTransport(transport) +} + +// RoundTrip calls base round trip while keeping track of the current request. +func (t *LoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + id := atomic.AddUint64(&requestCount, 1) - 1 + + t.logger.Debug("Request", "id", id, "url", req.URL, "method", req.Method, "header", logHeader(req.Header)) + resp, err = t.RoundTripper.RoundTrip(req) + if err != nil { + t.logger.Debug("Response", "id", id, "error", err) + } else if resp != nil { + t.logger.Debug("Response", "id", id, "status", resp.Status, "header", logHeader(resp.Header), "body", logResponseBody(resp)) + } else { + t.logger.Debug("Response", "id", id, "response", "nil") + } + + return resp, err +} + +// logHeader prints out the provided header keys and values, with auth header scrubbed. +func logHeader(header http.Header) string { + if len(header) > 0 { + headers := []string{} + for k, v := range header { + for _, h := range toScrub { + if strings.EqualFold(k, h) { + v = []string{"*****"} + } + } + headers = append(headers, fmt.Sprintf(" %q: %q", k, strings.Join(v, ", "))) + } + return strings.Join(headers, "\n") + } + return " Empty header" +} + +// logResponseBody prints out the response body if it is printable and within size limit. 
+func logResponseBody(resp *http.Response) string { + if resp.Body == nil || resp.Body == http.NoBody { + return " No response body to print" + } + + // non-applicable body is not printed and remains untouched for subsequent processing + contentType := resp.Header.Get("Content-Type") + if contentType == "" { + return " Response body without a content type is not printed" + } + if !isPrintableContentType(contentType) { + return fmt.Sprintf(" Response body of content type %q is not printed", contentType) + } + + buf := bytes.NewBuffer(nil) + body := resp.Body + // restore the body by concatenating the read body with the remaining body + resp.Body = struct { + io.Reader + io.Closer + }{ + Reader: io.MultiReader(buf, body), + Closer: body, + } + // read the body up to limit+1 to check if the body exceeds the limit + if _, err := io.CopyN(buf, body, payloadSizeLimit+1); err != nil && err != io.EOF { + return fmt.Sprintf(" Error reading response body: %v", err) + } + + readBody := buf.String() + if len(readBody) == 0 { + return " Response body is empty" + } + if containsCredentials(readBody) { + return " Response body redacted due to potential credentials" + } + if len(readBody) > int(payloadSizeLimit) { + return readBody[:payloadSizeLimit] + "\n...(truncated)" + } + return readBody +} + +// isPrintableContentType returns true if the contentType is printable. +func isPrintableContentType(contentType string) bool { + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return false + } + + switch mediaType { + case "application/json", // JSON types + "text/plain", "text/html": // text types + return true + } + return strings.HasSuffix(mediaType, "+json") +} + +// containsCredentials returns true if the body contains potential credentials. +func containsCredentials(body string) bool { + return strings.Contains(body, `"token"`) || strings.Contains(body, `"access_token"`) +} diff --git a/tools/vendor/helm.sh/helm/v3/pkg/registry/util.go b/tools/vendor/helm.sh/helm/v3/pkg/registry/util.go index 727cdae03..84ee69807 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/registry/util.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/registry/util.go @@ -18,25 +18,20 @@ package registry // import "helm.sh/helm/v3/pkg/registry" import ( "bytes" - "context" "fmt" "io" "net/http" "strings" "time" + "helm.sh/helm/v3/internal/tlsutil" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" helmtime "helm.sh/helm/v3/pkg/time" "github.com/Masterminds/semver/v3" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - orascontext "oras.land/oras-go/pkg/context" - "oras.land/oras-go/pkg/registry" - - "helm.sh/helm/v3/internal/tlsutil" - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" ) var immutableOciAnnotations = []string{ @@ -44,7 +39,7 @@ var immutableOciAnnotations = []string{ ocispec.AnnotationTitle, } -// IsOCI determines whether or not a URL is to be treated as an OCI URL +// IsOCI determines whether a URL is to be treated as an OCI URL func IsOCI(url string) bool { return strings.HasPrefix(url, fmt.Sprintf("%s://", OCIScheme)) } @@ -104,42 +99,6 @@ func extractChartMeta(chartData []byte) (*chart.Metadata, error) { return ch.Metadata, nil } -// ctx retrieves a fresh context. 
-// disable verbose logging coming from ORAS (unless debug is enabled) -func ctx(out io.Writer, debug bool) context.Context { - if !debug { - return orascontext.Background() - } - ctx := orascontext.WithLoggerFromWriter(context.Background(), out) - orascontext.GetLogger(ctx).Logger.SetLevel(logrus.DebugLevel) - return ctx -} - -// parseReference will parse and validate the reference, and clean tags when -// applicable tags are only cleaned when plus (+) signs are present, and are -// converted to underscores (_) before pushing -// See https://github.com/helm/helm/issues/10166 -func parseReference(raw string) (registry.Reference, error) { - // The sole possible reference modification is replacing plus (+) signs - // present in tags with underscores (_). To do this properly, we first - // need to identify a tag, and then pass it on to the reference parser - // NOTE: Passing immediately to the reference parser will fail since (+) - // signs are an invalid tag character, and simply replacing all plus (+) - // occurrences could invalidate other portions of the URI - parts := strings.Split(raw, ":") - if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") { - tag := parts[len(parts)-1] - - if tag != "" { - // Replace any plus (+) signs with known underscore (_) conversion - newTag := strings.ReplaceAll(tag, "+", "_") - raw = strings.ReplaceAll(raw, tag, newTag) - } - } - - return registry.ParseReference(raw) -} - // NewRegistryClientWithTLS is a helper function to create a new registry client with TLS enabled. func NewRegistryClientWithTLS(out io.Writer, certFile, keyFile, caFile string, insecureSkipTLSverify bool, registryConfig string, debug bool) (*Client, error) { tlsConf, err := tlsutil.NewClientTLS(certFile, keyFile, caFile, insecureSkipTLSverify) @@ -208,7 +167,7 @@ func generateChartOCIAnnotations(meta *chart.Metadata, creationTime string) map[ chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationSource, meta.Sources[0]) } - if meta.Maintainers != nil && len(meta.Maintainers) > 0 { + if len(meta.Maintainers) > 0 { var maintainerSb strings.Builder for maintainerIdx, maintainer := range meta.Maintainers { diff --git a/tools/vendor/helm.sh/helm/v3/pkg/release/hook.go b/tools/vendor/helm.sh/helm/v3/pkg/release/hook.go index cb9955582..425074ac1 100644 --- a/tools/vendor/helm.sh/helm/v3/pkg/release/hook.go +++ b/tools/vendor/helm.sh/helm/v3/pkg/release/hook.go @@ -50,6 +50,17 @@ const ( func (x HookDeletePolicy) String() string { return string(x) } +// HookOutputLogPolicy specifies the hook output log policy +type HookOutputLogPolicy string + +// Hook output log policy types +const ( + HookOutputOnSucceeded HookOutputLogPolicy = "hook-succeeded" + HookOutputOnFailed HookOutputLogPolicy = "hook-failed" +) + +func (x HookOutputLogPolicy) String() string { return string(x) } + // HookAnnotation is the label name for a hook const HookAnnotation = "helm.sh/hook" @@ -59,6 +70,9 @@ const HookWeightAnnotation = "helm.sh/hook-weight" // HookDeleteAnnotation is the label name for the delete policy for a hook const HookDeleteAnnotation = "helm.sh/hook-delete-policy" +// HookOutputLogAnnotation is the label name for the output log policy for a hook +const HookOutputLogAnnotation = "helm.sh/hook-output-log-policy" + // Hook defines a hook object. 
type Hook struct {
 Name string `json:"name,omitempty"`
@@ -76,6 +90,8 @@ type Hook struct {
 Weight int `json:"weight,omitempty"`
 // DeletePolicies are the policies that indicate when to delete the hook
 DeletePolicies []HookDeletePolicy `json:"delete_policies,omitempty"`
+ // OutputLogPolicies defines whether we should copy hook logs back to the main process
+ OutputLogPolicies []HookOutputLogPolicy `json:"output_log_policies,omitempty"`
 }
 // A HookExecution records the result for the last execution of a hook for a given release.
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/release/mock.go b/tools/vendor/helm.sh/helm/v3/pkg/release/mock.go
index a28e1dc16..eb0b5157d 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/release/mock.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/release/mock.go
@@ -74,6 +74,24 @@ func Mock(opts *MockReleaseOptions) *Release {
 Name: "foo",
 Version: "0.1.0-beta.1",
 AppVersion: "1.0",
+ Annotations: map[string]string{
+ "category": "web-apps",
+ "supported": "true",
+ },
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "cool-plugin",
+ Version: "1.0.0",
+ Repository: "https://coolplugin.io/charts",
+ Condition: "coolPlugin.enabled",
+ Enabled: true,
+ },
+ {
+ Name: "crds",
+ Version: "2.7.1",
+ Condition: "crds.enabled",
+ },
+ },
 },
 Templates: []*chart.File{
 {Name: "templates/foo.tpl", Data: []byte(MockManifest)},
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go b/tools/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
index 4b6109929..cf851baa8 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
@@ -128,6 +128,14 @@ func SortManifests(files map[string]string, _ chartutil.VersionSet, ordering Kin
 // metadata:
 // annotations:
 // helm.sh/hook-delete-policy: hook-succeeded
+//
+// To determine the policy to output logs of the hook (for Pod and Job only), it looks for a YAML structure like this:
+//
+// kind: Pod
+// apiVersion: v1
+// metadata:
+// annotations:
+// helm.sh/hook-output-log-policy: hook-succeeded,hook-failed
 func (file *manifestFile) sort(result *result) error {
 // Go through manifests in order found in file (function `SplitManifests` creates integer-sortable keys)
 var sortedEntryKeys []string
@@ -166,13 +174,14 @@ func (file *manifestFile) sort(result *result) error {
 hw := calculateHookWeight(entry)
 h := &release.Hook{
- Name: entry.Metadata.Name,
- Kind: entry.Kind,
- Path: file.path,
- Manifest: m,
- Events: []release.HookEvent{},
- Weight: hw,
- DeletePolicies: []release.HookDeletePolicy{},
+ Name: entry.Metadata.Name,
+ Kind: entry.Kind,
+ Path: file.path,
+ Manifest: m,
+ Events: []release.HookEvent{},
+ Weight: hw,
+ DeletePolicies: []release.HookDeletePolicy{},
+ OutputLogPolicies: []release.HookOutputLogPolicy{},
 }
 isUnknownHook := false
@@ -196,6 +205,10 @@ func (file *manifestFile) sort(result *result) error {
 operateAnnotationValues(entry, release.HookDeleteAnnotation, func(value string) {
 h.DeletePolicies = append(h.DeletePolicies, release.HookDeletePolicy(value))
 })
+
+ operateAnnotationValues(entry, release.HookOutputLogAnnotation, func(value string) {
+ h.OutputLogPolicies = append(h.OutputLogPolicies, release.HookOutputLogPolicy(value))
+ })
 }
 return nil
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/repo/index.go b/tools/vendor/helm.sh/helm/v3/pkg/repo/index.go
index e1ce3c62d..a93314ab8 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/repo/index.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/repo/index.go
@@ -357,6 +357,7
@@ func loadIndex(data []byte, source string) (*IndexFile, error) {
 		for idx := len(cvs) - 1; idx >= 0; idx-- {
 			if cvs[idx] == nil {
 				log.Printf("skipping loading invalid entry for chart %q from %s: empty entry", name, source)
+				cvs = append(cvs[:idx], cvs[idx+1:]...)
 				continue
 			}
 			// When metadata section missing, initialize with no data
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go b/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go
index 5fd64ea59..ce88c662b 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go
@@ -18,6 +18,7 @@ package driver // import "helm.sh/helm/v3/pkg/storage/driver"
 
 import (
 	"context"
+	"fmt"
 	"strconv"
 	"strings"
 	"time"
@@ -160,7 +161,7 @@ func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
 	lbs.init()
 	lbs.fromMap(rls.Labels)
-	lbs.set("createdAt", strconv.Itoa(int(time.Now().Unix())))
+	lbs.set("createdAt", fmt.Sprintf("%v", time.Now().Unix()))
 
 	// create a new configmap to hold the release
 	obj, err := newConfigMapsObject(key, rls, lbs)
@@ -188,7 +189,7 @@ func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error {
 	lbs.init()
 	lbs.fromMap(rls.Labels)
-	lbs.set("modifiedAt", strconv.Itoa(int(time.Now().Unix())))
+	lbs.set("modifiedAt", fmt.Sprintf("%v", time.Now().Unix()))
 
 	// create a new configmap object to hold the release
 	obj, err := newConfigMapsObject(key, rls, lbs)
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go b/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go
index 9c2f805f2..95a7e9032 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go
@@ -18,6 +18,7 @@ package driver // import "helm.sh/helm/v3/pkg/storage/driver"
 
 import (
 	"context"
+	"fmt"
 	"strconv"
 	"strings"
 	"time"
@@ -151,7 +152,7 @@ func (secrets *Secrets) Create(key string, rls *rspb.Release) error {
 	lbs.init()
 	lbs.fromMap(rls.Labels)
-	lbs.set("createdAt", strconv.Itoa(int(time.Now().Unix())))
+	lbs.set("createdAt", fmt.Sprintf("%v", time.Now().Unix()))
 
 	// create a new secret to hold the release
 	obj, err := newSecretsObject(key, rls, lbs)
@@ -177,7 +178,7 @@ func (secrets *Secrets) Update(key string, rls *rspb.Release) error {
 	lbs.init()
 	lbs.fromMap(rls.Labels)
-	lbs.set("modifiedAt", strconv.Itoa(int(time.Now().Unix())))
+	lbs.set("modifiedAt", fmt.Sprintf("%v", time.Now().Unix()))
 
 	// create a new secret object to hold the release
 	obj, err := newSecretsObject(key, rls, lbs)
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime.go b/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime.go
index f2998d76d..63a41c0bf 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime.go
@@ -21,5 +21,9 @@ import (
 )
 
 func Created(fi os.FileInfo) time.Time {
-	return created(fi)
+	return modified(fi)
+}
+
+func Modified(fi os.FileInfo) time.Time {
+	return modified(fi)
 }
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_linux.go b/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_linux.go
index c3cea1d78..d8a6ea1a1 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_linux.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_linux.go
@@ -23,8 +23,8 @@ import (
 	"time"
 )
 
-func created(fi os.FileInfo) time.Time {
+func modified(fi os.FileInfo) time.Time {
 	st := fi.Sys().(*syscall.Stat_t) //nolint
-	return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
+	return time.Unix(int64(st.Mtim.Sec), int64(st.Mtim.Nsec))
 }
diff --git a/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_other.go b/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_other.go
index f21ed7347..12afc6df2 100644
--- a/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_other.go
+++ b/tools/vendor/helm.sh/helm/v3/pkg/time/ctime/ctime_other.go
@@ -22,6 +22,6 @@ import (
 	"time"
 )
 
-func created(fi os.FileInfo) time.Time {
+func modified(fi os.FileInfo) time.Time {
 	return fi.ModTime()
 }
diff --git a/tools/vendor/k8s.io/api/admission/v1/doc.go b/tools/vendor/k8s.io/api/admission/v1/doc.go
index e7df9f629..cab652821 100644
--- a/tools/vendor/k8s.io/api/admission/v1/doc.go
+++ b/tools/vendor/k8s.io/api/admission/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
 // +k8s:prerelease-lifecycle-gen=true
 // +groupName=admission.k8s.io
 
-package v1 // import "k8s.io/api/admission/v1"
+package v1
diff --git a/tools/vendor/k8s.io/api/admission/v1beta1/doc.go b/tools/vendor/k8s.io/api/admission/v1beta1/doc.go
index a5669022a..447495684 100644
--- a/tools/vendor/k8s.io/api/admission/v1beta1/doc.go
+++ b/tools/vendor/k8s.io/api/admission/v1beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
 
 // +groupName=admission.k8s.io
 
-package v1beta1 // import "k8s.io/api/admission/v1beta1"
+package v1beta1
diff --git a/tools/vendor/k8s.io/api/admissionregistration/v1/doc.go b/tools/vendor/k8s.io/api/admissionregistration/v1/doc.go
index ca0086188..ec0ebb9c4 100644
--- a/tools/vendor/k8s.io/api/admissionregistration/v1/doc.go
+++ b/tools/vendor/k8s.io/api/admissionregistration/v1/doc.go
@@ -24,4 +24,4 @@ limitations under the License.
 // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
 // MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
 // new dynamic admission controller configuration.
-package v1 // import "k8s.io/api/admissionregistration/v1"
+package v1
diff --git a/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
index 98066211d..344af9ae0 100644
--- a/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
+++ b/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
 // +groupName=admissionregistration.k8s.io
 
 // Package v1alpha1 is the v1alpha1 version of the API.
-package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1"
+package v1alpha1
diff --git a/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
index 88344ce87..d23f21cc8 100644
--- a/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
+++ b/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
@@ -272,9 +272,9 @@ message MatchResources {
   // +optional
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
 
-  // ObjectSelector decides whether to run the validation based on if the
+  // ObjectSelector decides whether to run the policy based on if the
   // object has matching labels. objectSelector is evaluated against both
-  // the oldObject and newObject that would be sent to the cel validation, and
+  // the oldObject and newObject that would be sent to the policy's expression (CEL), and
   // is considered to match if either object matches the selector. A null
   // object (oldObject in the case of create, or newObject in the case of
   // delete) or an object that cannot have labels (like a
@@ -286,13 +286,13 @@ message MatchResources {
   // +optional
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
 
-  // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
+  // ResourceRules describes what operations on what resources/subresources the admission policy matches.
   // The policy cares about an operation if it matches _any_ Rule.
   // +listType=atomic
   // +optional
   repeated NamedRuleWithOperations resourceRules = 3;
 
-  // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
+  // ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about.
   // The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
   // +listType=atomic
   // +optional
@@ -304,12 +304,13 @@ message MatchResources {
   // - Exact: match a request only if it exactly matches a specified rule.
   // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
   // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
-  // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
+  // the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.
   //
   // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version.
   // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
   // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
-  // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
+  // the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1
+  // API groups. The API server translates the request to a matched resource API if necessary.
   //
   // Defaults to "Equivalent"
   // +optional
diff --git a/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
index ee50fbe2d..f183498a5 100644
--- a/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
+++ b/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
@@ -56,9 +56,9 @@ const (
 type FailurePolicyType string
 
 const (
-	// Ignore means that an error calling the webhook is ignored.
+	// Ignore means that an error calling the admission webhook or admission policy is ignored.
 	Ignore FailurePolicyType = "Ignore"
-	// Fail means that an error calling the webhook causes the admission to fail.
+	// Fail means that an error calling the admission webhook or admission policy causes resource admission to fail.
 	Fail FailurePolicyType = "Fail"
 )
 
@@ -67,9 +67,11 @@ const (
 type MatchPolicyType string
 
 const (
-	// Exact means requests should only be sent to the webhook if they exactly match a given rule.
+	// Exact means requests should only be sent to the admission webhook or admission policy if they exactly match a given rule.
 	Exact MatchPolicyType = "Exact"
-	// Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version.
+	// Equivalent means requests should be sent to the admission webhook or admission policy if they modify a resource listed
+	// in rules via an equivalent API group or version. For example, `autoscaling/v1` and `autoscaling/v2`
+	// HorizontalPodAutoscalers are equivalent: the same set of resources appear via both APIs.
 	Equivalent MatchPolicyType = "Equivalent"
 )
 
@@ -577,9 +579,9 @@ type MatchResources struct {
 	// Default to the empty LabelSelector, which matches everything.
 	// +optional
 	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"`
-	// ObjectSelector decides whether to run the validation based on if the
+	// ObjectSelector decides whether to run the policy based on if the
 	// object has matching labels. objectSelector is evaluated against both
-	// the oldObject and newObject that would be sent to the cel validation, and
+	// the oldObject and newObject that would be sent to the policy's expression (CEL), and
 	// is considered to match if either object matches the selector. A null
 	// object (oldObject in the case of create, or newObject in the case of
 	// delete) or an object that cannot have labels (like a
@@ -590,12 +592,12 @@ type MatchResources struct {
 	// Default to the empty LabelSelector, which matches everything.
 	// +optional
 	ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"`
-	// ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
+	// ResourceRules describes what operations on what resources/subresources the admission policy matches.
 	// The policy cares about an operation if it matches _any_ Rule.
 	// +listType=atomic
 	// +optional
 	ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"`
-	// ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
+	// ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about.
 	// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
 	// +listType=atomic
 	// +optional
@@ -606,12 +608,13 @@ type MatchResources struct {
 	// - Exact: match a request only if it exactly matches a specified rule.
 	// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
 	// but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
-	// a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
+	// the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.
 	//
 	// - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version.
 	// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
 	// and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
-	// a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
+	// the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1
+	// API groups. The API server translates the request to a matched resource API if necessary.
 	//
 	// Defaults to "Equivalent"
 	// +optional
diff --git a/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
index 32222a81b..116e56e06 100644
--- a/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
+++ b/tools/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
@@ -68,10 +68,10 @@ func (JSONPatch) SwaggerDoc() map[string]string {
 var map_MatchResources = map[string]string{
 	"":                     "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
 	"namespaceSelector":    "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
-	"objectSelector":       "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
-	"resourceRules":        "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.",
-	"excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
-	"matchPolicy":          "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"",
+	"objectSelector":       "ObjectSelector decides whether to run the policy based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the policy's expression (CEL), and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
+	"resourceRules":        "ResourceRules describes what operations on what resources/subresources the admission policy matches. The policy cares about an operation if it matches _any_ Rule.",
+	"excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
+	"matchPolicy":          "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1 API groups. The API server translates the request to a matched resource API if necessary.\n\nDefaults to \"Equivalent\"",
 }
 
 func (MatchResources) SwaggerDoc() map[string]string {
diff --git a/tools/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/tools/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
index 0095cb257..40d831573 100644
--- a/tools/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
+++ b/tools/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
@@ -24,4 +24,4 @@ limitations under the License.
 // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
 // MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
 // new dynamic admission controller configuration.
-package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1"
+package v1beta1
diff --git a/tools/vendor/k8s.io/api/apidiscovery/v2/doc.go b/tools/vendor/k8s.io/api/apidiscovery/v2/doc.go
index 4f3ad5f13..f46d33e94 100644
--- a/tools/vendor/k8s.io/api/apidiscovery/v2/doc.go
+++ b/tools/vendor/k8s.io/api/apidiscovery/v2/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
 // +k8s:prerelease-lifecycle-gen=true
 // +groupName=apidiscovery.k8s.io
 
-package v2 // import "k8s.io/api/apidiscovery/v2"
+package v2
diff --git a/tools/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go b/tools/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
index e85da226e..d4fceab68 100644
--- a/tools/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
+++ b/tools/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
 
 // +groupName=apidiscovery.k8s.io
 
-package v2beta1 // import "k8s.io/api/apidiscovery/v2beta1"
+package v2beta1
diff --git a/tools/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go b/tools/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
index a4da95d44..867d74165 100644
--- a/tools/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
+++ b/tools/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
@@ -22,4 +22,4 @@ limitations under the License.
 
 // Package v1alpha1 contains the v1alpha1 version of the API used by the
 // apiservers themselves.
-package v1alpha1 // import "k8s.io/api/apiserverinternal/v1alpha1"
+package v1alpha1
diff --git a/tools/vendor/k8s.io/api/apps/v1/doc.go b/tools/vendor/k8s.io/api/apps/v1/doc.go
index d189e860f..51fe12c53 100644
--- a/tools/vendor/k8s.io/api/apps/v1/doc.go
+++ b/tools/vendor/k8s.io/api/apps/v1/doc.go
@@ -19,4 +19,4 @@ limitations under the License.
// +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v1 // import "k8s.io/api/apps/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/apps/v1/generated.pb.go b/tools/vendor/k8s.io/api/apps/v1/generated.pb.go index ea62a099f..eacc25931 100644 --- a/tools/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/tools/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -928,145 +928,147 @@ func init() { } var fileDescriptor_5b781835628d5338 = []byte{ - // 2194 bytes of a gzipped FileDescriptorProto + // 2225 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0xf2, 0x43, 0xa2, 0x86, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x1b, 0xd2, 0xdd, 0xb8, - 0xb6, 0x12, 0xc7, 0x64, 0xed, 0x38, 0x41, 0xe0, 0x14, 0x09, 0x44, 0x2a, 0x4d, 0xd3, 0xe8, 0xab, - 0x43, 0xcb, 0x01, 0xdc, 0xb4, 0xe8, 0x68, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x0b, - 0xbd, 0x14, 0x05, 0x7a, 0xeb, 0xa1, 0x7f, 0x43, 0xff, 0x81, 0xa2, 0x28, 0x9a, 0x5b, 0x10, 0x04, - 0xbd, 0xf8, 0x52, 0x20, 0xe8, 0xa5, 0x39, 0x11, 0x35, 0x73, 0x2a, 0x8a, 0xde, 0xda, 0x8b, 0x2f, - 0x2d, 0x66, 0x76, 0xf6, 0x7b, 0x56, 0xa4, 0xe4, 0x58, 0x69, 0x82, 0xdc, 0xb8, 0x33, 0xbf, 0xf7, - 0xdb, 0x37, 0x33, 0xef, 0xcd, 0xfb, 0xcd, 0x2c, 0x81, 0x7a, 0xff, 0x55, 0xaf, 0xa9, 0xdb, 0x2d, - 0xec, 0xe8, 0x2d, 0xec, 0x38, 0x5e, 0xeb, 0xe0, 0x7a, 0xab, 0x4f, 0x2c, 0xe2, 0x62, 0x4a, 0x7a, - 0x4d, 0xc7, 0xb5, 0xa9, 0x0d, 0xa1, 0x8f, 0x69, 0x62, 0x47, 0x6f, 0x32, 0x4c, 0xf3, 0xe0, 0xfa, - 0xf9, 0x6b, 0x7d, 0x9d, 0xee, 0x0f, 0xf6, 0x9a, 0x9a, 0x6d, 0xb6, 0xfa, 0x76, 0xdf, 0x6e, 0x71, - 0xe8, 0xde, 0xe0, 0x1e, 0x7f, 0xe2, 0x0f, 0xfc, 0x97, 0x4f, 0x71, 0x3e, 0xfe, 0x1a, 0xcd, 0x76, - 0x89, 0xe4, 0x35, 0xe7, 0x6f, 0x46, 0x18, 0x13, 0x6b, 0xfb, 0xba, 0x45, 0xdc, 0xc3, 0x96, 0x73, - 0xbf, 0xcf, 0x1a, 0xbc, 0x96, 0x49, 0x28, 0x96, 0x59, 0xb5, 0xf2, 0xac, 0xdc, 0x81, 0x45, 0x75, - 0x93, 0x64, 0x0c, 0x5e, 0x19, 0x67, 0xe0, 0x69, 0xfb, 0xc4, 0xc4, 0x19, 0xbb, 0x97, 0xf2, 0xec, - 0x06, 0x54, 0x37, 0x5a, 0xba, 0x45, 0x3d, 0xea, 0xa6, 0x8d, 0xd4, 0xff, 0x28, 0x00, 0x76, 0x6c, - 0x8b, 0xba, 0xb6, 0x61, 0x10, 0x17, 0x91, 0x03, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x1c, 0x54, 0xd8, - 0x78, 0x7a, 0x98, 0xe2, 0x9a, 0x72, 0x51, 0x59, 0xad, 0xde, 0xf8, 0x5e, 0x33, 0x9a, 0xe4, 0x90, - 0xbe, 0xe9, 0xdc, 0xef, 0xb3, 0x06, 0xaf, 0xc9, 0xd0, 0xcd, 0x83, 0xeb, 0xcd, 0xed, 0xbd, 0xf7, - 0x89, 0x46, 0x37, 0x09, 0xc5, 0x6d, 0xf8, 0x70, 0xd8, 0x98, 0x1a, 0x0d, 0x1b, 0x20, 0x6a, 0x43, - 0x21, 0x2b, 0xdc, 0x06, 0x25, 0xce, 0x5e, 0xe0, 0xec, 0xd7, 0x72, 0xd9, 0xc5, 0xa0, 0x9b, 0x08, - 0x7f, 0xf0, 0xe6, 0x03, 0x4a, 0x2c, 0xe6, 0x5e, 0xfb, 0x8c, 0xa0, 0x2e, 0xad, 0x63, 0x8a, 0x11, - 0x27, 0x82, 0x2f, 0x82, 0x8a, 0x2b, 0xdc, 0xaf, 0x15, 0x2f, 0x2a, 0xab, 0xc5, 0xf6, 0x59, 0x81, - 0xaa, 0x04, 0xc3, 0x42, 0x21, 0x42, 0xfd, 0xb3, 0x02, 0x96, 0xb3, 0xe3, 0xde, 0xd0, 0x3d, 0x0a, - 0xdf, 0xcb, 0x8c, 0xbd, 0x39, 0xd9, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xf8, 0xe2, 0xa0, 0x25, 0x36, - 0xee, 0x77, 0x40, 0x59, 0xa7, 0xc4, 0xf4, 0x6a, 0x85, 0x8b, 0xc5, 0xd5, 0xea, 0x8d, 0xcb, 0xcd, - 0x6c, 0xec, 0x36, 0xb3, 0x8e, 0xb5, 0xe7, 0x04, 0x65, 0xf9, 0x6d, 0x66, 0x8c, 0x7c, 0x0e, 0xf5, - 0xbf, 0x0a, 0x98, 0x5d, 0xc7, 0xc4, 0xb4, 0xad, 0x2e, 0xa1, 0xa7, 0xb0, 0x68, 0x1d, 0x50, 0xf2, - 0x1c, 0xa2, 0x89, 0x45, 0xfb, 0x8e, 0xcc, 0xf7, 0xd0, 0x9d, 0xae, 0x43, 0xb4, 0x68, 0xa1, 0xd8, - 0x13, 0xe2, 0xc6, 0xf0, 0x1d, 0x30, 0xed, 0x51, 0x4c, 0x07, 0x1e, 0x5f, 0xa6, 0xea, 0x8d, 0xe7, - 0x8e, 0xa6, 0xe1, 0xd0, 0xf6, 0xbc, 0x20, 0x9a, 0xf6, 0x9f, 
0x91, 0xa0, 0x50, 0xff, 0x51, 0x00, - 0x30, 0xc4, 0x76, 0x6c, 0xab, 0xa7, 0x53, 0x16, 0xbf, 0xb7, 0x40, 0x89, 0x1e, 0x3a, 0x84, 0x4f, - 0xc3, 0x6c, 0xfb, 0x72, 0xe0, 0xc5, 0xed, 0x43, 0x87, 0x3c, 0x1e, 0x36, 0x96, 0xb3, 0x16, 0xac, - 0x07, 0x71, 0x1b, 0xb8, 0x11, 0xfa, 0x57, 0xe0, 0xd6, 0x37, 0x93, 0xaf, 0x7e, 0x3c, 0x6c, 0x48, - 0x36, 0x8b, 0x66, 0xc8, 0x94, 0x74, 0x10, 0x1e, 0x00, 0x68, 0x60, 0x8f, 0xde, 0x76, 0xb1, 0xe5, - 0xf9, 0x6f, 0xd2, 0x4d, 0x22, 0x46, 0xfe, 0xc2, 0x64, 0xcb, 0xc3, 0x2c, 0xda, 0xe7, 0x85, 0x17, - 0x70, 0x23, 0xc3, 0x86, 0x24, 0x6f, 0x80, 0x97, 0xc1, 0xb4, 0x4b, 0xb0, 0x67, 0x5b, 0xb5, 0x12, - 0x1f, 0x45, 0x38, 0x81, 0x88, 0xb7, 0x22, 0xd1, 0x0b, 0x9f, 0x07, 0x33, 0x26, 0xf1, 0x3c, 0xdc, - 0x27, 0xb5, 0x32, 0x07, 0x2e, 0x08, 0xe0, 0xcc, 0xa6, 0xdf, 0x8c, 0x82, 0x7e, 0xf5, 0x0f, 0x0a, - 0x98, 0x0b, 0x67, 0xee, 0x14, 0x52, 0xa5, 0x9d, 0x4c, 0x95, 0x67, 0x8f, 0x8c, 0x93, 0x9c, 0x0c, - 0xf9, 0xb8, 0x18, 0xf3, 0x99, 0x05, 0x21, 0xfc, 0x29, 0xa8, 0x78, 0xc4, 0x20, 0x1a, 0xb5, 0x5d, - 0xe1, 0xf3, 0x4b, 0x13, 0xfa, 0x8c, 0xf7, 0x88, 0xd1, 0x15, 0xa6, 0xed, 0x33, 0xcc, 0xe9, 0xe0, - 0x09, 0x85, 0x94, 0xf0, 0xc7, 0xa0, 0x42, 0x89, 0xe9, 0x18, 0x98, 0x12, 0x91, 0x26, 0x89, 0xf8, - 0x66, 0xe1, 0xc2, 0xc8, 0x76, 0xec, 0xde, 0x6d, 0x01, 0xe3, 0x89, 0x12, 0xce, 0x43, 0xd0, 0x8a, - 0x42, 0x1a, 0x78, 0x1f, 0xcc, 0x0f, 0x9c, 0x1e, 0x43, 0x52, 0xb6, 0x75, 0xf7, 0x0f, 0x45, 0xf8, - 0x5c, 0x3d, 0x72, 0x42, 0x76, 0x13, 0x26, 0xed, 0x65, 0xf1, 0x82, 0xf9, 0x64, 0x3b, 0x4a, 0x51, - 0xc3, 0x35, 0xb0, 0x60, 0xea, 0x16, 0x22, 0xb8, 0x77, 0xd8, 0x25, 0x9a, 0x6d, 0xf5, 0x3c, 0x1e, - 0x40, 0xe5, 0xf6, 0x8a, 0x20, 0x58, 0xd8, 0x4c, 0x76, 0xa3, 0x34, 0x1e, 0x6e, 0x80, 0xa5, 0x60, - 0x9f, 0xfd, 0xa1, 0xee, 0x51, 0xdb, 0x3d, 0xdc, 0xd0, 0x4d, 0x9d, 0xd6, 0xa6, 0x39, 0x4f, 0x6d, - 0x34, 0x6c, 0x2c, 0x21, 0x49, 0x3f, 0x92, 0x5a, 0xa9, 0xbf, 0x99, 0x06, 0x0b, 0xa9, 0xdd, 0x00, - 0xde, 0x01, 0xcb, 0xda, 0xc0, 0x75, 0x89, 0x45, 0xb7, 0x06, 0xe6, 0x1e, 0x71, 0xbb, 0xda, 0x3e, - 0xe9, 0x0d, 0x0c, 0xd2, 0xe3, 0x2b, 0x5a, 0x6e, 0xd7, 0x85, 0xaf, 0xcb, 0x1d, 0x29, 0x0a, 0xe5, - 0x58, 0xc3, 0x1f, 0x01, 0x68, 0xf1, 0xa6, 0x4d, 0xdd, 0xf3, 0x42, 0xce, 0x02, 0xe7, 0x0c, 0x13, - 0x70, 0x2b, 0x83, 0x40, 0x12, 0x2b, 0xe6, 0x63, 0x8f, 0x78, 0xba, 0x4b, 0x7a, 0x69, 0x1f, 0x8b, - 0x49, 0x1f, 0xd7, 0xa5, 0x28, 0x94, 0x63, 0x0d, 0x5f, 0x06, 0x55, 0xff, 0x6d, 0x7c, 0xce, 0xc5, - 0xe2, 0x2c, 0x0a, 0xb2, 0xea, 0x56, 0xd4, 0x85, 0xe2, 0x38, 0x36, 0x34, 0x7b, 0xcf, 0x23, 0xee, - 0x01, 0xe9, 0xbd, 0xe5, 0x6b, 0x00, 0x56, 0x28, 0xcb, 0xbc, 0x50, 0x86, 0x43, 0xdb, 0xce, 0x20, - 0x90, 0xc4, 0x8a, 0x0d, 0xcd, 0x8f, 0x9a, 0xcc, 0xd0, 0xa6, 0x93, 0x43, 0xdb, 0x95, 0xa2, 0x50, - 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xb5, 0x03, 0xac, 0x1b, 0x78, 0xcf, 0x20, 0xb5, 0x99, 0x64, - 0xec, 0x6d, 0x25, 0xbb, 0x51, 0x1a, 0x0f, 0xdf, 0x02, 0xe7, 0xfc, 0xa6, 0x5d, 0x0b, 0x87, 0x24, - 0x15, 0x4e, 0xf2, 0x8c, 0x20, 0x39, 0xb7, 0x95, 0x06, 0xa0, 0xac, 0x0d, 0xbc, 0x05, 0xe6, 0x35, - 0xdb, 0x30, 0x78, 0x3c, 0x76, 0xec, 0x81, 0x45, 0x6b, 0xb3, 0x9c, 0x05, 0xb2, 0x1c, 0xea, 0x24, - 0x7a, 0x50, 0x0a, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x81, 0xfc, 0x42, 0x9f, 0xad, - 0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x63, 0x53, 0x3f, 0x56, 0xc0, 0x4a, 0x4e, 0x8e, 0xc3, - 0x37, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d, 0xcc, 0x31, - 0xdd, 0xa1, 0x5b, 0x7d, 0x1f, 0x22, 0x76, 0xb0, 0x17, 0x64, 0xbe, 0xa3, 0x38, 0x30, 0xda, 0x86, - 0xcf, 0x8d, 0x86, 0x8d, 0xb9, 0x44, 0x1f, 0x4a, 0x72, 0xaa, 0xbf, 0x2a, 0x00, 0xb0, 
0x4e, 0x1c, - 0xc3, 0x3e, 0x34, 0x89, 0x75, 0x1a, 0xaa, 0x65, 0x3d, 0xa1, 0x5a, 0x54, 0xe9, 0x42, 0x84, 0xfe, - 0xe4, 0xca, 0x96, 0x8d, 0x94, 0x6c, 0xb9, 0x34, 0x86, 0xe7, 0x68, 0xdd, 0xf2, 0xb7, 0x22, 0x58, - 0x8c, 0xc0, 0x91, 0x70, 0x79, 0x2d, 0xb1, 0x84, 0x57, 0x52, 0x4b, 0xb8, 0x22, 0x31, 0x79, 0x6a, - 0xca, 0xe5, 0x7d, 0x30, 0xcf, 0x74, 0x85, 0xbf, 0x6a, 0x5c, 0xb5, 0x4c, 0x1f, 0x5b, 0xb5, 0x84, - 0x55, 0x67, 0x23, 0xc1, 0x84, 0x52, 0xcc, 0x39, 0x2a, 0x69, 0xe6, 0xab, 0xa8, 0x92, 0xfe, 0xa8, - 0x80, 0xf9, 0x68, 0x99, 0x4e, 0x41, 0x26, 0x75, 0x92, 0x32, 0xa9, 0x7e, 0x74, 0x5c, 0xe6, 0xe8, - 0xa4, 0xbf, 0x96, 0xe2, 0x5e, 0x73, 0xa1, 0xb4, 0xca, 0x0e, 0x54, 0x8e, 0xa1, 0x6b, 0xd8, 0x13, - 0x65, 0xf5, 0x8c, 0x7f, 0x98, 0xf2, 0xdb, 0x50, 0xd8, 0x9b, 0x90, 0x54, 0x85, 0xa7, 0x2b, 0xa9, - 0x8a, 0x5f, 0x8c, 0xa4, 0xba, 0x0d, 0x2a, 0x5e, 0x20, 0xa6, 0x4a, 0x9c, 0xf2, 0xf2, 0xb8, 0x74, - 0x16, 0x3a, 0x2a, 0x64, 0x0d, 0x15, 0x54, 0xc8, 0x24, 0xd3, 0x4e, 0xe5, 0x2f, 0x53, 0x3b, 0xb1, - 0xf0, 0x76, 0xf0, 0xc0, 0x23, 0x3d, 0x9e, 0x4a, 0x95, 0x28, 0xbc, 0x77, 0x78, 0x2b, 0x12, 0xbd, - 0x70, 0x17, 0xac, 0x38, 0xae, 0xdd, 0x77, 0x89, 0xe7, 0xad, 0x13, 0xdc, 0x33, 0x74, 0x8b, 0x04, - 0x03, 0xf0, 0xab, 0xde, 0x85, 0xd1, 0xb0, 0xb1, 0xb2, 0x23, 0x87, 0xa0, 0x3c, 0x5b, 0xf5, 0xa3, - 0x12, 0x38, 0x9b, 0xde, 0x11, 0x73, 0x84, 0x88, 0x72, 0x22, 0x21, 0xf2, 0x62, 0x2c, 0x44, 0x7d, - 0x95, 0x16, 0x3b, 0xf3, 0x67, 0xc2, 0x74, 0x0d, 0x2c, 0x08, 0xe1, 0x11, 0x74, 0x0a, 0x29, 0x16, - 0x2e, 0xcf, 0x6e, 0xb2, 0x1b, 0xa5, 0xf1, 0xf0, 0x35, 0x30, 0xe7, 0x72, 0x6d, 0x15, 0x10, 0xf8, - 0xfa, 0xe4, 0x5b, 0x82, 0x60, 0x0e, 0xc5, 0x3b, 0x51, 0x12, 0xcb, 0xb4, 0x49, 0x24, 0x39, 0x02, - 0x82, 0x52, 0x52, 0x9b, 0xac, 0xa5, 0x01, 0x28, 0x6b, 0x03, 0x37, 0xc1, 0xe2, 0xc0, 0xca, 0x52, - 0xf9, 0xb1, 0x76, 0x41, 0x50, 0x2d, 0xee, 0x66, 0x21, 0x48, 0x66, 0x07, 0x7f, 0x92, 0x90, 0x2b, - 0xd3, 0x7c, 0x17, 0xb9, 0x72, 0x74, 0x3a, 0x4c, 0xac, 0x57, 0x24, 0x3a, 0xaa, 0x32, 0xa9, 0x8e, - 0x52, 0x3f, 0x54, 0x00, 0xcc, 0xa6, 0xe0, 0xd8, 0xc3, 0x7d, 0xc6, 0x22, 0x56, 0x22, 0x7b, 0x72, - 0x85, 0x73, 0x75, 0xbc, 0xc2, 0x89, 0x76, 0xd0, 0xc9, 0x24, 0x8e, 0x98, 0xde, 0xd3, 0xb9, 0x98, - 0x99, 0x40, 0xe2, 0x44, 0xfe, 0x3c, 0x99, 0xc4, 0x89, 0xf1, 0x1c, 0x2d, 0x71, 0xfe, 0x59, 0x00, - 0x8b, 0x11, 0x78, 0x62, 0x89, 0x23, 0x31, 0xf9, 0xe6, 0x72, 0x66, 0x32, 0xd9, 0x11, 0x4d, 0xdd, - 0xff, 0x89, 0xec, 0x88, 0x1c, 0xca, 0x91, 0x1d, 0xbf, 0x2f, 0xc4, 0xbd, 0x3e, 0xa6, 0xec, 0xf8, - 0x02, 0xae, 0x2a, 0xbe, 0x72, 0xca, 0x45, 0xfd, 0xa4, 0x08, 0xce, 0xa6, 0x53, 0x30, 0x51, 0x07, - 0x95, 0xb1, 0x75, 0x70, 0x07, 0x2c, 0xdd, 0x1b, 0x18, 0xc6, 0x21, 0x1f, 0x43, 0xac, 0x18, 0xfa, - 0x15, 0xf4, 0xdb, 0xc2, 0x72, 0xe9, 0x07, 0x12, 0x0c, 0x92, 0x5a, 0x66, 0xcb, 0x62, 0xe9, 0x49, - 0xcb, 0x62, 0xf9, 0x04, 0x65, 0x51, 0xae, 0x2c, 0x8a, 0x27, 0x52, 0x16, 0x13, 0xd7, 0x44, 0xc9, - 0x76, 0x35, 0xf6, 0x0c, 0x3f, 0x52, 0xc0, 0xb2, 0xfc, 0xf8, 0x0c, 0x0d, 0x30, 0x6f, 0xe2, 0x07, - 0xf1, 0xcb, 0x8b, 0x71, 0x05, 0x63, 0x40, 0x75, 0xa3, 0xe9, 0x7f, 0xdd, 0x69, 0xbe, 0x6d, 0xd1, - 0x6d, 0xb7, 0x4b, 0x5d, 0xdd, 0xea, 0xfb, 0x05, 0x76, 0x33, 0xc1, 0x85, 0x52, 0xdc, 0xf0, 0x2e, - 0xa8, 0x98, 0xf8, 0x41, 0x77, 0xe0, 0xf6, 0x83, 0x42, 0x78, 0xfc, 0xf7, 0xf0, 0xd8, 0xdf, 0x14, - 0x2c, 0x28, 0xe4, 0x53, 0x3f, 0x57, 0xc0, 0x4a, 0x4e, 0x05, 0xfd, 0x1a, 0x8d, 0xf2, 0x23, 0x05, - 0x5c, 0x4c, 0x8c, 0x92, 0x65, 0x24, 0xb9, 0x37, 0x30, 0x78, 0x72, 0x0a, 0xc1, 0x72, 0x15, 0xcc, - 0x3a, 0xd8, 0xa5, 0x7a, 0xa8, 0x74, 0xcb, 0xed, 0xb9, 0xd1, 0xb0, 0x31, 0xbb, 0x13, 0x34, 0xa2, - 0xa8, 0x5f, 
0x32, 0x37, 0x85, 0xa7, 0x37, 0x37, 0xea, 0xaf, 0x0b, 0xa0, 0x1a, 0x73, 0xf9, 0x14, - 0xa4, 0xca, 0x9b, 0x09, 0xa9, 0x22, 0xfd, 0xf8, 0x13, 0x9f, 0xc3, 0x3c, 0xad, 0xb2, 0x99, 0xd2, - 0x2a, 0xdf, 0x1d, 0x47, 0x74, 0xb4, 0x58, 0xf9, 0x57, 0x01, 0x2c, 0xc5, 0xd0, 0x91, 0x5a, 0xf9, - 0x7e, 0x42, 0xad, 0xac, 0xa6, 0xd4, 0x4a, 0x4d, 0x66, 0xf3, 0x8d, 0x5c, 0x19, 0x2f, 0x57, 0xfe, - 0xa4, 0x80, 0x85, 0xd8, 0xdc, 0x9d, 0x82, 0x5e, 0x59, 0x4f, 0xea, 0x95, 0xc6, 0x98, 0x78, 0xc9, - 0x11, 0x2c, 0xb7, 0xc0, 0x62, 0x0c, 0xb4, 0xed, 0xf6, 0x74, 0x0b, 0x1b, 0x1e, 0x7c, 0x0e, 0x94, - 0x3d, 0x8a, 0x5d, 0x1a, 0x64, 0x77, 0x60, 0xdb, 0x65, 0x8d, 0xc8, 0xef, 0x53, 0xff, 0xad, 0x80, - 0x56, 0xcc, 0x78, 0x87, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x3b, 0xb6, 0x31, 0x30, 0x49, 0xc7, - 0xc0, 0xba, 0x89, 0x08, 0x6b, 0xd0, 0x6d, 0x6b, 0xc7, 0x36, 0x74, 0xed, 0x10, 0x62, 0x50, 0xfd, - 0x60, 0x9f, 0x58, 0xeb, 0xc4, 0x20, 0x54, 0x7c, 0xde, 0x98, 0x6d, 0xbf, 0x11, 0xdc, 0xf6, 0xbf, - 0x1b, 0x75, 0x3d, 0x1e, 0x36, 0x56, 0x27, 0x61, 0xe4, 0xc1, 0x19, 0xe7, 0x84, 0x3f, 0x03, 0x80, - 0x3d, 0x76, 0x35, 0x1c, 0x7c, 0xec, 0x98, 0x6d, 0xbf, 0x1e, 0xa4, 0xf0, 0xbb, 0x61, 0xcf, 0xb1, - 0x5e, 0x10, 0x63, 0x54, 0x7f, 0x57, 0x49, 0x2c, 0xf5, 0xd7, 0xfe, 0x6e, 0xe9, 0x17, 0x60, 0xe9, - 0x20, 0x9a, 0x9d, 0x00, 0xc0, 0x34, 0x11, 0x8b, 0xbb, 0xe7, 0xa5, 0xf4, 0xb2, 0x79, 0x8d, 0x94, - 0xd8, 0x1d, 0x09, 0x1d, 0x92, 0xbe, 0x04, 0xbe, 0x0c, 0xaa, 0x4c, 0xcb, 0xe8, 0x1a, 0xd9, 0xc2, - 0x66, 0x90, 0x86, 0xe1, 0xd7, 0xa1, 0x6e, 0xd4, 0x85, 0xe2, 0x38, 0xb8, 0x0f, 0x16, 0x1d, 0xbb, - 0xb7, 0x89, 0x2d, 0xdc, 0x27, 0xac, 0x42, 0xfb, 0x4b, 0xc9, 0x6f, 0x9d, 0x66, 0xdb, 0xaf, 0x04, - 0x37, 0x0a, 0x3b, 0x59, 0x08, 0x3b, 0xb1, 0x49, 0x9a, 0x79, 0x10, 0xc8, 0x28, 0xa1, 0x99, 0xf9, - 0x98, 0x39, 0x93, 0xf9, 0x07, 0x88, 0x2c, 0x1f, 0x4f, 0xf8, 0x39, 0x33, 0xef, 0x3e, 0xad, 0x72, - 0xa2, 0xfb, 0x34, 0xc9, 0x89, 0x63, 0xf6, 0x98, 0x27, 0x8e, 0x4f, 0x14, 0x70, 0xc9, 0x99, 0x20, - 0x8d, 0x6a, 0x80, 0x4f, 0x4b, 0x67, 0xcc, 0xb4, 0x4c, 0x92, 0x91, 0xed, 0xd5, 0xd1, 0xb0, 0x71, - 0x69, 0x12, 0x24, 0x9a, 0xc8, 0x35, 0x96, 0x34, 0xb6, 0xd8, 0xf9, 0x6a, 0x55, 0xee, 0xe6, 0x95, - 0x31, 0x6e, 0x06, 0x1b, 0xa5, 0x9f, 0x87, 0xc1, 0x13, 0x0a, 0x69, 0xd4, 0x0f, 0xcb, 0xe0, 0x5c, - 0xa6, 0x5a, 0x7f, 0x89, 0x77, 0x85, 0x99, 0x13, 0x4d, 0xf1, 0x18, 0x27, 0x9a, 0x35, 0xb0, 0x20, - 0x3e, 0x30, 0xa7, 0x0e, 0x44, 0x61, 0x98, 0x74, 0x92, 0xdd, 0x28, 0x8d, 0x97, 0xdd, 0x55, 0x96, - 0x8f, 0x79, 0x57, 0x19, 0xf7, 0x42, 0xfc, 0x2f, 0xca, 0xcf, 0xe7, 0xac, 0x17, 0xe2, 0xef, 0x51, - 0x69, 0x3c, 0x7c, 0x3d, 0x48, 0xd6, 0x90, 0x61, 0x86, 0x33, 0xa4, 0xb2, 0x2f, 0x24, 0x48, 0xa1, - 0x9f, 0xe8, 0x23, 0xea, 0x7b, 0x92, 0x8f, 0xa8, 0xab, 0x63, 0xc2, 0x6c, 0xf2, 0x6b, 0x49, 0xe9, - 0xa1, 0xb3, 0x7a, 0xfc, 0x43, 0xa7, 0xfa, 0x17, 0x05, 0x3c, 0x93, 0xbb, 0x4d, 0xc1, 0xb5, 0x84, - 0x7a, 0xbc, 0x96, 0x52, 0x8f, 0xcf, 0xe6, 0x1a, 0xc6, 0x24, 0xa4, 0x29, 0xbf, 0xb1, 0xbc, 0x39, - 0xf6, 0xc6, 0x52, 0x72, 0x12, 0x19, 0x7f, 0x75, 0xd9, 0x7e, 0xf5, 0xe1, 0xa3, 0xfa, 0xd4, 0xa7, - 0x8f, 0xea, 0x53, 0x9f, 0x3d, 0xaa, 0x4f, 0xfd, 0x72, 0x54, 0x57, 0x1e, 0x8e, 0xea, 0xca, 0xa7, - 0xa3, 0xba, 0xf2, 0xd9, 0xa8, 0xae, 0xfc, 0x7d, 0x54, 0x57, 0x7e, 0xfb, 0x79, 0x7d, 0xea, 0x2e, - 0xcc, 0xfe, 0x2b, 0xf3, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xfa, 0xed, 0x70, 0xaa, 0x29, - 0x00, 0x00, + 0x15, 0xd7, 0x52, 0xa4, 0x44, 0x0d, 0x2d, 0xc9, 0x1e, 0xa9, 0x12, 0x63, 0x37, 0xa4, 0xbb, 0x71, + 0x6d, 0x25, 0x8e, 0xc9, 0xda, 0x71, 0x82, 0xc0, 0x29, 0x12, 0x88, 0x54, 0x9a, 0xba, 0xd1, 0x57, + 0x87, 0x92, 0x03, 0xb8, 
0x69, 0xd1, 0xd1, 0x72, 0x4c, 0x6d, 0xbc, 0x5f, 0xd8, 0x1d, 0x2a, 0x16, + 0x7a, 0x29, 0x0a, 0x14, 0xe8, 0x21, 0x87, 0xfe, 0x0d, 0xfd, 0x07, 0x8a, 0xa2, 0x68, 0x6e, 0x45, + 0x50, 0xf4, 0xe2, 0x4b, 0x81, 0xa0, 0x97, 0xe6, 0x44, 0xd4, 0xcc, 0xa9, 0x28, 0x7a, 0x6b, 0x2f, + 0xbe, 0xb4, 0x98, 0xd9, 0xd9, 0xef, 0x59, 0x91, 0x92, 0x63, 0xa5, 0x09, 0x7c, 0xe3, 0xce, 0x7b, + 0xef, 0x37, 0x6f, 0x66, 0xde, 0x9b, 0xf7, 0x9b, 0x19, 0x02, 0xf5, 0xfe, 0xeb, 0x5e, 0x43, 0xb7, + 0x9b, 0xd8, 0xd1, 0x9b, 0xd8, 0x71, 0xbc, 0xe6, 0xc1, 0xf5, 0x66, 0x8f, 0x58, 0xc4, 0xc5, 0x94, + 0x74, 0x1b, 0x8e, 0x6b, 0x53, 0x1b, 0x42, 0x5f, 0xa7, 0x81, 0x1d, 0xbd, 0xc1, 0x74, 0x1a, 0x07, + 0xd7, 0xcf, 0x5f, 0xeb, 0xe9, 0x74, 0xbf, 0xbf, 0xd7, 0xd0, 0x6c, 0xb3, 0xd9, 0xb3, 0x7b, 0x76, + 0x93, 0xab, 0xee, 0xf5, 0xef, 0xf1, 0x2f, 0xfe, 0xc1, 0x7f, 0xf9, 0x10, 0xe7, 0xe3, 0xdd, 0x68, + 0xb6, 0x4b, 0x24, 0xdd, 0x9c, 0xbf, 0x19, 0xe9, 0x98, 0x58, 0xdb, 0xd7, 0x2d, 0xe2, 0x1e, 0x36, + 0x9d, 0xfb, 0x3d, 0xd6, 0xe0, 0x35, 0x4d, 0x42, 0xb1, 0xcc, 0xaa, 0x99, 0x67, 0xe5, 0xf6, 0x2d, + 0xaa, 0x9b, 0x24, 0x63, 0xf0, 0xda, 0x28, 0x03, 0x4f, 0xdb, 0x27, 0x26, 0xce, 0xd8, 0xbd, 0x92, + 0x67, 0xd7, 0xa7, 0xba, 0xd1, 0xd4, 0x2d, 0xea, 0x51, 0x37, 0x6d, 0xa4, 0xfe, 0x47, 0x01, 0xb0, + 0x6d, 0x5b, 0xd4, 0xb5, 0x0d, 0x83, 0xb8, 0x88, 0x1c, 0xe8, 0x9e, 0x6e, 0x5b, 0xf0, 0xa7, 0xa0, + 0xcc, 0xc6, 0xd3, 0xc5, 0x14, 0x57, 0x95, 0x8b, 0xca, 0x4a, 0xe5, 0xc6, 0x77, 0x1a, 0xd1, 0x24, + 0x87, 0xf0, 0x0d, 0xe7, 0x7e, 0x8f, 0x35, 0x78, 0x0d, 0xa6, 0xdd, 0x38, 0xb8, 0xde, 0xd8, 0xda, + 0xfb, 0x80, 0x68, 0x74, 0x83, 0x50, 0xdc, 0x82, 0x0f, 0x07, 0xf5, 0x89, 0xe1, 0xa0, 0x0e, 0xa2, + 0x36, 0x14, 0xa2, 0xc2, 0x2d, 0x50, 0xe4, 0xe8, 0x05, 0x8e, 0x7e, 0x2d, 0x17, 0x5d, 0x0c, 0xba, + 0x81, 0xf0, 0x87, 0x6f, 0x3f, 0xa0, 0xc4, 0x62, 0xee, 0xb5, 0xce, 0x08, 0xe8, 0xe2, 0x1a, 0xa6, + 0x18, 0x71, 0x20, 0xf8, 0x32, 0x28, 0xbb, 0xc2, 0xfd, 0xea, 0xe4, 0x45, 0x65, 0x65, 0xb2, 0x75, + 0x56, 0x68, 0x95, 0x83, 0x61, 0xa1, 0x50, 0x43, 0xfd, 0xb3, 0x02, 0x96, 0xb2, 0xe3, 0x5e, 0xd7, + 0x3d, 0x0a, 0xdf, 0xcf, 0x8c, 0xbd, 0x31, 0xde, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xd8, 0x71, 0xd0, + 0x12, 0x1b, 0xf7, 0xbb, 0xa0, 0xa4, 0x53, 0x62, 0x7a, 0xd5, 0xc2, 0xc5, 0xc9, 0x95, 0xca, 0x8d, + 0xcb, 0x8d, 0x6c, 0xec, 0x36, 0xb2, 0x8e, 0xb5, 0x66, 0x05, 0x64, 0xe9, 0x36, 0x33, 0x46, 0x3e, + 0x86, 0xfa, 0x5f, 0x05, 0xcc, 0xac, 0x61, 0x62, 0xda, 0x56, 0x87, 0xd0, 0x53, 0x58, 0xb4, 0x36, + 0x28, 0x7a, 0x0e, 0xd1, 0xc4, 0xa2, 0x7d, 0x4b, 0xe6, 0x7b, 0xe8, 0x4e, 0xc7, 0x21, 0x5a, 0xb4, + 0x50, 0xec, 0x0b, 0x71, 0x63, 0xf8, 0x2e, 0x98, 0xf2, 0x28, 0xa6, 0x7d, 0x8f, 0x2f, 0x53, 0xe5, + 0xc6, 0x0b, 0x47, 0xc3, 0x70, 0xd5, 0xd6, 0x9c, 0x00, 0x9a, 0xf2, 0xbf, 0x91, 0x80, 0x50, 0xff, + 0x51, 0x00, 0x30, 0xd4, 0x6d, 0xdb, 0x56, 0x57, 0xa7, 0x2c, 0x7e, 0x6f, 0x81, 0x22, 0x3d, 0x74, + 0x08, 0x9f, 0x86, 0x99, 0xd6, 0xe5, 0xc0, 0x8b, 0x9d, 0x43, 0x87, 0x3c, 0x1e, 0xd4, 0x97, 0xb2, + 0x16, 0x4c, 0x82, 0xb8, 0x0d, 0x5c, 0x0f, 0xfd, 0x2b, 0x70, 0xeb, 0x9b, 0xc9, 0xae, 0x1f, 0x0f, + 0xea, 0x92, 0xcd, 0xa2, 0x11, 0x22, 0x25, 0x1d, 0x84, 0x07, 0x00, 0x1a, 0xd8, 0xa3, 0x3b, 0x2e, + 0xb6, 0x3c, 0xbf, 0x27, 0xdd, 0x24, 0x62, 0xe4, 0x2f, 0x8d, 0xb7, 0x3c, 0xcc, 0xa2, 0x75, 0x5e, + 0x78, 0x01, 0xd7, 0x33, 0x68, 0x48, 0xd2, 0x03, 0xbc, 0x0c, 0xa6, 0x5c, 0x82, 0x3d, 0xdb, 0xaa, + 0x16, 0xf9, 0x28, 0xc2, 0x09, 0x44, 0xbc, 0x15, 0x09, 0x29, 0x7c, 0x11, 0x4c, 0x9b, 0xc4, 0xf3, + 0x70, 0x8f, 0x54, 0x4b, 0x5c, 0x71, 0x5e, 0x28, 0x4e, 0x6f, 0xf8, 0xcd, 0x28, 0x90, 0xab, 0xbf, + 0x53, 0xc0, 0x6c, 0x38, 0x73, 0xa7, 0x90, 0x2a, 
0xad, 0x64, 0xaa, 0x3c, 0x7f, 0x64, 0x9c, 0xe4, + 0x64, 0xc8, 0x27, 0x93, 0x31, 0x9f, 0x59, 0x10, 0xc2, 0x1f, 0x83, 0xb2, 0x47, 0x0c, 0xa2, 0x51, + 0xdb, 0x15, 0x3e, 0xbf, 0x32, 0xa6, 0xcf, 0x78, 0x8f, 0x18, 0x1d, 0x61, 0xda, 0x3a, 0xc3, 0x9c, + 0x0e, 0xbe, 0x50, 0x08, 0x09, 0x7f, 0x08, 0xca, 0x94, 0x98, 0x8e, 0x81, 0x29, 0x11, 0x69, 0x92, + 0x88, 0x6f, 0x16, 0x2e, 0x0c, 0x6c, 0xdb, 0xee, 0xee, 0x08, 0x35, 0x9e, 0x28, 0xe1, 0x3c, 0x04, + 0xad, 0x28, 0x84, 0x81, 0xf7, 0xc1, 0x5c, 0xdf, 0xe9, 0x32, 0x4d, 0xca, 0xb6, 0xee, 0xde, 0xa1, + 0x08, 0x9f, 0xab, 0x47, 0x4e, 0xc8, 0x6e, 0xc2, 0xa4, 0xb5, 0x24, 0x3a, 0x98, 0x4b, 0xb6, 0xa3, + 0x14, 0x34, 0x5c, 0x05, 0xf3, 0xa6, 0x6e, 0x21, 0x82, 0xbb, 0x87, 0x1d, 0xa2, 0xd9, 0x56, 0xd7, + 0xe3, 0x01, 0x54, 0x6a, 0x2d, 0x0b, 0x80, 0xf9, 0x8d, 0xa4, 0x18, 0xa5, 0xf5, 0xe1, 0x3a, 0x58, + 0x0c, 0xf6, 0xd9, 0xef, 0xeb, 0x1e, 0xb5, 0xdd, 0xc3, 0x75, 0xdd, 0xd4, 0x69, 0x75, 0x8a, 0xe3, + 0x54, 0x87, 0x83, 0xfa, 0x22, 0x92, 0xc8, 0x91, 0xd4, 0x4a, 0xfd, 0x68, 0x0a, 0xcc, 0xa7, 0x76, + 0x03, 0x78, 0x07, 0x2c, 0x69, 0x7d, 0xd7, 0x25, 0x16, 0xdd, 0xec, 0x9b, 0x7b, 0xc4, 0xed, 0x68, + 0xfb, 0xa4, 0xdb, 0x37, 0x48, 0x97, 0xaf, 0x68, 0xa9, 0x55, 0x13, 0xbe, 0x2e, 0xb5, 0xa5, 0x5a, + 0x28, 0xc7, 0x1a, 0xfe, 0x00, 0x40, 0x8b, 0x37, 0x6d, 0xe8, 0x9e, 0x17, 0x62, 0x16, 0x38, 0x66, + 0x98, 0x80, 0x9b, 0x19, 0x0d, 0x24, 0xb1, 0x62, 0x3e, 0x76, 0x89, 0xa7, 0xbb, 0xa4, 0x9b, 0xf6, + 0x71, 0x32, 0xe9, 0xe3, 0x9a, 0x54, 0x0b, 0xe5, 0x58, 0xc3, 0x57, 0x41, 0xc5, 0xef, 0x8d, 0xcf, + 0xb9, 0x58, 0x9c, 0x05, 0x01, 0x56, 0xd9, 0x8c, 0x44, 0x28, 0xae, 0xc7, 0x86, 0x66, 0xef, 0x79, + 0xc4, 0x3d, 0x20, 0xdd, 0x77, 0x7c, 0x0e, 0xc0, 0x0a, 0x65, 0x89, 0x17, 0xca, 0x70, 0x68, 0x5b, + 0x19, 0x0d, 0x24, 0xb1, 0x62, 0x43, 0xf3, 0xa3, 0x26, 0x33, 0xb4, 0xa9, 0xe4, 0xd0, 0x76, 0xa5, + 0x5a, 0x28, 0xc7, 0x9a, 0xc5, 0x9e, 0xef, 0xf2, 0xea, 0x01, 0xd6, 0x0d, 0xbc, 0x67, 0x90, 0xea, + 0x74, 0x32, 0xf6, 0x36, 0x93, 0x62, 0x94, 0xd6, 0x87, 0xef, 0x80, 0x73, 0x7e, 0xd3, 0xae, 0x85, + 0x43, 0x90, 0x32, 0x07, 0x79, 0x4e, 0x80, 0x9c, 0xdb, 0x4c, 0x2b, 0xa0, 0xac, 0x0d, 0xbc, 0x05, + 0xe6, 0x34, 0xdb, 0x30, 0x78, 0x3c, 0xb6, 0xed, 0xbe, 0x45, 0xab, 0x33, 0x1c, 0x05, 0xb2, 0x1c, + 0x6a, 0x27, 0x24, 0x28, 0xa5, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x82, 0xfc, 0x42, + 0x9f, 0xad, 0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x43, 0x53, 0x3f, 0x51, 0xc0, 0x72, 0x4e, + 0x8e, 0xc3, 0xb7, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d, + 0xcc, 0x32, 0xde, 0xa1, 0x5b, 0x3d, 0x5f, 0x45, 0xec, 0x60, 0x2f, 0xc9, 0x7c, 0x47, 0x71, 0xc5, + 0x68, 0x1b, 0x3e, 0x37, 0x1c, 0xd4, 0x67, 0x13, 0x32, 0x94, 0xc4, 0x54, 0x7f, 0x51, 0x00, 0x60, + 0x8d, 0x38, 0x86, 0x7d, 0x68, 0x12, 0xeb, 0x34, 0x58, 0xcb, 0x5a, 0x82, 0xb5, 0xa8, 0xd2, 0x85, + 0x08, 0xfd, 0xc9, 0xa5, 0x2d, 0xeb, 0x29, 0xda, 0x72, 0x69, 0x04, 0xce, 0xd1, 0xbc, 0xe5, 0x6f, + 0x93, 0x60, 0x21, 0x52, 0x8e, 0x88, 0xcb, 0x1b, 0x89, 0x25, 0xbc, 0x92, 0x5a, 0xc2, 0x65, 0x89, + 0xc9, 0x53, 0x63, 0x2e, 0x1f, 0x80, 0x39, 0xc6, 0x2b, 0xfc, 0x55, 0xe3, 0xac, 0x65, 0xea, 0xd8, + 0xac, 0x25, 0xac, 0x3a, 0xeb, 0x09, 0x24, 0x94, 0x42, 0xce, 0x61, 0x49, 0xd3, 0x5f, 0x45, 0x96, + 0xf4, 0x7b, 0x05, 0xcc, 0x45, 0xcb, 0x74, 0x0a, 0x34, 0xa9, 0x9d, 0xa4, 0x49, 0xb5, 0xa3, 0xe3, + 0x32, 0x87, 0x27, 0xfd, 0xb5, 0x18, 0xf7, 0x9a, 0x13, 0xa5, 0x15, 0x76, 0xa0, 0x72, 0x0c, 0x5d, + 0xc3, 0x9e, 0x28, 0xab, 0x67, 0xfc, 0xc3, 0x94, 0xdf, 0x86, 0x42, 0x69, 0x82, 0x52, 0x15, 0x9e, + 0x2e, 0xa5, 0x9a, 0xfc, 0x62, 0x28, 0xd5, 0x0e, 0x28, 0x7b, 0x01, 0x99, 
0x2a, 0x72, 0xc8, 0xcb, + 0xa3, 0xd2, 0x59, 0xf0, 0xa8, 0x10, 0x35, 0x64, 0x50, 0x21, 0x92, 0x8c, 0x3b, 0x95, 0xbe, 0x4c, + 0xee, 0xc4, 0xc2, 0xdb, 0xc1, 0x7d, 0x8f, 0x74, 0x79, 0x2a, 0x95, 0xa3, 0xf0, 0xde, 0xe6, 0xad, + 0x48, 0x48, 0xe1, 0x2e, 0x58, 0x76, 0x5c, 0xbb, 0xe7, 0x12, 0xcf, 0x5b, 0x23, 0xb8, 0x6b, 0xe8, + 0x16, 0x09, 0x06, 0xe0, 0x57, 0xbd, 0x0b, 0xc3, 0x41, 0x7d, 0x79, 0x5b, 0xae, 0x82, 0xf2, 0x6c, + 0xd5, 0x5f, 0x95, 0xc0, 0xd9, 0xf4, 0x8e, 0x98, 0x43, 0x44, 0x94, 0x13, 0x11, 0x91, 0x97, 0x63, + 0x21, 0xea, 0xb3, 0xb4, 0xd8, 0x99, 0x3f, 0x13, 0xa6, 0xab, 0x60, 0x5e, 0x10, 0x8f, 0x40, 0x28, + 0xa8, 0x58, 0xb8, 0x3c, 0xbb, 0x49, 0x31, 0x4a, 0xeb, 0xc3, 0x37, 0xc0, 0xac, 0xcb, 0xb9, 0x55, + 0x00, 0xe0, 0xf3, 0x93, 0x6f, 0x08, 0x80, 0x59, 0x14, 0x17, 0xa2, 0xa4, 0x2e, 0xe3, 0x26, 0x11, + 0xe5, 0x08, 0x00, 0x8a, 0x49, 0x6e, 0xb2, 0x9a, 0x56, 0x40, 0x59, 0x1b, 0xb8, 0x01, 0x16, 0xfa, + 0x56, 0x16, 0xca, 0x8f, 0xb5, 0x0b, 0x02, 0x6a, 0x61, 0x37, 0xab, 0x82, 0x64, 0x76, 0xf0, 0x36, + 0x58, 0xa0, 0xc4, 0x35, 0x75, 0x0b, 0x53, 0xdd, 0xea, 0x85, 0x70, 0xfe, 0xca, 0x2f, 0x33, 0xa8, + 0x9d, 0xac, 0x18, 0xc9, 0x6c, 0xe0, 0x8f, 0x12, 0xcc, 0x67, 0x8a, 0x6f, 0x48, 0x57, 0x8e, 0xce, + 0xac, 0xb1, 0xa9, 0x8f, 0x84, 0x92, 0x95, 0xc7, 0xa5, 0x64, 0xea, 0xc7, 0x0a, 0x80, 0xd9, 0x6c, + 0x1e, 0x79, 0x4f, 0x90, 0xb1, 0x88, 0x55, 0xdb, 0xae, 0x9c, 0x2c, 0x5d, 0x1d, 0x4d, 0x96, 0xa2, + 0xcd, 0x78, 0x3c, 0xb6, 0x24, 0xa6, 0xf7, 0x74, 0xee, 0x78, 0xc6, 0x60, 0x4b, 0x91, 0x3f, 0x4f, + 0xc6, 0x96, 0x62, 0x38, 0x47, 0xb3, 0xa5, 0x7f, 0x16, 0xc0, 0x42, 0xa4, 0x3c, 0x36, 0x5b, 0x92, + 0x98, 0x3c, 0xbb, 0xe7, 0x19, 0x8f, 0xc1, 0x44, 0x53, 0xf7, 0x7f, 0xc2, 0x60, 0x22, 0x87, 0x72, + 0x18, 0xcc, 0x6f, 0x0b, 0x71, 0xaf, 0x8f, 0xc9, 0x60, 0xbe, 0x80, 0x5b, 0x8f, 0xaf, 0x1c, 0x09, + 0x52, 0x3f, 0x2a, 0x82, 0xb3, 0xe9, 0x14, 0x4c, 0x94, 0x54, 0x65, 0x64, 0x49, 0xdd, 0x06, 0x8b, + 0xf7, 0xfa, 0x86, 0x71, 0xc8, 0xc7, 0x10, 0xab, 0xab, 0x7e, 0x31, 0xfe, 0xa6, 0xb0, 0x5c, 0xfc, + 0x9e, 0x44, 0x07, 0x49, 0x2d, 0xb3, 0x15, 0xb6, 0xf8, 0xa4, 0x15, 0xb6, 0x74, 0x82, 0x0a, 0x9b, + 0x53, 0x12, 0xa7, 0x4f, 0x50, 0x12, 0xe5, 0x7c, 0x67, 0xf2, 0x44, 0x7c, 0x67, 0xec, 0xf2, 0x2a, + 0xd9, 0xf9, 0x46, 0xde, 0x2c, 0x0c, 0x15, 0xb0, 0x24, 0x3f, 0xd4, 0x43, 0x03, 0xcc, 0x99, 0xf8, + 0x41, 0xfc, 0x4a, 0x65, 0x54, 0xed, 0xe9, 0x53, 0xdd, 0x68, 0xf8, 0x6f, 0x4e, 0x8d, 0xdb, 0x16, + 0xdd, 0x72, 0x3b, 0xd4, 0xd5, 0xad, 0x9e, 0x5f, 0xab, 0x37, 0x12, 0x58, 0x28, 0x85, 0x0d, 0xef, + 0x82, 0xb2, 0x89, 0x1f, 0x74, 0xfa, 0x6e, 0x2f, 0xa8, 0xa9, 0xc7, 0xef, 0x87, 0xa7, 0xd1, 0x86, + 0x40, 0x41, 0x21, 0x9e, 0xfa, 0xb9, 0x02, 0x96, 0x73, 0x8a, 0xf1, 0xd7, 0x68, 0x94, 0x7f, 0x54, + 0xc0, 0xc5, 0xc4, 0x28, 0x59, 0x72, 0x93, 0x7b, 0x7d, 0x83, 0xe7, 0xb9, 0xe0, 0x3e, 0x57, 0xc1, + 0x8c, 0x83, 0x5d, 0xaa, 0x87, 0xfc, 0xbb, 0xd4, 0x9a, 0x1d, 0x0e, 0xea, 0x33, 0xdb, 0x41, 0x23, + 0x8a, 0xe4, 0x92, 0xb9, 0x29, 0x3c, 0xbd, 0xb9, 0x51, 0x7f, 0x59, 0x00, 0x95, 0x98, 0xcb, 0xa7, + 0xc0, 0x7a, 0xde, 0x4e, 0xb0, 0x1e, 0xe9, 0x93, 0x54, 0x7c, 0x0e, 0xf3, 0x68, 0xcf, 0x46, 0x8a, + 0xf6, 0x7c, 0x7b, 0x14, 0xd0, 0xd1, 0xbc, 0xe7, 0x5f, 0x05, 0xb0, 0x18, 0xd3, 0x8e, 0x88, 0xcf, + 0x77, 0x13, 0xc4, 0x67, 0x25, 0x45, 0x7c, 0xaa, 0x32, 0x9b, 0x67, 0xcc, 0x67, 0x34, 0xf3, 0xf9, + 0x83, 0x02, 0xe6, 0x63, 0x73, 0x77, 0x0a, 0xd4, 0x67, 0x2d, 0x49, 0x7d, 0xea, 0x23, 0xe2, 0x25, + 0x87, 0xfb, 0xdc, 0x02, 0x0b, 0x31, 0xa5, 0x2d, 0xb7, 0xab, 0x5b, 0xd8, 0xf0, 0xe0, 0x0b, 0xa0, + 0xe4, 0x51, 0xec, 0xd2, 0x20, 0xbb, 0x03, 0xdb, 0x0e, 0x6b, 0x44, 0xbe, 0x4c, 0xfd, 0xb7, 0x02, + 
0x9a, 0x31, 0xe3, 0x6d, 0xe2, 0x7a, 0xba, 0x47, 0x89, 0x45, 0xef, 0xd8, 0x46, 0xdf, 0x24, 0x6d, + 0x03, 0xeb, 0x26, 0x22, 0xac, 0x41, 0xb7, 0xad, 0x6d, 0xdb, 0xd0, 0xb5, 0x43, 0x88, 0x41, 0xe5, + 0xc3, 0x7d, 0x62, 0xad, 0x11, 0x83, 0x50, 0xf1, 0xe8, 0x32, 0xd3, 0x7a, 0x2b, 0x78, 0x83, 0x78, + 0x2f, 0x12, 0x3d, 0x1e, 0xd4, 0x57, 0xc6, 0x41, 0xe4, 0xc1, 0x19, 0xc7, 0x84, 0x3f, 0x01, 0x80, + 0x7d, 0x76, 0x34, 0x1c, 0x3c, 0xc1, 0xcc, 0xb4, 0xde, 0x0c, 0x52, 0xf8, 0xbd, 0x50, 0x72, 0xac, + 0x0e, 0x62, 0x88, 0xea, 0x6f, 0xca, 0x89, 0xa5, 0xfe, 0xda, 0xdf, 0x78, 0xfd, 0x0c, 0x2c, 0x1e, + 0x44, 0xb3, 0x13, 0x28, 0x30, 0x7a, 0xc5, 0xe2, 0xee, 0x45, 0x29, 0xbc, 0x6c, 0x5e, 0x23, 0x52, + 0x77, 0x47, 0x02, 0x87, 0xa4, 0x9d, 0xc0, 0x57, 0x41, 0x85, 0x71, 0x19, 0x5d, 0x23, 0x9b, 0xd8, + 0x0c, 0xd2, 0x30, 0x7c, 0xb3, 0xea, 0x44, 0x22, 0x14, 0xd7, 0x83, 0xfb, 0x60, 0xc1, 0xb1, 0xbb, + 0x1b, 0xd8, 0xc2, 0x3d, 0xc2, 0x2a, 0xb4, 0xbf, 0x94, 0xfc, 0x2e, 0x6c, 0xa6, 0xf5, 0x5a, 0x70, + 0xcf, 0xb1, 0x9d, 0x55, 0x61, 0x87, 0x3f, 0x49, 0x33, 0x0f, 0x02, 0x19, 0x24, 0x34, 0x33, 0x4f, + 0xac, 0xd3, 0x99, 0xff, 0xa5, 0xc8, 0xf2, 0xf1, 0x84, 0x8f, 0xac, 0x79, 0xb7, 0x7c, 0xe5, 0x13, + 0xdd, 0xf2, 0x49, 0x0e, 0x2f, 0x33, 0xc7, 0x3c, 0xbc, 0xfc, 0x49, 0x01, 0x97, 0x9c, 0x31, 0xd2, + 0xa8, 0x0a, 0xf8, 0xb4, 0xb4, 0x47, 0x4c, 0xcb, 0x38, 0x19, 0xd9, 0x5a, 0x19, 0x0e, 0xea, 0x97, + 0xc6, 0xd1, 0x44, 0x63, 0xb9, 0xc6, 0x92, 0xc6, 0x16, 0x3b, 0x5f, 0xb5, 0xc2, 0xdd, 0xbc, 0x32, + 0xc2, 0xcd, 0x60, 0xa3, 0xf4, 0xf3, 0x30, 0xf8, 0x42, 0x21, 0x8c, 0xfa, 0x71, 0x09, 0x9c, 0xcb, + 0x54, 0xeb, 0x2f, 0xf1, 0x06, 0x33, 0x73, 0x38, 0x9a, 0x3c, 0xc6, 0xe1, 0x68, 0x15, 0xcc, 0x8b, + 0x67, 0xef, 0xd4, 0xd9, 0x2a, 0x0c, 0x93, 0x76, 0x52, 0x8c, 0xd2, 0xfa, 0xb2, 0x1b, 0xd4, 0xd2, + 0x31, 0x6f, 0x50, 0xe3, 0x5e, 0x88, 0x7f, 0x6b, 0xf9, 0xf9, 0x9c, 0xf5, 0x42, 0xfc, 0x69, 0x2b, + 0xad, 0x0f, 0xdf, 0x0c, 0x92, 0x35, 0x44, 0x98, 0xe6, 0x08, 0xa9, 0xec, 0x0b, 0x01, 0x52, 0xda, + 0x4f, 0xf4, 0xb4, 0xfb, 0xbe, 0xe4, 0x69, 0x77, 0x65, 0x44, 0x98, 0x8d, 0x7f, 0xc3, 0x29, 0x3d, + 0xbf, 0x56, 0x8e, 0x7f, 0x7e, 0x55, 0xff, 0xa2, 0x80, 0xe7, 0x72, 0xb7, 0x29, 0xb8, 0x9a, 0x60, + 0x8f, 0xd7, 0x52, 0xec, 0xf1, 0xf9, 0x5c, 0xc3, 0x18, 0x85, 0x34, 0xe5, 0x97, 0x9f, 0x37, 0x47, + 0x5e, 0x7e, 0x4a, 0x4e, 0x22, 0xa3, 0x6f, 0x41, 0x5b, 0xaf, 0x3f, 0x7c, 0x54, 0x9b, 0xf8, 0xf4, + 0x51, 0x6d, 0xe2, 0xb3, 0x47, 0xb5, 0x89, 0x9f, 0x0f, 0x6b, 0xca, 0xc3, 0x61, 0x4d, 0xf9, 0x74, + 0x58, 0x53, 0x3e, 0x1b, 0xd6, 0x94, 0xbf, 0x0f, 0x6b, 0xca, 0xaf, 0x3f, 0xaf, 0x4d, 0xdc, 0x85, + 0xd9, 0xff, 0x8a, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0a, 0xea, 0xf9, 0x40, 0x2a, 0x00, + 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -1748,6 +1750,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TerminatingReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas)) + i-- + dAtA[i] = 0x48 + } if m.CollisionCount != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) i-- @@ -2054,6 +2061,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TerminatingReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas)) + i-- + dAtA[i] = 0x38 + } if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -2915,6 +2927,9 @@ func (m *DeploymentStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } 
+	if m.TerminatingReplicas != nil {
+		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+	}
 	return n
 }
 
@@ -3020,6 +3035,9 @@ func (m *ReplicaSetStatus) Size() (n int) {
 			n += 1 + l + sovGenerated(uint64(l))
 		}
 	}
+	if m.TerminatingReplicas != nil {
+		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+	}
 	return n
 }
 
@@ -3435,6 +3453,7 @@ func (this *DeploymentStatus) String() string {
 		`Conditions:` + repeatedStringForConditions + `,`,
 		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
 		`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -3521,6 +3540,7 @@ func (this *ReplicaSetStatus) String() string {
 		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
 		`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
 		`Conditions:` + repeatedStringForConditions + `,`,
+		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -5941,6 +5961,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
 				}
 			}
 			m.CollisionCount = &v
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TerminatingReplicas = &v
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6873,6 +6913,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TerminatingReplicas = &v
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/tools/vendor/k8s.io/api/apps/v1/generated.proto b/tools/vendor/k8s.io/api/apps/v1/generated.proto
index 388e638f4..38c8997e9 100644
--- a/tools/vendor/k8s.io/api/apps/v1/generated.proto
+++ b/tools/vendor/k8s.io/api/apps/v1/generated.proto
@@ -318,19 +318,19 @@ message DeploymentStatus {
   // +optional
   optional int64 observedGeneration = 1;
 
-  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+  // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
   // +optional
   optional int32 replicas = 2;
 
-  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+  // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
   // +optional
   optional int32 updatedReplicas = 3;
 
-  // readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.
+  // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
   // +optional
   optional int32 readyReplicas = 7;
 
-  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+  // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
   // +optional
   optional int32 availableReplicas = 4;
 
@@ -340,6 +340,13 @@ message DeploymentStatus {
   // +optional
   optional int32 unavailableReplicas = 5;
 
+  // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+  // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+  //
+  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+  // +optional
+  optional int32 terminatingReplicas = 9;
+
   // Represents the latest available observations of a deployment's current state.
   // +patchMergeKey=type
   // +patchStrategy=merge
@@ -421,16 +428,16 @@ message ReplicaSetList {
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
   // List of ReplicaSets.
-  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
   repeated ReplicaSet items = 2;
 }
 
 // ReplicaSetSpec is the specification of a ReplicaSet.
 message ReplicaSetSpec {
-  // Replicas is the number of desired replicas.
+  // Replicas is the number of desired pods.
   // This is a pointer to distinguish between explicit zero and unspecified.
   // Defaults to 1.
-  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
   // +optional
   optional int32 replicas = 1;
 
@@ -448,29 +455,36 @@ message ReplicaSetSpec {
 
   // Template is the object that describes the pod that will be created if
   // insufficient replicas are detected.
-  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
   // +optional
   optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
 }
 
 // ReplicaSetStatus represents the current status of a ReplicaSet.
 message ReplicaSetStatus {
-  // Replicas is the most recently observed number of replicas.
-  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  // Replicas is the most recently observed number of non-terminating pods.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
   optional int32 replicas = 1;
 
-  // The number of pods that have labels matching the labels of the pod template of the replicaset.
+  // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
   // +optional
   optional int32 fullyLabeledReplicas = 2;
 
-  // readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.
+  // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
   // +optional
   optional int32 readyReplicas = 4;
 
-  // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+  // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
   // +optional
   optional int32 availableReplicas = 5;
 
+  // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+  // and have not yet reached the Failed or Succeeded .status.phase.
+  //
+  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+  // +optional
+  optional int32 terminatingReplicas = 7;
+
   // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
   // +optional
   optional int64 observedGeneration = 3;
@@ -702,6 +716,7 @@ message StatefulSetSpec {
   // the network identity of the set. Pods get DNS/hostnames that follow the
   // pattern: pod-specific-string.serviceName.default.svc.cluster.local
   // where "pod-specific-string" is managed by the StatefulSet controller.
+  // +optional
   optional string serviceName = 5;
 
   // podManagementPolicy controls how pods are created during initial scale up,
diff --git a/tools/vendor/k8s.io/api/apps/v1/types.go b/tools/vendor/k8s.io/api/apps/v1/types.go
index a68690b44..1362d875d 100644
--- a/tools/vendor/k8s.io/api/apps/v1/types.go
+++ b/tools/vendor/k8s.io/api/apps/v1/types.go
@@ -220,6 +220,7 @@ type StatefulSetSpec struct {
 	// the network identity of the set. Pods get DNS/hostnames that follow the
 	// pattern: pod-specific-string.serviceName.default.svc.cluster.local
 	// where "pod-specific-string" is managed by the StatefulSet controller.
+	// +optional
 	ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
 
 	// podManagementPolicy controls how pods are created during initial scale up,
@@ -486,19 +487,19 @@ type DeploymentStatus struct {
 	// +optional
 	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
 
-	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+	// Total number of non-terminating pods targeted by this deployment (their labels match the selector).
 	// +optional
 	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
 
-	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+	// Total number of non-terminating pods targeted by this deployment that have the desired template spec.
 	// +optional
 	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`
 
-	// readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.
+	// Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
 	// +optional
 	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`
 
-	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+	// Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
 	// +optional
 	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
 
@@ -508,6 +509,13 @@ type DeploymentStatus struct {
 	// +optional
 	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
 
+	// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+	// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+	//
+	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+	// +optional
+	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
+
 	// Represents the latest available observations of a deployment's current state.
 	// +patchMergeKey=type
 	// +patchStrategy=merge
@@ -839,16 +847,16 @@ type ReplicaSetList struct {
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
 	// List of ReplicaSets.
-	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
 	Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
 
 // ReplicaSetSpec is the specification of a ReplicaSet.
 type ReplicaSetSpec struct {
-	// Replicas is the number of desired replicas.
+	// Replicas is the number of desired pods.
 	// This is a pointer to distinguish between explicit zero and unspecified.
 	// Defaults to 1.
-	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
 	// +optional
 	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
 
@@ -866,29 +874,36 @@ type ReplicaSetSpec struct {
 
 	// Template is the object that describes the pod that will be created if
 	// insufficient replicas are detected.
-	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
 	// +optional
 	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
 }
 
 // ReplicaSetStatus represents the current status of a ReplicaSet.
 type ReplicaSetStatus struct {
-	// Replicas is the most recently observed number of replicas.
-	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	// Replicas is the most recently observed number of non-terminating pods.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
 	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
 
-	// The number of pods that have labels matching the labels of the pod template of the replicaset.
+	// The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
 	// +optional
 	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
 
-	// readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.
+	// The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
 	// +optional
 	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
 
-	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
+	// The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
 	// +optional
 	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
 
+	// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+	// and have not yet reached the Failed or Succeeded .status.phase.
+	//
+	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+	// +optional
+	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
+
 	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
 	// +optional
 	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
diff --git a/tools/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index 341ecdadb..f44ba7bc3 100644
--- a/tools/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/tools/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -177,11 +177,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
 var map_DeploymentStatus = map[string]string{
 	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
 	"observedGeneration":  "The generation observed by the deployment controller.",
-	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
-	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
-	"readyReplicas":       "readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.",
-	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+	"replicas":            "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
+	"updatedReplicas":     "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
+	"readyReplicas":       "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
+	"availableReplicas":   "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
 	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+	"terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
 	"conditions":          "Represents the latest available observations of a deployment's current state.",
 	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
 }
 
@@ -227,7 +228,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {
 var map_ReplicaSetList = map[string]string{
 	"":         "ReplicaSetList is a collection of ReplicaSets.",
 	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
-	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
 }
 
 func (ReplicaSetList) SwaggerDoc() map[string]string {
@@ -236,10 +237,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string {
 
 var map_ReplicaSetSpec = map[string]string{
 	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
-	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"replicas":        "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
 	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
 	"selector":        "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
-	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template",
 }
 
 func (ReplicaSetSpec) SwaggerDoc() map[string]string {
@@ -248,10 +249,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
 
 var map_ReplicaSetStatus = map[string]string{
 	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
-	"replicas":             "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
-	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
-	"readyReplicas":        "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.",
-	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+	"replicas":             "Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
+	"fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
+	"readyReplicas":        "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
+	"availableReplicas":    "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
+	"terminatingReplicas":  "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field.
Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", "conditions": "Represents the latest available observations of a replica set's current state.", } diff --git a/tools/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 6912986ac..9e67658ba 100644 --- a/tools/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -363,6 +363,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { *out = *in + if in.TerminatingReplicas != nil { + in, out := &in.TerminatingReplicas, &out.TerminatingReplicas + *out = new(int32) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]DeploymentCondition, len(*in)) @@ -517,6 +522,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { *out = *in + if in.TerminatingReplicas != nil { + in, out := &in.TerminatingReplicas, &out.TerminatingReplicas + *out = new(int32) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ReplicaSetCondition, len(*in)) diff --git a/tools/vendor/k8s.io/api/apps/v1beta1/doc.go b/tools/vendor/k8s.io/api/apps/v1beta1/doc.go index 38a358551..7770fab5d 100644 --- a/tools/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -19,4 +19,4 @@ limitations under the License. 
// +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v1beta1 // import "k8s.io/api/apps/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/tools/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 76e755b4a..ae84aaf48 100644 --- a/tools/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/tools/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -728,134 +728,135 @@ func init() { } var fileDescriptor_2747f709ac7c95e7 = []byte{ - // 2018 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xf7, 0x52, 0xa2, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x25, 0x61, 0x63, - 0xc4, 0x4a, 0x62, 0x2f, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x75, 0x21, 0x4a, 0x6e, 0xec, 0x40, 0x8a, - 0x94, 0x91, 0x64, 0xa3, 0xe9, 0x07, 0x32, 0x22, 0xc7, 0xd4, 0x46, 0xfb, 0x85, 0xdd, 0x21, 0x63, - 0xa2, 0x97, 0xfe, 0x01, 0x05, 0xd2, 0x73, 0xff, 0x8a, 0xf6, 0xd4, 0xa2, 0x45, 0x2f, 0x3d, 0x14, - 0x3e, 0x06, 0xbd, 0x34, 0x27, 0xa2, 0x66, 0xae, 0xed, 0xad, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, - 0xec, 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 0xcd, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x7b, 0xf3, 0xe6, - 0xbd, 0x37, 0xef, 0x0d, 0xe1, 0xfa, 0xe9, 0x7b, 0x9e, 0xa6, 0xdb, 0x4d, 0xe2, 0xe8, 0x4d, 0xe2, - 0x38, 0x5e, 0xb3, 0x7f, 0xeb, 0x98, 0x32, 0x72, 0xab, 0xd9, 0xa5, 0x16, 0x75, 0x09, 0xa3, 0x1d, - 0xcd, 0x71, 0x6d, 0x66, 0xa3, 0x25, 0x9f, 0x51, 0x23, 0x8e, 0xae, 0x71, 0x46, 0x4d, 0x32, 0x2e, - 0xdf, 0xec, 0xea, 0xec, 0xa4, 0x77, 0xac, 0xb5, 0x6d, 0xb3, 0xd9, 0xb5, 0xbb, 0x76, 0x53, 0xf0, - 0x1f, 0xf7, 0x1e, 0x8b, 0x2f, 0xf1, 0x21, 0x7e, 0xf9, 0x38, 0xcb, 0x6a, 0x4c, 0x61, 0xdb, 0x76, - 0x69, 0xb3, 0x9f, 0xd1, 0xb5, 0xfc, 0x4e, 0xc4, 0x63, 0x92, 0xf6, 0x89, 0x6e, 0x51, 0x77, 0xd0, - 0x74, 0x4e, 0xbb, 0x7c, 0xc1, 0x6b, 0x9a, 0x94, 0x91, 0x3c, 0xa9, 0x66, 0x91, 0x94, 0xdb, 0xb3, - 0x98, 0x6e, 0xd2, 0x8c, 0xc0, 0xbb, 0xe7, 0x09, 0x78, 0xed, 0x13, 0x6a, 0x92, 0x8c, 0xdc, 0xdb, - 0x45, 0x72, 0x3d, 0xa6, 0x1b, 0x4d, 0xdd, 0x62, 0x1e, 0x73, 0xd3, 0x42, 0xea, 0xbf, 0x15, 0x40, - 0x5b, 0xb6, 0xc5, 0x5c, 0xdb, 0x30, 0xa8, 0x8b, 0x69, 0x5f, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42, - 0x8d, 0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0x6b, 0xca, 0xfa, 0xec, 0xc6, 0x5b, 0x5a, 0xe4, 0xe9, - 0x10, 0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0xd6, 0xbf, 0xa5, 0xed, 0x1d, 0x7f, - 0x46, 0xdb, 0x6c, 0x97, 0x32, 0xd2, 0x42, 0x4f, 0x87, 0xab, 0x97, 0x46, 0xc3, 0x55, 0x88, 0xd6, - 0x70, 0x88, 0x8a, 0xf6, 0x60, 0x52, 0xa0, 0x57, 0x04, 0xfa, 0xcd, 0x42, 0x74, 0xb9, 0x69, 0x0d, - 0x93, 0xcf, 0xef, 0x3d, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x93, 0xdb, 0x84, 0x11, - 0x2c, 0x80, 0xd0, 0x0d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xc4, 0x9a, 0xb2, 0x3e, 0xd1, 0xba, 0x2c, - 0xb9, 0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x6a, 0x76, 0xdf, 0x3b, 0xba, 0xc7, - 0xd0, 0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8, - 0xbe, 0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xda, 0xc4, 0xfa, 0xec, 0xc6, 0x9b, 0x5a, - 0x41, 0x00, 0x6b, 0x59, 0xeb, 0x5a, 0x73, 0x12, 0xb7, 0xfa, 0x80, 0x23, 0x60, 0x1f, 0x48, 0xfd, - 0x65, 0x05, 0x60, 0x9b, 0x3a, 0x86, 0x3d, 0x30, 0xa9, 0xc5, 0x2e, 0xe0, 0xe8, 0x1e, 0xc0, 0xa4, - 0xe7, 0xd0, 0xb6, 0x3c, 0xba, 0xeb, 0x85, 0x3b, 0x88, 0x8c, 0x3a, 0x70, 0x68, 0x3b, 0x3a, 0x34, - 0xfe, 0x85, 0x05, 0x04, 0xfa, 0x18, 0xa6, 0x3c, 0x46, 0x58, 0xcf, 0x13, 0x47, 0x36, 0xbb, 0xf1, - 0x7a, 0x19, 0x30, 0x21, 0xd0, 0xaa, 0x4b, 0xb8, 0x29, 0xff, 0x1b, 0x4b, 
0x20, 0xf5, 0x6f, 0x13, - 0xb0, 0x10, 0x31, 0x6f, 0xd9, 0x56, 0x47, 0x67, 0x3c, 0xa4, 0xef, 0xc0, 0x24, 0x1b, 0x38, 0x54, - 0xf8, 0x64, 0xa6, 0x75, 0x3d, 0x30, 0xe6, 0x70, 0xe0, 0xd0, 0xe7, 0xc3, 0xd5, 0xa5, 0x1c, 0x11, - 0x4e, 0xc2, 0x42, 0x08, 0xed, 0x84, 0x76, 0x56, 0x84, 0xf8, 0x3b, 0x49, 0xe5, 0xcf, 0x87, 0xab, - 0x39, 0x05, 0x44, 0x0b, 0x91, 0x92, 0x26, 0xa2, 0xcf, 0xa0, 0x6e, 0x10, 0x8f, 0x1d, 0x39, 0x1d, - 0xc2, 0xe8, 0xa1, 0x6e, 0xd2, 0xc6, 0x94, 0xd8, 0xfd, 0x1b, 0xe5, 0x0e, 0x8a, 0x4b, 0xb4, 0xae, - 0x4a, 0x0b, 0xea, 0x3b, 0x09, 0x24, 0x9c, 0x42, 0x46, 0x7d, 0x40, 0x7c, 0xe5, 0xd0, 0x25, 0x96, - 0xe7, 0xef, 0x8a, 0xeb, 0x9b, 0x1e, 0x5b, 0xdf, 0xb2, 0xd4, 0x87, 0x76, 0x32, 0x68, 0x38, 0x47, - 0x03, 0x7a, 0x0d, 0xa6, 0x5c, 0x4a, 0x3c, 0xdb, 0x6a, 0x4c, 0x0a, 0x8f, 0x85, 0xc7, 0x85, 0xc5, - 0x2a, 0x96, 0x54, 0xf4, 0x3a, 0x4c, 0x9b, 0xd4, 0xf3, 0x48, 0x97, 0x36, 0xaa, 0x82, 0x71, 0x5e, - 0x32, 0x4e, 0xef, 0xfa, 0xcb, 0x38, 0xa0, 0xab, 0xbf, 0x57, 0xa0, 0x1e, 0x1d, 0xd3, 0x05, 0xe4, - 0xea, 0xfd, 0x64, 0xae, 0xbe, 0x5a, 0x22, 0x38, 0x0b, 0x72, 0xf4, 0x1f, 0x15, 0x40, 0x11, 0x13, - 0xb6, 0x0d, 0xe3, 0x98, 0xb4, 0x4f, 0xd1, 0x1a, 0x4c, 0x5a, 0xc4, 0x0c, 0x62, 0x32, 0x4c, 0x90, - 0x8f, 0x88, 0x49, 0xb1, 0xa0, 0xa0, 0x2f, 0x14, 0x40, 0x3d, 0x71, 0x9a, 0x9d, 0x4d, 0xcb, 0xb2, - 0x19, 0xe1, 0x0e, 0x0e, 0x0c, 0xda, 0x2a, 0x61, 0x50, 0xa0, 0x4b, 0x3b, 0xca, 0xa0, 0xdc, 0xb3, - 0x98, 0x3b, 0x88, 0x0e, 0x36, 0xcb, 0x80, 0x73, 0x54, 0xa3, 0x1f, 0x03, 0xb8, 0x12, 0xf3, 0xd0, - 0x96, 0x69, 0x5b, 0x5c, 0x03, 0x02, 0xf5, 0x5b, 0xb6, 0xf5, 0x58, 0xef, 0x46, 0x85, 0x05, 0x87, - 0x10, 0x38, 0x06, 0xb7, 0x7c, 0x0f, 0x96, 0x0a, 0xec, 0x44, 0x97, 0x61, 0xe2, 0x94, 0x0e, 0x7c, - 0x57, 0x61, 0xfe, 0x13, 0x2d, 0x42, 0xb5, 0x4f, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0xb7, - 0x2b, 0xef, 0x29, 0xea, 0x6f, 0xaa, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf3, 0xeb, 0xc1, 0x31, - 0xf4, 0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f, - 0x42, 0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9, - 0x71, 0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06, - 0x61, 0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44, - 0xf5, 0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0xee, - 0x40, 0x64, 0xdb, 0x59, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38, - 0xb4, 0x09, 0xf3, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0x83, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2, - 0xb4, 0xda, 0x5a, 0x92, 0x42, 0xf3, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x0c, 0xae, - 0xdd, 0xfb, 0xba, 0xc7, 0x6c, 0x77, 0xb0, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x8c, - 0x86, 0xab, 0x8b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51, - 0xc3, 0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0xc6, 0x0b, - 0xd3, 0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x72, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa6, 0xa4, - 0x63, 0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x23, 0x76, 0xf4, 0xca, 0x68, 0xb8, 0xba, 0xb4, 0x9f, 0xcf, - 0x82, 0x8b, 0x64, 0xd5, 0x3f, 0x4f, 0xc2, 0xe5, 0xf4, 0x1d, 0x87, 0x3e, 0x04, 0x64, 0x1f, 0x7b, - 0xd4, 0xed, 0xd3, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee, - 0x65, 0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26, - 0x09, 0x36, 0x61, 0x5e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7, - 
0xf9, 0xd1, 0x1d, 0x98, 0x73, 0x79, 0x1c, 0x84, 0x00, 0xd3, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x0e, - 0xc7, 0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0x2b, 0xa4, 0x4f, 0x74, 0x83, 0x1c, 0x1b, 0x34, 0x04, - 0x98, 0x14, 0x00, 0x2f, 0x4b, 0x80, 0x2b, 0x9b, 0x69, 0x06, 0x9c, 0x95, 0x41, 0xbb, 0xb0, 0xd0, - 0xb3, 0xb2, 0x50, 0x7e, 0x10, 0xbf, 0x22, 0xa1, 0x16, 0x8e, 0xb2, 0x2c, 0x38, 0x4f, 0x0e, 0x7d, - 0x0a, 0xd0, 0x0e, 0x6e, 0x75, 0xaf, 0x31, 0x25, 0xca, 0xf0, 0x8d, 0x12, 0xc9, 0x16, 0xb6, 0x02, - 0x51, 0x09, 0x0c, 0x97, 0x3c, 0x1c, 0xc3, 0x44, 0xb7, 0xa1, 0xde, 0xb6, 0x0d, 0x43, 0x44, 0xfe, - 0x96, 0xdd, 0xb3, 0x98, 0x08, 0xde, 0x6a, 0x0b, 0xf1, 0xcb, 0x7e, 0x2b, 0x41, 0xc1, 0x29, 0x4e, - 0xf5, 0x8f, 0x4a, 0xfc, 0x9a, 0x09, 0xd2, 0x19, 0xdd, 0x4e, 0xb4, 0x3e, 0xaf, 0xa5, 0x5a, 0x9f, - 0xab, 0x59, 0x89, 0x58, 0xe7, 0xa3, 0xc3, 0x1c, 0x0f, 0x7e, 0xdd, 0xea, 0xfa, 0x07, 0x2e, 0x4b, - 0xe2, 0x5b, 0x67, 0xa6, 0x52, 0xc8, 0x1d, 0xbb, 0x18, 0xaf, 0x88, 0x33, 0x8f, 0x13, 0x71, 0x12, - 0x59, 0xbd, 0x0b, 0xf5, 0x64, 0x1e, 0x26, 0x7a, 0x7a, 0xe5, 0xdc, 0x9e, 0xfe, 0x6b, 0x05, 0x96, - 0x0a, 0xb4, 0x23, 0x03, 0xea, 0x26, 0x79, 0x12, 0x3b, 0xe6, 0x73, 0x7b, 0x63, 0x3e, 0x35, 0x69, - 0xfe, 0xd4, 0xa4, 0x3d, 0xb0, 0xd8, 0x9e, 0x7b, 0xc0, 0x5c, 0xdd, 0xea, 0xfa, 0xe7, 0xb0, 0x9b, - 0xc0, 0xc2, 0x29, 0x6c, 0xf4, 0x09, 0xd4, 0x4c, 0xf2, 0xe4, 0xa0, 0xe7, 0x76, 0xf3, 0xfc, 0x55, - 0x4e, 0x8f, 0xb8, 0x3f, 0x76, 0x25, 0x0a, 0x0e, 0xf1, 0xd4, 0x3f, 0x29, 0xb0, 0x96, 0xd8, 0x25, - 0xaf, 0x15, 0xf4, 0x71, 0xcf, 0x38, 0xa0, 0xd1, 0x89, 0xbf, 0x09, 0x33, 0x0e, 0x71, 0x99, 0x1e, - 0xd6, 0x8b, 0x6a, 0x6b, 0x6e, 0x34, 0x5c, 0x9d, 0xd9, 0x0f, 0x16, 0x71, 0x44, 0xcf, 0xf1, 0x4d, - 0xe5, 0xc5, 0xf9, 0x46, 0xfd, 0x8f, 0x02, 0xd5, 0x83, 0x36, 0x31, 0xe8, 0x05, 0x4c, 0x2a, 0xdb, - 0x89, 0x49, 0x45, 0x2d, 0x8c, 0x59, 0x61, 0x4f, 0xe1, 0x90, 0xb2, 0x93, 0x1a, 0x52, 0xae, 0x9d, - 0x83, 0x73, 0xf6, 0x7c, 0xf2, 0x3e, 0xcc, 0x84, 0xea, 0x12, 0x45, 0x59, 0x39, 0xaf, 0x28, 0xab, - 0xbf, 0xae, 0xc0, 0x6c, 0x4c, 0xc5, 0x78, 0xd2, 0xdc, 0xdd, 0xb1, 0xbe, 0x86, 0x17, 0xae, 0x8d, - 0x32, 0x1b, 0xd1, 0x82, 0x1e, 0xc6, 0x6f, 0x17, 0xa3, 0x66, 0x21, 0xdb, 0xda, 0xdc, 0x85, 0x3a, - 0x23, 0x6e, 0x97, 0xb2, 0x80, 0x26, 0x1c, 0x36, 0x13, 0xcd, 0x2a, 0x87, 0x09, 0x2a, 0x4e, 0x71, - 0x2f, 0xdf, 0x81, 0xb9, 0x84, 0xb2, 0xb1, 0x7a, 0xbe, 0x2f, 0xb8, 0x73, 0xa2, 0x54, 0xb8, 0x80, - 0xe8, 0xfa, 0x30, 0x11, 0x5d, 0xeb, 0xc5, 0xce, 0x8c, 0x25, 0x68, 0x51, 0x8c, 0xe1, 0x54, 0x8c, - 0xbd, 0x51, 0x0a, 0xed, 0xec, 0x48, 0xfb, 0x67, 0x05, 0x16, 0x63, 0xdc, 0xd1, 0x28, 0xfc, 0xbd, - 0xc4, 0x7d, 0xb0, 0x9e, 0xba, 0x0f, 0x1a, 0x79, 0x32, 0x2f, 0x6c, 0x16, 0xce, 0x9f, 0x4f, 0x27, - 0xfe, 0x1f, 0xe7, 0xd3, 0x3f, 0x28, 0x30, 0x1f, 0xf3, 0xdd, 0x05, 0x0c, 0xa8, 0x0f, 0x92, 0x03, - 0xea, 0xb5, 0x32, 0x41, 0x53, 0x30, 0xa1, 0xde, 0x86, 0x85, 0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d, - 0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8, 0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88, - 0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea, 0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e, - 0xda, 0x46, 0xcf, 0xa4, 0x5b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9, 0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1, - 0xb7, 0x07, 0x88, 0xc0, 0xec, 0xe7, 0x27, 0xd4, 0xda, 0xa6, 0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1, - 0x07, 0x12, 0x7e, 0xf6, 0x51, 0x44, 0x7a, 0x3e, 0x5c, 0x5d, 0x2f, 0x83, 0x28, 0x22, 0x34, 0x8e, - 0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60, 0xbd, 0x1b, 0x64, 0xf4, 0xa3, 0x90, - 0x32, 0x96, 0x82, 0x18, 0xa2, 0xfa, 0xdb, 0x5a, 0xe2, 0xbc, 0xbf, 0xf1, 0x63, 0xe6, 0xcf, 0x61, - 0xb1, 0x1f, 0x79, 0x27, 
0x60, 0xe0, 0x6d, 0xf9, 0x44, 0xfa, 0xe9, 0x2e, 0x84, 0xcf, 0xf3, 0x6b, - 0xeb, 0xdb, 0x52, 0xc9, 0xe2, 0xc3, 0x1c, 0x38, 0x9c, 0xab, 0x04, 0x7d, 0x17, 0x66, 0xf9, 0x48, - 0xa3, 0xb7, 0xe9, 0x47, 0xc4, 0x0c, 0x72, 0x71, 0x21, 0x88, 0x97, 0x83, 0x88, 0x84, 0xe3, 0x7c, - 0xe8, 0x04, 0x16, 0x1c, 0xbb, 0xb3, 0x4b, 0x2c, 0xd2, 0xa5, 0xbc, 0x11, 0xf4, 0x8f, 0x52, 0xcc, - 0x9e, 0x33, 0xad, 0x77, 0x83, 0xf6, 0x7f, 0x3f, 0xcb, 0xf2, 0x9c, 0x0f, 0x71, 0xd9, 0x65, 0x11, - 0x04, 0x79, 0x90, 0xc8, 0x85, 0x7a, 0x4f, 0xf6, 0x63, 0x72, 0x14, 0xf7, 0x1f, 0xd9, 0x36, 0xca, - 0x24, 0xe5, 0x51, 0x42, 0x32, 0xba, 0x30, 0x93, 0xeb, 0x38, 0xa5, 0xa1, 0x70, 0xb4, 0xae, 0xfd, - 0x4f, 0xa3, 0x75, 0xce, 0xac, 0x3f, 0x33, 0xe6, 0xac, 0xff, 0x17, 0x05, 0xae, 0x39, 0x25, 0x72, - 0xa9, 0x01, 0xc2, 0x37, 0xf7, 0xcb, 0xf8, 0xa6, 0x4c, 0x6e, 0xb6, 0xd6, 0x47, 0xc3, 0xd5, 0x6b, - 0x65, 0x38, 0x71, 0x29, 0xfb, 0xd0, 0x43, 0xa8, 0xd9, 0xb2, 0x06, 0x36, 0x66, 0x85, 0xad, 0x37, - 0xca, 0xd8, 0x1a, 0xd4, 0x4d, 0x3f, 0x2d, 0x83, 0x2f, 0x1c, 0x62, 0xa9, 0xbf, 0xab, 0xc2, 0x95, - 0xcc, 0x0d, 0x8e, 0x7e, 0x78, 0xc6, 0x9c, 0x7f, 0xf5, 0x85, 0xcd, 0xf8, 0x99, 0x01, 0x7d, 0x62, - 0x8c, 0x01, 0x7d, 0x13, 0xe6, 0xdb, 0x3d, 0xd7, 0xa5, 0x16, 0x4b, 0x8d, 0xe7, 0x61, 0xb0, 0x6c, - 0x25, 0xc9, 0x38, 0xcd, 0x9f, 0xf7, 0xc6, 0x50, 0x1d, 0xf3, 0x8d, 0x21, 0x6e, 0x85, 0x9c, 0x13, - 0xfd, 0xd4, 0xce, 0x5a, 0x21, 0xc7, 0xc5, 0x34, 0x3f, 0x6f, 0x5a, 0x7d, 0xd4, 0x10, 0x61, 0x3a, - 0xd9, 0xb4, 0x1e, 0x25, 0xa8, 0x38, 0xc5, 0x9d, 0x33, 0xaf, 0xcf, 0x94, 0x9d, 0xd7, 0x11, 0x49, - 0xbc, 0x26, 0x80, 0xa8, 0xa3, 0x37, 0xcb, 0xc4, 0x59, 0xf9, 0xe7, 0x84, 0xdc, 0x87, 0x94, 0xd9, - 0xf1, 0x1f, 0x52, 0xd4, 0xbf, 0x2a, 0xf0, 0x72, 0x61, 0xc5, 0x42, 0x9b, 0x89, 0x96, 0xf2, 0x66, - 0xaa, 0xa5, 0xfc, 0x4e, 0xa1, 0x60, 0xac, 0xaf, 0x74, 0xf3, 0x5f, 0x1a, 0xde, 0x2f, 0xf7, 0xd2, - 0x90, 0x33, 0x05, 0x9f, 0xff, 0xe4, 0xd0, 0xfa, 0xfe, 0xd3, 0x67, 0x2b, 0x97, 0xbe, 0x7c, 0xb6, - 0x72, 0xe9, 0xab, 0x67, 0x2b, 0x97, 0x7e, 0x31, 0x5a, 0x51, 0x9e, 0x8e, 0x56, 0x94, 0x2f, 0x47, - 0x2b, 0xca, 0x57, 0xa3, 0x15, 0xe5, 0xef, 0xa3, 0x15, 0xe5, 0x57, 0x5f, 0xaf, 0x5c, 0xfa, 0x64, - 0xa9, 0xe0, 0xdf, 0xe8, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xc9, 0xe6, 0x8c, 0xa7, 0x1e, - 0x00, 0x00, + // 2041 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xdd, 0x6f, 0x1b, 0xc7, + 0x11, 0xd7, 0x51, 0xa2, 0x44, 0x8d, 0x22, 0xca, 0x5e, 0xa9, 0x16, 0xa3, 0xb4, 0x92, 0x70, 0x31, + 0x62, 0x25, 0xb1, 0x8f, 0xb1, 0x92, 0x06, 0x89, 0xdd, 0xba, 0x10, 0x25, 0x37, 0x56, 0x20, 0x45, + 0xca, 0x4a, 0xb2, 0xd1, 0xf4, 0x03, 0x59, 0x91, 0x6b, 0xea, 0xa2, 0xfb, 0xc2, 0xdd, 0x52, 0x31, + 0xd1, 0x97, 0xfe, 0x01, 0x2d, 0xd2, 0xe7, 0xfe, 0x15, 0xed, 0x53, 0x8b, 0x16, 0x7d, 0x2d, 0xfc, + 0x18, 0xf4, 0xa5, 0x79, 0x22, 0x6a, 0xe6, 0xb5, 0x7d, 0x6b, 0x5f, 0x0c, 0x14, 0x28, 0x76, 0x6f, + 0xef, 0xfb, 0x4e, 0x3a, 0x16, 0xb0, 0x80, 0xe6, 0x8d, 0xb7, 0x33, 0xf3, 0x9b, 0xd9, 0xd9, 0x99, + 0xd9, 0x99, 0x25, 0xdc, 0x38, 0x7d, 0xcf, 0xd3, 0x74, 0xbb, 0x49, 0x1c, 0xbd, 0x49, 0x1c, 0xc7, + 0x6b, 0x9e, 0xdd, 0x3e, 0xa6, 0x8c, 0xdc, 0x6e, 0x76, 0xa9, 0x45, 0x5d, 0xc2, 0x68, 0x47, 0x73, + 0x5c, 0x9b, 0xd9, 0x68, 0xd1, 0x67, 0xd4, 0x88, 0xa3, 0x6b, 0x9c, 0x51, 0x93, 0x8c, 0x4b, 0xb7, + 0xba, 0x3a, 0x3b, 0xe9, 0x1d, 0x6b, 0x6d, 0xdb, 0x6c, 0x76, 0xed, 0xae, 0xdd, 0x14, 0xfc, 0xc7, + 0xbd, 0xc7, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x3e, 0xce, 0x92, 0x1a, 0x53, 0xd8, 0xb6, 0x5d, 0xda, + 0x3c, 0xcb, 0xe8, 0x5a, 0x7a, 0x27, 0xe2, 0x31, 0x49, 0xfb, 0x44, 0xb7, 0xa8, 0xdb, 
0x6f, 0x3a, + 0xa7, 0x5d, 0xbe, 0xe0, 0x35, 0x4d, 0xca, 0x48, 0x9e, 0x54, 0xb3, 0x48, 0xca, 0xed, 0x59, 0x4c, + 0x37, 0x69, 0x46, 0xe0, 0xdd, 0x8b, 0x04, 0xbc, 0xf6, 0x09, 0x35, 0x49, 0x46, 0xee, 0xed, 0x22, + 0xb9, 0x1e, 0xd3, 0x8d, 0xa6, 0x6e, 0x31, 0x8f, 0xb9, 0x69, 0x21, 0xf5, 0xdf, 0x0a, 0xa0, 0x4d, + 0xdb, 0x62, 0xae, 0x6d, 0x18, 0xd4, 0xc5, 0xf4, 0x4c, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42, 0x8d, + 0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0xab, 0xca, 0xda, 0xcc, 0xfa, 0x5b, 0x5a, 0xe4, 0xe9, 0x10, + 0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0x76, 0x76, 0x5b, 0xdb, 0x3b, 0xfe, 0x8c, + 0xb6, 0xd9, 0x2e, 0x65, 0xa4, 0x85, 0x9e, 0x0e, 0x56, 0xc6, 0x86, 0x83, 0x15, 0x88, 0xd6, 0x70, + 0x88, 0x8a, 0xf6, 0x60, 0x42, 0xa0, 0x57, 0x04, 0xfa, 0xad, 0x42, 0x74, 0xb9, 0x69, 0x0d, 0x93, + 0xcf, 0xef, 0x3f, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x13, 0x5b, 0x84, 0x11, 0x2c, + 0x80, 0xd0, 0x4d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xf8, 0xaa, 0xb2, 0x36, 0xde, 0xba, 0x22, 0xb9, + 0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x5a, 0x76, 0xdf, 0x3b, 0xba, 0xc7, 0xd0, + 0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8, 0xbe, + 0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xea, 0xf8, 0xda, 0xcc, 0xfa, 0x9b, 0x5a, 0x41, + 0x00, 0x6b, 0x59, 0xeb, 0x5a, 0xb3, 0x12, 0xb7, 0xba, 0xcd, 0x11, 0xb0, 0x0f, 0xa4, 0xfe, 0xb2, + 0x02, 0xb0, 0x45, 0x1d, 0xc3, 0xee, 0x9b, 0xd4, 0x62, 0x97, 0x70, 0x74, 0xdb, 0x30, 0xe1, 0x39, + 0xb4, 0x2d, 0x8f, 0xee, 0x46, 0xe1, 0x0e, 0x22, 0xa3, 0x0e, 0x1c, 0xda, 0x8e, 0x0e, 0x8d, 0x7f, + 0x61, 0x01, 0x81, 0x3e, 0x86, 0x49, 0x8f, 0x11, 0xd6, 0xf3, 0xc4, 0x91, 0xcd, 0xac, 0xbf, 0x5e, + 0x06, 0x4c, 0x08, 0xb4, 0xea, 0x12, 0x6e, 0xd2, 0xff, 0xc6, 0x12, 0x48, 0xfd, 0xdb, 0x38, 0xcc, + 0x47, 0xcc, 0x9b, 0xb6, 0xd5, 0xd1, 0x19, 0x0f, 0xe9, 0xbb, 0x30, 0xc1, 0xfa, 0x0e, 0x15, 0x3e, + 0x99, 0x6e, 0xdd, 0x08, 0x8c, 0x39, 0xec, 0x3b, 0xf4, 0xf9, 0x60, 0x65, 0x31, 0x47, 0x84, 0x93, + 0xb0, 0x10, 0x42, 0x3b, 0xa1, 0x9d, 0x15, 0x21, 0xfe, 0x4e, 0x52, 0xf9, 0xf3, 0xc1, 0x4a, 0x4e, + 0x01, 0xd1, 0x42, 0xa4, 0xa4, 0x89, 0xe8, 0x33, 0xa8, 0x1b, 0xc4, 0x63, 0x47, 0x4e, 0x87, 0x30, + 0x7a, 0xa8, 0x9b, 0xb4, 0x31, 0x29, 0x76, 0xff, 0x46, 0xb9, 0x83, 0xe2, 0x12, 0xad, 0x6b, 0xd2, + 0x82, 0xfa, 0x4e, 0x02, 0x09, 0xa7, 0x90, 0xd1, 0x19, 0x20, 0xbe, 0x72, 0xe8, 0x12, 0xcb, 0xf3, + 0x77, 0xc5, 0xf5, 0x4d, 0x8d, 0xac, 0x6f, 0x49, 0xea, 0x43, 0x3b, 0x19, 0x34, 0x9c, 0xa3, 0x01, + 0xbd, 0x06, 0x93, 0x2e, 0x25, 0x9e, 0x6d, 0x35, 0x26, 0x84, 0xc7, 0xc2, 0xe3, 0xc2, 0x62, 0x15, + 0x4b, 0x2a, 0x7a, 0x1d, 0xa6, 0x4c, 0xea, 0x79, 0xa4, 0x4b, 0x1b, 0x55, 0xc1, 0x38, 0x27, 0x19, + 0xa7, 0x76, 0xfd, 0x65, 0x1c, 0xd0, 0xd5, 0x3f, 0x28, 0x50, 0x8f, 0x8e, 0xe9, 0x12, 0x72, 0xf5, + 0x41, 0x32, 0x57, 0x5f, 0x2d, 0x11, 0x9c, 0x05, 0x39, 0xfa, 0x8f, 0x0a, 0xa0, 0x88, 0x09, 0xdb, + 0x86, 0x71, 0x4c, 0xda, 0xa7, 0x68, 0x15, 0x26, 0x2c, 0x62, 0x06, 0x31, 0x19, 0x26, 0xc8, 0x47, + 0xc4, 0xa4, 0x58, 0x50, 0xd0, 0x17, 0x0a, 0xa0, 0x9e, 0x38, 0xcd, 0xce, 0x86, 0x65, 0xd9, 0x8c, + 0x70, 0x07, 0x07, 0x06, 0x6d, 0x96, 0x30, 0x28, 0xd0, 0xa5, 0x1d, 0x65, 0x50, 0xee, 0x5b, 0xcc, + 0xed, 0x47, 0x07, 0x9b, 0x65, 0xc0, 0x39, 0xaa, 0xd1, 0x8f, 0x01, 0x5c, 0x89, 0x79, 0x68, 0xcb, + 0xb4, 0x2d, 0xae, 0x01, 0x81, 0xfa, 0x4d, 0xdb, 0x7a, 0xac, 0x77, 0xa3, 0xc2, 0x82, 0x43, 0x08, + 0x1c, 0x83, 0x5b, 0xba, 0x0f, 0x8b, 0x05, 0x76, 0xa2, 0x2b, 0x30, 0x7e, 0x4a, 0xfb, 0xbe, 0xab, + 0x30, 0xff, 0x89, 0x16, 0xa0, 0x7a, 0x46, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0x77, 0x2a, + 0xef, 0x29, 
0xea, 0x6f, 0xab, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf1, 0xeb, 0xc1, 0x31, 0xf4, + 0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f, 0x42, + 0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9, 0x71, + 0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06, 0x61, + 0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44, 0xf5, + 0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0x6e, 0x5f, + 0x64, 0xdb, 0x79, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38, 0xb4, + 0x01, 0x73, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0xfd, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2, 0xb4, + 0xda, 0x5a, 0x94, 0x42, 0x73, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x08, 0xae, 0xdd, + 0x07, 0xba, 0xc7, 0x6c, 0xb7, 0xbf, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x0c, 0x07, + 0x2b, 0x0b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51, 0xc3, + 0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0x46, 0x0b, 0xd3, + 0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x74, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa2, 0xa4, 0x63, + 0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x2d, 0x76, 0xf4, 0xca, 0x70, 0xb0, 0xb2, 0xb8, 0x9f, 0xcf, 0x82, + 0x8b, 0x64, 0xd5, 0x5f, 0x55, 0xe1, 0x4a, 0xfa, 0x8e, 0x43, 0x1f, 0x02, 0xb2, 0x8f, 0x3d, 0xea, + 0x9e, 0xd1, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee, 0x65, + 0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26, 0x09, + 0x36, 0x60, 0x4e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7, 0xf9, + 0xd1, 0x5d, 0x98, 0x75, 0x79, 0x1c, 0x84, 0x00, 0x53, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x16, 0xc7, + 0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0xab, 0xe4, 0x8c, 0xe8, 0x06, 0x39, 0x36, 0x68, 0x08, 0x30, + 0x21, 0x00, 0x5e, 0x96, 0x00, 0x57, 0x37, 0xd2, 0x0c, 0x38, 0x2b, 0x83, 0x76, 0x61, 0xbe, 0x67, + 0x65, 0xa1, 0xfc, 0x20, 0x7e, 0x45, 0x42, 0xcd, 0x1f, 0x65, 0x59, 0x70, 0x9e, 0x1c, 0xda, 0x86, + 0x79, 0x46, 0x5d, 0x53, 0xb7, 0x08, 0xd3, 0xad, 0x6e, 0x08, 0xe7, 0x9f, 0xfc, 0x22, 0x87, 0x3a, + 0xcc, 0x92, 0x71, 0x9e, 0x0c, 0xfa, 0x14, 0xa0, 0x1d, 0x34, 0x08, 0x5e, 0x63, 0x52, 0x54, 0xf4, + 0x9b, 0x25, 0xf2, 0x36, 0xec, 0x2a, 0xa2, 0x6a, 0x1a, 0x2e, 0x79, 0x38, 0x86, 0x89, 0xee, 0x40, + 0xbd, 0x6d, 0x1b, 0x86, 0x48, 0xa2, 0x4d, 0xbb, 0x67, 0x31, 0x91, 0x07, 0xd5, 0x16, 0xe2, 0x7d, + 0xc3, 0x66, 0x82, 0x82, 0x53, 0x9c, 0xea, 0x9f, 0x94, 0xf8, 0x8d, 0x15, 0x54, 0x06, 0x74, 0x27, + 0xd1, 0x45, 0xbd, 0x96, 0xea, 0xa2, 0xae, 0x65, 0x25, 0x62, 0x4d, 0x94, 0x0e, 0xb3, 0x3c, 0x8f, + 0x74, 0xab, 0xeb, 0xc7, 0x8e, 0xac, 0xae, 0x6f, 0x9d, 0x9b, 0x95, 0x21, 0x77, 0xec, 0x8e, 0xbd, + 0x2a, 0xc2, 0x27, 0x4e, 0xc4, 0x49, 0x64, 0xf5, 0x1e, 0xd4, 0x93, 0x29, 0x9d, 0x18, 0x0f, 0x94, + 0x0b, 0xc7, 0x83, 0xaf, 0x15, 0x58, 0x2c, 0xd0, 0x8e, 0x0c, 0xa8, 0x9b, 0xe4, 0x49, 0x2c, 0x62, + 0x2e, 0x6c, 0xb3, 0xf9, 0x00, 0xa6, 0xf9, 0x03, 0x98, 0xb6, 0x6d, 0xb1, 0x3d, 0xf7, 0x80, 0xb9, + 0xba, 0xd5, 0xf5, 0xcf, 0x61, 0x37, 0x81, 0x85, 0x53, 0xd8, 0xe8, 0x13, 0xa8, 0x99, 0xe4, 0xc9, + 0x41, 0xcf, 0xed, 0xe6, 0xf9, 0xab, 0x9c, 0x1e, 0x71, 0x15, 0xed, 0x4a, 0x14, 0x1c, 0xe2, 0xa9, + 0x7f, 0x56, 0x60, 0x35, 0xb1, 0x4b, 0x5e, 0x76, 0xe8, 0xe3, 0x9e, 0x71, 0x40, 0xa3, 0x13, 0x7f, + 0x13, 0xa6, 0x1d, 0xe2, 0x32, 0x3d, 0x2c, 0x3d, 0xd5, 0xd6, 0xec, 0x70, 0xb0, 0x32, 0xbd, 0x1f, + 0x2c, 0xe2, 0x88, 0x9e, 0xe3, 0x9b, 
0xca, 0x8b, 0xf3, 0x8d, 0xfa, 0x1f, 0x05, 0xaa, 0x07, 0x6d, + 0x62, 0xd0, 0x4b, 0x18, 0x7a, 0xb6, 0x12, 0x43, 0x8f, 0x5a, 0x18, 0xb3, 0xc2, 0x9e, 0xc2, 0x79, + 0x67, 0x27, 0x35, 0xef, 0x5c, 0xbf, 0x00, 0xe7, 0xfc, 0x51, 0xe7, 0x7d, 0x98, 0x0e, 0xd5, 0x25, + 0xea, 0xbb, 0x72, 0x51, 0x7d, 0x57, 0x7f, 0x53, 0x81, 0x99, 0x98, 0x8a, 0xd1, 0xa4, 0xb9, 0xbb, + 0x63, 0x2d, 0x12, 0x2f, 0x5c, 0xeb, 0x65, 0x36, 0xa2, 0x05, 0xed, 0x90, 0xdf, 0x79, 0x46, 0x7d, + 0x47, 0xb6, 0x4b, 0xba, 0x07, 0x75, 0x46, 0xdc, 0x2e, 0x65, 0x01, 0x4d, 0x38, 0x6c, 0x3a, 0x1a, + 0x7b, 0x0e, 0x13, 0x54, 0x9c, 0xe2, 0x5e, 0xba, 0x0b, 0xb3, 0x09, 0x65, 0x23, 0xb5, 0x8f, 0x5f, + 0x70, 0xe7, 0x44, 0xa9, 0x70, 0x09, 0xd1, 0xf5, 0x61, 0x22, 0xba, 0xd6, 0x8a, 0x9d, 0x19, 0x4b, + 0xd0, 0xa2, 0x18, 0xc3, 0xa9, 0x18, 0x7b, 0xa3, 0x14, 0xda, 0xf9, 0x91, 0xf6, 0xcf, 0x0a, 0x2c, + 0xc4, 0xb8, 0xa3, 0xa9, 0xfa, 0x7b, 0x89, 0xfb, 0x60, 0x2d, 0x75, 0x1f, 0x34, 0xf2, 0x64, 0x5e, + 0xd8, 0x58, 0x9d, 0x3f, 0xea, 0x8e, 0xff, 0x3f, 0x8e, 0xba, 0x7f, 0x54, 0x60, 0x2e, 0xe6, 0xbb, + 0x4b, 0x98, 0x75, 0xb7, 0x93, 0xb3, 0xee, 0xf5, 0x32, 0x41, 0x53, 0x30, 0xec, 0xde, 0x81, 0xf9, + 0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d, 0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8, + 0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88, 0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea, + 0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e, 0xda, 0x46, 0xcf, 0xa4, 0x9b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9, + 0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1, 0xb7, 0xfb, 0x88, 0xc0, 0xcc, 0xe7, 0x27, 0xd4, 0xda, 0xa2, + 0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1, 0x07, 0x12, 0x7e, 0xe6, 0x51, 0x44, 0x7a, 0x3e, 0x58, 0x59, + 0x2b, 0x83, 0x28, 0x22, 0x34, 0x8e, 0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60, + 0xbd, 0x17, 0x64, 0xf4, 0xa3, 0x90, 0x32, 0x92, 0x82, 0x18, 0xa2, 0xfa, 0xbb, 0x5a, 0xe2, 0xbc, + 0xbf, 0xf1, 0x13, 0xeb, 0xcf, 0x61, 0xe1, 0x2c, 0xf2, 0x4e, 0xc0, 0xc0, 0x3b, 0xfc, 0xf1, 0xf4, + 0x2b, 0x60, 0x08, 0x9f, 0xe7, 0xd7, 0xd6, 0xb7, 0xa5, 0x92, 0x85, 0x87, 0x39, 0x70, 0x38, 0x57, + 0x09, 0xfa, 0x2e, 0xcc, 0xf0, 0xe9, 0x48, 0x6f, 0xd3, 0x8f, 0x88, 0x19, 0xe4, 0xe2, 0x7c, 0x10, + 0x2f, 0x07, 0x11, 0x09, 0xc7, 0xf9, 0xd0, 0x09, 0xcc, 0x3b, 0x76, 0x67, 0x97, 0x58, 0xa4, 0x4b, + 0x79, 0x23, 0xe8, 0x1f, 0xa5, 0x18, 0x63, 0xa7, 0x5b, 0xef, 0x06, 0x93, 0xc4, 0x7e, 0x96, 0xe5, + 0x39, 0x9f, 0x07, 0xb3, 0xcb, 0x22, 0x08, 0xf2, 0x20, 0x91, 0x0b, 0xf5, 0x9e, 0xec, 0xc7, 0xe4, + 0x54, 0xef, 0xbf, 0xd7, 0xad, 0x97, 0x49, 0xca, 0xa3, 0x84, 0x64, 0x74, 0x61, 0x26, 0xd7, 0x71, + 0x4a, 0x43, 0xe1, 0x94, 0x5e, 0xfb, 0x9f, 0xa6, 0xf4, 0x9c, 0x67, 0x83, 0xe9, 0x11, 0x9f, 0x0d, + 0xfe, 0xa2, 0xc0, 0x75, 0xa7, 0x44, 0x2e, 0x35, 0x40, 0xf8, 0xe6, 0x41, 0x19, 0xdf, 0x94, 0xc9, + 0xcd, 0xd6, 0xda, 0x70, 0xb0, 0x72, 0xbd, 0x0c, 0x27, 0x2e, 0x65, 0x1f, 0x7a, 0x08, 0x35, 0x5b, + 0xd6, 0xc0, 0xc6, 0x8c, 0xb0, 0xf5, 0x66, 0x19, 0x5b, 0x83, 0xba, 0xe9, 0xa7, 0x65, 0xf0, 0x85, + 0x43, 0x2c, 0xf5, 0xf7, 0x55, 0xb8, 0x9a, 0xb9, 0xc1, 0xd1, 0x0f, 0xcf, 0x79, 0x32, 0xb8, 0xf6, + 0xc2, 0x9e, 0x0b, 0x32, 0xb3, 0xfe, 0xf8, 0x08, 0xb3, 0xfe, 0x06, 0xcc, 0xb5, 0x7b, 0xae, 0x4b, + 0x2d, 0x96, 0x9a, 0xf4, 0xc3, 0x60, 0xd9, 0x4c, 0x92, 0x71, 0x9a, 0x3f, 0xef, 0xb9, 0xa2, 0x3a, + 0xe2, 0x73, 0x45, 0xdc, 0x0a, 0x39, 0x27, 0xfa, 0xa9, 0x9d, 0xb5, 0x42, 0x8e, 0x8b, 0x69, 0x7e, + 0xde, 0xb4, 0xfa, 0xa8, 0x21, 0xc2, 0x54, 0xb2, 0x69, 0x3d, 0x4a, 0x50, 0x71, 0x8a, 0x3b, 0x67, + 0x5e, 0x9f, 0x2e, 0x3b, 0xaf, 0x23, 0x92, 0x78, 0x4d, 0x00, 0x51, 0x47, 0x6f, 0x95, 0x89, 0xb3, + 0xf2, 0xcf, 0x09, 0xb9, 0x6f, 0x32, 0x33, 0xa3, 0xbf, 0xc9, 
0xa8, 0x7f, 0x55, 0xe0, 0xe5, 0xc2, + 0x8a, 0x85, 0x36, 0x12, 0x2d, 0xe5, 0xad, 0x54, 0x4b, 0xf9, 0x9d, 0x42, 0xc1, 0x58, 0x5f, 0xe9, + 0xe6, 0xbf, 0x34, 0xbc, 0x5f, 0xee, 0xa5, 0x21, 0x67, 0x0a, 0xbe, 0xf8, 0xc9, 0xa1, 0xf5, 0xfd, + 0xa7, 0xcf, 0x96, 0xc7, 0xbe, 0x7c, 0xb6, 0x3c, 0xf6, 0xd5, 0xb3, 0xe5, 0xb1, 0x5f, 0x0c, 0x97, + 0x95, 0xa7, 0xc3, 0x65, 0xe5, 0xcb, 0xe1, 0xb2, 0xf2, 0xd5, 0x70, 0x59, 0xf9, 0xfb, 0x70, 0x59, + 0xf9, 0xf5, 0xd7, 0xcb, 0x63, 0x9f, 0x2c, 0x16, 0xfc, 0xb1, 0xfd, 0xdf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x40, 0xa4, 0x4b, 0xb9, 0xf2, 0x1e, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -1289,6 +1290,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TerminatingReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas)) + i-- + dAtA[i] = 0x48 + } if m.CollisionCount != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) i-- @@ -2225,6 +2231,9 @@ func (m *DeploymentStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if m.TerminatingReplicas != nil { + n += 1 + sovGenerated(uint64(*m.TerminatingReplicas)) + } return n } @@ -2627,6 +2636,7 @@ func (this *DeploymentStatus) String() string { `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`, `}`, }, "") return s @@ -4337,6 +4347,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminatingReplicas = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/tools/vendor/k8s.io/api/apps/v1beta1/generated.proto b/tools/vendor/k8s.io/api/apps/v1beta1/generated.proto index 46d7bfdf9..0601efc3c 100644 --- a/tools/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/tools/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -179,33 +179,40 @@ message DeploymentSpec { // DeploymentStatus is the most recently observed status of the Deployment. message DeploymentStatus { - // observedGeneration is the generation observed by the deployment controller. + // The generation observed by the deployment controller. // +optional optional int64 observedGeneration = 1; - // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). + // Total number of non-terminating pods targeted by this deployment (their labels match the selector). // +optional optional int32 replicas = 2; - // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. + // Total number of non-terminating pods targeted by this deployment that have the desired template spec. // +optional optional int32 updatedReplicas = 3; - // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. 
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition. // +optional optional int32 readyReplicas = 7; - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment. // +optional optional int32 availableReplicas = 4; - // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of + // Total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional optional int32 unavailableReplicas = 5; - // Conditions represent the latest available observations of a deployment's current state. + // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null + // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase. + // + // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. + // +optional + optional int32 terminatingReplicas = 9; + + // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge // +listType=map @@ -455,6 +462,7 @@ message StatefulSetSpec { // the network identity of the set. Pods get DNS/hostnames that follow the // pattern: pod-specific-string.serviceName.default.svc.cluster.local // where "pod-specific-string" is managed by the StatefulSet controller. + // +optional optional string serviceName = 5; // podManagementPolicy controls how pods are created during initial scale up, diff --git a/tools/vendor/k8s.io/api/apps/v1beta1/types.go b/tools/vendor/k8s.io/api/apps/v1beta1/types.go index bc4851957..5530c990d 100644 --- a/tools/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/tools/vendor/k8s.io/api/apps/v1beta1/types.go @@ -259,6 +259,7 @@ type StatefulSetSpec struct { // the network identity of the set. Pods get DNS/hostnames that follow the // pattern: pod-specific-string.serviceName.default.svc.cluster.local // where "pod-specific-string" is managed by the StatefulSet controller. + // +optional ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` // podManagementPolicy controls how pods are created during initial scale up, @@ -548,33 +549,40 @@ type RollingUpdateDeployment struct { // DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { - // observedGeneration is the generation observed by the deployment controller. + // The generation observed by the deployment controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). + // Total number of non-terminating pods targeted by this deployment (their labels match the selector). // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. + // Total number of non-terminating pods targeted by this deployment that have the desired template spec. 
// +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition. + // Total number of non-terminating pods targeted by this Deployment with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment. // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of + // Total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - // Conditions represent the latest available observations of a deployment's current state. + // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null + // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase. + // + // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. + // +optional + TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"` + + // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge // +listType=map diff --git a/tools/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 1381d75dc..02ea5f7f2 100644 --- a/tools/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -113,13 +113,14 @@ func (DeploymentSpec) SwaggerDoc() map[string]string { var map_DeploymentStatus = map[string]string{ "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "observedGeneration is the generation observed by the deployment controller.", - "replicas": "replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. 
They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Conditions represent the latest available observations of a deployment's current state.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminating pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminating pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.", + "availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.", + "conditions": "Represents the latest available observations of a deployment's current state.", "collisionCount": "collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } diff --git a/tools/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index dd73f1a5a..e8594766c 100644 --- a/tools/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -246,6 +246,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { *out = *in + if in.TerminatingReplicas != nil { + in, out := &in.TerminatingReplicas, &out.TerminatingReplicas + *out = new(int32) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]DeploymentCondition, len(*in)) diff --git a/tools/vendor/k8s.io/api/apps/v1beta2/doc.go b/tools/vendor/k8s.io/api/apps/v1beta2/doc.go index ac91fddfd..7d28fe42d 100644 --- a/tools/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/tools/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -19,4 +19,4 @@ limitations under the License. 
// +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v1beta2 // import "k8s.io/api/apps/v1beta2" +package v1beta2 diff --git a/tools/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/tools/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 1c3d3be5b..9fcba6feb 100644 --- a/tools/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/tools/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -1017,153 +1017,155 @@ func init() { } var fileDescriptor_c423c016abf485d4 = []byte{ - // 2328 bytes of a gzipped FileDescriptorProto + // 2359 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xf7, 0xf2, 0x43, 0x26, 0x87, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x5b, 0xd2, 0x58, 0x1b, - 0xb6, 0x12, 0xdb, 0xa4, 0xad, 0x7c, 0x20, 0xb1, 0xdb, 0x04, 0xa2, 0x94, 0xda, 0x0e, 0xf4, 0xc1, - 0x0c, 0x2d, 0x07, 0x0d, 0xfa, 0xe1, 0x11, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x13, - 0xbd, 0xf4, 0x5a, 0xa0, 0x40, 0xdb, 0x6b, 0xff, 0x89, 0xa2, 0x97, 0xa2, 0x68, 0xd0, 0x4b, 0x11, - 0x04, 0x3e, 0x06, 0xbd, 0x24, 0x27, 0xa2, 0x66, 0x4e, 0x45, 0xd1, 0x5b, 0x7b, 0x31, 0x50, 0xa0, - 0x98, 0xd9, 0xd9, 0xef, 0x5d, 0x73, 0xa9, 0xd8, 0x4a, 0x13, 0xe4, 0xc6, 0x9d, 0xf7, 0xde, 0x6f, - 0xde, 0xcc, 0xbc, 0x37, 0xef, 0x37, 0x33, 0x04, 0x17, 0x1f, 0xbc, 0x6e, 0x37, 0x14, 0xa3, 0x89, - 0x4d, 0xa5, 0x89, 0x4d, 0xd3, 0x6e, 0x1e, 0x5c, 0xdb, 0x23, 0x14, 0xaf, 0x36, 0xfb, 0x44, 0x27, - 0x16, 0xa6, 0xa4, 0xd7, 0x30, 0x2d, 0x83, 0x1a, 0x70, 0xd9, 0x51, 0x6c, 0x60, 0x53, 0x69, 0x30, - 0xc5, 0x86, 0x50, 0x3c, 0x7d, 0xa5, 0xaf, 0xd0, 0xfd, 0xc1, 0x5e, 0xa3, 0x6b, 0x68, 0xcd, 0xbe, - 0xd1, 0x37, 0x9a, 0x5c, 0x7f, 0x6f, 0x70, 0x9f, 0x7f, 0xf1, 0x0f, 0xfe, 0xcb, 0xc1, 0x39, 0x2d, - 0x07, 0x3a, 0xec, 0x1a, 0x16, 0x69, 0x1e, 0x5c, 0x8b, 0xf6, 0x75, 0xfa, 0x15, 0x5f, 0x47, 0xc3, - 0xdd, 0x7d, 0x45, 0x27, 0xd6, 0xb0, 0x69, 0x3e, 0xe8, 0xb3, 0x06, 0xbb, 0xa9, 0x11, 0x8a, 0x93, - 0xac, 0x9a, 0x69, 0x56, 0xd6, 0x40, 0xa7, 0x8a, 0x46, 0x62, 0x06, 0xaf, 0x4d, 0x32, 0xb0, 0xbb, - 0xfb, 0x44, 0xc3, 0x31, 0xbb, 0x97, 0xd3, 0xec, 0x06, 0x54, 0x51, 0x9b, 0x8a, 0x4e, 0x6d, 0x6a, - 0x45, 0x8d, 0xe4, 0xff, 0x48, 0x00, 0xae, 0x1b, 0x3a, 0xb5, 0x0c, 0x55, 0x25, 0x16, 0x22, 0x07, - 0x8a, 0xad, 0x18, 0x3a, 0xbc, 0x07, 0x4a, 0x6c, 0x3c, 0x3d, 0x4c, 0x71, 0x55, 0x3a, 0x2b, 0xad, - 0x54, 0x56, 0xaf, 0x36, 0xfc, 0x99, 0xf6, 0xe0, 0x1b, 0xe6, 0x83, 0x3e, 0x6b, 0xb0, 0x1b, 0x4c, - 0xbb, 0x71, 0x70, 0xad, 0xb1, 0xb3, 0xf7, 0x01, 0xe9, 0xd2, 0x2d, 0x42, 0x71, 0x0b, 0x3e, 0x1a, - 0xd5, 0x8f, 0x8d, 0x47, 0x75, 0xe0, 0xb7, 0x21, 0x0f, 0x15, 0xee, 0x80, 0x02, 0x47, 0xcf, 0x71, - 0xf4, 0x2b, 0xa9, 0xe8, 0x62, 0xd0, 0x0d, 0x84, 0x3f, 0x7c, 0xfb, 0x21, 0x25, 0x3a, 0x73, 0xaf, - 0x75, 0x42, 0x40, 0x17, 0x36, 0x30, 0xc5, 0x88, 0x03, 0xc1, 0xcb, 0xa0, 0x64, 0x09, 0xf7, 0xab, - 0xf9, 0xb3, 0xd2, 0x4a, 0xbe, 0x75, 0x52, 0x68, 0x95, 0xdc, 0x61, 0x21, 0x4f, 0x43, 0x7e, 0x24, - 0x81, 0xa5, 0xf8, 0xb8, 0x37, 0x15, 0x9b, 0xc2, 0x1f, 0xc7, 0xc6, 0xde, 0xc8, 0x36, 0x76, 0x66, - 0xcd, 0x47, 0xee, 0x75, 0xec, 0xb6, 0x04, 0xc6, 0xdd, 0x06, 0x45, 0x85, 0x12, 0xcd, 0xae, 0xe6, - 0xce, 0xe6, 0x57, 0x2a, 0xab, 0x97, 0x1a, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x15, 0xb8, - 0xc5, 0xdb, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xbc, 0x81, 0x89, 0x66, 0xe8, 0x1d, - 0x42, 0x8f, 0x60, 0xe5, 0x6e, 0x81, 0x82, 0x6d, 0x92, 0xae, 0x58, 0xb9, 0x0b, 0xa9, 0x03, 0xf0, - 0x7c, 0xea, 0x98, 0xa4, 0xeb, 0x2f, 0x19, 0xfb, 0x42, 0x1c, 0x01, 0xb6, 0xc1, 0x8c, 0x4d, 0x31, - 0x1d, 0xd8, 0x7c, 
0xc1, 0x2a, 0xab, 0x2b, 0x19, 0xb0, 0xb8, 0x7e, 0x6b, 0x4e, 0xa0, 0xcd, 0x38, - 0xdf, 0x48, 0xe0, 0xc8, 0xff, 0xc8, 0x01, 0xe8, 0xe9, 0xae, 0x1b, 0x7a, 0x4f, 0xa1, 0x2c, 0x9c, - 0xaf, 0x83, 0x02, 0x1d, 0x9a, 0x84, 0x4f, 0x48, 0xb9, 0x75, 0xc1, 0x75, 0xe5, 0xce, 0xd0, 0x24, - 0x4f, 0x46, 0xf5, 0xa5, 0xb8, 0x05, 0x93, 0x20, 0x6e, 0x03, 0x37, 0x3d, 0x27, 0x73, 0xdc, 0xfa, - 0x95, 0x70, 0xd7, 0x4f, 0x46, 0xf5, 0x84, 0xbd, 0xa3, 0xe1, 0x21, 0x85, 0x1d, 0x84, 0x07, 0x00, - 0xaa, 0xd8, 0xa6, 0x77, 0x2c, 0xac, 0xdb, 0x4e, 0x4f, 0x8a, 0x46, 0xc4, 0xf0, 0x5f, 0xca, 0xb6, - 0x50, 0xcc, 0xa2, 0x75, 0x5a, 0x78, 0x01, 0x37, 0x63, 0x68, 0x28, 0xa1, 0x07, 0x78, 0x01, 0xcc, - 0x58, 0x04, 0xdb, 0x86, 0x5e, 0x2d, 0xf0, 0x51, 0x78, 0x13, 0x88, 0x78, 0x2b, 0x12, 0x52, 0xf8, - 0x22, 0x38, 0xae, 0x11, 0xdb, 0xc6, 0x7d, 0x52, 0x2d, 0x72, 0xc5, 0x79, 0xa1, 0x78, 0x7c, 0xcb, - 0x69, 0x46, 0xae, 0x5c, 0xfe, 0xa3, 0x04, 0x66, 0xbd, 0x99, 0x3b, 0x82, 0xcc, 0xb9, 0x19, 0xce, - 0x1c, 0x79, 0x72, 0xb0, 0xa4, 0x24, 0xcc, 0xc7, 0xf9, 0x80, 0xe3, 0x2c, 0x1c, 0xe1, 0x4f, 0x40, - 0xc9, 0x26, 0x2a, 0xe9, 0x52, 0xc3, 0x12, 0x8e, 0xbf, 0x9c, 0xd1, 0x71, 0xbc, 0x47, 0xd4, 0x8e, - 0x30, 0x6d, 0x9d, 0x60, 0x9e, 0xbb, 0x5f, 0xc8, 0x83, 0x84, 0xef, 0x82, 0x12, 0x25, 0x9a, 0xa9, - 0x62, 0x4a, 0x44, 0xd6, 0x9c, 0x0b, 0x3a, 0xcf, 0x62, 0x86, 0x81, 0xb5, 0x8d, 0xde, 0x1d, 0xa1, - 0xc6, 0x53, 0xc6, 0x9b, 0x0c, 0xb7, 0x15, 0x79, 0x30, 0xd0, 0x04, 0x73, 0x03, 0xb3, 0xc7, 0x34, - 0x29, 0xdb, 0xce, 0xfb, 0x43, 0x11, 0x43, 0x57, 0x27, 0xcf, 0xca, 0x6e, 0xc8, 0xae, 0xb5, 0x24, - 0x7a, 0x99, 0x0b, 0xb7, 0xa3, 0x08, 0x3e, 0x5c, 0x03, 0xf3, 0x9a, 0xa2, 0x23, 0x82, 0x7b, 0xc3, - 0x0e, 0xe9, 0x1a, 0x7a, 0xcf, 0xe6, 0xa1, 0x54, 0x6c, 0x2d, 0x0b, 0x80, 0xf9, 0xad, 0xb0, 0x18, - 0x45, 0xf5, 0xe1, 0x26, 0x58, 0x74, 0x37, 0xe0, 0x5b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa9, 0x68, - 0x0a, 0xad, 0xce, 0x70, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x44, 0x09, 0x72, 0x94, 0x68, 0x25, 0xff, - 0x76, 0x06, 0xcc, 0x47, 0xf6, 0x05, 0x78, 0x17, 0x2c, 0x75, 0x07, 0x96, 0x45, 0x74, 0xba, 0x3d, - 0xd0, 0xf6, 0x88, 0xd5, 0xe9, 0xee, 0x93, 0xde, 0x40, 0x25, 0x3d, 0xbe, 0xac, 0xc5, 0x56, 0x4d, - 0xf8, 0xba, 0xb4, 0x9e, 0xa8, 0x85, 0x52, 0xac, 0xe1, 0x3b, 0x00, 0xea, 0xbc, 0x69, 0x4b, 0xb1, - 0x6d, 0x0f, 0x33, 0xc7, 0x31, 0xbd, 0x54, 0xdc, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xe6, 0x63, 0x8f, - 0xd8, 0x8a, 0x45, 0x7a, 0x51, 0x1f, 0xf3, 0x61, 0x1f, 0x37, 0x12, 0xb5, 0x50, 0x8a, 0x35, 0x7c, - 0x15, 0x54, 0x9c, 0xde, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x10, 0x60, 0x95, 0x6d, 0x5f, 0x84, 0x82, - 0x7a, 0x6c, 0x68, 0xc6, 0x9e, 0x4d, 0xac, 0x03, 0xd2, 0xbb, 0xe9, 0x90, 0x03, 0x56, 0x41, 0x8b, - 0xbc, 0x82, 0x7a, 0x43, 0xdb, 0x89, 0x69, 0xa0, 0x04, 0x2b, 0x36, 0x34, 0x27, 0x6a, 0x62, 0x43, - 0x9b, 0x09, 0x0f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3b, 0xc0, - 0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x3d, 0x1e, 0x8e, 0xbd, 0xed, 0xb0, 0x18, 0x45, 0xf5, 0xe1, 0x4d, - 0x70, 0xca, 0x69, 0xda, 0xd5, 0xb1, 0x07, 0x52, 0xe2, 0x20, 0x2f, 0x08, 0x90, 0x53, 0xdb, 0x51, - 0x05, 0x14, 0xb7, 0x81, 0xd7, 0xc1, 0x5c, 0xd7, 0x50, 0x55, 0x1e, 0x8f, 0xeb, 0xc6, 0x40, 0xa7, - 0xd5, 0x32, 0x47, 0x81, 0x2c, 0x87, 0xd6, 0x43, 0x12, 0x14, 0xd1, 0x84, 0x3f, 0x03, 0xa0, 0xeb, - 0x16, 0x06, 0xbb, 0x0a, 0x26, 0x30, 0x80, 0x78, 0x59, 0xf2, 0x2b, 0xb3, 0xd7, 0x64, 0xa3, 0x00, - 0xa4, 0xfc, 0xb1, 0x04, 0x96, 0x53, 0x12, 0x1d, 0xbe, 0x15, 0x2a, 0x82, 0x97, 0x22, 0x45, 0xf0, - 0x4c, 0x8a, 0x59, 0xa0, 0x12, 0xee, 0x83, 0x59, 0x46, 0x48, 0x14, 0xbd, 0xef, 0xa8, 0x88, 0xbd, - 0xac, 0x99, 0x3a, 0x00, 0x14, 0xd4, 0xf6, 
0x77, 0xe5, 0x53, 0xe3, 0x51, 0x7d, 0x36, 0x24, 0x43, - 0x61, 0x60, 0xf9, 0x57, 0x39, 0x00, 0x36, 0x88, 0xa9, 0x1a, 0x43, 0x8d, 0xe8, 0x47, 0xc1, 0x69, - 0x6e, 0x87, 0x38, 0xcd, 0xc5, 0xf4, 0x25, 0xf1, 0x9c, 0x4a, 0x25, 0x35, 0xef, 0x46, 0x48, 0xcd, - 0x8b, 0x59, 0xc0, 0x9e, 0xce, 0x6a, 0x3e, 0xcb, 0x83, 0x05, 0x5f, 0xd9, 0xa7, 0x35, 0x37, 0x42, - 0x2b, 0x7a, 0x31, 0xb2, 0xa2, 0xcb, 0x09, 0x26, 0xcf, 0x8d, 0xd7, 0x7c, 0x00, 0xe6, 0x18, 0xeb, - 0x70, 0xd6, 0x8f, 0x73, 0x9a, 0x99, 0xa9, 0x39, 0x8d, 0x57, 0x89, 0x36, 0x43, 0x48, 0x28, 0x82, - 0x9c, 0xc2, 0xa1, 0x8e, 0x7f, 0x1d, 0x39, 0xd4, 0x9f, 0x24, 0x30, 0xe7, 0x2f, 0xd3, 0x11, 0x90, - 0xa8, 0x5b, 0x61, 0x12, 0x75, 0x2e, 0x43, 0x70, 0xa6, 0xb0, 0xa8, 0xcf, 0x0a, 0x41, 0xd7, 0x39, - 0x8d, 0x5a, 0x61, 0x47, 0x30, 0x53, 0x55, 0xba, 0xd8, 0x16, 0xf5, 0xf6, 0x84, 0x73, 0xfc, 0x72, - 0xda, 0x90, 0x27, 0x0d, 0x11, 0xae, 0xdc, 0xf3, 0x25, 0x5c, 0xf9, 0x67, 0x43, 0xb8, 0x7e, 0x04, - 0x4a, 0xb6, 0x4b, 0xb5, 0x0a, 0x1c, 0xf2, 0x52, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8, - 0x95, 0x07, 0x97, 0xc4, 0xac, 0x8a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a, - 0x3c, 0xa9, 0x4a, 0x7e, 0xa0, 0xb7, 0x79, 0x2b, 0x12, 0x52, 0xb8, 0x0b, 0x96, 0x4d, 0xcb, 0xe8, - 0x5b, 0xc4, 0xb6, 0x37, 0x08, 0xee, 0xa9, 0x8a, 0x4e, 0xdc, 0x01, 0x38, 0x35, 0xf1, 0xcc, 0x78, - 0x54, 0x5f, 0x6e, 0x27, 0xab, 0xa0, 0x34, 0x5b, 0xf9, 0xaf, 0x05, 0x70, 0x32, 0xba, 0x37, 0xa6, - 0xd0, 0x14, 0xe9, 0x50, 0x34, 0xe5, 0x72, 0x20, 0x4e, 0x1d, 0x0e, 0x17, 0xb8, 0x2a, 0x88, 0xc5, - 0xea, 0x1a, 0x98, 0x17, 0xb4, 0xc4, 0x15, 0x0a, 0xa2, 0xe6, 0x2d, 0xcf, 0x6e, 0x58, 0x8c, 0xa2, - 0xfa, 0xf0, 0x06, 0x98, 0xb5, 0x38, 0xf3, 0x72, 0x01, 0x1c, 0xf6, 0xf2, 0x1d, 0x01, 0x30, 0x8b, - 0x82, 0x42, 0x14, 0xd6, 0x65, 0xcc, 0xc5, 0x27, 0x24, 0x2e, 0x40, 0x21, 0xcc, 0x5c, 0xd6, 0xa2, - 0x0a, 0x28, 0x6e, 0x03, 0xb7, 0xc0, 0xc2, 0x40, 0x8f, 0x43, 0x39, 0xb1, 0x76, 0x46, 0x40, 0x2d, - 0xec, 0xc6, 0x55, 0x50, 0x92, 0x1d, 0xbc, 0x17, 0x22, 0x33, 0x33, 0x7c, 0x3f, 0xb9, 0x9c, 0x21, - 0x27, 0x32, 0xb3, 0x99, 0x04, 0xaa, 0x55, 0xca, 0x4a, 0xb5, 0xe4, 0x8f, 0x24, 0x00, 0xe3, 0x79, - 0x38, 0xf1, 0x26, 0x20, 0x66, 0x11, 0xa8, 0x98, 0x4a, 0x32, 0xff, 0xb9, 0x9a, 0x91, 0xff, 0xf8, - 0x1b, 0x6a, 0x36, 0x02, 0x24, 0x26, 0xfa, 0x68, 0x2e, 0x75, 0xb2, 0x12, 0x20, 0xdf, 0xa9, 0x67, - 0x40, 0x80, 0x02, 0x60, 0x4f, 0x27, 0x40, 0xff, 0xcc, 0x81, 0x05, 0x5f, 0x39, 0x33, 0x01, 0x4a, - 0x30, 0xf9, 0xf6, 0x62, 0x27, 0x1b, 0x29, 0xf1, 0xa7, 0xee, 0xff, 0x89, 0x94, 0xf8, 0x5e, 0xa5, - 0x90, 0x92, 0xdf, 0xe7, 0x82, 0xae, 0x4f, 0x49, 0x4a, 0x9e, 0xc1, 0x0d, 0xc7, 0xd7, 0x8e, 0xd7, - 0xc8, 0x9f, 0xe4, 0xc1, 0xc9, 0x68, 0x1e, 0x86, 0x0a, 0xa4, 0x34, 0xb1, 0x40, 0xb6, 0xc1, 0xe2, - 0xfd, 0x81, 0xaa, 0x0e, 0xf9, 0x18, 0x02, 0x55, 0xd2, 0x29, 0xad, 0xdf, 0x15, 0x96, 0x8b, 0x3f, - 0x4c, 0xd0, 0x41, 0x89, 0x96, 0xf1, 0x7a, 0x59, 0xf8, 0xb2, 0xf5, 0xb2, 0x78, 0x88, 0x7a, 0x99, - 0x4c, 0x39, 0xf2, 0x87, 0xa2, 0x1c, 0xd3, 0x15, 0xcb, 0x84, 0x8d, 0x6b, 0xe2, 0xd1, 0x7f, 0x2c, - 0x81, 0xa5, 0xe4, 0x03, 0x37, 0x54, 0xc1, 0x9c, 0x86, 0x1f, 0x06, 0x2f, 0x3e, 0x26, 0x15, 0x91, - 0x01, 0x55, 0xd4, 0x86, 0xf3, 0x64, 0xd4, 0xb8, 0xad, 0xd3, 0x1d, 0xab, 0x43, 0x2d, 0x45, 0xef, - 0x3b, 0x95, 0x77, 0x2b, 0x84, 0x85, 0x22, 0xd8, 0xf0, 0x7d, 0x50, 0xd2, 0xf0, 0xc3, 0xce, 0xc0, - 0xea, 0x27, 0x55, 0xc8, 0x6c, 0xfd, 0xf0, 0x04, 0xd8, 0x12, 0x28, 0xc8, 0xc3, 0x93, 0xbf, 0x90, - 0xc0, 0x72, 0x4a, 0x55, 0xfd, 0x06, 0x8d, 0xf2, 0x2f, 0x12, 0x38, 0x1b, 0x1a, 0x25, 0x4b, 0x4b, - 0x72, 0x7f, 0xa0, 0xf2, 0x0c, 0x15, 0x4c, 0xe6, 0x12, 0x28, 0x9b, 
0xd8, 0xa2, 0x8a, 0xc7, 0x83, - 0x8b, 0xad, 0xd9, 0xf1, 0xa8, 0x5e, 0x6e, 0xbb, 0x8d, 0xc8, 0x97, 0x27, 0xcc, 0x4d, 0xee, 0xf9, - 0xcd, 0x8d, 0xfc, 0x5f, 0x09, 0x14, 0x3b, 0x5d, 0xac, 0x92, 0x23, 0x20, 0x2e, 0x1b, 0x21, 0xe2, - 0x92, 0xfe, 0x28, 0xc0, 0xfd, 0x49, 0xe5, 0x2c, 0x9b, 0x11, 0xce, 0x72, 0x7e, 0x02, 0xce, 0xd3, - 0xe9, 0xca, 0x1b, 0xa0, 0xec, 0x75, 0x37, 0xdd, 0x5e, 0x2a, 0xff, 0x2e, 0x07, 0x2a, 0x81, 0x2e, - 0xa6, 0xdc, 0x89, 0xef, 0x85, 0xca, 0x0f, 0xdb, 0x63, 0x56, 0xb3, 0x0c, 0xa4, 0xe1, 0x96, 0x9a, - 0xb7, 0x75, 0x6a, 0x05, 0xcf, 0xaa, 0xf1, 0x0a, 0xf4, 0x26, 0x98, 0xa3, 0xd8, 0xea, 0x13, 0xea, - 0xca, 0xf8, 0x84, 0x95, 0xfd, 0xbb, 0x9b, 0x3b, 0x21, 0x29, 0x8a, 0x68, 0x9f, 0xbe, 0x01, 0x66, - 0x43, 0x9d, 0xc1, 0x93, 0x20, 0xff, 0x80, 0x0c, 0x1d, 0x06, 0x87, 0xd8, 0x4f, 0xb8, 0x08, 0x8a, - 0x07, 0x58, 0x1d, 0x38, 0x21, 0x5a, 0x46, 0xce, 0xc7, 0xf5, 0xdc, 0xeb, 0x92, 0xfc, 0x6b, 0x36, - 0x39, 0x7e, 0x2a, 0x1c, 0x41, 0x74, 0xbd, 0x13, 0x8a, 0xae, 0xf4, 0xf7, 0xc9, 0x60, 0x82, 0xa6, - 0xc5, 0x18, 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xc5, 0x80, - 0xb6, 0xcf, 0x8c, 0xbf, 0x1f, 0x62, 0xc6, 0x2b, 0x11, 0x66, 0x5c, 0x4d, 0xb2, 0xf9, 0x96, 0x1a, - 0x4f, 0xa6, 0xc6, 0x7f, 0x96, 0xc0, 0x7c, 0x60, 0xee, 0x8e, 0x80, 0x1b, 0xdf, 0x0e, 0x73, 0xe3, - 0xf3, 0x59, 0x82, 0x26, 0x85, 0x1c, 0x5f, 0x07, 0x0b, 0x01, 0xa5, 0x1d, 0xab, 0xa7, 0xe8, 0x58, - 0xb5, 0xe1, 0x39, 0x50, 0xb4, 0x29, 0xb6, 0xa8, 0x5b, 0x44, 0x5c, 0xdb, 0x0e, 0x6b, 0x44, 0x8e, - 0x4c, 0xfe, 0xb7, 0x04, 0x9a, 0x01, 0xe3, 0x36, 0xb1, 0x6c, 0xc5, 0xa6, 0x44, 0xa7, 0x77, 0x0d, - 0x75, 0xa0, 0x91, 0x75, 0x15, 0x2b, 0x1a, 0x22, 0xac, 0x41, 0x31, 0xf4, 0xb6, 0xa1, 0x2a, 0xdd, - 0x21, 0xc4, 0xa0, 0xf2, 0xe1, 0x3e, 0xd1, 0x37, 0x88, 0x4a, 0xa8, 0x78, 0x81, 0x2b, 0xb7, 0xde, - 0x72, 0x1f, 0xa4, 0xde, 0xf3, 0x45, 0x4f, 0x46, 0xf5, 0x95, 0x2c, 0x88, 0x3c, 0x42, 0x83, 0x98, - 0xf0, 0xa7, 0x00, 0xb0, 0x4f, 0xbe, 0x97, 0xf5, 0x44, 0xb0, 0xbe, 0xe9, 0x66, 0xf4, 0x7b, 0x9e, - 0x64, 0xaa, 0x0e, 0x02, 0x88, 0xf2, 0x1f, 0x4a, 0xa1, 0xf5, 0xfe, 0xc6, 0xdf, 0x72, 0xfe, 0x1c, - 0x2c, 0x1e, 0xf8, 0xb3, 0xe3, 0x2a, 0x30, 0xfe, 0x9d, 0x8f, 0x9e, 0xe4, 0x3d, 0xf8, 0xa4, 0x79, - 0xf5, 0x59, 0xff, 0xdd, 0x04, 0x38, 0x94, 0xd8, 0x09, 0x7c, 0x15, 0x54, 0x18, 0x6f, 0x56, 0xba, - 0x64, 0x1b, 0x6b, 0x6e, 0x2e, 0x7a, 0x0f, 0x98, 0x1d, 0x5f, 0x84, 0x82, 0x7a, 0x70, 0x1f, 0x2c, - 0x98, 0x46, 0x6f, 0x0b, 0xeb, 0xb8, 0x4f, 0x18, 0x11, 0x74, 0x96, 0x92, 0x5f, 0x7d, 0x96, 0x5b, - 0xaf, 0xb9, 0xd7, 0x5a, 0xed, 0xb8, 0xca, 0x93, 0x51, 0x7d, 0x39, 0xa1, 0x99, 0x07, 0x41, 0x12, - 0x24, 0xb4, 0x62, 0x8f, 0xee, 0xce, 0xa3, 0xc3, 0x6a, 0x96, 0xa4, 0x3c, 0xe4, 0xb3, 0x7b, 0xda, - 0xcd, 0x6e, 0xe9, 0x50, 0x37, 0xbb, 0x09, 0x47, 0xdc, 0xf2, 0x94, 0x47, 0xdc, 0x4f, 0x24, 0x70, - 0xde, 0xcc, 0x90, 0x4b, 0x55, 0xc0, 0xe7, 0xe6, 0x56, 0x96, 0xb9, 0xc9, 0x92, 0x9b, 0xad, 0x95, - 0xf1, 0xa8, 0x7e, 0x3e, 0x8b, 0x26, 0xca, 0xe4, 0x1f, 0xbc, 0x0b, 0x4a, 0x86, 0xd8, 0x03, 0xab, - 0x15, 0xee, 0xeb, 0xe5, 0x2c, 0xbe, 0xba, 0xfb, 0xa6, 0x93, 0x96, 0xee, 0x17, 0xf2, 0xb0, 0xe4, - 0x8f, 0x8a, 0xe0, 0x54, 0xac, 0x82, 0x7f, 0x85, 0xf7, 0xd7, 0xb1, 0xc3, 0x74, 0x7e, 0x8a, 0xc3, - 0xf4, 0x1a, 0x98, 0x17, 0x7f, 0x89, 0x88, 0x9c, 0xc5, 0xbd, 0x80, 0x59, 0x0f, 0x8b, 0x51, 0x54, - 0x3f, 0xe9, 0xfe, 0xbc, 0x38, 0xe5, 0xfd, 0x79, 0xd0, 0x0b, 0xf1, 0x17, 0x3f, 0x27, 0xbd, 0xe3, - 0x5e, 0x88, 0x7f, 0xfa, 0x45, 0xf5, 0x19, 0x71, 0x75, 0x50, 0x3d, 0x84, 0xe3, 0x61, 0xe2, 0xba, - 0x1b, 0x92, 0xa2, 0x88, 0xf6, 0x97, 0x7a, 0xf6, 0xc7, 0x09, 0xcf, 0xfe, 0x57, 0xb2, 0xc4, 
0x5a, - 0xf6, 0xab, 0xf2, 0xc4, 0x4b, 0x8f, 0xca, 0xf4, 0x97, 0x1e, 0xf2, 0xdf, 0x24, 0xf0, 0x42, 0xea, - 0xae, 0x05, 0xd7, 0x42, 0xb4, 0xf2, 0x4a, 0x84, 0x56, 0x7e, 0x2f, 0xd5, 0x30, 0xc0, 0x2d, 0xad, - 0xe4, 0x5b, 0xf4, 0x37, 0xb2, 0xdd, 0xa2, 0x27, 0x9c, 0x84, 0x27, 0x5f, 0xa7, 0xb7, 0x7e, 0xf0, - 0xe8, 0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d, - 0x7a, 0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa, - 0xcd, 0x17, 0xb5, 0x63, 0xef, 0x2f, 0xa7, 0xfc, 0xe9, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, - 0xa4, 0x79, 0xcd, 0x52, 0x8e, 0x2c, 0x00, 0x00, + 0x15, 0xf7, 0x92, 0xa2, 0x44, 0x0e, 0x2d, 0xc9, 0x1e, 0xa9, 0x22, 0x63, 0xb7, 0xa4, 0xb1, 0x36, + 0x6c, 0x25, 0xb6, 0x49, 0x5b, 0xf9, 0x40, 0x62, 0xb7, 0x09, 0x44, 0x29, 0xb5, 0x1d, 0x48, 0x32, + 0x33, 0xb4, 0x1c, 0x34, 0xe8, 0x87, 0x47, 0xe4, 0x98, 0xda, 0x78, 0xbf, 0xb0, 0x3b, 0x54, 0x4c, + 0xf4, 0xd2, 0x6b, 0x81, 0x16, 0x6d, 0xae, 0xfd, 0x27, 0x8a, 0x5e, 0x8a, 0xa2, 0x41, 0x6f, 0x41, + 0xe1, 0x63, 0xd0, 0x4b, 0x72, 0x22, 0x6a, 0xe6, 0x54, 0x14, 0xbd, 0xb5, 0x17, 0x03, 0x05, 0x8a, + 0x99, 0x9d, 0xfd, 0xde, 0x35, 0x97, 0x8a, 0xad, 0x34, 0x41, 0x6e, 0xdc, 0x79, 0xef, 0xfd, 0xe6, + 0xcd, 0xcc, 0x7b, 0xf3, 0x7e, 0xfb, 0xb8, 0xe0, 0xc2, 0x83, 0xd7, 0xed, 0x86, 0x62, 0x34, 0xb1, + 0xa9, 0x34, 0xb1, 0x69, 0xda, 0xcd, 0x83, 0xab, 0x7b, 0x84, 0xe2, 0xb5, 0x66, 0x9f, 0xe8, 0xc4, + 0xc2, 0x94, 0xf4, 0x1a, 0xa6, 0x65, 0x50, 0x03, 0x56, 0x1c, 0xc5, 0x06, 0x36, 0x95, 0x06, 0x53, + 0x6c, 0x08, 0xc5, 0x53, 0x97, 0xfb, 0x0a, 0xdd, 0x1f, 0xec, 0x35, 0xba, 0x86, 0xd6, 0xec, 0x1b, + 0x7d, 0xa3, 0xc9, 0xf5, 0xf7, 0x06, 0xf7, 0xf9, 0x13, 0x7f, 0xe0, 0xbf, 0x1c, 0x9c, 0x53, 0x72, + 0x60, 0xc2, 0xae, 0x61, 0x91, 0xe6, 0xc1, 0xd5, 0xe8, 0x5c, 0xa7, 0x5e, 0xf1, 0x75, 0x34, 0xdc, + 0xdd, 0x57, 0x74, 0x62, 0x0d, 0x9b, 0xe6, 0x83, 0x3e, 0x1b, 0xb0, 0x9b, 0x1a, 0xa1, 0x38, 0xc9, + 0xaa, 0x99, 0x66, 0x65, 0x0d, 0x74, 0xaa, 0x68, 0x24, 0x66, 0xf0, 0xda, 0x24, 0x03, 0xbb, 0xbb, + 0x4f, 0x34, 0x1c, 0xb3, 0x7b, 0x39, 0xcd, 0x6e, 0x40, 0x15, 0xb5, 0xa9, 0xe8, 0xd4, 0xa6, 0x56, + 0xd4, 0x48, 0xfe, 0x8f, 0x04, 0xe0, 0x86, 0xa1, 0x53, 0xcb, 0x50, 0x55, 0x62, 0x21, 0x72, 0xa0, + 0xd8, 0x8a, 0xa1, 0xc3, 0x7b, 0xa0, 0xc8, 0xd6, 0xd3, 0xc3, 0x14, 0x57, 0xa5, 0x33, 0xd2, 0x6a, + 0x79, 0xed, 0x4a, 0xc3, 0xdf, 0x69, 0x0f, 0xbe, 0x61, 0x3e, 0xe8, 0xb3, 0x01, 0xbb, 0xc1, 0xb4, + 0x1b, 0x07, 0x57, 0x1b, 0xb7, 0xf7, 0x3e, 0x20, 0x5d, 0xba, 0x4d, 0x28, 0x6e, 0xc1, 0x47, 0xa3, + 0xfa, 0xb1, 0xf1, 0xa8, 0x0e, 0xfc, 0x31, 0xe4, 0xa1, 0xc2, 0xdb, 0x60, 0x86, 0xa3, 0xe7, 0x38, + 0xfa, 0xe5, 0x54, 0x74, 0xb1, 0xe8, 0x06, 0xc2, 0x1f, 0xbe, 0xfd, 0x90, 0x12, 0x9d, 0xb9, 0xd7, + 0x3a, 0x2e, 0xa0, 0x67, 0x36, 0x31, 0xc5, 0x88, 0x03, 0xc1, 0x4b, 0xa0, 0x68, 0x09, 0xf7, 0xab, + 0xf9, 0x33, 0xd2, 0x6a, 0xbe, 0x75, 0x42, 0x68, 0x15, 0xdd, 0x65, 0x21, 0x4f, 0x43, 0x7e, 0x24, + 0x81, 0x95, 0xf8, 0xba, 0xb7, 0x14, 0x9b, 0xc2, 0x1f, 0xc7, 0xd6, 0xde, 0xc8, 0xb6, 0x76, 0x66, + 0xcd, 0x57, 0xee, 0x4d, 0xec, 0x8e, 0x04, 0xd6, 0xdd, 0x06, 0x05, 0x85, 0x12, 0xcd, 0xae, 0xe6, + 0xce, 0xe4, 0x57, 0xcb, 0x6b, 0x17, 0x1b, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x17, 0xb8, + 0x85, 0x5b, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xb4, 0x89, 0x89, 0x66, 0xe8, 0x1d, + 0x42, 0x8f, 0xe0, 0xe4, 0x6e, 0x82, 0x19, 0xdb, 0x24, 0x5d, 0x71, 0x72, 0xe7, 0x53, 0x17, 0xe0, + 0xf9, 0xd4, 0x31, 0x49, 0xd7, 0x3f, 0x32, 0xf6, 0x84, 0x38, 0x02, 0x6c, 0x83, 0x59, 0x9b, 0x62, + 0x3a, 0xb0, 0xf9, 0x81, 0x95, 0xd7, 0x56, 0x33, 0x60, 0x71, 0xfd, 
0xd6, 0x82, 0x40, 0x9b, 0x75, + 0x9e, 0x91, 0xc0, 0x91, 0xff, 0x91, 0x03, 0xd0, 0xd3, 0xdd, 0x30, 0xf4, 0x9e, 0x42, 0x59, 0x38, + 0x5f, 0x03, 0x33, 0x74, 0x68, 0x12, 0xbe, 0x21, 0xa5, 0xd6, 0x79, 0xd7, 0x95, 0x3b, 0x43, 0x93, + 0x3c, 0x19, 0xd5, 0x57, 0xe2, 0x16, 0x4c, 0x82, 0xb8, 0x0d, 0xdc, 0xf2, 0x9c, 0xcc, 0x71, 0xeb, + 0x57, 0xc2, 0x53, 0x3f, 0x19, 0xd5, 0x13, 0xee, 0x8e, 0x86, 0x87, 0x14, 0x76, 0x10, 0x1e, 0x00, + 0xa8, 0x62, 0x9b, 0xde, 0xb1, 0xb0, 0x6e, 0x3b, 0x33, 0x29, 0x1a, 0x11, 0xcb, 0x7f, 0x29, 0xdb, + 0x41, 0x31, 0x8b, 0xd6, 0x29, 0xe1, 0x05, 0xdc, 0x8a, 0xa1, 0xa1, 0x84, 0x19, 0xe0, 0x79, 0x30, + 0x6b, 0x11, 0x6c, 0x1b, 0x7a, 0x75, 0x86, 0xaf, 0xc2, 0xdb, 0x40, 0xc4, 0x47, 0x91, 0x90, 0xc2, + 0x17, 0xc1, 0x9c, 0x46, 0x6c, 0x1b, 0xf7, 0x49, 0xb5, 0xc0, 0x15, 0x17, 0x85, 0xe2, 0xdc, 0xb6, + 0x33, 0x8c, 0x5c, 0xb9, 0xfc, 0x47, 0x09, 0xcc, 0x7b, 0x3b, 0x77, 0x04, 0x99, 0x73, 0x23, 0x9c, + 0x39, 0xf2, 0xe4, 0x60, 0x49, 0x49, 0x98, 0x4f, 0xf2, 0x01, 0xc7, 0x59, 0x38, 0xc2, 0x9f, 0x80, + 0xa2, 0x4d, 0x54, 0xd2, 0xa5, 0x86, 0x25, 0x1c, 0x7f, 0x39, 0xa3, 0xe3, 0x78, 0x8f, 0xa8, 0x1d, + 0x61, 0xda, 0x3a, 0xce, 0x3c, 0x77, 0x9f, 0x90, 0x07, 0x09, 0xdf, 0x05, 0x45, 0x4a, 0x34, 0x53, + 0xc5, 0x94, 0x88, 0xac, 0x39, 0x1b, 0x74, 0x9e, 0xc5, 0x0c, 0x03, 0x6b, 0x1b, 0xbd, 0x3b, 0x42, + 0x8d, 0xa7, 0x8c, 0xb7, 0x19, 0xee, 0x28, 0xf2, 0x60, 0xa0, 0x09, 0x16, 0x06, 0x66, 0x8f, 0x69, + 0x52, 0x76, 0x9d, 0xf7, 0x87, 0x22, 0x86, 0xae, 0x4c, 0xde, 0x95, 0xdd, 0x90, 0x5d, 0x6b, 0x45, + 0xcc, 0xb2, 0x10, 0x1e, 0x47, 0x11, 0x7c, 0xb8, 0x0e, 0x16, 0x35, 0x45, 0x47, 0x04, 0xf7, 0x86, + 0x1d, 0xd2, 0x35, 0xf4, 0x9e, 0xcd, 0x43, 0xa9, 0xd0, 0xaa, 0x08, 0x80, 0xc5, 0xed, 0xb0, 0x18, + 0x45, 0xf5, 0xe1, 0x16, 0x58, 0x76, 0x2f, 0xe0, 0x9b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa5, 0x68, + 0x0a, 0xad, 0xce, 0x72, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x46, 0x09, 0x72, 0x94, 0x68, 0x25, 0x7f, + 0x34, 0x0b, 0x16, 0x23, 0xf7, 0x02, 0xbc, 0x0b, 0x56, 0xba, 0x03, 0xcb, 0x22, 0x3a, 0xdd, 0x19, + 0x68, 0x7b, 0xc4, 0xea, 0x74, 0xf7, 0x49, 0x6f, 0xa0, 0x92, 0x1e, 0x3f, 0xd6, 0x42, 0xab, 0x26, + 0x7c, 0x5d, 0xd9, 0x48, 0xd4, 0x42, 0x29, 0xd6, 0xf0, 0x1d, 0x00, 0x75, 0x3e, 0xb4, 0xad, 0xd8, + 0xb6, 0x87, 0x99, 0xe3, 0x98, 0x5e, 0x2a, 0xee, 0xc4, 0x34, 0x50, 0x82, 0x15, 0xf3, 0xb1, 0x47, + 0x6c, 0xc5, 0x22, 0xbd, 0xa8, 0x8f, 0xf9, 0xb0, 0x8f, 0x9b, 0x89, 0x5a, 0x28, 0xc5, 0x1a, 0xbe, + 0x0a, 0xca, 0xce, 0x6c, 0x7c, 0xcf, 0xc5, 0xe1, 0x2c, 0x09, 0xb0, 0xf2, 0x8e, 0x2f, 0x42, 0x41, + 0x3d, 0xb6, 0x34, 0x63, 0xcf, 0x26, 0xd6, 0x01, 0xe9, 0xdd, 0x70, 0xc8, 0x01, 0xab, 0xa0, 0x05, + 0x5e, 0x41, 0xbd, 0xa5, 0xdd, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xb6, 0x34, 0x27, 0x6a, 0x62, 0x4b, + 0x9b, 0x0d, 0x2f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3f, 0xc0, + 0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x9d, 0x0b, 0xc7, 0xde, 0x4e, 0x58, 0x8c, 0xa2, 0xfa, 0xf0, 0x06, + 0x38, 0xe9, 0x0c, 0xed, 0xea, 0xd8, 0x03, 0x29, 0x72, 0x90, 0x17, 0x04, 0xc8, 0xc9, 0x9d, 0xa8, + 0x02, 0x8a, 0xdb, 0xc0, 0x6b, 0x60, 0xa1, 0x6b, 0xa8, 0x2a, 0x8f, 0xc7, 0x0d, 0x63, 0xa0, 0xd3, + 0x6a, 0x89, 0xa3, 0x40, 0x96, 0x43, 0x1b, 0x21, 0x09, 0x8a, 0x68, 0xc2, 0x9f, 0x01, 0xd0, 0x75, + 0x0b, 0x83, 0x5d, 0x05, 0x13, 0x18, 0x40, 0xbc, 0x2c, 0xf9, 0x95, 0xd9, 0x1b, 0xb2, 0x51, 0x00, + 0x52, 0xfe, 0x44, 0x02, 0x95, 0x94, 0x44, 0x87, 0x6f, 0x85, 0x8a, 0xe0, 0xc5, 0x48, 0x11, 0x3c, + 0x9d, 0x62, 0x16, 0xa8, 0x84, 0xfb, 0x60, 0x9e, 0x11, 0x12, 0x45, 0xef, 0x3b, 0x2a, 0xe2, 0x2e, + 0x6b, 0xa6, 0x2e, 0x00, 0x05, 0xb5, 0xfd, 0x5b, 0xf9, 0xe4, 0x78, 0x54, 0x9f, 0x0f, 0xc9, 
0x50, + 0x18, 0x58, 0xfe, 0x55, 0x0e, 0x80, 0x4d, 0x62, 0xaa, 0xc6, 0x50, 0x23, 0xfa, 0x51, 0x70, 0x9a, + 0x5b, 0x21, 0x4e, 0x73, 0x21, 0xfd, 0x48, 0x3c, 0xa7, 0x52, 0x49, 0xcd, 0xbb, 0x11, 0x52, 0xf3, + 0x62, 0x16, 0xb0, 0xa7, 0xb3, 0x9a, 0xcf, 0xf2, 0x60, 0xc9, 0x57, 0xf6, 0x69, 0xcd, 0xf5, 0xd0, + 0x89, 0x5e, 0x88, 0x9c, 0x68, 0x25, 0xc1, 0xe4, 0xb9, 0xf1, 0x9a, 0x0f, 0xc0, 0x02, 0x63, 0x1d, + 0xce, 0xf9, 0x71, 0x4e, 0x33, 0x3b, 0x35, 0xa7, 0xf1, 0x2a, 0xd1, 0x56, 0x08, 0x09, 0x45, 0x90, + 0x53, 0x38, 0xd4, 0xdc, 0xd7, 0x91, 0x43, 0xfd, 0x49, 0x02, 0x0b, 0xfe, 0x31, 0x1d, 0x01, 0x89, + 0xba, 0x19, 0x26, 0x51, 0x67, 0x33, 0x04, 0x67, 0x0a, 0x8b, 0xfa, 0x6c, 0x26, 0xe8, 0x3a, 0xa7, + 0x51, 0xab, 0xec, 0x15, 0xcc, 0x54, 0x95, 0x2e, 0xb6, 0x45, 0xbd, 0x3d, 0xee, 0xbc, 0x7e, 0x39, + 0x63, 0xc8, 0x93, 0x86, 0x08, 0x57, 0xee, 0xf9, 0x12, 0xae, 0xfc, 0xb3, 0x21, 0x5c, 0x3f, 0x02, + 0x45, 0xdb, 0xa5, 0x5a, 0x33, 0x1c, 0xf2, 0x62, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8, + 0x95, 0x07, 0x97, 0xc4, 0xac, 0x0a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a, + 0x3c, 0xa9, 0x8a, 0x7e, 0xa0, 0xb7, 0xf9, 0x28, 0x12, 0x52, 0xb8, 0x0b, 0x2a, 0xa6, 0x65, 0xf4, + 0x2d, 0x62, 0xdb, 0x9b, 0x04, 0xf7, 0x54, 0x45, 0x27, 0xee, 0x02, 0x9c, 0x9a, 0x78, 0x7a, 0x3c, + 0xaa, 0x57, 0xda, 0xc9, 0x2a, 0x28, 0xcd, 0x56, 0xfe, 0x75, 0x01, 0x9c, 0x88, 0xde, 0x8d, 0x29, + 0x34, 0x45, 0x3a, 0x14, 0x4d, 0xb9, 0x14, 0x88, 0x53, 0x87, 0xc3, 0x05, 0x5a, 0x05, 0xb1, 0x58, + 0x5d, 0x07, 0x8b, 0x82, 0x96, 0xb8, 0x42, 0x41, 0xd4, 0xbc, 0xe3, 0xd9, 0x0d, 0x8b, 0x51, 0x54, + 0x1f, 0x5e, 0x07, 0xf3, 0x16, 0x67, 0x5e, 0x2e, 0x80, 0xc3, 0x5e, 0xbe, 0x23, 0x00, 0xe6, 0x51, + 0x50, 0x88, 0xc2, 0xba, 0x8c, 0xb9, 0xf8, 0x84, 0xc4, 0x05, 0x98, 0x09, 0x33, 0x97, 0xf5, 0xa8, + 0x02, 0x8a, 0xdb, 0xc0, 0x6d, 0xb0, 0x34, 0xd0, 0xe3, 0x50, 0x4e, 0xac, 0x9d, 0x16, 0x50, 0x4b, + 0xbb, 0x71, 0x15, 0x94, 0x64, 0x07, 0x6f, 0x81, 0x25, 0x4a, 0x2c, 0x4d, 0xd1, 0x31, 0x55, 0xf4, + 0xbe, 0x07, 0xe7, 0x9c, 0x7c, 0x85, 0x41, 0xdd, 0x89, 0x8b, 0x51, 0x92, 0x0d, 0xbc, 0x17, 0xe2, + 0x45, 0xb3, 0xfc, 0x6a, 0xba, 0x94, 0x21, 0xbd, 0x32, 0x13, 0xa3, 0x04, 0xd6, 0x56, 0xcc, 0xca, + 0xda, 0xe4, 0x8f, 0x25, 0x00, 0xe3, 0x29, 0x3d, 0xb1, 0xa9, 0x10, 0xb3, 0x08, 0x14, 0x5f, 0x25, + 0x99, 0x4a, 0x5d, 0xc9, 0x48, 0xa5, 0xfc, 0xbb, 0x39, 0x1b, 0x97, 0x12, 0x1b, 0x7d, 0x34, 0xfd, + 0xa1, 0xac, 0x5c, 0xca, 0x77, 0xea, 0x19, 0x70, 0xa9, 0x00, 0xd8, 0xd3, 0xb9, 0xd4, 0x3f, 0x73, + 0x60, 0xc9, 0x57, 0xce, 0xcc, 0xa5, 0x12, 0x4c, 0xbe, 0xed, 0x11, 0x65, 0xe3, 0x37, 0xfe, 0xd6, + 0xfd, 0x3f, 0xf1, 0x1b, 0xdf, 0xab, 0x14, 0x7e, 0xf3, 0xfb, 0x5c, 0xd0, 0xf5, 0x29, 0xf9, 0xcd, + 0x33, 0x68, 0x96, 0x7c, 0xed, 0x28, 0x92, 0xfc, 0xd1, 0x0c, 0x38, 0x11, 0xcd, 0xc3, 0x50, 0xad, + 0x95, 0x26, 0xd6, 0xda, 0x36, 0x58, 0xbe, 0x3f, 0x50, 0xd5, 0x21, 0x5f, 0x43, 0xa0, 0xe0, 0x3a, + 0x55, 0xfa, 0xbb, 0xc2, 0x72, 0xf9, 0x87, 0x09, 0x3a, 0x28, 0xd1, 0x32, 0x5e, 0x7a, 0x67, 0xbe, + 0x6c, 0xe9, 0x2d, 0x1c, 0xa2, 0xf4, 0xa6, 0xd4, 0xca, 0xb9, 0x43, 0xd4, 0xca, 0x64, 0x22, 0x94, + 0x3f, 0x14, 0x11, 0x9a, 0xae, 0xee, 0x26, 0xdc, 0x81, 0x13, 0x1b, 0x12, 0x63, 0x09, 0xac, 0x24, + 0xb7, 0x01, 0xa0, 0x0a, 0x16, 0x34, 0xfc, 0x30, 0xd8, 0x8e, 0x99, 0x54, 0x8f, 0x06, 0x54, 0x51, + 0x1b, 0xce, 0x1f, 0x59, 0x8d, 0x5b, 0x3a, 0xbd, 0x6d, 0x75, 0xa8, 0xa5, 0xe8, 0x7d, 0xa7, 0x88, + 0x6f, 0x87, 0xb0, 0x50, 0x04, 0x1b, 0xbe, 0x0f, 0x8a, 0x1a, 0x7e, 0xd8, 0x19, 0x58, 0xfd, 0xa4, + 0x62, 0x9b, 0x6d, 0x1e, 0x9e, 0x4b, 0xdb, 0x02, 0x05, 0x79, 0x78, 0xf2, 0x17, 0x12, 0xa8, 0xa4, + 0x14, 0xe8, 0x6f, 
0xd0, 0x2a, 0xff, 0x22, 0x81, 0x33, 0xa1, 0x55, 0xb2, 0x0c, 0x27, 0xf7, 0x07, + 0x2a, 0x4f, 0x76, 0x41, 0x8a, 0x2e, 0x82, 0x92, 0x89, 0x2d, 0xaa, 0x78, 0xec, 0xbc, 0xd0, 0x9a, + 0x1f, 0x8f, 0xea, 0xa5, 0xb6, 0x3b, 0x88, 0x7c, 0x79, 0xc2, 0xde, 0xe4, 0x9e, 0xdf, 0xde, 0xc8, + 0xff, 0x95, 0x40, 0xa1, 0xd3, 0xc5, 0x2a, 0x39, 0x02, 0x0e, 0xb4, 0x19, 0xe2, 0x40, 0xe9, 0x7f, + 0x55, 0x70, 0x7f, 0x52, 0xe9, 0xcf, 0x56, 0x84, 0xfe, 0x9c, 0x9b, 0x80, 0xf3, 0x74, 0xe6, 0xf3, + 0x06, 0x28, 0x79, 0xd3, 0x4d, 0x77, 0x2d, 0xcb, 0xbf, 0xcb, 0x81, 0x72, 0x60, 0x8a, 0x29, 0x2f, + 0xf5, 0x7b, 0xa1, 0x4a, 0xc6, 0xee, 0x98, 0xb5, 0x2c, 0x0b, 0x69, 0xb8, 0x55, 0xeb, 0x6d, 0x9d, + 0x5a, 0xc1, 0x37, 0xe8, 0x78, 0x31, 0x7b, 0x13, 0x2c, 0x50, 0x6c, 0xf5, 0x09, 0x75, 0x65, 0x7c, + 0xc3, 0x4a, 0x7e, 0x47, 0xe9, 0x4e, 0x48, 0x8a, 0x22, 0xda, 0xa7, 0xae, 0x83, 0xf9, 0xd0, 0x64, + 0xf0, 0x04, 0xc8, 0x3f, 0x20, 0x43, 0x87, 0x0c, 0x22, 0xf6, 0x13, 0x2e, 0x83, 0xc2, 0x01, 0x56, + 0x07, 0x4e, 0x88, 0x96, 0x90, 0xf3, 0x70, 0x2d, 0xf7, 0xba, 0x24, 0xff, 0x86, 0x6d, 0x8e, 0x9f, + 0x0a, 0x47, 0x10, 0x5d, 0xef, 0x84, 0xa2, 0x2b, 0xfd, 0x5f, 0xd3, 0x60, 0x82, 0xa6, 0xc5, 0x18, + 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xe5, 0x80, 0xb6, 0x4f, + 0xb2, 0xbf, 0x1f, 0x22, 0xd9, 0xab, 0x11, 0x92, 0x5d, 0x4d, 0xb2, 0xf9, 0x96, 0x65, 0x4f, 0x66, + 0xd9, 0x7f, 0x96, 0xc0, 0x62, 0x60, 0xef, 0x8e, 0x80, 0x66, 0xdf, 0x0a, 0xd3, 0xec, 0x73, 0x59, + 0x82, 0x26, 0x85, 0x67, 0x5f, 0x03, 0x4b, 0x01, 0xa5, 0xdb, 0x56, 0x4f, 0xd1, 0xb1, 0x6a, 0xc3, + 0xb3, 0xa0, 0x60, 0x53, 0x6c, 0x51, 0xb7, 0x88, 0xb8, 0xb6, 0x1d, 0x36, 0x88, 0x1c, 0x99, 0xfc, + 0x6f, 0x09, 0x34, 0x03, 0xc6, 0x6d, 0x62, 0xd9, 0x8a, 0x4d, 0x89, 0x4e, 0xef, 0x1a, 0xea, 0x40, + 0x23, 0x1b, 0x2a, 0x56, 0x34, 0x44, 0xd8, 0x80, 0x62, 0xe8, 0x6d, 0x43, 0x55, 0xba, 0x43, 0x88, + 0x41, 0xf9, 0xc3, 0x7d, 0xa2, 0x6f, 0x12, 0x95, 0x50, 0xf1, 0xbf, 0x60, 0xa9, 0xf5, 0x96, 0xfb, + 0x37, 0xd9, 0x7b, 0xbe, 0xe8, 0xc9, 0xa8, 0xbe, 0x9a, 0x05, 0x91, 0x47, 0x68, 0x10, 0x13, 0xfe, + 0x14, 0x00, 0xf6, 0xc8, 0xef, 0xb2, 0x9e, 0x08, 0xd6, 0x37, 0xdd, 0x8c, 0x7e, 0xcf, 0x93, 0x4c, + 0x35, 0x41, 0x00, 0x51, 0xfe, 0x43, 0x31, 0x74, 0xde, 0xdf, 0xf8, 0xde, 0xeb, 0xcf, 0xc1, 0xf2, + 0x81, 0xbf, 0x3b, 0xae, 0x02, 0xa3, 0xf2, 0xf9, 0x68, 0x53, 0xc0, 0x83, 0x4f, 0xda, 0x57, 0xff, + 0x05, 0xe2, 0x6e, 0x02, 0x1c, 0x4a, 0x9c, 0x04, 0xbe, 0x0a, 0xca, 0x8c, 0x37, 0x2b, 0x5d, 0xb2, + 0x83, 0x35, 0x37, 0x17, 0xbd, 0xbf, 0x55, 0x3b, 0xbe, 0x08, 0x05, 0xf5, 0xe0, 0x3e, 0x58, 0x32, + 0x8d, 0xde, 0x36, 0xd6, 0x71, 0x9f, 0x30, 0x22, 0xe8, 0x1c, 0x25, 0x6f, 0xc8, 0x96, 0x5a, 0xaf, + 0xb9, 0xcd, 0xb6, 0x76, 0x5c, 0xe5, 0xc9, 0xa8, 0x5e, 0x49, 0x18, 0xe6, 0x41, 0x90, 0x04, 0x09, + 0xad, 0xd8, 0xa7, 0x00, 0xce, 0x5f, 0x21, 0x6b, 0x59, 0x92, 0xf2, 0x90, 0x1f, 0x03, 0xa4, 0xf5, + 0x9b, 0x8b, 0x87, 0xea, 0x37, 0x27, 0xbc, 0x2d, 0x97, 0xa6, 0x7c, 0x5b, 0xfe, 0xab, 0x04, 0xce, + 0x99, 0x19, 0x72, 0xa9, 0x0a, 0xf8, 0xde, 0xdc, 0xcc, 0xb2, 0x37, 0x59, 0x72, 0xb3, 0xb5, 0x3a, + 0x1e, 0xd5, 0xcf, 0x65, 0xd1, 0x44, 0x99, 0xfc, 0x83, 0x77, 0x41, 0xd1, 0x10, 0x77, 0x60, 0xb5, + 0xcc, 0x7d, 0xbd, 0x94, 0xc5, 0x57, 0xf7, 0xde, 0x74, 0xd2, 0xd2, 0x7d, 0x42, 0x1e, 0x96, 0xfc, + 0x71, 0x01, 0x9c, 0x8c, 0x55, 0xf0, 0xaf, 0xb0, 0xab, 0x1e, 0x7b, 0x2f, 0xcf, 0x4f, 0xf1, 0x5e, + 0xbe, 0x0e, 0x16, 0xc5, 0x87, 0x1a, 0x91, 0xd7, 0x7a, 0x2f, 0x60, 0x36, 0xc2, 0x62, 0x14, 0xd5, + 0x4f, 0xea, 0xea, 0x17, 0xa6, 0xec, 0xea, 0x07, 0xbd, 0x10, 0x1f, 0x1e, 0x3a, 0xe9, 0x1d, 0xf7, + 0x42, 0x7c, 0x7f, 0x18, 0xd5, 0x67, 0xc4, 
0xd5, 0x41, 0xf5, 0x10, 0xe6, 0xc2, 0xc4, 0x75, 0x37,
+	0x24, 0x45, 0x11, 0xed, 0x2f, 0xf5, 0x31, 0x02, 0x4e, 0xf8, 0x18, 0xe1, 0x72, 0x96, 0x58, 0xcb,
+	0xde, 0x75, 0x4f, 0xec, 0x9f, 0x94, 0xa7, 0xef, 0x9f, 0xc8, 0x7f, 0x93, 0xc0, 0x0b, 0xa9, 0xb7,
+	0x16, 0x5c, 0x0f, 0xd1, 0xca, 0xcb, 0x11, 0x5a, 0xf9, 0xbd, 0x54, 0xc3, 0x00, 0xb7, 0xb4, 0x92,
+	0x1b, 0xf2, 0x6f, 0x64, 0x6b, 0xc8, 0x27, 0xbc, 0x09, 0x4f, 0xee, 0xcc, 0xb7, 0x7e, 0xf0, 0xe8,
+	0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d, 0x7a,
+	0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa, 0xed,
+	0x17, 0xb5, 0x63, 0xef, 0x57, 0x52, 0x3e, 0x85, 0xfe, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4,
+	0x01, 0x82, 0xf5, 0x24, 0x2d, 0x00, 0x00,
 }

 func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
@@ -1845,6 +1847,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.TerminatingReplicas != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+		i--
+		dAtA[i] = 0x48
+	}
 	if m.CollisionCount != nil {
 		i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount))
 		i--
@@ -2151,6 +2158,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.TerminatingReplicas != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas))
+		i--
+		dAtA[i] = 0x38
+	}
 	if len(m.Conditions) > 0 {
 		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -3146,6 +3158,9 @@ func (m *DeploymentStatus) Size() (n int) {
 	if m.CollisionCount != nil {
 		n += 1 + sovGenerated(uint64(*m.CollisionCount))
 	}
+	if m.TerminatingReplicas != nil {
+		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+	}
 	return n
 }
@@ -3251,6 +3266,9 @@ func (m *ReplicaSetStatus) Size() (n int) {
 			n += 1 + l + sovGenerated(uint64(l))
 		}
 	}
+	if m.TerminatingReplicas != nil {
+		n += 1 + sovGenerated(uint64(*m.TerminatingReplicas))
+	}
 	return n
 }
@@ -3711,6 +3729,7 @@ func (this *DeploymentStatus) String() string {
 		`Conditions:` + repeatedStringForConditions + `,`,
 		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
 		`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
+		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -3797,6 +3816,7 @@ func (this *ReplicaSetStatus) String() string {
 		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
 		`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
 		`Conditions:` + repeatedStringForConditions + `,`,
+		`TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -6261,6 +6281,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error {
 				}
 			}
 			m.CollisionCount = &v
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TerminatingReplicas = &v
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -7193,6 +7233,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TerminatingReplicas = &v
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/tools/vendor/k8s.io/api/apps/v1beta2/generated.proto b/tools/vendor/k8s.io/api/apps/v1beta2/generated.proto
index c08a4c78b..68c463e25 100644
--- a/tools/vendor/k8s.io/api/apps/v1beta2/generated.proto
+++ b/tools/vendor/k8s.io/api/apps/v1beta2/generated.proto
@@ -323,19 +323,19 @@ message DeploymentStatus {
   // +optional
   optional int64 observedGeneration = 1;

-  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+  // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
   // +optional
   optional int32 replicas = 2;

-  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+  // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
   // +optional
   optional int32 updatedReplicas = 3;

-  // readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
+  // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
   // +optional
   optional int32 readyReplicas = 7;

-  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+  // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
   // +optional
   optional int32 availableReplicas = 4;
@@ -345,6 +345,13 @@ message DeploymentStatus {
   // +optional
   optional int32 unavailableReplicas = 5;

+  // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+  // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+  //
+  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+  // +optional
+  optional int32 terminatingReplicas = 9;
+
   // Represents the latest available observations of a deployment's current state.
   // +patchMergeKey=type
   // +patchStrategy=merge
@@ -427,16 +434,16 @@ message ReplicaSetList {
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

   // List of ReplicaSets.
-  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
   repeated ReplicaSet items = 2;
 }

 // ReplicaSetSpec is the specification of a ReplicaSet.
 message ReplicaSetSpec {
-  // Replicas is the number of desired replicas.
+  // Replicas is the number of desired pods.
   // This is a pointer to distinguish between explicit zero and unspecified.
   // Defaults to 1.
-  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
   // +optional
   optional int32 replicas = 1;
@@ -454,29 +461,36 @@ message ReplicaSetSpec {
   // Template is the object that describes the pod that will be created if
   // insufficient replicas are detected.
-  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
   // +optional
   optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
 }

 // ReplicaSetStatus represents the current status of a ReplicaSet.
 message ReplicaSetStatus {
-  // Replicas is the most recently observed number of replicas.
-  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+  // Replicas is the most recently observed number of non-terminating pods.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
   optional int32 replicas = 1;

-  // The number of pods that have labels matching the labels of the pod template of the replicaset.
+  // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
   // +optional
   optional int32 fullyLabeledReplicas = 2;

-  // readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.
+  // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
   // +optional
   optional int32 readyReplicas = 4;

-  // The number of available replicas (ready for at least minReadySeconds) for this replica set.
+  // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
   // +optional
   optional int32 availableReplicas = 5;

+  // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+  // and have not yet reached the Failed or Succeeded .status.phase.
+  //
+  // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+  // +optional
+  optional int32 terminatingReplicas = 7;
+
   // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
   // +optional
   optional int64 observedGeneration = 3;
@@ -747,6 +761,7 @@ message StatefulSetSpec {
   // the network identity of the set. Pods get DNS/hostnames that follow the
   // pattern: pod-specific-string.serviceName.default.svc.cluster.local
   // where "pod-specific-string" is managed by the StatefulSet controller.
+  // +optional
   optional string serviceName = 5;

   // podManagementPolicy controls how pods are created during initial scale up,
diff --git a/tools/vendor/k8s.io/api/apps/v1beta2/types.go b/tools/vendor/k8s.io/api/apps/v1beta2/types.go
index c2624a941..491afc59f 100644
--- a/tools/vendor/k8s.io/api/apps/v1beta2/types.go
+++ b/tools/vendor/k8s.io/api/apps/v1beta2/types.go
@@ -269,6 +269,7 @@ type StatefulSetSpec struct {
 	// the network identity of the set. Pods get DNS/hostnames that follow the
 	// pattern: pod-specific-string.serviceName.default.svc.cluster.local
 	// where "pod-specific-string" is managed by the StatefulSet controller.
+	// +optional
 	ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`

 	// podManagementPolicy controls how pods are created during initial scale up,
@@ -530,19 +531,19 @@ type DeploymentStatus struct {
 	// +optional
 	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`

-	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+	// Total number of non-terminating pods targeted by this deployment (their labels match the selector).
 	// +optional
 	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`

-	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+	// Total number of non-terminating pods targeted by this deployment that have the desired template spec.
 	// +optional
 	UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"`

-	// readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.
+	// Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
 	// +optional
 	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"`

-	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+	// Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
 	// +optional
 	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"`
@@ -552,6 +553,13 @@ type DeploymentStatus struct {
 	// +optional
 	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`

+	// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+	// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+	//
+	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+	// +optional
+	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
+
 	// Represents the latest available observations of a deployment's current state.
 	// +patchMergeKey=type
 	// +patchStrategy=merge
@@ -897,16 +905,16 @@ type ReplicaSetList struct {
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

 	// List of ReplicaSets.
-	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
 	Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"`
 }

 // ReplicaSetSpec is the specification of a ReplicaSet.
 type ReplicaSetSpec struct {
-	// Replicas is the number of desired replicas.
+	// Replicas is the number of desired pods.
 	// This is a pointer to distinguish between explicit zero and unspecified.
 	// Defaults to 1.
-	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
 	// +optional
 	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
@@ -924,29 +932,36 @@ type ReplicaSetSpec struct {
 	// Template is the object that describes the pod that will be created if
 	// insufficient replicas are detected.
-	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
 	// +optional
 	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
 }

 // ReplicaSetStatus represents the current status of a ReplicaSet.
 type ReplicaSetStatus struct {
-	// Replicas is the most recently observed number of replicas.
-	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	// Replicas is the most recently observed number of non-terminating pods.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
 	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`

-	// The number of pods that have labels matching the labels of the pod template of the replicaset.
+	// The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
 	// +optional
 	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`

-	// readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.
+	// The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
 	// +optional
 	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`

-	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
+	// The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
 	// +optional
 	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`

+	// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+	// and have not yet reached the Failed or Succeeded .status.phase.
+	//
+	// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+	// +optional
+	TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
+
 	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
 	// +optional
 	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
diff --git a/tools/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
index beec4b755..408943415 100644
--- a/tools/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
+++ b/tools/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -177,11 +177,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string {
 var map_DeploymentStatus = map[string]string{
 	"":                    "DeploymentStatus is the most recently observed status of the Deployment.",
 	"observedGeneration":  "The generation observed by the deployment controller.",
-	"replicas":            "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
-	"updatedReplicas":     "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
-	"readyReplicas":       "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.",
-	"availableReplicas":   "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+	"replicas":            "Total number of non-terminating pods targeted by this deployment (their labels match the selector).",
+	"updatedReplicas":     "Total number of non-terminating pods targeted by this deployment that have the desired template spec.",
+	"readyReplicas":       "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
+	"availableReplicas":   "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
 	"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+	"terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
 	"conditions":          "Represents the latest available observations of a deployment's current state.",
 	"collisionCount":      "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
 }
@@ -227,7 +228,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {
 var map_ReplicaSetList = map[string]string{
 	"":         "ReplicaSetList is a collection of ReplicaSets.",
 	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
-	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
 }

 func (ReplicaSetList) SwaggerDoc() map[string]string {
@@ -236,10 +237,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string {
 var map_ReplicaSetSpec = map[string]string{
 	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
-	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"replicas":        "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
 	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
 	"selector":        "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
-	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template",
 }

 func (ReplicaSetSpec) SwaggerDoc() map[string]string {
@@ -248,10 +249,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string {
 var map_ReplicaSetStatus = map[string]string{
 	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
-	"replicas":             "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
-	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
-	"readyReplicas":        "readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.",
-	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+	"replicas":             "Replicas is the most recently observed number of non-terminating pods. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset",
+	"fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
+	"readyReplicas":        "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
+	"availableReplicas":    "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
+	"terminatingReplicas":  "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
 	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
 	"conditions":           "Represents the latest available observations of a replica set's current state.",
 }
diff --git a/tools/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
index cd92792db..917ad4a22 100644
--- a/tools/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
+++ b/tools/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
@@ -363,6 +363,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
 	*out = *in
+	if in.TerminatingReplicas != nil {
+		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+		*out = new(int32)
+		**out = **in
+	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
 		*out = make([]DeploymentCondition, len(*in))
@@ -517,6 +522,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
 	*out = *in
+	if in.TerminatingReplicas != nil {
+		in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
+		*out = new(int32)
+		**out = **in
+	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
 		*out = make([]ReplicaSetCondition, len(*in))
diff --git a/tools/vendor/k8s.io/api/authentication/v1/doc.go b/tools/vendor/k8s.io/api/authentication/v1/doc.go
index 3bdc89bad..dc3aed4e4 100644
--- a/tools/vendor/k8s.io/api/authentication/v1/doc.go
+++ b/tools/vendor/k8s.io/api/authentication/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
 // +k8s:openapi-gen=true
 // +k8s:prerelease-lifecycle-gen=true

-package v1 // import "k8s.io/api/authentication/v1"
+package v1
diff --git a/tools/vendor/k8s.io/api/authentication/v1alpha1/doc.go b/tools/vendor/k8s.io/api/authentication/v1alpha1/doc.go
index eb32def90..c199ccd49 100644
--- a/tools/vendor/k8s.io/api/authentication/v1alpha1/doc.go
+++ b/tools/vendor/k8s.io/api/authentication/v1alpha1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
 // +k8s:openapi-gen=true
 // +k8s:prerelease-lifecycle-gen=true

-package v1alpha1 // import "k8s.io/api/authentication/v1alpha1"
+package v1alpha1
diff --git a/tools/vendor/k8s.io/api/authentication/v1beta1/doc.go b/tools/vendor/k8s.io/api/authentication/v1beta1/doc.go
index 2a2b176e4..af63dc845 100644
--- a/tools/vendor/k8s.io/api/authentication/v1beta1/doc.go
+++ b/tools/vendor/k8s.io/api/authentication/v1beta1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
 // +k8s:openapi-gen=true
 // +k8s:prerelease-lifecycle-gen=true

-package v1beta1 // import "k8s.io/api/authentication/v1beta1"
+package v1beta1
diff --git a/tools/vendor/k8s.io/api/authorization/v1/doc.go b/tools/vendor/k8s.io/api/authorization/v1/doc.go
index 77e5a19c4..40bf8006e 100644
--- a/tools/vendor/k8s.io/api/authorization/v1/doc.go
+++ b/tools/vendor/k8s.io/api/authorization/v1/doc.go
@@ -20,4 +20,4 @@ limitations under the License.
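The tag bytes hard-coded by the generated marshalers earlier in this file (dAtA[i] = 0x48 and dAtA[i] = 0x38) follow the protobuf wire format: a field's key byte is (field_number << 3) | wire_type, and wire type 0 means varint. A minimal standalone sketch that reproduces both constants; the tagByte helper is illustrative and not part of the vendored code:

package main

import "fmt"

// tagByte computes a protobuf field key: (fieldNumber << 3) | wireType.
// Wire type 0 (varint) is what the int32 replica counters use.
func tagByte(fieldNumber, wireType uint) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	// terminatingReplicas is field 9 in DeploymentStatus and field 7 in ReplicaSetStatus.
	fmt.Printf("0x%02x\n", tagByte(9, 0)) // 0x48, matching the DeploymentStatus marshaler
	fmt.Printf("0x%02x\n", tagByte(7, 0)) // 0x38, matching the ReplicaSetStatus marshaler
}

The same numbers explain the Unmarshal hunks: case 9 and case 7 each decode a varint into v and store &v in m.TerminatingReplicas.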
// +k8s:prerelease-lifecycle-gen=true // +groupName=authorization.k8s.io -package v1 // import "k8s.io/api/authorization/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/authorization/v1beta1/doc.go b/tools/vendor/k8s.io/api/authorization/v1beta1/doc.go index c996e35cc..9f7332d49 100644 --- a/tools/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=authorization.k8s.io -package v1beta1 // import "k8s.io/api/authorization/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/autoscaling/v1/doc.go b/tools/vendor/k8s.io/api/autoscaling/v1/doc.go index d64c9cbc1..4ee085e16 100644 --- a/tools/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/tools/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -19,4 +19,4 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v1 // import "k8s.io/api/autoscaling/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/autoscaling/v2/doc.go b/tools/vendor/k8s.io/api/autoscaling/v2/doc.go index aafa2d4de..8dea6339d 100644 --- a/tools/vendor/k8s.io/api/autoscaling/v2/doc.go +++ b/tools/vendor/k8s.io/api/autoscaling/v2/doc.go @@ -19,4 +19,4 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v2 // import "k8s.io/api/autoscaling/v2" +package v2 diff --git a/tools/vendor/k8s.io/api/autoscaling/v2/generated.pb.go b/tools/vendor/k8s.io/api/autoscaling/v2/generated.pb.go index ece6dedad..40b60ebec 100644 --- a/tools/vendor/k8s.io/api/autoscaling/v2/generated.pb.go +++ b/tools/vendor/k8s.io/api/autoscaling/v2/generated.pb.go @@ -751,115 +751,116 @@ func init() { } var fileDescriptor_4d5f2c8767749221 = []byte{ - // 1722 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x8f, 0x1b, 0x49, - 0x19, 0x9f, 0xb6, 0x3d, 0xaf, 0xf2, 0x3c, 0x2b, 0x2f, 0x67, 0xa2, 0xd8, 0xa3, 0x26, 0x90, 0x07, - 0xa4, 0x4d, 0x4c, 0x88, 0x22, 0x72, 0x40, 0xd3, 0x13, 0x20, 0xa3, 0xcc, 0x30, 0x4e, 0x39, 0xc9, - 0x00, 0x02, 0x94, 0x72, 0x77, 0x8d, 0xa7, 0x18, 0xbb, 0xdb, 0xea, 0x6e, 0x3b, 0x99, 0x48, 0x48, - 0x5c, 0xb8, 0x23, 0x50, 0x84, 0xf8, 0x1f, 0x22, 0x4e, 0xa0, 0x70, 0x00, 0x09, 0x69, 0xf7, 0x90, - 0xcb, 0x4a, 0x39, 0xec, 0x21, 0x27, 0x6b, 0xe3, 0x95, 0xf6, 0xb8, 0x7f, 0x40, 0x4e, 0xab, 0x7a, - 0xf4, 0xd3, 0xaf, 0x71, 0x76, 0x32, 0xd2, 0xdc, 0x5c, 0x55, 0xdf, 0xf7, 0xfb, 0x1e, 0xf5, 0xbd, - 0xaa, 0x0d, 0xae, 0xee, 0xdf, 0x76, 0x35, 0x6a, 0x17, 0x71, 0x93, 0x16, 0x71, 0xcb, 0xb3, 0x5d, - 0x03, 0xd7, 0xa9, 0x55, 0x2b, 0xb6, 0x4b, 0xc5, 0x1a, 0xb1, 0x88, 0x83, 0x3d, 0x62, 0x6a, 0x4d, - 0xc7, 0xf6, 0x6c, 0x78, 0x5e, 0x90, 0x6a, 0xb8, 0x49, 0xb5, 0x08, 0xa9, 0xd6, 0x2e, 0xad, 0x5c, - 0xaf, 0x51, 0x6f, 0xaf, 0x55, 0xd5, 0x0c, 0xbb, 0x51, 0xac, 0xd9, 0x35, 0xbb, 0xc8, 0x39, 0xaa, - 0xad, 0x5d, 0xbe, 0xe2, 0x0b, 0xfe, 0x4b, 0x20, 0xad, 0xa8, 0x11, 0xa1, 0x86, 0xed, 0x90, 0x62, - 0xfb, 0x46, 0x52, 0xda, 0xca, 0xcd, 0x90, 0xa6, 0x81, 0x8d, 0x3d, 0x6a, 0x11, 0xe7, 0xa0, 0xd8, - 0xdc, 0xaf, 0x71, 0x26, 0x87, 0xb8, 0x76, 0xcb, 0x31, 0xc8, 0x58, 0x5c, 0x6e, 0xb1, 0x41, 0x3c, - 0xdc, 0x4f, 0x56, 0x71, 0x10, 0x97, 0xd3, 0xb2, 0x3c, 0xda, 0xe8, 0x15, 0x73, 0x6b, 0x14, 0x83, - 0x6b, 0xec, 0x91, 0x06, 0x4e, 0xf2, 0xa9, 0x5f, 0x29, 0xe0, 0xe2, 0xba, 0x6d, 0x79, 0x98, 0x71, - 0x20, 0x69, 0xc4, 0x16, 0xf1, 0x1c, 0x6a, 0x54, 0xf8, 0x6f, 0xb8, 0x0e, 0x32, 0x16, 0x6e, 0x90, - 0x9c, 0xb2, 0xaa, 0x5c, 0x99, 0xd5, 0x8b, 0xaf, 0x3b, 0x85, 
0x89, 0x6e, 0xa7, 0x90, 0xf9, 0x25, - 0x6e, 0x90, 0xf7, 0x9d, 0x42, 0xa1, 0xd7, 0x71, 0x9a, 0x0f, 0xc3, 0x48, 0x10, 0x67, 0x86, 0xdb, - 0x60, 0xca, 0xc3, 0x4e, 0x8d, 0x78, 0xb9, 0xd4, 0xaa, 0x72, 0x25, 0x5b, 0xba, 0xac, 0x0d, 0xbc, - 0x3a, 0x4d, 0x48, 0x7f, 0xc8, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45, - 0x30, 0x6b, 0xf8, 0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8, - 0x5f, 0x0f, 0x31, 0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1a, 0x43, 0x77, 0xc0, 0xb4, 0xd1, 0x72, 0x1c, - 0x62, 0xf9, 0x96, 0xfe, 0x60, 0xa4, 0xa5, 0x8f, 0x71, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94, - 0x3a, 0xbd, 0x2e, 0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x14, 0x70, 0x61, 0xdd, 0xb1, 0x5d, - 0xf7, 0x31, 0x71, 0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x3f, 0x10, 0xc3, 0x43, 0x64, 0x97, 0x38, 0xc4, - 0x32, 0x08, 0x5c, 0x05, 0x99, 0x7d, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0xfb, 0xd4, 0x32, - 0x11, 0x3f, 0x61, 0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5, - 0x00, 0xa9, 0x15, 0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xaf, 0x02, - 0x4e, 0xff, 0xec, 0x99, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6, - 0x2a, 0x65, 0x4b, 0xdf, 0x1f, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa5, 0xc4, 0x09, 0xe3, - 0x44, 0x9c, 0x20, 0x09, 0x75, 0xe4, 0x81, 0xa7, 0x7e, 0xda, 0xab, 0xbe, 0x08, 0x9f, 0x8f, 0xa2, - 0xfe, 0xc7, 0x0a, 0x27, 0xf5, 0x9f, 0x0a, 0x58, 0xba, 0x57, 0x5e, 0xab, 0x08, 0xee, 0xb2, 0x5d, - 0xa7, 0xc6, 0x01, 0xbc, 0x0d, 0x32, 0xde, 0x41, 0xd3, 0xcf, 0x80, 0x4b, 0xfe, 0x85, 0x3f, 0x3c, - 0x68, 0xb2, 0x0c, 0x38, 0x9d, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xef, 0x80, 0xc9, 0x36, 0x93, - 0xcb, 0xb5, 0x9c, 0xd4, 0xe7, 0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x07, 0xcc, 0x37, - 0x89, 0x43, 0x6d, 0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x19, 0x49, 0x3c, - 0x5f, 0x8e, 0x1e, 0xa2, 0x38, 0xad, 0xfa, 0x8f, 0x14, 0x58, 0x0c, 0x15, 0x40, 0xad, 0x3a, 0x71, - 0xe1, 0xef, 0xc1, 0x8a, 0xeb, 0xe1, 0x2a, 0xad, 0xd3, 0xe7, 0xd8, 0xa3, 0xb6, 0xb5, 0x43, 0x2d, - 0xd3, 0x7e, 0x1a, 0x47, 0xcf, 0x77, 0x3b, 0x85, 0x95, 0xca, 0x40, 0x2a, 0x34, 0x04, 0x01, 0xde, - 0x07, 0x73, 0x2e, 0xa9, 0x13, 0xc3, 0x13, 0xf6, 0x4a, 0xbf, 0x5c, 0xee, 0x76, 0x0a, 0x73, 0x95, - 0xc8, 0xfe, 0xfb, 0x4e, 0xe1, 0x54, 0xcc, 0x31, 0xe2, 0x10, 0xc5, 0x98, 0xe1, 0xaf, 0xc1, 0x4c, - 0x93, 0xfd, 0xa2, 0xc4, 0xcd, 0xa5, 0x56, 0xd3, 0x23, 0x22, 0x24, 0xe9, 0x6b, 0x7d, 0x49, 0x7a, - 0x69, 0xa6, 0x2c, 0x41, 0x50, 0x00, 0xa7, 0xbe, 0x4a, 0x81, 0x73, 0xf7, 0x6c, 0x87, 0x3e, 0x67, - 0xc9, 0x5f, 0x2f, 0xdb, 0xe6, 0x9a, 0x04, 0x23, 0x0e, 0x7c, 0x02, 0x66, 0x58, 0x93, 0x31, 0xb1, - 0x87, 0x65, 0x60, 0xfe, 0x30, 0x22, 0x36, 0xe8, 0x15, 0x5a, 0x73, 0xbf, 0xc6, 0x36, 0x5c, 0x8d, - 0x51, 0x6b, 0xed, 0x1b, 0x9a, 0xa8, 0x17, 0x5b, 0xc4, 0xc3, 0x61, 0x4a, 0x87, 0x7b, 0x28, 0x40, - 0x85, 0xbf, 0x02, 0x19, 0xb7, 0x49, 0x0c, 0x19, 0xa0, 0xb7, 0x86, 0x19, 0xd5, 0x5f, 0xc7, 0x4a, - 0x93, 0x18, 0x61, 0x79, 0x61, 0x2b, 0xc4, 0x11, 0xe1, 0x13, 0x30, 0xe5, 0xf2, 0x40, 0xe6, 0x77, - 0x99, 0x2d, 0xdd, 0xfe, 0x00, 0x6c, 0x91, 0x08, 0x41, 0x7e, 0x89, 0x35, 0x92, 0xb8, 0xea, 0x67, - 0x0a, 0x28, 0x0c, 0xe0, 0xd4, 0xc9, 0x1e, 0x6e, 0x53, 0xdb, 0x81, 0x0f, 0xc0, 0x34, 0xdf, 0x79, - 0xd4, 0x94, 0x0e, 0xbc, 0x76, 0xa8, 0x7b, 0xe3, 0x21, 0xaa, 0x67, 0x59, 0xf6, 0x55, 0x04, 0x3b, - 0xf2, 0x71, 0xe0, 0x0e, 0x98, 0xe5, 0x3f, 0xef, 0xda, 0x4f, 0x2d, 0xe9, 0xb7, 0x71, 0x40, 0xe7, - 0x59, 0xd1, 0xaf, 0xf8, 0x00, 0x28, 0xc4, 0x52, 0xff, 0x9c, 0x06, 0xab, 0x03, 0xec, 
0x59, 0xb7, - 0x2d, 0x93, 0xb2, 0x18, 0x87, 0xf7, 0x62, 0x69, 0x7e, 0x33, 0x91, 0xe6, 0x97, 0x46, 0xf1, 0x47, - 0xd2, 0x7e, 0x33, 0xb8, 0xa0, 0x54, 0x0c, 0x4b, 0xba, 0xf9, 0x7d, 0xa7, 0xd0, 0x67, 0xb0, 0xd2, - 0x02, 0xa4, 0xf8, 0x65, 0xc0, 0x36, 0x80, 0x75, 0xec, 0x7a, 0x0f, 0x1d, 0x6c, 0xb9, 0x42, 0x12, - 0x6d, 0x10, 0x79, 0xf5, 0xd7, 0x0e, 0x17, 0xb4, 0x8c, 0x43, 0x5f, 0x91, 0x5a, 0xc0, 0xcd, 0x1e, - 0x34, 0xd4, 0x47, 0x02, 0xfc, 0x1e, 0x98, 0x72, 0x08, 0x76, 0x6d, 0x2b, 0x97, 0xe1, 0x56, 0x04, - 0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x2a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b, - 0xe4, 0x84, 0x41, 0x79, 0xdd, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0x3f, 0x57, 0xc0, 0x85, 0x01, 0x7e, - 0xdc, 0xa4, 0xae, 0x07, 0x7f, 0xdb, 0x93, 0x95, 0xda, 0xe1, 0x0c, 0x64, 0xdc, 0x3c, 0x27, 0x83, - 0x7a, 0xe0, 0xef, 0x44, 0x32, 0x72, 0x07, 0x4c, 0x52, 0x8f, 0x34, 0xfc, 0x3a, 0x53, 0x1a, 0x3f, - 0x6d, 0xc2, 0x0a, 0xbe, 0xc1, 0x80, 0x90, 0xc0, 0x53, 0x5f, 0xa5, 0x07, 0x9a, 0xc5, 0xd2, 0x16, - 0xb6, 0xc1, 0x02, 0x5f, 0xc9, 0x9e, 0x49, 0x76, 0xa5, 0x71, 0xc3, 0x8a, 0xc2, 0x90, 0x19, 0x45, - 0x3f, 0x2b, 0xb5, 0x58, 0xa8, 0xc4, 0x50, 0x51, 0x42, 0x0a, 0xbc, 0x01, 0xb2, 0x0d, 0x6a, 0x21, - 0xd2, 0xac, 0x53, 0x03, 0xbb, 0xb2, 0x09, 0x2d, 0x76, 0x3b, 0x85, 0xec, 0x56, 0xb8, 0x8d, 0xa2, - 0x34, 0xf0, 0xc7, 0x20, 0xdb, 0xc0, 0xcf, 0x02, 0x16, 0xd1, 0x2c, 0x4e, 0x49, 0x79, 0xd9, 0xad, - 0xf0, 0x08, 0x45, 0xe9, 0x60, 0x99, 0xc5, 0x00, 0x6b, 0xb3, 0x6e, 0x2e, 0xc3, 0x9d, 0xfb, 0xdd, - 0x91, 0x0d, 0x99, 0x97, 0xb7, 0x48, 0xa8, 0x70, 0x6e, 0xe4, 0xc3, 0x40, 0x13, 0xcc, 0x54, 0x65, - 0xa9, 0xe1, 0x61, 0x95, 0x2d, 0xfd, 0xe4, 0x03, 0xee, 0x4b, 0x22, 0xe8, 0x73, 0x2c, 0x24, 0xfc, - 0x15, 0x0a, 0x90, 0xd5, 0x97, 0x19, 0x70, 0x71, 0x68, 0x89, 0x84, 0x3f, 0x07, 0xd0, 0xae, 0xba, - 0xc4, 0x69, 0x13, 0xf3, 0x17, 0xe2, 0x91, 0xc0, 0x66, 0x3a, 0x76, 0x7f, 0x69, 0xfd, 0x2c, 0xcb, - 0xa6, 0xed, 0x9e, 0x53, 0xd4, 0x87, 0x03, 0x1a, 0x60, 0x9e, 0xe5, 0x98, 0xb8, 0x31, 0x2a, 0xc7, - 0xc7, 0xf1, 0x12, 0x78, 0x99, 0x4d, 0x03, 0x9b, 0x51, 0x10, 0x14, 0xc7, 0x84, 0x6b, 0x60, 0x51, - 0x4e, 0x32, 0x89, 0x1b, 0x3c, 0x27, 0xfd, 0xbc, 0xb8, 0x1e, 0x3f, 0x46, 0x49, 0x7a, 0x06, 0x61, - 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0xbb, 0xf1, 0x63, 0x94, 0xa4, 0x87, 0x35, - 0xb0, 0x20, 0x51, 0xe5, 0xad, 0xe6, 0x26, 0x79, 0x4c, 0x8c, 0x1e, 0x32, 0x65, 0x5b, 0x0a, 0xe2, - 0x7b, 0x3d, 0x06, 0x83, 0x12, 0xb0, 0xd0, 0x06, 0xc0, 0xf0, 0x8b, 0xa6, 0x9b, 0x9b, 0xe2, 0x42, - 0xee, 0x8c, 0x1f, 0x25, 0x41, 0xe1, 0x0d, 0x3b, 0x7a, 0xb0, 0xe5, 0xa2, 0x88, 0x08, 0xf5, 0x6f, - 0x0a, 0x58, 0x4a, 0x0e, 0xa9, 0xc1, 0x7b, 0x40, 0x19, 0xf8, 0x1e, 0xf8, 0x1d, 0x98, 0x11, 0x33, - 0x8f, 0xed, 0xc8, 0x6b, 0xff, 0xd1, 0x21, 0xcb, 0x1a, 0xae, 0x92, 0x7a, 0x45, 0xb2, 0x8a, 0x20, - 0xf6, 0x57, 0x28, 0x80, 0x54, 0x5f, 0x64, 0x00, 0x08, 0x73, 0x0a, 0xde, 0x8c, 0xf5, 0xb1, 0xd5, - 0x44, 0x1f, 0x5b, 0x8a, 0x3e, 0x2e, 0x22, 0x3d, 0xeb, 0x01, 0x98, 0xb2, 0x79, 0x99, 0x91, 0x1a, - 0x5e, 0x1f, 0xe2, 0xc7, 0x60, 0xde, 0x09, 0x80, 0x74, 0xc0, 0x1a, 0x83, 0xac, 0x53, 0x12, 0x08, - 0x6e, 0x80, 0x4c, 0xd3, 0x36, 0xfd, 0x29, 0x65, 0xd8, 0x58, 0x57, 0xb6, 0x4d, 0x37, 0x06, 0x37, - 0xc3, 0x34, 0x66, 0xbb, 0x88, 0x43, 0xb0, 0x29, 0xd1, 0xff, 0x94, 0xc0, 0xc3, 0x31, 0x5b, 0x2a, - 0x0e, 0x81, 0xeb, 0xf7, 0x60, 0x17, 0xde, 0xf3, 0x4f, 0x50, 0x00, 0x07, 0xff, 0x08, 0x96, 0x8d, - 0xe4, 0x03, 0x38, 0x37, 0x3d, 0x72, 0xb0, 0x1a, 0xfa, 0x75, 0x40, 0x3f, 0xd3, 0xed, 0x14, 0x96, - 0x7b, 0x48, 0x50, 0xaf, 0x24, 0x66, 0x19, 0x91, 0xef, 0x26, 0x59, 0xe7, 0x86, 0x59, 0xd6, 0xef, - 0x85, 0x28, 
0x2c, 0xf3, 0x4f, 0x50, 0x00, 0xa7, 0xfe, 0x3d, 0x03, 0xe6, 0x62, 0x6f, 0xb1, 0x63, - 0x8e, 0x0c, 0x91, 0xcc, 0x47, 0x16, 0x19, 0x02, 0xee, 0x48, 0x23, 0x43, 0x40, 0x1e, 0x53, 0x64, - 0x08, 0x61, 0xc7, 0x14, 0x19, 0x11, 0xcb, 0xfa, 0x44, 0xc6, 0x27, 0x29, 0x3f, 0x32, 0xc4, 0xb0, - 0x70, 0xb8, 0xc8, 0x10, 0xb4, 0x91, 0xc8, 0xd8, 0x8e, 0x3e, 0x6f, 0x47, 0xcc, 0x6a, 0x9a, 0xef, - 0x56, 0xed, 0x41, 0x0b, 0x5b, 0x1e, 0xf5, 0x0e, 0xf4, 0xd9, 0x9e, 0xa7, 0xb0, 0x09, 0xe6, 0x70, - 0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8, 0x4b, 0x74, 0x2d, 0x82, - 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x23, 0x2f, 0x78, 0xe2, 0xca, 0x2e, 0xc7, 0x5b, 0xfa, - 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x35, 0x05, 0x96, 0x7b, 0x3e, 0x2e, 0x84, 0x4e, 0x51, - 0x3e, 0x92, 0x53, 0x52, 0xc7, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x77, 0x0a, 0xc0, 0xde, 0xfe, - 0x00, 0x0f, 0xf8, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x2d, 0x67, 0xe0, 0xe8, 0x38, - 0x12, 0x85, 0x45, 0x49, 0x39, 0x47, 0xff, 0x91, 0x35, 0xfc, 0xa4, 0x95, 0x3e, 0xb2, 0x4f, 0x5a, - 0xea, 0xff, 0x92, 0x7e, 0x3b, 0x81, 0x9f, 0xcf, 0xfa, 0xdd, 0x72, 0xfa, 0x78, 0x6e, 0x59, 0xfd, - 0x8f, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x09, 0xf9, 0x76, 0xfa, 0xff, 0xb8, 0xea, 0x27, 0xf1, 0xbb, - 0xe9, 0x4b, 0x05, 0x9c, 0x3e, 0x39, 0x7f, 0x93, 0xa8, 0xff, 0xea, 0x55, 0xf7, 0x04, 0xfc, 0xd9, - 0xa1, 0xff, 0xf4, 0xf5, 0xbb, 0xfc, 0xc4, 0x9b, 0x77, 0xf9, 0x89, 0xb7, 0xef, 0xf2, 0x13, 0x7f, - 0xea, 0xe6, 0x95, 0xd7, 0xdd, 0xbc, 0xf2, 0xa6, 0x9b, 0x57, 0xde, 0x76, 0xf3, 0xca, 0x17, 0xdd, - 0xbc, 0xf2, 0x97, 0x2f, 0xf3, 0x13, 0xbf, 0x39, 0x3f, 0xf0, 0x9f, 0xc2, 0x6f, 0x02, 0x00, 0x00, - 0xff, 0xff, 0xca, 0x8b, 0x47, 0xba, 0x45, 0x1c, 0x00, 0x00, + // 1742 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xc9, 0x8f, 0x1b, 0x4b, + 0x19, 0x9f, 0xb6, 0x3d, 0x5b, 0x79, 0xd6, 0xca, 0xe6, 0x4c, 0x14, 0x7b, 0xd4, 0x04, 0xb2, 0x40, + 0xda, 0xc4, 0x84, 0x28, 0x22, 0x07, 0x34, 0x3d, 0x01, 0x32, 0xca, 0x0c, 0xe3, 0x94, 0x27, 0x19, + 0x76, 0xa5, 0xdc, 0x5d, 0xe3, 0x29, 0xc6, 0xee, 0xb6, 0xba, 0xdb, 0x4e, 0x26, 0x12, 0x12, 0x17, + 0xee, 0x08, 0x14, 0xf1, 0x4f, 0x44, 0x9c, 0x40, 0xe1, 0x00, 0x12, 0x12, 0x1c, 0x72, 0x41, 0xca, + 0x81, 0x43, 0x4e, 0x16, 0x31, 0xd2, 0x3b, 0xbe, 0xe3, 0x3b, 0xe4, 0xf4, 0x54, 0x4b, 0xaf, 0xde, + 0xc6, 0x79, 0x93, 0x91, 0xe6, 0xe6, 0xaa, 0xfa, 0xbe, 0xdf, 0xb7, 0xd4, 0xb7, 0x55, 0x1b, 0x5c, + 0x3f, 0xb8, 0xeb, 0x6a, 0xd4, 0x2e, 0xe2, 0x26, 0x2d, 0xe2, 0x96, 0x67, 0xbb, 0x06, 0xae, 0x53, + 0xab, 0x56, 0x6c, 0x97, 0x8a, 0x35, 0x62, 0x11, 0x07, 0x7b, 0xc4, 0xd4, 0x9a, 0x8e, 0xed, 0xd9, + 0xf0, 0xa2, 0x20, 0xd5, 0x70, 0x93, 0x6a, 0x11, 0x52, 0xad, 0x5d, 0x5a, 0xb9, 0x59, 0xa3, 0xde, + 0x7e, 0xab, 0xaa, 0x19, 0x76, 0xa3, 0x58, 0xb3, 0x6b, 0x76, 0x91, 0x73, 0x54, 0x5b, 0x7b, 0x7c, + 0xc5, 0x17, 0xfc, 0x97, 0x40, 0x5a, 0x51, 0x23, 0x42, 0x0d, 0xdb, 0x21, 0xc5, 0xf6, 0xad, 0xa4, + 0xb4, 0x95, 0xdb, 0x21, 0x4d, 0x03, 0x1b, 0xfb, 0xd4, 0x22, 0xce, 0x61, 0xb1, 0x79, 0x50, 0xe3, + 0x4c, 0x0e, 0x71, 0xed, 0x96, 0x63, 0x90, 0xb1, 0xb8, 0xdc, 0x62, 0x83, 0x78, 0xb8, 0x9f, 0xac, + 0xe2, 0x20, 0x2e, 0xa7, 0x65, 0x79, 0xb4, 0xd1, 0x2b, 0xe6, 0xce, 0x28, 0x06, 0xd7, 0xd8, 0x27, + 0x0d, 0x9c, 0xe4, 0x53, 0x3f, 0x53, 0xc0, 0xe5, 0x75, 0xdb, 0xf2, 0x30, 0xe3, 0x40, 0xd2, 0x88, + 0x2d, 0xe2, 0x39, 0xd4, 0xa8, 0xf0, 0xdf, 0x70, 0x1d, 0x64, 0x2c, 0xdc, 0x20, 0x39, 0x65, 0x55, + 0xb9, 0x36, 0xab, 0x17, 0xdf, 0x74, 0x0a, 0x13, 0xdd, 0x4e, 0x21, 0xf3, 0x63, 0xdc, 0x20, 0x1f, + 0x3a, 0x85, 0x42, 0xaf, 
0xe3, 0x34, 0x1f, 0x86, 0x91, 0x20, 0xce, 0x0c, 0xb7, 0xc1, 0x94, 0x87, + 0x9d, 0x1a, 0xf1, 0x72, 0xa9, 0x55, 0xe5, 0x5a, 0xb6, 0x74, 0x55, 0x1b, 0x78, 0x75, 0x9a, 0x90, + 0xbe, 0xc3, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45, 0x30, 0x6b, 0xf8, + 0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8, 0x9f, 0x0f, 0x31, + 0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1e, 0x43, 0x77, 0xc1, 0xb4, 0xd1, 0x72, 0x1c, 0x62, 0xf9, 0x96, + 0x7e, 0x6b, 0xa4, 0xa5, 0x4f, 0x70, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94, 0x3a, 0xbd, 0x2e, + 0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x15, 0x70, 0x69, 0xdd, 0xb1, 0x5d, 0xf7, 0x09, 0x71, + 0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x5f, 0x13, 0xc3, 0x43, 0x64, 0x8f, 0x38, 0xc4, 0x32, 0x08, 0x5c, + 0x05, 0x99, 0x03, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0x87, 0xd4, 0x32, 0x11, 0x3f, 0x61, + 0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5, 0x00, 0xa9, 0x15, + 0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xbb, 0x02, 0xce, 0xfe, 0xe0, + 0xb9, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6, 0x2a, 0x65, 0x4b, + 0xdf, 0x1c, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa3, 0xc4, 0x09, 0xe3, 0x44, 0x9c, 0x20, + 0x09, 0x75, 0xec, 0x81, 0xa7, 0xfe, 0xbb, 0x57, 0x7d, 0x11, 0x3e, 0x9f, 0x44, 0xfd, 0x4f, 0x15, + 0x4e, 0xea, 0x9f, 0x15, 0xb0, 0xf4, 0xa0, 0xbc, 0x56, 0x11, 0xdc, 0x65, 0xbb, 0x4e, 0x8d, 0x43, + 0x78, 0x17, 0x64, 0xbc, 0xc3, 0xa6, 0x9f, 0x01, 0x57, 0xfc, 0x0b, 0xdf, 0x39, 0x6c, 0xb2, 0x0c, + 0x38, 0x9b, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xaf, 0x81, 0xc9, 0x36, 0x93, 0xcb, 0xb5, 0x9c, + 0xd4, 0xe7, 0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x0f, 0xcc, 0x37, 0x89, 0x43, 0x6d, + 0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x39, 0x49, 0x3c, 0x5f, 0x8e, 0x1e, + 0xa2, 0x38, 0xad, 0xfa, 0x45, 0x0a, 0x2c, 0x86, 0x0a, 0xa0, 0x56, 0x9d, 0xb8, 0xf0, 0x57, 0x60, + 0xc5, 0xf5, 0x70, 0x95, 0xd6, 0xe9, 0x0b, 0xec, 0x51, 0xdb, 0xda, 0xa5, 0x96, 0x69, 0x3f, 0x8b, + 0xa3, 0xe7, 0xbb, 0x9d, 0xc2, 0x4a, 0x65, 0x20, 0x15, 0x1a, 0x82, 0x00, 0x1f, 0x82, 0x39, 0x97, + 0xd4, 0x89, 0xe1, 0x09, 0x7b, 0xa5, 0x5f, 0xae, 0x76, 0x3b, 0x85, 0xb9, 0x4a, 0x64, 0xff, 0x43, + 0xa7, 0x70, 0x26, 0xe6, 0x18, 0x71, 0x88, 0x62, 0xcc, 0xf0, 0xa7, 0x60, 0xa6, 0xc9, 0x7e, 0x51, + 0xe2, 0xe6, 0x52, 0xab, 0xe9, 0x11, 0x11, 0x92, 0xf4, 0xb5, 0xbe, 0x24, 0xbd, 0x34, 0x53, 0x96, + 0x20, 0x28, 0x80, 0x83, 0x3f, 0x07, 0xb3, 0x9e, 0x5d, 0x27, 0x0e, 0xb6, 0x0c, 0x92, 0xcb, 0xf0, + 0x38, 0xd1, 0x22, 0xd8, 0x41, 0x43, 0xd0, 0x9a, 0x07, 0x35, 0x2e, 0xcc, 0xef, 0x56, 0xda, 0xa3, + 0x16, 0xb6, 0x3c, 0xea, 0x1d, 0xea, 0xf3, 0xac, 0x8e, 0xec, 0xf8, 0x20, 0x28, 0xc4, 0x53, 0x5f, + 0xa7, 0xc0, 0x85, 0x07, 0xb6, 0x43, 0x5f, 0xb0, 0xca, 0x52, 0x2f, 0xdb, 0xe6, 0x9a, 0xd4, 0x94, + 0x38, 0xf0, 0x29, 0x98, 0x61, 0x1d, 0xcc, 0xc4, 0x1e, 0x96, 0x51, 0xff, 0xed, 0x61, 0x72, 0x5d, + 0x8d, 0x51, 0x6b, 0xed, 0x5b, 0x9a, 0x28, 0x46, 0x5b, 0xc4, 0xc3, 0x61, 0xbd, 0x08, 0xf7, 0x50, + 0x80, 0x0a, 0x7f, 0x02, 0x32, 0x6e, 0x93, 0x18, 0x32, 0xfa, 0xef, 0x0c, 0xf3, 0x58, 0x7f, 0x1d, + 0x2b, 0x4d, 0x62, 0x84, 0xb5, 0x8b, 0xad, 0x10, 0x47, 0x84, 0x4f, 0xc1, 0x94, 0xcb, 0xb3, 0x84, + 0x07, 0x4a, 0xb6, 0x74, 0xf7, 0x23, 0xb0, 0x45, 0x96, 0x05, 0xc9, 0x2b, 0xd6, 0x48, 0xe2, 0xaa, + 0xff, 0x51, 0x40, 0x61, 0x00, 0xa7, 0x4e, 0xf6, 0x71, 0x9b, 0xda, 0x0e, 0x7c, 0x04, 0xa6, 0xf9, + 0xce, 0xe3, 0xa6, 0x74, 0xe0, 0x8d, 0x23, 0x05, 0x05, 0x8f, 0x7f, 0x3d, 0xcb, 0x52, 0xbb, 0x22, + 0xd8, 0x91, 0x8f, 0x03, 0x77, 0xc1, 0x2c, 0xff, 
0x79, 0xdf, 0x7e, 0x66, 0x49, 0xbf, 0x8d, 0x03, + 0xca, 0x23, 0xa1, 0xe2, 0x03, 0xa0, 0x10, 0x4b, 0xfd, 0x5d, 0x1a, 0xac, 0x0e, 0xb0, 0x67, 0xdd, + 0xb6, 0x4c, 0xca, 0x12, 0x08, 0x3e, 0x88, 0xd5, 0x90, 0xdb, 0x89, 0x1a, 0x72, 0x65, 0x14, 0x7f, + 0xa4, 0xa6, 0x6c, 0x06, 0x17, 0x94, 0x8a, 0x61, 0x49, 0x37, 0x7f, 0xe8, 0x14, 0xfa, 0x4c, 0x6d, + 0x5a, 0x80, 0x14, 0xbf, 0x0c, 0xd8, 0x06, 0xb0, 0x8e, 0x5d, 0x6f, 0xc7, 0xc1, 0x96, 0x2b, 0x24, + 0xd1, 0x06, 0x91, 0x57, 0x7f, 0xe3, 0x68, 0x41, 0xcb, 0x38, 0xf4, 0x15, 0xa9, 0x05, 0xdc, 0xec, + 0x41, 0x43, 0x7d, 0x24, 0xc0, 0x6f, 0x80, 0x29, 0x87, 0x60, 0xd7, 0xb6, 0x78, 0x62, 0xce, 0x86, + 0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x3a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b, + 0xe4, 0x84, 0x41, 0xed, 0xde, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0xff, 0xab, 0x80, 0x4b, 0x03, 0xfc, + 0xb8, 0x49, 0x5d, 0x0f, 0xfe, 0xa2, 0x27, 0x2b, 0xb5, 0xa3, 0x19, 0xc8, 0xb8, 0x79, 0x4e, 0x06, + 0xc5, 0xc6, 0xdf, 0x89, 0x64, 0xe4, 0x2e, 0x98, 0xa4, 0x1e, 0x69, 0xf8, 0x45, 0xac, 0x34, 0x7e, + 0xda, 0x84, 0xed, 0x61, 0x83, 0x01, 0x21, 0x81, 0xa7, 0xbe, 0x4e, 0x0f, 0x34, 0x8b, 0xa5, 0x2d, + 0x6c, 0x83, 0x05, 0xbe, 0x92, 0x0d, 0x99, 0xec, 0x49, 0xe3, 0x86, 0x15, 0x85, 0x21, 0x03, 0x90, + 0x7e, 0x5e, 0x6a, 0xb1, 0x50, 0x89, 0xa1, 0xa2, 0x84, 0x14, 0x78, 0x0b, 0x64, 0x1b, 0xd4, 0x42, + 0xa4, 0x59, 0xa7, 0x06, 0x76, 0x65, 0x87, 0x5b, 0xec, 0x76, 0x0a, 0xd9, 0xad, 0x70, 0x1b, 0x45, + 0x69, 0xe0, 0x77, 0x41, 0xb6, 0x81, 0x9f, 0x07, 0x2c, 0xa2, 0x13, 0x9d, 0x91, 0xf2, 0xb2, 0x5b, + 0xe1, 0x11, 0x8a, 0xd2, 0xc1, 0x32, 0x8b, 0x01, 0xd6, 0xc3, 0xdd, 0x5c, 0x86, 0x3b, 0xf7, 0xeb, + 0x23, 0xbb, 0x3d, 0x2f, 0x6f, 0x91, 0x50, 0xe1, 0xdc, 0xc8, 0x87, 0x81, 0x26, 0x98, 0xa9, 0xca, + 0x52, 0xc3, 0xc3, 0x2a, 0x5b, 0xfa, 0xde, 0x47, 0xdc, 0x97, 0x44, 0xd0, 0xe7, 0x58, 0x48, 0xf8, + 0x2b, 0x14, 0x20, 0xab, 0xaf, 0x32, 0xe0, 0xf2, 0xd0, 0x12, 0x09, 0x7f, 0x08, 0xa0, 0x5d, 0x75, + 0x89, 0xd3, 0x26, 0xe6, 0x8f, 0xc4, 0x0b, 0x84, 0x0d, 0x8c, 0xec, 0xfe, 0xd2, 0xfa, 0x79, 0x96, + 0x4d, 0xdb, 0x3d, 0xa7, 0xa8, 0x0f, 0x07, 0x34, 0xc0, 0x3c, 0xcb, 0x31, 0x71, 0x63, 0x54, 0xce, + 0xa6, 0xe3, 0x25, 0xf0, 0x32, 0x1b, 0x35, 0x36, 0xa3, 0x20, 0x28, 0x8e, 0x09, 0xd7, 0xc0, 0xa2, + 0x1c, 0x93, 0x12, 0x37, 0x78, 0x41, 0xfa, 0x79, 0x71, 0x3d, 0x7e, 0x8c, 0x92, 0xf4, 0x0c, 0xc2, + 0x24, 0x2e, 0x75, 0x88, 0x19, 0x40, 0x64, 0xe2, 0x10, 0xf7, 0xe3, 0xc7, 0x28, 0x49, 0x0f, 0x6b, + 0x60, 0x41, 0xa2, 0xca, 0x5b, 0xcd, 0x4d, 0xf2, 0x98, 0x18, 0x3d, 0xc1, 0xca, 0xb6, 0x14, 0xc4, + 0xf7, 0x7a, 0x0c, 0x06, 0x25, 0x60, 0xa1, 0x0d, 0x80, 0xe1, 0x17, 0x4d, 0x37, 0x37, 0xc5, 0x85, + 0xdc, 0x1b, 0x3f, 0x4a, 0x82, 0xc2, 0x1b, 0x76, 0xf4, 0x60, 0xcb, 0x45, 0x11, 0x11, 0xea, 0x1f, + 0x15, 0xb0, 0x94, 0x9c, 0x80, 0x83, 0xc7, 0x86, 0x32, 0xf0, 0xb1, 0xf1, 0x4b, 0x30, 0x23, 0x06, + 0x2a, 0xdb, 0x91, 0xd7, 0xfe, 0x9d, 0x23, 0x96, 0x35, 0x5c, 0x25, 0xf5, 0x8a, 0x64, 0x15, 0x41, + 0xec, 0xaf, 0x50, 0x00, 0xa9, 0xbe, 0xcc, 0x00, 0x10, 0xe6, 0x14, 0xbc, 0x1d, 0xeb, 0x63, 0xab, + 0x89, 0x3e, 0xb6, 0x14, 0x7d, 0xb9, 0x44, 0x7a, 0xd6, 0x23, 0x30, 0x65, 0xf3, 0x32, 0x23, 0x35, + 0xbc, 0x39, 0xc4, 0x8f, 0xc1, 0xbc, 0x13, 0x00, 0xe9, 0x80, 0x35, 0x06, 0x59, 0xa7, 0x24, 0x10, + 0xdc, 0x00, 0x99, 0xa6, 0x6d, 0xfa, 0x53, 0xca, 0xb0, 0x99, 0xb1, 0x6c, 0x9b, 0x6e, 0x0c, 0x6e, + 0x86, 0x69, 0xcc, 0x76, 0x11, 0x87, 0x60, 0x23, 0xa8, 0x3f, 0xf9, 0xc9, 0x31, 0xb1, 0x38, 0x04, + 0xae, 0xdf, 0xd7, 0x00, 0xe1, 0x3d, 0xff, 0x04, 0x05, 0x70, 0xf0, 0x37, 0x60, 0xd9, 0x48, 0xbe, + 0xae, 0x73, 0xd3, 0x23, 0x07, 0xab, 0xa1, 0x9f, 0x1e, 0xf4, 0x73, 0xdd, 
0x4e, 0x61, 0xb9, 0x87, + 0x04, 0xf5, 0x4a, 0x62, 0x96, 0x11, 0xf9, 0x28, 0x93, 0x75, 0x6e, 0x98, 0x65, 0xfd, 0x9e, 0x9f, + 0xc2, 0x32, 0xff, 0x04, 0x05, 0x70, 0xea, 0x9f, 0x32, 0x60, 0x2e, 0xf6, 0xd0, 0x3b, 0xe1, 0xc8, + 0x10, 0xc9, 0x7c, 0x6c, 0x91, 0x21, 0xe0, 0x8e, 0x35, 0x32, 0x04, 0xe4, 0x09, 0x45, 0x86, 0x10, + 0x76, 0x42, 0x91, 0x11, 0xb1, 0xac, 0x4f, 0x64, 0xfc, 0x2b, 0xe5, 0x47, 0x86, 0x18, 0x16, 0x8e, + 0x16, 0x19, 0x82, 0x36, 0x12, 0x19, 0xdb, 0xd1, 0xb7, 0xf3, 0xf8, 0x2f, 0xb7, 0xd9, 0x9e, 0x77, + 0xb6, 0x09, 0xe6, 0x70, 0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8, + 0x33, 0x77, 0x2d, 0x82, 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x63, 0x2f, 0x78, 0x3f, 0xcb, + 0x2e, 0xc7, 0x5b, 0xfa, 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x21, 0x05, 0x96, 0x7b, 0xbe, + 0x5c, 0x84, 0x4e, 0x51, 0x3e, 0x91, 0x53, 0x52, 0x27, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x6b, + 0x0a, 0xc0, 0xde, 0xfe, 0x00, 0x0f, 0xf9, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x15, + 0x67, 0xe0, 0xe8, 0x38, 0x12, 0x85, 0x45, 0x49, 0x39, 0xc7, 0xff, 0x05, 0x37, 0xfc, 0x5e, 0x96, + 0x3e, 0xb6, 0xef, 0x65, 0xea, 0x3f, 0x92, 0x7e, 0x3b, 0x85, 0xdf, 0xe6, 0xfa, 0xdd, 0x72, 0xfa, + 0x64, 0x6e, 0x59, 0xfd, 0x9b, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x29, 0xf9, 0x30, 0xfb, 0xcf, 0xb8, + 0xea, 0xa7, 0xf1, 0xa3, 0xec, 0x2b, 0x05, 0x9c, 0x3d, 0x3d, 0xff, 0xc1, 0xa8, 0x7f, 0xe9, 0x55, + 0xf7, 0x14, 0xfc, 0x93, 0xa2, 0x7f, 0xff, 0xcd, 0xfb, 0xfc, 0xc4, 0xdb, 0xf7, 0xf9, 0x89, 0x77, + 0xef, 0xf3, 0x13, 0xbf, 0xed, 0xe6, 0x95, 0x37, 0xdd, 0xbc, 0xf2, 0xb6, 0x9b, 0x57, 0xde, 0x75, + 0xf3, 0xca, 0xff, 0xba, 0x79, 0xe5, 0xf7, 0xff, 0xcf, 0x4f, 0xfc, 0xec, 0xe2, 0xc0, 0xbf, 0x21, + 0xbf, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xbe, 0x23, 0xae, 0x54, 0xa2, 0x1c, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { @@ -1126,6 +1127,18 @@ func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Tolerance != nil { + { + size, err := m.Tolerance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if m.StabilizationWindowSeconds != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds)) i-- @@ -2203,6 +2216,10 @@ func (m *HPAScalingRules) Size() (n int) { if m.StabilizationWindowSeconds != nil { n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds)) } + if m.Tolerance != nil { + l = m.Tolerance.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2619,6 +2636,7 @@ func (this *HPAScalingRules) String() string { `SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`, `Policies:` + repeatedStringForPolicies + `,`, `StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`, + `Tolerance:` + strings.Replace(fmt.Sprintf("%v", this.Tolerance), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -3770,6 +3788,42 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { } } m.StabilizationWindowSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tolerance == nil { + m.Tolerance = &resource.Quantity{} + } + if err := m.Tolerance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/tools/vendor/k8s.io/api/autoscaling/v2/generated.proto b/tools/vendor/k8s.io/api/autoscaling/v2/generated.proto index 4e6dc0592..04c34d6e1 100644 --- a/tools/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/tools/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -112,12 +112,18 @@ message HPAScalingPolicy { optional int32 periodSeconds = 3; } -// HPAScalingRules configures the scaling behavior for one direction. -// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. +// HPAScalingRules configures the scaling behavior for one direction via +// scaling Policy Rules and a configurable metric tolerance. +// +// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. // They can limit the scaling velocity by specifying scaling policies. // They can prevent flapping by specifying the stabilization window, so that the // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. +// +// The tolerance is applied to the metric values and prevents scaling too +// eagerly for small metric variations. (Note that setting a tolerance requires +// enabling the alpha HPAConfigurableTolerance feature gate.) message HPAScalingRules { // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. @@ -134,10 +140,28 @@ message HPAScalingRules { optional string selectPolicy = 1; // policies is a list of potential scaling polices which can be used during scaling. - // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + // If not set, use the default values: + // - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. + // - For scale down: allow all pods to be removed in a 15s window. // +listType=atomic // +optional repeated HPAScalingPolicy policies = 2; + + // tolerance is the tolerance on the ratio between the current and desired + // metric value under which no updates are made to the desired number of + // replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not + // set, the default cluster-wide tolerance is applied (by default 10%). + // + // For example, if autoscaling is configured with a memory consumption target of 100Mi, + // and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be + // triggered when the actual consumption falls below 95Mi or exceeds 101Mi. + // + // This is an alpha field and requires enabling the HPAConfigurableTolerance + // feature gate. 
+ // + // +featureGate=HPAConfigurableTolerance + // +optional + optional .k8s.io.apimachinery.pkg.api.resource.Quantity tolerance = 4; } // HorizontalPodAutoscaler is the configuration for a horizontal pod diff --git a/tools/vendor/k8s.io/api/autoscaling/v2/types.go b/tools/vendor/k8s.io/api/autoscaling/v2/types.go index 99e8db09d..9ce69b1ed 100644 --- a/tools/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/tools/vendor/k8s.io/api/autoscaling/v2/types.go @@ -171,12 +171,18 @@ const ( DisabledPolicySelect ScalingPolicySelect = "Disabled" ) -// HPAScalingRules configures the scaling behavior for one direction. -// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. +// HPAScalingRules configures the scaling behavior for one direction via +// scaling Policy Rules and a configurable metric tolerance. +// +// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. // They can limit the scaling velocity by specifying scaling policies. // They can prevent flapping by specifying the stabilization window, so that the // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. +// +// The tolerance is applied to the metric values and prevents scaling too +// eagerly for small metric variations. (Note that setting a tolerance requires +// enabling the alpha HPAConfigurableTolerance feature gate.) type HPAScalingRules struct { // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. @@ -193,10 +199,28 @@ type HPAScalingRules struct { SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` // policies is a list of potential scaling polices which can be used during scaling. - // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + // If not set, use the default values: + // - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. + // - For scale down: allow all pods to be removed in a 15s window. // +listType=atomic // +optional Policies []HPAScalingPolicy `json:"policies,omitempty" listType:"atomic" protobuf:"bytes,2,rep,name=policies"` + + // tolerance is the tolerance on the ratio between the current and desired + // metric value under which no updates are made to the desired number of + // replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not + // set, the default cluster-wide tolerance is applied (by default 10%). + // + // For example, if autoscaling is configured with a memory consumption target of 100Mi, + // and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be + // triggered when the actual consumption falls below 95Mi or exceeds 101Mi. + // + // This is an alpha field and requires enabling the HPAConfigurableTolerance + // feature gate. + // + // +featureGate=HPAConfigurableTolerance + // +optional + Tolerance *resource.Quantity `json:"tolerance,omitempty" protobuf:"bytes,4,opt,name=tolerance"` } // HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions. 
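The tolerance field added above is a ratio check: as long as the ratio between the current and desired metric value stays within the tolerance of 1.0, the controller leaves the replica count unchanged. Below is a minimal sketch (not part of the vendored patch) of how code built against this vendored k8s.io/api revision could populate the field, assuming a cluster with the alpha HPAConfigurableTolerance feature gate enabled; the 5% scale-down and 1% scale-up values mirror the example in the field documentation, and all variable names are illustrative only.

// Sketch only: assumes the k8s.io/api version vendored by this patch.
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Tolerances are resource.Quantity ratios: 0.05 == 5%, 0.01 == 1%.
	scaleDownTol := resource.MustParse("0.05")
	scaleUpTol := resource.MustParse("0.01")

	behavior := autoscalingv2.HorizontalPodAutoscalerBehavior{
		// Per the field docs: with a 100Mi target, scale-down only
		// triggers once actual consumption falls below 95Mi.
		ScaleDown: &autoscalingv2.HPAScalingRules{Tolerance: &scaleDownTol},
		// And scale-up only triggers once consumption exceeds 101Mi.
		ScaleUp: &autoscalingv2.HPAScalingRules{Tolerance: &scaleUpTol},
	}

	// Quantities print in canonical form: 0.05 -> "50m", 0.01 -> "10m".
	fmt.Println(behavior.ScaleDown.Tolerance.String(), behavior.ScaleUp.Tolerance.String())
}

Because tolerance is protobuf field 4 on HPAScalingRules, decoders generated from older descriptors treat it as an unknown field and skip it (the skipGenerated path in the default case of Unmarshal above) rather than failing, which is what keeps the addition wire-compatible.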
diff --git a/tools/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
index 649cd04a0..017fefcde 100644
--- a/tools/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
+++ b/tools/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
@@ -92,10 +92,11 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string {
 }
 
 var map_HPAScalingRules = map[string]string{
-	"": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.",
+	"": "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)",
 	"stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
 	"selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.",
-	"policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid",
+	"policies": "policies is a list of potential scaling policies which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.",
+	"tolerance": "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. 
If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.", } func (HPAScalingRules) SwaggerDoc() map[string]string { diff --git a/tools/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go index 125708d6f..5fbcf9f80 100644 --- a/tools/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/autoscaling/v2/zz_generated.deepcopy.go @@ -146,6 +146,11 @@ func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) { *out = make([]HPAScalingPolicy, len(*in)) copy(*out, *in) } + if in.Tolerance != nil { + in, out := &in.Tolerance, &out.Tolerance + x := (*in).DeepCopy() + *out = &x + } return } diff --git a/tools/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/tools/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index 25ca507bb..eac92e86e 100644 --- a/tools/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/tools/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -19,4 +19,4 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" +package v2beta1 diff --git a/tools/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/tools/vendor/k8s.io/api/autoscaling/v2beta2/doc.go index 76fb0aff8..150037297 100644 --- a/tools/vendor/k8s.io/api/autoscaling/v2beta2/doc.go +++ b/tools/vendor/k8s.io/api/autoscaling/v2beta2/doc.go @@ -19,4 +19,4 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v2beta2 // import "k8s.io/api/autoscaling/v2beta2" +package v2beta2 diff --git a/tools/vendor/k8s.io/api/batch/v1/doc.go b/tools/vendor/k8s.io/api/batch/v1/doc.go index cb5cbb600..69088e2c5 100644 --- a/tools/vendor/k8s.io/api/batch/v1/doc.go +++ b/tools/vendor/k8s.io/api/batch/v1/doc.go @@ -18,4 +18,4 @@ limitations under the License. // +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v1 // import "k8s.io/api/batch/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/batch/v1/generated.proto b/tools/vendor/k8s.io/api/batch/v1/generated.proto index 361ebdca1..d3aeae0ad 100644 --- a/tools/vendor/k8s.io/api/batch/v1/generated.proto +++ b/tools/vendor/k8s.io/api/batch/v1/generated.proto @@ -222,8 +222,6 @@ message JobSpec { // When the field is specified, it must be immutable and works only for the Indexed Jobs. // Once the Job meets the SuccessPolicy, the lingering pods are terminated. // - // This field is beta-level. To use this field, you must enable the - // `JobSuccessPolicy` feature gate (enabled by default). // +optional optional SuccessPolicy successPolicy = 16; @@ -238,8 +236,6 @@ message JobSpec { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). // +optional optional int32 backoffLimitPerIndex = 12; @@ -251,8 +247,6 @@ message JobSpec { // It can only be specified when backoffLimitPerIndex is set. 
// It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). // +optional optional int32 maxFailedIndexes = 13; @@ -442,8 +436,6 @@ message JobStatus { // represented as "1,3-5,7". // The set of failed indexes cannot overlap with the set of completed indexes. // - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). // +optional optional string failedIndexes = 10; @@ -554,8 +546,6 @@ message PodFailurePolicyRule { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is beta-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the diff --git a/tools/vendor/k8s.io/api/batch/v1/types.go b/tools/vendor/k8s.io/api/batch/v1/types.go index 8e9a761b9..6c0007c21 100644 --- a/tools/vendor/k8s.io/api/batch/v1/types.go +++ b/tools/vendor/k8s.io/api/batch/v1/types.go @@ -128,7 +128,6 @@ const ( // This is an action which might be taken on a pod failure - mark the // Job's index as failed to avoid restarts within this index. This action // can only be used when backoffLimitPerIndex is set. - // This value is beta-level. PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" // This is an action which might be taken on a pod failure - the counter towards @@ -223,8 +222,6 @@ type PodFailurePolicyRule struct { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is beta-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the @@ -346,8 +343,6 @@ type JobSpec struct { // When the field is specified, it must be immutable and works only for the Indexed Jobs. // Once the Job meets the SuccessPolicy, the lingering pods are terminated. // - // This field is beta-level. To use this field, you must enable the - // `JobSuccessPolicy` feature gate (enabled by default). // +optional SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"` @@ -362,8 +357,6 @@ type JobSpec struct { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). // +optional BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` @@ -375,8 +368,6 @@ type JobSpec struct { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). // +optional MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` @@ -571,8 +562,6 @@ type JobStatus struct { // represented as "1,3-5,7". // The set of failed indexes cannot overlap with the set of completed indexes. // - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). // +optional FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` @@ -647,13 +636,9 @@ const ( JobReasonFailedIndexes string = "FailedIndexes" // JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to // a Job met successPolicy. - // https://kep.k8s.io/3998 - // This is currently a beta field. JobReasonSuccessPolicy string = "SuccessPolicy" // JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to // a number of succeeded Job pods met completions. - // - https://kep.k8s.io/3998 - // This is currently a beta field. JobReasonCompletionsReached string = "CompletionsReached" ) diff --git a/tools/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 893f3371f..ffd4e4f5f 100644 --- a/tools/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -116,10 +116,10 @@ var map_JobSpec = map[string]string{ "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.", - "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).", + "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. 
If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.",
 	"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
-	"backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
-	"maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
+	"backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.",
+	"maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when completions is greater than 10^5.",
 	"selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
 	"manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
 	"template": "Describes the pod that will be created when executing a job. 
The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
@@ -144,7 +144,7 @@ var map_JobStatus = map[string]string{
 	"failed": "The number of pods which reached phase Failed. The value increases monotonically.",
 	"terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).",
 	"completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".",
-	"failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
+	"failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in a text format analogous to the `completedIndexes` field, i.e. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.",
 	"uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.",
 	"ready": "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).",
 }
@@ -195,7 +195,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string {
 
 var map_PodFailurePolicyRule = map[string]string{
 	"": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. 
One of onExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", } diff --git a/tools/vendor/k8s.io/api/batch/v1beta1/doc.go b/tools/vendor/k8s.io/api/batch/v1beta1/doc.go index cb2572f5d..3430d6939 100644 --- a/tools/vendor/k8s.io/api/batch/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -19,4 +19,4 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v1beta1 // import "k8s.io/api/batch/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/certificates/v1/doc.go b/tools/vendor/k8s.io/api/certificates/v1/doc.go index 78434478e..6c16fc29b 100644 --- a/tools/vendor/k8s.io/api/certificates/v1/doc.go +++ b/tools/vendor/k8s.io/api/certificates/v1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // +groupName=certificates.k8s.io -package v1 // import "k8s.io/api/certificates/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/certificates/v1alpha1/doc.go b/tools/vendor/k8s.io/api/certificates/v1alpha1/doc.go index d83d0e820..01481df8e 100644 --- a/tools/vendor/k8s.io/api/certificates/v1alpha1/doc.go +++ b/tools/vendor/k8s.io/api/certificates/v1alpha1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=certificates.k8s.io -package v1alpha1 // import "k8s.io/api/certificates/v1alpha1" +package v1alpha1 diff --git a/tools/vendor/k8s.io/api/certificates/v1beta1/doc.go b/tools/vendor/k8s.io/api/certificates/v1beta1/doc.go index 1165518c6..81608a554 100644 --- a/tools/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -21,4 +21,4 @@ limitations under the License. 
// +groupName=certificates.k8s.io -package v1beta1 // import "k8s.io/api/certificates/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/tools/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index b6d8ab3f5..199a54496 100644 --- a/tools/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/tools/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -186,10 +186,94 @@ func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() { var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo +func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} } +func (*ClusterTrustBundle) ProtoMessage() {} +func (*ClusterTrustBundle) Descriptor() ([]byte, []int) { + return fileDescriptor_6529c11a462c48a5, []int{5} +} +func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundle.Merge(m, src) +} +func (m *ClusterTrustBundle) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundle) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo + +func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} } +func (*ClusterTrustBundleList) ProtoMessage() {} +func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) { + return fileDescriptor_6529c11a462c48a5, []int{6} +} +func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleList.Merge(m, src) +} +func (m *ClusterTrustBundleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo + +func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} } +func (*ClusterTrustBundleSpec) ProtoMessage() {} +func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_6529c11a462c48a5, []int{7} +} +func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src) +} +func (m *ClusterTrustBundleSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo + func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_6529c11a462c48a5, []int{5} + return 
fileDescriptor_6529c11a462c48a5, []int{8} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -221,6 +305,9 @@ func init() { proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry") proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") + proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundle") + proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundleList") + proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundleSpec") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") } @@ -229,64 +316,69 @@ func init() { } var fileDescriptor_6529c11a462c48a5 = []byte{ - // 901 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xf6, 0xc6, 0x1f, 0xb1, 0xc7, 0x21, 0x6d, 0x47, 0x50, 0x2d, 0x96, 0xea, 0xb5, 0x56, 0x80, - 0xc2, 0xd7, 0x2c, 0xa9, 0x2a, 0x88, 0x72, 0x40, 0xb0, 0x21, 0x42, 0x11, 0x29, 0x48, 0x93, 0x84, - 0x03, 0x42, 0xa2, 0x93, 0xf5, 0xdb, 0xcd, 0x34, 0xdd, 0x0f, 0x76, 0x66, 0x4d, 0x7d, 0xeb, 0x4f, - 0xe0, 0xc8, 0x91, 0xff, 0xc0, 0x9f, 0x08, 0x07, 0xa4, 0x1e, 0x7b, 0x40, 0x16, 0x71, 0xff, 0x45, - 0x4e, 0x68, 0x66, 0xc7, 0x6b, 0xc7, 0x4e, 0x70, 0x69, 0x6f, 0x3b, 0xcf, 0xbc, 0xcf, 0xf3, 0xbc, - 0xf3, 0xce, 0xfb, 0x8e, 0x8d, 0xbc, 0xd3, 0x2d, 0x41, 0x78, 0xe2, 0xb1, 0x94, 0x7b, 0x01, 0x64, - 0x92, 0x3f, 0xe4, 0x01, 0x93, 0x20, 0xbc, 0xc1, 0xe6, 0x31, 0x48, 0xb6, 0xe9, 0x85, 0x10, 0x43, - 0xc6, 0x24, 0xf4, 0x49, 0x9a, 0x25, 0x32, 0xc1, 0x4e, 0x41, 0x20, 0x2c, 0xe5, 0x64, 0x96, 0x40, - 0x0c, 0xa1, 0xf3, 0x71, 0xc8, 0xe5, 0x49, 0x7e, 0x4c, 0x82, 0x24, 0xf2, 0xc2, 0x24, 0x4c, 0x3c, - 0xcd, 0x3b, 0xce, 0x1f, 0xea, 0x95, 0x5e, 0xe8, 0xaf, 0x42, 0xaf, 0xe3, 0xce, 0x26, 0x90, 0x64, - 0xe0, 0x0d, 0x16, 0x3c, 0x3b, 0xf7, 0xa6, 0x31, 0x11, 0x0b, 0x4e, 0x78, 0x0c, 0xd9, 0xd0, 0x4b, - 0x4f, 0x43, 0x05, 0x08, 0x2f, 0x02, 0xc9, 0xae, 0x62, 0x79, 0xd7, 0xb1, 0xb2, 0x3c, 0x96, 0x3c, - 0x82, 0x05, 0xc2, 0xa7, 0xcb, 0x08, 0x22, 0x38, 0x81, 0x88, 0xcd, 0xf3, 0xdc, 0x3f, 0x57, 0xd0, - 0xdb, 0x3b, 0xd3, 0x52, 0x1c, 0xf0, 0x30, 0xe6, 0x71, 0x48, 0xe1, 0xe7, 0x1c, 0x84, 0xc4, 0x0f, - 0x50, 0x53, 0x65, 0xd8, 0x67, 0x92, 0xd9, 0x56, 0xcf, 0xda, 0x68, 0xdf, 0xfd, 0x84, 0x4c, 0x6b, - 0x58, 0x1a, 0x91, 0xf4, 0x34, 0x54, 0x80, 0x20, 0x2a, 0x9a, 0x0c, 0x36, 0xc9, 0x77, 0xc7, 0x8f, - 0x20, 0x90, 0xf7, 0x41, 0x32, 0x1f, 0x9f, 0x8d, 0x9c, 0xca, 0x78, 0xe4, 0xa0, 0x29, 0x46, 0x4b, - 0x55, 0xfc, 0x00, 0xd5, 0x44, 0x0a, 0x81, 0xbd, 0xa2, 0xd5, 0x3f, 0x27, 0x4b, 0x6e, 0x88, 0x5c, - 0x9b, 0xeb, 0x41, 0x0a, 0x81, 0xbf, 0x66, 0xbc, 0x6a, 0x6a, 0x45, 0xb5, 0x32, 0x3e, 0x41, 0x0d, - 0x21, 0x99, 0xcc, 0x85, 0x5d, 0xd5, 0x1e, 0x5f, 0xbc, 0x86, 0x87, 0xd6, 0xf1, 0xd7, 0x8d, 0x4b, - 0xa3, 0x58, 0x53, 0xa3, 0xef, 0xbe, 0xa8, 0x22, 0xf7, 0x5a, 0xee, 0x4e, 0x12, 0xf7, 0xb9, 0xe4, - 0x49, 0x8c, 0xb7, 0x50, 0x4d, 0x0e, 0x53, 0xd0, 0x05, 0x6d, 0xf9, 0xef, 0x4c, 0x52, 0x3e, 0x1c, - 0xa6, 0x70, 0x31, 0x72, 0xde, 0x9c, 0x8f, 0x57, 0x38, 0xd5, 0x0c, 0xbc, 0x5f, 0x1e, 0xa5, 0xa1, - 0xb9, 0xf7, 0x2e, 0x27, 0x72, 0x31, 0x72, 0xae, 0xe8, 0x48, 0x52, 0x2a, 0x5d, 0x4e, 0x17, 0xbf, - 0x87, 0x1a, 0x19, 0x30, 
0x91, 0xc4, 0xba, 0xf8, 0xad, 0xe9, 0xb1, 0xa8, 0x46, 0xa9, 0xd9, 0xc5, - 0xef, 0xa3, 0xd5, 0x08, 0x84, 0x60, 0x21, 0xe8, 0x0a, 0xb6, 0xfc, 0x1b, 0x26, 0x70, 0xf5, 0x7e, - 0x01, 0xd3, 0xc9, 0x3e, 0x7e, 0x84, 0xd6, 0x1f, 0x33, 0x21, 0x8f, 0xd2, 0x3e, 0x93, 0x70, 0xc8, - 0x23, 0xb0, 0x6b, 0xba, 0xe6, 0x1f, 0xbc, 0x5c, 0xd7, 0x28, 0x86, 0x7f, 0xdb, 0xa8, 0xaf, 0xef, - 0x5f, 0x52, 0xa2, 0x73, 0xca, 0x78, 0x80, 0xb0, 0x42, 0x0e, 0x33, 0x16, 0x8b, 0xa2, 0x50, 0xca, - 0xaf, 0xfe, 0xbf, 0xfd, 0x3a, 0xc6, 0x0f, 0xef, 0x2f, 0xa8, 0xd1, 0x2b, 0x1c, 0xdc, 0x91, 0x85, - 0xee, 0x5c, 0x7b, 0xcb, 0xfb, 0x5c, 0x48, 0xfc, 0xe3, 0xc2, 0xd4, 0x90, 0x97, 0xcb, 0x47, 0xb1, - 0xf5, 0xcc, 0xdc, 0x34, 0x39, 0x35, 0x27, 0xc8, 0xcc, 0xc4, 0xfc, 0x84, 0xea, 0x5c, 0x42, 0x24, - 0xec, 0x95, 0x5e, 0x75, 0xa3, 0x7d, 0x77, 0xfb, 0xd5, 0xdb, 0xd9, 0x7f, 0xc3, 0xd8, 0xd4, 0xf7, - 0x94, 0x20, 0x2d, 0x74, 0xdd, 0x3f, 0x6a, 0xff, 0x71, 0x40, 0x35, 0x58, 0xf8, 0x5d, 0xb4, 0x9a, - 0x15, 0x4b, 0x7d, 0xbe, 0x35, 0xbf, 0xad, 0xba, 0xc1, 0x44, 0xd0, 0xc9, 0x1e, 0x26, 0x08, 0x09, - 0x1e, 0xc6, 0x90, 0x7d, 0xcb, 0x22, 0xb0, 0x57, 0x8b, 0x26, 0x53, 0x2f, 0xc1, 0x41, 0x89, 0xd2, - 0x99, 0x08, 0xbc, 0x83, 0x6e, 0xc1, 0x93, 0x94, 0x67, 0x4c, 0x37, 0x2b, 0x04, 0x49, 0xdc, 0x17, - 0x76, 0xb3, 0x67, 0x6d, 0xd4, 0xfd, 0xb7, 0xc6, 0x23, 0xe7, 0xd6, 0xee, 0xfc, 0x26, 0x5d, 0x8c, - 0xc7, 0x04, 0x35, 0x72, 0xd5, 0x8b, 0xc2, 0xae, 0xf7, 0xaa, 0x1b, 0x2d, 0xff, 0xb6, 0xea, 0xe8, - 0x23, 0x8d, 0x5c, 0x8c, 0x9c, 0xe6, 0x37, 0x30, 0xd4, 0x0b, 0x6a, 0xa2, 0xf0, 0x47, 0xa8, 0x99, - 0x0b, 0xc8, 0x62, 0x95, 0x62, 0x31, 0x07, 0x65, 0xf1, 0x8f, 0x0c, 0x4e, 0xcb, 0x08, 0x7c, 0x07, - 0x55, 0x73, 0xde, 0x37, 0x73, 0xd0, 0x36, 0x81, 0xd5, 0xa3, 0xbd, 0xaf, 0xa8, 0xc2, 0xb1, 0x8b, - 0x1a, 0x61, 0x96, 0xe4, 0xa9, 0xb0, 0x6b, 0xda, 0x1c, 0x29, 0xf3, 0xaf, 0x35, 0x42, 0xcd, 0x0e, - 0x8e, 0x51, 0x1d, 0x9e, 0xc8, 0x8c, 0xd9, 0x0d, 0x7d, 0x7f, 0x7b, 0xaf, 0xf7, 0xe4, 0x91, 0x5d, - 0xa5, 0xb5, 0x1b, 0xcb, 0x6c, 0x38, 0xbd, 0x4e, 0x8d, 0xd1, 0xc2, 0xa6, 0x03, 0x08, 0x4d, 0x63, - 0xf0, 0x4d, 0x54, 0x3d, 0x85, 0x61, 0xf1, 0xf6, 0x50, 0xf5, 0x89, 0xbf, 0x44, 0xf5, 0x01, 0x7b, - 0x9c, 0x83, 0x79, 0x82, 0x3f, 0x5c, 0x9a, 0x8f, 0x56, 0xfb, 0x5e, 0x51, 0x68, 0xc1, 0xdc, 0x5e, - 0xd9, 0xb2, 0xdc, 0xbf, 0x2c, 0xe4, 0x2c, 0x79, 0x38, 0xf1, 0x2f, 0x08, 0x05, 0x93, 0xc7, 0x48, - 0xd8, 0x96, 0x3e, 0xff, 0xce, 0xab, 0x9f, 0xbf, 0x7c, 0xd8, 0xa6, 0xbf, 0x31, 0x25, 0x24, 0xe8, - 0x8c, 0x15, 0xde, 0x44, 0xed, 0x19, 0x69, 0x7d, 0xd2, 0x35, 0xff, 0xc6, 0x78, 0xe4, 0xb4, 0x67, - 0xc4, 0xe9, 0x6c, 0x8c, 0xfb, 0x99, 0x29, 0x9b, 0x3e, 0x28, 0x76, 0x26, 0x43, 0x67, 0xe9, 0x7b, - 0x6d, 0xcd, 0x0f, 0xcd, 0x76, 0xf3, 0xb7, 0xdf, 0x9d, 0xca, 0xd3, 0xbf, 0x7b, 0x15, 0x7f, 0xf7, - 0xec, 0xbc, 0x5b, 0x79, 0x76, 0xde, 0xad, 0x3c, 0x3f, 0xef, 0x56, 0x9e, 0x8e, 0xbb, 0xd6, 0xd9, - 0xb8, 0x6b, 0x3d, 0x1b, 0x77, 0xad, 0xe7, 0xe3, 0xae, 0xf5, 0xcf, 0xb8, 0x6b, 0xfd, 0xfa, 0xa2, - 0x5b, 0xf9, 0xc1, 0x59, 0xf2, 0xdf, 0xe5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0x2f, 0x11, - 0xe8, 0xdd, 0x08, 0x00, 0x00, + // 991 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0x9b, 0x3f, 0x4d, 0x26, 0xa5, 0xbb, 0x3b, 0x40, 0x65, 0x22, 0x6d, 0x1c, 0x59, 0x80, + 0xca, 0x3f, 0x9b, 0x96, 0x85, 0xad, 0x7a, 0x40, 0xe0, 0x50, 0xa1, 0x8a, 0x2e, 0x48, 0xd3, 0x16, + 0x01, 0x42, 0x62, 0xa7, 0xce, 0x5b, 0xd7, 0xdb, 0xc6, 0x36, 0x9e, 0x71, 0xd8, 0xdc, 0x56, 0xe2, + 0x0b, 0x70, 0xe4, 0xc8, 0x77, 0xe0, 0x4b, 0x94, 0x03, 0x52, 0xb9, 
0xed, 0x01, 0x45, 0x34, 0xfb, + 0x2d, 0x7a, 0x42, 0x33, 0x9e, 0x38, 0x4e, 0xd2, 0x90, 0xa5, 0x2b, 0xed, 0x2d, 0xf3, 0xe6, 0xfd, + 0x7e, 0xbf, 0xf7, 0x9e, 0xdf, 0x7b, 0x13, 0x64, 0x9f, 0x6c, 0x31, 0xcb, 0x0f, 0x6d, 0x1a, 0xf9, + 0xb6, 0x0b, 0x31, 0xf7, 0x1f, 0xf8, 0x2e, 0xe5, 0xc0, 0xec, 0xde, 0xc6, 0x11, 0x70, 0xba, 0x61, + 0x7b, 0x10, 0x40, 0x4c, 0x39, 0x74, 0xac, 0x28, 0x0e, 0x79, 0x88, 0x8d, 0x14, 0x60, 0xd1, 0xc8, + 0xb7, 0xf2, 0x00, 0x4b, 0x01, 0x1a, 0xef, 0x79, 0x3e, 0x3f, 0x4e, 0x8e, 0x2c, 0x37, 0xec, 0xda, + 0x5e, 0xe8, 0x85, 0xb6, 0xc4, 0x1d, 0x25, 0x0f, 0xe4, 0x49, 0x1e, 0xe4, 0xaf, 0x94, 0xaf, 0x61, + 0xe6, 0x03, 0x08, 0x63, 0xb0, 0x7b, 0x33, 0x9a, 0x8d, 0x3b, 0x63, 0x9f, 0x2e, 0x75, 0x8f, 0xfd, + 0x00, 0xe2, 0xbe, 0x1d, 0x9d, 0x78, 0xc2, 0xc0, 0xec, 0x2e, 0x70, 0x7a, 0x15, 0xca, 0x9e, 0x87, + 0x8a, 0x93, 0x80, 0xfb, 0x5d, 0x98, 0x01, 0x7c, 0xb4, 0x08, 0xc0, 0xdc, 0x63, 0xe8, 0xd2, 0x69, + 0x9c, 0xf9, 0xc7, 0x12, 0x7a, 0xad, 0x3d, 0x2e, 0xc5, 0xbe, 0xef, 0x05, 0x7e, 0xe0, 0x11, 0xf8, + 0x31, 0x01, 0xc6, 0xf1, 0x7d, 0x54, 0x15, 0x11, 0x76, 0x28, 0xa7, 0xba, 0xd6, 0xd2, 0xd6, 0xeb, + 0x9b, 0xef, 0x5b, 0xe3, 0x1a, 0x66, 0x42, 0x56, 0x74, 0xe2, 0x09, 0x03, 0xb3, 0x84, 0xb7, 0xd5, + 0xdb, 0xb0, 0xbe, 0x3a, 0x7a, 0x08, 0x2e, 0xbf, 0x07, 0x9c, 0x3a, 0xf8, 0x6c, 0x60, 0x14, 0x86, + 0x03, 0x03, 0x8d, 0x6d, 0x24, 0x63, 0xc5, 0xf7, 0x51, 0x89, 0x45, 0xe0, 0xea, 0x4b, 0x92, 0xfd, + 0x63, 0x6b, 0xc1, 0x17, 0xb2, 0xe6, 0xc6, 0xba, 0x1f, 0x81, 0xeb, 0xac, 0x28, 0xad, 0x92, 0x38, + 0x11, 0xc9, 0x8c, 0x8f, 0x51, 0x85, 0x71, 0xca, 0x13, 0xa6, 0x17, 0xa5, 0xc6, 0x27, 0xcf, 0xa1, + 0x21, 0x79, 0x9c, 0x55, 0xa5, 0x52, 0x49, 0xcf, 0x44, 0xf1, 0x9b, 0x4f, 0x8b, 0xc8, 0x9c, 0x8b, + 0x6d, 0x87, 0x41, 0xc7, 0xe7, 0x7e, 0x18, 0xe0, 0x2d, 0x54, 0xe2, 0xfd, 0x08, 0x64, 0x41, 0x6b, + 0xce, 0xeb, 0xa3, 0x90, 0x0f, 0xfa, 0x11, 0x5c, 0x0e, 0x8c, 0x57, 0xa6, 0xfd, 0x85, 0x9d, 0x48, + 0x04, 0xde, 0xcb, 0x52, 0xa9, 0x48, 0xec, 0x9d, 0xc9, 0x40, 0x2e, 0x07, 0xc6, 0x15, 0x1d, 0x69, + 0x65, 0x4c, 0x93, 0xe1, 0xe2, 0x37, 0x51, 0x25, 0x06, 0xca, 0xc2, 0x40, 0x16, 0xbf, 0x36, 0x4e, + 0x8b, 0x48, 0x2b, 0x51, 0xb7, 0xf8, 0x2d, 0xb4, 0xdc, 0x05, 0xc6, 0xa8, 0x07, 0xb2, 0x82, 0x35, + 0xe7, 0x86, 0x72, 0x5c, 0xbe, 0x97, 0x9a, 0xc9, 0xe8, 0x1e, 0x3f, 0x44, 0xab, 0xa7, 0x94, 0xf1, + 0xc3, 0xa8, 0x43, 0x39, 0x1c, 0xf8, 0x5d, 0xd0, 0x4b, 0xb2, 0xe6, 0x6f, 0x3f, 0x5b, 0xd7, 0x08, + 0x84, 0xb3, 0xa6, 0xd8, 0x57, 0xf7, 0x26, 0x98, 0xc8, 0x14, 0x33, 0xee, 0x21, 0x2c, 0x2c, 0x07, + 0x31, 0x0d, 0x58, 0x5a, 0x28, 0xa1, 0x57, 0xfe, 0xdf, 0x7a, 0x0d, 0xa5, 0x87, 0xf7, 0x66, 0xd8, + 0xc8, 0x15, 0x0a, 0xe6, 0x40, 0x43, 0xb7, 0xe7, 0x7e, 0xe5, 0x3d, 0x9f, 0x71, 0xfc, 0xfd, 0xcc, + 0xd4, 0x58, 0xcf, 0x16, 0x8f, 0x40, 0xcb, 0x99, 0xb9, 0xa9, 0x62, 0xaa, 0x8e, 0x2c, 0xb9, 0x89, + 0xf9, 0x01, 0x95, 0x7d, 0x0e, 0x5d, 0xa6, 0x2f, 0xb5, 0x8a, 0xeb, 0xf5, 0xcd, 0xed, 0xeb, 0xb7, + 0xb3, 0xf3, 0x92, 0x92, 0x29, 0xef, 0x0a, 0x42, 0x92, 0xf2, 0x9a, 0xbf, 0x97, 0xfe, 0x23, 0x41, + 0x31, 0x58, 0xf8, 0x0d, 0xb4, 0x1c, 0xa7, 0x47, 0x99, 0xdf, 0x8a, 0x53, 0x17, 0xdd, 0xa0, 0x3c, + 0xc8, 0xe8, 0x0e, 0x5b, 0x08, 0x31, 0xdf, 0x0b, 0x20, 0xfe, 0x92, 0x76, 0x41, 0x5f, 0x4e, 0x9b, + 0x4c, 0x6c, 0x82, 0xfd, 0xcc, 0x4a, 0x72, 0x1e, 0xb8, 0x8d, 0x6e, 0xc1, 0xa3, 0xc8, 0x8f, 0xa9, + 0x6c, 0x56, 0x70, 0xc3, 0xa0, 0xc3, 0xf4, 0x6a, 0x4b, 0x5b, 0x2f, 0x3b, 0xaf, 0x0e, 0x07, 0xc6, + 0xad, 0x9d, 0xe9, 0x4b, 0x32, 0xeb, 0x8f, 0x2d, 0x54, 0x49, 0x44, 0x2f, 0x32, 0xbd, 0xdc, 0x2a, + 0xae, 0xd7, 0x9c, 0x35, 0xd1, 0xd1, 0x87, 0xd2, 0x72, 0x39, 0x30, 0xaa, 0x5f, 0x40, 0x5f, 
0x1e, + 0x88, 0xf2, 0xc2, 0xef, 0xa2, 0x6a, 0xc2, 0x20, 0x0e, 0x44, 0x88, 0xe9, 0x1c, 0x64, 0xc5, 0x3f, + 0x54, 0x76, 0x92, 0x79, 0xe0, 0xdb, 0xa8, 0x98, 0xf8, 0x1d, 0x35, 0x07, 0x75, 0xe5, 0x58, 0x3c, + 0xdc, 0xfd, 0x8c, 0x08, 0x3b, 0x36, 0x51, 0xc5, 0x8b, 0xc3, 0x24, 0x62, 0x7a, 0x49, 0x8a, 0x23, + 0x21, 0xfe, 0xb9, 0xb4, 0x10, 0x75, 0x83, 0x03, 0x54, 0x86, 0x47, 0x3c, 0xa6, 0x7a, 0x45, 0x7e, + 0xbf, 0xdd, 0xe7, 0x5b, 0x79, 0xd6, 0x8e, 0xe0, 0xda, 0x09, 0x78, 0xdc, 0x1f, 0x7f, 0x4e, 0x69, + 0x23, 0xa9, 0x4c, 0x03, 0x10, 0x1a, 0xfb, 0xe0, 0x9b, 0xa8, 0x78, 0x02, 0xfd, 0x74, 0xf7, 0x10, + 0xf1, 0x13, 0x7f, 0x8a, 0xca, 0x3d, 0x7a, 0x9a, 0x80, 0x5a, 0xc1, 0xef, 0x2c, 0x8c, 0x47, 0xb2, + 0x7d, 0x2d, 0x20, 0x24, 0x45, 0x6e, 0x2f, 0x6d, 0x69, 0xe6, 0x9f, 0x1a, 0x32, 0x16, 0x2c, 0x4e, + 0xfc, 0x13, 0x42, 0xee, 0x68, 0x19, 0x31, 0x5d, 0x93, 0xf9, 0xb7, 0xaf, 0x9f, 0x7f, 0xb6, 0xd8, + 0xc6, 0x6f, 0x4c, 0x66, 0x62, 0x24, 0x27, 0x85, 0x37, 0x50, 0x3d, 0x47, 0x2d, 0x33, 0x5d, 0x71, + 0x6e, 0x0c, 0x07, 0x46, 0x3d, 0x47, 0x4e, 0xf2, 0x3e, 0xe6, 0x5f, 0x1a, 0xc2, 0xed, 0xd3, 0x84, + 0x71, 0x88, 0x0f, 0xe2, 0x84, 0x71, 0x27, 0x09, 0x3a, 0xa7, 0xf0, 0x02, 0x5e, 0xc4, 0x6f, 0x27, + 0x5e, 0xc4, 0xbb, 0x8b, 0xcb, 0x33, 0x13, 0xe4, 0xbc, 0xa7, 0xd0, 0x3c, 0xd7, 0xd0, 0xda, 0xac, + 0xfb, 0x0b, 0xd8, 0x59, 0xdf, 0x4c, 0xee, 0xac, 0x0f, 0xae, 0x91, 0xd4, 0x9c, 0x65, 0xf5, 0xf3, + 0x95, 0x29, 0xc9, 0x2d, 0xb5, 0x39, 0xb1, 0x7e, 0xd2, 0xd7, 0x36, 0x2b, 0xfd, 0x9c, 0x15, 0xf4, + 0x21, 0xaa, 0xf3, 0x31, 0x8d, 0x5a, 0x08, 0x2f, 0x2b, 0x50, 0x3d, 0xa7, 0x40, 0xf2, 0x7e, 0xe6, + 0x5d, 0x35, 0x63, 0x72, 0x2a, 0xb0, 0x31, 0xca, 0x56, 0x93, 0x4b, 0xa0, 0x36, 0x1d, 0xf4, 0x76, + 0xf5, 0xd7, 0xdf, 0x8c, 0xc2, 0xe3, 0xbf, 0x5b, 0x05, 0x67, 0xe7, 0xec, 0xa2, 0x59, 0x38, 0xbf, + 0x68, 0x16, 0x9e, 0x5c, 0x34, 0x0b, 0x8f, 0x87, 0x4d, 0xed, 0x6c, 0xd8, 0xd4, 0xce, 0x87, 0x4d, + 0xed, 0xc9, 0xb0, 0xa9, 0xfd, 0x33, 0x6c, 0x6a, 0xbf, 0x3c, 0x6d, 0x16, 0xbe, 0x33, 0x16, 0xfc, + 0xd1, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x17, 0xbe, 0xe3, 0x02, 0x0a, 0x0b, 0x00, 0x00, } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { @@ -595,6 +687,129 @@ func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int return len(dAtA) - i, nil } +func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ClusterTrustBundleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TrustBundle) + copy(dAtA[i:], m.TrustBundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TrustBundle))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -755,6 +970,49 @@ func (m *CertificateSigningRequestStatus) Size() (n int) { return n } +func (m *ClusterTrustBundle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterTrustBundleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterTrustBundleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TrustBundle) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m ExtraValue) Size() (n int) { if m == nil { return 0 @@ -862,6 +1120,44 @@ func (this *CertificateSigningRequestStatus) String() string { }, "") return s } +func (this *ClusterTrustBundle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundle{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterTrustBundle{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterTrustBundleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), 
`&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleSpec{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1892,6 +2188,353 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterTrustBundle{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			stringLen |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		intStringLen := int(stringLen)
+		if intStringLen < 0 {
+			return ErrInvalidLengthGenerated
+		}
+		postIndex := iNdEx + intStringLen
+		if postIndex < 0 {
+			return ErrInvalidLengthGenerated
+		}
+		if postIndex > l {
+			return io.ErrUnexpectedEOF
+		}
+		m.TrustBundle = string(dAtA[iNdEx:postIndex])
+		iNdEx = postIndex
+	default:
+		iNdEx = preIndex
+		skippy, err := skipGenerated(dAtA[iNdEx:])
+		if err != nil {
+			return err
+		}
+		if (skippy < 0) || (iNdEx+skippy) < 0 {
+			return ErrInvalidLengthGenerated
+		}
+		if (iNdEx + skippy) > l {
+			return io.ErrUnexpectedEOF
+		}
+		iNdEx += skippy
+	}
+}
+
+if iNdEx > l {
+	return io.ErrUnexpectedEOF
+}
+return nil
+}
 func (m *ExtraValue) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
diff --git a/tools/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/tools/vendor/k8s.io/api/certificates/v1beta1/generated.proto
index f3ec4c06e..7c48270f6 100644
--- a/tools/vendor/k8s.io/api/certificates/v1beta1/generated.proto
+++ b/tools/vendor/k8s.io/api/certificates/v1beta1/generated.proto
@@ -190,6 +190,79 @@ message CertificateSigningRequestStatus {
   optional bytes certificate = 2;
 }
 
+// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
+// (root certificates).
+//
+// ClusterTrustBundle objects are considered to be readable by any authenticated
+// user in the cluster, because they can be mounted by pods using the
+// `clusterTrustBundle` projection. All service accounts have read access to
+// ClusterTrustBundles by default. Users who only have namespace-level access
+// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount
+// that they have access to.
+//
+// It can be optionally associated with a particular signer, in which case it
+// contains one valid set of trust anchors for that signer. Signers may have
+// multiple associated ClusterTrustBundles; each is an independent set of trust
+// anchors for that signer. Admission control is used to enforce that only users
+// with permissions on the signer can create or modify the corresponding bundle.
+message ClusterTrustBundle {
+  // metadata contains the object metadata.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec contains the signer (if any) and trust anchors.
+  optional ClusterTrustBundleSpec spec = 2;
+}
+
+// ClusterTrustBundleList is a collection of ClusterTrustBundle objects
+message ClusterTrustBundleList {
+  // metadata contains the list metadata.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a collection of ClusterTrustBundle objects
+  repeated ClusterTrustBundle items = 2;
+}
+
+// ClusterTrustBundleSpec contains the signer and trust anchors.
+message ClusterTrustBundleSpec {
+  // signerName indicates the associated signer, if any.
+  //
+  // In order to create or update a ClusterTrustBundle that sets signerName,
+  // you must have the following cluster-scoped permission:
+  // group=certificates.k8s.io resource=signers resourceName=<the signer name>
+  // verb=attest.
+  //
+  // If signerName is not empty, then the ClusterTrustBundle object must be
+  // named with the signer name as a prefix (translating slashes to colons).
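+  //
+  // (Illustrative sketch only, not part of the API contract: in Go, using
+  // the standard strings package, the required name prefix can be derived
+  // from a signer name held in a string variable signerName as
+  //
+  //   prefix := strings.ReplaceAll(signerName, "/", ":") + ":"
+  //
+  // and any object name carrying that prefix satisfies this rule.)
+  //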
+  // For example, for the signer name `example.com/foo`, valid
+  // ClusterTrustBundle object names include `example.com:foo:abc` and
+  // `example.com:foo:v1`.
+  //
+  // If signerName is empty, then the ClusterTrustBundle object's name must
+  // not have such a prefix.
+  //
+  // List/watch requests for ClusterTrustBundles can filter on this field
+  // using a `spec.signerName=NAME` field selector.
+  //
+  // +optional
+  optional string signerName = 1;
+
+  // trustBundle contains the individual X.509 trust anchors for this
+  // bundle, as a PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+  //
+  // The data must consist only of PEM certificate blocks that parse as valid
+  // X.509 certificates. Each certificate must include a basic constraints
+  // extension with the CA bit set. The API server will reject objects that
+  // contain duplicate certificates, or that use PEM block headers.
+  //
+  // Users of ClusterTrustBundles, including Kubelet, are free to reorder and
+  // deduplicate certificate blocks in this file according to their own logic,
+  // as well as to drop PEM block headers and inter-block data.
+  optional string trustBundle = 2;
+}
+
 // ExtraValue masks the value so protobuf can generate
 // +protobuf.nullable=true
 // +protobuf.options.(gogoproto.goproto_stringer)=false
diff --git a/tools/vendor/k8s.io/api/certificates/v1beta1/register.go b/tools/vendor/k8s.io/api/certificates/v1beta1/register.go
index b4f3af9b9..800dccd07 100644
--- a/tools/vendor/k8s.io/api/certificates/v1beta1/register.go
+++ b/tools/vendor/k8s.io/api/certificates/v1beta1/register.go
@@ -51,6 +51,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 	scheme.AddKnownTypes(SchemeGroupVersion,
 		&CertificateSigningRequest{},
 		&CertificateSigningRequestList{},
+		&ClusterTrustBundle{},
+		&ClusterTrustBundleList{},
 	)
 
 	// Add the watch version that applies
diff --git a/tools/vendor/k8s.io/api/certificates/v1beta1/types.go b/tools/vendor/k8s.io/api/certificates/v1beta1/types.go
index 7e5a5c198..1ce104807 100644
--- a/tools/vendor/k8s.io/api/certificates/v1beta1/types.go
+++ b/tools/vendor/k8s.io/api/certificates/v1beta1/types.go
@@ -262,3 +262,88 @@ const (
 	UsageMicrosoftSGC KeyUsage = "microsoft sgc"
 	UsageNetscapeSGC  KeyUsage = "netscape sgc"
 )
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
+// (root certificates).
+//
+// ClusterTrustBundle objects are considered to be readable by any authenticated
+// user in the cluster, because they can be mounted by pods using the
+// `clusterTrustBundle` projection. All service accounts have read access to
+// ClusterTrustBundles by default. Users who only have namespace-level access
+// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount
+// that they have access to.
+//
+// It can be optionally associated with a particular signer, in which case it
+// contains one valid set of trust anchors for that signer. Signers may have
+// multiple associated ClusterTrustBundles; each is an independent set of trust
+// anchors for that signer. Admission control is used to enforce that only users
+// with permissions on the signer can create or modify the corresponding bundle.
+type ClusterTrustBundle struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata contains the object metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec contains the signer (if any) and trust anchors.
+	Spec ClusterTrustBundleSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// ClusterTrustBundleSpec contains the signer and trust anchors.
+type ClusterTrustBundleSpec struct {
+	// signerName indicates the associated signer, if any.
+	//
+	// In order to create or update a ClusterTrustBundle that sets signerName,
+	// you must have the following cluster-scoped permission:
+	// group=certificates.k8s.io resource=signers resourceName=<the signer name>
+	// verb=attest.
+	//
+	// If signerName is not empty, then the ClusterTrustBundle object must be
+	// named with the signer name as a prefix (translating slashes to colons).
+	// For example, for the signer name `example.com/foo`, valid
+	// ClusterTrustBundle object names include `example.com:foo:abc` and
+	// `example.com:foo:v1`.
+	//
+	// If signerName is empty, then the ClusterTrustBundle object's name must
+	// not have such a prefix.
+	//
+	// List/watch requests for ClusterTrustBundles can filter on this field
+	// using a `spec.signerName=NAME` field selector.
+	//
+	// +optional
+	SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,opt,name=signerName"`
+
+	// trustBundle contains the individual X.509 trust anchors for this
+	// bundle, as a PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+	//
+	// The data must consist only of PEM certificate blocks that parse as valid
+	// X.509 certificates. Each certificate must include a basic constraints
+	// extension with the CA bit set. The API server will reject objects that
+	// contain duplicate certificates, or that use PEM block headers.
+	//
+	// Users of ClusterTrustBundles, including Kubelet, are free to reorder and
+	// deduplicate certificate blocks in this file according to their own logic,
+	// as well as to drop PEM block headers and inter-block data.
+	TrustBundle string `json:"trustBundle" protobuf:"bytes,2,opt,name=trustBundle"`
+}
+
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterTrustBundleList is a collection of ClusterTrustBundle objects
+type ClusterTrustBundleList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata contains the list metadata.
+	//
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a collection of ClusterTrustBundle objects
+	Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/tools/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
index f9ab1f13d..58c69e54d 100644
--- a/tools/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
+++ b/tools/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
@@ -75,4 +75,34 @@ func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string {
 	return map_CertificateSigningRequestStatus
 }
 
+var map_ClusterTrustBundle = map[string]string{
+	"": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular signer, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.",
+	"metadata": "metadata contains the object metadata.",
+	"spec":     "spec contains the signer (if any) and trust anchors.",
+}
+
+func (ClusterTrustBundle) SwaggerDoc() map[string]string {
+	return map_ClusterTrustBundle
+}
+
+var map_ClusterTrustBundleList = map[string]string{
+	"":         "ClusterTrustBundleList is a collection of ClusterTrustBundle objects",
+	"metadata": "metadata contains the list metadata.",
+	"items":    "items is a collection of ClusterTrustBundle objects",
+}
+
+func (ClusterTrustBundleList) SwaggerDoc() map[string]string {
+	return map_ClusterTrustBundleList
+}
+
+var map_ClusterTrustBundleSpec = map[string]string{
+	"":            "ClusterTrustBundleSpec contains the signer and trust anchors.",
+	"signerName":  "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.",
+	"trustBundle": "trustBundle contains the individual X.509 trust anchors for this bundle, as a PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.",
+}
+
+func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
+	return map_ClusterTrustBundleSpec
+}
+
 // AUTO-GENERATED FUNCTIONS END HERE
diff --git a/tools/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
index a315e2ac6..854e83473 100644
--- a/tools/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
+++ b/tools/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
@@ -188,6 +188,82 @@ func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequest
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle. +func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle { + if in == nil { + return nil + } + out := new(ClusterTrustBundle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterTrustBundle, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList. +func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList { + if in == nil { + return nil + } + out := new(ClusterTrustBundleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec. +func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec { + if in == nil { + return nil + } + out := new(ClusterTrustBundleSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in ExtraValue) DeepCopyInto(out *ExtraValue) { { diff --git a/tools/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go b/tools/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go index 480a32936..062b46f16 100644 --- a/tools/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/tools/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go @@ -72,3 +72,39 @@ func (in *CertificateSigningRequestList) APILifecycleReplacement() schema.GroupV func (in *CertificateSigningRequestList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) { + return 1, 33 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) { + return 1, 36 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) { + return 1, 39 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) { + return 1, 33 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { + return 1, 36 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { + return 1, 39 +} diff --git a/tools/vendor/k8s.io/api/coordination/v1/doc.go b/tools/vendor/k8s.io/api/coordination/v1/doc.go index 9b2fbbda3..82ae6340c 100644 --- a/tools/vendor/k8s.io/api/coordination/v1/doc.go +++ b/tools/vendor/k8s.io/api/coordination/v1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=coordination.k8s.io -package v1 // import "k8s.io/api/coordination/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/coordination/v1alpha2/doc.go b/tools/vendor/k8s.io/api/coordination/v1alpha2/doc.go index 5e6d65530..dff7df47f 100644 --- a/tools/vendor/k8s.io/api/coordination/v1alpha2/doc.go +++ b/tools/vendor/k8s.io/api/coordination/v1alpha2/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=coordination.k8s.io -package v1alpha2 // import "k8s.io/api/coordination/v1alpha2" +package v1alpha2 diff --git a/tools/vendor/k8s.io/api/coordination/v1alpha2/generated.proto b/tools/vendor/k8s.io/api/coordination/v1alpha2/generated.proto index 7e56cd7f9..250c6113e 100644 --- a/tools/vendor/k8s.io/api/coordination/v1alpha2/generated.proto +++ b/tools/vendor/k8s.io/api/coordination/v1alpha2/generated.proto @@ -92,8 +92,6 @@ message LeaseCandidateSpec { // If multiple candidates for the same Lease return different strategies, the strategy provided // by the candidate with the latest BinaryVersion will be used. If there is still conflict, // this is a user error and coordinated leader election will not operate the Lease until resolved. - // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. 
- // +featureGate=CoordinatedLeaderElection // +required optional string strategy = 6; } diff --git a/tools/vendor/k8s.io/api/coordination/v1alpha2/types.go b/tools/vendor/k8s.io/api/coordination/v1alpha2/types.go index 2f53b097a..13e1deb06 100644 --- a/tools/vendor/k8s.io/api/coordination/v1alpha2/types.go +++ b/tools/vendor/k8s.io/api/coordination/v1alpha2/types.go @@ -73,8 +73,6 @@ type LeaseCandidateSpec struct { // If multiple candidates for the same Lease return different strategies, the strategy provided // by the candidate with the latest BinaryVersion will be used. If there is still conflict, // this is a user error and coordinated leader election will not operate the Lease until resolved. - // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. - // +featureGate=CoordinatedLeaderElection // +required Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` } diff --git a/tools/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go index 39534e6ad..f7e29849e 100644 --- a/tools/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go @@ -54,7 +54,7 @@ var map_LeaseCandidateSpec = map[string]string{ "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.", "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", - "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.", } func (LeaseCandidateSpec) SwaggerDoc() map[string]string { diff --git a/tools/vendor/k8s.io/api/coordination/v1beta1/doc.go b/tools/vendor/k8s.io/api/coordination/v1beta1/doc.go index e733411aa..cab8becf6 100644 --- a/tools/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -21,4 +21,4 @@ limitations under the License. 
// +groupName=coordination.k8s.io -package v1beta1 // import "k8s.io/api/coordination/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/tools/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index bea9b8146..52fd4167f 100644 --- a/tools/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/tools/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -74,10 +74,94 @@ func (m *Lease) XXX_DiscardUnknown() { var xxx_messageInfo_Lease proto.InternalMessageInfo +func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} } +func (*LeaseCandidate) ProtoMessage() {} +func (*LeaseCandidate) Descriptor() ([]byte, []int) { + return fileDescriptor_8d4e223b8bb23da3, []int{1} +} +func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidate) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidate.Merge(m, src) +} +func (m *LeaseCandidate) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidate) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidate.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo + +func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} } +func (*LeaseCandidateList) ProtoMessage() {} +func (*LeaseCandidateList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d4e223b8bb23da3, []int{2} +} +func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateList.Merge(m, src) +} +func (m *LeaseCandidateList) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo + +func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} } +func (*LeaseCandidateSpec) ProtoMessage() {} +func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8d4e223b8bb23da3, []int{3} +} +func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateSpec.Merge(m, src) +} +func (m *LeaseCandidateSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo + func (m *LeaseList) Reset() { *m = LeaseList{} } func (*LeaseList) ProtoMessage() {} func (*LeaseList) Descriptor() ([]byte, []int) { - return fileDescriptor_8d4e223b8bb23da3, []int{1} + return fileDescriptor_8d4e223b8bb23da3, []int{4} } func (m *LeaseList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +189,7 @@ var xxx_messageInfo_LeaseList proto.InternalMessageInfo func (m *LeaseSpec) 
Reset() { *m = LeaseSpec{} } func (*LeaseSpec) ProtoMessage() {} func (*LeaseSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_8d4e223b8bb23da3, []int{2} + return fileDescriptor_8d4e223b8bb23da3, []int{5} } func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,6 +216,9 @@ var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease") + proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidate") + proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidateList") + proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidateSpec") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec") } @@ -141,45 +228,54 @@ func init() { } var fileDescriptor_8d4e223b8bb23da3 = []byte{ - // 600 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x4e, - 0x14, 0xc7, 0xb7, 0xb0, 0xfb, 0xfb, 0xb1, 0xb3, 0xf2, 0x27, 0x23, 0x17, 0x0d, 0x17, 0x2d, 0xe1, - 0xc2, 0x10, 0x12, 0xa7, 0x82, 0xc6, 0x18, 0x13, 0x13, 0x2d, 0x9a, 0x48, 0x2c, 0xd1, 0x14, 0xae, - 0x0c, 0x89, 0xce, 0xb6, 0x87, 0xee, 0x08, 0xed, 0xd4, 0x99, 0x59, 0x0c, 0x77, 0x3e, 0x82, 0x4f, - 0xa3, 0xf1, 0x0d, 0xb8, 0xe4, 0x92, 0xab, 0x46, 0xc6, 0xb7, 0xf0, 0xca, 0xcc, 0x6c, 0x61, 0x61, - 0x81, 0xb0, 0xf1, 0x6e, 0xe7, 0x9c, 0xf3, 0xfd, 0x9c, 0xef, 0x9c, 0xb3, 0x53, 0x14, 0xec, 0x3d, - 0x91, 0x84, 0xf1, 0x80, 0x96, 0x2c, 0x48, 0x38, 0x17, 0x29, 0x2b, 0xa8, 0x62, 0xbc, 0x08, 0x0e, - 0x56, 0xbb, 0xa0, 0xe8, 0x6a, 0x90, 0x41, 0x01, 0x82, 0x2a, 0x48, 0x49, 0x29, 0xb8, 0xe2, 0xd8, - 0x1f, 0x08, 0x08, 0x2d, 0x19, 0xb9, 0x28, 0x20, 0xb5, 0x60, 0xe1, 0x7e, 0xc6, 0x54, 0xaf, 0xdf, - 0x25, 0x09, 0xcf, 0x83, 0x8c, 0x67, 0x3c, 0xb0, 0xba, 0x6e, 0x7f, 0xd7, 0x9e, 0xec, 0xc1, 0xfe, - 0x1a, 0xf0, 0x16, 0x56, 0x6e, 0x36, 0x30, 0xda, 0x7b, 0xe1, 0xd1, 0xb0, 0x36, 0xa7, 0x49, 0x8f, - 0x15, 0x20, 0x0e, 0x83, 0x72, 0x2f, 0x33, 0x01, 0x19, 0xe4, 0xa0, 0xe8, 0x75, 0xaa, 0xe0, 0x26, - 0x95, 0xe8, 0x17, 0x8a, 0xe5, 0x70, 0x45, 0xf0, 0xf8, 0x36, 0x81, 0x4c, 0x7a, 0x90, 0xd3, 0x51, - 0xdd, 0xd2, 0x0f, 0x07, 0xb5, 0x22, 0xa0, 0x12, 0xf0, 0x47, 0x34, 0x65, 0xdc, 0xa4, 0x54, 0x51, - 0xd7, 0x59, 0x74, 0x96, 0x3b, 0x6b, 0x0f, 0xc8, 0x70, 0x6e, 0xe7, 0x50, 0x52, 0xee, 0x65, 0x26, - 0x20, 0x89, 0xa9, 0x26, 0x07, 0xab, 0xe4, 0x6d, 0xf7, 0x13, 0x24, 0x6a, 0x13, 0x14, 0x0d, 0xf1, - 0x51, 0xe5, 0x37, 0x74, 0xe5, 0xa3, 0x61, 0x2c, 0x3e, 0xa7, 0xe2, 0x08, 0x35, 0x65, 0x09, 0x89, - 0x3b, 0x61, 0xe9, 0x2b, 0xe4, 0x96, 0xad, 0x10, 0xeb, 0x6b, 0xab, 0x84, 0x24, 0xbc, 0x53, 0x73, - 0x9b, 0xe6, 0x14, 0x5b, 0xca, 0xd2, 0x77, 0x07, 0xb5, 0x6d, 0x45, 0xc4, 0xa4, 0xc2, 0x3b, 0x57, - 0xdc, 0x93, 0xf1, 0xdc, 0x1b, 0xb5, 0xf5, 0x3e, 0x57, 0xf7, 0x98, 0x3a, 0x8b, 0x5c, 0x70, 0xfe, - 0x06, 0xb5, 0x98, 0x82, 0x5c, 0xba, 0x13, 0x8b, 0x93, 0xcb, 0x9d, 0xb5, 0x7b, 0xe3, 0x59, 0x0f, - 0xa7, 0x6b, 0x64, 0x6b, 0xc3, 0x88, 0xe3, 0x01, 0x63, 0xe9, 0x67, 0xb3, 0x36, 0x6e, 0x2e, 0x83, - 0x9f, 0xa2, 0x99, 0x1e, 0xdf, 0x4f, 0x41, 0x6c, 0xa4, 0x50, 0x28, 0xa6, 0x0e, 0xad, 0xfd, 0x76, - 0x88, 0x75, 0xe5, 0xcf, 0xbc, 0xbe, 0x94, 0x89, 0x47, 0x2a, 0x71, 0x84, 0xe6, 0xf7, 0x0d, 0xe8, - 0x65, 0x5f, 0xd8, 0xf6, 0x5b, 0x90, 0xf0, 0x22, 0x95, 0x76, 0xc0, 0xad, 0xd0, 0xd5, 0x95, 0x3f, - 0x1f, 0x5d, 0x93, 0x8f, 0xaf, 0x55, 0xe1, 
0x2e, 0xea, 0xd0, 0xe4, 0x73, 0x9f, 0x09, 0xd8, 0x66, - 0x39, 0xb8, 0x93, 0x76, 0x8a, 0xc1, 0x78, 0x53, 0xdc, 0x64, 0x89, 0xe0, 0x46, 0x16, 0xce, 0xea, - 0xca, 0xef, 0xbc, 0x18, 0x72, 0xe2, 0x8b, 0x50, 0xbc, 0x83, 0xda, 0x02, 0x0a, 0xf8, 0x62, 0x3b, - 0x34, 0xff, 0xad, 0xc3, 0xb4, 0xae, 0xfc, 0x76, 0x7c, 0x46, 0x89, 0x87, 0x40, 0xfc, 0x1c, 0xcd, - 0xd9, 0x9b, 0x6d, 0x0b, 0x5a, 0x48, 0x66, 0xee, 0x26, 0xdd, 0x96, 0x9d, 0xc5, 0xbc, 0xae, 0xfc, - 0xb9, 0x68, 0x24, 0x17, 0x5f, 0xa9, 0xc6, 0x1f, 0xd0, 0x94, 0x54, 0xe6, 0x7d, 0x64, 0x87, 0xee, - 0x7f, 0x76, 0x0f, 0xeb, 0xe6, 0x2f, 0xb1, 0x55, 0xc7, 0xfe, 0x54, 0xfe, 0xc3, 0x9b, 0xdf, 0x3e, - 0x59, 0x3f, 0x3b, 0x43, 0x3a, 0x58, 0x70, 0x2d, 0x8b, 0xcf, 0xa1, 0xf8, 0x19, 0x9a, 0x2d, 0x05, - 0xec, 0x82, 0x10, 0x90, 0x0e, 0xb6, 0xeb, 0xfe, 0x6f, 0xfb, 0xdc, 0xd5, 0x95, 0x3f, 0xfb, 0xee, - 0x72, 0x2a, 0x1e, 0xad, 0x0d, 0x5f, 0x1d, 0x9d, 0x7a, 0x8d, 0xe3, 0x53, 0xaf, 0x71, 0x72, 0xea, - 0x35, 0xbe, 0x6a, 0xcf, 0x39, 0xd2, 0x9e, 0x73, 0xac, 0x3d, 0xe7, 0x44, 0x7b, 0xce, 0x2f, 0xed, - 0x39, 0xdf, 0x7e, 0x7b, 0x8d, 0xf7, 0xfe, 0x2d, 0x1f, 0xc8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x57, 0x93, 0xf3, 0xef, 0x42, 0x05, 0x00, 0x00, + // 750 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x4e, 0x1b, 0x39, + 0x18, 0xcd, 0x40, 0xb2, 0x9b, 0x38, 0x04, 0xb2, 0x5e, 0x56, 0x1a, 0x71, 0x31, 0x83, 0x72, 0xb1, + 0x42, 0x48, 0xeb, 0x59, 0x60, 0xb5, 0x5a, 0x6d, 0x55, 0xa9, 0x1d, 0x40, 0x2d, 0x6a, 0x68, 0x91, + 0xa1, 0x95, 0x5a, 0x21, 0xb5, 0xce, 0x8c, 0x99, 0xb8, 0x30, 0x3f, 0xf5, 0x38, 0x54, 0xb9, 0xeb, + 0x23, 0xf4, 0x69, 0x5a, 0xf5, 0x0d, 0xd2, 0x3b, 0x2e, 0xb9, 0x8a, 0xca, 0x54, 0xea, 0x43, 0xf4, + 0xaa, 0xb2, 0x33, 0xf9, 0x27, 0x22, 0x6d, 0x11, 0x77, 0xf1, 0xf7, 0x9d, 0x73, 0xfc, 0x1d, 0xfb, + 0x38, 0x1a, 0x60, 0x1d, 0xff, 0x17, 0x23, 0x16, 0x5a, 0x24, 0x62, 0x96, 0x13, 0x86, 0xdc, 0x65, + 0x01, 0x11, 0x2c, 0x0c, 0xac, 0xd3, 0xb5, 0x1a, 0x15, 0x64, 0xcd, 0xf2, 0x68, 0x40, 0x39, 0x11, + 0xd4, 0x45, 0x11, 0x0f, 0x45, 0x08, 0xcd, 0x0e, 0x01, 0x91, 0x88, 0xa1, 0x41, 0x02, 0x4a, 0x09, + 0x4b, 0x7f, 0x79, 0x4c, 0xd4, 0x1b, 0x35, 0xe4, 0x84, 0xbe, 0xe5, 0x85, 0x5e, 0x68, 0x29, 0x5e, + 0xad, 0x71, 0xa4, 0x56, 0x6a, 0xa1, 0x7e, 0x75, 0xf4, 0x96, 0x56, 0x27, 0x0f, 0x30, 0xba, 0xf7, + 0xd2, 0x3f, 0x7d, 0xac, 0x4f, 0x9c, 0x3a, 0x0b, 0x28, 0x6f, 0x5a, 0xd1, 0xb1, 0x27, 0x0b, 0xb1, + 0xe5, 0x53, 0x41, 0x2e, 0x63, 0x59, 0x93, 0x58, 0xbc, 0x11, 0x08, 0xe6, 0xd3, 0x31, 0xc2, 0xbf, + 0x57, 0x11, 0x62, 0xa7, 0x4e, 0x7d, 0x32, 0xca, 0xab, 0xbc, 0xd7, 0x40, 0xae, 0x4a, 0x49, 0x4c, + 0xe1, 0x0b, 0x90, 0x97, 0xd3, 0xb8, 0x44, 0x10, 0x5d, 0x5b, 0xd6, 0x56, 0x8a, 0xeb, 0x7f, 0xa3, + 0xfe, 0xb9, 0xf5, 0x44, 0x51, 0x74, 0xec, 0xc9, 0x42, 0x8c, 0x24, 0x1a, 0x9d, 0xae, 0xa1, 0x47, + 0xb5, 0x97, 0xd4, 0x11, 0xbb, 0x54, 0x10, 0x1b, 0xb6, 0xda, 0x66, 0x26, 0x69, 0x9b, 0xa0, 0x5f, + 0xc3, 0x3d, 0x55, 0x58, 0x05, 0xd9, 0x38, 0xa2, 0x8e, 0x3e, 0xa3, 0xd4, 0x57, 0xd1, 0x15, 0xb7, + 0x82, 0xd4, 0x5c, 0xfb, 0x11, 0x75, 0xec, 0xb9, 0x54, 0x37, 0x2b, 0x57, 0x58, 0xa9, 0x54, 0x3e, + 0x6a, 0x60, 0x5e, 0x21, 0x36, 0x49, 0xe0, 0x32, 0x97, 0x88, 0x9b, 0xb0, 0xf0, 0x78, 0xc8, 0xc2, + 0xc6, 0x74, 0x16, 0x7a, 0x03, 0x4e, 0xf4, 0xd2, 0xd2, 0x00, 0x1c, 0x86, 0x56, 0x59, 0x2c, 0xe0, + 0xe1, 0x98, 0x1f, 0x34, 0x9d, 0x1f, 0xc9, 0x56, 0x6e, 0xca, 0xe9, 0x66, 0xf9, 0x6e, 0x65, 0xc0, + 0xcb, 0x01, 0xc8, 0x31, 0x41, 0xfd, 0x58, 0x9f, 0x59, 0x9e, 0x5d, 0x29, 0xae, 0x5b, 0xdf, 0x69, + 0xc6, 0x2e, 0xa5, 0xda, 0xb9, 0x1d, 0xa9, 0x82, 0x3b, 0x62, 0x95, 
0x2f, 0xb3, 0xa3, 0x56, 0xa4, + 0x4f, 0x68, 0x81, 0xc2, 0x89, 0xac, 0x3e, 0x24, 0x3e, 0x55, 0x5e, 0x0a, 0xf6, 0x6f, 0x29, 0xbf, + 0x50, 0xed, 0x36, 0x70, 0x1f, 0x03, 0x9f, 0x82, 0x7c, 0xc4, 0x02, 0xef, 0x80, 0xf9, 0x34, 0x3d, + 0x6d, 0x6b, 0x3a, 0xef, 0xbb, 0xcc, 0xe1, 0xa1, 0xa4, 0xd9, 0x73, 0xd2, 0xf8, 0x5e, 0x2a, 0x82, + 0x7b, 0x72, 0xf0, 0x10, 0x14, 0x38, 0x0d, 0xe8, 0x6b, 0xa5, 0x3d, 0xfb, 0x63, 0xda, 0x25, 0x39, + 0x38, 0xee, 0xaa, 0xe0, 0xbe, 0x20, 0xbc, 0x05, 0x4a, 0x35, 0x16, 0x10, 0xde, 0x7c, 0x42, 0x79, + 0xcc, 0xc2, 0x40, 0xcf, 0x2a, 0xb7, 0x7f, 0xa4, 0x6e, 0x4b, 0xf6, 0x60, 0x13, 0x0f, 0x63, 0xe1, + 0x16, 0x28, 0x53, 0xbf, 0x71, 0xa2, 0xce, 0xbd, 0xcb, 0xcf, 0x29, 0xbe, 0x9e, 0xf2, 0xcb, 0xdb, + 0x23, 0x7d, 0x3c, 0xc6, 0x80, 0x0e, 0xc8, 0xc7, 0x42, 0xbe, 0x72, 0xaf, 0xa9, 0xff, 0xa2, 0xd8, + 0xf7, 0xba, 0x39, 0xd8, 0x4f, 0xeb, 0x5f, 0xdb, 0xe6, 0xc6, 0xe4, 0x7f, 0x31, 0xb4, 0xd9, 0x5d, + 0x53, 0xb7, 0xf3, 0x0a, 0x53, 0x1a, 0xee, 0x09, 0x57, 0xde, 0x69, 0xa0, 0x73, 0x73, 0x37, 0x10, + 0xd5, 0x07, 0xc3, 0x51, 0xfd, 0x73, 0xba, 0xa8, 0x4e, 0x48, 0xe8, 0x87, 0x6c, 0x3a, 0xb8, 0x0a, + 0xe6, 0xff, 0x60, 0xbe, 0x1e, 0x9e, 0xb8, 0x94, 0xef, 0xb8, 0x34, 0x10, 0x4c, 0x34, 0xd3, 0x74, + 0xc2, 0xa4, 0x6d, 0xce, 0xdf, 0x1f, 0xea, 0xe0, 0x11, 0x24, 0xac, 0x82, 0x45, 0x15, 0xd8, 0xad, + 0x06, 0x57, 0xdb, 0xef, 0x53, 0x27, 0x0c, 0xdc, 0x58, 0xe5, 0x35, 0x67, 0xeb, 0x49, 0xdb, 0x5c, + 0xac, 0x5e, 0xd2, 0xc7, 0x97, 0xb2, 0x60, 0x0d, 0x14, 0x89, 0xf3, 0xaa, 0xc1, 0x38, 0xfd, 0x99, + 0x60, 0x2e, 0x24, 0x6d, 0xb3, 0x78, 0xb7, 0xaf, 0x83, 0x07, 0x45, 0x87, 0xa3, 0x9f, 0xbd, 0xee, + 0xe8, 0xdf, 0x01, 0x65, 0xe5, 0xec, 0x80, 0x93, 0x20, 0x66, 0xd2, 0x5b, 0xac, 0xd2, 0x9b, 0xb3, + 0x17, 0x65, 0x72, 0xab, 0x23, 0x3d, 0x3c, 0x86, 0x86, 0xcf, 0xc7, 0x92, 0xbb, 0x79, 0xad, 0xa9, + 0x85, 0xb7, 0xc1, 0x42, 0xc4, 0xe9, 0x11, 0xe5, 0x9c, 0xba, 0x9d, 0xdb, 0xd5, 0x7f, 0x55, 0xfb, + 0xfc, 0x9e, 0xb4, 0xcd, 0x85, 0xbd, 0xe1, 0x16, 0x1e, 0xc5, 0xda, 0xdb, 0xad, 0x0b, 0x23, 0x73, + 0x76, 0x61, 0x64, 0xce, 0x2f, 0x8c, 0xcc, 0x9b, 0xc4, 0xd0, 0x5a, 0x89, 0xa1, 0x9d, 0x25, 0x86, + 0x76, 0x9e, 0x18, 0xda, 0xa7, 0xc4, 0xd0, 0xde, 0x7e, 0x36, 0x32, 0xcf, 0xcc, 0x2b, 0x3e, 0x50, + 0xbe, 0x05, 0x00, 0x00, 0xff, 0xff, 0xff, 0x56, 0x51, 0x57, 0xc2, 0x08, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { @@ -225,6 +321,163 @@ func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *LeaseCandidateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0x32 + i -= len(m.EmulationVersion) + copy(dAtA[i:], m.EmulationVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.BinaryVersion) + copy(dAtA[i:], m.BinaryVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BinaryVersion))) + i-- + dAtA[i] = 0x22 + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.PingTime != nil { + { + size, err := m.PingTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.LeaseName) + copy(dAtA[i:], m.LeaseName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LeaseName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -374,6 +627,61 @@ func (m *Lease) Size() (n int) { return n } +func (m *LeaseCandidate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LeaseCandidateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LeaseCandidateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LeaseName) + n += 1 + l + sovGenerated(uint64(l)) + if m.PingTime != nil { + l = m.PingTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.BinaryVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EmulationVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + 
return n +} + func (m *LeaseList) Size() (n int) { if m == nil { return 0 @@ -443,6 +751,48 @@ func (this *Lease) String() string { }, "") return s } +func (this *LeaseCandidate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseCandidateSpec", "LeaseCandidateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LeaseCandidate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LeaseCandidate", "LeaseCandidate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LeaseCandidateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidateSpec{`, + `LeaseName:` + fmt.Sprintf("%v", this.LeaseName) + `,`, + `PingTime:` + strings.Replace(fmt.Sprintf("%v", this.PingTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`, + `EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `}`, + }, "") + return s +} func (this *LeaseList) String() string { if this == nil { return "nil" @@ -599,6 +949,489 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } return nil } +func (m *LeaseCandidate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LeaseCandidate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeaseName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PingTime == nil { + m.PingTime = &v1.MicroTime{} + } + if err := m.PingTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RenewTime == nil { + m.RenewTime = &v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinaryVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmulationVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EmulationVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LeaseList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/tools/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/tools/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 088811a74..7ca043f52 100644 --- a/tools/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/tools/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -41,6 +41,75 @@ message Lease { optional LeaseSpec spec = 2; } +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +message LeaseCandidate { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional LeaseCandidateSpec spec = 2; +} + +// LeaseCandidateList is a list of Lease objects. +message LeaseCandidateList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. + repeated LeaseCandidate items = 2; +} + +// LeaseCandidateSpec is a specification of a Lease. +message LeaseCandidateSpec { + // LeaseName is the name of the lease for which this candidate is contending. + // The limits on this field are the same as on Lease.name. Multiple lease candidates + // may reference the same Lease.name. + // This field is immutable. + // +required + optional string leaseName = 1; + + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. 
It is only done during leader election to check if any + // LeaseCandidates have become ineligible. When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime pingTime = 2; + + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3; + + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required. + // +required + optional string binaryVersion = 4; + + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + optional string emulationVersion = 5; + + // Strategy is the strategy that coordinated leader election will use for picking the leader. + // If multiple candidates for the same Lease return different strategies, the strategy provided + // by the candidate with the latest BinaryVersion will be used. If there is still conflict, + // this is a user error and coordinated leader election will not operate the Lease until resolved. + // +required + optional string strategy = 6; +} + // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. diff --git a/tools/vendor/k8s.io/api/coordination/v1beta1/register.go b/tools/vendor/k8s.io/api/coordination/v1beta1/register.go index 85efaa64e..bd0016423 100644 --- a/tools/vendor/k8s.io/api/coordination/v1beta1/register.go +++ b/tools/vendor/k8s.io/api/coordination/v1beta1/register.go @@ -46,6 +46,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Lease{}, &LeaseList{}, + &LeaseCandidate{}, + &LeaseCandidateList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/tools/vendor/k8s.io/api/coordination/v1beta1/types.go b/tools/vendor/k8s.io/api/coordination/v1beta1/types.go index d63fc30a9..781d29efc 100644 --- a/tools/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/tools/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -91,3 +91,76 @@ type LeaseList struct { // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.33 + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +type LeaseCandidate struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the specification of the Lease. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LeaseCandidateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseCandidateSpec is a specification of a Lease. +type LeaseCandidateSpec struct { + // LeaseName is the name of the lease for which this candidate is contending. + // The limits on this field are the same as on Lease.name. Multiple lease candidates + // may reference the same Lease.name. + // This field is immutable. + // +required + LeaseName string `json:"leaseName" protobuf:"bytes,1,name=leaseName"` + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + PingTime *metav1.MicroTime `json:"pingTime,omitempty" protobuf:"bytes,2,opt,name=pingTime"` + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"` + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required. + // +required + BinaryVersion string `json:"binaryVersion" protobuf:"bytes,4,name=binaryVersion"` + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"` + // Strategy is the strategy that coordinated leader election will use for picking the leader. + // If multiple candidates for the same Lease return different strategies, the strategy provided + // by the candidate with the latest BinaryVersion will be used. If there is still conflict, + // this is a user error and coordinated leader election will not operate the Lease until resolved. + // +required + Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.33 + +// LeaseCandidateList is a list of Lease objects. +type LeaseCandidateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. 
+ Items []LeaseCandidate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/tools/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index 50fe8ea18..35812b77f 100644 --- a/tools/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -37,6 +37,40 @@ func (Lease) SwaggerDoc() map[string]string { return map_Lease } +var map_LeaseCandidate = map[string]string{ + "": "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (LeaseCandidate) SwaggerDoc() map[string]string { + return map_LeaseCandidate +} + +var map_LeaseCandidateList = map[string]string{ + "": "LeaseCandidateList is a list of Lease objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (LeaseCandidateList) SwaggerDoc() map[string]string { + return map_LeaseCandidateList +} + +var map_LeaseCandidateSpec = map[string]string{ + "": "LeaseCandidateSpec is a specification of a Lease.", + "leaseName": "LeaseName is the name of the lease for which this candidate is contending. The limits on this field are the same as on Lease.name. Multiple lease candidates may reference the same Lease.name. This field is immutable.", + "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.", + "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", + "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.", + "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", + "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. 
If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved.", +} + +func (LeaseCandidateSpec) SwaggerDoc() map[string]string { + return map_LeaseCandidateSpec +} + var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/tools/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index dcef1e346..b990ee247 100644 --- a/tools/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -53,6 +53,90 @@ func (in *Lease) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate. +func (in *LeaseCandidate) DeepCopy() *LeaseCandidate { + if in == nil { + return nil + } + out := new(LeaseCandidate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LeaseCandidate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList. +func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList { + if in == nil { + return nil + } + out := new(LeaseCandidateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) { + *out = *in + if in.PingTime != nil { + in, out := &in.PingTime, &out.PingTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec. +func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec { + if in == nil { + return nil + } + out := new(LeaseCandidateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LeaseList) DeepCopyInto(out *LeaseList) { *out = *in diff --git a/tools/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go b/tools/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go index 18926aa10..73636edfa 100644 --- a/tools/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/tools/vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go @@ -49,6 +49,42 @@ func (in *Lease) APILifecycleRemoved() (major, minor int) { return 1, 22 } +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) { + return 1, 33 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) { + return 1, 36 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) { + return 1, 39 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) { + return 1, 33 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) { + return 1, 36 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) { + return 1, 39 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *LeaseList) APILifecycleIntroduced() (major, minor int) { diff --git a/tools/vendor/k8s.io/api/core/v1/doc.go b/tools/vendor/k8s.io/api/core/v1/doc.go index bc0041b33..e4e9196ae 100644 --- a/tools/vendor/k8s.io/api/core/v1/doc.go +++ b/tools/vendor/k8s.io/api/core/v1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName= // Package v1 is the v1 version of the core API. 
-package v1 // import "k8s.io/api/core/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/core/v1/generated.pb.go b/tools/vendor/k8s.io/api/core/v1/generated.pb.go index 9d466c6d7..a4b8f5842 100644 --- a/tools/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/tools/vendor/k8s.io/api/core/v1/generated.pb.go @@ -3213,10 +3213,38 @@ func (m *NodeStatus) XXX_DiscardUnknown() { var xxx_messageInfo_NodeStatus proto.InternalMessageInfo +func (m *NodeSwapStatus) Reset() { *m = NodeSwapStatus{} } +func (*NodeSwapStatus) ProtoMessage() {} +func (*NodeSwapStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{113} +} +func (m *NodeSwapStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSwapStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSwapStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSwapStatus.Merge(m, src) +} +func (m *NodeSwapStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeSwapStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSwapStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeSwapStatus proto.InternalMessageInfo + func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{113} + return fileDescriptor_6c07b07c062484ab, []int{114} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3272,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{114} + return fileDescriptor_6c07b07c062484ab, []int{115} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3300,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{115} + return fileDescriptor_6c07b07c062484ab, []int{116} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3328,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{116} + return fileDescriptor_6c07b07c062484ab, []int{117} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3356,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{117} + return fileDescriptor_6c07b07c062484ab, []int{118} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = 
PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{118} + return fileDescriptor_6c07b07c062484ab, []int{119} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{119} + return fileDescriptor_6c07b07c062484ab, []int{120} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{120} + return fileDescriptor_6c07b07c062484ab, []int{121} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{121} + return fileDescriptor_6c07b07c062484ab, []int{122} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{122} + return fileDescriptor_6c07b07c062484ab, []int{123} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{123} + return fileDescriptor_6c07b07c062484ab, []int{124} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{124} + return fileDescriptor_6c07b07c062484ab, []int{125} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func 
(*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{125} + return fileDescriptor_6c07b07c062484ab, []int{126} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{126} + return fileDescriptor_6c07b07c062484ab, []int{127} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{127} + return fileDescriptor_6c07b07c062484ab, []int{128} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3664,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{128} + return fileDescriptor_6c07b07c062484ab, []int{129} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3692,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{129} + return fileDescriptor_6c07b07c062484ab, []int{130} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3720,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{130} + return fileDescriptor_6c07b07c062484ab, []int{131} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3748,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{131} + return fileDescriptor_6c07b07c062484ab, []int{132} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3776,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{132} + return fileDescriptor_6c07b07c062484ab, []int{133} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3804,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func 
(*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{133} + return fileDescriptor_6c07b07c062484ab, []int{134} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3832,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{134} + return fileDescriptor_6c07b07c062484ab, []int{135} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3860,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{135} + return fileDescriptor_6c07b07c062484ab, []int{136} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3888,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{136} + return fileDescriptor_6c07b07c062484ab, []int{137} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3916,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{137} + return fileDescriptor_6c07b07c062484ab, []int{138} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +3944,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{138} + return fileDescriptor_6c07b07c062484ab, []int{139} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +3972,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{139} + return fileDescriptor_6c07b07c062484ab, []int{140} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4000,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{140} + return fileDescriptor_6c07b07c062484ab, []int{141} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4028,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{141} + return fileDescriptor_6c07b07c062484ab, []int{142} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4056,7 @@ 
var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{142} + return fileDescriptor_6c07b07c062484ab, []int{143} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4084,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{143} + return fileDescriptor_6c07b07c062484ab, []int{144} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4112,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{144} + return fileDescriptor_6c07b07c062484ab, []int{145} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4140,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{145} + return fileDescriptor_6c07b07c062484ab, []int{146} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4168,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } func (*PodResourceClaimStatus) ProtoMessage() {} func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{146} + return fileDescriptor_6c07b07c062484ab, []int{147} } func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4196,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{147} + return fileDescriptor_6c07b07c062484ab, []int{148} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4224,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{148} + return fileDescriptor_6c07b07c062484ab, []int{149} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4252,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{149} + return fileDescriptor_6c07b07c062484ab, []int{150} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4280,7 @@ var 
xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{150} + return fileDescriptor_6c07b07c062484ab, []int{151} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4308,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{151} + return fileDescriptor_6c07b07c062484ab, []int{152} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4336,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{152} + return fileDescriptor_6c07b07c062484ab, []int{153} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4364,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{153} + return fileDescriptor_6c07b07c062484ab, []int{154} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4392,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{154} + return fileDescriptor_6c07b07c062484ab, []int{155} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4420,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{155} + return fileDescriptor_6c07b07c062484ab, []int{156} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4448,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{156} + return fileDescriptor_6c07b07c062484ab, []int{157} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4476,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{157} + return fileDescriptor_6c07b07c062484ab, []int{158} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4504,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return 
fileDescriptor_6c07b07c062484ab, []int{158} + return fileDescriptor_6c07b07c062484ab, []int{159} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4532,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{159} + return fileDescriptor_6c07b07c062484ab, []int{160} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4560,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{160} + return fileDescriptor_6c07b07c062484ab, []int{161} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4588,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{161} + return fileDescriptor_6c07b07c062484ab, []int{162} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4616,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{162} + return fileDescriptor_6c07b07c062484ab, []int{163} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4644,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{163} + return fileDescriptor_6c07b07c062484ab, []int{164} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4672,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{164} + return fileDescriptor_6c07b07c062484ab, []int{165} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4700,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{165} + return fileDescriptor_6c07b07c062484ab, []int{166} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4728,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - 
return fileDescriptor_6c07b07c062484ab, []int{166} + return fileDescriptor_6c07b07c062484ab, []int{167} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4756,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{167} + return fileDescriptor_6c07b07c062484ab, []int{168} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4784,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{168} + return fileDescriptor_6c07b07c062484ab, []int{169} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4812,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{169} + return fileDescriptor_6c07b07c062484ab, []int{170} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4840,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{170} + return fileDescriptor_6c07b07c062484ab, []int{171} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4868,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{171} + return fileDescriptor_6c07b07c062484ab, []int{172} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4896,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{172} + return fileDescriptor_6c07b07c062484ab, []int{173} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4924,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{173} + return fileDescriptor_6c07b07c062484ab, []int{174} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4952,7 @@ var 
xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{174} + return fileDescriptor_6c07b07c062484ab, []int{175} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +4980,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceHealth) Reset() { *m = ResourceHealth{} } func (*ResourceHealth) ProtoMessage() {} func (*ResourceHealth) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{175} + return fileDescriptor_6c07b07c062484ab, []int{176} } func (m *ResourceHealth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5008,7 @@ var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{176} + return fileDescriptor_6c07b07c062484ab, []int{177} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5036,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{177} + return fileDescriptor_6c07b07c062484ab, []int{178} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5064,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{178} + return fileDescriptor_6c07b07c062484ab, []int{179} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5092,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{179} + return fileDescriptor_6c07b07c062484ab, []int{180} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5120,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{180} + return fileDescriptor_6c07b07c062484ab, []int{181} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5148,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } func (*ResourceStatus) ProtoMessage() {} func (*ResourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{181} + return fileDescriptor_6c07b07c062484ab, []int{182} } func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5176,7 @@ var 
xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
 func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
 func (*SELinuxOptions) ProtoMessage() {}
 func (*SELinuxOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{182}
+	return fileDescriptor_6c07b07c062484ab, []int{183}
 }
 func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5176,7 +5204,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
 func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
 func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
 func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{183}
+	return fileDescriptor_6c07b07c062484ab, []int{184}
 }
 func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5204,7 +5232,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
 func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
 func (*ScaleIOVolumeSource) ProtoMessage() {}
 func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{184}
+	return fileDescriptor_6c07b07c062484ab, []int{185}
 }
 func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5232,7 +5260,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
 func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
 func (*ScopeSelector) ProtoMessage() {}
 func (*ScopeSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{185}
+	return fileDescriptor_6c07b07c062484ab, []int{186}
 }
 func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5260,7 +5288,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
 func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
 func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
 func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{186}
+	return fileDescriptor_6c07b07c062484ab, []int{187}
 }
 func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5288,7 +5316,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
 func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
 func (*SeccompProfile) ProtoMessage() {}
 func (*SeccompProfile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{187}
+	return fileDescriptor_6c07b07c062484ab, []int{188}
 }
 func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5316,7 +5344,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
 func (m *Secret) Reset() { *m = Secret{} }
 func (*Secret) ProtoMessage() {}
 func (*Secret) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{188}
+	return fileDescriptor_6c07b07c062484ab, []int{189}
 }
 func (m *Secret) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5344,7 +5372,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
 func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
 func (*SecretEnvSource) ProtoMessage() {}
 func (*SecretEnvSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{189}
+	return fileDescriptor_6c07b07c062484ab, []int{190}
 }
 func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5372,7 +5400,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
 func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
 func (*SecretKeySelector) ProtoMessage() {}
 func (*SecretKeySelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{190}
+	return fileDescriptor_6c07b07c062484ab, []int{191}
 }
 func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5400,7 +5428,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
 func (m *SecretList) Reset() { *m = SecretList{} }
 func (*SecretList) ProtoMessage() {}
 func (*SecretList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{191}
+	return fileDescriptor_6c07b07c062484ab, []int{192}
 }
 func (m *SecretList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5428,7 +5456,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
 func (m *SecretProjection) Reset() { *m = SecretProjection{} }
 func (*SecretProjection) ProtoMessage() {}
 func (*SecretProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{192}
+	return fileDescriptor_6c07b07c062484ab, []int{193}
 }
 func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5456,7 +5484,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
 func (m *SecretReference) Reset() { *m = SecretReference{} }
 func (*SecretReference) ProtoMessage() {}
 func (*SecretReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{193}
+	return fileDescriptor_6c07b07c062484ab, []int{194}
 }
 func (m *SecretReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5484,7 +5512,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
 func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
 func (*SecretVolumeSource) ProtoMessage() {}
 func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{194}
+	return fileDescriptor_6c07b07c062484ab, []int{195}
 }
 func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5512,7 +5540,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
 func (m *SecurityContext) Reset() { *m = SecurityContext{} }
 func (*SecurityContext) ProtoMessage() {}
 func (*SecurityContext) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{195}
+	return fileDescriptor_6c07b07c062484ab, []int{196}
 }
 func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5540,7 +5568,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
 func (m *SerializedReference) Reset() { *m = SerializedReference{} }
 func (*SerializedReference) ProtoMessage() {}
 func (*SerializedReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{196}
+	return fileDescriptor_6c07b07c062484ab, []int{197}
 }
 func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5568,7 +5596,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
 func (m *Service) Reset() { *m = Service{} }
 func (*Service) ProtoMessage() {}
 func (*Service) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{197}
+	return fileDescriptor_6c07b07c062484ab, []int{198}
 }
 func (m *Service) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5596,7 +5624,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
 func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
 func (*ServiceAccount) ProtoMessage() {}
 func (*ServiceAccount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{198}
+	return fileDescriptor_6c07b07c062484ab, []int{199}
 }
 func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5624,7 +5652,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
 func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
 func (*ServiceAccountList) ProtoMessage() {}
 func (*ServiceAccountList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{199}
+	return fileDescriptor_6c07b07c062484ab, []int{200}
 }
 func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5652,7 +5680,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
 func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
 func (*ServiceAccountTokenProjection) ProtoMessage() {}
 func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{200}
+	return fileDescriptor_6c07b07c062484ab, []int{201}
 }
 func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5680,7 +5708,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
 func (m *ServiceList) Reset() { *m = ServiceList{} }
 func (*ServiceList) ProtoMessage() {}
 func (*ServiceList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{201}
+	return fileDescriptor_6c07b07c062484ab, []int{202}
 }
 func (m *ServiceList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5708,7 +5736,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
 func (m *ServicePort) Reset() { *m = ServicePort{} }
 func (*ServicePort) ProtoMessage() {}
 func (*ServicePort) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{202}
+	return fileDescriptor_6c07b07c062484ab, []int{203}
 }
 func (m *ServicePort) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5736,7 +5764,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
 func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
 func (*ServiceProxyOptions) ProtoMessage() {}
 func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{203}
+	return fileDescriptor_6c07b07c062484ab, []int{204}
 }
 func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5764,7 +5792,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
 func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
 func (*ServiceSpec) ProtoMessage() {}
 func (*ServiceSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{204}
+	return fileDescriptor_6c07b07c062484ab, []int{205}
 }
 func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5792,7 +5820,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
 func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
 func (*ServiceStatus) ProtoMessage() {}
 func (*ServiceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{205}
+	return fileDescriptor_6c07b07c062484ab, []int{206}
 }
 func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5820,7 +5848,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
 func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
 func (*SessionAffinityConfig) ProtoMessage() {}
 func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{206}
+	return fileDescriptor_6c07b07c062484ab, []int{207}
 }
 func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5848,7 +5876,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
 func (m *SleepAction) Reset() { *m = SleepAction{} }
 func (*SleepAction) ProtoMessage() {}
 func (*SleepAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{207}
+	return fileDescriptor_6c07b07c062484ab, []int{208}
 }
 func (m *SleepAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5876,7 +5904,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo
 func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
 func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
 func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{208}
+	return fileDescriptor_6c07b07c062484ab, []int{209}
 }
 func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5904,7 +5932,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
 func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
 func (*StorageOSVolumeSource) ProtoMessage() {}
 func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{209}
+	return fileDescriptor_6c07b07c062484ab, []int{210}
 }
 func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5932,7 +5960,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
 func (m *Sysctl) Reset() { *m = Sysctl{} }
 func (*Sysctl) ProtoMessage() {}
 func (*Sysctl) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{210}
+	return fileDescriptor_6c07b07c062484ab, []int{211}
 }
 func (m *Sysctl) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5960,7 +5988,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
 func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
 func (*TCPSocketAction) ProtoMessage() {}
 func (*TCPSocketAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{211}
+	return fileDescriptor_6c07b07c062484ab, []int{212}
 }
 func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5988,7 +6016,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
 func (m *Taint) Reset() { *m = Taint{} }
 func (*Taint) ProtoMessage() {}
 func (*Taint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{212}
+	return fileDescriptor_6c07b07c062484ab, []int{213}
 }
 func (m *Taint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6016,7 +6044,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
 func (m *Toleration) Reset() { *m = Toleration{} }
 func (*Toleration) ProtoMessage() {}
 func (*Toleration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{213}
+	return fileDescriptor_6c07b07c062484ab, []int{214}
 }
 func (m *Toleration) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6044,7 +6072,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
 func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
 func (*TopologySelectorLabelRequirement) ProtoMessage() {}
 func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{214}
+	return fileDescriptor_6c07b07c062484ab, []int{215}
 }
 func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6072,7 +6100,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
 func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
 func (*TopologySelectorTerm) ProtoMessage() {}
 func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{215}
+	return fileDescriptor_6c07b07c062484ab, []int{216}
 }
 func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6100,7 +6128,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
 func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
 func (*TopologySpreadConstraint) ProtoMessage() {}
 func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{216}
+	return fileDescriptor_6c07b07c062484ab, []int{217}
 }
 func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6128,7 +6156,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
 func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
 func (*TypedLocalObjectReference) ProtoMessage() {}
 func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{217}
+	return fileDescriptor_6c07b07c062484ab, []int{218}
 }
 func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6156,7 +6184,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
 func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
 func (*TypedObjectReference) ProtoMessage() {}
 func (*TypedObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{218}
+	return fileDescriptor_6c07b07c062484ab, []int{219}
 }
 func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6184,7 +6212,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
 func (m *Volume) Reset() { *m = Volume{} }
 func (*Volume) ProtoMessage() {}
 func (*Volume) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{219}
+	return fileDescriptor_6c07b07c062484ab, []int{220}
 }
 func (m *Volume) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6212,7 +6240,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
 func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
 func (*VolumeDevice) ProtoMessage() {}
 func (*VolumeDevice) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{220}
+	return fileDescriptor_6c07b07c062484ab, []int{221}
 }
 func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6240,7 +6268,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
 func (m *VolumeMount) Reset() { *m = VolumeMount{} }
 func (*VolumeMount) ProtoMessage() {}
 func (*VolumeMount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{221}
+	return fileDescriptor_6c07b07c062484ab, []int{222}
 }
 func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6268,7 +6296,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
 func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} }
 func (*VolumeMountStatus) ProtoMessage() {}
 func (*VolumeMountStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{222}
+	return fileDescriptor_6c07b07c062484ab, []int{223}
 }
 func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6296,7 +6324,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo
 func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
 func (*VolumeNodeAffinity) ProtoMessage() {}
 func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{223}
+	return fileDescriptor_6c07b07c062484ab, []int{224}
 }
 func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6324,7 +6352,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
 func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
 func (*VolumeProjection) ProtoMessage() {}
 func (*VolumeProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{224}
+	return fileDescriptor_6c07b07c062484ab, []int{225}
 }
 func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6352,7 +6380,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
 func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} }
 func (*VolumeResourceRequirements) ProtoMessage() {}
 func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{225}
+	return fileDescriptor_6c07b07c062484ab, []int{226}
 }
 func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6380,7 +6408,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
 func (m *VolumeSource) Reset() { *m = VolumeSource{} }
 func (*VolumeSource) ProtoMessage() {}
 func (*VolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{226}
+	return fileDescriptor_6c07b07c062484ab, []int{227}
 }
 func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6408,7 +6436,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
 func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
 func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
 func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{227}
+	return fileDescriptor_6c07b07c062484ab, []int{228}
 }
 func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6436,7 +6464,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
 func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
 func (*WeightedPodAffinityTerm) ProtoMessage() {}
 func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{228}
+	return fileDescriptor_6c07b07c062484ab, []int{229}
 }
 func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6464,7 +6492,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
 func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
 func (*WindowsSecurityContextOptions) ProtoMessage() {}
 func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{229}
+	return fileDescriptor_6c07b07c062484ab, []int{230}
 }
 func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6617,6 +6645,7 @@ func init() {
proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry") + proto.RegisterType((*NodeSwapStatus)(nil), "k8s.io.api.core.v1.NodeSwapStatus") proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") @@ -6758,1015 +6787,1020 @@ func init() { } var fileDescriptor_6c07b07c062484ab = []byte{ - // 16114 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x64, 0xd9, - 0x59, 0x28, 0xa6, 0x9b, 0x59, 0xeb, 0x57, 0xfb, 0xa9, 0x5e, 0xaa, 0x6b, 0xba, 0x3b, 0x7b, 0xee, - 0xcc, 0xf4, 0xf4, 0x6c, 0xd5, 0xea, 0x59, 0x34, 0xad, 0x99, 0xd1, 0x30, 0xb5, 0x76, 0xd7, 0x74, - 0x57, 0x75, 0xce, 0xc9, 0xaa, 0x6e, 0x69, 0x34, 0x12, 0xba, 0x9d, 0x79, 0xaa, 0xea, 0xaa, 0x32, - 0xef, 0xcd, 0xb9, 0xf7, 0x66, 0x75, 0x57, 0x5b, 0x04, 0x20, 0x8c, 0x40, 0x02, 0x47, 0x28, 0x08, - 0x6c, 0x1c, 0x82, 0xe0, 0x07, 0x60, 0x16, 0xcb, 0x60, 0x64, 0x61, 0xc0, 0x88, 0xcd, 0x36, 0x8e, - 0x00, 0xff, 0xc0, 0x98, 0x08, 0x4b, 0x84, 0x09, 0x17, 0x56, 0xe1, 0x08, 0x82, 0x1f, 0x06, 0x82, - 0xf7, 0x7e, 0xbc, 0x57, 0xc1, 0x7b, 0xbc, 0x38, 0xeb, 0x3d, 0xe7, 0x2e, 0x99, 0x59, 0x3d, 0xdd, - 0xa5, 0x91, 0x62, 0xfe, 0x65, 0x9e, 0xef, 0x3b, 0xdf, 0x39, 0xf7, 0xac, 0xdf, 0xf9, 0x56, 0xb0, - 0xb7, 0x2f, 0x87, 0x33, 0xae, 0x7f, 0xd1, 0x69, 0xba, 0x17, 0xab, 0x7e, 0x40, 0x2e, 0xee, 0x5c, - 0xba, 0xb8, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0xcd, 0x34, 0x03, 0x3f, 0xf2, 0x11, 0xe2, 0x38, - 0x33, 0x4e, 0xd3, 0x9d, 0xa1, 0x38, 0x33, 0x3b, 0x97, 0xa6, 0x9f, 0xdb, 0x74, 0xa3, 0xad, 0xd6, - 0xed, 0x99, 0xaa, 0xdf, 0xb8, 0xb8, 0xe9, 0x6f, 0xfa, 0x17, 0x19, 0xea, 0xed, 0xd6, 0x06, 0xfb, - 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xf4, 0x8b, 0x71, 0x33, 0x0d, 0xa7, 0xba, 0xe5, 0x7a, 0x24, - 0xd8, 0xbd, 0xd8, 0xdc, 0xde, 0x64, 0xed, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x95, 0x24, 0x1b, 0x6e, - 0x5b, 0x2b, 0xbc, 0xd8, 0x20, 0x91, 0x93, 0xd1, 0xdd, 0xe9, 0x8b, 0x79, 0xb5, 0x82, 0x96, 0x17, - 0xb9, 0x8d, 0x74, 0x33, 0x1f, 0xe9, 0x54, 0x21, 0xac, 0x6e, 0x91, 0x86, 0x93, 0xaa, 0xf7, 0x42, - 0x5e, 0xbd, 0x56, 0xe4, 0xd6, 0x2f, 0xba, 0x5e, 0x14, 0x46, 0x41, 0xb2, 0x92, 0xfd, 0x2d, 0x0b, - 0xce, 0xcd, 0xde, 0xaa, 0x2c, 0xd6, 0x9d, 0x30, 0x72, 0xab, 0x73, 0x75, 0xbf, 0xba, 0x5d, 0x89, - 0xfc, 0x80, 0xdc, 0xf4, 0xeb, 0xad, 0x06, 0xa9, 0xb0, 0x81, 0x40, 0xcf, 0xc2, 0xc0, 0x0e, 0xfb, - 0xbf, 0xbc, 0x30, 0x65, 0x9d, 0xb3, 0x2e, 0x0c, 0xce, 0x8d, 0xff, 0xe9, 0x5e, 0xe9, 0x43, 0xfb, - 0x7b, 0xa5, 0x81, 0x9b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x1e, 0xfa, 0x36, 0xc2, 0xb5, 0xdd, 0x26, - 0x99, 0x2a, 0x30, 0xdc, 0x51, 0x81, 0xdb, 0xb7, 0x54, 0xa1, 0xa5, 0x58, 0x40, 0xd1, 0x45, 0x18, - 0x6c, 0x3a, 0x41, 0xe4, 0x46, 0xae, 0xef, 0x4d, 0x15, 0xcf, 0x59, 0x17, 0x7a, 0xe7, 0x26, 0x04, - 0xea, 0x60, 0x59, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x76, 0xc3, 0xab, 0xef, 0x4e, - 0xf5, 0x9c, 0xb3, 0x2e, 0x0c, 0xc4, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x2b, 0x05, 0x18, - 0x98, 0xdd, 0xd8, 0x70, 0x3d, 0x37, 0xda, 0x45, 0x37, 0x61, 0xd8, 0xf3, 0x6b, 0x44, 0xfe, 0x67, - 0x5f, 0x31, 0xf4, 0xfc, 0xb9, 0x99, 0xf4, 0x52, 0x9a, 0x59, 0xd5, 0xf0, 0xe6, 0xc6, 0xf7, 0xf7, - 0x4a, 0xc3, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0x43, 0x4d, 0xbf, 0xa6, 0xc8, 
0x16, 0x18, 0xd9, - 0x52, 0x16, 0xd9, 0x72, 0x8c, 0x36, 0x37, 0xb6, 0xbf, 0x57, 0x1a, 0xd2, 0x0a, 0xb0, 0x4e, 0x04, - 0xdd, 0x86, 0x31, 0xfa, 0xd7, 0x8b, 0x5c, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x96, 0x47, 0x57, 0x43, - 0x9d, 0x9b, 0xdc, 0xdf, 0x2b, 0x8d, 0x25, 0x0a, 0x71, 0x92, 0xa0, 0xfd, 0x93, 0x16, 0x8c, 0xcd, - 0x36, 0x9b, 0xb3, 0x41, 0xc3, 0x0f, 0xca, 0x81, 0xbf, 0xe1, 0xd6, 0x09, 0x7a, 0x19, 0x7a, 0x22, - 0x3a, 0x6b, 0x7c, 0x86, 0x1f, 0x13, 0x43, 0xdb, 0x43, 0xe7, 0xea, 0x60, 0xaf, 0x34, 0x99, 0x40, - 0x67, 0x53, 0xc9, 0x2a, 0xa0, 0x37, 0x60, 0xbc, 0xee, 0x57, 0x9d, 0xfa, 0x96, 0x1f, 0x46, 0x02, - 0x2a, 0xa6, 0xfe, 0xd8, 0xfe, 0x5e, 0x69, 0xfc, 0x7a, 0x02, 0x86, 0x53, 0xd8, 0xf6, 0x3d, 0x18, - 0x9d, 0x8d, 0x22, 0xa7, 0xba, 0x45, 0x6a, 0x7c, 0x41, 0xa1, 0x17, 0xa1, 0xc7, 0x73, 0x1a, 0xb2, - 0x33, 0xe7, 0x64, 0x67, 0x56, 0x9d, 0x06, 0xed, 0xcc, 0xf8, 0xba, 0xe7, 0xbe, 0xdb, 0x12, 0x8b, - 0x94, 0x96, 0x61, 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x91, 0x1d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x25, - 0xfa, 0x80, 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x70, 0x76, 0xc7, 0x77, - 0x6b, 0x65, 0xbf, 0x16, 0xa2, 0x6d, 0x18, 0x6b, 0x06, 0x64, 0x83, 0x04, 0xaa, 0x68, 0xca, 0x3a, - 0x57, 0xbc, 0x30, 0xf4, 0xfc, 0x85, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x9d, 0x3b, - 0x29, 0xda, 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0x27, 0x05, 0x38, 0x3e, 0x7b, 0xaf, 0x15, - 0x90, 0x05, 0x37, 0xdc, 0x4e, 0x6e, 0xb8, 0x9a, 0x1b, 0x6e, 0xaf, 0xc6, 0x23, 0xa0, 0x56, 0xfa, - 0x82, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x07, 0xfd, 0xf4, 0xf7, 0x3a, 0x5e, 0x16, 0x9f, 0x3c, 0x29, - 0x90, 0x87, 0x16, 0x9c, 0xc8, 0x59, 0xe0, 0x20, 0x2c, 0x71, 0xd0, 0x0a, 0x0c, 0x55, 0xd9, 0xf9, - 0xb0, 0xb9, 0xe2, 0xd7, 0x08, 0x5b, 0x5b, 0x83, 0x73, 0xcf, 0x50, 0xf4, 0xf9, 0xb8, 0xf8, 0x60, - 0xaf, 0x34, 0xc5, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0xee, 0x3d, 0x8c, - 0x12, 0x64, 0x6c, 0xf5, 0x0b, 0xda, 0xce, 0xed, 0x65, 0x3b, 0x77, 0x38, 0x7b, 0xd7, 0xa2, 0x4b, - 0xd0, 0xb3, 0xed, 0x7a, 0xb5, 0xa9, 0x3e, 0x46, 0xeb, 0x0c, 0x9d, 0xf3, 0x6b, 0xae, 0x57, 0x3b, - 0xd8, 0x2b, 0x4d, 0x18, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0xff, 0xc6, 0x82, 0x12, 0x83, 0x2d, - 0xb9, 0x75, 0x52, 0x26, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0x9f, 0x07, 0x08, 0x49, - 0x35, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x7c, 0x0a, 0xb7, - 0x9c, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x7c, 0xaa, 0x48, 0x00, 0x8e, 0x71, 0x8c, 0xf3, 0xa9, - 0xd8, 0xe9, 0x7c, 0x42, 0x1f, 0x83, 0xb1, 0xb8, 0xb1, 0xb0, 0xe9, 0x54, 0xe5, 0x00, 0xb2, 0x1d, - 0x5c, 0x31, 0x41, 0x38, 0x89, 0x6b, 0xff, 0xb7, 0x96, 0x58, 0x3c, 0xf4, 0xab, 0xdf, 0xe7, 0xdf, - 0x6a, 0xff, 0xae, 0x05, 0xfd, 0x73, 0xae, 0x57, 0x73, 0xbd, 0x4d, 0xf4, 0x19, 0x18, 0xa0, 0x57, - 0x65, 0xcd, 0x89, 0x1c, 0x71, 0x0c, 0x7f, 0x58, 0xdb, 0x5b, 0xea, 0xe6, 0x9a, 0x69, 0x6e, 0x6f, - 0xd2, 0x82, 0x70, 0x86, 0x62, 0xd3, 0xdd, 0x76, 0xe3, 0xf6, 0x67, 0x49, 0x35, 0x5a, 0x21, 0x91, - 0x13, 0x7f, 0x4e, 0x5c, 0x86, 0x15, 0x55, 0x74, 0x0d, 0xfa, 0x22, 0x27, 0xd8, 0x24, 0x91, 0x38, - 0x8f, 0x33, 0xcf, 0x4d, 0x5e, 0x13, 0xd3, 0x1d, 0x49, 0xbc, 0x2a, 0x89, 0x6f, 0xa9, 0x35, 0x56, - 0x15, 0x0b, 0x12, 0xf6, 0x7f, 0xe8, 0x87, 0x53, 0xf3, 0x95, 0xe5, 0x9c, 0x75, 0x75, 0x1e, 0xfa, - 0x6a, 0x81, 0xbb, 0x43, 0x02, 0x31, 0xce, 0x8a, 0xca, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0x2e, 0xc3, - 0x30, 0xbf, 0x1f, 0xaf, 0x3a, 0x5e, 0x2d, 0x3e, 0x1e, 0x05, 0xf6, 0xf0, 0x4d, 0x0d, 0x86, 0x0d, - 0xcc, 0x43, 0x2e, 0xaa, 0xf3, 0x89, 0xcd, 0x98, 0x77, 0xf7, 0x7e, 0xd1, 0x82, 0x71, 0xde, 0xcc, - 0x6c, 
0x14, 0x05, 0xee, 0xed, 0x56, 0x44, 0xc2, 0xa9, 0x5e, 0x76, 0xd2, 0xcd, 0x67, 0x8d, 0x56, - 0xee, 0x08, 0xcc, 0xdc, 0x4c, 0x50, 0xe1, 0x87, 0xe0, 0x94, 0x68, 0x77, 0x3c, 0x09, 0xc6, 0xa9, - 0x66, 0xd1, 0x8f, 0x58, 0x30, 0x5d, 0xf5, 0xbd, 0x28, 0xf0, 0xeb, 0x75, 0x12, 0x94, 0x5b, 0xb7, - 0xeb, 0x6e, 0xb8, 0xc5, 0xd7, 0x29, 0x26, 0x1b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, 0xe6, - 0xf0, 0xec, 0xfe, 0x5e, 0x69, 0x7a, 0x3e, 0x97, 0x14, 0x6e, 0xd3, 0x0c, 0xda, 0x06, 0x44, 0x6f, - 0xf6, 0x4a, 0xe4, 0x6c, 0x92, 0xb8, 0xf1, 0xfe, 0xee, 0x1b, 0x3f, 0xb1, 0xbf, 0x57, 0x42, 0xab, - 0x29, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x17, 0x8e, 0xd1, 0xd2, 0xd4, 0xb7, 0x0e, 0x74, 0xdf, 0xdc, - 0xd4, 0xfe, 0x5e, 0xe9, 0xd8, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0xfd, 0x90, 0x05, 0xa7, 0xe2, - 0xcf, 0x5f, 0xbc, 0xdb, 0x74, 0xbc, 0x5a, 0xdc, 0xf0, 0x60, 0xf7, 0x0d, 0xd3, 0x33, 0xf9, 0xd4, - 0x7c, 0x1e, 0x25, 0x9c, 0xdf, 0x08, 0xf2, 0x60, 0x92, 0x76, 0x2d, 0xd9, 0x36, 0x74, 0xdf, 0xf6, - 0xc9, 0xfd, 0xbd, 0xd2, 0xe4, 0x6a, 0x9a, 0x06, 0xce, 0x22, 0x3c, 0x3d, 0x0f, 0xc7, 0x33, 0x57, - 0x27, 0x1a, 0x87, 0xe2, 0x36, 0xe1, 0x4c, 0xe0, 0x20, 0xa6, 0x3f, 0xd1, 0x31, 0xe8, 0xdd, 0x71, - 0xea, 0x2d, 0xb1, 0x31, 0x31, 0xff, 0xf3, 0x4a, 0xe1, 0xb2, 0x65, 0xff, 0x6f, 0x45, 0x18, 0x9b, - 0xaf, 0x2c, 0xdf, 0xd7, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9, - 0x97, 0xe8, 0x0f, 0x66, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9a, 0xb3, 0x65, 0x1f, 0xf0, 0x46, - 0xdd, 0xc9, 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x43, - 0x2e, 0xdd, 0x07, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x6d, 0xb7, 0xee, 0x46, 0x2e, - 0x09, 0xd1, 0x93, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdf, 0xdf, 0x2b, 0x15, - 0x67, 0x6b, 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x8b, 0x29, 0x06, 0x7a, 0x1a, 0x7a, 0x6a, 0x81, 0xdf, - 0x9c, 0x2a, 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x1f, - 0x17, 0xe0, 0xf4, 0x3c, 0x69, 0x6e, 0x2d, 0x55, 0x72, 0xee, 0x8b, 0x0b, 0x30, 0xd0, 0xf0, 0x3d, - 0x37, 0xf2, 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x41, 0x4f, - 0x33, 0x66, 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62, - 0xc5, 0x28, 0x8c, 0xf5, 0x90, 0x04, 0x98, 0x41, 0x62, 0x4e, 0x80, 0xf2, 0x08, 0xe2, 0x46, 0x48, - 0x70, 0x02, 0x14, 0x82, 0x35, 0x2c, 0x54, 0x86, 0xc1, 0x30, 0x31, 0xb3, 0x5d, 0x6d, 0xcd, 0x11, - 0xc6, 0x2a, 0xa8, 0x99, 0x8c, 0x89, 0x18, 0x37, 0x58, 0x5f, 0x47, 0x56, 0xe1, 0x1b, 0x05, 0x40, - 0x7c, 0x08, 0xbf, 0xcb, 0x06, 0x6e, 0x3d, 0x3d, 0x70, 0xdd, 0x6f, 0x89, 0x07, 0x35, 0x7a, 0xff, - 0xd6, 0x82, 0xd3, 0xf3, 0xae, 0x57, 0x23, 0x41, 0xce, 0x02, 0x7c, 0x38, 0x4f, 0xf9, 0xc3, 0x31, - 0x29, 0xc6, 0x12, 0xeb, 0x79, 0x00, 0x4b, 0xcc, 0xfe, 0x47, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd, - 0xc7, 0xae, 0xa7, 0x3f, 0xf6, 0x01, 0x2c, 0x0b, 0xfb, 0x3a, 0x8c, 0xce, 0xd7, 0x5d, 0xe2, 0x45, - 0xcb, 0xe5, 0x79, 0xdf, 0xdb, 0x70, 0x37, 0xd1, 0x2b, 0x30, 0x1a, 0xb9, 0x0d, 0xe2, 0xb7, 0xa2, - 0x0a, 0xa9, 0xfa, 0x1e, 0x7b, 0xb9, 0x5a, 0x17, 0x7a, 0xe7, 0xd0, 0xfe, 0x5e, 0x69, 0x74, 0xcd, - 0x80, 0xe0, 0x04, 0xa6, 0xfd, 0xcb, 0xf4, 0xdc, 0xaa, 0xb7, 0xc2, 0x88, 0x04, 0x6b, 0x41, 0x2b, - 0x8c, 0xe6, 0x5a, 0x94, 0xf7, 0x2c, 0x07, 0x3e, 0xed, 0x8e, 0xeb, 0x7b, 0xe8, 0xb4, 0xf1, 0x1c, - 0x1f, 0x90, 0x4f, 0x71, 0xf1, 0xec, 0x9e, 0x01, 0x08, 0xdd, 0x4d, 0x8f, 0x04, 0xda, 0xf3, 0x61, - 0x94, 0x6d, 0x15, 0x55, 0x8a, 0x35, 0x0c, 0x54, 0x87, 0x91, 0xba, 0x73, 0x9b, 0xd4, 0x2b, 0xa4, - 0x4e, 0xaa, 0x91, 0x1f, 0x08, 
0xf9, 0xc6, 0x0b, 0xdd, 0xbd, 0x03, 0xae, 0xeb, 0x55, 0xe7, 0x26, - 0xf6, 0xf7, 0x4a, 0x23, 0x46, 0x11, 0x36, 0x89, 0xd3, 0xa3, 0xc3, 0x6f, 0xd2, 0xaf, 0x70, 0xea, - 0xfa, 0xe3, 0xf3, 0x86, 0x28, 0xc3, 0x0a, 0xaa, 0x8e, 0x8e, 0x9e, 0xbc, 0xa3, 0xc3, 0xfe, 0x6b, - 0xba, 0xd0, 0xfc, 0x46, 0xd3, 0xf7, 0x88, 0x17, 0xcd, 0xfb, 0x5e, 0x8d, 0x4b, 0xa6, 0x5e, 0x31, - 0x44, 0x27, 0xe7, 0x13, 0xa2, 0x93, 0x13, 0xe9, 0x1a, 0x9a, 0xf4, 0xe4, 0xa3, 0xd0, 0x17, 0x46, - 0x4e, 0xd4, 0x0a, 0xc5, 0xc0, 0x3d, 0x2a, 0x97, 0x5d, 0x85, 0x95, 0x1e, 0xec, 0x95, 0xc6, 0x54, - 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0xf4, 0x37, 0x48, 0x18, 0x3a, 0x9b, 0x92, 0x6d, 0x18, - 0x13, 0x75, 0xfb, 0x57, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x18, 0xf4, 0x92, 0x20, 0xf0, 0x03, 0xf1, - 0x6d, 0x23, 0x02, 0xb1, 0x77, 0x91, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x0f, 0x0b, 0xc6, 0x54, 0x5f, - 0x79, 0x5b, 0x47, 0xf0, 0x5c, 0x7b, 0x1b, 0xa0, 0x2a, 0x3f, 0x30, 0x64, 0xd7, 0xec, 0xd0, 0xf3, - 0xe7, 0x33, 0x39, 0x9a, 0xd4, 0x30, 0xc6, 0x94, 0x55, 0x51, 0x88, 0x35, 0x6a, 0xf6, 0x1f, 0x58, - 0x30, 0x99, 0xf8, 0xa2, 0xeb, 0x6e, 0x18, 0xa1, 0x77, 0x52, 0x5f, 0x35, 0xd3, 0xe5, 0xe2, 0x73, - 0x43, 0xfe, 0x4d, 0x6a, 0xcf, 0xcb, 0x12, 0xed, 0x8b, 0xae, 0x42, 0xaf, 0x1b, 0x91, 0x86, 0xfc, - 0x98, 0xc7, 0xda, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0xcb, 0xb4, 0x26, 0xe6, 0x04, 0xec, 0x3f, - 0x2e, 0xc2, 0x20, 0xdf, 0xdf, 0x2b, 0x4e, 0xf3, 0x08, 0xe6, 0xe2, 0x19, 0x18, 0x74, 0x1b, 0x8d, - 0x56, 0xe4, 0xdc, 0x16, 0xf7, 0xde, 0x00, 0x3f, 0x83, 0x96, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x19, - 0x7a, 0x58, 0x57, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xef, 0x33, 0x0b, 0x4e, 0xe4, 0x70, - 0x96, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x76, 0x3d, 0x27, 0xd8, 0xa5, - 0x65, 0x53, 0x45, 0x46, 0xf0, 0xb9, 0xf6, 0x04, 0xe7, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x06, - 0x60, 0x8d, 0xe8, 0xf4, 0xcb, 0x30, 0xa8, 0x90, 0x0f, 0xc3, 0x39, 0x4e, 0x7f, 0x0c, 0xc6, 0x12, - 0x6d, 0x75, 0xaa, 0x3e, 0xac, 0x33, 0x9e, 0xbf, 0xc7, 0x8e, 0x0c, 0xd1, 0xeb, 0x45, 0x6f, 0x47, - 0xdc, 0x4d, 0xf7, 0xe0, 0x58, 0x3d, 0xe3, 0xc8, 0x17, 0xf3, 0xda, 0xfd, 0x15, 0x71, 0x5a, 0x7c, - 0xf6, 0xb1, 0x2c, 0x28, 0xce, 0x6c, 0xc3, 0x38, 0x11, 0x0b, 0xed, 0x4e, 0x44, 0x7a, 0xde, 0x1d, - 0x53, 0x9d, 0xbf, 0x46, 0x76, 0xd5, 0xa1, 0xfa, 0x9d, 0xec, 0xfe, 0x19, 0x3e, 0xfa, 0xfc, 0xb8, - 0x1c, 0x12, 0x04, 0x8a, 0xd7, 0xc8, 0x2e, 0x9f, 0x0a, 0xfd, 0xeb, 0x8a, 0x6d, 0xbf, 0xee, 0x6b, - 0x16, 0x8c, 0xa8, 0xaf, 0x3b, 0x82, 0x73, 0x61, 0xce, 0x3c, 0x17, 0xce, 0xb4, 0x5d, 0xe0, 0x39, - 0x27, 0xc2, 0x37, 0x0a, 0x70, 0x4a, 0xe1, 0xd0, 0x47, 0x14, 0xff, 0x23, 0x56, 0xd5, 0x45, 0x18, - 0xf4, 0x94, 0x38, 0xd1, 0x32, 0xe5, 0x78, 0xb1, 0x30, 0x31, 0xc6, 0xa1, 0x57, 0x9e, 0x17, 0x5f, - 0xda, 0xc3, 0xba, 0x9c, 0x5d, 0x5c, 0xee, 0x73, 0x50, 0x6c, 0xb9, 0x35, 0x71, 0xc1, 0x7c, 0x58, - 0x8e, 0xf6, 0xfa, 0xf2, 0xc2, 0xc1, 0x5e, 0xe9, 0xd1, 0x3c, 0x95, 0x13, 0xbd, 0xd9, 0xc2, 0x99, - 0xf5, 0xe5, 0x05, 0x4c, 0x2b, 0xa3, 0x59, 0x18, 0x93, 0x5a, 0xb5, 0x9b, 0x94, 0x2f, 0xf5, 0x3d, - 0x71, 0x0f, 0x29, 0x61, 0x39, 0x36, 0xc1, 0x38, 0x89, 0x8f, 0x16, 0x60, 0x7c, 0xbb, 0x75, 0x9b, - 0xd4, 0x49, 0xc4, 0x3f, 0xf8, 0x1a, 0xe1, 0xa2, 0xe4, 0xc1, 0xf8, 0x09, 0x7b, 0x2d, 0x01, 0xc7, - 0xa9, 0x1a, 0xf6, 0xbf, 0xb2, 0xfb, 0x40, 0x8c, 0x9e, 0xc6, 0xdf, 0x7c, 0x27, 0x97, 0x73, 0x37, - 0xab, 0xe2, 0x1a, 0xd9, 0x5d, 0xf3, 0x29, 0x1f, 0x92, 0xbd, 0x2a, 0x8c, 0x35, 0xdf, 0xd3, 0x76, - 0xcd, 0xff, 0x56, 0x01, 0x8e, 0xab, 0x11, 0x30, 0xb8, 0xe5, 0xef, 0xf6, 0x31, 0xb8, 0x04, 0x43, - 0x35, 0xb2, 0xe1, 0xb4, 0xea, 0x91, 0xd2, 0x6b, 0xf4, 
0x72, 0x55, 0xdb, 0x42, 0x5c, 0x8c, 0x75, - 0x9c, 0x43, 0x0c, 0xdb, 0xaf, 0x8f, 0xb0, 0x8b, 0x38, 0x72, 0xe8, 0x1a, 0x57, 0xbb, 0xc6, 0xca, - 0xdd, 0x35, 0x8f, 0x41, 0xaf, 0xdb, 0xa0, 0x8c, 0x59, 0xc1, 0xe4, 0xb7, 0x96, 0x69, 0x21, 0xe6, - 0x30, 0xf4, 0x04, 0xf4, 0x57, 0xfd, 0x46, 0xc3, 0xf1, 0x6a, 0xec, 0xca, 0x1b, 0x9c, 0x1b, 0xa2, - 0xbc, 0xdb, 0x3c, 0x2f, 0xc2, 0x12, 0x46, 0x99, 0x6f, 0x27, 0xd8, 0xe4, 0xc2, 0x1e, 0xc1, 0x7c, - 0xcf, 0x06, 0x9b, 0x21, 0x66, 0xa5, 0xf4, 0xad, 0x7a, 0xc7, 0x0f, 0xb6, 0x5d, 0x6f, 0x73, 0xc1, - 0x0d, 0xc4, 0x96, 0x50, 0x77, 0xe1, 0x2d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x82, 0xde, 0xa6, 0x1f, - 0x44, 0xe1, 0x54, 0x1f, 0x1b, 0xee, 0x47, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0x65, 0x3f, 0x88, 0xe2, - 0x0f, 0xa0, 0xff, 0x42, 0xcc, 0xab, 0xa3, 0xeb, 0xd0, 0x4f, 0xbc, 0x9d, 0xa5, 0xc0, 0x6f, 0x4c, - 0x4d, 0xe6, 0x53, 0x5a, 0xe4, 0x28, 0x7c, 0x99, 0xc5, 0x3c, 0xaa, 0x28, 0xc6, 0x92, 0x04, 0xfa, - 0x28, 0x14, 0x89, 0xb7, 0x33, 0xd5, 0xcf, 0x28, 0x4d, 0xe7, 0x50, 0xba, 0xe9, 0x04, 0xf1, 0x99, - 0xbf, 0xe8, 0xed, 0x60, 0x5a, 0x07, 0x7d, 0x02, 0x06, 0xe5, 0x81, 0x11, 0x0a, 0x29, 0x6a, 0xe6, - 0x82, 0x95, 0xc7, 0x0c, 0x26, 0xef, 0xb6, 0xdc, 0x80, 0x34, 0x88, 0x17, 0x85, 0xf1, 0x09, 0x29, - 0xa1, 0x21, 0x8e, 0xa9, 0xa1, 0x2a, 0x0c, 0x07, 0x24, 0x74, 0xef, 0x91, 0xb2, 0x5f, 0x77, 0xab, - 0xbb, 0x53, 0x27, 0x59, 0xf7, 0x9e, 0x6a, 0x3b, 0x64, 0x58, 0xab, 0x10, 0x4b, 0xf9, 0xf5, 0x52, - 0x6c, 0x10, 0x45, 0x6f, 0xc1, 0x48, 0x40, 0xc2, 0xc8, 0x09, 0x22, 0xd1, 0xca, 0x94, 0xd2, 0xca, - 0x8d, 0x60, 0x1d, 0xc0, 0x9f, 0x13, 0x71, 0x33, 0x31, 0x04, 0x9b, 0x14, 0xd0, 0x27, 0xa4, 0xca, - 0x61, 0xc5, 0x6f, 0x79, 0x51, 0x38, 0x35, 0xc8, 0xfa, 0x9d, 0xa9, 0x9b, 0xbe, 0x19, 0xe3, 0x25, - 0x75, 0x12, 0xbc, 0x32, 0x36, 0x48, 0xa1, 0x4f, 0xc1, 0x08, 0xff, 0xcf, 0x55, 0xaa, 0xe1, 0xd4, - 0x71, 0x46, 0xfb, 0x5c, 0x3e, 0x6d, 0x8e, 0x38, 0x77, 0x5c, 0x10, 0x1f, 0xd1, 0x4b, 0x43, 0x6c, - 0x52, 0x43, 0x18, 0x46, 0xea, 0xee, 0x0e, 0xf1, 0x48, 0x18, 0x96, 0x03, 0xff, 0x36, 0x11, 0x12, - 0xe2, 0x53, 0xd9, 0x2a, 0x58, 0xff, 0x36, 0x11, 0x8f, 0x40, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x75, - 0x18, 0xa5, 0x4f, 0x72, 0x37, 0x26, 0x3a, 0xd4, 0x89, 0x28, 0x7b, 0x38, 0x63, 0xa3, 0x12, 0x4e, - 0x10, 0x41, 0x37, 0x60, 0x98, 0x8d, 0x79, 0xab, 0xc9, 0x89, 0x9e, 0xe8, 0x44, 0x94, 0x19, 0x14, - 0x54, 0xb4, 0x2a, 0xd8, 0x20, 0x80, 0xde, 0x84, 0xc1, 0xba, 0xbb, 0x41, 0xaa, 0xbb, 0xd5, 0x3a, - 0x99, 0x1a, 0x66, 0xd4, 0x32, 0x0f, 0xc3, 0xeb, 0x12, 0x89, 0xf3, 0xe7, 0xea, 0x2f, 0x8e, 0xab, - 0xa3, 0x9b, 0x70, 0x22, 0x22, 0x41, 0xc3, 0xf5, 0x1c, 0x7a, 0x88, 0x89, 0x27, 0x21, 0xd3, 0x8c, - 0x8f, 0xb0, 0xd5, 0x75, 0x56, 0xcc, 0xc6, 0x89, 0xb5, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x85, - 0xa9, 0x0c, 0x08, 0x5f, 0xb7, 0xc7, 0x18, 0xe5, 0xd7, 0x04, 0xe5, 0xa9, 0xb5, 0x1c, 0xbc, 0x83, - 0x36, 0x30, 0x9c, 0x4b, 0x1d, 0xdd, 0x80, 0x31, 0x76, 0x72, 0x96, 0x5b, 0xf5, 0xba, 0x68, 0x70, - 0x94, 0x35, 0xf8, 0x84, 0xe4, 0x23, 0x96, 0x4d, 0xf0, 0xc1, 0x5e, 0x09, 0xe2, 0x7f, 0x38, 0x59, - 0x1b, 0xdd, 0x66, 0x4a, 0xd8, 0x56, 0xe0, 0x46, 0xbb, 0x74, 0x57, 0x91, 0xbb, 0xd1, 0xd4, 0x58, - 0x5b, 0x81, 0x94, 0x8e, 0xaa, 0x34, 0xb5, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x57, 0x41, 0x18, 0xd5, - 0x5c, 0x6f, 0x6a, 0x9c, 0xbf, 0xa7, 0xe4, 0x49, 0x5a, 0xa1, 0x85, 0x98, 0xc3, 0x98, 0x02, 0x96, - 0xfe, 0xb8, 0x41, 0x6f, 0xdc, 0x09, 0x86, 0x18, 0x2b, 0x60, 0x25, 0x00, 0xc7, 0x38, 0x94, 0x09, - 0x8e, 0xa2, 0xdd, 0x29, 0xc4, 0x50, 0xd5, 0x81, 0xb8, 0xb6, 0xf6, 0x09, 0x4c, 0xcb, 0xed, 0xdb, - 0x30, 0xaa, 0x8e, 0x09, 0x36, 0x26, 0xa8, 0x04, 0xbd, 0x8c, 0xed, 0x13, 0xe2, 
0xd3, 0x41, 0xda, - 0x05, 0xc6, 0x12, 0x62, 0x5e, 0xce, 0xba, 0xe0, 0xde, 0x23, 0x73, 0xbb, 0x11, 0xe1, 0xb2, 0x88, - 0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x31, 0x8e, 0xfd, 0x1f, 0x39, 0xfb, 0x1c, 0xdf, 0x12, 0x5d, 0xdc, - 0x8b, 0xcf, 0xc2, 0x00, 0x33, 0xfc, 0xf0, 0x03, 0xae, 0x9d, 0xed, 0x8d, 0x19, 0xe6, 0xab, 0xa2, - 0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0x54, 0xf5, 0x06, 0xc4, 0xa5, 0xae, 0x8e, 0x11, 0xa3, 0x75, - 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xc0, 0x6c, 0x9c, 0xaa, 0x7e, 0x5d, 0x70, 0x9b, 0x92, 0x33, 0x19, - 0x28, 0x8b, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x3c, 0xf4, 0xd1, 0x2e, 0x2c, 0x97, 0xc5, - 0x75, 0xaa, 0x24, 0x81, 0x57, 0x59, 0x29, 0x16, 0x50, 0xfb, 0x0f, 0x2c, 0xc6, 0x4b, 0xa5, 0xcf, - 0x7c, 0x74, 0x95, 0x5d, 0x1a, 0xec, 0x06, 0xd1, 0xb4, 0xf0, 0x8f, 0x6b, 0x37, 0x81, 0x82, 0x1d, - 0x24, 0xfe, 0x63, 0xa3, 0x26, 0x7a, 0x3b, 0x79, 0x33, 0x70, 0x86, 0xe2, 0x45, 0x39, 0x04, 0xc9, - 0xdb, 0xe1, 0x91, 0xf8, 0x8a, 0xa3, 0xfd, 0x69, 0x77, 0x45, 0xd8, 0x3f, 0x55, 0xd0, 0x56, 0x49, - 0x25, 0x72, 0x22, 0x82, 0xca, 0xd0, 0x7f, 0xc7, 0x71, 0x23, 0xd7, 0xdb, 0x14, 0x7c, 0x5f, 0xfb, - 0x8b, 0x8e, 0x55, 0xba, 0xc5, 0x2b, 0x70, 0xee, 0x45, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0xb4, - 0x3c, 0x8f, 0x52, 0x2c, 0x74, 0x4b, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4, - 0x0e, 0x80, 0x3c, 0x21, 0x48, 0x4d, 0xc8, 0x0e, 0x9f, 0xed, 0x4c, 0x74, 0x4d, 0xd5, 0xe1, 0xc2, - 0xc9, 0xf8, 0x3f, 0xd6, 0xe8, 0xd9, 0x91, 0x36, 0xa7, 0x7a, 0x67, 0xd0, 0x27, 0xe9, 0x16, 0x75, - 0x82, 0x88, 0xd4, 0x66, 0x23, 0x31, 0x38, 0x4f, 0x77, 0xf7, 0x38, 0x5c, 0x73, 0x1b, 0x44, 0xdf, - 0xce, 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x53, 0x84, 0xa9, 0xbc, 0xee, 0xd2, 0x4d, 0x43, 0xee, - 0xba, 0xd1, 0x3c, 0x65, 0x6b, 0x2d, 0x73, 0xd3, 0x2c, 0x8a, 0x72, 0xac, 0x30, 0xe8, 0xea, 0x0d, - 0xdd, 0x4d, 0xf9, 0xb6, 0xef, 0x8d, 0x57, 0x6f, 0x85, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x01, 0x71, - 0x42, 0x61, 0x7c, 0xa7, 0xad, 0x72, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x52, 0xc6, 0x9e, 0x0e, 0x52, - 0x46, 0x63, 0x88, 0x7a, 0x1f, 0xec, 0x10, 0xa1, 0x4f, 0x03, 0x6c, 0xb8, 0x9e, 0x1b, 0x6e, 0x31, - 0xea, 0x7d, 0x87, 0xa6, 0xae, 0x98, 0xe2, 0x25, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0x4b, 0x30, 0xa4, - 0x0e, 0x90, 0xe5, 0x05, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x01, 0xeb, 0x78, 0xf6, - 0x67, 0x93, 0xeb, 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x6e, 0xc7, 0xb7, 0xd0, 0x7e, 0x7c, 0xed, - 0x9f, 0x19, 0x84, 0x31, 0xa3, 0xb1, 0x56, 0xd8, 0xc5, 0x99, 0x7b, 0x85, 0x5e, 0x40, 0x4e, 0x44, - 0xc4, 0xfe, 0xb3, 0x3b, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xc1, - 0xba, 0x13, 0x32, 0x89, 0x25, 0x11, 0xfb, 0xae, 0x1b, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed, - 0xd6, 0xe7, 0xb4, 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c, - 0xd8, 0x2e, 0xe6, 0x30, 0x74, 0x99, 0x1d, 0xad, 0x74, 0x55, 0xcc, 0x53, 0x6e, 0x94, 0x2d, 0xb3, - 0x5e, 0x83, 0xc9, 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0xbe, 0x36, 0x6f, 0xb2, 0xa7, 0xa0, - 0x9f, 0xfd, 0x50, 0x2b, 0x40, 0xcd, 0xc6, 0x32, 0x2f, 0xc6, 0x12, 0x9e, 0x5c, 0x30, 0x03, 0xdd, - 0x2d, 0x18, 0xfa, 0xea, 0x13, 0x8b, 0x9a, 0x99, 0x5d, 0x0c, 0xf0, 0x53, 0x4e, 0x2c, 0x79, 0x2c, - 0x61, 0xe8, 0x57, 0x2c, 0x40, 0x4e, 0x9d, 0xbe, 0x96, 0x69, 0xb1, 0x7a, 0xdc, 0x00, 0x63, 0xb5, - 0x5f, 0xed, 0x38, 0xec, 0xad, 0x70, 0x66, 0x36, 0x55, 0x9b, 0x4b, 0x4a, 0x5f, 0x11, 0x5d, 0x44, - 0x69, 0x04, 0xfd, 0x32, 0xba, 0xee, 0x86, 0xd1, 0xe7, 0xff, 0x26, 0x71, 0x39, 0x65, 0x74, 0x09, - 0xad, 0xeb, 0x8f, 0xaf, 0xa1, 0x43, 0x3e, 0xbe, 0x46, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0, - 0x0c, 
0xb3, 0x2f, 0x7f, 0xa2, 0xc3, 0x03, 0x46, 0x88, 0xd3, 0xbb, 0x79, 0xc6, 0x94, 0x85, 0x1e, - 0x78, 0x84, 0x75, 0xb9, 0xfd, 0x23, 0x78, 0x3d, 0x24, 0xc1, 0xdc, 0x29, 0xa9, 0x26, 0x3e, 0xd0, - 0x79, 0x0f, 0x4d, 0x6f, 0xfc, 0x43, 0x16, 0x4c, 0xa5, 0x07, 0x88, 0x77, 0x69, 0x6a, 0x94, 0xf5, - 0xdf, 0x6e, 0x37, 0x32, 0xa2, 0xf3, 0xd2, 0xdc, 0x75, 0x6a, 0x36, 0x87, 0x16, 0xce, 0x6d, 0x65, - 0xba, 0x05, 0x27, 0x73, 0xe6, 0x3d, 0x43, 0x6a, 0xbd, 0xa0, 0x4b, 0xad, 0x3b, 0xc8, 0x3a, 0x67, - 0xe4, 0xcc, 0xcc, 0xbc, 0xd5, 0x72, 0xbc, 0xc8, 0x8d, 0x76, 0x75, 0x29, 0xb7, 0x07, 0xe6, 0x80, - 0xa0, 0x4f, 0x41, 0x6f, 0xdd, 0xf5, 0x5a, 0x77, 0xc5, 0x4d, 0x79, 0x3e, 0xfb, 0x11, 0xe3, 0xb5, - 0xee, 0x9a, 0x43, 0x5c, 0xa2, 0x1b, 0x92, 0x95, 0x1f, 0xec, 0x95, 0x50, 0x1a, 0x01, 0x73, 0xaa, - 0xf6, 0xd3, 0x30, 0xba, 0xe0, 0x90, 0x86, 0xef, 0x2d, 0x7a, 0xb5, 0xa6, 0xef, 0x7a, 0x11, 0x9a, - 0x82, 0x1e, 0xc6, 0x22, 0xf2, 0x0b, 0xb2, 0x87, 0x0e, 0x21, 0x66, 0x25, 0xf6, 0x26, 0x1c, 0x5f, - 0xf0, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0x36, 0x5b, 0x5e, 0xd6, 0xa4, 0x7e, 0xab, 0x52, 0xea, 0x64, - 0xe5, 0xbf, 0xe9, 0xb5, 0x9a, 0x7c, 0x29, 0x2d, 0xb9, 0x75, 0x92, 0x23, 0x9b, 0xfd, 0x99, 0x82, - 0xd1, 0x52, 0x8c, 0xaf, 0x34, 0x8b, 0x56, 0xae, 0x51, 0xc2, 0x5b, 0x30, 0xb0, 0xe1, 0x92, 0x7a, - 0x0d, 0x93, 0x0d, 0x31, 0x1b, 0x4f, 0xe6, 0x9b, 0x2d, 0x2e, 0x51, 0x4c, 0xa5, 0x02, 0x65, 0x32, - 0xab, 0x25, 0x51, 0x19, 0x2b, 0x32, 0x68, 0x1b, 0xc6, 0xe5, 0x9c, 0x49, 0xa8, 0x38, 0xb5, 0x9f, - 0x6a, 0xb7, 0x08, 0x4d, 0xe2, 0xcc, 0x84, 0x1b, 0x27, 0xc8, 0xe0, 0x14, 0x61, 0x74, 0x1a, 0x7a, - 0x1a, 0x94, 0x3f, 0xe9, 0x61, 0xc3, 0xcf, 0x84, 0x54, 0x4c, 0xde, 0xc6, 0x4a, 0xed, 0x9f, 0xb3, - 0xe0, 0x64, 0x6a, 0x64, 0x84, 0xdc, 0xf1, 0x01, 0xcf, 0x42, 0x52, 0x0e, 0x58, 0xe8, 0x2c, 0x07, - 0xb4, 0xff, 0x3b, 0x0b, 0x8e, 0x2d, 0x36, 0x9a, 0xd1, 0xee, 0x82, 0x6b, 0x5a, 0x10, 0xbc, 0x0c, - 0x7d, 0x0d, 0x52, 0x73, 0x5b, 0x0d, 0x31, 0x73, 0x25, 0x79, 0x87, 0xaf, 0xb0, 0x52, 0x7a, 0x0e, - 0x54, 0x22, 0x3f, 0x70, 0x36, 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x71, 0x42, 0xee, 0x3d, 0x72, 0xdd, - 0x6d, 0xb8, 0xd1, 0xfd, 0xed, 0x2e, 0xa1, 0xfc, 0x97, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0x96, 0x05, - 0x63, 0x72, 0xdd, 0xcf, 0xd6, 0x6a, 0x01, 0x09, 0x43, 0x34, 0x0d, 0x05, 0xb7, 0x29, 0x7a, 0x09, - 0xa2, 0x97, 0x85, 0xe5, 0x32, 0x2e, 0xb8, 0x4d, 0xf9, 0xe8, 0x62, 0x6c, 0x42, 0xd1, 0xb4, 0x83, - 0xb8, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0x17, 0x60, 0xc0, 0xf3, 0x6b, 0xfc, 0xdd, 0x22, 0x34, 0xe1, - 0x14, 0x73, 0x55, 0x94, 0x61, 0x05, 0x45, 0x65, 0x18, 0xe4, 0x56, 0xb2, 0xf1, 0xa2, 0xed, 0xca, - 0xd6, 0x96, 0x7d, 0xd9, 0x9a, 0xac, 0x89, 0x63, 0x22, 0xf6, 0x1f, 0x59, 0x30, 0x2c, 0xbf, 0xac, - 0xcb, 0x17, 0x25, 0xdd, 0x5a, 0xf1, 0x6b, 0x32, 0xde, 0x5a, 0xf4, 0x45, 0xc8, 0x20, 0xc6, 0x43, - 0xb0, 0x78, 0xa8, 0x87, 0xe0, 0x25, 0x18, 0x72, 0x9a, 0xcd, 0xb2, 0xf9, 0x8a, 0x64, 0x4b, 0x69, - 0x36, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xb3, 0x05, 0x18, 0x95, 0x5f, 0x50, 0x69, 0xdd, 0x0e, 0x49, - 0x84, 0xd6, 0x60, 0xd0, 0xe1, 0xb3, 0x44, 0xe4, 0x22, 0x7f, 0x2c, 0x5b, 0xba, 0x69, 0x4c, 0x69, - 0xcc, 0x0e, 0xcf, 0xca, 0xda, 0x38, 0x26, 0x84, 0xea, 0x30, 0xe1, 0xf9, 0x11, 0x63, 0x8d, 0x14, - 0xbc, 0x9d, 0xc2, 0x39, 0x49, 0xfd, 0x94, 0xa0, 0x3e, 0xb1, 0x9a, 0xa4, 0x82, 0xd3, 0x84, 0xd1, - 0xa2, 0x94, 0x18, 0x17, 0xf3, 0x45, 0x7d, 0xfa, 0xc4, 0x65, 0x0b, 0x8c, 0xed, 0xdf, 0xb7, 0x60, - 0x50, 0xa2, 0x1d, 0x85, 0x6d, 0xc1, 0x0a, 0xf4, 0x87, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xed, 0x3a, - 0xce, 0xe7, 0x2b, 0xe6, 0xf8, 0xf8, 0xff, 0x10, 0x4b, 0x1a, 0x4c, 0x61, 0xa8, 0xba, 0xff, 0x3e, - 0x51, 0x18, 0xaa, 0xfe, 0xe4, 
0x5c, 0x4a, 0x7f, 0xc7, 0xfa, 0xac, 0x49, 0xe0, 0xe9, 0xc3, 0xa4, - 0x19, 0x90, 0x0d, 0xf7, 0x6e, 0xf2, 0x61, 0x52, 0x66, 0xa5, 0x58, 0x40, 0xd1, 0x3b, 0x30, 0x5c, - 0x95, 0x9a, 0xa2, 0x78, 0x87, 0x9f, 0x6f, 0xab, 0xb5, 0x54, 0x0a, 0x6e, 0x2e, 0xe9, 0x9c, 0xd7, - 0xea, 0x63, 0x83, 0x9a, 0x69, 0x05, 0x56, 0xec, 0x64, 0x05, 0x16, 0xd3, 0xcd, 0xb7, 0x89, 0xfa, - 0x79, 0x0b, 0xfa, 0xb8, 0x86, 0xa0, 0x3b, 0x05, 0x8d, 0xa6, 0xef, 0x8f, 0xc7, 0xee, 0x26, 0x2d, - 0x14, 0x9c, 0x0d, 0x5a, 0x81, 0x41, 0xf6, 0x83, 0x69, 0x38, 0x8a, 0xf9, 0x3e, 0x63, 0xbc, 0x55, - 0xbd, 0x83, 0x37, 0x65, 0x35, 0x1c, 0x53, 0xb0, 0x7f, 0xba, 0x48, 0x4f, 0xb7, 0x18, 0xd5, 0xb8, - 0xf4, 0xad, 0x87, 0x77, 0xe9, 0x17, 0x1e, 0xd6, 0xa5, 0xbf, 0x09, 0x63, 0x55, 0xcd, 0x3a, 0x20, - 0x9e, 0xc9, 0x0b, 0x6d, 0x17, 0x89, 0x66, 0x48, 0xc0, 0x65, 0xa8, 0xf3, 0x26, 0x11, 0x9c, 0xa4, - 0x8a, 0x3e, 0x09, 0xc3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x90, 0xee, 0x89, 0xfc, 0xf5, 0xa2, 0x37, - 0xc1, 0x65, 0xee, 0x5a, 0x75, 0x6c, 0x10, 0xb3, 0xff, 0xc9, 0x02, 0xb4, 0xd8, 0xdc, 0x22, 0x0d, - 0x12, 0x38, 0xf5, 0x58, 0xc9, 0xf7, 0x25, 0x0b, 0xa6, 0x48, 0xaa, 0x78, 0xde, 0x6f, 0x34, 0xc4, - 0x93, 0x3e, 0x47, 0xea, 0xb4, 0x98, 0x53, 0x27, 0x66, 0xeb, 0xf3, 0x30, 0x70, 0x6e, 0x7b, 0x68, - 0x05, 0x26, 0xf9, 0x2d, 0xa9, 0x00, 0x9a, 0xad, 0xdd, 0x23, 0x82, 0xf0, 0xe4, 0x5a, 0x1a, 0x05, - 0x67, 0xd5, 0xb3, 0x7f, 0x7f, 0x04, 0x72, 0x7b, 0xf1, 0x81, 0x76, 0xf3, 0x03, 0xed, 0xe6, 0x07, - 0xda, 0xcd, 0x0f, 0xb4, 0x9b, 0x1f, 0x68, 0x37, 0x3f, 0xd0, 0x6e, 0xbe, 0x4f, 0xb5, 0x9b, 0xff, - 0xa5, 0x05, 0xc7, 0xd5, 0xf5, 0x65, 0x3c, 0xd8, 0x3f, 0x07, 0x93, 0x7c, 0xbb, 0xcd, 0xd7, 0x1d, - 0xb7, 0xb1, 0x46, 0x1a, 0xcd, 0xba, 0x13, 0x49, 0x1b, 0xa6, 0x4b, 0x99, 0x2b, 0x37, 0xe1, 0x28, - 0x61, 0x54, 0xe4, 0x1e, 0x67, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0x3b, 0x03, 0xd0, 0xbb, 0xb8, - 0x43, 0xbc, 0xe8, 0x08, 0x9e, 0x36, 0x55, 0x18, 0x75, 0xbd, 0x1d, 0xbf, 0xbe, 0x43, 0x6a, 0x1c, - 0x7e, 0x98, 0x17, 0xf8, 0x09, 0x41, 0x7a, 0x74, 0xd9, 0x20, 0x81, 0x13, 0x24, 0x1f, 0x86, 0x8e, - 0xe8, 0x0a, 0xf4, 0xf1, 0xcb, 0x47, 0x28, 0x88, 0x32, 0xcf, 0x6c, 0x36, 0x88, 0xe2, 0x4a, 0x8d, - 0xf5, 0x57, 0xfc, 0x72, 0x13, 0xd5, 0xd1, 0x67, 0x61, 0x74, 0xc3, 0x0d, 0xc2, 0x68, 0xcd, 0x6d, - 0xd0, 0xab, 0xa1, 0xd1, 0xbc, 0x0f, 0x9d, 0x90, 0x1a, 0x87, 0x25, 0x83, 0x12, 0x4e, 0x50, 0x46, - 0x9b, 0x30, 0x52, 0x77, 0xf4, 0xa6, 0xfa, 0x0f, 0xdd, 0x94, 0xba, 0x1d, 0xae, 0xeb, 0x84, 0xb0, - 0x49, 0x97, 0x6e, 0xa7, 0x2a, 0x53, 0x6b, 0x0c, 0x30, 0x71, 0x86, 0xda, 0x4e, 0x5c, 0x9f, 0xc1, - 0x61, 0x94, 0x41, 0x63, 0xee, 0x06, 0x83, 0x26, 0x83, 0xa6, 0x39, 0x15, 0x7c, 0x06, 0x06, 0x09, - 0x1d, 0x42, 0x4a, 0x58, 0x5c, 0x30, 0x17, 0xbb, 0xeb, 0xeb, 0x8a, 0x5b, 0x0d, 0x7c, 0x53, 0x1b, - 0xb7, 0x28, 0x29, 0xe1, 0x98, 0x28, 0x9a, 0x87, 0xbe, 0x90, 0x04, 0xae, 0x92, 0xf8, 0xb7, 0x99, - 0x46, 0x86, 0xc6, 0x5d, 0x1a, 0xf9, 0x6f, 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0x30, 0x51, 0x2c, 0xbb, - 0x0c, 0xb4, 0xe5, 0x35, 0xcb, 0x4a, 0xb1, 0x80, 0xa2, 0x37, 0xa1, 0x3f, 0x20, 0x75, 0xa6, 0xee, - 0x1d, 0xe9, 0x7e, 0x91, 0x73, 0xed, 0x31, 0xaf, 0x87, 0x25, 0x01, 0x74, 0x0d, 0x50, 0x40, 0x28, - 0x83, 0xe7, 0x7a, 0x9b, 0xca, 0x08, 0x5f, 0x1c, 0xb4, 0x8a, 0x91, 0xc6, 0x31, 0x86, 0xf4, 0x66, - 0xc5, 0x19, 0xd5, 0xd0, 0x15, 0x98, 0x50, 0xa5, 0xcb, 0x5e, 0x18, 0x39, 0xf4, 0x80, 0x1b, 0x63, - 0xb4, 0x94, 0x7c, 0x05, 0x27, 0x11, 0x70, 0xba, 0x8e, 0xfd, 0x6b, 0x16, 0xf0, 0x71, 0x3e, 0x02, - 0xa9, 0xc2, 0xeb, 0xa6, 0x54, 0xe1, 0x54, 0xee, 0xcc, 0xe5, 0x48, 0x14, 0x7e, 0xcd, 0x82, 0x21, - 0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xb5, 0x59, 0xb3, 0x2d, 
0x18, 0xa7, 0x2b, 0xfd, 0xc6, 0xed, 0x90, - 0x04, 0x3b, 0xa4, 0xc6, 0x16, 0x66, 0xe1, 0xfe, 0x16, 0xa6, 0x32, 0xf8, 0xbd, 0x9e, 0x20, 0x88, - 0x53, 0x4d, 0xd8, 0x9f, 0x91, 0x5d, 0x55, 0xf6, 0xd1, 0x55, 0x35, 0xe7, 0x09, 0xfb, 0x68, 0x35, - 0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xcb, 0x0f, 0xa3, 0xa4, 0x7d, 0xf4, 0x55, 0x3f, 0x8c, 0x30, - 0x83, 0xd8, 0x2f, 0x00, 0x2c, 0xde, 0x25, 0x55, 0xbe, 0x62, 0xf5, 0x47, 0x8f, 0x95, 0xff, 0xe8, - 0xb1, 0xff, 0xd2, 0x82, 0xd1, 0xa5, 0x79, 0xe3, 0xe6, 0x9a, 0x01, 0xe0, 0x2f, 0xb5, 0x5b, 0xb7, - 0x56, 0xa5, 0x91, 0x0e, 0xb7, 0x53, 0x50, 0xa5, 0x58, 0xc3, 0x40, 0xa7, 0xa0, 0x58, 0x6f, 0x79, - 0x42, 0xec, 0xd9, 0x4f, 0xaf, 0xc7, 0xeb, 0x2d, 0x0f, 0xd3, 0x32, 0xcd, 0x93, 0xad, 0xd8, 0xb5, - 0x27, 0x5b, 0xc7, 0x80, 0x3a, 0xa8, 0x04, 0xbd, 0x77, 0xee, 0xb8, 0x35, 0x1e, 0x27, 0x40, 0x18, - 0x10, 0xdd, 0xba, 0xb5, 0xbc, 0x10, 0x62, 0x5e, 0x6e, 0x7f, 0xb9, 0x08, 0xd3, 0x4b, 0x75, 0x72, - 0xf7, 0x3d, 0xc6, 0x4a, 0xe8, 0xd6, 0x0f, 0xef, 0x70, 0x02, 0xa4, 0xc3, 0xfa, 0x5a, 0x76, 0x1e, - 0x8f, 0x0d, 0xe8, 0xe7, 0xe6, 0xc1, 0x32, 0x72, 0x42, 0xa6, 0x52, 0x36, 0x7f, 0x40, 0x66, 0xb8, - 0x99, 0xb1, 0x50, 0xca, 0xaa, 0x0b, 0x53, 0x94, 0x62, 0x49, 0x7c, 0xfa, 0x15, 0x18, 0xd6, 0x31, - 0x0f, 0xe5, 0xf5, 0xfc, 0xc3, 0x45, 0x18, 0xa7, 0x3d, 0x78, 0xa8, 0x13, 0xb1, 0x9e, 0x9e, 0x88, - 0x07, 0xed, 0xf9, 0xda, 0x79, 0x36, 0xde, 0x49, 0xce, 0xc6, 0xa5, 0xbc, 0xd9, 0x38, 0xea, 0x39, - 0xf8, 0x11, 0x0b, 0x26, 0x97, 0xea, 0x7e, 0x75, 0x3b, 0xe1, 0x9d, 0xfa, 0x12, 0x0c, 0xd1, 0xe3, - 0x38, 0x34, 0x02, 0xb5, 0x18, 0xa1, 0x7b, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xf5, 0xf5, 0xe5, - 0x85, 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xcf, 0x2d, 0x38, 0x73, 0x65, 0x7e, 0x31, - 0x5e, 0x8a, 0xa9, 0xa0, 0x43, 0xe7, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58, - 0x2f, 0x04, 0xf4, 0xfd, 0x12, 0xdf, 0x6b, 0x1d, 0xe0, 0x0a, 0x2e, 0xcf, 0x8b, 0x73, 0x57, 0x6a, - 0x81, 0xac, 0x5c, 0x2d, 0xd0, 0x13, 0xd0, 0x4f, 0xef, 0x05, 0xb7, 0x2a, 0xfb, 0xcd, 0xcd, 0x2e, - 0x78, 0x11, 0x96, 0x30, 0xfb, 0x57, 0x2d, 0x98, 0xbc, 0xe2, 0x46, 0xf4, 0xd2, 0x4e, 0x46, 0xd5, - 0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x9b, 0x8c, 0xaa, 0x83, 0x15, 0x04, 0x6b, 0x58, 0xfc, - 0x83, 0x76, 0x5c, 0xe6, 0xef, 0x52, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63, 0x85, 0x41, 0xc7, 0xab, - 0xe6, 0x06, 0x4c, 0x64, 0xb9, 0x2b, 0x0e, 0x6e, 0x35, 0x5e, 0x0b, 0x12, 0x80, 0x63, 0x1c, 0xfb, - 0x1f, 0x2c, 0x28, 0x5d, 0xe1, 0x5e, 0xbb, 0x1b, 0x61, 0xce, 0xa1, 0xfb, 0x02, 0x0c, 0x12, 0xa9, - 0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xe0, 0x3e, 0x0a, 0xaf, 0x0b, 0x17, 0xfa, - 0xc3, 0xf9, 0x40, 0x2f, 0x01, 0x22, 0x7a, 0x5b, 0x7a, 0xb4, 0x23, 0x16, 0x36, 0x65, 0x31, 0x05, - 0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb3, 0xe0, 0xb8, 0xfa, 0xe0, 0xf7, 0xdd, 0x67, 0xda, 0x5f, 0x2f, - 0xc0, 0xc8, 0xd5, 0xb5, 0xb5, 0xf2, 0x15, 0x12, 0x69, 0xab, 0xb2, 0xbd, 0xda, 0x1f, 0x6b, 0xda, - 0xcb, 0x76, 0x6f, 0xc4, 0x56, 0xe4, 0xd6, 0x67, 0x78, 0x0c, 0xbf, 0x99, 0x65, 0x2f, 0xba, 0x11, - 0x54, 0xa2, 0xc0, 0xf5, 0x36, 0x33, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79, 0x3c, 0x0b, 0x7a, 0x01, - 0xfa, 0x58, 0x10, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0x3d, 0xd8, 0x2b, 0x0d, 0xae, - 0xe3, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0xb4, 0x0e, 0x43, 0x5b, 0x51, 0xd4, 0xbc, 0x4a, 0x9c, 0x1a, - 0x09, 0xe4, 0x29, 0x7b, 0x36, 0xeb, 0x94, 0xa5, 0x83, 0xc0, 0xd1, 0xe2, 0x83, 0x29, 0x2e, 0x0b, - 0xb1, 0x4e, 0xc7, 0xae, 0x00, 0xc4, 0xb0, 0x07, 0xa4, 0xb8, 0xb1, 0xd7, 0x60, 0x90, 0x7e, 0xee, - 0x6c, 0xdd, 0x75, 0xda, 0xab, 0xc6, 0x9f, 0x81, 0x41, 0xa9, 0xf8, 0x0e, 0x45, 
0x88, 0x0f, 0x76, - 0x23, 0x49, 0xbd, 0x78, 0x88, 0x63, 0xb8, 0xfd, 0x38, 0x08, 0x0b, 0xe0, 0x76, 0x24, 0xed, 0x0d, - 0x38, 0xc6, 0x4c, 0x99, 0x9d, 0x68, 0xcb, 0x58, 0xa3, 0x9d, 0x17, 0xc3, 0xb3, 0xe2, 0x5d, 0xc7, - 0xbf, 0x6c, 0x4a, 0x73, 0x21, 0x1f, 0x96, 0x14, 0xe3, 0x37, 0x9e, 0xfd, 0xf7, 0x3d, 0xf0, 0xc8, - 0x72, 0x25, 0x3f, 0x36, 0xd5, 0x65, 0x18, 0xe6, 0xec, 0x22, 0x5d, 0x1a, 0x4e, 0x5d, 0xb4, 0xab, - 0x24, 0xa0, 0x6b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0c, 0x14, 0xdd, 0x77, 0xbd, 0xa4, 0x83, 0xe5, - 0xf2, 0x5b, 0xab, 0x98, 0x96, 0x53, 0x30, 0xe5, 0x3c, 0xf9, 0x91, 0xae, 0xc0, 0x8a, 0xfb, 0x7c, - 0x1d, 0x46, 0xdd, 0xb0, 0x1a, 0xba, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e, 0x57, 0x32, 0x07, 0xda, - 0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xed, 0x9a, 0x7b, 0xed, 0x18, 0x19, 0x83, 0x1e, - 0xff, 0x4d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x3e, - 0xe8, 0xaa, 0x5b, 0x4e, 0x73, 0xb6, 0x15, 0x6d, 0x2d, 0xb8, 0x61, 0xd5, 0xdf, 0x21, 0xc1, 0x2e, - 0x7b, 0x8b, 0x0f, 0xc4, 0x0f, 0x3a, 0x05, 0x98, 0xbf, 0x3a, 0x5b, 0xa6, 0x98, 0x38, 0x5d, 0x07, - 0xcd, 0xc2, 0x98, 0x2c, 0xac, 0x90, 0x90, 0x5d, 0x01, 0x43, 0x8c, 0x8c, 0x72, 0x79, 0x14, 0xc5, - 0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3, 0x88, 0xeb, 0xb9, 0x91, - 0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x2f, 0xeb, 0x00, 0x6c, 0xe2, 0xd9, - 0xff, 0x5f, 0x0f, 0x4c, 0xb0, 0x69, 0xfb, 0x60, 0x85, 0x7d, 0x2f, 0xad, 0xb0, 0xf5, 0xf4, 0x0a, - 0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc1, 0x64, 0xdc, 0xc6, 0x32, 0xbb, - 0x08, 0x83, 0x81, 0xe1, 0x8d, 0x3a, 0xa8, 0x2b, 0xb5, 0xa4, 0x63, 0x69, 0x8c, 0x83, 0xde, 0x00, - 0x68, 0xc6, 0x32, 0xf4, 0x82, 0x11, 0x42, 0x14, 0x72, 0xc5, 0xe7, 0x5a, 0x1d, 0xfb, 0xb3, 0x30, - 0xa8, 0xdc, 0x4d, 0xa5, 0xbf, 0xb9, 0x95, 0xe3, 0x6f, 0xde, 0x99, 0x8d, 0x90, 0xb6, 0x71, 0xc5, - 0x4c, 0xdb, 0xb8, 0xaf, 0x5a, 0x10, 0x6b, 0x38, 0xd0, 0x5b, 0x30, 0xd8, 0xf4, 0x99, 0x41, 0x74, - 0x20, 0xbd, 0x0c, 0x1e, 0x6f, 0xab, 0x22, 0xe1, 0x71, 0x02, 0x03, 0x3e, 0x1d, 0x65, 0x59, 0x15, - 0xc7, 0x54, 0xd0, 0x35, 0xe8, 0x6f, 0x06, 0xa4, 0x12, 0xb1, 0x20, 0x56, 0xdd, 0x13, 0xe4, 0xcb, - 0x97, 0x57, 0xc4, 0x92, 0x82, 0xfd, 0x1b, 0x05, 0x18, 0x4f, 0xa2, 0xa2, 0xd7, 0xa0, 0x87, 0xdc, - 0x25, 0x55, 0xd1, 0xdf, 0x4c, 0x9e, 0x20, 0x96, 0x91, 0xf0, 0x01, 0xa0, 0xff, 0x31, 0xab, 0x85, - 0xae, 0x42, 0x3f, 0x65, 0x08, 0xae, 0xa8, 0x80, 0x8d, 0x8f, 0xe6, 0x31, 0x15, 0x8a, 0xb3, 0xe2, - 0x9d, 0x13, 0x45, 0x58, 0x56, 0x67, 0x06, 0x69, 0xd5, 0x66, 0x85, 0xbe, 0xb5, 0xa2, 0x76, 0x22, - 0x81, 0xb5, 0xf9, 0x32, 0x47, 0x12, 0xd4, 0xb8, 0x41, 0x9a, 0x2c, 0xc4, 0x31, 0x11, 0xf4, 0x06, - 0xf4, 0x86, 0x75, 0x42, 0x9a, 0xc2, 0xe2, 0x20, 0x53, 0xca, 0x59, 0xa1, 0x08, 0x82, 0x12, 0x93, - 0x8a, 0xb0, 0x02, 0xcc, 0x2b, 0xda, 0xbf, 0x65, 0x01, 0x70, 0x0b, 0x3e, 0xc7, 0xdb, 0x24, 0x47, - 0xa0, 0x18, 0x58, 0x80, 0x9e, 0xb0, 0x49, 0xaa, 0xed, 0xac, 0xfd, 0xe3, 0xfe, 0x54, 0x9a, 0xa4, - 0x1a, 0xaf, 0x59, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x8f, 0x02, 0x8c, 0xc6, 0x68, 0xcb, 0x11, 0x69, - 0xa0, 0xe7, 0x8c, 0x28, 0x37, 0xa7, 0x12, 0x51, 0x6e, 0x06, 0x19, 0xb6, 0x26, 0x83, 0xfe, 0x2c, - 0x14, 0x1b, 0xce, 0x5d, 0x21, 0x64, 0x7c, 0xa6, 0x7d, 0x37, 0x28, 0xfd, 0x99, 0x15, 0xe7, 0x2e, - 0x7f, 0x87, 0x3f, 0x23, 0xf7, 0xd8, 0x8a, 0x73, 0xb7, 0xa3, 0x45, 0x3a, 0x6d, 0x84, 0xb5, 0xe5, - 0x7a, 0xc2, 0x38, 0xad, 0xab, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x75, 0xd1, 0x96, 0xeb, 0xa1, - 0x7b, 0xd0, 0x2f, 0x6c, 0x47, 0x45, 0xf8, 0xbd, 0x8b, 0x5d, 0xb4, 0x27, 0x4c, 0x4f, 0x79, 0x9b, - 0x17, 
0xa5, 0x9c, 0x41, 0x94, 0x76, 0x6c, 0x57, 0x36, 0x88, 0xfe, 0x2b, 0x0b, 0x46, 0xc5, 0x6f, - 0x4c, 0xde, 0x6d, 0x91, 0x30, 0x12, 0x7c, 0xf8, 0x47, 0xba, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, - 0x11, 0x79, 0x65, 0x9a, 0xc0, 0x8e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x86, 0x05, 0xc7, 0x1a, 0xce, - 0x5d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0xc2, 0x06, 0xe3, 0xb5, 0xee, 0xa6, 0x3f, 0x55, - 0x9d, 0x77, 0x52, 0x2a, 0x5c, 0x8f, 0x65, 0xa1, 0x74, 0xec, 0x6a, 0x66, 0xbf, 0xa6, 0x37, 0x60, - 0x40, 0xae, 0xb7, 0x87, 0x69, 0x18, 0xcf, 0xda, 0x11, 0x6b, 0xed, 0xa1, 0xb6, 0xf3, 0x59, 0x18, - 0xd6, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, 0x5d, 0x98, 0xcc, 0x58, 0x4b, 0x0f, 0xb5, 0xc9, 0x3b, 0x70, - 0x2a, 0x77, 0x7d, 0x3c, 0x54, 0xc7, 0x86, 0xaf, 0x5b, 0xfa, 0x39, 0x78, 0x04, 0xda, 0x99, 0x79, - 0x53, 0x3b, 0x73, 0xb6, 0xfd, 0xce, 0xc9, 0x51, 0xd1, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, - 0x4d, 0xe8, 0xab, 0xd3, 0x12, 0x69, 0x81, 0x6c, 0x77, 0xde, 0x91, 0x31, 0x5f, 0xcc, 0xca, 0x43, - 0x2c, 0x28, 0xd8, 0x5f, 0xb1, 0x20, 0xc3, 0x35, 0x83, 0xf2, 0x49, 0x2d, 0xb7, 0xc6, 0x86, 0xa4, - 0x18, 0xf3, 0x49, 0x2a, 0x08, 0xcc, 0x19, 0x28, 0x6e, 0xba, 0x35, 0xe1, 0x59, 0xac, 0xc0, 0x57, - 0x28, 0x78, 0xd3, 0xad, 0xa1, 0x25, 0x40, 0x61, 0xab, 0xd9, 0xac, 0x33, 0xb3, 0x25, 0xa7, 0x7e, - 0x25, 0xf0, 0x5b, 0x4d, 0x6e, 0x6e, 0x5c, 0xe4, 0x42, 0xa2, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, - 0xbf, 0x6b, 0x41, 0xcf, 0x11, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x6b, 0xc3, - 0x0c, 0x76, 0xee, 0x2c, 0xde, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0xf6, 0x2c, 0x98, - 0xbc, 0xee, 0x3b, 0xb5, 0x39, 0xa7, 0xee, 0x78, 0x55, 0x12, 0x2c, 0x7b, 0x9b, 0x87, 0xb2, 0xed, - 0x2f, 0x74, 0xb4, 0xed, 0xbf, 0x0c, 0x7d, 0x6e, 0x53, 0x0b, 0xfb, 0x7e, 0x8e, 0xce, 0xee, 0x72, - 0x59, 0x44, 0x7c, 0x47, 0x46, 0xe3, 0xac, 0x14, 0x0b, 0x7c, 0xba, 0x2c, 0xb9, 0x51, 0x5d, 0x4f, - 0xfe, 0xb2, 0xa4, 0x6f, 0x9d, 0x64, 0x38, 0x33, 0xc3, 0xfc, 0x7b, 0x0b, 0x8c, 0x26, 0x84, 0x07, - 0x23, 0x86, 0x7e, 0x97, 0x7f, 0xa9, 0x58, 0x9b, 0x4f, 0x66, 0xbf, 0x41, 0x52, 0x03, 0xa3, 0xf9, - 0xe6, 0xf1, 0x02, 0x2c, 0x09, 0xd9, 0x97, 0x21, 0x33, 0xfc, 0x4c, 0x67, 0xf9, 0x92, 0xfd, 0x09, - 0x98, 0x60, 0x35, 0x0f, 0x29, 0xbb, 0xb1, 0x13, 0x52, 0xf1, 0x8c, 0x08, 0xbe, 0xf6, 0xff, 0x6d, - 0x01, 0x5a, 0xf1, 0x6b, 0xee, 0xc6, 0xae, 0x20, 0xce, 0xbf, 0xff, 0x5d, 0x28, 0xf1, 0xc7, 0x71, - 0x32, 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xa4, 0x68, 0xb7, 0xb4, 0xd6, 0x1e, - 0x1d, 0x77, 0xa2, 0x87, 0xde, 0x4a, 0x04, 0x1d, 0xfc, 0x68, 0x2a, 0xe8, 0xe0, 0x93, 0x99, 0x76, - 0x31, 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0x17, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcf, - 0x8c, 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0x81, 0x4b, 0x62, 0xff, 0xd5, 0x82, - 0x38, 0xdc, 0xd5, 0x11, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3, - 0xb8, 0xd1, 0x35, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53, - 0xb3, 0xf1, 0xcd, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0x07, 0x13, 0xa8, 0x72, - 0x07, 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x9c, 0x0d, 0xcd, - 0xb4, 0xf4, 0x5c, 0xbd, 0x9e, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f, - 0xea, 0xeb, 0xe0, 0x82, 0xfd, 0x35, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4f, 0x5c, 0x40, 0x54, 0x7f, - 0x72, 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x1f, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x3d, - 0xa2, 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0x3d, 0xd8, 0x2b, 0x8d, 0xa8, 0x7f, 0x3c, 0x82, - 0x6b, 0x5c, 0xc5, 0xfe, 0x25, 
0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13, - 0x92, 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0x1e, 0xec, 0x95, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39, - 0x76, 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0x7f, 0xb2, 0xa0, 0x67, 0x95, 0xde, - 0x5e, 0x0f, 0xff, 0x08, 0x78, 0xdd, 0x38, 0x02, 0x4e, 0xe7, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b, - 0x89, 0xdd, 0x7f, 0x36, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05, - 0xbe, 0x60, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0xfa, 0x85, - 0x9f, 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x7c, 0x11, 0x8c, 0xfc, 0x48, 0xe8, - 0xf7, 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x66, 0xa5, 0xba, 0x45, - 0x6a, 0xad, 0xba, 0xeb, 0x6d, 0x2e, 0x6f, 0x7a, 0xbe, 0x2a, 0x5e, 0xbc, 0x4b, 0xaa, 0x2d, 0xa6, - 0x1b, 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbf, 0xbf, 0x57, 0x9a, 0xc1, 0x87, 0xa2, 0x8d, - 0x0f, 0xd9, 0x17, 0xf4, 0xe7, 0x16, 0x5c, 0xe4, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47, - 0x59, 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xb2, 0x18, 0xd0, 0x8b, 0xe5, 0xc3, 0xb5, - 0x85, 0x0f, 0xdb, 0x39, 0xfb, 0x7f, 0x2e, 0xc2, 0x88, 0x08, 0x13, 0x29, 0xee, 0x80, 0x97, 0x8c, - 0x25, 0xf1, 0x68, 0x62, 0x49, 0x4c, 0x18, 0xc8, 0x0f, 0xe6, 0xf8, 0x0f, 0x61, 0x82, 0x1e, 0xce, - 0x57, 0x89, 0x13, 0x44, 0xb7, 0x89, 0xc3, 0xad, 0x12, 0x8b, 0x87, 0x3e, 0xfd, 0x95, 0x78, 0xfc, - 0x7a, 0x92, 0x18, 0x4e, 0xd3, 0xff, 0x5e, 0xba, 0x73, 0x3c, 0x18, 0x4f, 0x45, 0xfa, 0x7c, 0x1b, - 0x06, 0x95, 0x93, 0x94, 0x38, 0x74, 0xda, 0x07, 0xcc, 0x4d, 0x52, 0xe0, 0x42, 0xcf, 0xd8, 0x41, - 0x2f, 0x26, 0x67, 0xff, 0x66, 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x2a, 0x0c, 0x38, 0x21, 0x0b, 0xe2, - 0x5d, 0x6b, 0x27, 0x97, 0x4e, 0x35, 0xc3, 0x1c, 0xd5, 0x66, 0x45, 0x4d, 0xac, 0x68, 0xa0, 0xab, - 0xdc, 0xf6, 0x73, 0x87, 0xb4, 0x13, 0x4a, 0xa7, 0xa8, 0x81, 0xb4, 0x0e, 0xdd, 0x21, 0x58, 0xd4, - 0x47, 0x9f, 0xe2, 0xc6, 0xb9, 0xd7, 0x3c, 0xff, 0x8e, 0x77, 0xc5, 0xf7, 0x65, 0x48, 0xa0, 0xee, - 0x08, 0x4e, 0x48, 0x93, 0x5c, 0x55, 0x1d, 0x9b, 0xd4, 0xba, 0x0b, 0x9d, 0xfd, 0x39, 0x60, 0x79, - 0x49, 0xcc, 0x98, 0x04, 0x21, 0x22, 0x30, 0x26, 0x62, 0x90, 0xca, 0x32, 0x31, 0x76, 0x99, 0xcf, - 0x6f, 0xb3, 0x76, 0xac, 0xc7, 0xb9, 0x66, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x16, 0x3f, 0x84, 0x97, - 0x88, 0x13, 0xb5, 0x02, 0x12, 0xa2, 0x8f, 0xc3, 0x54, 0xfa, 0x65, 0x2c, 0xd4, 0x21, 0x16, 0xe3, - 0x9e, 0x4f, 0xef, 0xef, 0x95, 0xa6, 0x2a, 0x39, 0x38, 0x38, 0xb7, 0xb6, 0xfd, 0x2b, 0x16, 0x30, - 0x4f, 0xf0, 0x23, 0xe0, 0x7c, 0x3e, 0x66, 0x72, 0x3e, 0x53, 0x79, 0xd3, 0x99, 0xc3, 0xf4, 0xbc, - 0xc8, 0xd7, 0x70, 0x39, 0xf0, 0xef, 0xee, 0x0a, 0xdb, 0xad, 0xce, 0xcf, 0x38, 0xfb, 0xcb, 0x16, - 0xb0, 0x24, 0x3e, 0x98, 0xbf, 0xda, 0xa5, 0x82, 0xa3, 0xb3, 0x59, 0xc2, 0xc7, 0x61, 0x60, 0x43, - 0x0c, 0x7f, 0x86, 0xd0, 0xc9, 0xe8, 0xb0, 0x49, 0x5b, 0x4e, 0x9a, 0xf0, 0xe8, 0x14, 0xff, 0xb0, - 0xa2, 0x66, 0xff, 0xf7, 0x16, 0x4c, 0xe7, 0x57, 0x43, 0xeb, 0x70, 0x32, 0x20, 0xd5, 0x56, 0x10, - 0xd2, 0x2d, 0x21, 0x1e, 0x40, 0xc2, 0x29, 0x8a, 0x4f, 0xf5, 0x23, 0xfb, 0x7b, 0xa5, 0x93, 0x38, - 0x1b, 0x05, 0xe7, 0xd5, 0x45, 0xaf, 0xc0, 0x68, 0x2b, 0xe4, 0x9c, 0x1f, 0x63, 0xba, 0x42, 0x11, - 0x29, 0x9a, 0xf9, 0x0d, 0xad, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0x7f, 0x80, 0x2f, 0x47, 0x15, 0x2c, - 0xba, 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x78, 0xa7, 0x5b, 0x9f, 0x5d, - 0x97, 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xc1, 0x82, 0x93, 0x3a, 0xa2, 0xe6, - 0x0e, 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 
0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0xbb, - 0x20, 0x57, 0xe8, 0x0d, 0x51, 0x7e, 0x20, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8, - 0x86, 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4, - 0xfe, 0x7b, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x5d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x6b, 0xf1, - 0x6e, 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0xcf, 0x74, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9, - 0x95, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc3, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e, - 0x26, 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xab, 0x45, 0x7e, 0x68, - 0xb0, 0xb7, 0xc7, 0x53, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7, - 0xca, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0x00, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0, - 0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x66, 0xe0, 0xef, 0xb8, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a, - 0xe7, 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78, - 0xf7, 0xca, 0x6e, 0x6c, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xcd, 0x42, 0x5f, 0xe4, 0x30, 0x6b, 0xb3, - 0xde, 0x7c, 0x23, 0xfa, 0x35, 0x8a, 0xa1, 0x67, 0x96, 0xa3, 0x15, 0xb0, 0xa8, 0x88, 0xde, 0x96, - 0xee, 0xf5, 0xfc, 0x26, 0x12, 0xde, 0x2b, 0xdd, 0xdd, 0x5a, 0x9a, 0x73, 0xbd, 0xf0, 0x8a, 0x31, - 0x68, 0xa1, 0x57, 0x00, 0xc8, 0xdd, 0x88, 0x04, 0x9e, 0x53, 0x57, 0x36, 0xa2, 0x8a, 0x91, 0x59, - 0xf0, 0x57, 0xfd, 0x68, 0x3d, 0x24, 0x8b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x0e, 0x01, 0xc4, - 0x0f, 0x0d, 0x74, 0x0f, 0x06, 0xaa, 0x4e, 0xd3, 0xa9, 0xf2, 0xb4, 0xa9, 0xc5, 0x3c, 0xaf, 0xe7, - 0xb8, 0xc6, 0xcc, 0xbc, 0x40, 0xe7, 0xca, 0x1b, 0x19, 0x32, 0x7c, 0x40, 0x16, 0x77, 0x54, 0xd8, - 0xa8, 0xf6, 0xd0, 0x17, 0x2c, 0x18, 0x12, 0x91, 0x8e, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a, - 0xfb, 0xb3, 0x71, 0x0d, 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x1d, 0x7b, 0xa1, 0x37, 0x8c, - 0x3e, 0x2c, 0xdf, 0xb6, 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x90, 0x5d, 0x35, 0xfa, 0xb3, 0x76, - 0xdd, 0x78, 0xd6, 0xf6, 0xe4, 0xfb, 0x0f, 0x1b, 0xfc, 0x76, 0xa7, 0x17, 0x2d, 0x2a, 0xeb, 0xb1, - 0x44, 0x7a, 0xf3, 0x9d, 0x5e, 0xb5, 0x87, 0x5d, 0x87, 0x38, 0x22, 0x9f, 0x85, 0xb1, 0x9a, 0xc9, - 0xb5, 0x88, 0x95, 0xf8, 0x64, 0x1e, 0xdd, 0x04, 0x93, 0x13, 0xf3, 0x29, 0x09, 0x00, 0x4e, 0x12, - 0x46, 0x65, 0x1e, 0x5a, 0x66, 0xd9, 0xdb, 0xf0, 0x85, 0x07, 0x95, 0x9d, 0x3b, 0x97, 0xbb, 0x61, - 0x44, 0x1a, 0x14, 0x33, 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0xa1, 0x8f, 0x79, - 0x3d, 0x86, 0x53, 0x03, 0xf9, 0x6a, 0x0d, 0x33, 0x12, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b, - 0x0a, 0xe8, 0xaa, 0xf4, 0x29, 0x0e, 0x97, 0xbd, 0xf5, 0x90, 0x30, 0x9f, 0xe2, 0xc1, 0xb9, 0xc7, - 0x63, 0x77, 0x61, 0x5e, 0x9e, 0x99, 0x7f, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xb5, - 0x15, 0x71, 0xdb, 0x32, 0xbb, 0x67, 0xa6, 0xbe, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69, - 0x52, 0x16, 0x9a, 0xef, 0x7a, 0xe1, 0x83, 0xd5, 0xe9, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1, - 0x12, 0x2c, 0xea, 0x23, 0x17, 0xc6, 0x02, 0x83, 0xbd, 0x90, 0xe1, 0xd6, 0xce, 0x77, 0xc7, 0xc4, - 0x68, 0x81, 0xfc, 0x4d, 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x23, 0xed, 0x5f, 0xfe, - 0x9d, 0x58, 0xa3, 0xe9, 0x6d, 0x18, 0x31, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x3c, 0x79, - 0xb2, 0x3c, 0x54, 0xcd, 0xe3, 0xdf, 0xf6, 0xc0, 0xa8, 0xb9, 0x13, 0xd0, 0x45, 0x18, 0x14, 0x44, - 0x54, 0x46, 0x2b, 0xb5, 0xb9, 0x57, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x91, 0x19, 0xab, 0xae, 0xf9, - 0x0a, 0xc4, 0x89, 0xcc, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x01, 0x7b, 0xdb, 0xf7, 
0x23, 0x75, 0x8f, - 0xaa, 0xed, 0x32, 0xc7, 0x4a, 0xb1, 0x80, 0xd2, 0xfb, 0x73, 0x9b, 0x04, 0x1e, 0xa9, 0x9b, 0x29, - 0x1d, 0xd4, 0xfd, 0x79, 0x4d, 0x07, 0x62, 0x13, 0x97, 0x72, 0x01, 0x7e, 0xc8, 0xf6, 0x9f, 0x78, - 0x26, 0xc7, 0xbe, 0x17, 0x15, 0x1e, 0x45, 0x42, 0xc2, 0xd1, 0x27, 0xe0, 0xa4, 0x0a, 0x9f, 0x28, - 0x56, 0x97, 0x6c, 0xb1, 0xcf, 0x90, 0x6a, 0x9d, 0x9c, 0xcf, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xeb, - 0x30, 0x2a, 0x9e, 0x52, 0x92, 0x62, 0xbf, 0x69, 0x48, 0x78, 0xcd, 0x80, 0xe2, 0x04, 0xb6, 0x4c, - 0x4a, 0xc1, 0xde, 0x18, 0x92, 0xc2, 0x40, 0x3a, 0x29, 0x85, 0x0e, 0xc7, 0xa9, 0x1a, 0x68, 0x16, - 0xc6, 0x38, 0xeb, 0xe8, 0x7a, 0x9b, 0x7c, 0x4e, 0x84, 0x67, 0xa7, 0xda, 0x54, 0x37, 0x4c, 0x30, - 0x4e, 0xe2, 0xa3, 0xcb, 0x30, 0xec, 0x04, 0xd5, 0x2d, 0x37, 0x22, 0x55, 0xba, 0x33, 0x98, 0x2d, - 0x9f, 0x66, 0x89, 0x39, 0xab, 0xc1, 0xb0, 0x81, 0x69, 0xdf, 0x83, 0xc9, 0x8c, 0xf0, 0x32, 0x74, - 0xe1, 0x38, 0x4d, 0x57, 0x7e, 0x53, 0xc2, 0xdd, 0x61, 0xb6, 0xbc, 0x2c, 0xbf, 0x46, 0xc3, 0xa2, - 0xab, 0x93, 0x85, 0xa1, 0xd1, 0x92, 0x6f, 0xab, 0xd5, 0xb9, 0x24, 0x01, 0x38, 0xc6, 0xb1, 0xff, - 0xb9, 0x00, 0x63, 0x19, 0x0a, 0x3a, 0x96, 0x00, 0x3a, 0xf1, 0xd2, 0x8a, 0xf3, 0x3d, 0x9b, 0x39, - 0x4e, 0x0a, 0x87, 0xc8, 0x71, 0x52, 0xec, 0x94, 0xe3, 0xa4, 0xe7, 0xbd, 0xe4, 0x38, 0x31, 0x47, - 0xac, 0xb7, 0xab, 0x11, 0xcb, 0xc8, 0x8b, 0xd2, 0x77, 0xc8, 0xbc, 0x28, 0xc6, 0xa0, 0xf7, 0x77, - 0x31, 0xe8, 0x3f, 0x5d, 0x80, 0xf1, 0xa4, 0x6e, 0xef, 0x08, 0xe4, 0xe3, 0x6f, 0x1a, 0xf2, 0xf1, - 0x0b, 0xdd, 0x78, 0xe2, 0xe7, 0xca, 0xca, 0x71, 0x42, 0x56, 0xfe, 0x74, 0x57, 0xd4, 0xda, 0xcb, - 0xcd, 0x7f, 0xb1, 0x00, 0xc7, 0x33, 0x55, 0x9e, 0x47, 0x30, 0x36, 0x37, 0x8c, 0xb1, 0x79, 0xae, - 0xeb, 0x28, 0x05, 0xb9, 0x03, 0x74, 0x2b, 0x31, 0x40, 0x17, 0xbb, 0x27, 0xd9, 0x7e, 0x94, 0xbe, - 0x55, 0x84, 0xb3, 0x99, 0xf5, 0x62, 0xf1, 0xf2, 0x92, 0x21, 0x5e, 0x7e, 0x3e, 0x21, 0x5e, 0xb6, - 0xdb, 0xd7, 0x7e, 0x30, 0xf2, 0x66, 0xe1, 0xad, 0xcf, 0x62, 0x8e, 0xdc, 0xa7, 0xac, 0xd9, 0xf0, - 0xd6, 0x57, 0x84, 0xb0, 0x49, 0xf7, 0x7b, 0x49, 0xc6, 0xfc, 0x67, 0x16, 0x9c, 0xca, 0x9c, 0x9b, - 0x23, 0x90, 0xf4, 0xad, 0x9a, 0x92, 0xbe, 0xa7, 0xba, 0x5e, 0xad, 0x39, 0xa2, 0xbf, 0x2f, 0xf6, - 0xe5, 0x7c, 0x0b, 0x13, 0x40, 0xdc, 0x80, 0x21, 0xa7, 0x5a, 0x25, 0x61, 0xb8, 0xe2, 0xd7, 0x54, - 0x3a, 0x84, 0xe7, 0xd8, 0xf3, 0x30, 0x2e, 0x3e, 0xd8, 0x2b, 0x4d, 0x27, 0x49, 0xc4, 0x60, 0xac, - 0x53, 0x40, 0x9f, 0x82, 0x81, 0x50, 0x66, 0xb2, 0xec, 0xb9, 0xff, 0x4c, 0x96, 0x8c, 0xc9, 0x55, - 0x02, 0x16, 0x45, 0x12, 0x7d, 0xbf, 0x1e, 0xfd, 0xa9, 0x8d, 0x68, 0x91, 0x77, 0xf2, 0x3e, 0x62, - 0x40, 0x3d, 0x0f, 0xb0, 0xa3, 0x5e, 0x32, 0x49, 0xe1, 0x89, 0xf6, 0xc6, 0xd1, 0xb0, 0xd0, 0x1b, - 0x30, 0x1e, 0xf2, 0xc0, 0xa7, 0xb1, 0x91, 0x0a, 0x5f, 0x8b, 0x2c, 0x76, 0x5c, 0x25, 0x01, 0xc3, - 0x29, 0x6c, 0xb4, 0x24, 0x5b, 0x65, 0xe6, 0x48, 0x7c, 0x79, 0x9e, 0x8f, 0x5b, 0x14, 0x26, 0x49, - 0xc7, 0x92, 0x93, 0xc0, 0x86, 0x5f, 0xab, 0x89, 0x3e, 0x05, 0x40, 0x17, 0x91, 0x10, 0xa2, 0xf4, - 0xe7, 0x1f, 0xa1, 0xf4, 0x6c, 0xa9, 0x65, 0x7a, 0x32, 0x30, 0x37, 0xfb, 0x05, 0x45, 0x04, 0x6b, - 0x04, 0x91, 0x03, 0x23, 0xf1, 0xbf, 0x38, 0x47, 0xfb, 0x85, 0xdc, 0x16, 0x92, 0xc4, 0x99, 0x82, - 0x61, 0x41, 0x27, 0x81, 0x4d, 0x8a, 0xe8, 0x93, 0x70, 0x6a, 0x27, 0xd7, 0xf2, 0x87, 0x73, 0x82, - 0x2c, 0xe9, 0x7a, 0xbe, 0xbd, 0x4f, 0x7e, 0x7d, 0xfb, 0x7f, 0x07, 0x78, 0xa4, 0xcd, 0x49, 0x8f, - 0x66, 0x4d, 0xad, 0xfd, 0x33, 0x49, 0xc9, 0xc6, 0x74, 0x66, 0x65, 0x43, 0xd4, 0x91, 0xd8, 0x50, - 0x85, 0xf7, 0xbc, 0xa1, 0x7e, 0xc2, 0xd2, 0x64, 0x4e, 0xdc, 0xa6, 0xfb, 0x63, 0x87, 0xbc, 0xc1, - 0x1e, 
0xa0, 0x10, 0x6a, 0x23, 0x43, 0x92, 0xf3, 0x7c, 0xd7, 0xdd, 0xe9, 0x5e, 0xb4, 0xf3, 0xf5, - 0xec, 0x80, 0xef, 0x5c, 0xc8, 0x73, 0xe5, 0xb0, 0xdf, 0x7f, 0x54, 0xc1, 0xdf, 0xbf, 0x69, 0xc1, - 0xa9, 0x54, 0x31, 0xef, 0x03, 0x09, 0x45, 0xb4, 0xbb, 0xd5, 0xf7, 0xdc, 0x79, 0x49, 0x90, 0x7f, - 0xc3, 0x55, 0xf1, 0x0d, 0xa7, 0x72, 0xf1, 0x92, 0x5d, 0xff, 0xd2, 0xdf, 0x94, 0x26, 0x59, 0x03, - 0x26, 0x22, 0xce, 0xef, 0x3a, 0x6a, 0xc2, 0xb9, 0x6a, 0x2b, 0x08, 0xe2, 0xc5, 0x9a, 0xb1, 0x39, - 0xf9, 0x5b, 0xef, 0xf1, 0xfd, 0xbd, 0xd2, 0xb9, 0xf9, 0x0e, 0xb8, 0xb8, 0x23, 0x35, 0xe4, 0x01, - 0x6a, 0xa4, 0xec, 0xeb, 0xd8, 0x01, 0x90, 0x23, 0x87, 0x49, 0x5b, 0xe3, 0x71, 0x4b, 0xd9, 0x0c, - 0x2b, 0xbd, 0x0c, 0xca, 0x47, 0x2b, 0x3d, 0xf9, 0xce, 0xc4, 0xa5, 0x9f, 0xbe, 0x0e, 0x67, 0xdb, - 0x2f, 0xa6, 0x43, 0x85, 0x72, 0xf8, 0x4b, 0x0b, 0xce, 0xb4, 0x8d, 0x17, 0xf6, 0x5d, 0xf8, 0x58, - 0xb0, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x35, 0x92, 0x4e, 0x78, 0x55, 0x5a, 0xa8, 0x99, 0xa3, 0xc6, - 0x91, 0x73, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xc5, 0x66, 0xa1, 0xa3, 0xc5, 0xe6, 0x1f, 0x59, 0x90, - 0xba, 0xea, 0x8f, 0x80, 0xf3, 0x5c, 0x36, 0x39, 0xcf, 0xc7, 0xbb, 0x19, 0xcd, 0x1c, 0xa6, 0xf3, - 0x1f, 0xc7, 0xe0, 0x44, 0x8e, 0x27, 0xf6, 0x0e, 0x4c, 0x6c, 0x56, 0x89, 0x19, 0x7a, 0xa3, 0x5d, - 0x48, 0xba, 0xb6, 0x71, 0x3a, 0xe6, 0x8e, 0xef, 0xef, 0x95, 0x26, 0x52, 0x28, 0x38, 0xdd, 0x04, - 0xfa, 0xbc, 0x05, 0xc7, 0x9c, 0x3b, 0xe1, 0x22, 0x7d, 0x41, 0xb8, 0xd5, 0xb9, 0xba, 0x5f, 0xdd, - 0xa6, 0x8c, 0x99, 0xdc, 0x56, 0x2f, 0x66, 0x0a, 0xa3, 0x6f, 0x55, 0x52, 0xf8, 0x46, 0xf3, 0x53, - 0xfb, 0x7b, 0xa5, 0x63, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84, 0x45, 0xc6, 0x2f, 0x27, 0xda, 0x6a, - 0x17, 0x1c, 0x26, 0xcb, 0x65, 0x9e, 0xb3, 0xc4, 0x12, 0x82, 0x15, 0x1d, 0xf4, 0x19, 0x18, 0xdc, - 0x94, 0x71, 0x20, 0x32, 0x58, 0xee, 0x78, 0x20, 0xdb, 0x47, 0xc7, 0xe0, 0x26, 0x30, 0x0a, 0x09, - 0xc7, 0x44, 0xd1, 0xeb, 0x50, 0xf4, 0x36, 0x42, 0x11, 0xa2, 0x2e, 0xdb, 0x12, 0xd7, 0xb4, 0x75, - 0xe6, 0x21, 0x98, 0x56, 0x97, 0x2a, 0x98, 0x56, 0x44, 0x57, 0xa1, 0x18, 0xdc, 0xae, 0x09, 0x4d, - 0x4a, 0xe6, 0x26, 0xc5, 0x73, 0x0b, 0x39, 0xbd, 0x62, 0x94, 0xf0, 0xdc, 0x02, 0xa6, 0x24, 0x50, - 0x19, 0x7a, 0x99, 0xfb, 0xb2, 0x60, 0x6d, 0x33, 0x9f, 0xf2, 0x6d, 0xc2, 0x00, 0x70, 0x8f, 0x44, - 0x86, 0x80, 0x39, 0x21, 0xb4, 0x06, 0x7d, 0x55, 0xd7, 0xab, 0x91, 0x40, 0xf0, 0xb2, 0x1f, 0xce, - 0xd4, 0x99, 0x30, 0x8c, 0x1c, 0x9a, 0x5c, 0x85, 0xc0, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xe6, - 0xd6, 0x86, 0xbc, 0xb1, 0xb2, 0xa9, 0x92, 0xe6, 0xd6, 0x52, 0xa5, 0x2d, 0x55, 0x86, 0x81, 0x05, - 0x2d, 0xf4, 0x0a, 0x14, 0x36, 0xaa, 0xc2, 0x35, 0x39, 0x53, 0x79, 0x62, 0x46, 0xd1, 0x9a, 0xeb, - 0xdb, 0xdf, 0x2b, 0x15, 0x96, 0xe6, 0x71, 0x61, 0xa3, 0x8a, 0x56, 0xa1, 0x7f, 0x83, 0xc7, 0xdd, - 0x11, 0xfa, 0x91, 0x27, 0xb3, 0x43, 0x02, 0xa5, 0x42, 0xf3, 0x70, 0xef, 0x52, 0x01, 0xc0, 0x92, - 0x08, 0x4b, 0x40, 0xa5, 0xe2, 0x07, 0x89, 0xf0, 0xa5, 0x33, 0x87, 0x8b, 0xf9, 0xc4, 0x9f, 0x1a, - 0x71, 0x14, 0x22, 0xac, 0x51, 0xa4, 0xab, 0xda, 0xb9, 0xd7, 0x0a, 0x58, 0x6e, 0x0b, 0xa1, 0x1a, - 0xc9, 0x5c, 0xd5, 0xb3, 0x12, 0xa9, 0xdd, 0xaa, 0x56, 0x48, 0x38, 0x26, 0x8a, 0xb6, 0x61, 0x64, - 0x27, 0x6c, 0x6e, 0x11, 0xb9, 0xa5, 0x59, 0xd8, 0xbb, 0x1c, 0x6e, 0xf6, 0xa6, 0x40, 0x74, 0x83, - 0xa8, 0xe5, 0xd4, 0x53, 0xa7, 0x10, 0x7b, 0xd6, 0xdc, 0xd4, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe, - 0x77, 0x5b, 0xfe, 0xed, 0xdd, 0x88, 0x88, 0xa8, 0xa3, 0x99, 0xc3, 0xff, 0x16, 0x47, 0x49, 0x0f, - 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x14, 0xc3, 0xc3, 0x4e, 0xcf, 0xf1, 0xfc, 0x90, 0xe6, 0xb3, - 0x12, 0x29, 0x67, 0x50, 0xd8, 
0x69, 0x19, 0x93, 0x62, 0xa7, 0x64, 0x73, 0xcb, 0x8f, 0x7c, 0x2f, - 0x71, 0x42, 0x4f, 0xe4, 0x9f, 0x92, 0xe5, 0x0c, 0xfc, 0xf4, 0x29, 0x99, 0x85, 0x85, 0x33, 0xdb, - 0x42, 0x35, 0x18, 0x6d, 0xfa, 0x41, 0x74, 0xc7, 0x0f, 0xe4, 0xfa, 0x42, 0x6d, 0x04, 0xa5, 0x06, - 0xa6, 0x68, 0x91, 0x19, 0xe6, 0x98, 0x10, 0x9c, 0xa0, 0x89, 0x3e, 0x0e, 0xfd, 0x61, 0xd5, 0xa9, - 0x93, 0xe5, 0x1b, 0x53, 0x93, 0xf9, 0xd7, 0x4f, 0x85, 0xa3, 0xe4, 0xac, 0x2e, 0x1e, 0x36, 0x89, - 0xa3, 0x60, 0x49, 0x0e, 0x2d, 0x41, 0x2f, 0x4b, 0xec, 0xcc, 0x42, 0xe4, 0xe6, 0x44, 0x66, 0x4f, - 0xb9, 0xd5, 0xf0, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0x21, 0x29, 0xf0, 0xc3, 0xa9, - 0xe3, 0xf9, 0x7b, 0x40, 0x08, 0x18, 0x6e, 0x54, 0xda, 0xed, 0x01, 0x85, 0x84, 0x63, 0xa2, 0xf4, - 0x64, 0xa6, 0xa7, 0xe9, 0x89, 0x36, 0x26, 0x93, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a, - 0x49, 0xd8, 0x7f, 0x30, 0x90, 0xe6, 0x59, 0x98, 0x84, 0xe9, 0x3f, 0xb7, 0x52, 0x36, 0x13, 0x1f, - 0xe9, 0x56, 0xe0, 0xfd, 0x00, 0x1f, 0xae, 0x9f, 0xb7, 0xe0, 0x44, 0x33, 0xf3, 0x43, 0x04, 0x03, - 0xd0, 0x9d, 0xdc, 0x9c, 0x7f, 0xba, 0x0a, 0xa7, 0x9c, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x85, 0x03, - 0xc5, 0xf7, 0x2c, 0x1c, 0x58, 0x81, 0x81, 0x2a, 0x7f, 0xc9, 0xc9, 0x34, 0x00, 0x5d, 0x05, 0x03, - 0x65, 0xac, 0x84, 0x78, 0x02, 0x6e, 0x60, 0x45, 0x02, 0xfd, 0xa4, 0x05, 0x67, 0x92, 0x5d, 0xc7, - 0x84, 0x81, 0x85, 0xc1, 0x24, 0x17, 0x6b, 0x2d, 0x89, 0xef, 0x4f, 0xf1, 0xff, 0x06, 0xf2, 0x41, - 0x27, 0x04, 0xdc, 0xbe, 0x31, 0xb4, 0x90, 0x21, 0x57, 0xeb, 0x33, 0x35, 0x8a, 0x5d, 0xc8, 0xd6, - 0x5e, 0x84, 0xe1, 0x86, 0xdf, 0xf2, 0x22, 0x61, 0xf7, 0x28, 0x8c, 0xa7, 0x98, 0xd1, 0xd0, 0x8a, - 0x56, 0x8e, 0x0d, 0xac, 0x84, 0x44, 0x6e, 0xe0, 0xbe, 0x25, 0x72, 0xef, 0xc0, 0xb0, 0xa7, 0xb9, - 0x04, 0xb4, 0x7b, 0xc1, 0x0a, 0xe9, 0xa2, 0x86, 0xcd, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0x5a, 0x7b, - 0x69, 0x19, 0xbc, 0x37, 0x69, 0xd9, 0x91, 0x3e, 0x89, 0xed, 0x5f, 0x2f, 0x64, 0xbc, 0x18, 0xb8, - 0x54, 0xee, 0x35, 0x53, 0x2a, 0x77, 0x3e, 0x29, 0x95, 0x4b, 0xa9, 0xaa, 0x0c, 0x81, 0x5c, 0xf7, - 0x19, 0x25, 0xbb, 0x0e, 0xf0, 0xfc, 0xc3, 0x16, 0x9c, 0x64, 0xba, 0x0f, 0xda, 0xc0, 0x7b, 0xd6, - 0x77, 0x30, 0x93, 0xd4, 0xeb, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x1d, 0xce, 0x75, 0xba, 0x77, - 0x99, 0x85, 0x6f, 0x4d, 0x19, 0x47, 0xc4, 0x16, 0xbe, 0xb5, 0xe5, 0x05, 0xcc, 0x20, 0xdd, 0x86, - 0x2f, 0xb4, 0xff, 0x7f, 0x0b, 0x8a, 0x65, 0xbf, 0x76, 0x04, 0x2f, 0xfa, 0x8f, 0x19, 0x2f, 0xfa, - 0x47, 0xb2, 0x6f, 0xfc, 0x5a, 0xae, 0xb2, 0x6f, 0x31, 0xa1, 0xec, 0x3b, 0x93, 0x47, 0xa0, 0xbd, - 0x6a, 0xef, 0x97, 0x8a, 0x30, 0x54, 0xf6, 0x6b, 0x6a, 0x9f, 0xfd, 0xaf, 0xf7, 0xe3, 0xc8, 0x93, - 0x9b, 0x7d, 0x4a, 0xa3, 0xcc, 0x2c, 0x7a, 0x65, 0xdc, 0x89, 0xef, 0x32, 0x7f, 0x9e, 0x5b, 0xc4, - 0xdd, 0xdc, 0x8a, 0x48, 0x2d, 0xf9, 0x39, 0x47, 0xe7, 0xcf, 0xf3, 0xed, 0x22, 0x8c, 0x25, 0x5a, - 0x47, 0x75, 0x18, 0xa9, 0xeb, 0xaa, 0x24, 0xb1, 0x4e, 0xef, 0x4b, 0x0b, 0x25, 0xfc, 0x21, 0xb4, - 0x22, 0x6c, 0x12, 0x47, 0x33, 0x00, 0x9e, 0x6e, 0x15, 0xae, 0x02, 0x15, 0x6b, 0x16, 0xe1, 0x1a, - 0x06, 0x7a, 0x09, 0x86, 0x22, 0xbf, 0xe9, 0xd7, 0xfd, 0xcd, 0xdd, 0x6b, 0x44, 0x46, 0xb6, 0x54, - 0x46, 0xc3, 0x6b, 0x31, 0x08, 0xeb, 0x78, 0xe8, 0x2e, 0x4c, 0x28, 0x22, 0x95, 0x07, 0xa0, 0x5e, - 0x63, 0x62, 0x93, 0xd5, 0x24, 0x45, 0x9c, 0x6e, 0x04, 0xbd, 0x02, 0xa3, 0xcc, 0x7a, 0x99, 0xd5, - 0xbf, 0x46, 0x76, 0x65, 0xc4, 0x63, 0xc6, 0x61, 0xaf, 0x18, 0x10, 0x9c, 0xc0, 0x44, 0xf3, 0x30, - 0xd1, 0x70, 0xc3, 0x44, 0xf5, 0x3e, 0x56, 0x9d, 0x75, 0x60, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0xfb, - 0x57, 0xc5, 0x1c, 0x7b, 0x91, 0xfb, 0xc1, 0x76, 0x7c, 
0x7f, 0x6f, 0xc7, 0x6f, 0x59, 0x30, 0x4e, - 0x5b, 0x67, 0x26, 0x99, 0x92, 0x91, 0x52, 0x39, 0x31, 0xac, 0x36, 0x39, 0x31, 0xce, 0xd3, 0x63, - 0xbb, 0xe6, 0xb7, 0x22, 0x21, 0x1d, 0xd5, 0xce, 0x65, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20, - 0x10, 0x7e, 0xef, 0x3a, 0x1e, 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x65, 0x46, 0x4f, 0x76, 0xca, 0x0c, - 0x1e, 0xf9, 0x5c, 0x58, 0xc1, 0x09, 0x96, 0x56, 0x8b, 0x7c, 0x2e, 0xcd, 0xe3, 0x62, 0x1c, 0xfb, - 0xeb, 0x45, 0x18, 0x2e, 0xfb, 0xb5, 0xd8, 0xb0, 0xe3, 0x45, 0xc3, 0xb0, 0xe3, 0x5c, 0xc2, 0xb0, - 0x63, 0x5c, 0xc7, 0xfd, 0xc0, 0x8c, 0xe3, 0x3b, 0x65, 0xc6, 0xf1, 0x87, 0x16, 0x9b, 0xb5, 0x85, - 0xd5, 0x0a, 0xb7, 0xf0, 0x45, 0x97, 0x60, 0x88, 0x9d, 0x70, 0x2c, 0xd0, 0x82, 0xb4, 0x76, 0x60, - 0x29, 0x2c, 0x57, 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x0b, 0x30, 0x10, 0x12, 0x27, 0xa8, 0x6e, 0xa9, - 0xe3, 0x5d, 0x98, 0x26, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0xe8, 0x76, 0x31, 0xdf, 0x5c, - 0x58, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x91, 0xb6, 0xed, 0x5b, 0x80, 0xd2, 0xf8, 0x5d, 0xf8, 0x5f, - 0x95, 0xcc, 0xb0, 0xb0, 0x83, 0xa9, 0x90, 0xb0, 0xff, 0x62, 0xc1, 0x68, 0xd9, 0xaf, 0xd1, 0xad, - 0xfb, 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x06, 0xbd, 0x65, 0xbf, - 0xd6, 0x21, 0x74, 0xed, 0x7f, 0x63, 0x41, 0x7f, 0xd9, 0xaf, 0x1d, 0x81, 0xe2, 0xe5, 0x35, 0x53, - 0xf1, 0x72, 0x32, 0x67, 0xdd, 0xe4, 0xe8, 0x5a, 0xfe, 0xa4, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x4d, - 0x39, 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x27, 0x39, - 0xad, 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x16, 0x06, 0x9a, 0x01, 0xd9, 0x71, 0x7d, 0xc1, 0x5f, - 0x6b, 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, - 0xf7, 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x5b, 0x30, 0xc8, - 0xfe, 0xb3, 0x63, 0xa7, 0xf7, 0xd0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, - 0x1e, 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, - 0x58, 0xe8, 0x19, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xdd, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, - 0xeb, 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x8d, 0x44, 0x6a, 0xba, - 0x22, 0xe7, 0x05, 0xaf, 0xab, 0x52, 0xac, 0x61, 0xa0, 0x2d, 0x38, 0xed, 0x7a, 0x2c, 0x85, 0x14, - 0xa9, 0x6c, 0xbb, 0xcd, 0xb5, 0xeb, 0x95, 0x9b, 0x24, 0x70, 0x37, 0x76, 0xe7, 0x9c, 0xea, 0x36, - 0xf1, 0x64, 0x42, 0xfc, 0xc7, 0x45, 0x17, 0x4f, 0x2f, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0x21, 0x9b, - 0x6e, 0xc7, 0x80, 0x38, 0x0d, 0x21, 0x13, 0xe0, 0xe9, 0x67, 0x58, 0x09, 0x16, 0x10, 0xfb, 0x05, - 0xb6, 0x27, 0x6e, 0x54, 0xd0, 0xd3, 0xc6, 0xf1, 0x72, 0x42, 0x3f, 0x5e, 0x0e, 0xf6, 0x4a, 0x7d, - 0x37, 0x2a, 0x5a, 0x7c, 0xa0, 0xcb, 0x70, 0xbc, 0xec, 0xd7, 0xca, 0x7e, 0x10, 0x2d, 0xf9, 0xc1, - 0x1d, 0x27, 0xa8, 0xc9, 0x25, 0x58, 0x92, 0x11, 0x92, 0xe8, 0x19, 0xdb, 0xcb, 0x4f, 0x20, 0x23, - 0xfa, 0xd1, 0x0b, 0x8c, 0xab, 0x3b, 0xa4, 0x43, 0x6a, 0x95, 0xf1, 0x17, 0x2a, 0x51, 0xdb, 0x15, - 0x27, 0x22, 0xe8, 0x06, 0x8c, 0x54, 0xf5, 0xab, 0x56, 0x54, 0x7f, 0x4a, 0x5e, 0x76, 0xc6, 0x3d, - 0x9c, 0x79, 0x37, 0x9b, 0xf5, 0xed, 0x6f, 0x5a, 0xa2, 0x15, 0x2e, 0xad, 0xe0, 0x76, 0xaf, 0x9d, - 0xcf, 0xdc, 0x79, 0x98, 0x08, 0xf4, 0x2a, 0x9a, 0xfd, 0xd8, 0x71, 0x9e, 0xf9, 0x26, 0x01, 0xc4, - 0x69, 0x7c, 0xf4, 0x49, 0x38, 0x65, 0x14, 0x4a, 0x55, 0xba, 0x96, 0x7f, 0x9a, 0xc9, 0x73, 0x70, - 0x1e, 0x12, 0xce, 0xaf, 0x6f, 0xff, 0x20, 0x9c, 0x48, 0x7e, 0x97, 0x90, 0xb0, 0xdc, 0xe7, 0xd7, - 0x15, 0x0e, 0xf7, 0x75, 0xf6, 0x4b, 0x30, 0x41, 0x9f, 0xde, 0x8a, 0x8d, 0x64, 
0xf3, 0xd7, 0x39, - 0x08, 0xd5, 0x6f, 0x0e, 0xb0, 0x6b, 0x30, 0x91, 0x7d, 0x0d, 0x7d, 0x1a, 0x46, 0x43, 0xc2, 0x22, - 0xaf, 0x49, 0xc9, 0x5e, 0x1b, 0x6f, 0xf2, 0xca, 0xa2, 0x8e, 0xc9, 0x5f, 0x2f, 0x66, 0x19, 0x4e, - 0x50, 0x43, 0x0d, 0x18, 0xbd, 0xe3, 0x7a, 0x35, 0xff, 0x4e, 0x28, 0xe9, 0x0f, 0xe4, 0xab, 0x09, - 0x6e, 0x71, 0xcc, 0x44, 0x1f, 0x8d, 0xe6, 0x6e, 0x19, 0xc4, 0x70, 0x82, 0x38, 0x3d, 0x6a, 0x82, - 0x96, 0x37, 0x1b, 0xae, 0x87, 0x24, 0x10, 0x71, 0xe1, 0xd8, 0x51, 0x83, 0x65, 0x21, 0x8e, 0xe1, - 0xf4, 0xa8, 0x61, 0x7f, 0x98, 0x3b, 0x3a, 0x3b, 0xcb, 0xc4, 0x51, 0x83, 0x55, 0x29, 0xd6, 0x30, - 0xe8, 0x51, 0xcc, 0xfe, 0xad, 0xfa, 0x1e, 0xf6, 0xfd, 0x48, 0x1e, 0xde, 0x2c, 0x55, 0xa5, 0x56, - 0x8e, 0x0d, 0xac, 0x9c, 0x28, 0x74, 0x3d, 0x87, 0x8d, 0x42, 0x87, 0xa2, 0x36, 0x1e, 0xf8, 0x3c, - 0x1a, 0xf2, 0xe5, 0x76, 0x1e, 0xf8, 0x07, 0xf7, 0xe5, 0x9d, 0x4f, 0x79, 0x81, 0x0d, 0x31, 0x40, - 0xbd, 0x3c, 0xcc, 0x1e, 0x53, 0x64, 0x56, 0xf8, 0xe8, 0x48, 0x18, 0x5a, 0x84, 0xfe, 0x70, 0x37, - 0xac, 0x46, 0xf5, 0xb0, 0x5d, 0x3a, 0xd2, 0x0a, 0x43, 0xd1, 0xb2, 0x61, 0xf3, 0x2a, 0x58, 0xd6, - 0x45, 0x55, 0x98, 0x14, 0x14, 0xe7, 0xb7, 0x1c, 0x4f, 0x25, 0x49, 0xe4, 0x16, 0x8b, 0x97, 0xf6, - 0xf7, 0x4a, 0x93, 0xa2, 0x65, 0x1d, 0x7c, 0xb0, 0x57, 0xa2, 0x5b, 0x32, 0x03, 0x82, 0xb3, 0xa8, - 0xf1, 0x25, 0x5f, 0xad, 0xfa, 0x8d, 0x66, 0x39, 0xf0, 0x37, 0xdc, 0x3a, 0x69, 0xa7, 0x0c, 0xae, - 0x18, 0x98, 0x62, 0xc9, 0x1b, 0x65, 0x38, 0x41, 0x0d, 0xdd, 0x86, 0x31, 0xa7, 0xd9, 0x9c, 0x0d, - 0x1a, 0x7e, 0x20, 0x1b, 0x18, 0xca, 0xd7, 0x2a, 0xcc, 0x9a, 0xa8, 0x3c, 0x47, 0x62, 0xa2, 0x10, - 0x27, 0x09, 0xd2, 0x81, 0x12, 0x1b, 0xcd, 0x18, 0xa8, 0x91, 0x78, 0xa0, 0xc4, 0xbe, 0xcc, 0x18, - 0xa8, 0x0c, 0x08, 0xce, 0xa2, 0x66, 0xff, 0x00, 0x63, 0xfc, 0x2b, 0xee, 0xa6, 0xc7, 0x9c, 0xe3, - 0x50, 0x03, 0x46, 0x9a, 0xec, 0xd8, 0x17, 0xf9, 0xcb, 0xc4, 0x51, 0xf1, 0x62, 0x97, 0xc2, 0xcb, - 0x3b, 0x2c, 0x03, 0xab, 0x61, 0xc4, 0x5a, 0xd6, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x8b, 0xd3, 0x8c, - 0x75, 0xac, 0x70, 0x89, 0x64, 0xbf, 0x70, 0x55, 0x14, 0x32, 0x88, 0xe9, 0x7c, 0xd9, 0x7f, 0xbc, - 0xbe, 0x84, 0xbb, 0x23, 0x96, 0x75, 0xd1, 0xa7, 0x60, 0x94, 0x3e, 0xe9, 0x15, 0xfb, 0x16, 0x4e, - 0x1d, 0xcb, 0x8f, 0x81, 0xa5, 0xb0, 0xf4, 0xdc, 0x86, 0x7a, 0x65, 0x9c, 0x20, 0x86, 0xde, 0x62, - 0x76, 0x9d, 0x92, 0x74, 0xa1, 0x1b, 0xd2, 0xba, 0x09, 0xa7, 0x24, 0xab, 0x11, 0x41, 0x2d, 0x98, - 0x4c, 0x67, 0x70, 0x0e, 0xa7, 0xec, 0xfc, 0xb7, 0x51, 0x3a, 0x09, 0x73, 0x9c, 0x84, 0x2e, 0x0d, - 0x0b, 0x71, 0x16, 0x7d, 0x74, 0x3d, 0x99, 0x5f, 0xb7, 0x68, 0x68, 0x0d, 0x52, 0x39, 0x76, 0x47, - 0xda, 0xa6, 0xd6, 0xdd, 0x84, 0x33, 0x5a, 0x8a, 0xd2, 0x2b, 0x81, 0xc3, 0xec, 0x8a, 0x5c, 0x76, - 0x1b, 0x69, 0x4c, 0xed, 0xa3, 0xfb, 0x7b, 0xa5, 0x33, 0x6b, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xe8, - 0x06, 0x1c, 0xe7, 0x11, 0x5c, 0x16, 0x88, 0x53, 0xab, 0xbb, 0x9e, 0xe2, 0x9a, 0xf9, 0xd9, 0x75, - 0x6a, 0x7f, 0xaf, 0x74, 0x7c, 0x36, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0xaf, 0xc1, 0x60, 0xcd, 0x93, - 0xa7, 0x6c, 0x9f, 0x91, 0x05, 0x76, 0x70, 0x61, 0xb5, 0xa2, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, - 0x68, 0x93, 0xab, 0xad, 0x94, 0xac, 0xb1, 0x3f, 0x15, 0xd8, 0x33, 0x29, 0x8e, 0x37, 0x42, 0x22, - 0x70, 0x7d, 0xad, 0x72, 0xb9, 0x33, 0xa2, 0x25, 0x18, 0x84, 0xd1, 0x9b, 0x80, 0x44, 0xb6, 0xa1, - 0xd9, 0x2a, 0x4b, 0x8e, 0xa7, 0xd9, 0x92, 0x2a, 0x11, 0x42, 0x25, 0x85, 0x81, 0x33, 0x6a, 0xa1, - 0xab, 0xf4, 0x78, 0xd4, 0x4b, 0xc5, 0xf1, 0xab, 0x72, 0x8d, 0x2f, 0x90, 0x66, 0x40, 0x98, 0xf9, - 0xa3, 0x49, 0x11, 0x27, 0xea, 0xa1, 0x1a, 0x9c, 0x76, 0x5a, 0x91, 0xcf, 0x34, 0x82, 0x26, 0xea, - 0x9a, 
0xbf, 0x4d, 0x3c, 0xa6, 0x8c, 0x1f, 0x60, 0x01, 0x43, 0x4f, 0xcf, 0xb6, 0xc1, 0xc3, 0x6d, - 0xa9, 0xd0, 0xe7, 0x14, 0x1d, 0x0b, 0x4d, 0x59, 0x67, 0x78, 0x77, 0x73, 0x0d, 0xb6, 0xc4, 0x40, - 0x2f, 0xc1, 0xd0, 0x96, 0x1f, 0x46, 0xab, 0x24, 0xba, 0xe3, 0x07, 0xdb, 0x22, 0xbd, 0x41, 0x9c, - 0x52, 0x26, 0x06, 0x61, 0x1d, 0x0f, 0x3d, 0x05, 0xfd, 0xcc, 0x54, 0x6c, 0x79, 0x81, 0xdd, 0xb5, - 0x03, 0xf1, 0x19, 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xcb, 0xe5, 0x79, 0x76, 0x1c, 0x27, - 0x50, 0x97, 0xcb, 0xf3, 0x58, 0xc2, 0xe9, 0x72, 0x0d, 0xb7, 0x9c, 0x80, 0x94, 0x03, 0xbf, 0x4a, - 0x42, 0x2d, 0x91, 0xd1, 0x23, 0x3c, 0x79, 0x03, 0x5d, 0xae, 0x95, 0x2c, 0x04, 0x9c, 0x5d, 0x0f, - 0x91, 0x74, 0x7a, 0xde, 0xd1, 0x7c, 0x55, 0x69, 0x9a, 0x1d, 0xec, 0x32, 0x43, 0xaf, 0x07, 0xe3, - 0x2a, 0x31, 0x30, 0x4f, 0xd7, 0x10, 0x4e, 0x8d, 0xb1, 0xb5, 0xdd, 0x7d, 0xae, 0x07, 0xa5, 0x7c, - 0x5e, 0x4e, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xda, 0xf1, 0x8e, 0x11, 0x69, 0x2f, 0xc2, 0x60, - 0xd8, 0xba, 0x5d, 0xf3, 0x1b, 0x8e, 0xeb, 0x31, 0x8b, 0x1b, 0xed, 0xe1, 0x5e, 0x91, 0x00, 0x1c, - 0xe3, 0xa0, 0x25, 0x18, 0x70, 0xa4, 0x66, 0x19, 0xe5, 0x07, 0xdb, 0x53, 0xfa, 0x64, 0x1e, 0x7f, - 0x4a, 0xea, 0x92, 0x55, 0x5d, 0xf4, 0x2a, 0x8c, 0x88, 0x80, 0x1e, 0x22, 0x97, 0xfe, 0xa4, 0xe9, - 0xbe, 0x5c, 0xd1, 0x81, 0xd8, 0xc4, 0x45, 0xeb, 0x30, 0x14, 0xf9, 0x75, 0xe6, 0x83, 0x4b, 0xb9, - 0xe4, 0x13, 0xf9, 0x31, 0x71, 0xd7, 0x14, 0x9a, 0xae, 0xf3, 0x50, 0x55, 0xb1, 0x4e, 0x07, 0xad, - 0xf1, 0xf5, 0xce, 0xd2, 0x16, 0x91, 0x50, 0x24, 0x63, 0x3f, 0x93, 0x67, 0x2e, 0xc9, 0xd0, 0xcc, - 0xed, 0x20, 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x02, 0x13, 0xcd, 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0x9a, - 0xf2, 0x29, 0x33, 0x49, 0x69, 0x39, 0x89, 0x80, 0xd3, 0x75, 0x58, 0x3c, 0x16, 0x51, 0x38, 0x75, - 0x8a, 0x27, 0x5a, 0xe3, 0x72, 0x10, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb0, 0x93, 0x98, 0x8b, 0xf0, - 0xa6, 0xa6, 0xf3, 0xbd, 0xfc, 0x75, 0x51, 0x1f, 0xe7, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x35, - 0x2d, 0xbf, 0x39, 0x7d, 0x41, 0x85, 0x53, 0xa7, 0xdb, 0xd8, 0xeb, 0x26, 0x9e, 0xcb, 0x31, 0x43, - 0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0xbd, 0x01, 0xe3, 0x22, 0x58, 0x41, 0x3c, 0x4c, 0x67, 0x62, - 0x9f, 0x26, 0x9c, 0x80, 0xe1, 0x14, 0x36, 0x4f, 0x74, 0xe6, 0xdc, 0xae, 0x13, 0x71, 0xf4, 0x5d, - 0x77, 0xbd, 0xed, 0x70, 0xea, 0x2c, 0x3b, 0x1f, 0x44, 0xa2, 0xb3, 0x24, 0x14, 0x67, 0xd4, 0x40, - 0x6b, 0x30, 0xde, 0x0c, 0x08, 0x69, 0xb0, 0x77, 0x92, 0xb8, 0xcf, 0x4a, 0x3c, 0x1c, 0x11, 0xed, - 0x49, 0x39, 0x01, 0x3b, 0xc8, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x0e, 0x0c, 0xf8, 0x3b, 0x24, 0xd8, - 0x22, 0x4e, 0x6d, 0xea, 0x5c, 0x1b, 0x4f, 0x3b, 0x71, 0xb9, 0xdd, 0x10, 0xb8, 0x09, 0x43, 0x24, - 0x59, 0xdc, 0xd9, 0x10, 0x49, 0x36, 0x86, 0xfe, 0x0b, 0x0b, 0x4e, 0x49, 0xd5, 0x5e, 0xa5, 0x49, - 0x47, 0x7d, 0xde, 0xf7, 0xc2, 0x28, 0xe0, 0x01, 0x74, 0x1e, 0xcd, 0x0f, 0x2a, 0xb3, 0x96, 0x53, - 0x49, 0x69, 0x11, 0x4e, 0xe5, 0x61, 0x84, 0x38, 0xbf, 0x45, 0xfa, 0xb2, 0x0f, 0x49, 0x24, 0x0f, - 0xa3, 0xd9, 0x70, 0xe9, 0xad, 0x85, 0xd5, 0xa9, 0xc7, 0x78, 0xf4, 0x1f, 0xba, 0x19, 0x2a, 0x49, - 0x20, 0x4e, 0xe3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xa9, 0xc7, 0xdb, 0xa4, 0xc4, 0xf7, 0x6b, 0x37, - 0x2a, 0xdc, 0x20, 0xf5, 0x46, 0x05, 0x17, 0xfc, 0x50, 0x26, 0x1b, 0xa3, 0xcf, 0xd9, 0x70, 0xea, - 0x09, 0x2e, 0x73, 0x96, 0xc9, 0xc6, 0x58, 0x21, 0x8e, 0xe1, 0x68, 0x0b, 0xc6, 0x42, 0x43, 0x6c, - 0x10, 0x4e, 0x9d, 0x67, 0x23, 0xf5, 0x44, 0xde, 0xa4, 0x19, 0xd8, 0x5a, 0x16, 0x20, 0x93, 0x0a, - 0x4e, 0x92, 0xe5, 0xbb, 0x4b, 0x13, 0x5c, 0x84, 0x53, 0x4f, 0x76, 0xd8, 0x5d, 0x1a, 0xb2, 0xbe, - 0xbb, 0x74, 0x1a, 0x38, 0x41, 
0x13, 0xad, 0xeb, 0x6e, 0x8c, 0x17, 0xf2, 0x8d, 0x1b, 0x33, 0x1d, - 0x18, 0x47, 0xf2, 0x9c, 0x17, 0xa7, 0xbf, 0x0f, 0x26, 0x52, 0x5c, 0xd8, 0x61, 0x7c, 0x3a, 0xa6, - 0xb7, 0x61, 0xc4, 0x58, 0xe9, 0x0f, 0xd5, 0xe4, 0xe7, 0xcf, 0x06, 0x61, 0x50, 0x99, 0x62, 0xa0, - 0x8b, 0xa6, 0x95, 0xcf, 0xa9, 0xa4, 0x95, 0xcf, 0x40, 0xd9, 0xaf, 0x19, 0x86, 0x3d, 0x6b, 0x19, - 0xb1, 0x72, 0xf3, 0xce, 0xd5, 0xee, 0x1d, 0xcf, 0x34, 0xf5, 0x52, 0xb1, 0x6b, 0x73, 0xa1, 0x9e, - 0xb6, 0x1a, 0xab, 0x2b, 0x30, 0xe1, 0xf9, 0x8c, 0xf5, 0x27, 0x35, 0xc9, 0xd7, 0x31, 0xf6, 0x6d, - 0x50, 0x8f, 0xe5, 0x96, 0x40, 0xc0, 0xe9, 0x3a, 0xb4, 0x41, 0xce, 0x7f, 0x25, 0x55, 0x64, 0x9c, - 0x3d, 0xc3, 0x02, 0x4a, 0x9f, 0x9c, 0xfc, 0x57, 0x38, 0x35, 0x9e, 0xff, 0xe4, 0xe4, 0x95, 0x92, - 0x3c, 0x5e, 0x28, 0x79, 0x3c, 0xa6, 0x11, 0x6a, 0xfa, 0xb5, 0xe5, 0xb2, 0x78, 0x3d, 0x68, 0x51, - 0xec, 0x6b, 0xcb, 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x23, 0x27, 0x6f, 0xf7, - 0x2f, 0x97, 0xb5, 0x1c, 0xaa, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc4, 0x9f, 0x3e, 0xb9, 0x98, 0xc4, - 0xbf, 0xff, 0x3e, 0x25, 0xfe, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xbb, 0x70, 0xdc, 0x78, 0xe6, 0x2a, - 0x4f, 0x3c, 0xc8, 0x37, 0x06, 0x48, 0x20, 0xcf, 0x9d, 0x11, 0x9d, 0x3e, 0xbe, 0x9c, 0x45, 0x09, - 0x67, 0x37, 0x80, 0xea, 0x30, 0x51, 0x4d, 0xb5, 0x3a, 0xd0, 0x7d, 0xab, 0x6a, 0x5d, 0xa4, 0x5b, - 0x4c, 0x13, 0x46, 0xaf, 0xc2, 0xc0, 0xbb, 0x3e, 0x37, 0xdc, 0x13, 0x2f, 0x1e, 0x19, 0x05, 0x66, - 0xe0, 0xad, 0x1b, 0x15, 0x56, 0x7e, 0xb0, 0x57, 0x1a, 0x2a, 0xfb, 0x35, 0xf9, 0x17, 0xab, 0x0a, - 0xe8, 0xc7, 0x2c, 0x98, 0x4e, 0xbf, 0xa3, 0x55, 0xa7, 0x47, 0xba, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, - 0x5e, 0xcc, 0x25, 0x87, 0xdb, 0x34, 0x85, 0x3e, 0x4a, 0xf7, 0x53, 0xe8, 0xde, 0x23, 0x22, 0x01, - 0xfd, 0xa3, 0xf1, 0x7e, 0xa2, 0xa5, 0x07, 0x7b, 0xa5, 0x31, 0x7e, 0xe0, 0xba, 0xf7, 0x54, 0xbc, - 0x7d, 0x5e, 0x01, 0xfd, 0x20, 0x1c, 0x0f, 0xd2, 0x72, 0x6d, 0x22, 0x79, 0xfb, 0xa7, 0xbb, 0x39, - 0xbc, 0x93, 0x13, 0x8e, 0xb3, 0x08, 0xe2, 0xec, 0x76, 0xec, 0xdf, 0xb3, 0x98, 0x3e, 0x43, 0x74, - 0x8b, 0x84, 0xad, 0x7a, 0x74, 0x04, 0xc6, 0x72, 0x8b, 0x86, 0x3d, 0xc1, 0x7d, 0x5b, 0xbb, 0xfd, - 0x2f, 0x16, 0xb3, 0x76, 0x3b, 0x42, 0xbf, 0xbd, 0xb7, 0x60, 0x20, 0x12, 0xad, 0x89, 0xae, 0xe7, - 0x59, 0xe6, 0xc8, 0x4e, 0x31, 0x8b, 0x3f, 0xf5, 0x76, 0x92, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x47, - 0x3e, 0x03, 0x12, 0x72, 0x04, 0x6a, 0xdb, 0x05, 0x53, 0x6d, 0x5b, 0xea, 0xf0, 0x05, 0x39, 0xea, - 0xdb, 0xff, 0xc1, 0xec, 0x37, 0x93, 0x19, 0xbe, 0xdf, 0xcd, 0x2c, 0xed, 0x2f, 0x5a, 0x00, 0x71, - 0x82, 0x93, 0x2e, 0x12, 0x4e, 0x5f, 0xa6, 0xaf, 0x25, 0x3f, 0xf2, 0xab, 0x7e, 0x5d, 0xa8, 0x8d, - 0x4e, 0xc7, 0x9a, 0x63, 0x5e, 0x7e, 0xa0, 0xfd, 0xc6, 0x0a, 0x1b, 0x95, 0x64, 0xc4, 0xe1, 0x62, - 0x6c, 0xcb, 0x60, 0x44, 0x1b, 0xfe, 0x8a, 0x05, 0xc7, 0xb2, 0x9c, 0x40, 0xe8, 0xdb, 0x9b, 0x4b, - 0x4f, 0x95, 0x09, 0xac, 0x9a, 0xcd, 0x9b, 0xa2, 0x1c, 0x2b, 0x8c, 0xae, 0x33, 0x79, 0x1f, 0x2e, - 0xf9, 0xc6, 0x0d, 0x18, 0x29, 0x07, 0x44, 0xe3, 0x2f, 0x5e, 0x8f, 0xf3, 0x02, 0x0d, 0xce, 0x3d, - 0x7b, 0xe8, 0xc8, 0x4a, 0xf6, 0x57, 0x0b, 0x70, 0x8c, 0x1b, 0x72, 0xcd, 0xee, 0xf8, 0x6e, 0xad, - 0xec, 0xd7, 0x84, 0xeb, 0xee, 0xdb, 0x30, 0xdc, 0xd4, 0x44, 0xde, 0xed, 0x02, 0xc9, 0xeb, 0xa2, - 0xf1, 0x58, 0x48, 0xa7, 0x97, 0x62, 0x83, 0x16, 0xaa, 0xc1, 0x30, 0xd9, 0x71, 0xab, 0xca, 0x1a, - 0xa8, 0x70, 0xe8, 0x4b, 0x5a, 0xb5, 0xb2, 0xa8, 0xd1, 0xc1, 0x06, 0xd5, 0xae, 0xcd, 0xaf, 0x35, - 0x16, 0xad, 0xa7, 0x83, 0x05, 0xd0, 0xcf, 0x5a, 0x70, 0x32, 0x27, 0xec, 0x3c, 0x6d, 0xee, 0x0e, - 0x33, 0x99, 0x13, 0xcb, 0x56, 0x35, 0xc7, 0x0d, 0xe9, 
0xb0, 0x80, 0xa2, 0x8f, 0x03, 0x34, 0xe3, - 0x94, 0x9b, 0x1d, 0xe2, 0x73, 0x1b, 0x91, 0x7a, 0xb5, 0xa0, 0xab, 0x2a, 0x33, 0xa7, 0x46, 0xcb, - 0xfe, 0x4a, 0x0f, 0xf4, 0x32, 0xc3, 0x2b, 0x54, 0x86, 0xfe, 0x2d, 0x1e, 0x13, 0xb0, 0xed, 0xbc, - 0x51, 0x5c, 0x19, 0x64, 0x30, 0x9e, 0x37, 0xad, 0x14, 0x4b, 0x32, 0x68, 0x05, 0x26, 0x79, 0x3a, - 0xd1, 0xfa, 0x02, 0xa9, 0x3b, 0xbb, 0x52, 0x9a, 0x5c, 0x60, 0x9f, 0xaa, 0xa4, 0xea, 0xcb, 0x69, - 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0x0e, 0xa3, 0xf4, 0x75, 0xef, 0xb7, 0x22, 0x49, 0x89, 0xe7, 0xef, - 0x54, 0x0f, 0x9e, 0x35, 0x03, 0x8a, 0x13, 0xd8, 0xe8, 0x55, 0x18, 0x69, 0xa6, 0xe4, 0xe6, 0xbd, - 0xb1, 0x80, 0xc9, 0x94, 0x95, 0x9b, 0xb8, 0xcc, 0x0f, 0xa4, 0xc5, 0xbc, 0x5e, 0xd6, 0xb6, 0x02, - 0x12, 0x6e, 0xf9, 0xf5, 0x1a, 0xe3, 0x80, 0x7b, 0x35, 0x3f, 0x90, 0x04, 0x1c, 0xa7, 0x6a, 0x50, - 0x2a, 0x1b, 0x8e, 0x5b, 0x6f, 0x05, 0x24, 0xa6, 0xd2, 0x67, 0x52, 0x59, 0x4a, 0xc0, 0x71, 0xaa, - 0x46, 0x67, 0x85, 0x40, 0xff, 0x83, 0x51, 0x08, 0xd8, 0xbf, 0x5c, 0x00, 0x63, 0x6a, 0xbf, 0x87, - 0xf3, 0x8a, 0xbe, 0x06, 0x3d, 0x9b, 0x41, 0xb3, 0x2a, 0x8c, 0x0c, 0x33, 0xbf, 0xec, 0x0a, 0x2e, - 0xcf, 0xeb, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0xe3, 0xe5, 0xc0, 0xa7, 0x97, 0x9c, - 0x0c, 0x1b, 0xaa, 0xdc, 0xad, 0xfa, 0xe5, 0x1b, 0xbb, 0x4d, 0x80, 0x6d, 0xe1, 0x33, 0xc2, 0x29, - 0x18, 0xf6, 0x78, 0x15, 0xf1, 0xc2, 0x96, 0x54, 0xd0, 0x25, 0x18, 0x12, 0xa9, 0x1e, 0x99, 0x57, - 0x10, 0xdf, 0x4c, 0xcc, 0x7e, 0x70, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xe3, 0x05, 0x98, 0xcc, - 0x70, 0xeb, 0xe4, 0xd7, 0xc8, 0xa6, 0x1b, 0x46, 0xc1, 0x6e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b, - 0x0c, 0x7a, 0x56, 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0xb7, 0x29, 0x01, 0x3d, 0xdc, 0xe5, 0x44, - 0xaf, 0xed, 0x56, 0x48, 0x64, 0x2c, 0x7f, 0x75, 0x6d, 0x33, 0x63, 0x03, 0x06, 0xa1, 0x4f, 0xc0, - 0x4d, 0xa5, 0x41, 0xd7, 0x9e, 0x80, 0x5c, 0x87, 0xce, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, - 0x12, 0x0f, 0xc5, 0x38, 0xc6, 0x33, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0xa7, 0x72, 0x1d, - 0xbd, 0x69, 0xd7, 0x1b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xcc, 0xe4, 0x71, 0x9d, 0x49, 0x73, 0x6b, - 0x45, 0x94, 0x63, 0x85, 0x81, 0xce, 0x43, 0x2f, 0x93, 0xb5, 0x27, 0xd3, 0xbc, 0xe1, 0xb9, 0x05, - 0x1e, 0x31, 0x93, 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf6, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x3d, - 0x79, 0xa1, 0xd0, 0xee, 0xfa, 0x7e, 0x1d, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0x12, 0x11, - 0x3b, 0x35, 0x3f, 0xd4, 0x06, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0xbb, 0x81, 0xeb, 0x6d, 0x26, 0x2d, - 0x54, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0x66, 0x56, 0xf3, 0xfe, 0x07, 0x91, 0xd5, 0x5c, 0x5f, 0x01, - 0x03, 0x1d, 0xd9, 0x93, 0x9f, 0x28, 0xc2, 0x18, 0x9e, 0x5b, 0xf8, 0x60, 0x22, 0xd6, 0xd3, 0x13, - 0xf1, 0x20, 0x92, 0x7f, 0x1f, 0x6e, 0x36, 0x7e, 0xdb, 0x82, 0x31, 0x96, 0x70, 0x52, 0x44, 0x69, - 0x71, 0x7d, 0xef, 0x08, 0x9e, 0x02, 0x8f, 0x41, 0x6f, 0x40, 0x1b, 0x15, 0x33, 0xa8, 0xf6, 0x38, - 0xeb, 0x09, 0xe6, 0x30, 0x74, 0x1a, 0x7a, 0x58, 0x17, 0xe8, 0xe4, 0x0d, 0xf3, 0x23, 0x78, 0xc1, - 0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xbc, 0x48, 0x4c, 0x9a, 0x75, 0x97, 0x77, 0x3a, 0xb6, 0x84, 0x78, - 0x7f, 0x84, 0x80, 0xc9, 0xec, 0xda, 0x7b, 0x8b, 0x17, 0x99, 0x4d, 0xb2, 0xfd, 0x33, 0xfb, 0x1f, - 0x0a, 0x70, 0x36, 0xb3, 0x5e, 0xd7, 0xf1, 0x22, 0xdb, 0xd7, 0x7e, 0x98, 0xe9, 0xe9, 0x8a, 0x47, - 0x68, 0xff, 0xdf, 0xd3, 0x2d, 0xf7, 0xdf, 0xdb, 0x45, 0x18, 0xc7, 0xcc, 0x21, 0x7b, 0x9f, 0x84, - 0x71, 0xcc, 0xec, 0x5b, 0x8e, 0x98, 0xe0, 0x5f, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0x17, 0xe8, - 0x39, 0xc3, 0x80, 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 
0xb3, 0x30, 0xd6, - 0x70, 0x3d, 0x7a, 0xf8, 0xec, 0x9a, 0xac, 0xb8, 0x52, 0x91, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, - 0xae, 0x16, 0xe2, 0x91, 0x7f, 0xdd, 0xab, 0x87, 0xda, 0x75, 0x33, 0xa6, 0x95, 0x88, 0x1a, 0xc5, - 0x8c, 0x70, 0x8f, 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbd, 0x9c, 0x68, 0x38, 0x5b, 0x46, 0x34, 0xfd, - 0x2a, 0x8c, 0xdc, 0xb7, 0x6e, 0xc4, 0xfe, 0x56, 0x11, 0x1e, 0x69, 0xb3, 0xed, 0xf9, 0x59, 0x6f, - 0xcc, 0x81, 0x76, 0xd6, 0xa7, 0xe6, 0xa1, 0x0c, 0xc7, 0x36, 0x5a, 0xf5, 0xfa, 0x2e, 0x73, 0x74, - 0x23, 0x35, 0x89, 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xb6, 0x94, 0x81, 0x83, 0x33, 0x6b, 0xd2, - 0x27, 0x16, 0xbd, 0x49, 0x76, 0x15, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x2b, - 0x30, 0xe1, 0xec, 0x38, 0x2e, 0x4f, 0xef, 0x21, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x26, - 0x11, 0x70, 0xba, 0x0e, 0x7a, 0x13, 0x90, 0x7f, 0x9b, 0x39, 0xcf, 0xd4, 0xae, 0x10, 0x4f, 0x28, - 0xf3, 0xd9, 0xdc, 0x15, 0xe3, 0x23, 0xe1, 0x46, 0x0a, 0x03, 0x67, 0xd4, 0x4a, 0x04, 0x1b, 0xec, - 0xcb, 0x0f, 0x36, 0xd8, 0xfe, 0x5c, 0xec, 0x98, 0x19, 0xf1, 0x1d, 0x18, 0x39, 0xac, 0xb5, 0xf7, - 0x53, 0xd0, 0x1f, 0x88, 0x9c, 0xf3, 0x09, 0xaf, 0x72, 0x99, 0x91, 0x5b, 0xc2, 0xed, 0xff, 0xc7, - 0x02, 0x25, 0x4b, 0x36, 0xe3, 0x8a, 0xbf, 0xca, 0x4c, 0xd7, 0xb9, 0x14, 0x5c, 0x0b, 0x25, 0x76, - 0x5c, 0x33, 0x5d, 0x8f, 0x81, 0xd8, 0xc4, 0xe5, 0xcb, 0x2d, 0x8c, 0x23, 0x58, 0x18, 0x0f, 0x08, - 0xa1, 0x35, 0x54, 0x18, 0xe8, 0x13, 0xd0, 0x5f, 0x73, 0x77, 0xdc, 0x50, 0xc8, 0xd1, 0x0e, 0xad, - 0xb7, 0x8b, 0xbf, 0x6f, 0x81, 0x93, 0xc1, 0x92, 0x9e, 0xfd, 0x53, 0x16, 0x28, 0x75, 0xe7, 0x55, - 0xe2, 0xd4, 0xa3, 0x2d, 0xf4, 0x06, 0x80, 0xa4, 0xa0, 0x64, 0x6f, 0xd2, 0x08, 0x0b, 0xb0, 0x82, - 0x1c, 0x18, 0xff, 0xb0, 0x56, 0x07, 0xbd, 0x0e, 0x7d, 0x5b, 0x8c, 0x96, 0xf8, 0xb6, 0xf3, 0x4a, - 0xd5, 0xc5, 0x4a, 0x0f, 0xf6, 0x4a, 0xc7, 0xcc, 0x36, 0xe5, 0x2d, 0xc6, 0x6b, 0xd9, 0x3f, 0x51, - 0x88, 0xe7, 0xf4, 0xad, 0x96, 0x1f, 0x39, 0x47, 0xc0, 0x89, 0x5c, 0x31, 0x38, 0x91, 0x27, 0xda, - 0xe9, 0x73, 0x59, 0x97, 0x72, 0x39, 0x90, 0x1b, 0x09, 0x0e, 0xe4, 0xc9, 0xce, 0xa4, 0xda, 0x73, - 0x1e, 0xff, 0x93, 0x05, 0x13, 0x06, 0xfe, 0x11, 0x5c, 0x80, 0x4b, 0xe6, 0x05, 0xf8, 0x68, 0xc7, - 0x6f, 0xc8, 0xb9, 0xf8, 0x7e, 0xb4, 0x98, 0xe8, 0x3b, 0xbb, 0xf0, 0xde, 0x85, 0x9e, 0x2d, 0x27, - 0xa8, 0x89, 0x77, 0xfd, 0xc5, 0xae, 0xc6, 0x7a, 0xe6, 0xaa, 0x13, 0x08, 0x03, 0x8e, 0x67, 0xe5, - 0xa8, 0xd3, 0xa2, 0x8e, 0xc6, 0x1b, 0xac, 0x29, 0x74, 0x19, 0xfa, 0xc2, 0xaa, 0xdf, 0x54, 0x7e, - 0x80, 0x2c, 0x5d, 0x78, 0x85, 0x95, 0x1c, 0xec, 0x95, 0x90, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f, - 0xbd, 0x0d, 0x23, 0xec, 0x97, 0xb2, 0xa6, 0x2c, 0xe6, 0x4b, 0x60, 0x2a, 0x3a, 0x22, 0x37, 0x35, - 0x36, 0x8a, 0xb0, 0x49, 0x6a, 0x7a, 0x13, 0x06, 0xd5, 0x67, 0x3d, 0x54, 0x6d, 0xfd, 0xff, 0x59, - 0x84, 0xc9, 0x8c, 0x35, 0x87, 0x42, 0x63, 0x26, 0x2e, 0x75, 0xb9, 0x54, 0xdf, 0xe3, 0x5c, 0x84, - 0xec, 0x01, 0x58, 0x13, 0x6b, 0xab, 0xeb, 0x46, 0xd7, 0x43, 0x92, 0x6c, 0x94, 0x16, 0x75, 0x6e, - 0x94, 0x36, 0x76, 0x64, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0x87, 0x3d, 0x70, - 0x2c, 0xcb, 0xc4, 0x04, 0x7d, 0x0e, 0xfa, 0x98, 0xa3, 0x9a, 0x14, 0x9c, 0xbd, 0xd8, 0xad, 0x71, - 0xca, 0x0c, 0xf3, 0x75, 0x13, 0xa1, 0x69, 0x67, 0xe4, 0x71, 0xc4, 0x0b, 0x3b, 0x0e, 0xb3, 0x68, - 0x93, 0x85, 0x8c, 0x12, 0xb7, 0xa7, 0x3c, 0x3e, 0x3e, 0xd2, 0x75, 0x07, 0xc4, 0xfd, 0x1b, 0x26, - 0x2c, 0xb5, 0x64, 0x71, 0x67, 0x4b, 0x2d, 0xd9, 0x32, 0x5a, 0x86, 0xbe, 0x2a, 0x37, 0x01, 0x2a, - 0x76, 0x3e, 0xc2, 0xb8, 0xfd, 0x8f, 0x3a, 0x80, 0x85, 0xdd, 0x8f, 0x20, 0x30, 0xed, 0xc2, 0x90, - 0x36, 
0x30, 0x0f, 0x75, 0xf1, 0x6c, 0xd3, 0x8b, 0x4f, 0x1b, 0x82, 0x87, 0xba, 0x80, 0x7e, 0x46, - 0xbb, 0xfb, 0xc5, 0x79, 0xf0, 0x61, 0x83, 0x77, 0x3a, 0x9d, 0x70, 0x1f, 0x4c, 0xec, 0x2b, 0xc6, - 0x4b, 0x55, 0xcc, 0x98, 0xee, 0xb9, 0xa9, 0xa1, 0xcc, 0x0b, 0xbf, 0x7d, 0x1c, 0x77, 0xfb, 0x67, - 0x2d, 0x48, 0x38, 0x78, 0x29, 0x71, 0xa7, 0x95, 0x2b, 0xee, 0x3c, 0x07, 0x3d, 0x81, 0x5f, 0x27, - 0xc9, 0xd4, 0xfb, 0xd8, 0xaf, 0x13, 0xcc, 0x20, 0x14, 0x23, 0x8a, 0x85, 0x58, 0xc3, 0xfa, 0x03, - 0x5d, 0x3c, 0xbd, 0x1f, 0x83, 0xde, 0x3a, 0xd9, 0x21, 0xf5, 0x64, 0x86, 0xd4, 0xeb, 0xb4, 0x10, - 0x73, 0x98, 0xfd, 0xdb, 0x3d, 0x70, 0xa6, 0x6d, 0x64, 0x39, 0xca, 0x60, 0x6e, 0x3a, 0x11, 0xb9, - 0xe3, 0xec, 0x26, 0x33, 0x03, 0x5e, 0xe1, 0xc5, 0x58, 0xc2, 0x99, 0xb3, 0x35, 0xcf, 0x94, 0x93, - 0x10, 0x0e, 0x8b, 0x04, 0x39, 0x02, 0x6a, 0x0a, 0x1b, 0x8b, 0x0f, 0x42, 0xd8, 0xf8, 0x3c, 0x40, - 0x18, 0xd6, 0xb9, 0x1d, 0x67, 0x4d, 0x78, 0x71, 0xc7, 0x19, 0x95, 0x2a, 0xd7, 0x05, 0x04, 0x6b, - 0x58, 0x68, 0x01, 0xc6, 0x9b, 0x81, 0x1f, 0x71, 0x59, 0xfb, 0x02, 0x37, 0x75, 0xee, 0x35, 0x83, - 0x7a, 0x95, 0x13, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 0x82, 0x21, 0x11, 0xe8, 0xab, 0xec, 0xfb, 0x75, - 0x21, 0xde, 0x53, 0xd6, 0xbf, 0x95, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xe0, 0xf7, 0x67, - 0x56, 0xe3, 0x42, 0x7c, 0x0d, 0x2f, 0x91, 0x14, 0x60, 0xa0, 0xab, 0xa4, 0x00, 0xb1, 0xc0, 0x73, - 0xb0, 0x6b, 0x7d, 0x32, 0x74, 0x14, 0x11, 0x7e, 0xad, 0x07, 0x26, 0xc5, 0xc2, 0x79, 0xd8, 0xcb, - 0x65, 0x3d, 0xbd, 0x5c, 0x1e, 0x84, 0x48, 0xf4, 0x83, 0x35, 0x73, 0xd4, 0x6b, 0xe6, 0x27, 0x2d, - 0x30, 0x79, 0x48, 0xf4, 0x9f, 0xe5, 0xa6, 0x56, 0x7d, 0x29, 0x97, 0x27, 0x8d, 0x23, 0x86, 0xbf, - 0xb7, 0x24, 0xab, 0xf6, 0xff, 0x65, 0xc1, 0xa3, 0x1d, 0x29, 0xa2, 0x45, 0x18, 0x64, 0x8c, 0xae, - 0xf6, 0x2e, 0x7e, 0x52, 0xb9, 0x42, 0x48, 0x40, 0x0e, 0xdf, 0x1d, 0xd7, 0x44, 0x8b, 0xa9, 0x1c, - 0xb6, 0x4f, 0x65, 0xe4, 0xb0, 0x3d, 0x6e, 0x0c, 0xcf, 0x7d, 0x26, 0xb1, 0xfd, 0x12, 0xbd, 0x71, - 0x4c, 0x7f, 0xca, 0x8f, 0x18, 0xe2, 0x5c, 0x3b, 0x21, 0xce, 0x45, 0x26, 0xb6, 0x76, 0x87, 0xbc, - 0x01, 0xe3, 0x2c, 0x02, 0x28, 0x73, 0xcc, 0x11, 0x8e, 0x98, 0x85, 0xd8, 0xf8, 0xfe, 0x7a, 0x02, - 0x86, 0x53, 0xd8, 0xf6, 0xdf, 0x15, 0xa1, 0x8f, 0x6f, 0xbf, 0x23, 0x78, 0xf8, 0x3e, 0x03, 0x83, - 0x6e, 0xa3, 0xd1, 0xe2, 0x69, 0x49, 0x7b, 0x63, 0x53, 0xee, 0x65, 0x59, 0x88, 0x63, 0x38, 0x5a, - 0x12, 0x9a, 0x84, 0x36, 0x41, 0xc6, 0x79, 0xc7, 0x67, 0x16, 0x9c, 0xc8, 0xe1, 0x5c, 0x9c, 0xba, - 0x67, 0x63, 0x9d, 0x03, 0xfa, 0x34, 0x40, 0x18, 0x05, 0xae, 0xb7, 0x49, 0xcb, 0x44, 0x26, 0x8a, - 0xa7, 0xdb, 0x50, 0xab, 0x28, 0x64, 0x4e, 0x33, 0x3e, 0x73, 0x14, 0x00, 0x6b, 0x14, 0xd1, 0x8c, - 0x71, 0xd3, 0x4f, 0x27, 0xe6, 0x0e, 0x38, 0xd5, 0x78, 0xce, 0xa6, 0x5f, 0x86, 0x41, 0x45, 0xbc, - 0x93, 0x5c, 0x71, 0x58, 0x67, 0xd8, 0x3e, 0x06, 0x63, 0x89, 0xbe, 0x1d, 0x4a, 0x2c, 0xf9, 0x3b, - 0x16, 0x8c, 0xf1, 0xce, 0x2c, 0x7a, 0x3b, 0xe2, 0x36, 0xb8, 0x07, 0xc7, 0xea, 0x19, 0xa7, 0xb2, - 0x98, 0xfe, 0xee, 0x4f, 0x71, 0x25, 0x86, 0xcc, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x05, 0xba, 0xe3, - 0xe8, 0xa9, 0xeb, 0xd4, 0x45, 0x34, 0x91, 0x61, 0xbe, 0xdb, 0x78, 0x19, 0x56, 0x50, 0xfb, 0xaf, - 0x2c, 0x98, 0xe0, 0x3d, 0xbf, 0x46, 0x76, 0xd5, 0xd9, 0xf4, 0x9d, 0xec, 0xbb, 0x48, 0x88, 0x5d, - 0xc8, 0x49, 0x88, 0xad, 0x7f, 0x5a, 0xb1, 0xed, 0xa7, 0x7d, 0xd5, 0x02, 0xb1, 0x42, 0x8e, 0x40, - 0xd2, 0xf2, 0x7d, 0xa6, 0xa4, 0x65, 0x3a, 0x7f, 0x13, 0xe4, 0x88, 0x58, 0xfe, 0xc5, 0x82, 0x71, - 0x8e, 0x10, 0x5b, 0x41, 0x7c, 0x47, 0xe7, 0x61, 0xce, 0xfc, 0xa2, 0x4c, 0xb3, 0xd6, 0x6b, 0x64, - 0x77, 0xcd, 0x2f, 0x3b, 0xd1, 
0x56, 0xf6, 0x47, 0x19, 0x93, 0xd5, 0xd3, 0x76, 0xb2, 0x6a, 0x72, - 0x03, 0x19, 0x89, 0x17, 0x3b, 0x08, 0x80, 0x0f, 0x9b, 0x78, 0xd1, 0xfe, 0x7b, 0x0b, 0x10, 0x6f, - 0xc6, 0x60, 0xdc, 0x28, 0x3b, 0xc4, 0x4a, 0xb5, 0x8b, 0x2e, 0x3e, 0x9a, 0x14, 0x04, 0x6b, 0x58, - 0x0f, 0x64, 0x78, 0x12, 0xa6, 0x2c, 0xc5, 0xce, 0xa6, 0x2c, 0x87, 0x18, 0xd1, 0xaf, 0xf6, 0x43, - 0xd2, 0x15, 0x13, 0xdd, 0x84, 0xe1, 0xaa, 0xd3, 0x74, 0x6e, 0xbb, 0x75, 0x37, 0x72, 0x49, 0xd8, - 0xce, 0xce, 0x6d, 0x5e, 0xc3, 0x13, 0xc6, 0x07, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x19, 0x80, 0x66, - 0xe0, 0xee, 0xb8, 0x75, 0xb2, 0xc9, 0x04, 0x42, 0x2c, 0x7e, 0x11, 0x37, 0xba, 0x93, 0xa5, 0x58, - 0xc3, 0xc8, 0x08, 0x1b, 0x52, 0x7c, 0xc8, 0x61, 0x43, 0xe0, 0xc8, 0xc2, 0x86, 0xf4, 0x1c, 0x2a, - 0x6c, 0xc8, 0xc0, 0xa1, 0xc3, 0x86, 0xf4, 0x76, 0x15, 0x36, 0x04, 0xc3, 0x09, 0xc9, 0x7b, 0xd2, - 0xff, 0x4b, 0x6e, 0x9d, 0x88, 0x07, 0x07, 0x0f, 0xba, 0x34, 0xbd, 0xbf, 0x57, 0x3a, 0x81, 0x33, - 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x71, 0x98, 0x72, 0xea, 0x75, 0xff, 0x8e, 0x9a, 0xd4, 0xc5, 0xb0, - 0xea, 0xd4, 0xb9, 0x72, 0xa9, 0x9f, 0x51, 0x3d, 0xbd, 0xbf, 0x57, 0x9a, 0x9a, 0xcd, 0xc1, 0xc1, - 0xb9, 0xb5, 0xd1, 0x6b, 0x30, 0xd8, 0x0c, 0xfc, 0xea, 0x8a, 0xe6, 0x2f, 0x7e, 0x96, 0x0e, 0x60, - 0x59, 0x16, 0x1e, 0xec, 0x95, 0x46, 0xd4, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x22, 0x72, 0x0c, - 0x3d, 0xec, 0x88, 0x1c, 0xc3, 0x0f, 0x38, 0x22, 0x87, 0xbd, 0x0d, 0x93, 0x15, 0x12, 0xb8, 0x4e, - 0xdd, 0xbd, 0x47, 0x79, 0x72, 0x79, 0x06, 0xae, 0xc1, 0x60, 0x90, 0x38, 0xf5, 0xbb, 0x0a, 0x2e, - 0xae, 0xc9, 0x65, 0xe4, 0x29, 0x1f, 0x13, 0xb2, 0xff, 0xbd, 0x05, 0xfd, 0xc2, 0xbd, 0xf3, 0x08, - 0x38, 0xd3, 0x59, 0x43, 0x25, 0x53, 0xca, 0x9e, 0x14, 0xd6, 0x99, 0x5c, 0x65, 0xcc, 0x72, 0x42, - 0x19, 0xf3, 0x68, 0x3b, 0x22, 0xed, 0xd5, 0x30, 0xff, 0x75, 0x91, 0xbe, 0x10, 0x8c, 0x40, 0x03, - 0x0f, 0x7f, 0x08, 0x56, 0xa1, 0x3f, 0x14, 0x8e, 0xee, 0x85, 0x7c, 0x5f, 0x9e, 0xe4, 0x24, 0xc6, - 0x36, 0x90, 0xc2, 0xb5, 0x5d, 0x12, 0xc9, 0xf4, 0xa0, 0x2f, 0x3e, 0x44, 0x0f, 0xfa, 0x4e, 0xa1, - 0x18, 0x7a, 0x1e, 0x44, 0x28, 0x06, 0xfb, 0x1b, 0xec, 0x76, 0xd6, 0xcb, 0x8f, 0x80, 0x71, 0xbb, - 0x62, 0xde, 0xe3, 0x76, 0x9b, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0xc0, 0xfd, 0x96, 0x05, 0x67, 0x32, - 0xbe, 0x4a, 0xe3, 0xe6, 0x9e, 0x85, 0x01, 0xa7, 0x55, 0x73, 0xd5, 0x5e, 0xd6, 0xb4, 0xc5, 0xb3, - 0xa2, 0x1c, 0x2b, 0x0c, 0x34, 0x0f, 0x13, 0xe4, 0x6e, 0xd3, 0xe5, 0x6a, 0x78, 0xdd, 0x74, 0xbc, - 0xc8, 0x7d, 0x82, 0x17, 0x93, 0x40, 0x9c, 0xc6, 0x57, 0xe1, 0xdc, 0x8a, 0xb9, 0xe1, 0xdc, 0x7e, - 0xdd, 0x82, 0x21, 0xe5, 0xea, 0xfd, 0xd0, 0x47, 0xfb, 0x0d, 0x73, 0xb4, 0x1f, 0x69, 0x33, 0xda, - 0x39, 0xc3, 0xfc, 0x97, 0x05, 0xd5, 0xdf, 0xb2, 0x1f, 0x44, 0x5d, 0x70, 0x89, 0xf7, 0xef, 0xf6, - 0x72, 0x09, 0x86, 0x9c, 0x66, 0x53, 0x02, 0xa4, 0xfd, 0x22, 0x4b, 0x15, 0x11, 0x17, 0x63, 0x1d, - 0x47, 0x79, 0xe1, 0x14, 0x73, 0xbd, 0x70, 0x6a, 0x00, 0x91, 0x13, 0x6c, 0x92, 0x88, 0x96, 0x09, - 0x73, 0xeb, 0xfc, 0xf3, 0xa6, 0x15, 0xb9, 0xf5, 0x19, 0xd7, 0x8b, 0xc2, 0x28, 0x98, 0x59, 0xf6, - 0xa2, 0x1b, 0x01, 0x7f, 0xa6, 0x6a, 0x41, 0x13, 0x15, 0x2d, 0xac, 0xd1, 0x95, 0x61, 0x4d, 0x58, - 0x1b, 0xbd, 0xa6, 0x21, 0xcc, 0xaa, 0x28, 0xc7, 0x0a, 0xc3, 0x7e, 0x99, 0xdd, 0x3e, 0x6c, 0x4c, - 0x0f, 0x17, 0x0c, 0xf0, 0x1f, 0x86, 0xd5, 0x6c, 0x30, 0x95, 0xf0, 0x82, 0x1e, 0x72, 0xb0, 0xfd, - 0x61, 0x4f, 0x1b, 0xd6, 0xfd, 0x59, 0xe3, 0xb8, 0x84, 0xe8, 0x93, 0x29, 0xe3, 0xa6, 0xe7, 0x3a, - 0xdc, 0x1a, 0x87, 0x30, 0x67, 0x62, 0x79, 0xe3, 0x58, 0x56, 0xad, 0xe5, 0xb2, 0xd8, 0x17, 0x5a, - 0xde, 0x38, 0x01, 0xc0, 0x31, 0x0e, 0x65, 0xd8, 0xd4, 
0x9f, 0x70, 0x0a, 0xc5, 0xe1, 0xc5, 0x15, - 0x76, 0x88, 0x35, 0x0c, 0x74, 0x51, 0x08, 0x2d, 0xb8, 0xee, 0xe1, 0x91, 0x84, 0xd0, 0x42, 0x0e, - 0x97, 0x26, 0x69, 0xba, 0x04, 0x43, 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0xa9, 0xd3, 0x16, 0x7a, 0xe3, - 0x88, 0xb8, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x8c, 0x85, 0x5c, 0x96, 0xa7, 0x92, 0x5a, - 0x70, 0x99, 0xe8, 0xd3, 0xca, 0xc9, 0xde, 0x04, 0x1f, 0xb0, 0x22, 0x7e, 0x3a, 0xc9, 0xd0, 0x23, - 0x49, 0x12, 0xe8, 0x75, 0x18, 0xad, 0xfb, 0x4e, 0x6d, 0xce, 0xa9, 0x3b, 0x5e, 0x95, 0x8d, 0xcf, - 0x80, 0x11, 0x7f, 0x72, 0xf4, 0xba, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0x20, 0xea, 0x25, 0x22, 0x11, - 0x8b, 0xe3, 0x6d, 0x92, 0x70, 0x6a, 0x90, 0x7d, 0x15, 0x63, 0x10, 0xaf, 0xe7, 0xe0, 0xe0, 0xdc, - 0xda, 0xe8, 0x32, 0x0c, 0xcb, 0xcf, 0xd7, 0x22, 0xf5, 0xc4, 0x0e, 0x4d, 0x1a, 0x0c, 0x1b, 0x98, - 0x28, 0x84, 0xe3, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x55, 0x11, 0xbe, 0x82, 0x3b, 0x7f, - 0x7f, 0x4c, 0x7a, 0x9a, 0x2e, 0x66, 0x21, 0x1d, 0xec, 0x95, 0x4e, 0x8b, 0x51, 0xcb, 0x84, 0xe3, - 0x6c, 0xda, 0x68, 0x05, 0x26, 0xb9, 0x0d, 0xcc, 0xfc, 0x16, 0xa9, 0x6e, 0xcb, 0x0d, 0xc7, 0xb8, - 0x46, 0xcd, 0xf1, 0xe7, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc0, 0x54, 0xb3, 0x75, 0xbb, - 0xee, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x4c, 0xc8, 0x66, 0x6b, 0xb5, 0x80, 0x84, 0xdc, 0x37, 0x98, - 0x5d, 0xbd, 0x32, 0xba, 0x52, 0x39, 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xf7, 0xe0, 0x78, 0x62, 0x21, - 0x88, 0x30, 0x29, 0xa3, 0xf9, 0x29, 0xad, 0x2a, 0x59, 0x15, 0x44, 0xc4, 0xa1, 0x2c, 0x10, 0xce, - 0x6e, 0x02, 0xbd, 0x02, 0xe0, 0x36, 0x97, 0x9c, 0x86, 0x5b, 0xa7, 0xcf, 0xd1, 0x49, 0xb6, 0x46, - 0xe8, 0xd3, 0x04, 0x96, 0xcb, 0xb2, 0x94, 0x9e, 0xcd, 0xe2, 0xdf, 0x2e, 0xd6, 0xb0, 0xd1, 0x75, - 0x18, 0x15, 0xff, 0x76, 0xc5, 0x94, 0x4e, 0xa8, 0xec, 0xa7, 0xa3, 0xb2, 0x86, 0x9a, 0xc7, 0x44, - 0x09, 0x4e, 0xd4, 0x45, 0x9b, 0x70, 0x46, 0xa6, 0x5e, 0xd5, 0xd7, 0xa7, 0x9c, 0x83, 0x90, 0xe5, - 0x91, 0x1a, 0xe0, 0x3e, 0x45, 0xb3, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xf4, 0x5e, 0xd7, 0x97, 0x39, - 0xf7, 0x18, 0x3f, 0x1e, 0x47, 0xf1, 0xbc, 0x9e, 0x04, 0xe2, 0x34, 0x3e, 0xf2, 0xe1, 0xb8, 0xeb, - 0x65, 0xad, 0xea, 0x13, 0x8c, 0xd0, 0x47, 0xb9, 0xb3, 0x7c, 0xfb, 0x15, 0x9d, 0x09, 0xc7, 0xd9, - 0x74, 0xd1, 0x32, 0x4c, 0x46, 0xbc, 0x60, 0xc1, 0x0d, 0x79, 0x9a, 0x1a, 0xfa, 0xec, 0x3b, 0xc9, - 0x9a, 0x3b, 0x49, 0x57, 0xf3, 0x5a, 0x1a, 0x8c, 0xb3, 0xea, 0xbc, 0x37, 0x03, 0xd0, 0x6f, 0x5a, - 0xb4, 0xb6, 0xc6, 0xe8, 0xa3, 0xcf, 0xc0, 0xb0, 0x3e, 0x3e, 0x82, 0x69, 0x39, 0x9f, 0xcd, 0x07, - 0x6b, 0xc7, 0x0b, 0x7f, 0x26, 0xa8, 0x23, 0x44, 0x87, 0x61, 0x83, 0x22, 0xaa, 0x66, 0x04, 0xb9, - 0xb8, 0xd8, 0x1d, 0x53, 0xd4, 0xbd, 0xfd, 0x23, 0x81, 0xec, 0x9d, 0x83, 0xae, 0xc3, 0x40, 0xb5, - 0xee, 0x12, 0x2f, 0x5a, 0x2e, 0xb7, 0x0b, 0xae, 0x3a, 0x2f, 0x70, 0xc4, 0x56, 0x14, 0xd9, 0xa5, - 0x78, 0x19, 0x56, 0x14, 0xec, 0xcb, 0x30, 0x54, 0xa9, 0x13, 0xd2, 0xe4, 0x7e, 0x5c, 0xe8, 0x29, - 0xf6, 0x30, 0x61, 0xac, 0xa5, 0xc5, 0x58, 0x4b, 0xfd, 0xcd, 0xc1, 0x98, 0x4a, 0x09, 0xb7, 0xff, - 0xb8, 0x00, 0xa5, 0x0e, 0x49, 0xce, 0x12, 0xfa, 0x36, 0xab, 0x2b, 0x7d, 0xdb, 0x2c, 0x8c, 0xc5, - 0xff, 0x74, 0x51, 0x9e, 0x32, 0x86, 0xbe, 0x69, 0x82, 0x71, 0x12, 0xbf, 0x6b, 0xbf, 0x16, 0x5d, - 0x65, 0xd7, 0xd3, 0xd1, 0x33, 0xcb, 0x50, 0xd5, 0xf7, 0x76, 0xff, 0xf6, 0xce, 0x55, 0xbb, 0xda, - 0xdf, 0x28, 0xc0, 0x71, 0x35, 0x84, 0xdf, 0xbb, 0x03, 0xb7, 0x9e, 0x1e, 0xb8, 0x07, 0xa0, 0xb4, - 0xb6, 0x6f, 0x40, 0x1f, 0x8f, 0xf8, 0xda, 0x05, 0xcf, 0xff, 0x98, 0x19, 0x7c, 0x5f, 0xb1, 0x99, - 0x46, 0x00, 0xfe, 0x1f, 0xb3, 0x60, 0x2c, 0xe1, 0x20, 0x89, 0xb0, 0xe6, 0x45, 
0x7f, 0x3f, 0x7c, - 0x79, 0x16, 0xc7, 0x7f, 0x0e, 0x7a, 0xb6, 0x7c, 0x65, 0xa4, 0xac, 0x30, 0xae, 0xfa, 0x61, 0x84, - 0x19, 0xc4, 0xfe, 0x6b, 0x0b, 0x7a, 0xd7, 0x1c, 0xd7, 0x8b, 0xa4, 0xf6, 0xc3, 0xca, 0xd1, 0x7e, - 0x74, 0xf3, 0x5d, 0xe8, 0x25, 0xe8, 0x23, 0x1b, 0x1b, 0xa4, 0x1a, 0x89, 0x59, 0x95, 0xd1, 0x34, - 0xfa, 0x16, 0x59, 0x29, 0x65, 0x42, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x16, 0x0c, 0x46, - 0x6e, 0x83, 0xcc, 0xd6, 0x6a, 0xc2, 0x26, 0xe0, 0x3e, 0x42, 0xc0, 0xac, 0x49, 0x02, 0x38, 0xa6, - 0x65, 0x7f, 0xb9, 0x00, 0x10, 0x47, 0x98, 0xeb, 0xf4, 0x89, 0x73, 0x29, 0x6d, 0xf1, 0xf9, 0x0c, - 0x6d, 0x31, 0x8a, 0x09, 0x66, 0xa8, 0x8a, 0xd5, 0x30, 0x15, 0xbb, 0x1a, 0xa6, 0x9e, 0xc3, 0x0c, - 0xd3, 0x3c, 0x4c, 0xc4, 0x11, 0xf2, 0xcc, 0x00, 0xa1, 0xec, 0xfe, 0x5e, 0x4b, 0x02, 0x71, 0x1a, - 0xdf, 0x26, 0x70, 0x4e, 0x05, 0x0a, 0x13, 0x77, 0x21, 0x73, 0x25, 0xd0, 0xb5, 0xef, 0x1d, 0xc6, - 0x29, 0x56, 0x87, 0x17, 0x72, 0xd5, 0xe1, 0xbf, 0x60, 0xc1, 0xb1, 0x64, 0x3b, 0xcc, 0xef, 0xfe, - 0x8b, 0x16, 0x1c, 0x8f, 0x73, 0xfc, 0xa4, 0x4d, 0x10, 0x5e, 0x6c, 0x1b, 0xfc, 0x2c, 0xa7, 0xc7, - 0x71, 0xd8, 0x96, 0x95, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xdf, 0xf5, 0xc0, 0x54, 0x5e, 0xd4, - 0x34, 0xe6, 0x69, 0xe4, 0xdc, 0xad, 0x6c, 0x93, 0x3b, 0xc2, 0x9f, 0x23, 0xf6, 0x34, 0xe2, 0xc5, - 0x58, 0xc2, 0x93, 0x69, 0x9d, 0x0a, 0x5d, 0xa6, 0x75, 0xda, 0x82, 0x89, 0x3b, 0x5b, 0xc4, 0x5b, - 0xf7, 0x42, 0x27, 0x72, 0xc3, 0x0d, 0x97, 0x29, 0xd0, 0xf9, 0xba, 0x79, 0x45, 0x7a, 0x5d, 0xdc, - 0x4a, 0x22, 0x1c, 0xec, 0x95, 0xce, 0x18, 0x05, 0x71, 0x97, 0xf9, 0x41, 0x82, 0xd3, 0x44, 0xd3, - 0x59, 0xb1, 0x7a, 0x1e, 0x72, 0x56, 0xac, 0x86, 0x2b, 0xcc, 0x6e, 0xa4, 0x1b, 0x09, 0x7b, 0xb6, - 0xae, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x4f, 0x01, 0xd2, 0xd3, 0x1a, 0x1a, 0x41, 0x6b, 0x9f, 0xdb, - 0xdf, 0x2b, 0xa1, 0xd5, 0x14, 0xf4, 0x60, 0xaf, 0x34, 0x49, 0x4b, 0x97, 0x3d, 0xfa, 0xfc, 0x8d, - 0x23, 0xfd, 0x65, 0x10, 0x42, 0xb7, 0x60, 0x9c, 0x96, 0xb2, 0x1d, 0x25, 0x23, 0xe2, 0xf2, 0x27, - 0xeb, 0x33, 0xfb, 0x7b, 0xa5, 0xf1, 0xd5, 0x04, 0x2c, 0x8f, 0x74, 0x8a, 0x48, 0x46, 0x72, 0xac, - 0x81, 0x6e, 0x93, 0x63, 0xd9, 0x5f, 0xb4, 0xe0, 0x14, 0xbd, 0xe0, 0x6a, 0xd7, 0x73, 0xb4, 0xe8, - 0x4e, 0xd3, 0xe5, 0x7a, 0x1a, 0x71, 0xd5, 0x30, 0x59, 0x5d, 0x79, 0x99, 0x6b, 0x69, 0x14, 0x94, - 0x9e, 0xf0, 0xdb, 0xae, 0x57, 0x4b, 0x9e, 0xf0, 0xd7, 0x5c, 0xaf, 0x86, 0x19, 0x44, 0x5d, 0x59, - 0xc5, 0xdc, 0x08, 0xfb, 0x5f, 0xa3, 0x7b, 0x95, 0xf6, 0xe5, 0x3b, 0xda, 0x0d, 0xf4, 0x8c, 0xae, - 0x53, 0x15, 0xe6, 0x93, 0xb9, 0xfa, 0xd4, 0x2f, 0x58, 0x20, 0xbc, 0xdf, 0xbb, 0xb8, 0x93, 0xdf, - 0x86, 0xe1, 0x9d, 0x74, 0xca, 0xd7, 0x73, 0xf9, 0xe1, 0x00, 0x44, 0xa2, 0x57, 0xc5, 0xa2, 0x1b, - 0xe9, 0x5d, 0x0d, 0x5a, 0x76, 0x0d, 0x04, 0x74, 0x81, 0x30, 0xad, 0x46, 0xe7, 0xde, 0x3c, 0x0f, - 0x50, 0x63, 0xb8, 0x2c, 0x0f, 0x7c, 0xc1, 0xe4, 0xb8, 0x16, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xaf, - 0x16, 0x61, 0x48, 0xa6, 0x18, 0x6d, 0x79, 0xdd, 0xc8, 0x1e, 0x75, 0xc6, 0xa9, 0xd0, 0x91, 0x71, - 0x7a, 0x07, 0x26, 0x02, 0x52, 0x6d, 0x05, 0xa1, 0xbb, 0x43, 0x24, 0x58, 0x6c, 0x92, 0x19, 0x9e, - 0xe0, 0x21, 0x01, 0x3c, 0x60, 0x21, 0xb2, 0x12, 0x85, 0x4c, 0x69, 0x9c, 0x26, 0x84, 0x2e, 0xc2, - 0x20, 0x13, 0xbd, 0x97, 0x63, 0x81, 0xb0, 0x12, 0x7c, 0xad, 0x48, 0x00, 0x8e, 0x71, 0xd8, 0xe3, - 0xa0, 0x75, 0x9b, 0xa1, 0x27, 0x3c, 0xc1, 0x2b, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x38, 0x8c, 0xf3, - 0x7a, 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x4a, 0xb0, 0x57, 0x85, 0xd7, 0x19, 0x5f, 0x49, 0xc0, 0x0e, - 0xf6, 0x4a, 0xc7, 0x92, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf2, 0x8f, 0x37, 0x42, 0xef, 0x8c, - 0x94, 
0xc1, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0xcf, 0x16, 0x4c, 0x68, 0x53, 0xd5, 0x75, 0x8e, - 0x0d, 0x63, 0x90, 0x0a, 0x5d, 0x0c, 0xd2, 0xe1, 0xa2, 0x3d, 0x64, 0xce, 0x70, 0xcf, 0x03, 0x9a, - 0x61, 0xfb, 0x33, 0x80, 0xd2, 0xf9, 0x6b, 0xd1, 0x9b, 0xdc, 0x90, 0xdf, 0x0d, 0x48, 0xad, 0x9d, - 0xc2, 0x5f, 0x8f, 0x9c, 0x23, 0x3d, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x3f, 0xde, 0x03, 0xe3, - 0xc9, 0x58, 0x1d, 0xe8, 0x2a, 0xf4, 0x71, 0x2e, 0x5d, 0x90, 0x6f, 0x63, 0x4f, 0xa6, 0x45, 0xf8, - 0xe0, 0xf9, 0x6f, 0x38, 0x77, 0x2f, 0xea, 0xa3, 0x77, 0x60, 0xa8, 0xe6, 0xdf, 0xf1, 0xee, 0x38, - 0x41, 0x6d, 0xb6, 0xbc, 0x2c, 0x4e, 0x88, 0x4c, 0x01, 0xd4, 0x42, 0x8c, 0xa6, 0x47, 0x0d, 0x61, - 0xb6, 0x13, 0x31, 0x08, 0xeb, 0xe4, 0xd0, 0x1a, 0x4b, 0xc9, 0xb4, 0xe1, 0x6e, 0xae, 0x38, 0xcd, - 0x76, 0x5e, 0x5d, 0xf3, 0x12, 0x49, 0xa3, 0x3c, 0x22, 0xf2, 0x36, 0x71, 0x00, 0x8e, 0x09, 0xa1, - 0xcf, 0xc1, 0x64, 0x98, 0xa3, 0x12, 0xcb, 0x4b, 0x67, 0xde, 0x4e, 0x4b, 0xc4, 0x85, 0x29, 0x59, - 0xca, 0xb3, 0xac, 0x66, 0xd0, 0x5d, 0x40, 0x42, 0xf4, 0xbc, 0x16, 0xb4, 0xc2, 0x68, 0xae, 0xe5, - 0xd5, 0xea, 0x32, 0x65, 0xd3, 0x87, 0xb3, 0xe5, 0x04, 0x49, 0x6c, 0xad, 0x6d, 0x16, 0x12, 0x38, - 0x8d, 0x81, 0x33, 0xda, 0xb0, 0xbf, 0xd0, 0x03, 0xd3, 0x32, 0x61, 0x74, 0x86, 0xf7, 0xca, 0xe7, - 0xad, 0x84, 0xfb, 0xca, 0x2b, 0xf9, 0x07, 0xfd, 0x43, 0x73, 0x62, 0xf9, 0x52, 0xda, 0x89, 0xe5, - 0xb5, 0x43, 0x76, 0xe3, 0x81, 0xb9, 0xb2, 0x7c, 0xcf, 0xfa, 0x9f, 0xec, 0x1f, 0x03, 0xe3, 0x6a, - 0x46, 0x98, 0xc7, 0x5b, 0x2f, 0x4b, 0xd5, 0x51, 0xce, 0xf3, 0xff, 0xaa, 0xc0, 0x31, 0x2e, 0xfb, - 0x61, 0x19, 0x95, 0x9d, 0x9d, 0xb3, 0x8a, 0x0e, 0xa5, 0x49, 0x1a, 0xcd, 0x68, 0x77, 0xc1, 0x0d, - 0x44, 0x8f, 0x33, 0x69, 0x2e, 0x0a, 0x9c, 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x0e, 0xda, 0x81, 0x89, - 0x4d, 0x16, 0xf1, 0x49, 0xcb, 0xdd, 0x2c, 0xce, 0x85, 0xcc, 0x7d, 0x7b, 0x65, 0x7e, 0x31, 0x3f, - 0xd1, 0x33, 0x7f, 0xfc, 0xa5, 0x50, 0x70, 0xba, 0x09, 0xba, 0x35, 0x8e, 0x39, 0x77, 0xc2, 0xc5, - 0xba, 0x13, 0x46, 0x6e, 0x75, 0xae, 0xee, 0x57, 0xb7, 0x2b, 0x91, 0x1f, 0xc8, 0x04, 0x8f, 0x99, - 0x6f, 0xaf, 0xd9, 0x5b, 0x95, 0x14, 0xbe, 0xd1, 0xfc, 0xd4, 0xfe, 0x5e, 0xe9, 0x58, 0x16, 0x16, - 0xce, 0x6c, 0x0b, 0xad, 0x42, 0xff, 0xa6, 0x1b, 0x61, 0xd2, 0xf4, 0xc5, 0x69, 0x91, 0x79, 0x14, - 0x5e, 0xe1, 0x28, 0x46, 0x4b, 0x2c, 0x22, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa9, 0x2e, 0x81, - 0xbe, 0x7c, 0x01, 0x6c, 0xda, 0xf6, 0x2e, 0xf3, 0x1a, 0x78, 0x1d, 0x8a, 0xde, 0x46, 0xd8, 0x2e, - 0x16, 0xcf, 0xea, 0x92, 0x21, 0x3f, 0x9b, 0xeb, 0xa7, 0x4f, 0xe3, 0xd5, 0xa5, 0x0a, 0xa6, 0x15, - 0x99, 0xdb, 0x6b, 0x58, 0x0d, 0x5d, 0x91, 0x2c, 0x2a, 0xd3, 0x0b, 0x78, 0xb9, 0x32, 0x5f, 0x59, - 0x36, 0x68, 0xb0, 0xa8, 0x86, 0xac, 0x18, 0xf3, 0xea, 0xe8, 0x26, 0x0c, 0x6e, 0xf2, 0x83, 0x6f, - 0x23, 0x14, 0x49, 0xe3, 0x33, 0x2f, 0xa3, 0x2b, 0x12, 0xc9, 0xa0, 0xc7, 0xae, 0x0c, 0x05, 0xc2, - 0x31, 0x29, 0xf4, 0x05, 0x0b, 0x8e, 0x27, 0xb3, 0xee, 0x33, 0x67, 0x35, 0x61, 0xa6, 0x96, 0xe9, - 0x00, 0x50, 0xce, 0xaa, 0x60, 0x34, 0xc8, 0xd4, 0x2f, 0x99, 0x68, 0x38, 0xbb, 0x39, 0x3a, 0xd0, - 0xc1, 0xed, 0x5a, 0xbb, 0xfc, 0x42, 0x89, 0xc0, 0x44, 0x7c, 0xa0, 0xf1, 0xdc, 0x02, 0xa6, 0x15, - 0xd1, 0x1a, 0xc0, 0x46, 0x9d, 0x88, 0x88, 0x8f, 0xc2, 0x28, 0x2a, 0xf3, 0xf6, 0x5f, 0x52, 0x58, - 0x82, 0x0e, 0x7b, 0x89, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xd5, 0xf5, 0x6a, 0x24, 0x60, - 0xca, 0xad, 0x9c, 0xa5, 0x34, 0xcf, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, 0x22, - 0xcd, 0xad, 0x8d, 0xb0, 0x5d, 0x26, 0x8b, 0x79, 0xd2, 0xdc, 0x4a, 0x2c, 0x28, 0x4e, 0x8b, 0x95, - 0x63, 0x41, 0x81, 0x6e, 0x99, 
0x0d, 0xba, 0x81, 0x48, 0x30, 0x35, 0x96, 0xbf, 0x65, 0x96, 0x38, - 0x4a, 0x7a, 0xcb, 0x08, 0x00, 0x96, 0x44, 0xd0, 0xa7, 0x4d, 0x6e, 0x67, 0x9c, 0xd1, 0x7c, 0xa6, - 0x03, 0xb7, 0x63, 0xd0, 0x6d, 0xcf, 0xef, 0xbc, 0x02, 0x85, 0x8d, 0x2a, 0x53, 0x8a, 0xe5, 0xe8, - 0x0c, 0x96, 0xe6, 0x0d, 0x6a, 0x2c, 0x32, 0xfc, 0xd2, 0x3c, 0x2e, 0x6c, 0x54, 0xe9, 0xd2, 0x77, - 0xee, 0xb5, 0x02, 0xb2, 0xe4, 0xd6, 0x89, 0xc8, 0x6a, 0x91, 0xb9, 0xf4, 0x67, 0x25, 0x52, 0x7a, - 0xe9, 0x2b, 0x10, 0x8e, 0x49, 0x51, 0xba, 0x31, 0x0f, 0x36, 0x99, 0x4f, 0x57, 0xb1, 0x5a, 0x69, - 0xba, 0x99, 0x5c, 0xd8, 0x36, 0x8c, 0xec, 0x84, 0xcd, 0x2d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, - 0x89, 0x54, 0x71, 0x53, 0x20, 0xba, 0x41, 0xd4, 0x72, 0xea, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, - 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, - 0x19, 0x11, 0xe7, 0xf8, 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0x89, 0x0e, - 0x83, 0x9d, 0xea, 0x6f, 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb9, 0xe5, 0x47, - 0xbe, 0x97, 0xb8, 0xe4, 0x4e, 0xe6, 0x5f, 0x34, 0xe5, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, - 0x33, 0xdb, 0xa2, 0x1f, 0xd7, 0x94, 0x91, 0x01, 0x45, 0xe6, 0x8d, 0xa7, 0x72, 0x02, 0x6b, 0xa6, - 0xc3, 0x07, 0xf2, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0x42, 0x35, 0x18, 0x6d, 0x1a, 0x11, 0x67, 0x59, - 0x06, 0x91, 0x1c, 0xbe, 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, - 0xf7, 0xb8, 0xab, 0x1f, 0x4b, 0x30, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, - 0x4b, 0x22, 0x74, 0x34, 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x3c, 0x3d, 0x79, 0x0a, 0xf6, 0x2c, 0x35, - 0x91, 0x0c, 0xb3, 0x2e, 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xce, 0x3f, 0xc9, - 0x93, 0xd7, 0x1d, 0x3b, 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0x72, 0x8c, - 0xe4, 0xf4, 0x4b, 0x85, 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34, - 0xdd, 0xd9, 0x36, 0x57, 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0, - 0x6c, 0xfb, 0x7d, 0x1b, 0xeb, 0xd0, 0xca, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31, - 0x56, 0xd7, 0x01, 0x87, 0xaf, 0xc0, 0x84, 0x72, 0x47, 0xac, 0xbb, 0xd5, 0x5d, 0x2d, 0xb1, 0xa8, - 0x0a, 0xcd, 0x53, 0x49, 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x0b, 0x63, 0x46, 0xe1, 0xf2, 0x82, 0x78, - 0xfe, 0xc7, 0xd9, 0x31, 0x4c, 0x30, 0x4e, 0xe2, 0xdb, 0xbf, 0x66, 0xc1, 0xc9, 0x9c, 0x3c, 0xf3, - 0x5d, 0xc7, 0xd3, 0xdd, 0x80, 0xb1, 0xa6, 0x59, 0xb5, 0x43, 0x08, 0x70, 0x23, 0x9b, 0xbd, 0xea, - 0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x5f, 0x29, 0xc0, 0x99, 0xb6, 0xf6, 0xf5, 0x08, 0xc3, 0x89, - 0xcd, 0x46, 0xe8, 0xcc, 0x07, 0xa4, 0x46, 0xbc, 0xc8, 0x75, 0xea, 0x95, 0x26, 0xa9, 0x6a, 0x5a, - 0x50, 0x66, 0xa8, 0x7e, 0x65, 0xa5, 0x32, 0x9b, 0xc6, 0xc0, 0x39, 0x35, 0xd1, 0x12, 0xa0, 0x34, - 0x44, 0xcc, 0x30, 0x7b, 0xe2, 0xa6, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x65, 0x18, 0x51, 0x76, 0xfb, - 0xda, 0x8c, 0xb3, 0x0b, 0x02, 0xeb, 0x00, 0x6c, 0xe2, 0xa1, 0x4b, 0x3c, 0x6d, 0x92, 0x48, 0xb0, - 0x25, 0x54, 0xa6, 0x63, 0x32, 0x27, 0x92, 0x28, 0xc6, 0x3a, 0xce, 0xdc, 0xe5, 0x3f, 0xfd, 0xf6, - 0xd9, 0x0f, 0xfd, 0xc5, 0xb7, 0xcf, 0x7e, 0xe8, 0xaf, 0xbe, 0x7d, 0xf6, 0x43, 0x3f, 0xb4, 0x7f, - 0xd6, 0xfa, 0xd3, 0xfd, 0xb3, 0xd6, 0x5f, 0xec, 0x9f, 0xb5, 0xfe, 0x6a, 0xff, 0xac, 0xf5, 0xff, - 0xee, 0x9f, 0xb5, 0xbe, 0xfc, 0xb7, 0x67, 0x3f, 0xf4, 0x36, 0x8a, 0x23, 0x54, 0x5f, 0xa4, 0xb3, - 0x73, 0x71, 0xe7, 0xd2, 0x7f, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x60, 0x45, 0x7a, 0xd6, 0xa3, 0x24, - 0x01, 0x00, + // 16206 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9, + 0x75, 0x30, 0xc6, 0xea, 0x9e, 0xf3, 0xcd, 0x9d, 0xb8, 0x06, 0xb3, 0x00, 0x1a, 0x5b, 0xbb, 0x8b, + 0xc5, 0x5e, 0x03, 0x62, 0x0f, 0x2e, 0xb8, 0xbb, 0x5c, 0xed, 0x9c, 0x40, 0x2f, 0x30, 0x83, 0xde, + 0xec, 0x01, 0x40, 0x2e, 0x97, 0x14, 0x0b, 0xdd, 0x39, 0x33, 0xc5, 0xe9, 0xae, 0xea, 0xad, 0xaa, + 0x1e, 0x60, 0x60, 0x2a, 0x24, 0x51, 0x16, 0x25, 0x52, 0x72, 0x04, 0x43, 0x21, 0x59, 0x0e, 0x4a, + 0xa1, 0x1f, 0xba, 0x65, 0x5a, 0xb2, 0x68, 0xc9, 0x92, 0x2c, 0xea, 0xb2, 0x2d, 0x47, 0xc8, 0xfe, + 0x21, 0x4b, 0x8a, 0x30, 0xa9, 0xb0, 0xc2, 0x23, 0x73, 0x6c, 0x87, 0x42, 0x3f, 0x2c, 0x29, 0x64, + 0xff, 0xb0, 0x27, 0xf4, 0x7d, 0xfc, 0x22, 0xcf, 0xca, 0xac, 0xa3, 0xbb, 0x07, 0x0b, 0x0c, 0x97, + 0x8c, 0xfd, 0xd7, 0x9d, 0xef, 0xe5, 0xcb, 0xac, 0x3c, 0x5f, 0xbe, 0x13, 0xec, 0xad, 0x4b, 0xe1, + 0xac, 0xeb, 0x5f, 0x70, 0x5a, 0xee, 0x85, 0x9a, 0x1f, 0x90, 0x0b, 0xdb, 0x17, 0x2f, 0x6c, 0x10, + 0x8f, 0x04, 0x4e, 0x44, 0xea, 0xb3, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xce, 0xac, 0xd3, 0x72, + 0x67, 0x29, 0xce, 0xec, 0xf6, 0xc5, 0x99, 0xe7, 0x36, 0xdc, 0x68, 0xb3, 0x7d, 0x7b, 0xb6, 0xe6, + 0x37, 0x2f, 0x6c, 0xf8, 0x1b, 0xfe, 0x05, 0x86, 0x7a, 0xbb, 0xbd, 0xce, 0xfe, 0xb1, 0x3f, 0xec, + 0x17, 0x27, 0x31, 0xf3, 0x62, 0xdc, 0x4c, 0xd3, 0xa9, 0x6d, 0xba, 0x1e, 0x09, 0x76, 0x2e, 0xb4, + 0xb6, 0x36, 0x58, 0xbb, 0x01, 0x09, 0xfd, 0x76, 0x50, 0x23, 0xc9, 0x86, 0x3b, 0xd6, 0x0a, 0x2f, + 0x34, 0x49, 0xe4, 0x64, 0x74, 0x77, 0xe6, 0x42, 0x5e, 0xad, 0xa0, 0xed, 0x45, 0x6e, 0x33, 0xdd, + 0xcc, 0x47, 0xba, 0x55, 0x08, 0x6b, 0x9b, 0xa4, 0xe9, 0xa4, 0xea, 0xbd, 0x90, 0x57, 0xaf, 0x1d, + 0xb9, 0x8d, 0x0b, 0xae, 0x17, 0x85, 0x51, 0x90, 0xac, 0x64, 0x7f, 0xd3, 0x82, 0xb3, 0x73, 0xb7, + 0xaa, 0x4b, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x7c, 0xc3, 0xaf, 0x6d, 0x55, 0x23, 0x3f, 0x20, 0x37, + 0xfd, 0x46, 0xbb, 0x49, 0xaa, 0x6c, 0x20, 0xd0, 0xb3, 0x30, 0xb4, 0xcd, 0xfe, 0x97, 0x17, 0xa7, + 0xad, 0xb3, 0xd6, 0xf9, 0xe1, 0xf9, 0xc9, 0xbf, 0xd8, 0x2d, 0x7d, 0x68, 0x6f, 0xb7, 0x34, 0x74, + 0x53, 0x94, 0x63, 0x85, 0x81, 0xce, 0xc1, 0xc0, 0x7a, 0xb8, 0xb6, 0xd3, 0x22, 0xd3, 0x05, 0x86, + 0x3b, 0x2e, 0x70, 0x07, 0x96, 0xab, 0xb4, 0x14, 0x0b, 0x28, 0xba, 0x00, 0xc3, 0x2d, 0x27, 0x88, + 0xdc, 0xc8, 0xf5, 0xbd, 0xe9, 0xe2, 0x59, 0xeb, 0x7c, 0xff, 0xfc, 0x94, 0x40, 0x1d, 0xae, 0x48, + 0x00, 0x8e, 0x71, 0x68, 0x37, 0x02, 0xe2, 0xd4, 0xaf, 0x7b, 0x8d, 0x9d, 0xe9, 0xbe, 0xb3, 0xd6, + 0xf9, 0xa1, 0xb8, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0x7f, 0xa5, 0x00, 0x43, 0x73, 0xeb, 0xeb, + 0xae, 0xe7, 0x46, 0x3b, 0xe8, 0x26, 0x8c, 0x7a, 0x7e, 0x9d, 0xc8, 0xff, 0xec, 0x2b, 0x46, 0x9e, + 0x3f, 0x3b, 0x9b, 0x5e, 0x4a, 0xb3, 0xab, 0x1a, 0xde, 0xfc, 0xe4, 0xde, 0x6e, 0x69, 0x54, 0x2f, + 0xc1, 0x06, 0x1d, 0x84, 0x61, 0xa4, 0xe5, 0xd7, 0x15, 0xd9, 0x02, 0x23, 0x5b, 0xca, 0x22, 0x5b, + 0x89, 0xd1, 0xe6, 0x27, 0xf6, 0x76, 0x4b, 0x23, 0x5a, 0x01, 0xd6, 0x89, 0xa0, 0xdb, 0x30, 0x41, + 0xff, 0x7a, 0x91, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0xc7, 0xf2, 0xe8, 0x6a, 0xa8, 0xf3, 0x47, 0xf6, + 0x76, 0x4b, 0x13, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xff, 0xa4, 0x05, 0x13, 0x73, 0xad, 0xd6, 0x5c, + 0xd0, 0xf4, 0x83, 0x4a, 0xe0, 0xaf, 0xbb, 0x0d, 0x82, 0x5e, 0x86, 0xbe, 0x88, 0xce, 0x1a, 0x9f, + 0xe1, 0xc7, 0xc4, 0xd0, 0xf6, 0xd1, 0xb9, 0xda, 0xdf, 0x2d, 0x1d, 0x49, 0xa0, 0xb3, 0xa9, 0x64, + 0x15, 0xd0, 0x1b, 0x30, 0xd9, 0xf0, 0x6b, 0x4e, 0x63, 0xd3, 0x0f, 0x23, 0x01, 0x15, 0x53, 0x7f, + 0x74, 0x6f, 0xb7, 0x34, 0x79, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb, 0x1e, 0x8c, 0xcf, 0x45, 0x91, + 
0x53, 0xdb, 0x24, 0x75, 0xbe, 0xa0, 0xd0, 0x8b, 0xd0, 0xe7, 0x39, 0x4d, 0xd9, 0x99, 0xb3, 0xb2, + 0x33, 0xab, 0x4e, 0x93, 0x76, 0x66, 0xf2, 0x86, 0xe7, 0xbe, 0xdb, 0x16, 0x8b, 0x94, 0x96, 0x61, + 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x93, 0x6d, 0xb7, 0x46, 0x2a, 0x4e, 0xb4, 0x29, 0xfa, 0x80, 0x44, + 0x5d, 0x58, 0x54, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x78, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf, + 0x1e, 0xa2, 0x2d, 0x98, 0x68, 0x05, 0x64, 0x9d, 0x04, 0xaa, 0x68, 0xda, 0x3a, 0x5b, 0x3c, 0x3f, + 0xf2, 0xfc, 0xf9, 0xcc, 0xb1, 0x37, 0x51, 0x97, 0xbc, 0x28, 0xd8, 0x99, 0x3f, 0x21, 0xda, 0x9b, + 0x48, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0xe7, 0x05, 0x38, 0x36, 0x77, 0xaf, 0x1d, 0x90, 0x45, 0x37, + 0xdc, 0x4a, 0x6e, 0xb8, 0xba, 0x1b, 0x6e, 0xad, 0xc6, 0x23, 0xa0, 0x56, 0xfa, 0xa2, 0x28, 0xc7, + 0x0a, 0x03, 0x3d, 0x07, 0x83, 0xf4, 0xf7, 0x0d, 0x5c, 0x16, 0x9f, 0x7c, 0x44, 0x20, 0x8f, 0x2c, + 0x3a, 0x91, 0xb3, 0xc8, 0x41, 0x58, 0xe2, 0xa0, 0x15, 0x18, 0xa9, 0xb1, 0xf3, 0x61, 0x63, 0xc5, + 0xaf, 0x13, 0xb6, 0xb6, 0x86, 0xe7, 0x9f, 0xa1, 0xe8, 0x0b, 0x71, 0xf1, 0xfe, 0x6e, 0x69, 0x9a, + 0xf7, 0x4d, 0x90, 0xd0, 0x60, 0x58, 0xaf, 0x8f, 0x6c, 0xb5, 0xdd, 0xfb, 0x18, 0x25, 0xc8, 0xd8, + 0xea, 0xe7, 0xb5, 0x9d, 0xdb, 0xcf, 0x76, 0xee, 0x68, 0xf6, 0xae, 0x45, 0x17, 0xa1, 0x6f, 0xcb, + 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd6, 0x69, 0x3a, 0xe7, 0x57, 0x5d, 0xaf, 0xbe, 0xbf, 0x5b, 0x9a, + 0x32, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0x1f, 0x0b, 0x4a, 0x0c, 0xb6, 0xec, 0x36, 0x48, + 0x85, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x1e, 0x20, 0x24, 0xb5, 0x80, 0x44, + 0xda, 0x90, 0xaa, 0x85, 0x51, 0x55, 0x10, 0xac, 0x61, 0xd1, 0xf3, 0x29, 0xdc, 0x74, 0x02, 0xb6, + 0xbe, 0xc4, 0xc0, 0xaa, 0xf3, 0xa9, 0x2a, 0x01, 0x38, 0xc6, 0x31, 0xce, 0xa7, 0x62, 0xb7, 0xf3, + 0x09, 0x7d, 0x0c, 0x26, 0xe2, 0xc6, 0xc2, 0x96, 0x53, 0x93, 0x03, 0xc8, 0x76, 0x70, 0xd5, 0x04, + 0xe1, 0x24, 0xae, 0xfd, 0x9f, 0x5b, 0x62, 0xf1, 0xd0, 0xaf, 0x7e, 0x9f, 0x7f, 0xab, 0xfd, 0x07, + 0x16, 0x0c, 0xce, 0xbb, 0x5e, 0xdd, 0xf5, 0x36, 0xd0, 0x67, 0x60, 0x88, 0x5e, 0x95, 0x75, 0x27, + 0x72, 0xc4, 0x31, 0xfc, 0x61, 0x6d, 0x6f, 0xa9, 0x9b, 0x6b, 0xb6, 0xb5, 0xb5, 0x41, 0x0b, 0xc2, + 0x59, 0x8a, 0x4d, 0x77, 0xdb, 0xf5, 0xdb, 0x9f, 0x25, 0xb5, 0x68, 0x85, 0x44, 0x4e, 0xfc, 0x39, + 0x71, 0x19, 0x56, 0x54, 0xd1, 0x55, 0x18, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xe2, 0x3c, 0xce, 0x3c, + 0x37, 0x79, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x6a, 0x24, 0xbe, 0xa5, 0xd6, 0x58, 0x55, 0x2c, 0x48, + 0xd8, 0xff, 0x6e, 0x10, 0x4e, 0x2e, 0x54, 0xcb, 0x39, 0xeb, 0xea, 0x1c, 0x0c, 0xd4, 0x03, 0x77, + 0x9b, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x45, 0x56, 0x8a, 0x05, 0x14, 0x5d, 0x82, 0x51, 0x7e, 0x3f, + 0x5e, 0x71, 0xbc, 0x7a, 0x7c, 0x3c, 0x0a, 0xec, 0xd1, 0x9b, 0x1a, 0x0c, 0x1b, 0x98, 0x07, 0x5c, + 0x54, 0xe7, 0x12, 0x9b, 0x31, 0xef, 0xee, 0xfd, 0xa2, 0x05, 0x93, 0xbc, 0x99, 0xb9, 0x28, 0x0a, + 0xdc, 0xdb, 0xed, 0x88, 0x84, 0xd3, 0xfd, 0xec, 0xa4, 0x5b, 0xc8, 0x1a, 0xad, 0xdc, 0x11, 0x98, + 0xbd, 0x99, 0xa0, 0xc2, 0x0f, 0xc1, 0x69, 0xd1, 0xee, 0x64, 0x12, 0x8c, 0x53, 0xcd, 0xa2, 0x1f, + 0xb1, 0x60, 0xa6, 0xe6, 0x7b, 0x51, 0xe0, 0x37, 0x1a, 0x24, 0xa8, 0xb4, 0x6f, 0x37, 0xdc, 0x70, + 0x93, 0xaf, 0x53, 0x4c, 0xd6, 0xd9, 0x49, 0x90, 0x33, 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0x99, 0xbd, + 0xdd, 0xd2, 0xcc, 0x42, 0x2e, 0x29, 0xdc, 0xa1, 0x19, 0xb4, 0x05, 0x88, 0xde, 0xec, 0xd5, 0xc8, + 0xd9, 0x20, 0x71, 0xe3, 0x83, 0xbd, 0x37, 0x7e, 0x7c, 0x6f, 0xb7, 0x84, 0x56, 0x53, 0x24, 0x70, + 0x06, 0x59, 0xf4, 0x2e, 0x1c, 0xa5, 0xa5, 0xa9, 0x6f, 0x1d, 0xea, 0xbd, 0xb9, 0xe9, 0xbd, 0xdd, + 0xd2, 0xd1, 0xd5, 0x0c, 
0x22, 0x38, 0x93, 0x34, 0xfa, 0x21, 0x0b, 0x4e, 0xc6, 0x9f, 0xbf, 0x74, + 0xb7, 0xe5, 0x78, 0xf5, 0xb8, 0xe1, 0xe1, 0xde, 0x1b, 0xa6, 0x67, 0xf2, 0xc9, 0x85, 0x3c, 0x4a, + 0x38, 0xbf, 0x11, 0xe4, 0xc1, 0x11, 0xda, 0xb5, 0x64, 0xdb, 0xd0, 0x7b, 0xdb, 0x27, 0xf6, 0x76, + 0x4b, 0x47, 0x56, 0xd3, 0x34, 0x70, 0x16, 0xe1, 0x99, 0x05, 0x38, 0x96, 0xb9, 0x3a, 0xd1, 0x24, + 0x14, 0xb7, 0x08, 0x67, 0x02, 0x87, 0x31, 0xfd, 0x89, 0x8e, 0x42, 0xff, 0xb6, 0xd3, 0x68, 0x8b, + 0x8d, 0x89, 0xf9, 0x9f, 0x57, 0x0a, 0x97, 0x2c, 0xfb, 0x7f, 0x28, 0xc2, 0xc4, 0x42, 0xb5, 0x7c, + 0x5f, 0xbb, 0x5e, 0xbf, 0xf6, 0x0a, 0x1d, 0xaf, 0xbd, 0xf8, 0x12, 0x2d, 0xe6, 0x5e, 0xa2, 0x3f, + 0x98, 0xb1, 0x65, 0xfb, 0xd8, 0x96, 0xfd, 0x68, 0xce, 0x96, 0x7d, 0xc0, 0x1b, 0x75, 0x3b, 0x67, + 0xd5, 0xf6, 0xb3, 0x09, 0xcc, 0xe4, 0x90, 0x18, 0xef, 0x97, 0x3c, 0x6a, 0x0f, 0xb8, 0x74, 0x1f, + 0xcc, 0x3c, 0xd6, 0x60, 0x74, 0xc1, 0x69, 0x39, 0xb7, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f, + 0x42, 0xd1, 0xa9, 0xd7, 0x19, 0x77, 0x37, 0x3c, 0x7f, 0x6c, 0x6f, 0xb7, 0x54, 0x9c, 0xab, 0x53, + 0x36, 0x03, 0x14, 0xd6, 0x0e, 0xa6, 0x18, 0xe8, 0x69, 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0, + 0x30, 0xe9, 0x2e, 0xef, 0x5b, 0x0c, 0xfc, 0x56, 0x02, 0x95, 0xe1, 0xd8, 0x7f, 0x56, 0x80, 0x53, + 0x0b, 0xa4, 0xb5, 0xb9, 0x5c, 0xcd, 0xb9, 0x2f, 0xce, 0xc3, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f, + 0x42, 0xd1, 0x34, 0x5b, 0x11, 0x2b, 0xa2, 0x0c, 0x2b, 0x28, 0x3a, 0x0b, 0x7d, 0xad, 0x98, 0x89, + 0x1d, 0x95, 0x0c, 0x30, 0x63, 0x5f, 0x19, 0x84, 0x62, 0xb4, 0x43, 0x12, 0x88, 0x15, 0xa3, 0x30, + 0x6e, 0x84, 0x24, 0xc0, 0x0c, 0x12, 0x73, 0x02, 0x94, 0x47, 0x10, 0x37, 0x42, 0x82, 0x13, 0xa0, + 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x0c, 0x87, 0x89, 0x99, 0xed, 0x69, 0x6b, 0x8e, 0x31, 0x56, 0x41, + 0xcd, 0x64, 0x4c, 0xc4, 0xb8, 0xc1, 0x06, 0xba, 0xb2, 0x0a, 0x5f, 0x2f, 0x00, 0xe2, 0x43, 0xf8, + 0x5d, 0x36, 0x70, 0x37, 0xd2, 0x03, 0xd7, 0xfb, 0x96, 0x78, 0x50, 0xa3, 0xf7, 0xff, 0x5a, 0x70, + 0x6a, 0xc1, 0xf5, 0xea, 0x24, 0xc8, 0x59, 0x80, 0x0f, 0xe7, 0x29, 0x7f, 0x30, 0x26, 0xc5, 0x58, + 0x62, 0x7d, 0x0f, 0x60, 0x89, 0xd9, 0xff, 0x6c, 0x01, 0xe2, 0x9f, 0xfd, 0xbe, 0xfb, 0xd8, 0x1b, + 0xe9, 0x8f, 0x7d, 0x00, 0xcb, 0xc2, 0xbe, 0x06, 0xe3, 0x0b, 0x0d, 0x97, 0x78, 0x51, 0xb9, 0xb2, + 0xe0, 0x7b, 0xeb, 0xee, 0x06, 0x7a, 0x05, 0xc6, 0x23, 0xb7, 0x49, 0xfc, 0x76, 0x54, 0x25, 0x35, + 0xdf, 0x63, 0x2f, 0x57, 0xeb, 0x7c, 0xff, 0x3c, 0xda, 0xdb, 0x2d, 0x8d, 0xaf, 0x19, 0x10, 0x9c, + 0xc0, 0xb4, 0x7f, 0x95, 0x9e, 0x5b, 0x8d, 0x76, 0x18, 0x91, 0x60, 0x2d, 0x68, 0x87, 0xd1, 0x7c, + 0x9b, 0xf2, 0x9e, 0x95, 0xc0, 0xa7, 0xdd, 0x71, 0x7d, 0x0f, 0x9d, 0x32, 0x9e, 0xe3, 0x43, 0xf2, + 0x29, 0x2e, 0x9e, 0xdd, 0xb3, 0x00, 0xa1, 0xbb, 0xe1, 0x91, 0x40, 0x7b, 0x3e, 0x8c, 0xb3, 0xad, + 0xa2, 0x4a, 0xb1, 0x86, 0x81, 0x1a, 0x30, 0xd6, 0x70, 0x6e, 0x93, 0x46, 0x95, 0x34, 0x48, 0x2d, + 0xf2, 0x03, 0x21, 0xdf, 0x78, 0xa1, 0xb7, 0x77, 0xc0, 0x35, 0xbd, 0xea, 0xfc, 0xd4, 0xde, 0x6e, + 0x69, 0xcc, 0x28, 0xc2, 0x26, 0x71, 0x7a, 0x74, 0xf8, 0x2d, 0xfa, 0x15, 0x4e, 0x43, 0x7f, 0x7c, + 0x5e, 0x17, 0x65, 0x58, 0x41, 0xd5, 0xd1, 0xd1, 0x97, 0x77, 0x74, 0xd8, 0x7f, 0x47, 0x17, 0x9a, + 0xdf, 0x6c, 0xf9, 0x1e, 0xf1, 0xa2, 0x05, 0xdf, 0xab, 0x73, 0xc9, 0xd4, 0x2b, 0x86, 0xe8, 0xe4, + 0x5c, 0x42, 0x74, 0x72, 0x3c, 0x5d, 0x43, 0x93, 0x9e, 0x7c, 0x14, 0x06, 0xc2, 0xc8, 0x89, 0xda, + 0xa1, 0x18, 0xb8, 0x47, 0xe5, 0xb2, 0xab, 0xb2, 0xd2, 0xfd, 0xdd, 0xd2, 0x84, 0xaa, 0xc6, 0x8b, + 0xb0, 0xa8, 0x80, 0x9e, 0x82, 0xc1, 0x26, 0x09, 0x43, 0x67, 0x43, 0xb2, 0x0d, 0x13, 0xa2, 0xee, + 0xe0, 0x0a, 0x2f, 0xc6, 0x12, 0x8e, 0x1e, 0x83, 
0x7e, 0x12, 0x04, 0x7e, 0x20, 0xbe, 0x6d, 0x4c, + 0x20, 0xf6, 0x2f, 0xd1, 0x42, 0xcc, 0x61, 0xf6, 0xff, 0x6c, 0xc1, 0x84, 0xea, 0x2b, 0x6f, 0xeb, + 0x10, 0x9e, 0x6b, 0x6f, 0x03, 0xd4, 0xe4, 0x07, 0x86, 0xec, 0x9a, 0x1d, 0x79, 0xfe, 0x5c, 0x26, + 0x47, 0x93, 0x1a, 0xc6, 0x98, 0xb2, 0x2a, 0x0a, 0xb1, 0x46, 0xcd, 0xfe, 0x63, 0x0b, 0x8e, 0x24, + 0xbe, 0xe8, 0x9a, 0x1b, 0x46, 0xe8, 0x9d, 0xd4, 0x57, 0xcd, 0xf6, 0xb8, 0xf8, 0xdc, 0x90, 0x7f, + 0x93, 0xda, 0xf3, 0xb2, 0x44, 0xfb, 0xa2, 0x2b, 0xd0, 0xef, 0x46, 0xa4, 0x29, 0x3f, 0xe6, 0xb1, + 0x8e, 0x1f, 0xc3, 0x7b, 0x15, 0xcf, 0x48, 0x99, 0xd6, 0xc4, 0x9c, 0x80, 0xfd, 0x67, 0x45, 0x18, + 0xe6, 0xfb, 0x7b, 0xc5, 0x69, 0x1d, 0xc2, 0x5c, 0x3c, 0x03, 0xc3, 0x6e, 0xb3, 0xd9, 0x8e, 0x9c, + 0xdb, 0xe2, 0xde, 0x1b, 0xe2, 0x67, 0x50, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0xca, 0xd0, 0xc7, 0xba, + 0xc2, 0xbf, 0xf2, 0xc9, 0xec, 0xaf, 0x14, 0x7d, 0x9f, 0x5d, 0x74, 0x22, 0x87, 0xb3, 0x9c, 0x6a, + 0x5f, 0xd1, 0x22, 0xcc, 0x48, 0x20, 0x07, 0xe0, 0xb6, 0xeb, 0x39, 0xc1, 0x0e, 0x2d, 0x9b, 0x2e, + 0x32, 0x82, 0xcf, 0x75, 0x26, 0x38, 0xaf, 0xf0, 0x39, 0x59, 0xf5, 0x61, 0x31, 0x00, 0x6b, 0x44, + 0x67, 0x5e, 0x86, 0x61, 0x85, 0x7c, 0x10, 0xce, 0x71, 0xe6, 0x63, 0x30, 0x91, 0x68, 0xab, 0x5b, + 0xf5, 0x51, 0x9d, 0xf1, 0xfc, 0x43, 0x76, 0x64, 0x88, 0x5e, 0x2f, 0x79, 0xdb, 0xe2, 0x6e, 0xba, + 0x07, 0x47, 0x1b, 0x19, 0x47, 0xbe, 0x98, 0xd7, 0xde, 0xaf, 0x88, 0x53, 0xe2, 0xb3, 0x8f, 0x66, + 0x41, 0x71, 0x66, 0x1b, 0xc6, 0x89, 0x58, 0xe8, 0x74, 0x22, 0xd2, 0xf3, 0xee, 0xa8, 0xea, 0xfc, + 0x55, 0xb2, 0xa3, 0x0e, 0xd5, 0xef, 0x64, 0xf7, 0x4f, 0xf3, 0xd1, 0xe7, 0xc7, 0xe5, 0x88, 0x20, + 0x50, 0xbc, 0x4a, 0x76, 0xf8, 0x54, 0xe8, 0x5f, 0x57, 0xec, 0xf8, 0x75, 0x5f, 0xb3, 0x60, 0x4c, + 0x7d, 0xdd, 0x21, 0x9c, 0x0b, 0xf3, 0xe6, 0xb9, 0x70, 0xba, 0xe3, 0x02, 0xcf, 0x39, 0x11, 0xbe, + 0x5e, 0x80, 0x93, 0x0a, 0x87, 0x3e, 0xa2, 0xf8, 0x1f, 0xb1, 0xaa, 0x2e, 0xc0, 0xb0, 0xa7, 0xc4, + 0x89, 0x96, 0x29, 0xc7, 0x8b, 0x85, 0x89, 0x31, 0x0e, 0xbd, 0xf2, 0xbc, 0xf8, 0xd2, 0x1e, 0xd5, + 0xe5, 0xec, 0xe2, 0x72, 0x9f, 0x87, 0x62, 0xdb, 0xad, 0x8b, 0x0b, 0xe6, 0xc3, 0x72, 0xb4, 0x6f, + 0x94, 0x17, 0xf7, 0x77, 0x4b, 0x8f, 0xe6, 0xa9, 0x9c, 0xe8, 0xcd, 0x16, 0xce, 0xde, 0x28, 0x2f, + 0x62, 0x5a, 0x19, 0xcd, 0xc1, 0x84, 0xd4, 0xaa, 0xdd, 0xa4, 0x7c, 0xa9, 0xef, 0x89, 0x7b, 0x48, + 0x09, 0xcb, 0xb1, 0x09, 0xc6, 0x49, 0x7c, 0xb4, 0x08, 0x93, 0x5b, 0xed, 0xdb, 0xa4, 0x41, 0x22, + 0xfe, 0xc1, 0x57, 0x09, 0x17, 0x25, 0x0f, 0xc7, 0x4f, 0xd8, 0xab, 0x09, 0x38, 0x4e, 0xd5, 0xb0, + 0xbf, 0xcd, 0xee, 0x03, 0x31, 0x7a, 0x1a, 0x7f, 0xf3, 0x9d, 0x5c, 0xce, 0xbd, 0xac, 0x8a, 0xab, + 0x64, 0x67, 0xcd, 0xa7, 0x7c, 0x48, 0xf6, 0xaa, 0x30, 0xd6, 0x7c, 0x5f, 0xc7, 0x35, 0xff, 0xbb, + 0x05, 0x38, 0xa6, 0x46, 0xc0, 0xe0, 0x96, 0xbf, 0xdb, 0xc7, 0xe0, 0x22, 0x8c, 0xd4, 0xc9, 0xba, + 0xd3, 0x6e, 0x44, 0x4a, 0xaf, 0xd1, 0xcf, 0x55, 0x6d, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0x0e, 0x30, + 0x6c, 0xbf, 0x39, 0xc6, 0x2e, 0xe2, 0xc8, 0xa1, 0x6b, 0x5c, 0xed, 0x1a, 0x2b, 0x77, 0xd7, 0x3c, + 0x06, 0xfd, 0x6e, 0x93, 0x32, 0x66, 0x05, 0x93, 0xdf, 0x2a, 0xd3, 0x42, 0xcc, 0x61, 0xe8, 0x09, + 0x18, 0xac, 0xf9, 0xcd, 0xa6, 0xe3, 0xd5, 0xd9, 0x95, 0x37, 0x3c, 0x3f, 0x42, 0x79, 0xb7, 0x05, + 0x5e, 0x84, 0x25, 0x8c, 0x32, 0xdf, 0x4e, 0xb0, 0xc1, 0x85, 0x3d, 0x82, 0xf9, 0x9e, 0x0b, 0x36, + 0x42, 0xcc, 0x4a, 0xe9, 0x5b, 0xf5, 0x8e, 0x1f, 0x6c, 0xb9, 0xde, 0xc6, 0xa2, 0x1b, 0x88, 0x2d, + 0xa1, 0xee, 0xc2, 0x5b, 0x0a, 0x82, 0x35, 0x2c, 0xb4, 0x0c, 0xfd, 0x2d, 0x3f, 0x88, 0xc2, 0xe9, + 0x01, 0x36, 0xdc, 0x8f, 0xe6, 0x1c, 0x44, 0xfc, 0x6b, 0x2b, 0x7e, 0x10, 
0xc5, 0x1f, 0x40, 0xff, + 0x85, 0x98, 0x57, 0x47, 0xd7, 0x60, 0x90, 0x78, 0xdb, 0xcb, 0x81, 0xdf, 0x9c, 0x3e, 0x92, 0x4f, + 0x69, 0x89, 0xa3, 0xf0, 0x65, 0x16, 0xf3, 0xa8, 0xa2, 0x18, 0x4b, 0x12, 0xe8, 0xa3, 0x50, 0x24, + 0xde, 0xf6, 0xf4, 0x20, 0xa3, 0x34, 0x93, 0x43, 0xe9, 0xa6, 0x13, 0xc4, 0x67, 0xfe, 0x92, 0xb7, + 0x8d, 0x69, 0x1d, 0xf4, 0x09, 0x18, 0x96, 0x07, 0x46, 0x28, 0xa4, 0xa8, 0x99, 0x0b, 0x56, 0x1e, + 0x33, 0x98, 0xbc, 0xdb, 0x76, 0x03, 0xd2, 0x24, 0x5e, 0x14, 0xc6, 0x27, 0xa4, 0x84, 0x86, 0x38, + 0xa6, 0x86, 0x6a, 0x30, 0x1a, 0x90, 0xd0, 0xbd, 0x47, 0x2a, 0x7e, 0xc3, 0xad, 0xed, 0x4c, 0x9f, + 0x60, 0xdd, 0x7b, 0xaa, 0xe3, 0x90, 0x61, 0xad, 0x42, 0x2c, 0xe5, 0xd7, 0x4b, 0xb1, 0x41, 0x14, + 0xbd, 0x05, 0x63, 0x01, 0x09, 0x23, 0x27, 0x88, 0x44, 0x2b, 0xd3, 0x4a, 0x2b, 0x37, 0x86, 0x75, + 0x00, 0x7f, 0x4e, 0xc4, 0xcd, 0xc4, 0x10, 0x6c, 0x52, 0x40, 0x9f, 0x90, 0x2a, 0x87, 0x15, 0xbf, + 0xed, 0x45, 0xe1, 0xf4, 0x30, 0xeb, 0x77, 0xa6, 0x6e, 0xfa, 0x66, 0x8c, 0x97, 0xd4, 0x49, 0xf0, + 0xca, 0xd8, 0x20, 0x85, 0x3e, 0x05, 0x63, 0xfc, 0x3f, 0x57, 0xa9, 0x86, 0xd3, 0xc7, 0x18, 0xed, + 0xb3, 0xf9, 0xb4, 0x39, 0xe2, 0xfc, 0x31, 0x41, 0x7c, 0x4c, 0x2f, 0x0d, 0xb1, 0x49, 0x0d, 0x61, + 0x18, 0x6b, 0xb8, 0xdb, 0xc4, 0x23, 0x61, 0x58, 0x09, 0xfc, 0xdb, 0x44, 0x48, 0x88, 0x4f, 0x66, + 0xab, 0x60, 0xfd, 0xdb, 0x44, 0x3c, 0x02, 0xf5, 0x3a, 0xd8, 0x24, 0x81, 0x6e, 0xc0, 0x38, 0x7d, + 0x92, 0xbb, 0x31, 0xd1, 0x91, 0x6e, 0x44, 0xd9, 0xc3, 0x19, 0x1b, 0x95, 0x70, 0x82, 0x08, 0xba, + 0x0e, 0xa3, 0x6c, 0xcc, 0xdb, 0x2d, 0x4e, 0xf4, 0x78, 0x37, 0xa2, 0xcc, 0xa0, 0xa0, 0xaa, 0x55, + 0xc1, 0x06, 0x01, 0xf4, 0x26, 0x0c, 0x37, 0xdc, 0x75, 0x52, 0xdb, 0xa9, 0x35, 0xc8, 0xf4, 0x28, + 0xa3, 0x96, 0x79, 0x18, 0x5e, 0x93, 0x48, 0x9c, 0x3f, 0x57, 0x7f, 0x71, 0x5c, 0x1d, 0xdd, 0x84, + 0xe3, 0x11, 0x09, 0x9a, 0xae, 0xe7, 0xd0, 0x43, 0x4c, 0x3c, 0x09, 0x99, 0x66, 0x7c, 0x8c, 0xad, + 0xae, 0x33, 0x62, 0x36, 0x8e, 0xaf, 0x65, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x2e, 0x4c, 0x67, 0x40, + 0xf8, 0xba, 0x3d, 0xca, 0x28, 0xbf, 0x26, 0x28, 0x4f, 0xaf, 0xe5, 0xe0, 0xed, 0x77, 0x80, 0xe1, + 0x5c, 0xea, 0xe8, 0x3a, 0x4c, 0xb0, 0x93, 0xb3, 0xd2, 0x6e, 0x34, 0x44, 0x83, 0xe3, 0xac, 0xc1, + 0x27, 0x24, 0x1f, 0x51, 0x36, 0xc1, 0xfb, 0xbb, 0x25, 0x88, 0xff, 0xe1, 0x64, 0x6d, 0x74, 0x9b, + 0x29, 0x61, 0xdb, 0x81, 0x1b, 0xed, 0xd0, 0x5d, 0x45, 0xee, 0x46, 0xd3, 0x13, 0x1d, 0x05, 0x52, + 0x3a, 0xaa, 0xd2, 0xd4, 0xea, 0x85, 0x38, 0x49, 0x90, 0x5e, 0x05, 0x61, 0x54, 0x77, 0xbd, 0xe9, + 0x49, 0xfe, 0x9e, 0x92, 0x27, 0x69, 0x95, 0x16, 0x62, 0x0e, 0x63, 0x0a, 0x58, 0xfa, 0xe3, 0x3a, + 0xbd, 0x71, 0xa7, 0x18, 0x62, 0xac, 0x80, 0x95, 0x00, 0x1c, 0xe3, 0x50, 0x26, 0x38, 0x8a, 0x76, + 0xa6, 0x11, 0x43, 0x55, 0x07, 0xe2, 0xda, 0xda, 0x27, 0x30, 0x2d, 0xb7, 0x6f, 0xc3, 0xb8, 0x3a, + 0x26, 0xd8, 0x98, 0xa0, 0x12, 0xf4, 0x33, 0xb6, 0x4f, 0x88, 0x4f, 0x87, 0x69, 0x17, 0x18, 0x4b, + 0x88, 0x79, 0x39, 0xeb, 0x82, 0x7b, 0x8f, 0xcc, 0xef, 0x44, 0x84, 0xcb, 0x22, 0x8a, 0x5a, 0x17, + 0x24, 0x00, 0xc7, 0x38, 0xf6, 0xbf, 0xe7, 0xec, 0x73, 0x7c, 0x4b, 0xf4, 0x70, 0x2f, 0x3e, 0x0b, + 0x43, 0xcc, 0xf0, 0xc3, 0x0f, 0xb8, 0x76, 0xb6, 0x3f, 0x66, 0x98, 0xaf, 0x88, 0x72, 0xac, 0x30, + 0xd0, 0xab, 0x30, 0x56, 0xd3, 0x1b, 0x10, 0x97, 0xba, 0x3a, 0x46, 0x8c, 0xd6, 0xb1, 0x89, 0x8b, + 0x2e, 0xc1, 0x10, 0xb3, 0x71, 0xaa, 0xf9, 0x0d, 0xc1, 0x6d, 0x4a, 0xce, 0x64, 0xa8, 0x22, 0xca, + 0xf7, 0xb5, 0xdf, 0x58, 0x61, 0xa3, 0x73, 0x30, 0x40, 0xbb, 0x50, 0xae, 0x88, 0xeb, 0x54, 0x49, + 0x02, 0xaf, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0x1f, 0x5b, 0x8c, 0x97, 0x4a, 0x9f, 0xf9, 0xe8, 0x0a, + 
0xbb, 0x34, 0xd8, 0x0d, 0xa2, 0x69, 0xe1, 0x1f, 0xd7, 0x6e, 0x02, 0x05, 0xdb, 0x4f, 0xfc, 0xc7, + 0x46, 0x4d, 0xf4, 0x76, 0xf2, 0x66, 0xe0, 0x0c, 0xc5, 0x8b, 0x72, 0x08, 0x92, 0xb7, 0xc3, 0x23, + 0xf1, 0x15, 0x47, 0xfb, 0xd3, 0xe9, 0x8a, 0xb0, 0x7f, 0xaa, 0xa0, 0xad, 0x92, 0x6a, 0xe4, 0x44, + 0x04, 0x55, 0x60, 0xf0, 0x8e, 0xe3, 0x46, 0xae, 0xb7, 0x21, 0xf8, 0xbe, 0xce, 0x17, 0x1d, 0xab, + 0x74, 0x8b, 0x57, 0xe0, 0xdc, 0x8b, 0xf8, 0x83, 0x25, 0x19, 0x4a, 0x31, 0x68, 0x7b, 0x1e, 0xa5, + 0x58, 0xe8, 0x95, 0x22, 0xe6, 0x15, 0x38, 0x45, 0xf1, 0x07, 0x4b, 0x32, 0xe8, 0x1d, 0x00, 0x79, + 0x42, 0x90, 0xba, 0x90, 0x1d, 0x3e, 0xdb, 0x9d, 0xe8, 0x9a, 0xaa, 0xc3, 0x85, 0x93, 0xf1, 0x7f, + 0xac, 0xd1, 0xb3, 0x23, 0x6d, 0x4e, 0xf5, 0xce, 0xa0, 0x4f, 0xd2, 0x2d, 0xea, 0x04, 0x11, 0xa9, + 0xcf, 0x45, 0x62, 0x70, 0x9e, 0xee, 0xed, 0x71, 0xb8, 0xe6, 0x36, 0x89, 0xbe, 0x9d, 0x05, 0x11, + 0x1c, 0xd3, 0xb3, 0x7f, 0xbf, 0x08, 0xd3, 0x79, 0xdd, 0xa5, 0x9b, 0x86, 0xdc, 0x75, 0xa3, 0x05, + 0xca, 0xd6, 0x5a, 0xe6, 0xa6, 0x59, 0x12, 0xe5, 0x58, 0x61, 0xd0, 0xd5, 0x1b, 0xba, 0x1b, 0xf2, + 0x6d, 0xdf, 0x1f, 0xaf, 0xde, 0x2a, 0x2b, 0xc5, 0x02, 0x4a, 0xf1, 0x02, 0xe2, 0x84, 0xc2, 0xf8, + 0x4e, 0x5b, 0xe5, 0x98, 0x95, 0x62, 0x01, 0xd5, 0xa5, 0x8c, 0x7d, 0x5d, 0xa4, 0x8c, 0xc6, 0x10, + 0xf5, 0x3f, 0xd8, 0x21, 0x42, 0x9f, 0x06, 0x58, 0x77, 0x3d, 0x37, 0xdc, 0x64, 0xd4, 0x07, 0x0e, + 0x4c, 0x5d, 0x31, 0xc5, 0xcb, 0x8a, 0x0a, 0xd6, 0x28, 0xa2, 0x97, 0x60, 0x44, 0x1d, 0x20, 0xe5, + 0x45, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x11, 0xeb, 0x78, 0xf6, 0x67, 0x93, 0xeb, + 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x5e, 0xc7, 0xb7, 0xd0, 0x79, 0x7c, 0xed, 0xbf, 0x1e, 0x86, + 0x09, 0xa3, 0xb1, 0x76, 0xd8, 0xc3, 0x99, 0x7b, 0x99, 0x5e, 0x40, 0x4e, 0x44, 0xc4, 0xfe, 0xb3, + 0xbb, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xe1, 0x86, 0x13, 0x32, + 0x89, 0x25, 0x11, 0xfb, 0xae, 0x17, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed, 0xd6, 0xe7, 0xb4, + 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c, 0xd8, 0x0e, 0xe6, + 0x30, 0x74, 0x89, 0x1d, 0xad, 0x74, 0x55, 0x2c, 0x50, 0x6e, 0x94, 0x2d, 0xb3, 0x7e, 0x83, 0xc9, + 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0x81, 0x0e, 0x6f, 0xb2, 0xa7, 0x60, 0x90, 0xfd, 0x50, + 0x2b, 0x40, 0xcd, 0x46, 0x99, 0x17, 0x63, 0x09, 0x4f, 0x2e, 0x98, 0xa1, 0xde, 0x16, 0x0c, 0x7d, + 0xf5, 0x89, 0x45, 0xcd, 0xcc, 0x2e, 0x86, 0xf8, 0x29, 0x27, 0x96, 0x3c, 0x96, 0x30, 0xf4, 0x6b, + 0x16, 0x20, 0xa7, 0x41, 0x5f, 0xcb, 0xb4, 0x58, 0x3d, 0x6e, 0x80, 0xb1, 0xda, 0xaf, 0x76, 0x1d, + 0xf6, 0x76, 0x38, 0x3b, 0x97, 0xaa, 0xcd, 0x25, 0xa5, 0xaf, 0x88, 0x2e, 0xa2, 0x34, 0x82, 0x7e, + 0x19, 0x5d, 0x73, 0xc3, 0xe8, 0xf3, 0x7f, 0x9f, 0xb8, 0x9c, 0x32, 0xba, 0x84, 0x6e, 0xe8, 0x8f, + 0xaf, 0x91, 0x03, 0x3e, 0xbe, 0xc6, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0, 0x8c, 0xb2, 0x2f, + 0x7f, 0xa2, 0xcb, 0x03, 0x46, 0x88, 0xd3, 0x7b, 0x79, 0xc6, 0x54, 0x84, 0x1e, 0x78, 0x8c, 0x75, + 0xb9, 0xf3, 0x23, 0xf8, 0x46, 0x48, 0x82, 0xf9, 0x93, 0x52, 0x4d, 0xbc, 0xaf, 0xf3, 0x1e, 0x9a, + 0xde, 0xf8, 0x87, 0x2c, 0x98, 0x4e, 0x0f, 0x10, 0xef, 0xd2, 0xf4, 0x38, 0xeb, 0xbf, 0xdd, 0x69, + 0x64, 0x44, 0xe7, 0xa5, 0xb9, 0xeb, 0xf4, 0x5c, 0x0e, 0x2d, 0x9c, 0xdb, 0x0a, 0xba, 0x04, 0x10, + 0x46, 0x7e, 0x8b, 0x9f, 0xf5, 0x8c, 0x99, 0x1d, 0x66, 0x06, 0x17, 0x50, 0x55, 0xa5, 0xfb, 0xf1, + 0x5d, 0xa0, 0xe1, 0xce, 0xb4, 0xe1, 0x44, 0xce, 0x8a, 0xc9, 0x90, 0x77, 0x2f, 0xea, 0xf2, 0xee, + 0x2e, 0x52, 0xd2, 0x59, 0x39, 0xa7, 0xb3, 0x6f, 0xb5, 0x1d, 0x2f, 0x72, 0xa3, 0x1d, 0x5d, 0x3e, + 0xee, 0x81, 0x39, 0x94, 
0xe8, 0x53, 0xd0, 0xdf, 0x70, 0xbd, 0xf6, 0x5d, 0x71, 0xc7, 0x9e, 0xcb, + 0x7e, 0xfe, 0x78, 0xed, 0xbb, 0xe6, 0xe4, 0x94, 0xe8, 0x56, 0x66, 0xe5, 0xfb, 0xbb, 0x25, 0x94, + 0x46, 0xc0, 0x9c, 0xaa, 0xfd, 0x34, 0x8c, 0x2f, 0x3a, 0xa4, 0xe9, 0x7b, 0x4b, 0x5e, 0xbd, 0xe5, + 0xbb, 0x5e, 0x84, 0xa6, 0xa1, 0x8f, 0x31, 0x97, 0xfc, 0x6a, 0xed, 0xa3, 0x83, 0x8f, 0x59, 0x89, + 0xbd, 0x01, 0xc7, 0x16, 0xfd, 0x3b, 0xde, 0x1d, 0x27, 0xa8, 0xcf, 0x55, 0xca, 0x9a, 0xbc, 0x70, + 0x55, 0xca, 0xab, 0xac, 0x7c, 0x69, 0x80, 0x56, 0x93, 0x2f, 0xc2, 0x65, 0xb7, 0x41, 0x72, 0xa4, + 0xba, 0x3f, 0x5b, 0x30, 0x5a, 0x8a, 0xf1, 0x95, 0x4e, 0xd2, 0xca, 0x35, 0x67, 0x78, 0x0b, 0x86, + 0xd6, 0x5d, 0xd2, 0xa8, 0x63, 0xb2, 0x2e, 0x66, 0xe3, 0xc9, 0x7c, 0x83, 0xc7, 0x65, 0x8a, 0xa9, + 0x94, 0xa7, 0x4c, 0xda, 0xb5, 0x2c, 0x2a, 0x63, 0x45, 0x06, 0x6d, 0xc1, 0xa4, 0x9c, 0x33, 0x09, + 0x15, 0xe7, 0xfd, 0x53, 0x9d, 0x96, 0xaf, 0x49, 0x9c, 0x19, 0x7f, 0xe3, 0x04, 0x19, 0x9c, 0x22, + 0x8c, 0x4e, 0x41, 0x5f, 0x93, 0x72, 0x36, 0x7d, 0x6c, 0xf8, 0x99, 0x78, 0x8b, 0x49, 0xea, 0x58, + 0xa9, 0xfd, 0xf3, 0x16, 0x9c, 0x48, 0x8d, 0x8c, 0x90, 0x58, 0x3e, 0xe0, 0x59, 0x48, 0x4a, 0x10, + 0x0b, 0xdd, 0x25, 0x88, 0xf6, 0x7f, 0x61, 0xc1, 0xd1, 0xa5, 0x66, 0x2b, 0xda, 0x59, 0x74, 0x4d, + 0xdb, 0x83, 0x97, 0x61, 0xa0, 0x49, 0xea, 0x6e, 0xbb, 0x29, 0x66, 0xae, 0x24, 0x6f, 0xff, 0x15, + 0x56, 0x4a, 0x4f, 0x90, 0x6a, 0xe4, 0x07, 0xce, 0x06, 0xe1, 0x05, 0x58, 0xa0, 0x33, 0x1e, 0xca, + 0xbd, 0x47, 0xae, 0xb9, 0x4d, 0x37, 0xba, 0xbf, 0xdd, 0x25, 0xcc, 0x06, 0x24, 0x11, 0x1c, 0xd3, + 0xb3, 0xbf, 0x69, 0xc1, 0x84, 0x5c, 0xf7, 0x73, 0xf5, 0x7a, 0x40, 0xc2, 0x10, 0xcd, 0x40, 0xc1, + 0x6d, 0x89, 0x5e, 0x82, 0xe8, 0x65, 0xa1, 0x5c, 0xc1, 0x05, 0xb7, 0x25, 0x9f, 0x6b, 0x8c, 0xc1, + 0x28, 0x9a, 0x16, 0x14, 0x57, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x3c, 0x0c, 0x79, 0x7e, 0x9d, 0xbf, + 0x78, 0x84, 0x0e, 0x9d, 0x62, 0xae, 0x8a, 0x32, 0xac, 0xa0, 0xa8, 0x02, 0xc3, 0xdc, 0xbe, 0x36, + 0x5e, 0xb4, 0x3d, 0x59, 0xe9, 0xb2, 0x2f, 0x5b, 0x93, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0x53, 0x0b, + 0x46, 0xe5, 0x97, 0xf5, 0xf8, 0x16, 0xa5, 0x5b, 0x2b, 0x7e, 0x87, 0xc6, 0x5b, 0x8b, 0xbe, 0x25, + 0x19, 0xc4, 0x78, 0x42, 0x16, 0x0f, 0xf4, 0x84, 0xbc, 0x08, 0x23, 0x4e, 0xab, 0x55, 0x31, 0xdf, + 0x9f, 0x6c, 0x29, 0xcd, 0xc5, 0xc5, 0x58, 0xc7, 0xb1, 0x7f, 0xae, 0x00, 0xe3, 0xf2, 0x0b, 0xaa, + 0xed, 0xdb, 0x21, 0x89, 0xd0, 0x1a, 0x0c, 0x3b, 0x7c, 0x96, 0x88, 0x5c, 0xe4, 0x8f, 0x65, 0xcb, + 0x45, 0x8d, 0x29, 0x8d, 0x19, 0xe9, 0x39, 0x59, 0x1b, 0xc7, 0x84, 0x50, 0x03, 0xa6, 0x3c, 0x3f, + 0x62, 0x4c, 0x95, 0x82, 0x77, 0x52, 0x55, 0x27, 0xa9, 0x9f, 0x14, 0xd4, 0xa7, 0x56, 0x93, 0x54, + 0x70, 0x9a, 0x30, 0x5a, 0x92, 0xb2, 0xe6, 0x62, 0xbe, 0x90, 0x50, 0x9f, 0xb8, 0x6c, 0x51, 0xb3, + 0xfd, 0x47, 0x16, 0x0c, 0x4b, 0xb4, 0xc3, 0xb0, 0x4a, 0x58, 0x81, 0xc1, 0x90, 0x4d, 0x82, 0x1c, + 0x1a, 0xbb, 0x53, 0xc7, 0xf9, 0x7c, 0xc5, 0xbc, 0x22, 0xff, 0x1f, 0x62, 0x49, 0x83, 0xa9, 0x1a, + 0x55, 0xf7, 0xdf, 0x27, 0xaa, 0x46, 0xd5, 0x9f, 0x9c, 0x4b, 0xe9, 0x1f, 0x58, 0x9f, 0x35, 0xd9, + 0x3d, 0x7d, 0xd2, 0xb4, 0x02, 0xb2, 0xee, 0xde, 0x4d, 0x3e, 0x69, 0x2a, 0xac, 0x14, 0x0b, 0x28, + 0x7a, 0x07, 0x46, 0x6b, 0x52, 0xc7, 0x14, 0xef, 0xf0, 0x73, 0x1d, 0xf5, 0x9d, 0x4a, 0x35, 0xce, + 0x65, 0xa4, 0x0b, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xed, 0xc7, 0x8a, 0xdd, 0xec, 0xc7, 0x62, 0xba, + 0xf9, 0xd6, 0x54, 0xbf, 0x60, 0xc1, 0x00, 0xd7, 0x2d, 0xf4, 0xa6, 0xda, 0xd1, 0x2c, 0x05, 0xe2, + 0xb1, 0xbb, 0x49, 0x0b, 0x05, 0x67, 0x83, 0x56, 0x60, 0x98, 0xfd, 0x60, 0xba, 0x91, 0x62, 0xbe, + 0xb7, 0x19, 0x6f, 0x55, 0xef, 0xe0, 0x4d, 0x59, 
0x0d, 0xc7, 0x14, 0xec, 0x9f, 0x2e, 0xd2, 0xd3, + 0x2d, 0x46, 0x35, 0x2e, 0x7d, 0xeb, 0xe1, 0x5d, 0xfa, 0x85, 0x87, 0x75, 0xe9, 0x6f, 0xc0, 0x44, + 0x4d, 0xb3, 0x2b, 0x88, 0x67, 0xf2, 0x7c, 0xc7, 0x45, 0xa2, 0x99, 0x20, 0x70, 0xe9, 0xeb, 0x82, + 0x49, 0x04, 0x27, 0xa9, 0xa2, 0x4f, 0xc2, 0x28, 0x9f, 0x67, 0xd1, 0x0a, 0x37, 0xc1, 0x7b, 0x22, + 0x7f, 0xbd, 0xe8, 0x4d, 0x70, 0x69, 0xbd, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0x7f, 0xb1, 0x00, 0x2d, + 0xb5, 0x36, 0x49, 0x93, 0x04, 0x4e, 0x23, 0x56, 0x0f, 0x7e, 0xc9, 0x82, 0x69, 0x92, 0x2a, 0x5e, + 0xf0, 0x9b, 0x4d, 0x21, 0x0c, 0xc8, 0x91, 0x57, 0x2d, 0xe5, 0xd4, 0x89, 0x1f, 0x04, 0x79, 0x18, + 0x38, 0xb7, 0x3d, 0xb4, 0x02, 0x47, 0xf8, 0x2d, 0xa9, 0x00, 0x9a, 0x95, 0xde, 0x23, 0x82, 0xf0, + 0x91, 0xb5, 0x34, 0x0a, 0xce, 0xaa, 0x67, 0xff, 0xd1, 0x18, 0xe4, 0xf6, 0xe2, 0x03, 0xbd, 0xe8, + 0x07, 0x7a, 0xd1, 0x0f, 0xf4, 0xa2, 0x1f, 0xe8, 0x45, 0x3f, 0xd0, 0x8b, 0x7e, 0xa0, 0x17, 0x7d, + 0x9f, 0xea, 0x45, 0x7f, 0xc6, 0x82, 0x63, 0xea, 0xfa, 0x32, 0x1e, 0xec, 0x9f, 0x83, 0x23, 0x7c, + 0xbb, 0x2d, 0x34, 0x1c, 0xb7, 0xb9, 0x46, 0x9a, 0xad, 0x86, 0x13, 0x49, 0xeb, 0xa7, 0x8b, 0x99, + 0x2b, 0x37, 0xe1, 0x62, 0x61, 0x54, 0xe4, 0xbe, 0x6a, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0xfb, + 0x43, 0xd0, 0xbf, 0xb4, 0x4d, 0xbc, 0xe8, 0x10, 0x9e, 0x36, 0x35, 0x18, 0x77, 0xbd, 0x6d, 0xbf, + 0xb1, 0x4d, 0xea, 0x1c, 0x7e, 0x90, 0x17, 0xf8, 0x71, 0x41, 0x7a, 0xbc, 0x6c, 0x90, 0xc0, 0x09, + 0x92, 0x0f, 0x43, 0xbb, 0x74, 0x19, 0x06, 0xf8, 0xe5, 0x23, 0x54, 0x4b, 0x99, 0x67, 0x36, 0x1b, + 0x44, 0x71, 0xa5, 0xc6, 0x9a, 0x2f, 0x7e, 0xb9, 0x89, 0xea, 0xe8, 0xb3, 0x30, 0xbe, 0xee, 0x06, + 0x61, 0xb4, 0xe6, 0x36, 0xe9, 0xd5, 0xd0, 0x6c, 0xdd, 0x87, 0x36, 0x49, 0x8d, 0xc3, 0xb2, 0x41, + 0x09, 0x27, 0x28, 0xa3, 0x0d, 0x18, 0x6b, 0x38, 0x7a, 0x53, 0x83, 0x07, 0x6e, 0x4a, 0xdd, 0x0e, + 0xd7, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0xb7, 0x53, 0x8d, 0x29, 0x44, 0x86, 0x98, 0x38, 0x43, 0x6d, + 0x27, 0xae, 0x09, 0xe1, 0x30, 0xca, 0xa0, 0x31, 0x47, 0x85, 0x61, 0x93, 0x41, 0xd3, 0xdc, 0x11, + 0x3e, 0x03, 0xc3, 0x84, 0x0e, 0x21, 0x25, 0x2c, 0x2e, 0x98, 0x0b, 0xbd, 0xf5, 0x75, 0xc5, 0xad, + 0x05, 0xbe, 0xa9, 0xc7, 0x5b, 0x92, 0x94, 0x70, 0x4c, 0x14, 0x2d, 0xc0, 0x40, 0x48, 0x02, 0x57, + 0xe9, 0x0a, 0x3a, 0x4c, 0x23, 0x43, 0xe3, 0xce, 0x90, 0xfc, 0x37, 0x16, 0x55, 0xe9, 0xf2, 0x72, + 0x98, 0x28, 0x96, 0x5d, 0x06, 0xda, 0xf2, 0x9a, 0x63, 0xa5, 0x58, 0x40, 0xd1, 0x9b, 0x30, 0x18, + 0x90, 0x06, 0x53, 0x14, 0x8f, 0xf5, 0xbe, 0xc8, 0xb9, 0xde, 0x99, 0xd7, 0xc3, 0x92, 0x00, 0xba, + 0x0a, 0x28, 0x20, 0x94, 0xc1, 0x73, 0xbd, 0x0d, 0x65, 0xbe, 0x2f, 0x0e, 0x5a, 0xc5, 0x48, 0xe3, + 0x18, 0x43, 0xfa, 0xc1, 0xe2, 0x8c, 0x6a, 0xe8, 0x32, 0x4c, 0xa9, 0xd2, 0xb2, 0x17, 0x46, 0x0e, + 0x3d, 0xe0, 0xb8, 0xb8, 0x5e, 0xc9, 0x57, 0x70, 0x12, 0x01, 0xa7, 0xeb, 0xd8, 0xbf, 0x61, 0x01, + 0x1f, 0xe7, 0x43, 0x90, 0x2a, 0xbc, 0x6e, 0x4a, 0x15, 0x4e, 0xe6, 0xce, 0x5c, 0x8e, 0x44, 0xe1, + 0x37, 0x2c, 0x18, 0xd1, 0x66, 0x36, 0x5e, 0xb3, 0x56, 0x87, 0x35, 0xdb, 0x86, 0x49, 0xba, 0xd2, + 0xaf, 0xdf, 0x0e, 0x49, 0xb0, 0x4d, 0xea, 0x6c, 0x61, 0x16, 0xee, 0x6f, 0x61, 0x2a, 0x53, 0xe1, + 0x6b, 0x09, 0x82, 0x38, 0xd5, 0x84, 0xfd, 0x19, 0xd9, 0x55, 0x65, 0x59, 0x5d, 0x53, 0x73, 0x9e, + 0xb0, 0xac, 0x56, 0xb3, 0x8a, 0x63, 0x1c, 0xba, 0xd5, 0x36, 0xfd, 0x30, 0x4a, 0x5a, 0x56, 0x5f, + 0xf1, 0xc3, 0x08, 0x33, 0x88, 0xfd, 0x02, 0xc0, 0xd2, 0x5d, 0x52, 0xe3, 0x2b, 0x56, 0x7f, 0xf4, + 0x58, 0xf9, 0x8f, 0x1e, 0xfb, 0x6f, 0x2c, 0x18, 0x5f, 0x5e, 0x30, 0x6e, 0xae, 0x59, 0x00, 0xfe, + 0x52, 0xbb, 0x75, 0x6b, 0x55, 0x9a, 0xf7, 0x70, 0x0b, 0x07, 0x55, 0x8a, 
0x35, 0x0c, 0x74, 0x12, + 0x8a, 0x8d, 0xb6, 0x27, 0xc4, 0x9e, 0x83, 0xf4, 0x7a, 0xbc, 0xd6, 0xf6, 0x30, 0x2d, 0xd3, 0x7c, + 0xe0, 0x8a, 0x3d, 0xfb, 0xc0, 0x75, 0x0d, 0xc5, 0x83, 0x4a, 0xd0, 0x7f, 0xe7, 0x8e, 0x5b, 0xe7, + 0x11, 0x06, 0x84, 0xe9, 0xd1, 0xad, 0x5b, 0xe5, 0xc5, 0x10, 0xf3, 0x72, 0xfb, 0xcb, 0x45, 0x98, + 0x59, 0x6e, 0x90, 0xbb, 0xef, 0x31, 0xca, 0x42, 0xaf, 0x1e, 0x7c, 0x07, 0x13, 0x20, 0x1d, 0xd4, + 0x4b, 0xb3, 0xfb, 0x78, 0xac, 0xc3, 0x20, 0x37, 0x2c, 0x96, 0x31, 0x17, 0x32, 0xd5, 0xb9, 0xf9, + 0x03, 0x32, 0xcb, 0x0d, 0x94, 0x85, 0x3a, 0x57, 0x5d, 0x98, 0xa2, 0x14, 0x4b, 0xe2, 0x33, 0xaf, + 0xc0, 0xa8, 0x8e, 0x79, 0x20, 0x7f, 0xe9, 0x1f, 0x2e, 0xc2, 0x24, 0xed, 0xc1, 0x43, 0x9d, 0x88, + 0x1b, 0xe9, 0x89, 0x78, 0xd0, 0x3e, 0xb3, 0xdd, 0x67, 0xe3, 0x9d, 0xe4, 0x6c, 0x5c, 0xcc, 0x9b, + 0x8d, 0xc3, 0x9e, 0x83, 0x1f, 0xb1, 0xe0, 0xc8, 0x72, 0xc3, 0xaf, 0x6d, 0x25, 0xfc, 0x5a, 0x5f, + 0x82, 0x11, 0x7a, 0x1c, 0x87, 0x46, 0x88, 0x17, 0x23, 0xe8, 0x8f, 0x00, 0x61, 0x1d, 0x4f, 0xab, + 0x76, 0xe3, 0x46, 0x79, 0x31, 0x2b, 0x56, 0x90, 0x00, 0x61, 0x1d, 0xcf, 0xfe, 0x4b, 0x0b, 0x4e, + 0x5f, 0x5e, 0x58, 0x8a, 0x97, 0x62, 0x2a, 0x5c, 0xd1, 0x39, 0x18, 0x68, 0xd5, 0xb5, 0xae, 0xc4, + 0x62, 0xe1, 0x45, 0xd6, 0x0b, 0x01, 0x7d, 0xbf, 0x44, 0x06, 0xbb, 0x01, 0x70, 0x19, 0x57, 0x16, + 0xc4, 0xb9, 0x2b, 0xb5, 0x40, 0x56, 0xae, 0x16, 0xe8, 0x09, 0x18, 0xa4, 0xf7, 0x82, 0x5b, 0x93, + 0xfd, 0xe6, 0x06, 0x1b, 0xbc, 0x08, 0x4b, 0x98, 0xfd, 0xeb, 0x16, 0x1c, 0xb9, 0xec, 0x46, 0xf4, + 0xd2, 0x4e, 0xc6, 0xe3, 0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x93, 0x8c, 0xc7, 0x83, 0x15, + 0x04, 0x6b, 0x58, 0xfc, 0x83, 0xb6, 0x5d, 0xe6, 0x29, 0x53, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63, + 0x85, 0x41, 0xc7, 0xab, 0xee, 0x06, 0x4c, 0x64, 0xb9, 0x23, 0x0e, 0x6e, 0x35, 0x5e, 0x8b, 0x12, + 0x80, 0x63, 0x1c, 0xfb, 0x9f, 0x2c, 0x28, 0x5d, 0xe6, 0xfe, 0xbe, 0xeb, 0x61, 0xce, 0xa1, 0xfb, + 0x02, 0x0c, 0x13, 0xa9, 0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xb0, 0x40, 0x0a, + 0xaf, 0x07, 0xe7, 0xfb, 0x83, 0x79, 0x4f, 0x2f, 0x03, 0x22, 0x7a, 0x5b, 0x7a, 0x9c, 0x24, 0x16, + 0x70, 0x65, 0x29, 0x05, 0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb7, 0xe0, 0x98, 0xfa, 0xe0, 0xf7, 0xdd, + 0x67, 0xda, 0xbf, 0x53, 0x80, 0xb1, 0x2b, 0x6b, 0x6b, 0x95, 0xcb, 0x24, 0xd2, 0x56, 0x65, 0x67, + 0xb5, 0x3f, 0xd6, 0xb4, 0x97, 0x9d, 0xde, 0x88, 0xed, 0xc8, 0x6d, 0xcc, 0xf2, 0xe8, 0x7f, 0xb3, + 0x65, 0x2f, 0xba, 0x1e, 0x54, 0xa3, 0xc0, 0xf5, 0x36, 0x32, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79, + 0x3c, 0x0b, 0x7a, 0x01, 0x06, 0x58, 0xf8, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0xdd, + 0xdf, 0x2d, 0x0d, 0xdf, 0xc0, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x03, 0x46, 0x36, 0xa3, 0xa8, + 0x75, 0x85, 0x38, 0x75, 0x12, 0xc8, 0x53, 0xf6, 0x4c, 0xd6, 0x29, 0x4b, 0x07, 0x81, 0xa3, 0xc5, + 0x07, 0x53, 0x5c, 0x16, 0x62, 0x9d, 0x8e, 0x5d, 0x05, 0x88, 0x61, 0x0f, 0x48, 0x71, 0x63, 0xaf, + 0xc1, 0x30, 0xfd, 0xdc, 0xb9, 0x86, 0xeb, 0x74, 0x56, 0x8d, 0x3f, 0x03, 0xc3, 0x52, 0xf1, 0x1d, + 0x8a, 0xe0, 0x20, 0xec, 0x46, 0x92, 0x7a, 0xf1, 0x10, 0xc7, 0x70, 0xfb, 0x71, 0x10, 0xb6, 0xc3, + 0x9d, 0x48, 0xda, 0xeb, 0x70, 0x94, 0x19, 0x41, 0x3b, 0xd1, 0xa6, 0xb1, 0x46, 0xbb, 0x2f, 0x86, + 0x67, 0xc5, 0xbb, 0xae, 0xa0, 0xec, 0x7d, 0xa4, 0xf3, 0xf9, 0xa8, 0xa4, 0x18, 0xbf, 0xf1, 0xec, + 0x7f, 0xec, 0x83, 0x47, 0xca, 0xd5, 0xfc, 0xa8, 0x56, 0x97, 0x60, 0x94, 0xb3, 0x8b, 0x74, 0x69, + 0x38, 0x0d, 0xd1, 0xae, 0x92, 0x80, 0xae, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0xd3, 0x50, 0x74, 0xdf, + 0xf5, 0x92, 0xae, 0x99, 0xe5, 0xb7, 0x56, 0x31, 0x2d, 0xa7, 0x60, 0xca, 0x79, 0xf2, 0x23, 0x5d, + 
0x81, 0x15, 0xf7, 0xf9, 0x3a, 0x8c, 0xbb, 0x61, 0x2d, 0x74, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e, + 0x57, 0x32, 0x07, 0xda, 0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xef, 0x99, 0x7b, 0xed, + 0x1a, 0x53, 0x83, 0x1e, 0xff, 0x2d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70, + 0x88, 0x25, 0x8c, 0x3e, 0xe8, 0x6a, 0x9b, 0x4e, 0x6b, 0xae, 0x1d, 0x6d, 0x2e, 0xba, 0x61, 0xcd, + 0xdf, 0x26, 0xc1, 0x0e, 0x7b, 0x8b, 0x0f, 0xc5, 0x0f, 0x3a, 0x05, 0x58, 0xb8, 0x32, 0x57, 0xa1, + 0x98, 0x38, 0x5d, 0x07, 0xcd, 0xc1, 0x84, 0x2c, 0xac, 0x92, 0x90, 0x5d, 0x01, 0x23, 0x8c, 0x8c, + 0x72, 0x96, 0x14, 0xc5, 0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3, + 0x98, 0xeb, 0xb9, 0x91, 0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x97, 0x75, + 0x00, 0x36, 0xf1, 0xec, 0xff, 0xb3, 0x0f, 0xa6, 0xd8, 0xb4, 0x7d, 0xb0, 0xc2, 0xbe, 0x97, 0x56, + 0xd8, 0x8d, 0xf4, 0x0a, 0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc5, 0x64, + 0xdc, 0xc6, 0x32, 0xbb, 0x00, 0xc3, 0x81, 0xe1, 0xc7, 0x3a, 0xac, 0x2b, 0xb5, 0xa4, 0x4b, 0x6a, + 0x8c, 0x83, 0xde, 0x00, 0x68, 0xc5, 0x32, 0xf4, 0x82, 0x11, 0x7c, 0x14, 0x72, 0xc5, 0xe7, 0x5a, + 0x1d, 0xfb, 0xb3, 0x30, 0xac, 0x1c, 0x55, 0xa5, 0xa7, 0xba, 0x95, 0xe3, 0xa9, 0xde, 0x9d, 0x8d, + 0x90, 0xb6, 0x71, 0xc5, 0x4c, 0xdb, 0xb8, 0xff, 0xcb, 0x82, 0x58, 0xc3, 0x81, 0xde, 0x82, 0xe1, + 0x96, 0xcf, 0x4c, 0xa9, 0x03, 0xe9, 0x9f, 0xf0, 0x78, 0x47, 0x15, 0x09, 0x8f, 0x30, 0x18, 0xf0, + 0xe9, 0xa8, 0xc8, 0xaa, 0x38, 0xa6, 0x82, 0xae, 0xc2, 0x60, 0x2b, 0x20, 0xd5, 0x88, 0x85, 0xbf, + 0xea, 0x9d, 0x20, 0x5f, 0xbe, 0xbc, 0x22, 0x96, 0x14, 0x12, 0x96, 0xa9, 0xc5, 0xde, 0x2d, 0x53, + 0xed, 0xdf, 0x2a, 0xc0, 0x64, 0xb2, 0x11, 0xf4, 0x1a, 0xf4, 0x91, 0xbb, 0xa4, 0x26, 0xbe, 0x34, + 0x93, 0x9b, 0x88, 0xa5, 0x2b, 0x7c, 0xe8, 0xe8, 0x7f, 0xcc, 0x6a, 0xa1, 0x2b, 0x30, 0x48, 0x59, + 0x89, 0xcb, 0x2a, 0x48, 0xe4, 0xa3, 0x79, 0xec, 0x88, 0xe2, 0xc9, 0xf8, 0x67, 0x89, 0x22, 0x2c, + 0xab, 0x33, 0x53, 0xb6, 0x5a, 0xab, 0x4a, 0x5f, 0x69, 0x51, 0x27, 0x61, 0xc2, 0xda, 0x42, 0x85, + 0x23, 0x09, 0x6a, 0xdc, 0x94, 0x4d, 0x16, 0xe2, 0x98, 0x08, 0x7a, 0x03, 0xfa, 0xc3, 0x06, 0x21, + 0x2d, 0x61, 0xab, 0x90, 0x29, 0x1f, 0xad, 0x52, 0x04, 0x41, 0x89, 0xc9, 0x53, 0x58, 0x01, 0xe6, + 0x15, 0xed, 0xdf, 0xb5, 0x00, 0xb8, 0xed, 0x9f, 0xe3, 0x6d, 0x90, 0x43, 0x50, 0x29, 0x2c, 0x42, + 0x5f, 0xd8, 0x22, 0xb5, 0x4e, 0x1e, 0x06, 0x71, 0x7f, 0xaa, 0x2d, 0x52, 0x8b, 0x57, 0x3b, 0xfd, + 0x87, 0x59, 0x6d, 0xfb, 0x47, 0x01, 0xc6, 0x63, 0xb4, 0x72, 0x44, 0x9a, 0xe8, 0x39, 0x23, 0xb2, + 0xce, 0xc9, 0x44, 0x64, 0x9d, 0x61, 0x86, 0xad, 0x49, 0xaf, 0x3f, 0x0b, 0xc5, 0xa6, 0x73, 0x57, + 0x88, 0x27, 0x9f, 0xe9, 0xdc, 0x0d, 0x4a, 0x7f, 0x76, 0xc5, 0xb9, 0xcb, 0x5f, 0xf0, 0xcf, 0xc8, + 0xdd, 0xb9, 0xe2, 0xdc, 0xed, 0x6a, 0x05, 0x4f, 0x1b, 0x61, 0x6d, 0xb9, 0x9e, 0x30, 0x6b, 0xeb, + 0xa9, 0x2d, 0xd7, 0x4b, 0xb6, 0xe5, 0x7a, 0x3d, 0xb4, 0xe5, 0x7a, 0xe8, 0x1e, 0x0c, 0x0a, 0xab, + 0x53, 0x11, 0xf2, 0xef, 0x42, 0x0f, 0xed, 0x09, 0xa3, 0x55, 0xde, 0xe6, 0x05, 0x29, 0xa1, 0x10, + 0xa5, 0x5d, 0xdb, 0x95, 0x0d, 0xa2, 0xff, 0xd4, 0x82, 0x71, 0xf1, 0x1b, 0x93, 0x77, 0xdb, 0x24, + 0x8c, 0x04, 0x07, 0xff, 0x91, 0xde, 0xfb, 0x20, 0x2a, 0xf2, 0xae, 0x7c, 0x44, 0x5e, 0xb6, 0x26, + 0xb0, 0x6b, 0x8f, 0x12, 0xbd, 0x40, 0xbf, 0x65, 0xc1, 0xd1, 0xa6, 0x73, 0x97, 0xb7, 0xc8, 0xcb, + 0xb0, 0x13, 0xb9, 0xbe, 0xb0, 0xde, 0x78, 0xad, 0xb7, 0xe9, 0x4f, 0x55, 0xe7, 0x9d, 0x94, 0xaa, + 0xda, 0xa3, 0x59, 0x28, 0x5d, 0xbb, 0x9a, 0xd9, 0xaf, 0x99, 0x75, 0x18, 0x92, 0xeb, 0xed, 0x61, + 0x9a, 0xd4, 0xb3, 0x76, 
0xc4, 0x5a, 0x7b, 0xa8, 0xed, 0x7c, 0x16, 0x46, 0xf5, 0x35, 0xf6, 0x50, + 0xdb, 0x7a, 0x17, 0x8e, 0x64, 0xac, 0xa5, 0x87, 0xda, 0xe4, 0x1d, 0x38, 0x99, 0xbb, 0x3e, 0x1e, + 0xaa, 0x4b, 0xc4, 0xef, 0x58, 0xfa, 0x39, 0x78, 0x08, 0x7a, 0x9d, 0x05, 0x53, 0xaf, 0x73, 0xa6, + 0xf3, 0xce, 0xc9, 0x51, 0xee, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, 0x4d, 0x18, 0x68, 0xd0, + 0x12, 0x69, 0xbb, 0x6c, 0x77, 0xdf, 0x91, 0x31, 0x47, 0xcd, 0xca, 0x43, 0x2c, 0x28, 0xd8, 0x5f, + 0xb1, 0x20, 0xc3, 0xa9, 0x83, 0x72, 0x58, 0x6d, 0xb7, 0xce, 0x86, 0xa4, 0x18, 0x73, 0x58, 0x2a, + 0xf0, 0xcc, 0x69, 0x28, 0x6e, 0xb8, 0x75, 0xe1, 0xcd, 0xac, 0xc0, 0x97, 0x29, 0x78, 0xc3, 0xad, + 0xa3, 0x65, 0x40, 0x61, 0xbb, 0xd5, 0x6a, 0x30, 0x83, 0x27, 0xa7, 0x71, 0x39, 0xf0, 0xdb, 0x2d, + 0x6e, 0xa8, 0x5c, 0xe4, 0xe2, 0xa5, 0x6a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, 0x7f, 0x60, 0x41, 0xdf, + 0x21, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x53, 0xc4, 0x2c, 0x76, 0xee, 0x2c, + 0xdd, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0x76, 0x2d, 0x38, 0x72, 0xcd, 0x77, 0xea, + 0xf3, 0x4e, 0xc3, 0xf1, 0x6a, 0x24, 0x28, 0x7b, 0x1b, 0x07, 0xf2, 0x0a, 0x28, 0x74, 0xf5, 0x0a, + 0xb8, 0x04, 0x03, 0x6e, 0x4b, 0x0b, 0x35, 0x7f, 0x96, 0xce, 0x6e, 0xb9, 0x22, 0xa2, 0xcc, 0x23, + 0xa3, 0x71, 0x56, 0x8a, 0x05, 0x3e, 0x5d, 0x96, 0xdc, 0x1c, 0xaf, 0x2f, 0x7f, 0x59, 0xd2, 0x57, + 0x52, 0x32, 0x84, 0x9a, 0x61, 0x38, 0xbe, 0x09, 0x46, 0x13, 0xc2, 0x4d, 0x0a, 0xc3, 0xa0, 0xcb, + 0xbf, 0x54, 0xac, 0xcd, 0x27, 0xb3, 0x5f, 0x2f, 0xa9, 0x81, 0xd1, 0xfc, 0x01, 0x79, 0x01, 0x96, + 0x84, 0xec, 0x4b, 0x90, 0x19, 0xf2, 0xa6, 0xbb, 0x64, 0xca, 0xfe, 0x04, 0x4c, 0xb1, 0x9a, 0x07, + 0x94, 0xfa, 0xd8, 0x09, 0x79, 0x7a, 0x46, 0xd4, 0x60, 0xfb, 0x7f, 0xb5, 0x00, 0xad, 0xf8, 0x75, + 0x77, 0x7d, 0x47, 0x10, 0xe7, 0xdf, 0xff, 0x2e, 0x94, 0xf8, 0xb3, 0x3a, 0x19, 0x59, 0x77, 0xa1, + 0xe1, 0x84, 0xa1, 0x26, 0xcb, 0x7f, 0x52, 0xb4, 0x5b, 0x5a, 0xeb, 0x8c, 0x8e, 0xbb, 0xd1, 0x43, + 0x6f, 0x25, 0x02, 0x1d, 0x7e, 0x34, 0x15, 0xe8, 0xf0, 0xc9, 0x4c, 0x8b, 0x9a, 0x74, 0xef, 0x65, + 0x00, 0x44, 0xfb, 0x8b, 0x16, 0x4c, 0xac, 0x26, 0x22, 0xc5, 0x9e, 0x63, 0xe6, 0x05, 0x19, 0x3a, + 0xaa, 0x2a, 0x2b, 0xc5, 0x02, 0xfa, 0xc0, 0x65, 0xb8, 0xdf, 0xb6, 0x20, 0x0e, 0xb1, 0x75, 0x08, + 0x2c, 0xf7, 0x82, 0xc1, 0x72, 0x67, 0x3e, 0x5f, 0x54, 0x77, 0xf2, 0x38, 0x6e, 0x74, 0x55, 0xcd, + 0x49, 0x87, 0x97, 0x4b, 0x4c, 0x86, 0xef, 0xb3, 0x71, 0x73, 0xe2, 0xd4, 0x6c, 0x7c, 0xa3, 0x00, + 0x48, 0xe1, 0xf6, 0x1c, 0x1c, 0x33, 0x5d, 0xe3, 0xc1, 0x04, 0xc7, 0xdc, 0x06, 0xc4, 0x0c, 0x64, + 0x02, 0xc7, 0x0b, 0x39, 0x59, 0x57, 0x48, 0xad, 0x0f, 0x66, 0x7d, 0x33, 0x23, 0xbd, 0x65, 0xaf, + 0xa5, 0xa8, 0xe1, 0x8c, 0x16, 0x34, 0xc3, 0xa7, 0xfe, 0x5e, 0x0d, 0x9f, 0x06, 0xba, 0xb8, 0x7d, + 0x7f, 0xcd, 0x82, 0x31, 0x35, 0x4c, 0xef, 0x13, 0xe7, 0x11, 0xd5, 0x9f, 0x9c, 0x7b, 0xa5, 0xa2, + 0x75, 0x99, 0x31, 0x03, 0xdf, 0xc7, 0xdc, 0xf7, 0x9d, 0x86, 0x7b, 0x8f, 0xa8, 0x18, 0xce, 0x25, + 0xe1, 0x8e, 0x2f, 0x4a, 0xf7, 0x77, 0x4b, 0x63, 0xea, 0x1f, 0x8f, 0x1a, 0x1b, 0x57, 0xb1, 0x7f, + 0x99, 0x6e, 0x76, 0x73, 0x29, 0xa2, 0x97, 0xa0, 0xbf, 0xb5, 0xe9, 0x84, 0x24, 0xe1, 0x64, 0xd7, + 0x5f, 0xa1, 0x85, 0xfb, 0xbb, 0xa5, 0x71, 0x55, 0x81, 0x95, 0x60, 0x8e, 0xdd, 0x7b, 0xc8, 0xd1, + 0xf4, 0xe2, 0xec, 0x1a, 0x72, 0xf4, 0x5f, 0x2c, 0xe8, 0x5b, 0xa5, 0xb7, 0xd7, 0xc3, 0x3f, 0x02, + 0x5e, 0x37, 0x8e, 0x80, 0x53, 0x79, 0xd9, 0x8c, 0x72, 0x77, 0xff, 0x72, 0x62, 0xf7, 0x9f, 0xc9, + 0xa5, 0xd0, 0x79, 0xe3, 0x37, 0x61, 0x84, 0xe5, 0x48, 0x12, 0x0e, 0x85, 0x2f, 0x18, 0x1b, 0xbe, + 0x94, 0xd8, 0xf0, 0x13, 0x1a, 0xaa, 0xb6, 0xd3, 
0x9f, 0x82, 0x41, 0xe1, 0xa1, 0x96, 0x8c, 0x82, + 0x20, 0x70, 0xb1, 0x84, 0xdb, 0xbf, 0x50, 0x04, 0x23, 0x27, 0x13, 0xfa, 0x23, 0x0b, 0x66, 0x03, + 0x6e, 0xb9, 0x5e, 0x5f, 0x6c, 0x07, 0xae, 0xb7, 0x51, 0xad, 0x6d, 0x92, 0x7a, 0xbb, 0xe1, 0x7a, + 0x1b, 0xe5, 0x0d, 0xcf, 0x57, 0xc5, 0x4b, 0x77, 0x49, 0xad, 0xcd, 0xb4, 0xca, 0x5d, 0x12, 0x40, + 0x29, 0x0f, 0x90, 0xe7, 0xf7, 0x76, 0x4b, 0xb3, 0xf8, 0x40, 0xb4, 0xf1, 0x01, 0xfb, 0x82, 0xfe, + 0xd2, 0x82, 0x0b, 0x3c, 0x37, 0x50, 0xef, 0xfd, 0xef, 0x20, 0xe1, 0xa8, 0x48, 0x52, 0x31, 0x91, + 0x35, 0x12, 0x34, 0xe7, 0x5f, 0x16, 0x03, 0x7a, 0xa1, 0x72, 0xb0, 0xb6, 0xf0, 0x41, 0x3b, 0x67, + 0xff, 0xb7, 0x45, 0x18, 0x13, 0xa1, 0x29, 0xc5, 0x1d, 0xf0, 0x92, 0xb1, 0x24, 0x1e, 0x4d, 0x2c, + 0x89, 0x29, 0x03, 0xf9, 0xc1, 0x1c, 0xff, 0x21, 0x4c, 0xd1, 0xc3, 0xf9, 0x0a, 0x71, 0x82, 0xe8, + 0x36, 0x71, 0xb8, 0x3d, 0x63, 0xf1, 0xc0, 0xa7, 0xbf, 0x12, 0xac, 0x5f, 0x4b, 0x12, 0xc3, 0x69, + 0xfa, 0xdf, 0x4b, 0x77, 0x8e, 0x07, 0x93, 0xa9, 0xe8, 0xa2, 0x6f, 0xc3, 0xb0, 0x72, 0xaf, 0x12, + 0x87, 0x4e, 0xe7, 0x20, 0xbd, 0x49, 0x0a, 0x5c, 0xe8, 0x19, 0xbb, 0xf6, 0xc5, 0xe4, 0xec, 0xdf, + 0x2e, 0x18, 0x0d, 0xf2, 0x49, 0x5c, 0x85, 0x21, 0x27, 0x64, 0x81, 0xc3, 0xeb, 0x9d, 0x24, 0xda, + 0xa9, 0x66, 0x98, 0x8b, 0xdb, 0x9c, 0xa8, 0x89, 0x15, 0x0d, 0x74, 0x85, 0x5b, 0x8d, 0x6e, 0x93, + 0x4e, 0xe2, 0xec, 0x14, 0x35, 0x90, 0x76, 0xa5, 0xdb, 0x04, 0x8b, 0xfa, 0xe8, 0x53, 0xdc, 0xac, + 0xf7, 0xaa, 0xe7, 0xdf, 0xf1, 0x2e, 0xfb, 0xbe, 0x0c, 0x43, 0xd4, 0x1b, 0xc1, 0x29, 0x69, 0xcc, + 0xab, 0xaa, 0x63, 0x93, 0x5a, 0x6f, 0xe1, 0xba, 0x3f, 0x07, 0x2c, 0x17, 0x8a, 0x19, 0xcd, 0x20, + 0x44, 0x04, 0x26, 0x44, 0xdc, 0x53, 0x59, 0x26, 0xc6, 0x2e, 0xf3, 0xf9, 0x6d, 0xd6, 0x8e, 0x35, + 0x40, 0x57, 0x4d, 0x12, 0x38, 0x49, 0xd3, 0xde, 0xe4, 0x87, 0xf0, 0x32, 0x71, 0xa2, 0x76, 0x40, + 0x42, 0xf4, 0x71, 0x98, 0x4e, 0xbf, 0x8c, 0x85, 0x22, 0xc5, 0x62, 0xdc, 0xf3, 0xa9, 0xbd, 0xdd, + 0xd2, 0x74, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0xb6, 0x7f, 0xcd, 0x02, 0xe6, 0x43, 0x7e, 0x08, 0x9c, + 0xcf, 0xc7, 0x4c, 0xce, 0x67, 0x3a, 0x6f, 0x3a, 0x73, 0x98, 0x9e, 0x17, 0xf9, 0x1a, 0xae, 0x04, + 0xfe, 0xdd, 0x1d, 0x61, 0xf5, 0xd5, 0xfd, 0x19, 0x67, 0x7f, 0xd9, 0x02, 0x96, 0x38, 0x08, 0xf3, + 0x57, 0xbb, 0x54, 0x70, 0x74, 0x37, 0x68, 0xf8, 0x38, 0x0c, 0xad, 0x8b, 0xe1, 0xcf, 0x10, 0x3a, + 0x19, 0x1d, 0x36, 0x69, 0xcb, 0x49, 0x13, 0xbe, 0xa0, 0xe2, 0x1f, 0x56, 0xd4, 0xec, 0xff, 0xd2, + 0x82, 0x99, 0xfc, 0x6a, 0xe8, 0x06, 0x9c, 0x08, 0x48, 0xad, 0x1d, 0x84, 0x74, 0x4b, 0x88, 0x07, + 0x90, 0x70, 0xa7, 0xe2, 0x53, 0xfd, 0xc8, 0xde, 0x6e, 0xe9, 0x04, 0xce, 0x46, 0xc1, 0x79, 0x75, + 0xd1, 0x2b, 0x30, 0xde, 0x0e, 0x39, 0xe7, 0xc7, 0x98, 0xae, 0x50, 0x44, 0xa7, 0x66, 0x1e, 0x47, + 0x37, 0x0c, 0x08, 0x4e, 0x60, 0xda, 0x3f, 0xc0, 0x97, 0xa3, 0x0a, 0x50, 0xdd, 0x84, 0x29, 0x4f, + 0xfb, 0x4f, 0x6f, 0x40, 0xf9, 0xd4, 0x7f, 0xbc, 0xdb, 0xad, 0xcf, 0xae, 0x4b, 0xcd, 0xcb, 0x3d, + 0x41, 0x06, 0xa7, 0x29, 0xdb, 0xbf, 0x68, 0xc1, 0x09, 0x1d, 0x51, 0x73, 0xa4, 0xeb, 0xa6, 0x05, + 0x5c, 0x84, 0x21, 0xbf, 0x45, 0x02, 0x27, 0xf2, 0x03, 0x71, 0xcd, 0x9d, 0x97, 0x2b, 0xf4, 0xba, + 0x28, 0xdf, 0x17, 0x09, 0x73, 0x24, 0x75, 0x59, 0x8e, 0x55, 0x4d, 0x64, 0xc3, 0x00, 0x13, 0x20, + 0x86, 0xc2, 0x65, 0x92, 0x1d, 0x5a, 0xcc, 0xb2, 0x25, 0xc4, 0x02, 0x62, 0xff, 0xa3, 0xc5, 0xd7, + 0xa7, 0xde, 0x75, 0xf4, 0x2e, 0x4c, 0x36, 0x9d, 0xa8, 0xb6, 0xb9, 0x74, 0xb7, 0x15, 0x70, 0xe5, + 0xae, 0x1c, 0xa7, 0x67, 0xba, 0x8d, 0x93, 0xf6, 0x91, 0xb1, 0x69, 0xf5, 0x4a, 0x82, 0x18, 0x4e, + 0x91, 0x47, 0xb7, 0x61, 0x84, 0x95, 0x31, 0x6f, 0xe0, 0xb0, 0x13, 0x2f, 
0x93, 0xd7, 0x9a, 0x32, + 0x0e, 0x5a, 0x89, 0xe9, 0x60, 0x9d, 0xa8, 0xfd, 0xd5, 0x22, 0x3f, 0x34, 0xd8, 0xdb, 0xe3, 0x29, + 0x18, 0x6c, 0xf9, 0xf5, 0x85, 0xf2, 0x22, 0x16, 0xb3, 0xa0, 0xee, 0xbd, 0x0a, 0x2f, 0xc6, 0x12, + 0x8e, 0xce, 0xc3, 0x90, 0xf8, 0x29, 0x95, 0xf1, 0x6c, 0x8f, 0x08, 0xbc, 0x10, 0x2b, 0x28, 0x7a, + 0x1e, 0xa0, 0x15, 0xf8, 0xdb, 0x6e, 0x9d, 0x45, 0x7f, 0x2a, 0x9a, 0x76, 0x7d, 0x15, 0x05, 0xc1, + 0x1a, 0x16, 0x7a, 0x15, 0xc6, 0xda, 0x5e, 0xc8, 0xf9, 0x27, 0x2d, 0xc6, 0xbe, 0xb2, 0x38, 0xbb, + 0xa1, 0x03, 0xb1, 0x89, 0x8b, 0xe6, 0x60, 0x20, 0x72, 0x98, 0x9d, 0x5a, 0x7f, 0xbe, 0xf9, 0xfd, + 0x1a, 0xc5, 0xd0, 0xb3, 0xd9, 0xd1, 0x0a, 0x58, 0x54, 0x44, 0x6f, 0x4b, 0xc7, 0x7c, 0x7e, 0x13, + 0x09, 0xbf, 0x97, 0xde, 0x6e, 0x2d, 0xcd, 0x2d, 0x5f, 0xf8, 0xd3, 0x18, 0xb4, 0xd0, 0x2b, 0x00, + 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0x69, 0x28, 0xeb, 0x52, 0xc5, 0xc8, 0x2c, 0xfa, 0xab, 0x7e, 0x74, + 0x23, 0x24, 0x4b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x8e, 0x00, 0xc4, 0x0f, 0x0d, 0x74, 0x0f, + 0x86, 0x6a, 0x4e, 0xcb, 0xa9, 0xf1, 0x54, 0xad, 0xc5, 0x3c, 0x7f, 0xe9, 0xb8, 0xc6, 0xec, 0x82, + 0x40, 0xe7, 0xca, 0x1b, 0x19, 0xa6, 0x7c, 0x48, 0x16, 0x77, 0x55, 0xd8, 0xa8, 0xf6, 0xd0, 0x17, + 0x2c, 0x18, 0x11, 0xd1, 0x95, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a, 0xfb, 0x73, 0x71, 0x0d, + 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x5d, 0x7b, 0xa1, 0x37, 0x8c, 0x3e, 0x2c, 0xdf, 0xb6, + 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x98, 0x5d, 0x35, 0xfa, 0xb3, 0xf6, 0x86, 0xf1, 0xac, 0xed, + 0xcb, 0xf7, 0x3c, 0x36, 0xf8, 0xed, 0x6e, 0x2f, 0x5a, 0x54, 0xd1, 0xa3, 0x90, 0xf4, 0xe7, 0xbb, + 0xcb, 0x6a, 0x0f, 0xbb, 0x2e, 0x11, 0x48, 0x3e, 0x0b, 0x13, 0x75, 0x93, 0x6b, 0x11, 0x2b, 0xf1, + 0xc9, 0x3c, 0xba, 0x09, 0x26, 0x27, 0xe6, 0x53, 0x12, 0x00, 0x9c, 0x24, 0x8c, 0x2a, 0x3c, 0x28, + 0x4d, 0xd9, 0x5b, 0xf7, 0x85, 0xef, 0x95, 0x9d, 0x3b, 0x97, 0x3b, 0x61, 0x44, 0x9a, 0x14, 0x33, + 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0x61, 0x80, 0xf9, 0x4b, 0x86, 0xd3, 0x43, + 0xf9, 0x6a, 0x0d, 0x33, 0xfa, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b, 0x0a, 0xe8, 0x8a, 0xf4, + 0x46, 0x0e, 0xcb, 0xde, 0x8d, 0x90, 0x30, 0x6f, 0xe4, 0xe1, 0xf9, 0xc7, 0x63, 0x47, 0x63, 0x5e, + 0x9e, 0x99, 0xf3, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xd2, 0x15, 0xb1, 0xe2, 0x32, + 0xbb, 0x67, 0xa6, 0xdb, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69, 0x52, 0x16, 0x9a, 0xef, + 0x7a, 0xe1, 0xbd, 0xd5, 0xed, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1, 0x12, 0x2c, 0xea, 0x23, + 0x17, 0x26, 0x02, 0x83, 0xbd, 0x90, 0x21, 0xde, 0xce, 0xf5, 0xc6, 0xc4, 0x68, 0xc9, 0x03, 0x4c, + 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x63, 0x9d, 0x5f, 0xfe, 0xdd, 0x58, 0xa3, 0x99, + 0x2d, 0x18, 0x33, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x32, 0x79, 0xb2, 0x3c, 0x54, 0xcd, + 0xe3, 0x2b, 0x30, 0xce, 0x36, 0xc2, 0x1d, 0xa7, 0x25, 0x8e, 0xe2, 0xf3, 0xc6, 0x51, 0x6c, 0x9d, + 0x2f, 0xf2, 0x81, 0x91, 0x43, 0x10, 0x1f, 0x9c, 0xf6, 0xaf, 0xf4, 0x8b, 0xca, 0x6a, 0x17, 0xa1, + 0x0b, 0x30, 0x2c, 0x3a, 0xa0, 0x32, 0x70, 0xa9, 0x83, 0x61, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12, + 0xaf, 0xb1, 0xea, 0x9a, 0x87, 0x42, 0x9c, 0x78, 0x4d, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xbf, 0xb7, + 0x7d, 0x3f, 0x52, 0x77, 0xb0, 0xda, 0x6a, 0xf3, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x7b, 0xb7, 0x48, + 0xe0, 0x91, 0x86, 0x99, 0x82, 0x42, 0xdd, 0xbd, 0x57, 0x75, 0x20, 0x36, 0x71, 0x29, 0x07, 0xe1, + 0x87, 0x6c, 0xef, 0x8a, 0x27, 0x76, 0xec, 0xf1, 0x51, 0xe5, 0xb1, 0x2b, 0x24, 0x1c, 0x7d, 0x02, + 0x4e, 0xa8, 0x70, 0x8f, 0x62, 0x65, 0xca, 0x16, 0x07, 0x0c, 0x89, 0xd8, 0x89, 0x85, 0x6c, 0x34, + 
0x9c, 0x57, 0x1f, 0xbd, 0x0e, 0xe3, 0xe2, 0x19, 0x26, 0x29, 0x0e, 0x9a, 0xe6, 0x8b, 0x57, 0x0d, + 0x28, 0x4e, 0x60, 0xcb, 0x24, 0x1a, 0xec, 0x7d, 0x22, 0x29, 0x0c, 0xa5, 0x93, 0x68, 0xe8, 0x70, + 0x9c, 0xaa, 0x81, 0xe6, 0x60, 0x82, 0xb3, 0x9d, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0xf8, 0x93, 0xaa, + 0x0d, 0x79, 0xdd, 0x04, 0xe3, 0x24, 0x3e, 0xba, 0x04, 0xa3, 0x4e, 0x50, 0xdb, 0x74, 0x23, 0x52, + 0xa3, 0xbb, 0x8a, 0x59, 0x10, 0x6a, 0xf6, 0x9f, 0x73, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0d, 0xe8, + 0x0b, 0xef, 0x38, 0x2d, 0x71, 0xfa, 0xe4, 0x1f, 0xe5, 0x6a, 0x05, 0x73, 0xd3, 0x2f, 0xfa, 0x1f, + 0xb3, 0x9a, 0xf6, 0x3d, 0x38, 0x92, 0x11, 0x16, 0x87, 0x2e, 0x3d, 0xa7, 0xe5, 0xca, 0x51, 0x49, + 0xb8, 0x69, 0xcc, 0x55, 0xca, 0x72, 0x3c, 0x34, 0x2c, 0xba, 0xbe, 0x59, 0xf8, 0x1c, 0x2d, 0xdd, + 0xb8, 0x5a, 0xdf, 0xcb, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x5f, 0x0b, 0x30, 0x91, 0xa1, 0x1e, 0x64, + 0x29, 0xaf, 0x13, 0xef, 0xbc, 0x38, 0xc3, 0xb5, 0x99, 0xd5, 0xa5, 0x70, 0x80, 0xac, 0x2e, 0xc5, + 0x6e, 0x59, 0x5d, 0xfa, 0xde, 0x4b, 0x56, 0x17, 0x73, 0xc4, 0xfa, 0x7b, 0x1a, 0xb1, 0x8c, 0x4c, + 0x30, 0x03, 0x07, 0xcc, 0x04, 0x63, 0x0c, 0xfa, 0x60, 0x0f, 0x83, 0xfe, 0xd3, 0x05, 0x98, 0x4c, + 0x6a, 0x16, 0x0f, 0x41, 0x3a, 0xff, 0xa6, 0x21, 0x9d, 0x3f, 0xdf, 0x4b, 0x04, 0x81, 0x5c, 0x49, + 0x3d, 0x4e, 0x48, 0xea, 0x9f, 0xee, 0x89, 0x5a, 0x67, 0xa9, 0xfd, 0x2f, 0x15, 0xe0, 0x58, 0xa6, + 0xc2, 0xf5, 0x10, 0xc6, 0xe6, 0xba, 0x31, 0x36, 0xcf, 0xf5, 0x1c, 0x5d, 0x21, 0x77, 0x80, 0x6e, + 0x25, 0x06, 0xe8, 0x42, 0xef, 0x24, 0x3b, 0x8f, 0xd2, 0x37, 0x8b, 0x70, 0x26, 0xb3, 0x5e, 0x2c, + 0xdc, 0x5e, 0x36, 0x84, 0xdb, 0xcf, 0x27, 0x84, 0xdb, 0x76, 0xe7, 0xda, 0x0f, 0x46, 0xda, 0x2d, + 0xa2, 0x0c, 0xb0, 0x58, 0x29, 0xf7, 0x29, 0xe9, 0x36, 0xa2, 0x0c, 0x28, 0x42, 0xd8, 0xa4, 0xfb, + 0xbd, 0x24, 0xe1, 0xfe, 0x1f, 0x2d, 0x38, 0x99, 0x39, 0x37, 0x87, 0x20, 0x67, 0x5c, 0x35, 0xe5, + 0x8c, 0x4f, 0xf5, 0xbc, 0x5a, 0x73, 0x04, 0x8f, 0x5f, 0x1c, 0xc8, 0xf9, 0x16, 0x26, 0xfe, 0xb8, + 0x0e, 0x23, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xc5, 0xaf, 0xab, 0x04, 0x10, 0xcf, 0xb1, 0xc7, 0x69, + 0x5c, 0xbc, 0xbf, 0x5b, 0x9a, 0x49, 0x92, 0x88, 0xc1, 0x58, 0xa7, 0x80, 0x3e, 0x05, 0x43, 0xa1, + 0xcc, 0xdd, 0xd9, 0x77, 0xff, 0xb9, 0x3b, 0x19, 0x27, 0xa9, 0xc4, 0x3b, 0x8a, 0x24, 0xfa, 0x7e, + 0x3d, 0x6a, 0x55, 0x07, 0xc1, 0x26, 0xef, 0xe4, 0x7d, 0xc4, 0xae, 0x7a, 0x1e, 0x60, 0x5b, 0xbd, + 0xa3, 0x92, 0xa2, 0x1b, 0xed, 0x85, 0xa5, 0x61, 0xa1, 0x37, 0x60, 0x32, 0xe4, 0x01, 0x5b, 0x63, + 0x13, 0x19, 0xbe, 0x16, 0x59, 0xcc, 0xbb, 0x6a, 0x02, 0x86, 0x53, 0xd8, 0x68, 0x59, 0xb6, 0xca, + 0x8c, 0xa1, 0xf8, 0xf2, 0x3c, 0x17, 0xb7, 0x28, 0x0c, 0xa2, 0x8e, 0x26, 0x27, 0x81, 0x0d, 0xbf, + 0x56, 0x13, 0x7d, 0x0a, 0x80, 0x2e, 0x22, 0x21, 0xc2, 0x19, 0xcc, 0x3f, 0x42, 0xe9, 0xd9, 0x52, + 0xcf, 0xf4, 0xc0, 0x60, 0xe1, 0x01, 0x16, 0x15, 0x11, 0xac, 0x11, 0x44, 0x0e, 0x8c, 0xc5, 0xff, + 0xe2, 0xac, 0xf4, 0xe7, 0x73, 0x5b, 0x48, 0x12, 0x67, 0xea, 0x8d, 0x45, 0x9d, 0x04, 0x36, 0x29, + 0xa2, 0x4f, 0xc2, 0xc9, 0xed, 0x5c, 0xbb, 0x23, 0xce, 0x4b, 0xb2, 0x34, 0xf3, 0xf9, 0xd6, 0x46, + 0xf9, 0xf5, 0xed, 0xff, 0x09, 0xe0, 0x91, 0x0e, 0x27, 0x3d, 0x9a, 0x33, 0x6d, 0x06, 0x9e, 0x49, + 0xca, 0x55, 0x66, 0x32, 0x2b, 0x1b, 0x82, 0x96, 0xc4, 0x86, 0x2a, 0xbc, 0xe7, 0x0d, 0xf5, 0x13, + 0x96, 0xf6, 0xcc, 0xe2, 0x16, 0xe5, 0x1f, 0x3b, 0xe0, 0x0d, 0xf6, 0x00, 0x45, 0x60, 0xeb, 0x19, + 0x72, 0xa4, 0xe7, 0x7b, 0xee, 0x4e, 0xef, 0x82, 0xa5, 0xdf, 0xc9, 0x0e, 0x71, 0xcf, 0x45, 0x4c, + 0x97, 0x0f, 0xfa, 0xfd, 0x87, 0x15, 0xee, 0xfe, 0x1b, 0x16, 0x9c, 0x4c, 0x15, 0xf3, 0x3e, 0x90, + 0x50, 0x44, 0xe9, 0x5b, 
0x7d, 0xcf, 0x9d, 0x97, 0x04, 0xf9, 0x37, 0x5c, 0x11, 0xdf, 0x70, 0x32, + 0x17, 0x2f, 0xd9, 0xf5, 0x2f, 0xfd, 0x7d, 0xe9, 0x08, 0x6b, 0xc0, 0x44, 0xc4, 0xf9, 0x5d, 0x47, + 0x2d, 0x38, 0x5b, 0x6b, 0x07, 0x41, 0xbc, 0x58, 0x33, 0x36, 0x27, 0x7f, 0x2d, 0x3e, 0xbe, 0xb7, + 0x5b, 0x3a, 0xbb, 0xd0, 0x05, 0x17, 0x77, 0xa5, 0x86, 0x3c, 0x40, 0xcd, 0x94, 0x75, 0x1f, 0x3b, + 0x00, 0x72, 0xa4, 0x40, 0x69, 0x5b, 0x40, 0x6e, 0xa7, 0x9b, 0x61, 0x23, 0x98, 0x41, 0xf9, 0x70, + 0x65, 0x37, 0xdf, 0x99, 0x78, 0xfa, 0x33, 0xd7, 0xe0, 0x4c, 0xe7, 0xc5, 0x74, 0xa0, 0x10, 0x14, + 0x7f, 0x63, 0xc1, 0xe9, 0x8e, 0x71, 0xce, 0xbe, 0x0b, 0x1f, 0x0b, 0xf6, 0xe7, 0x2d, 0x78, 0x34, + 0xb3, 0x46, 0xd2, 0x79, 0xb0, 0x46, 0x0b, 0x35, 0x63, 0xd8, 0x38, 0xe2, 0x8f, 0x04, 0xe0, 0x18, + 0xc7, 0xb0, 0x17, 0x2d, 0x74, 0xb5, 0x17, 0xfd, 0x53, 0x0b, 0x52, 0x57, 0xfd, 0x21, 0x70, 0x9e, + 0x65, 0x93, 0xf3, 0x7c, 0xbc, 0x97, 0xd1, 0xcc, 0x61, 0x3a, 0xff, 0x79, 0x02, 0x8e, 0xe7, 0x78, + 0x90, 0x6f, 0xc3, 0xd4, 0x46, 0x8d, 0x98, 0x21, 0x43, 0x3a, 0x85, 0xd2, 0xeb, 0x18, 0x5f, 0x64, + 0xfe, 0xd8, 0xde, 0x6e, 0x69, 0x2a, 0x85, 0x82, 0xd3, 0x4d, 0xa0, 0xcf, 0x5b, 0x70, 0xd4, 0xb9, + 0x13, 0x2e, 0xd1, 0x17, 0x84, 0x5b, 0x9b, 0x6f, 0xf8, 0xb5, 0x2d, 0xca, 0x98, 0xc9, 0x6d, 0xf5, + 0x62, 0xa6, 0x28, 0xfc, 0x56, 0x35, 0x85, 0x6f, 0x34, 0x3f, 0xbd, 0xb7, 0x5b, 0x3a, 0x9a, 0x85, + 0x85, 0x33, 0xdb, 0x42, 0x58, 0xe4, 0x38, 0x73, 0xa2, 0xcd, 0x4e, 0x41, 0x6d, 0xb2, 0x5c, 0xfd, + 0x39, 0x4b, 0x2c, 0x21, 0x58, 0xd1, 0x41, 0x9f, 0x81, 0xe1, 0x0d, 0x19, 0xbf, 0x22, 0x83, 0xe5, + 0x8e, 0x07, 0xb2, 0x73, 0x54, 0x0f, 0x6e, 0x80, 0xa3, 0x90, 0x70, 0x4c, 0x14, 0xbd, 0x0e, 0x45, + 0x6f, 0x3d, 0x14, 0xa1, 0xf5, 0xb2, 0xed, 0x80, 0x4d, 0x4b, 0x6b, 0x1e, 0x3a, 0x6a, 0x75, 0xb9, + 0x8a, 0x69, 0x45, 0x74, 0x05, 0x8a, 0xc1, 0xed, 0xba, 0xd0, 0xe3, 0x64, 0x6e, 0x52, 0x3c, 0xbf, + 0x98, 0xd3, 0x2b, 0x46, 0x09, 0xcf, 0x2f, 0x62, 0x4a, 0x02, 0x55, 0xa0, 0x9f, 0xb9, 0x5d, 0x0b, + 0xd6, 0x36, 0xf3, 0x29, 0xdf, 0x21, 0x7c, 0x01, 0xf7, 0x87, 0x64, 0x08, 0x98, 0x13, 0x42, 0x6b, + 0x30, 0x50, 0x73, 0xbd, 0x3a, 0x09, 0x04, 0x2f, 0xfb, 0xe1, 0x4c, 0x8d, 0x0d, 0xc3, 0xc8, 0xa1, + 0xc9, 0x15, 0x18, 0x0c, 0x03, 0x0b, 0x5a, 0x8c, 0x2a, 0x69, 0x6d, 0xae, 0xcb, 0x1b, 0x2b, 0x9b, + 0x2a, 0x69, 0x6d, 0x2e, 0x57, 0x3b, 0x52, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xbd, + 0x26, 0x5c, 0xaa, 0x33, 0xc5, 0x9b, 0x66, 0xf4, 0xaf, 0xf9, 0x81, 0xbd, 0xdd, 0x52, 0x61, 0x79, + 0x01, 0x17, 0xd6, 0x6b, 0x68, 0x15, 0x06, 0xd7, 0x79, 0xbc, 0x20, 0x21, 0x1f, 0x7d, 0x32, 0x3b, + 0x94, 0x51, 0x2a, 0xa4, 0x10, 0xf7, 0x6d, 0x15, 0x00, 0x2c, 0x89, 0xb0, 0x94, 0x5b, 0x2a, 0xee, + 0x91, 0x08, 0xbb, 0x3a, 0x7b, 0xb0, 0x58, 0x55, 0xfc, 0xa9, 0x11, 0x47, 0x4f, 0xc2, 0x1a, 0x45, + 0xba, 0xaa, 0x9d, 0x7b, 0xed, 0x80, 0xe5, 0xe4, 0x10, 0x8a, 0x99, 0xcc, 0x55, 0x3d, 0x27, 0x91, + 0x3a, 0xad, 0x6a, 0x85, 0x84, 0x63, 0xa2, 0x68, 0x0b, 0xc6, 0xb6, 0xc3, 0xd6, 0x26, 0x91, 0x5b, + 0x9a, 0x85, 0xeb, 0xcb, 0xe1, 0x66, 0x6f, 0x0a, 0x44, 0x37, 0x88, 0xda, 0x4e, 0x23, 0x75, 0x0a, + 0xb1, 0x67, 0xcd, 0x4d, 0x9d, 0x18, 0x36, 0x69, 0xd3, 0xe1, 0x7f, 0xb7, 0xed, 0xdf, 0xde, 0x89, + 0x88, 0x88, 0x96, 0x9a, 0x39, 0xfc, 0x6f, 0x71, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0, + 0x4d, 0x31, 0x3c, 0xec, 0xf4, 0x9c, 0xcc, 0x0f, 0xc5, 0x3e, 0x27, 0x91, 0x72, 0x06, 0x85, 0x9d, + 0x96, 0x31, 0x29, 0x76, 0x4a, 0xb6, 0x36, 0xfd, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x54, 0xfe, 0x29, + 0x59, 0xc9, 0xc0, 0x4f, 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x54, 0x87, 0xf1, 0x96, 0x1f, + 0x44, 0x77, 0xfc, 0x40, 0xae, 0x2f, 0xd4, 0x41, 
0x50, 0x6a, 0x60, 0x8a, 0x16, 0x99, 0x59, 0x90, + 0x09, 0xc1, 0x09, 0x9a, 0xe8, 0xe3, 0x30, 0x18, 0xd6, 0x9c, 0x06, 0x29, 0x5f, 0x9f, 0x3e, 0x92, + 0x7f, 0xfd, 0x54, 0x39, 0x4a, 0xce, 0xea, 0xe2, 0xe1, 0x9e, 0x38, 0x0a, 0x96, 0xe4, 0xd0, 0x32, + 0xf4, 0xb3, 0x54, 0xd6, 0x2c, 0xb4, 0x6f, 0x4e, 0x44, 0xf9, 0x94, 0x53, 0x0f, 0x3f, 0x9b, 0x58, + 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x92, 0x02, 0x3f, 0x9c, 0x3e, 0x96, 0xbf, 0x07, 0x84, 0x80, + 0xe1, 0x7a, 0xb5, 0xd3, 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x1e, 0xef, + 0x60, 0xb0, 0x99, 0x7b, 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0xc7, 0x43, 0x69, + 0x9e, 0x85, 0x49, 0x98, 0xfe, 0x63, 0x2b, 0x65, 0xb1, 0xf1, 0x91, 0x5e, 0x05, 0xde, 0x0f, 0xf0, + 0xe1, 0xfa, 0x79, 0x0b, 0x8e, 0xb7, 0x32, 0x3f, 0x44, 0x30, 0x00, 0xbd, 0xc9, 0xcd, 0xf9, 0xa7, + 0xab, 0x30, 0xd0, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0x52, 0x38, 0x50, 0x7c, 0xcf, 0xc2, 0x81, 0x15, + 0x18, 0xaa, 0xf1, 0x97, 0x9c, 0x4c, 0x5f, 0xd0, 0x53, 0x10, 0x53, 0xae, 0xa7, 0x15, 0x15, 0xb1, + 0x22, 0x81, 0x7e, 0xd2, 0x82, 0xd3, 0xc9, 0xae, 0x63, 0xc2, 0xc0, 0xc2, 0x5c, 0x93, 0x8b, 0xb5, + 0x96, 0xc5, 0xf7, 0xa7, 0xf8, 0x7f, 0x03, 0x79, 0xbf, 0x1b, 0x02, 0xee, 0xdc, 0x18, 0x5a, 0xcc, + 0x90, 0xab, 0x0d, 0x98, 0x3a, 0xc9, 0x1e, 0x64, 0x6b, 0x2f, 0xc2, 0x68, 0xd3, 0x6f, 0x7b, 0x91, + 0xb0, 0xba, 0x14, 0xa6, 0x5b, 0xcc, 0x64, 0x69, 0x45, 0x2b, 0xc7, 0x06, 0x56, 0x42, 0x22, 0x37, + 0x74, 0xdf, 0x12, 0xb9, 0x77, 0x60, 0xd4, 0xd3, 0x1c, 0x12, 0x3a, 0xbd, 0x60, 0x85, 0x74, 0x51, + 0xc3, 0xe6, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xad, 0xb3, 0xb4, 0x0c, 0xde, 0x9b, 0xb4, 0xec, 0x50, + 0x9f, 0xc4, 0xf6, 0x6f, 0x16, 0x32, 0x5e, 0x0c, 0x5c, 0x2a, 0xf7, 0x9a, 0x29, 0x95, 0x3b, 0x97, + 0x94, 0xca, 0xa5, 0x54, 0x55, 0x86, 0x40, 0xae, 0xf7, 0x1c, 0x9a, 0x3d, 0x07, 0xa6, 0xfe, 0x61, + 0x0b, 0x4e, 0x30, 0xdd, 0x07, 0x6d, 0xe0, 0x3d, 0xeb, 0x3b, 0x98, 0x41, 0xec, 0xb5, 0x6c, 0x72, + 0x38, 0xaf, 0x1d, 0xbb, 0x01, 0x67, 0xbb, 0xdd, 0xbb, 0xcc, 0xbe, 0xb8, 0xae, 0xcc, 0x2b, 0x62, + 0xfb, 0xe2, 0x7a, 0x79, 0x11, 0x33, 0x48, 0xaf, 0x61, 0x17, 0xed, 0xff, 0xdb, 0x82, 0x62, 0xc5, + 0xaf, 0x1f, 0xc2, 0x8b, 0xfe, 0x63, 0xc6, 0x8b, 0xfe, 0x91, 0xec, 0x1b, 0xbf, 0x9e, 0xab, 0xec, + 0x5b, 0x4a, 0x28, 0xfb, 0x4e, 0xe7, 0x11, 0xe8, 0xac, 0xda, 0xfb, 0xe5, 0x22, 0x8c, 0x54, 0xfc, + 0xba, 0xda, 0x67, 0xff, 0xfd, 0xfd, 0xb8, 0x11, 0xe5, 0x66, 0xcd, 0xd2, 0x28, 0x33, 0x7b, 0x62, + 0x19, 0xf5, 0xe2, 0xbb, 0xcc, 0x9b, 0xe8, 0x16, 0x71, 0x37, 0x36, 0x23, 0x52, 0x4f, 0x7e, 0xce, + 0xe1, 0x79, 0x13, 0x7d, 0xab, 0x08, 0x13, 0x89, 0xd6, 0x51, 0x03, 0xc6, 0x1a, 0xba, 0x2a, 0x49, + 0xac, 0xd3, 0xfb, 0xd2, 0x42, 0x09, 0x6f, 0x0c, 0xad, 0x08, 0x9b, 0xc4, 0xd1, 0x2c, 0x80, 0xa7, + 0xdb, 0xa4, 0xab, 0x00, 0xcb, 0x9a, 0x3d, 0xba, 0x86, 0x81, 0x5e, 0x82, 0x91, 0xc8, 0x6f, 0xf9, + 0x0d, 0x7f, 0x63, 0xe7, 0x2a, 0x91, 0x11, 0x39, 0x95, 0xc9, 0xf2, 0x5a, 0x0c, 0xc2, 0x3a, 0x1e, + 0xba, 0x0b, 0x53, 0x8a, 0x48, 0xf5, 0x01, 0xa8, 0xd7, 0x98, 0xd8, 0x64, 0x35, 0x49, 0x11, 0xa7, + 0x1b, 0x41, 0xaf, 0xc0, 0x38, 0xb3, 0x9d, 0x66, 0xf5, 0xaf, 0x92, 0x1d, 0x19, 0xa9, 0x99, 0x71, + 0xd8, 0x2b, 0x06, 0x04, 0x27, 0x30, 0xd1, 0x02, 0x4c, 0x35, 0xdd, 0x30, 0x51, 0x7d, 0x80, 0x55, + 0x67, 0x1d, 0x58, 0x49, 0x02, 0x71, 0x1a, 0xdf, 0xfe, 0x75, 0x31, 0xc7, 0x5e, 0xe4, 0x7e, 0xb0, + 0x1d, 0xdf, 0xdf, 0xdb, 0xf1, 0x9b, 0x16, 0x4c, 0xd2, 0xd6, 0x99, 0x41, 0xa8, 0x64, 0xa4, 0x54, + 0x2e, 0x0f, 0xab, 0x43, 0x2e, 0x8f, 0x73, 0xf4, 0xd8, 0xae, 0xfb, 0xed, 0x48, 0x48, 0x47, 0xb5, + 0x73, 0x99, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x08, 0x84, 0xd7, 0xbd, 
0x8e, 0x47, 0x82, 0x00, + 0x0b, 0xa8, 0x4c, 0xf5, 0xd1, 0x97, 0x9d, 0xea, 0x83, 0x47, 0x6c, 0x17, 0x76, 0x74, 0x82, 0xa5, + 0xd5, 0x22, 0xb6, 0x4b, 0x03, 0xbb, 0x18, 0xc7, 0xfe, 0x76, 0x11, 0x46, 0x2b, 0x7e, 0x3d, 0x36, + 0xec, 0x78, 0xd1, 0x30, 0xec, 0x38, 0x9b, 0x30, 0xec, 0x98, 0xd4, 0x71, 0x35, 0x33, 0x8e, 0x37, + 0x01, 0xf9, 0x22, 0x90, 0xfc, 0x65, 0xe2, 0x31, 0xbb, 0x37, 0x61, 0xa8, 0x57, 0x8c, 0xcd, 0x1e, + 0xae, 0xa7, 0x30, 0x70, 0x46, 0xad, 0x0f, 0x4c, 0x42, 0x0e, 0xd7, 0x24, 0xe4, 0x4f, 0x2c, 0xb6, + 0x02, 0x16, 0x57, 0xab, 0xdc, 0x56, 0x19, 0x5d, 0x84, 0x11, 0x76, 0x5a, 0xb2, 0x90, 0x11, 0xd2, + 0x72, 0x82, 0xa5, 0xf1, 0x5c, 0x8d, 0x8b, 0xb1, 0x8e, 0x83, 0xce, 0xc3, 0x50, 0x48, 0x9c, 0xa0, + 0xb6, 0xa9, 0xae, 0x0a, 0x61, 0xe6, 0xc0, 0xcb, 0xb0, 0x82, 0xa2, 0xb7, 0xe2, 0xc0, 0xe3, 0xc5, + 0x7c, 0xc3, 0x67, 0xbd, 0x3f, 0x7c, 0xbb, 0xe5, 0x47, 0x1b, 0xb7, 0x6f, 0x01, 0x4a, 0xe3, 0xf7, + 0xe0, 0x49, 0x56, 0x32, 0x43, 0xe3, 0x0e, 0xa7, 0xc2, 0xe2, 0xfe, 0x9b, 0x05, 0xe3, 0x15, 0xbf, + 0x4e, 0x8f, 0x81, 0xef, 0xa5, 0x3d, 0xaf, 0x67, 0x5d, 0x18, 0xe8, 0x90, 0x75, 0xe1, 0x31, 0xe8, + 0xaf, 0xf8, 0xf5, 0x2e, 0xe1, 0x7b, 0x7f, 0xc5, 0x82, 0xc1, 0x8a, 0x5f, 0x3f, 0x04, 0x25, 0xce, + 0x6b, 0xa6, 0x12, 0xe7, 0x44, 0xce, 0xba, 0xc9, 0xd1, 0xdb, 0xfc, 0x79, 0x1f, 0x8c, 0xd1, 0x7e, + 0xfa, 0x1b, 0x72, 0x2a, 0x8d, 0x61, 0xb3, 0x7a, 0x18, 0x36, 0xfa, 0xa4, 0xf0, 0x1b, 0x0d, 0xff, + 0x4e, 0x72, 0x5a, 0x97, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x2c, 0x0c, 0xb5, 0x02, 0xb2, 0xed, 0xfa, + 0x82, 0x57, 0xd7, 0x54, 0x62, 0x15, 0x51, 0x8e, 0x15, 0x06, 0x7d, 0xc4, 0x87, 0xae, 0x47, 0xf9, + 0x92, 0x9a, 0xef, 0xd5, 0xb9, 0x9e, 0xa3, 0x28, 0x52, 0x83, 0x69, 0xe5, 0xd8, 0xc0, 0x42, 0xb7, + 0x60, 0x98, 0xfd, 0x67, 0xc7, 0x4e, 0xff, 0x81, 0x8f, 0x1d, 0x91, 0x2c, 0x59, 0x10, 0xc0, 0x31, + 0x2d, 0xf4, 0x3c, 0x40, 0x24, 0xd3, 0xeb, 0x84, 0x22, 0x8c, 0xab, 0x7a, 0xd7, 0xa8, 0xc4, 0x3b, + 0x21, 0xd6, 0xb0, 0xd0, 0x33, 0x30, 0x1c, 0x39, 0x6e, 0xe3, 0x9a, 0xeb, 0x31, 0x5b, 0x00, 0xda, + 0x7f, 0x91, 0xb3, 0x58, 0x14, 0xe2, 0x18, 0x4e, 0xf9, 0x4a, 0x16, 0xdd, 0x6a, 0x7e, 0x27, 0x12, + 0xe9, 0xf9, 0x8a, 0x9c, 0xaf, 0xbc, 0xa6, 0x4a, 0xb1, 0x86, 0x81, 0x36, 0xe1, 0x94, 0xeb, 0xb1, + 0x34, 0x5a, 0xa4, 0xba, 0xe5, 0xb6, 0xd6, 0xae, 0x55, 0x6f, 0x92, 0xc0, 0x5d, 0xdf, 0x99, 0x77, + 0x6a, 0x5b, 0xc4, 0xab, 0x33, 0xb1, 0xc3, 0xd0, 0xfc, 0xe3, 0xa2, 0x8b, 0xa7, 0xca, 0x1d, 0x70, + 0x71, 0x47, 0x4a, 0xc8, 0xa6, 0xdb, 0x31, 0x20, 0x4e, 0x53, 0xc8, 0x17, 0x78, 0x0a, 0x1e, 0x56, + 0x82, 0x05, 0xc4, 0x7e, 0x81, 0xed, 0x89, 0xeb, 0x55, 0xf4, 0xb4, 0x71, 0xbc, 0x1c, 0xd7, 0x8f, + 0x97, 0xfd, 0xdd, 0xd2, 0xc0, 0xf5, 0xaa, 0x16, 0xe9, 0xe8, 0x12, 0x1c, 0xab, 0xf8, 0xf5, 0x8a, + 0x1f, 0x44, 0xcb, 0x7e, 0x70, 0xc7, 0x09, 0xea, 0x72, 0x09, 0x96, 0x64, 0xac, 0x27, 0x7a, 0xc6, + 0xf6, 0xf3, 0x13, 0xc8, 0x88, 0xe3, 0xf4, 0x02, 0xe3, 0x10, 0x0f, 0xe8, 0x5a, 0x5b, 0x63, 0xbc, + 0x8a, 0x4a, 0x56, 0x77, 0xd9, 0x89, 0x08, 0xba, 0x0e, 0x63, 0x35, 0xfd, 0xda, 0x16, 0xd5, 0x9f, + 0x92, 0x97, 0x9d, 0x71, 0xa7, 0x67, 0xde, 0xf3, 0x66, 0x7d, 0xfb, 0x1b, 0x96, 0x68, 0x85, 0x4b, + 0x3e, 0xb8, 0x0d, 0x6d, 0xf7, 0x33, 0x77, 0x01, 0xa6, 0x02, 0xbd, 0x8a, 0x66, 0x8b, 0x76, 0x8c, + 0x67, 0xff, 0x49, 0x00, 0x71, 0x1a, 0x1f, 0x7d, 0x12, 0x4e, 0x1a, 0x85, 0x52, 0x2d, 0xaf, 0xe5, + 0xe0, 0x66, 0xb2, 0x21, 0x9c, 0x87, 0x84, 0xf3, 0xeb, 0xdb, 0x3f, 0x08, 0xc7, 0x93, 0xdf, 0x25, + 0xa4, 0x35, 0xf7, 0xf9, 0x75, 0x85, 0x83, 0x7d, 0x9d, 0xfd, 0x12, 0x4c, 0xd1, 0x67, 0xbc, 0x62, + 0x49, 0xd9, 0xfc, 0x75, 0x0f, 0xa7, 0xf5, 0xdb, 0x43, 0xec, 0x1a, 0x4c, 0x64, 0xa0, 0x43, 0x9f, + 
0x86, 0xf1, 0x90, 0xb0, 0x18, 0x72, 0x52, 0x4a, 0xd8, 0xc1, 0x2f, 0xbe, 0xba, 0xa4, 0x63, 0xf2, + 0x97, 0x90, 0x59, 0x86, 0x13, 0xd4, 0x50, 0x13, 0xc6, 0xef, 0xb8, 0x5e, 0xdd, 0xbf, 0x13, 0x4a, + 0xfa, 0x43, 0xf9, 0x2a, 0x87, 0x5b, 0x1c, 0x33, 0xd1, 0x47, 0xa3, 0xb9, 0x5b, 0x06, 0x31, 0x9c, + 0x20, 0x4e, 0x8f, 0x9a, 0xa0, 0xed, 0xcd, 0x85, 0x37, 0x42, 0x12, 0x88, 0x08, 0x77, 0xec, 0xa8, + 0xc1, 0xb2, 0x10, 0xc7, 0x70, 0x7a, 0xd4, 0xb0, 0x3f, 0xcc, 0xb1, 0x9e, 0x9d, 0x65, 0xe2, 0xa8, + 0xc1, 0xaa, 0x14, 0x6b, 0x18, 0xf4, 0x28, 0x66, 0xff, 0x56, 0x7d, 0x0f, 0xfb, 0x7e, 0x24, 0x0f, + 0x6f, 0x96, 0xae, 0x53, 0x2b, 0xc7, 0x06, 0x56, 0x4e, 0x3c, 0xbd, 0xbe, 0x83, 0xc6, 0xd3, 0x43, + 0x51, 0x87, 0x58, 0x02, 0x3c, 0x22, 0xf4, 0xa5, 0x4e, 0xb1, 0x04, 0xf6, 0xef, 0x2b, 0xce, 0x00, + 0xe5, 0x05, 0xd6, 0xc5, 0x00, 0xf5, 0xf3, 0x80, 0x81, 0x4c, 0x29, 0x5a, 0xe5, 0xa3, 0x23, 0x61, + 0x68, 0x09, 0x06, 0xc3, 0x9d, 0xb0, 0x16, 0x35, 0xc2, 0x4e, 0x29, 0x59, 0xab, 0x0c, 0x45, 0xcb, + 0x08, 0xce, 0xab, 0x60, 0x59, 0x17, 0xd5, 0xe0, 0x88, 0xa0, 0xb8, 0xb0, 0xe9, 0x78, 0x2a, 0x51, + 0x24, 0xb7, 0x7e, 0xbc, 0xb8, 0xb7, 0x5b, 0x3a, 0x22, 0x5a, 0xd6, 0xc1, 0xfb, 0xbb, 0x25, 0xba, + 0x25, 0x33, 0x20, 0x38, 0x8b, 0x1a, 0x5f, 0xf2, 0xb5, 0x9a, 0xdf, 0x6c, 0x55, 0x02, 0x7f, 0xdd, + 0x6d, 0x90, 0x4e, 0x8a, 0xe5, 0xaa, 0x81, 0x29, 0x96, 0xbc, 0x51, 0x86, 0x13, 0xd4, 0xd0, 0x6d, + 0x98, 0x70, 0x5a, 0xad, 0xb9, 0xa0, 0xe9, 0x07, 0xb2, 0x81, 0x91, 0x7c, 0x0d, 0xc5, 0x9c, 0x89, + 0xca, 0xf3, 0x44, 0x26, 0x0a, 0x71, 0x92, 0x20, 0x1d, 0x28, 0xb1, 0xd1, 0x8c, 0x81, 0x1a, 0x8b, + 0x07, 0x4a, 0xec, 0xcb, 0x8c, 0x81, 0xca, 0x80, 0xe0, 0x2c, 0x6a, 0xf6, 0x0f, 0x30, 0xc6, 0x9f, + 0xc5, 0x9b, 0x66, 0x6e, 0x46, 0x4d, 0x18, 0x6b, 0xb1, 0x63, 0x5f, 0xe4, 0x70, 0x13, 0x47, 0xc5, + 0x8b, 0x3d, 0x0a, 0x42, 0xef, 0xb0, 0x2c, 0xb4, 0x86, 0x41, 0x6c, 0x45, 0x27, 0x87, 0x4d, 0xea, + 0xf6, 0x2f, 0xcd, 0x30, 0xd6, 0xb1, 0xca, 0xa5, 0x9b, 0x83, 0xc2, 0xe9, 0x52, 0xc8, 0x33, 0x66, + 0xf2, 0xf5, 0x08, 0xf1, 0xfa, 0x12, 0x8e, 0x9b, 0x58, 0xd6, 0x45, 0x9f, 0x82, 0x71, 0xd7, 0x73, + 0xe3, 0xec, 0xcd, 0xe1, 0xf4, 0xd1, 0xfc, 0x68, 0x5e, 0x0a, 0x4b, 0xcf, 0xef, 0xa8, 0x57, 0xc6, + 0x09, 0x62, 0xe8, 0x2d, 0x66, 0x23, 0x2a, 0x49, 0x17, 0x7a, 0x21, 0xad, 0x9b, 0x83, 0x4a, 0xb2, + 0x1a, 0x11, 0xd4, 0x86, 0x23, 0xe9, 0x2c, 0xd6, 0xe1, 0xb4, 0x9d, 0xff, 0x36, 0x4a, 0x27, 0xa2, + 0x8e, 0x13, 0xf1, 0xa5, 0x61, 0x21, 0xce, 0xa2, 0x8f, 0xae, 0x25, 0x73, 0x0c, 0x17, 0x0d, 0x0d, + 0x44, 0x2a, 0xcf, 0xf0, 0x58, 0xc7, 0xf4, 0xc2, 0x1b, 0x70, 0x5a, 0x4b, 0xd3, 0x7a, 0x39, 0x70, + 0x98, 0x8d, 0x92, 0xcb, 0x6e, 0x23, 0x8d, 0xa9, 0x7d, 0x74, 0x6f, 0xb7, 0x74, 0x7a, 0xad, 0x13, + 0x22, 0xee, 0x4c, 0x07, 0x5d, 0x87, 0x63, 0x3c, 0x16, 0xcd, 0x22, 0x71, 0xea, 0x0d, 0xd7, 0x53, + 0x5c, 0x33, 0x3f, 0xbb, 0x4e, 0xee, 0xed, 0x96, 0x8e, 0xcd, 0x65, 0x21, 0xe0, 0xec, 0x7a, 0xe8, + 0x35, 0x18, 0xae, 0x7b, 0xf2, 0x94, 0x1d, 0x30, 0x32, 0xe1, 0x0e, 0x2f, 0xae, 0x56, 0xd5, 0xf7, + 0xc7, 0x7f, 0x70, 0x5c, 0x01, 0x6d, 0x70, 0x15, 0x98, 0x92, 0x5b, 0x0e, 0xa6, 0x42, 0x94, 0x26, + 0x45, 0xfb, 0x46, 0x70, 0x07, 0xae, 0xfb, 0x55, 0x0e, 0x80, 0x46, 0xdc, 0x07, 0x83, 0x30, 0x7a, + 0x13, 0x90, 0xc8, 0xb8, 0x34, 0x57, 0x63, 0x09, 0x02, 0x35, 0xbb, 0x54, 0x25, 0x42, 0xa8, 0xa6, + 0x30, 0x70, 0x46, 0x2d, 0x74, 0x85, 0x1e, 0x8f, 0x7a, 0xa9, 0x38, 0x7e, 0x55, 0xbe, 0xf5, 0x45, + 0xd2, 0x0a, 0x08, 0x33, 0xa5, 0x34, 0x29, 0xe2, 0x44, 0x3d, 0x54, 0x87, 0x53, 0x4e, 0x3b, 0xf2, + 0x99, 0x76, 0xd1, 0x44, 0x5d, 0xf3, 0xb7, 0x88, 0xc7, 0x14, 0xfb, 0x43, 0x2c, 0xf4, 0xe9, 0xa9, + 0xb9, 0x0e, 0x78, 0xb8, 
0x23, 0x15, 0xfa, 0x9c, 0xa2, 0x63, 0xa1, 0x29, 0xfe, 0x0c, 0x3f, 0x75, + 0xae, 0x0d, 0x97, 0x18, 0xe8, 0x25, 0x18, 0xd9, 0xf4, 0xc3, 0x68, 0x95, 0x44, 0x77, 0xfc, 0x60, + 0x4b, 0xa4, 0x78, 0x88, 0xd3, 0xea, 0xc4, 0x20, 0xac, 0xe3, 0xa1, 0xa7, 0x60, 0x90, 0x99, 0x9d, + 0x95, 0x17, 0xd9, 0x5d, 0x3b, 0x14, 0x9f, 0x31, 0x57, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xae, + 0x2c, 0xb0, 0xe3, 0x38, 0x81, 0x5a, 0xae, 0x2c, 0x60, 0x09, 0xa7, 0xcb, 0x35, 0xdc, 0x74, 0x02, + 0x52, 0x09, 0xfc, 0x1a, 0x09, 0xb5, 0x64, 0x4e, 0x8f, 0xf0, 0x04, 0x16, 0x74, 0xb9, 0x56, 0xb3, + 0x10, 0x70, 0x76, 0x3d, 0x44, 0xd2, 0x29, 0x8a, 0xc7, 0xf3, 0xd5, 0xae, 0x69, 0x76, 0xb0, 0xc7, + 0x2c, 0xc5, 0x1e, 0x4c, 0xaa, 0xe4, 0xc8, 0x3c, 0x65, 0x45, 0x38, 0x3d, 0xc1, 0xd6, 0x76, 0xef, + 0xf9, 0x2e, 0x94, 0x22, 0xbb, 0x9c, 0xa0, 0x84, 0x53, 0xb4, 0x8d, 0xd8, 0xba, 0x93, 0x5d, 0x63, + 0xeb, 0x5e, 0x80, 0xe1, 0xb0, 0x7d, 0xbb, 0xee, 0x37, 0x1d, 0xd7, 0x63, 0xd6, 0x3b, 0xda, 0xc3, + 0xbd, 0x2a, 0x01, 0x38, 0xc6, 0x41, 0xcb, 0x30, 0xe4, 0x48, 0x2d, 0x35, 0xca, 0x0f, 0x1b, 0xa8, + 0x74, 0xd3, 0x3c, 0x92, 0x96, 0xd4, 0x4b, 0xab, 0xba, 0xe8, 0x55, 0x18, 0x13, 0xa1, 0x49, 0x78, + 0x14, 0x1e, 0x66, 0x5d, 0xa3, 0x39, 0x53, 0x57, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x0d, 0x18, 0x89, + 0xfc, 0x86, 0x90, 0x71, 0x86, 0xd3, 0xc7, 0xf3, 0xa3, 0xfb, 0xae, 0x29, 0x34, 0x5d, 0x7f, 0xa2, + 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe3, 0xeb, 0x9d, 0xa5, 0x6e, 0x22, 0xa1, 0x48, 0x48, 0x7f, 0x3a, + 0xcf, 0xf4, 0x92, 0xa1, 0x99, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x19, 0xa6, 0x5a, 0x81, + 0xeb, 0xb3, 0x35, 0xa1, 0xb4, 0xee, 0xd3, 0x66, 0xa2, 0xd6, 0x4a, 0x12, 0x01, 0xa7, 0xeb, 0xb0, + 0xc8, 0x32, 0xa2, 0x70, 0xfa, 0x24, 0x4f, 0x36, 0xc7, 0xe5, 0x20, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, + 0x61, 0x27, 0x31, 0x17, 0xe1, 0x4d, 0xcf, 0xe4, 0xc7, 0x2b, 0xd0, 0x45, 0x7d, 0x9c, 0xf7, 0x57, + 0x7f, 0x71, 0x4c, 0x01, 0xd5, 0xb5, 0x1c, 0xef, 0xf4, 0x05, 0x15, 0x4e, 0x9f, 0xea, 0x60, 0xfb, + 0x9b, 0x78, 0x2e, 0xc7, 0x0c, 0x81, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x06, 0x4c, 0x8a, 0xb0, + 0x0b, 0xf1, 0x30, 0x9d, 0x8e, 0xfd, 0xa3, 0x70, 0x02, 0x86, 0x53, 0xd8, 0x3c, 0xd9, 0x9b, 0x73, + 0xbb, 0x41, 0xc4, 0xd1, 0x77, 0xcd, 0xf5, 0xb6, 0xc2, 0xe9, 0x33, 0xec, 0x7c, 0x10, 0xc9, 0xde, + 0x92, 0x50, 0x9c, 0x51, 0x03, 0xad, 0xc1, 0x64, 0x2b, 0x20, 0xa4, 0xc9, 0xde, 0x49, 0xe2, 0x3e, + 0x2b, 0xf1, 0xc0, 0x4a, 0xb4, 0x27, 0x95, 0x04, 0x6c, 0x3f, 0xa3, 0x0c, 0xa7, 0x28, 0xa0, 0x3b, + 0x30, 0xe4, 0x6f, 0x93, 0x60, 0x93, 0x38, 0xf5, 0xe9, 0xb3, 0x1d, 0xbc, 0xf6, 0xc4, 0xe5, 0x76, + 0x5d, 0xe0, 0x26, 0x8c, 0x9a, 0x64, 0x71, 0x77, 0xa3, 0x26, 0xd9, 0x18, 0xfa, 0x4f, 0x2c, 0x38, + 0x29, 0xd5, 0x84, 0xd5, 0x16, 0x1d, 0xf5, 0x05, 0xdf, 0x0b, 0xa3, 0x80, 0x87, 0x02, 0x7a, 0x34, + 0x3f, 0x3c, 0xce, 0x5a, 0x4e, 0x25, 0xa5, 0x45, 0x38, 0x99, 0x87, 0x11, 0xe2, 0xfc, 0x16, 0xe9, + 0xcb, 0x3e, 0x24, 0x91, 0x3c, 0x8c, 0xe6, 0xc2, 0xe5, 0xb7, 0x16, 0x57, 0xa7, 0x1f, 0xe3, 0x71, + 0x8c, 0xe8, 0x66, 0xa8, 0x26, 0x81, 0x38, 0x8d, 0x8f, 0x2e, 0x42, 0xc1, 0x0f, 0xa7, 0x1f, 0x67, + 0x6b, 0xfb, 0x64, 0xce, 0x38, 0x5e, 0xaf, 0x72, 0xe3, 0xd6, 0xeb, 0x55, 0x5c, 0xf0, 0x43, 0x99, + 0x70, 0x8d, 0x3e, 0x67, 0xc3, 0xe9, 0x27, 0xb8, 0xcc, 0x59, 0x26, 0x5c, 0x63, 0x85, 0x38, 0x86, + 0xa3, 0x4d, 0x98, 0x08, 0x0d, 0xb1, 0x41, 0x38, 0x7d, 0x8e, 0x8d, 0xd4, 0x13, 0x79, 0x93, 0x66, + 0x60, 0x6b, 0x99, 0x90, 0x4c, 0x2a, 0x38, 0x49, 0x96, 0xef, 0x2e, 0x4d, 0x70, 0x11, 0x4e, 0x3f, + 0xd9, 0x65, 0x77, 0x69, 0xc8, 0xfa, 0xee, 0xd2, 0x69, 0xe0, 0x04, 0x4d, 0x74, 0x43, 0x77, 0x89, + 0x3c, 0x9f, 0x6f, 0x28, 0x99, 0xe9, 0x0c, 0x39, 
0x96, 0xe7, 0x08, 0x39, 0xf3, 0x7d, 0x30, 0x95, + 0xe2, 0xc2, 0x0e, 0xe2, 0x1f, 0x32, 0xb3, 0x05, 0x63, 0xc6, 0x4a, 0x7f, 0xa8, 0xe6, 0x43, 0x3f, + 0x03, 0x30, 0xac, 0xcc, 0x3a, 0x72, 0xf4, 0x6c, 0x53, 0xf7, 0xa5, 0x67, 0xbb, 0x60, 0x5a, 0x1f, + 0x9d, 0x4c, 0x5a, 0x1f, 0x0d, 0x55, 0xfc, 0xba, 0x61, 0x70, 0xb4, 0x96, 0x11, 0x41, 0x38, 0xef, + 0x8c, 0xee, 0xdd, 0x21, 0x4e, 0x53, 0x55, 0x15, 0x7b, 0x36, 0x63, 0xea, 0xeb, 0xa8, 0xfd, 0xba, + 0x0c, 0x53, 0x9e, 0xcf, 0x9e, 0x11, 0xa4, 0x2e, 0x79, 0x44, 0xc6, 0x0a, 0x0e, 0xeb, 0x11, 0xee, + 0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x79, 0xb9, 0xa4, 0xba, 0x8d, 0xb3, 0x7a, 0x58, 0x40, + 0xe9, 0xf3, 0x95, 0xff, 0x0a, 0xa7, 0x27, 0xf3, 0x9f, 0xaf, 0xbc, 0x52, 0x92, 0x5f, 0x0c, 0x25, + 0xbf, 0xc8, 0xb4, 0x4b, 0x2d, 0xbf, 0x5e, 0xae, 0x88, 0x97, 0x88, 0x16, 0xdb, 0xbf, 0x5e, 0xae, + 0x60, 0x0e, 0x43, 0x73, 0x30, 0xc0, 0x7e, 0xc8, 0xc8, 0x41, 0x79, 0x27, 0x49, 0xb9, 0xa2, 0xe5, + 0xa4, 0x65, 0x15, 0xb0, 0xa8, 0xc8, 0xb4, 0x07, 0xf4, 0xf9, 0xc6, 0xb4, 0x07, 0x83, 0xf7, 0xa9, + 0x3d, 0x90, 0x04, 0x70, 0x4c, 0x0b, 0xdd, 0x85, 0x63, 0xc6, 0x93, 0x59, 0x79, 0x08, 0x42, 0xbe, + 0x91, 0x42, 0x02, 0x79, 0xfe, 0xb4, 0xe8, 0xf4, 0xb1, 0x72, 0x16, 0x25, 0x9c, 0xdd, 0x00, 0x6a, + 0xc0, 0x54, 0x2d, 0xd5, 0xea, 0x50, 0xef, 0xad, 0xaa, 0x75, 0x91, 0x6e, 0x31, 0x4d, 0x18, 0xbd, + 0x0a, 0x43, 0xef, 0xfa, 0xdc, 0xa0, 0x50, 0xbc, 0x9e, 0x64, 0x7c, 0x9b, 0xa1, 0xb7, 0xae, 0x57, + 0x59, 0xf9, 0xfe, 0x6e, 0x69, 0xa4, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x2a, 0xa0, 0x1f, 0xb3, 0x60, + 0x26, 0xfd, 0x26, 0x57, 0x9d, 0x1e, 0xeb, 0xbd, 0xd3, 0xb6, 0x68, 0x74, 0x66, 0x29, 0x97, 0x1c, + 0xee, 0xd0, 0x14, 0xfa, 0x28, 0xdd, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x84, 0xfe, 0x8f, 0xc6, 0xfb, + 0x89, 0x96, 0xee, 0xef, 0x96, 0x26, 0xf8, 0xe1, 0xed, 0xde, 0x53, 0x59, 0x08, 0x78, 0x05, 0xf4, + 0x83, 0x70, 0x2c, 0x48, 0xcb, 0xc8, 0x89, 0x7c, 0x27, 0x3c, 0xdd, 0xcb, 0x45, 0x90, 0x9c, 0x70, + 0x9c, 0x45, 0x10, 0x67, 0xb7, 0x63, 0xff, 0xa1, 0xc5, 0x74, 0x23, 0xa2, 0x5b, 0x24, 0x6c, 0x37, + 0xa2, 0x43, 0x30, 0xe2, 0x5b, 0x32, 0x6c, 0x13, 0xee, 0xdb, 0x0a, 0xef, 0xbf, 0xb3, 0x98, 0x15, + 0xde, 0x21, 0xfa, 0x13, 0xbe, 0x05, 0x43, 0x91, 0x68, 0x4d, 0x74, 0x3d, 0xcf, 0x62, 0x48, 0x76, + 0x8a, 0x59, 0x22, 0xaa, 0x77, 0x98, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0x5f, 0xf3, 0x19, 0x90, 0x90, + 0x43, 0x50, 0x01, 0x2f, 0x9a, 0x2a, 0xe0, 0x52, 0x97, 0x2f, 0xc8, 0x51, 0x05, 0xff, 0x57, 0x66, + 0xbf, 0x99, 0xfc, 0xf1, 0xfd, 0x6e, 0xfe, 0x69, 0x7f, 0xd1, 0x02, 0x88, 0xd3, 0xbe, 0xf4, 0x90, + 0xc0, 0xfb, 0x12, 0x7d, 0x79, 0xf9, 0x91, 0x5f, 0xf3, 0x1b, 0x42, 0x05, 0x75, 0x2a, 0xd6, 0x42, + 0xf3, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0xe3, 0x30, 0x17, 0x63, 0xbb, 0x08, 0x23, + 0x06, 0xf3, 0x57, 0x2c, 0x38, 0x9a, 0xe5, 0x9c, 0x42, 0xdf, 0xf1, 0x5c, 0x12, 0xab, 0x4c, 0x73, + 0xd5, 0x6c, 0xde, 0x14, 0xe5, 0x58, 0x61, 0xf4, 0x9c, 0x19, 0xfd, 0x60, 0x29, 0x49, 0xae, 0xc3, + 0x58, 0x25, 0x20, 0x1a, 0x7f, 0xf1, 0x7a, 0x9c, 0x2d, 0x69, 0x78, 0xfe, 0xd9, 0x03, 0x47, 0x7c, + 0xb2, 0xbf, 0x5a, 0x80, 0xa3, 0xdc, 0xc0, 0x6c, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf, 0x2e, 0x5c, + 0x8a, 0xdf, 0x86, 0xd1, 0x96, 0x26, 0x3e, 0xef, 0x14, 0x5e, 0x5f, 0x17, 0xb3, 0xc7, 0x02, 0x3f, + 0xbd, 0x14, 0x1b, 0xb4, 0x50, 0x1d, 0x46, 0xc9, 0xb6, 0x5b, 0x53, 0x96, 0x45, 0x85, 0x03, 0x5f, + 0xd2, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0xf6, 0x6c, 0x16, 0xae, 0xb1, 0x68, 0x7d, 0x5d, + 0xac, 0x89, 0x7e, 0xce, 0x82, 0x13, 0x39, 0xc1, 0xf8, 0x69, 0x73, 0x77, 0x98, 0x29, 0x9f, 0x58, + 0xb6, 0xaa, 0x39, 0x6e, 0xe0, 0x87, 0x05, 0x14, 0x7d, 0x1c, 0xa0, 0x15, 
0xa7, 0x30, 0xed, 0x12, + 0xb5, 0xdc, 0x88, 0x5f, 0xac, 0x85, 0xa2, 0x55, 0x99, 0x4e, 0x35, 0x5a, 0xf6, 0x57, 0xfa, 0xa0, + 0x9f, 0x19, 0x71, 0xa1, 0x0a, 0x0c, 0x6e, 0xf2, 0x48, 0x89, 0x1d, 0xe7, 0x8d, 0xe2, 0xca, 0xd0, + 0x8b, 0xf1, 0xbc, 0x69, 0xa5, 0x58, 0x92, 0x41, 0x2b, 0x70, 0x84, 0xa7, 0x67, 0x6d, 0x2c, 0x92, + 0x86, 0xb3, 0x23, 0x25, 0xd3, 0x05, 0xf6, 0xa9, 0x4a, 0x42, 0x5f, 0x4e, 0xa3, 0xe0, 0xac, 0x7a, + 0xe8, 0x75, 0x18, 0x8f, 0xdc, 0x26, 0xf1, 0xdb, 0x91, 0xa4, 0xc4, 0xf3, 0xa1, 0xaa, 0xc7, 0xd3, + 0x9a, 0x01, 0xc5, 0x09, 0x6c, 0xf4, 0x2a, 0x8c, 0xb5, 0x52, 0x32, 0xf8, 0xfe, 0x58, 0x58, 0x65, + 0xca, 0xdd, 0x4d, 0x5c, 0xe6, 0x9f, 0xd2, 0x66, 0xde, 0x38, 0x6b, 0x9b, 0x01, 0x09, 0x37, 0xfd, + 0x46, 0x9d, 0x71, 0xc0, 0xfd, 0x9a, 0x7f, 0x4a, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, 0x75, 0xc7, + 0x6d, 0xb4, 0x03, 0x12, 0x53, 0x19, 0x30, 0xa9, 0x2c, 0x27, 0xe0, 0x38, 0x55, 0xa3, 0xbb, 0x72, + 0x61, 0xf0, 0xc1, 0x28, 0x17, 0xec, 0x5f, 0x2d, 0x80, 0x31, 0xb5, 0xdf, 0xc3, 0xd9, 0x56, 0x5f, + 0x83, 0xbe, 0x8d, 0xa0, 0x55, 0x13, 0x06, 0x8b, 0x99, 0x5f, 0x76, 0x19, 0x57, 0x16, 0xf4, 0x2f, + 0xa3, 0xff, 0x31, 0xab, 0x45, 0xf7, 0xf8, 0xb1, 0x4a, 0xe0, 0xd3, 0x4b, 0x4e, 0x06, 0x53, 0x55, + 0x6e, 0x60, 0x83, 0xf2, 0xbd, 0xde, 0x21, 0xec, 0xb8, 0xf0, 0x65, 0xe1, 0x14, 0x0c, 0xdb, 0xbe, + 0xaa, 0x78, 0xad, 0x4b, 0x2a, 0xe8, 0x22, 0x8c, 0x88, 0x04, 0x98, 0xcc, 0x5b, 0x89, 0x6f, 0x26, + 0x66, 0x8b, 0xb8, 0x18, 0x17, 0x63, 0x1d, 0xc7, 0xfe, 0xf1, 0x02, 0x1c, 0xc9, 0x70, 0x37, 0xe5, + 0xd7, 0xc8, 0x86, 0x1b, 0x46, 0xc1, 0x4e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b, 0x0c, 0x7a, 0x56, + 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0x77, 0x2e, 0x01, 0x3d, 0xd8, 0xe5, 0x44, 0xaf, 0xed, 0x76, + 0x48, 0x64, 0x86, 0x03, 0x75, 0x6d, 0x33, 0xc3, 0x05, 0x06, 0xa1, 0x4f, 0xc0, 0x0d, 0xa5, 0x8d, + 0xd7, 0x9e, 0x80, 0x5c, 0x1f, 0xcf, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, 0x12, 0x0f, 0xc5, + 0x38, 0xf2, 0x35, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0x27, 0x73, 0x1d, 0xd0, 0x69, 0xd7, + 0x9b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xf2, 0xe4, 0xd1, 0xae, 0x49, 0x6b, 0x73, 0x45, 0x94, 0x63, + 0x85, 0x81, 0xce, 0x41, 0x3f, 0x93, 0xdb, 0x27, 0x93, 0xdf, 0xe1, 0xf9, 0x45, 0x1e, 0x0b, 0x94, + 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf1, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x23, 0x79, 0xa1, 0xd0, + 0xee, 0xfa, 0x7e, 0x03, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0xaa, 0x11, 0x3b, 0x75, 0x3f, + 0xd4, 0x06, 0xed, 0x29, 0x18, 0xdc, 0x22, 0x3b, 0x81, 0xeb, 0x6d, 0x24, 0xad, 0x5d, 0xaf, 0xf2, + 0x62, 0x2c, 0xe1, 0x66, 0x96, 0xf8, 0xc1, 0x07, 0x91, 0x25, 0x5e, 0x5f, 0x01, 0x43, 0x5d, 0xd9, + 0x93, 0x9f, 0x28, 0xc2, 0x04, 0x9e, 0x5f, 0xfc, 0x60, 0x22, 0x6e, 0xa4, 0x27, 0xe2, 0x41, 0x24, + 0x53, 0x3f, 0xd8, 0x6c, 0xfc, 0x9e, 0x05, 0x13, 0x2c, 0x0d, 0xa7, 0x88, 0x1e, 0xe3, 0xfa, 0xde, + 0x21, 0x3c, 0x05, 0x1e, 0x83, 0xfe, 0x80, 0x36, 0x2a, 0x66, 0x50, 0xed, 0x71, 0xd6, 0x13, 0xcc, + 0x61, 0xe8, 0x14, 0xf4, 0xb1, 0x2e, 0xd0, 0xc9, 0x1b, 0xe5, 0x47, 0xf0, 0xa2, 0x13, 0x39, 0x98, + 0x95, 0xb2, 0x38, 0x96, 0x98, 0xb4, 0x1a, 0x2e, 0xef, 0x74, 0x6c, 0x55, 0xf1, 0xfe, 0x08, 0x4d, + 0x93, 0xd9, 0xb5, 0xf7, 0x16, 0xc7, 0x32, 0x9b, 0x64, 0xe7, 0x67, 0xf6, 0x3f, 0x15, 0xe0, 0x4c, + 0x66, 0xbd, 0x9e, 0xe3, 0x58, 0x76, 0xae, 0xfd, 0x30, 0x93, 0xf6, 0x15, 0x0f, 0xd1, 0x97, 0xa0, + 0xaf, 0x57, 0xee, 0xbf, 0xbf, 0x87, 0xf0, 0x92, 0x99, 0x43, 0xf6, 0x3e, 0x09, 0x2f, 0x99, 0xd9, + 0xb7, 0x1c, 0x31, 0xc1, 0xb7, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0xe7, 0xe9, 0x39, 0xc3, 0x80, + 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0x73, 0x30, 0xd1, 0x74, 0x3d, 0x7a, + 
0xf8, 0xec, 0x98, 0xac, 0xb8, 0x52, 0xb7, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, 0xae, 0x16, 0x7a, + 0x92, 0x7f, 0xdd, 0xab, 0x07, 0xda, 0x75, 0xb3, 0xa6, 0xc5, 0x89, 0x1a, 0xc5, 0x8c, 0x30, 0x94, + 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbb, 0x9c, 0x68, 0x34, 0x5b, 0x46, 0x34, 0xf3, 0x2a, 0x8c, 0xdd, + 0xb7, 0x9e, 0xc5, 0xfe, 0x66, 0x11, 0x1e, 0xe9, 0xb0, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76, + 0xd6, 0xa7, 0xe6, 0xa1, 0x02, 0x47, 0xd7, 0xdb, 0x8d, 0xc6, 0x0e, 0x73, 0xc0, 0x23, 0x75, 0x89, + 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xba, 0x9c, 0x81, 0x83, 0x33, 0x6b, 0xd2, 0x27, 0x16, 0xbd, + 0x49, 0x76, 0x14, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xe5, 0x6c, + 0x3b, 0x2e, 0x4f, 0x7a, 0x22, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x25, 0x11, 0x70, 0xba, + 0x4e, 0x8e, 0x4a, 0xa8, 0x78, 0x5f, 0x2a, 0x21, 0x33, 0x08, 0xe2, 0x40, 0x7e, 0x10, 0xc4, 0xce, + 0xe7, 0x62, 0xd7, 0x7c, 0x91, 0xef, 0xc0, 0xd8, 0x41, 0x2d, 0xc7, 0x9f, 0x82, 0xc1, 0x40, 0x64, + 0xe2, 0x4f, 0x78, 0xbb, 0xcb, 0x3c, 0xe5, 0x12, 0x6e, 0xff, 0x6f, 0x16, 0x28, 0x59, 0xb2, 0x19, + 0xef, 0xfc, 0x55, 0x66, 0x06, 0xcf, 0xa5, 0xe0, 0x5a, 0x88, 0xb3, 0x63, 0x9a, 0x19, 0x7c, 0x0c, + 0xc4, 0x26, 0x2e, 0x5f, 0x6e, 0x61, 0x1c, 0x59, 0xc3, 0x78, 0x40, 0x08, 0x0d, 0xa4, 0xc2, 0x40, + 0x9f, 0x80, 0xc1, 0xba, 0xbb, 0xed, 0x86, 0x42, 0x8e, 0x76, 0x60, 0x1d, 0x60, 0xfc, 0x7d, 0x8b, + 0x9c, 0x0c, 0x96, 0xf4, 0xec, 0x9f, 0xb2, 0x40, 0xa9, 0x4e, 0xaf, 0x10, 0xa7, 0x11, 0x6d, 0xa2, + 0x37, 0x00, 0x24, 0x05, 0x25, 0x7b, 0x93, 0x06, 0x5d, 0x80, 0x15, 0x64, 0xdf, 0xf8, 0x87, 0xb5, + 0x3a, 0xe8, 0x75, 0x18, 0xd8, 0x64, 0xb4, 0xc4, 0xb7, 0x9d, 0x53, 0xaa, 0x2e, 0x56, 0xba, 0xbf, + 0x5b, 0x3a, 0x6a, 0xb6, 0x29, 0x6f, 0x31, 0x5e, 0xcb, 0xfe, 0x89, 0x42, 0x3c, 0xa7, 0x6f, 0xb5, + 0xfd, 0xc8, 0x39, 0x04, 0x4e, 0xe4, 0xb2, 0xc1, 0x89, 0x3c, 0xd1, 0x49, 0x37, 0xcc, 0xba, 0x94, + 0xcb, 0x81, 0x5c, 0x4f, 0x70, 0x20, 0x4f, 0x76, 0x27, 0xd5, 0x99, 0xf3, 0xf8, 0x6f, 0x2c, 0x98, + 0x32, 0xf0, 0x0f, 0xe1, 0x02, 0x5c, 0x36, 0x2f, 0xc0, 0x47, 0xbb, 0x7e, 0x43, 0xce, 0xc5, 0xf7, + 0xa3, 0xc5, 0x44, 0xdf, 0xd9, 0x85, 0xf7, 0x2e, 0xf4, 0x6d, 0x3a, 0x41, 0x5d, 0xbc, 0xeb, 0x2f, + 0xf4, 0x34, 0xd6, 0xb3, 0x57, 0x9c, 0x40, 0x18, 0x83, 0x3c, 0x2b, 0x47, 0x9d, 0x16, 0x75, 0x35, + 0x04, 0x61, 0x4d, 0xa1, 0x4b, 0x30, 0x10, 0xd6, 0xfc, 0x96, 0xf2, 0x29, 0x64, 0x49, 0xd4, 0xab, + 0xac, 0x64, 0x7f, 0xb7, 0x84, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, 0xe8, 0x6d, 0x18, 0x63, 0xbf, + 0x94, 0x65, 0x66, 0x31, 0x5f, 0x02, 0x53, 0xd5, 0x11, 0xb9, 0xd9, 0xb2, 0x51, 0x84, 0x4d, 0x52, + 0x33, 0x1b, 0x30, 0xac, 0x3e, 0xeb, 0xa1, 0x6a, 0xfe, 0xff, 0xba, 0x08, 0x47, 0x32, 0xd6, 0x1c, + 0x0a, 0x8d, 0x99, 0xb8, 0xd8, 0xe3, 0x52, 0x7d, 0x8f, 0x73, 0x11, 0xb2, 0x07, 0x60, 0x5d, 0xac, + 0xad, 0x9e, 0x1b, 0xbd, 0x11, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7b, 0xa3, 0xb4, 0xb1, 0x43, 0x1b, + 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0xe9, 0x83, 0xa3, 0x59, 0xe6, 0x2a, 0xe8, + 0x73, 0x30, 0xc0, 0x9c, 0xde, 0xa4, 0xe0, 0xec, 0xc5, 0x5e, 0x0d, 0x5d, 0x66, 0x99, 0xdf, 0x9c, + 0x08, 0x99, 0x3b, 0x2b, 0x8f, 0x23, 0x5e, 0xd8, 0x75, 0x98, 0x45, 0x9b, 0x2c, 0x94, 0x95, 0xb8, + 0x3d, 0xe5, 0xf1, 0xf1, 0x91, 0x9e, 0x3b, 0x20, 0xee, 0xdf, 0x30, 0x61, 0xf5, 0x25, 0x8b, 0xbb, + 0x5b, 0x7d, 0xc9, 0x96, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0x9c, 0xa8, 0xd8, 0xfd, 0x08, 0xe3, 0xb6, + 0x44, 0xea, 0x00, 0x16, 0x36, 0x44, 0x82, 0xc0, 0x8c, 0x0b, 0x23, 0xda, 0xc0, 0x3c, 0xd4, 0xc5, + 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xea, 0x02, 0xfa, 0x59, 0xed, 0xee, 0x17, 0xe7, 0xc1, + 0x87, 0x0d, 0xde, 0xe9, 
0x54, 0xc2, 0x15, 0x31, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x35, 0x63, 0xcd, + 0xe7, 0x26, 0xcc, 0x32, 0x2f, 0xfc, 0xce, 0xf1, 0xe5, 0xed, 0x9f, 0xb3, 0x20, 0xe1, 0x2c, 0xa6, + 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x2c, 0xf4, 0x05, 0x7e, 0x43, 0xf2, 0x53, 0x0a, 0x03, 0xfb, + 0x0d, 0x82, 0x19, 0x84, 0x62, 0x44, 0xb1, 0x10, 0x6b, 0x54, 0x7f, 0xa0, 0x8b, 0xa7, 0xf7, 0x63, + 0xd0, 0xdf, 0x20, 0xdb, 0xa4, 0x91, 0xcc, 0x1b, 0x7b, 0x8d, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0xaf, + 0x0f, 0x4e, 0x77, 0x8c, 0x78, 0x47, 0x19, 0xcc, 0x0d, 0x27, 0x22, 0x77, 0x9c, 0x9d, 0x64, 0xbe, + 0xc4, 0xcb, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0xdc, 0xe6, 0x39, 0x80, 0x12, 0xc2, 0x61, 0x91, 0xfa, + 0x47, 0x40, 0x4d, 0x61, 0x63, 0xf1, 0x41, 0x08, 0x1b, 0x9f, 0x07, 0x08, 0xc3, 0x06, 0xb7, 0x09, + 0xad, 0x0b, 0x8f, 0xf0, 0x38, 0x57, 0x54, 0xf5, 0x9a, 0x80, 0x60, 0x0d, 0x0b, 0x2d, 0xc2, 0x64, + 0x2b, 0xf0, 0x23, 0x2e, 0x6b, 0x5f, 0xe4, 0x66, 0xd3, 0xfd, 0x66, 0xb0, 0xb1, 0x4a, 0x02, 0x8e, + 0x53, 0x35, 0xd0, 0x4b, 0x30, 0x22, 0x02, 0x90, 0x55, 0x7c, 0xbf, 0x21, 0xc4, 0x7b, 0xca, 0x92, + 0xb8, 0x1a, 0x83, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0x02, 0xfc, 0xc1, 0xcc, 0x6a, 0x5c, 0x88, 0xaf, + 0xe1, 0x25, 0x92, 0x15, 0x0c, 0xf5, 0x94, 0xac, 0x20, 0x16, 0x78, 0x0e, 0xf7, 0xac, 0x4f, 0x86, + 0xae, 0x22, 0xc2, 0xaf, 0xf5, 0xc1, 0x11, 0xb1, 0x70, 0x1e, 0xf6, 0x72, 0xb9, 0x91, 0x5e, 0x2e, + 0x0f, 0x42, 0x24, 0xfa, 0xc1, 0x9a, 0x39, 0xec, 0x35, 0xf3, 0x93, 0x16, 0x98, 0x3c, 0x24, 0xfa, + 0x8f, 0x72, 0x13, 0xce, 0xbe, 0x94, 0xcb, 0x93, 0xc6, 0x91, 0xcc, 0xdf, 0x5b, 0xea, 0x59, 0xfb, + 0x7f, 0xb1, 0xe0, 0xd1, 0xae, 0x14, 0xd1, 0x12, 0x0c, 0x33, 0x46, 0x57, 0x7b, 0x17, 0x3f, 0xa9, + 0xdc, 0x2a, 0x24, 0x20, 0x87, 0xef, 0x8e, 0x6b, 0xa2, 0xa5, 0x54, 0x66, 0xdf, 0xa7, 0x32, 0x32, + 0xfb, 0x1e, 0x33, 0x86, 0xe7, 0x3e, 0x53, 0xfb, 0x7e, 0x89, 0xde, 0x38, 0xa6, 0x6f, 0xe6, 0x47, + 0x0c, 0x71, 0xae, 0x9d, 0x10, 0xe7, 0x22, 0x13, 0x5b, 0xbb, 0x43, 0xde, 0x80, 0x49, 0x16, 0x99, + 0x94, 0x39, 0xf9, 0x08, 0xa7, 0xce, 0x42, 0x6c, 0xc8, 0x7f, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb, + 0x1f, 0x8a, 0x30, 0xc0, 0xb7, 0xdf, 0x21, 0x3c, 0x7c, 0x9f, 0x81, 0x61, 0xb7, 0xd9, 0x6c, 0xf3, + 0x64, 0xad, 0xfd, 0xb1, 0x59, 0x78, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0x96, 0x85, 0x26, 0xa1, 0x43, + 0xf0, 0x73, 0xde, 0xf1, 0xd9, 0x45, 0x27, 0x72, 0x38, 0x17, 0xa7, 0xee, 0xd9, 0x58, 0xe7, 0x80, + 0x3e, 0x0d, 0x10, 0x46, 0x81, 0xeb, 0x6d, 0xd0, 0x32, 0x91, 0x21, 0xe3, 0xe9, 0x0e, 0xd4, 0xaa, + 0x0a, 0x99, 0xd3, 0x8c, 0xcf, 0x1c, 0x05, 0xc0, 0x1a, 0x45, 0x34, 0x6b, 0xdc, 0xf4, 0x33, 0x89, + 0xb9, 0x03, 0x4e, 0x35, 0x9e, 0xb3, 0x99, 0x97, 0x61, 0x58, 0x11, 0xef, 0x26, 0x57, 0x1c, 0xd5, + 0x19, 0xb6, 0x8f, 0xc1, 0x44, 0xa2, 0x6f, 0x07, 0x12, 0x4b, 0xfe, 0xbe, 0x05, 0x13, 0xbc, 0x33, + 0x4b, 0xde, 0xb6, 0xb8, 0x0d, 0xee, 0xc1, 0xd1, 0x46, 0xc6, 0xa9, 0x2c, 0xa6, 0xbf, 0xf7, 0x53, + 0x5c, 0x89, 0x21, 0xb3, 0xa0, 0x38, 0xb3, 0x0d, 0x74, 0x9e, 0xee, 0x38, 0x7a, 0xea, 0x3a, 0x0d, + 0x11, 0x99, 0x64, 0x94, 0xef, 0x36, 0x5e, 0x86, 0x15, 0xd4, 0xfe, 0x5b, 0x0b, 0xa6, 0x78, 0xcf, + 0xaf, 0x92, 0x1d, 0x75, 0x36, 0x7d, 0x27, 0xfb, 0x2e, 0xd2, 0x84, 0x17, 0x72, 0xd2, 0x84, 0xeb, + 0x9f, 0x56, 0xec, 0xf8, 0x69, 0x5f, 0xb5, 0x40, 0xac, 0x90, 0x43, 0x90, 0xb4, 0x7c, 0x9f, 0x29, + 0x69, 0x99, 0xc9, 0xdf, 0x04, 0x39, 0x22, 0x96, 0x7f, 0xb3, 0x60, 0x92, 0x23, 0xc4, 0x56, 0x10, + 0xdf, 0xd1, 0x79, 0x98, 0x37, 0xbf, 0x28, 0xd3, 0xac, 0xf5, 0x2a, 0xd9, 0x59, 0xf3, 0x2b, 0x4e, + 0xb4, 0x99, 0xfd, 0x51, 0xc6, 0x64, 0xf5, 0x75, 0x9c, 0xac, 0xba, 0xdc, 0x40, 0x46, 0x42, 0xc8, + 0x2e, 0x02, 0xe0, 0x83, 0x26, 0x84, 0xb4, 0xff, 
0xd1, 0x02, 0xc4, 0x9b, 0x31, 0x18, 0x37, 0xca, + 0x0e, 0xb1, 0x52, 0xed, 0xa2, 0x8b, 0x8f, 0x26, 0x05, 0xc1, 0x1a, 0xd6, 0x03, 0x19, 0x9e, 0x84, + 0x29, 0x4b, 0xb1, 0xbb, 0x29, 0xcb, 0x01, 0x46, 0xf4, 0xab, 0x83, 0x90, 0x74, 0xeb, 0x44, 0x37, + 0x61, 0xb4, 0xe6, 0xb4, 0x9c, 0xdb, 0x6e, 0xc3, 0x8d, 0x5c, 0x12, 0x76, 0xb2, 0x73, 0x5b, 0xd0, + 0xf0, 0x84, 0xf1, 0x81, 0x56, 0x82, 0x0d, 0x3a, 0x68, 0x16, 0xa0, 0x15, 0xb8, 0xdb, 0x6e, 0x83, + 0x6c, 0x30, 0x81, 0x10, 0x8b, 0x85, 0xc4, 0x8d, 0xee, 0x64, 0x29, 0xd6, 0x30, 0x32, 0x42, 0x90, + 0x14, 0x1f, 0x72, 0x08, 0x12, 0x38, 0xb4, 0x10, 0x24, 0x7d, 0x07, 0x0a, 0x41, 0x32, 0x74, 0xe0, + 0x10, 0x24, 0xfd, 0x3d, 0x85, 0x20, 0xc1, 0x70, 0x5c, 0xf2, 0x9e, 0xf4, 0xff, 0xb2, 0xdb, 0x20, + 0xe2, 0xc1, 0xc1, 0x03, 0x38, 0xcd, 0xec, 0xed, 0x96, 0x8e, 0xe3, 0x4c, 0x0c, 0x9c, 0x53, 0x13, + 0x7d, 0x1c, 0xa6, 0x9d, 0x46, 0xc3, 0xbf, 0xa3, 0x26, 0x75, 0x29, 0xac, 0x39, 0x8d, 0x38, 0xae, + 0xdf, 0xd0, 0xfc, 0xa9, 0xbd, 0xdd, 0xd2, 0xf4, 0x5c, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x83, + 0xe1, 0x56, 0xe0, 0xd7, 0x56, 0x34, 0xdf, 0xf3, 0x33, 0x74, 0x00, 0x2b, 0xb2, 0x70, 0x7f, 0xb7, + 0x34, 0xa6, 0xfe, 0xb0, 0x0b, 0x3f, 0xae, 0x90, 0x11, 0xdd, 0x63, 0xe4, 0x61, 0x47, 0xf7, 0x18, + 0x7d, 0xc0, 0xd1, 0x3d, 0xec, 0x2d, 0x38, 0x52, 0x25, 0x81, 0xeb, 0x34, 0xdc, 0x7b, 0x94, 0x27, + 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x07, 0x89, 0x53, 0xbf, 0xa7, 0xa0, 0xe7, 0x9a, 0x5c, 0x46, 0x9e, + 0xf2, 0x31, 0x21, 0xfb, 0xff, 0xb7, 0x60, 0x50, 0xb8, 0x8a, 0x1e, 0x02, 0x67, 0x3a, 0x67, 0xa8, + 0x64, 0x4a, 0xd9, 0x93, 0xc2, 0x3a, 0x93, 0xab, 0x8c, 0x29, 0x27, 0x94, 0x31, 0x8f, 0x76, 0x22, + 0xd2, 0x59, 0x0d, 0xf3, 0x9f, 0x15, 0xe9, 0x0b, 0xc1, 0x08, 0x5a, 0xf0, 0xf0, 0x87, 0x60, 0x15, + 0x06, 0x43, 0xe1, 0x34, 0x5f, 0xc8, 0xf7, 0xe5, 0x49, 0x4e, 0x62, 0x6c, 0x03, 0x29, 0xdc, 0xe4, + 0x25, 0x91, 0x4c, 0x6f, 0xfc, 0xe2, 0x43, 0xf4, 0xc6, 0xef, 0x16, 0xd6, 0xa1, 0xef, 0x41, 0x84, + 0x75, 0xb0, 0xbf, 0xce, 0x6e, 0x67, 0xbd, 0xfc, 0x10, 0x18, 0xb7, 0xcb, 0xe6, 0x3d, 0x6e, 0x77, + 0x58, 0x59, 0xa2, 0x53, 0x39, 0x0c, 0xdc, 0xef, 0x5a, 0x70, 0x3a, 0xe3, 0xab, 0x34, 0x6e, 0xee, + 0x59, 0x18, 0x72, 0xda, 0x75, 0x57, 0xed, 0x65, 0x4d, 0x5b, 0x3c, 0x27, 0xca, 0xb1, 0xc2, 0x40, + 0x0b, 0x30, 0x45, 0xee, 0xb6, 0x5c, 0xae, 0x86, 0xd7, 0x4d, 0xc7, 0x8b, 0xdc, 0xbf, 0x78, 0x29, + 0x09, 0xc4, 0x69, 0x7c, 0x15, 0x1a, 0xae, 0x98, 0x1b, 0x1a, 0xee, 0x37, 0x2d, 0x18, 0x51, 0x6e, + 0xe3, 0x0f, 0x7d, 0xb4, 0xdf, 0x30, 0x47, 0xfb, 0x91, 0x0e, 0xa3, 0x9d, 0x33, 0xcc, 0x7f, 0x53, + 0x50, 0xfd, 0xad, 0xf8, 0x41, 0xd4, 0x03, 0x97, 0x78, 0xff, 0x6e, 0x2f, 0x17, 0x61, 0xc4, 0x69, + 0xb5, 0x24, 0x40, 0xda, 0x2f, 0xb2, 0x14, 0x16, 0x71, 0x31, 0xd6, 0x71, 0x94, 0x17, 0x4e, 0x31, + 0xd7, 0x0b, 0xa7, 0x0e, 0x10, 0x39, 0xc1, 0x06, 0x89, 0x68, 0x99, 0x30, 0xb7, 0xce, 0x3f, 0x6f, + 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0xb2, 0x17, 0x5d, 0x0f, 0xf8, 0x33, + 0x55, 0x0b, 0xc0, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x0c, 0x91, 0xc2, 0xda, 0xe8, 0x37, 0x0d, 0x61, + 0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0xcb, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0xc0, 0x82, 0xff, + 0x34, 0xaa, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf5, 0xf0, 0x85, 0x9d, 0x0f, 0x7b, 0xda, 0xb0, 0xee, + 0xcf, 0x1a, 0xc7, 0x38, 0x44, 0x9f, 0x4c, 0x19, 0x37, 0x3d, 0xd7, 0xe5, 0xd6, 0x38, 0x80, 0x39, + 0x13, 0xcb, 0x67, 0xc7, 0xb2, 0x7d, 0x95, 0x2b, 0x62, 0x5f, 0x68, 0xf9, 0xec, 0x04, 0x00, 0xc7, + 0x38, 0x94, 0x61, 0x53, 0x7f, 0xc2, 0x69, 0x14, 0x87, 0x3d, 0x57, 0xd8, 0x21, 0xd6, 0x30, 0xd0, + 0x05, 0x21, 0xb4, 0xe0, 0xba, 0x87, 0x47, 0x12, 0x42, 0x0b, 0x39, 0x5c, 
0x9a, 0xa4, 0xe9, 0x22, + 0x8c, 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0x41, 0x5b, 0xe8, 0x8f, 0xa3, 0xeb, 0x2e, 0xc5, 0xc5, + 0x58, 0xc7, 0x41, 0x6b, 0x30, 0x11, 0x72, 0x59, 0x9e, 0x4a, 0xb6, 0xc1, 0x65, 0xa2, 0x4f, 0x2b, + 0x87, 0x7d, 0x13, 0xbc, 0xcf, 0x8a, 0xf8, 0xe9, 0x24, 0xc3, 0x98, 0x24, 0x49, 0xa0, 0xd7, 0x61, + 0xbc, 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0x36, 0x3e, 0x43, 0x46, 0x2c, 0xcb, 0xf1, + 0x6b, 0x06, 0x14, 0x27, 0xb0, 0x29, 0x83, 0xa8, 0x97, 0x88, 0x04, 0x31, 0x8e, 0xb7, 0x41, 0xc2, + 0xe9, 0x61, 0xf6, 0x55, 0x8c, 0x41, 0xbc, 0x96, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x4b, 0x30, 0x2a, + 0x3f, 0x5f, 0x8b, 0xfa, 0x13, 0x3b, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x8e, 0xc9, 0xff, + 0x6b, 0x81, 0xb3, 0xbe, 0xee, 0xd6, 0x44, 0x28, 0x0c, 0xee, 0xfc, 0xfd, 0x31, 0xe9, 0x69, 0xba, + 0x94, 0x85, 0xb4, 0xbf, 0x5b, 0x3a, 0x25, 0x46, 0x2d, 0x13, 0x8e, 0xb3, 0x69, 0xa3, 0x15, 0x38, + 0xc2, 0x6d, 0x60, 0x16, 0x36, 0x49, 0x6d, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x57, + 0xd2, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x07, 0xa6, 0x5b, 0xed, 0xdb, 0x0d, 0x37, 0xdc, 0x5c, 0xf5, + 0x23, 0x66, 0x42, 0x36, 0x57, 0xaf, 0x07, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x91, 0x9a, + 0x2a, 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x07, 0xc7, 0x12, 0x0b, 0x41, 0x84, 0x5c, 0x19, 0xcf, + 0x4f, 0xb5, 0x55, 0xcd, 0xaa, 0x20, 0xa2, 0x17, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x15, 0x00, + 0xb7, 0xb5, 0xec, 0x34, 0xdd, 0x06, 0x7d, 0x8e, 0x1e, 0x61, 0x6b, 0x84, 0x3e, 0x4d, 0xa0, 0x5c, + 0x91, 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc1, 0xb8, 0xf8, 0xb7, 0x23, + 0xa6, 0x74, 0x4a, 0x65, 0x65, 0x1d, 0x97, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda, + 0x80, 0xd3, 0x32, 0x25, 0xac, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xbf, 0xd5, 0x10, 0xf7, 0x29, + 0x9a, 0xeb, 0x84, 0x88, 0x3b, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xb1, 0x38, + 0x22, 0xe8, 0xb5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0xc7, 0x5c, 0x2f, 0x6b, 0x55, 0x1f, 0x67, + 0x84, 0x3e, 0xca, 0x9d, 0xe5, 0x3b, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0xca, 0x70, 0x24, + 0xe2, 0x05, 0x8b, 0x6e, 0xc8, 0xd3, 0xe7, 0xd0, 0x67, 0xdf, 0x09, 0xd6, 0xdc, 0x09, 0xba, 0x9a, + 0xd7, 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0xbd, 0x19, 0x80, 0x7e, 0xc3, 0xa2, 0xb5, 0x35, 0x46, 0x1f, + 0x7d, 0x06, 0x46, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xb9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33, + 0x41, 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x2d, 0x23, 0xc8, 0xc5, 0x85, 0xde, 0x98, 0xa2, + 0xde, 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x0d, 0x86, 0x6a, 0x0d, 0x97, 0x78, 0x51, 0xb9, + 0xd2, 0x29, 0x50, 0xeb, 0x82, 0xc0, 0x11, 0x5b, 0x51, 0x64, 0xbd, 0xe2, 0x65, 0x58, 0x51, 0xb0, + 0x2f, 0xc1, 0x48, 0xb5, 0x41, 0x48, 0x8b, 0xfb, 0x71, 0xa1, 0xa7, 0xd8, 0xc3, 0x84, 0xb1, 0x96, + 0x16, 0x63, 0x2d, 0xf5, 0x37, 0x07, 0x63, 0x2a, 0x25, 0xdc, 0xfe, 0xb3, 0x02, 0x94, 0xba, 0x24, + 0x5f, 0x4b, 0xe8, 0xdb, 0xac, 0x9e, 0xf4, 0x6d, 0x73, 0x30, 0x11, 0xff, 0xd3, 0x45, 0x79, 0xca, + 0x18, 0xfa, 0xa6, 0x09, 0xc6, 0x49, 0xfc, 0x9e, 0xfd, 0x5a, 0x74, 0x95, 0x5d, 0x5f, 0x57, 0xcf, + 0x2c, 0x43, 0x55, 0xdf, 0xdf, 0xfb, 0xdb, 0x3b, 0x57, 0xed, 0x6a, 0x7f, 0xbd, 0x00, 0xc7, 0xd4, + 0x10, 0x7e, 0xef, 0x0e, 0xdc, 0x8d, 0xf4, 0xc0, 0x3d, 0x00, 0xa5, 0xb5, 0x7d, 0x1d, 0x06, 0x78, + 0xf4, 0xd8, 0x1e, 0x78, 0xfe, 0xc7, 0xcc, 0x40, 0xfe, 0x8a, 0xcd, 0x34, 0x82, 0xf9, 0xff, 0x98, + 0x05, 0x13, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0xfb, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0xb3, + 0xd0, 0xb7, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xc5, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0xdf, 0x59, + 
0xd0, 0xbf, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x97, 0xef, 0x42, 0x2f, + 0xc1, 0x00, 0x59, 0x5f, 0x27, 0xb5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0x31, 0xb0, 0xc4, 0x4a, 0x29, + 0x13, 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0xb7, 0x60, 0x38, 0x72, 0x9b, 0x64, 0xae, 0x5e, + 0x17, 0x36, 0x01, 0xf7, 0x11, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0xcb, 0x05, 0x80, + 0x38, 0x5a, 0x5d, 0xb7, 0x4f, 0x9c, 0x4f, 0x69, 0x8b, 0xcf, 0x65, 0x68, 0x8b, 0x51, 0x4c, 0x30, + 0x43, 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd3, 0x30, 0xf5, 0x1d, 0x64, 0x98, 0x16, 0x60, 0x2a, 0x8e, + 0xb6, 0x67, 0x06, 0x1b, 0x65, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0xb3, 0x2a, + 0xe8, 0x98, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x32, 0x4e, 0xb1, 0x3a, 0xbc, 0x90, + 0xab, 0x0e, 0xff, 0x45, 0x0b, 0x8e, 0x26, 0xdb, 0x61, 0x7e, 0xf7, 0x5f, 0xb4, 0xe0, 0x58, 0x9c, + 0x7b, 0x28, 0x6d, 0x82, 0xf0, 0x62, 0xc7, 0x40, 0x6a, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64, + 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0x5f, 0x1f, 0x4c, 0xe7, 0x45, 0x60, 0x63, 0x9e, 0x46, 0xce, + 0xdd, 0xea, 0x16, 0xb9, 0x23, 0xfc, 0x39, 0x62, 0x4f, 0x23, 0x5e, 0x8c, 0x25, 0x3c, 0x99, 0x6e, + 0xaa, 0xd0, 0x63, 0xba, 0xa9, 0x4d, 0x98, 0xba, 0xb3, 0x49, 0xbc, 0x1b, 0x5e, 0xe8, 0x44, 0x6e, + 0xb8, 0xee, 0x32, 0x05, 0x3a, 0x5f, 0x37, 0xaf, 0x48, 0xaf, 0x8b, 0x5b, 0x49, 0x84, 0xfd, 0xdd, + 0xd2, 0x69, 0xa3, 0x20, 0xee, 0x32, 0x3f, 0x48, 0x70, 0x9a, 0x68, 0x3a, 0x5b, 0x57, 0xdf, 0x43, + 0xce, 0xd6, 0xd5, 0x74, 0x85, 0xd9, 0x8d, 0x74, 0x23, 0x61, 0xcf, 0xd6, 0x15, 0x55, 0x8a, 0x35, + 0x0c, 0xf4, 0x29, 0x40, 0x7a, 0xba, 0x45, 0x23, 0x00, 0xee, 0x73, 0x7b, 0xbb, 0x25, 0xb4, 0x9a, + 0x82, 0xee, 0xef, 0x96, 0x8e, 0xd0, 0xd2, 0xb2, 0x47, 0x9f, 0xbf, 0x71, 0xd4, 0xc0, 0x0c, 0x42, + 0xe8, 0x16, 0x4c, 0xd2, 0x52, 0xb6, 0xa3, 0x64, 0x74, 0x5d, 0xfe, 0x64, 0x7d, 0x66, 0x6f, 0xb7, + 0x34, 0xb9, 0x9a, 0x80, 0xe5, 0x91, 0x4e, 0x11, 0xc9, 0x48, 0xda, 0x35, 0xd4, 0x6b, 0xd2, 0x2e, + 0xfb, 0x8b, 0x16, 0x9c, 0xa4, 0x17, 0x5c, 0xfd, 0x5a, 0x8e, 0x16, 0xdd, 0x69, 0xb9, 0x5c, 0x4f, + 0x23, 0xae, 0x1a, 0x26, 0xab, 0xab, 0x94, 0xb9, 0x96, 0x46, 0x41, 0xe9, 0x09, 0xbf, 0xe5, 0x7a, + 0xf5, 0xe4, 0x09, 0x7f, 0xd5, 0xf5, 0xea, 0x98, 0x41, 0xd4, 0x95, 0x55, 0xcc, 0x8d, 0xd6, 0xff, + 0x35, 0xba, 0x57, 0x69, 0x5f, 0xbe, 0xa3, 0xdd, 0x40, 0xcf, 0xe8, 0x3a, 0x55, 0x61, 0x3e, 0x99, + 0xab, 0x4f, 0xfd, 0x82, 0x05, 0xc2, 0xfb, 0xbd, 0x87, 0x3b, 0xf9, 0x6d, 0x18, 0xdd, 0x4e, 0xa7, + 0xa2, 0x3d, 0x9b, 0x1f, 0x0e, 0x40, 0x24, 0xa0, 0x55, 0x2c, 0xba, 0x91, 0x76, 0xd6, 0xa0, 0x65, + 0xd7, 0x41, 0x40, 0x17, 0x09, 0xd3, 0x6a, 0x74, 0xef, 0xcd, 0xf3, 0x00, 0x75, 0x86, 0xcb, 0xf2, + 0xd3, 0x17, 0x4c, 0x8e, 0x6b, 0x51, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x7a, 0x11, 0x46, 0x64, 0xea, + 0xd3, 0xb6, 0xd7, 0x8b, 0xec, 0x51, 0x67, 0x9c, 0x0a, 0x5d, 0x19, 0xa7, 0x77, 0x60, 0x2a, 0x20, + 0xb5, 0x76, 0x10, 0xba, 0xdb, 0x44, 0x82, 0xc5, 0x26, 0x99, 0xe5, 0xc9, 0x22, 0x12, 0xc0, 0x7d, + 0x16, 0x22, 0x2b, 0x51, 0xc8, 0x94, 0xc6, 0x69, 0x42, 0xe8, 0x02, 0x0c, 0x33, 0xd1, 0x7b, 0x25, + 0x16, 0x08, 0x2b, 0xc1, 0xd7, 0x8a, 0x04, 0xe0, 0x18, 0x87, 0x3d, 0x0e, 0xda, 0xb7, 0x19, 0x7a, + 0xc2, 0x13, 0xbc, 0xca, 0x8b, 0xb1, 0x84, 0xa3, 0x8f, 0xc3, 0x24, 0xaf, 0x17, 0xf8, 0x2d, 0x67, + 0x83, 0xab, 0x04, 0xfb, 0x55, 0x78, 0x9d, 0xc9, 0x95, 0x04, 0x6c, 0x7f, 0xb7, 0x74, 0x34, 0x59, + 0xc6, 0xba, 0x9d, 0xa2, 0xc2, 0x2c, 0xff, 0x78, 0x23, 0xf4, 0xce, 0x48, 0x19, 0x0c, 0xc6, 0x20, + 0xac, 0xe3, 0xd9, 0xff, 0x6a, 0xc1, 0x94, 0x36, 0x55, 0x3d, 0xe7, 0xeb, 0x30, 0x06, 0xa9, 0xd0, + 0xc3, 0x20, 0x1d, 0x2c, 
0xda, 0x43, 0xe6, 0x0c, 0xf7, 0x3d, 0xa0, 0x19, 0xb6, 0x3f, 0x03, 0x28, + 0x9d, 0x57, 0x17, 0xbd, 0xc9, 0x0d, 0xf9, 0xdd, 0x80, 0xd4, 0x3b, 0x29, 0xfc, 0xf5, 0xc8, 0x39, + 0xd2, 0x73, 0x95, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0xe3, 0x7d, 0x30, 0x99, 0x8c, 0xd5, 0x81, 0xae, + 0xc0, 0x00, 0xe7, 0xd2, 0x05, 0xf9, 0x0e, 0xf6, 0x64, 0x5a, 0x84, 0x0f, 0x9e, 0x4b, 0x87, 0x73, + 0xf7, 0xa2, 0x3e, 0x7a, 0x07, 0x46, 0xea, 0xfe, 0x1d, 0xef, 0x8e, 0x13, 0xd4, 0xe7, 0x2a, 0x65, + 0x71, 0x42, 0x64, 0x0a, 0xa0, 0x16, 0x63, 0x34, 0x3d, 0x6a, 0x08, 0xb3, 0x9d, 0x88, 0x41, 0x58, + 0x27, 0x87, 0xd6, 0x58, 0x7a, 0xa7, 0x75, 0x77, 0x63, 0xc5, 0x69, 0x75, 0xf2, 0xea, 0x5a, 0x90, + 0x48, 0x1a, 0xe5, 0x31, 0x91, 0x03, 0x8a, 0x03, 0x70, 0x4c, 0x08, 0x7d, 0x0e, 0x8e, 0x84, 0x39, + 0x2a, 0xb1, 0xbc, 0x34, 0xeb, 0x9d, 0xb4, 0x44, 0x5c, 0x98, 0x92, 0xa5, 0x3c, 0xcb, 0x6a, 0x06, + 0xdd, 0x05, 0x24, 0x44, 0xcf, 0x6b, 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0xd3, 0x3f, + 0x7d, 0x38, 0x5b, 0x4e, 0x90, 0xc4, 0xd6, 0xda, 0x66, 0xe1, 0x85, 0xd3, 0x18, 0x38, 0xa3, 0x0d, + 0xfb, 0x0b, 0x7d, 0x30, 0x23, 0x13, 0x59, 0x67, 0x78, 0xaf, 0x7c, 0xde, 0x4a, 0xb8, 0xaf, 0xbc, + 0x92, 0x7f, 0xd0, 0x3f, 0x34, 0x27, 0x96, 0x2f, 0xa5, 0x9d, 0x58, 0x5e, 0x3b, 0x60, 0x37, 0x1e, + 0x98, 0x2b, 0xcb, 0xf7, 0xac, 0xff, 0xc9, 0xde, 0x51, 0x30, 0xae, 0x66, 0x84, 0x79, 0xec, 0xf6, + 0x8a, 0x54, 0x1d, 0xe5, 0x3c, 0xff, 0xaf, 0x08, 0x1c, 0xe3, 0xb2, 0x1f, 0x95, 0x11, 0xde, 0xd9, + 0x39, 0xab, 0xe8, 0x50, 0x9a, 0xa4, 0xd9, 0x8a, 0x76, 0x16, 0xdd, 0x40, 0xf4, 0x38, 0x93, 0xe6, + 0x92, 0xc0, 0x49, 0xd3, 0x94, 0x10, 0xac, 0xe8, 0xa0, 0x6d, 0x98, 0xda, 0x60, 0x11, 0x9f, 0xb4, + 0x9c, 0xd2, 0xe2, 0x5c, 0xc8, 0xdc, 0xb7, 0x97, 0x17, 0x96, 0xf2, 0x13, 0x50, 0xf3, 0xc7, 0x5f, + 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa8, 0x73, 0x27, 0x5c, 0x6a, 0x38, 0x61, 0xe4, 0xd6, + 0xe6, 0x1b, 0x7e, 0x6d, 0xab, 0x1a, 0xf9, 0x81, 0x4c, 0x16, 0x99, 0xf9, 0xf6, 0x9a, 0xbb, 0x55, + 0x4d, 0xe1, 0x1b, 0xcd, 0x4f, 0xef, 0xed, 0x96, 0x8e, 0x66, 0x61, 0xe1, 0xcc, 0xb6, 0xd0, 0x2a, + 0x0c, 0x6e, 0xb8, 0x11, 0x26, 0x2d, 0x5f, 0x9c, 0x16, 0x99, 0x47, 0xe1, 0x65, 0x8e, 0x62, 0xb4, + 0xc4, 0x22, 0x52, 0x09, 0x00, 0x96, 0x44, 0xd0, 0x9b, 0xea, 0x12, 0x18, 0xc8, 0x17, 0xc0, 0xa6, + 0x6d, 0xef, 0x32, 0xaf, 0x81, 0xd7, 0xa1, 0xe8, 0xad, 0x87, 0x9d, 0x62, 0xf1, 0xac, 0x2e, 0x1b, + 0xf2, 0xb3, 0xf9, 0x41, 0xfa, 0x34, 0x5e, 0x5d, 0xae, 0x62, 0x5a, 0x91, 0xb9, 0xbd, 0x86, 0xb5, + 0xd0, 0x15, 0x89, 0xa7, 0x32, 0xbd, 0x80, 0xcb, 0xd5, 0x85, 0x6a, 0xd9, 0xa0, 0xc1, 0xa2, 0x1a, + 0xb2, 0x62, 0xcc, 0xab, 0xa3, 0x9b, 0x30, 0xbc, 0xc1, 0x0f, 0xbe, 0xf5, 0x50, 0x24, 0xb3, 0xcf, + 0xbc, 0x8c, 0x2e, 0x4b, 0x24, 0x83, 0x1e, 0xbb, 0x32, 0x14, 0x08, 0xc7, 0xa4, 0xd0, 0x17, 0x2c, + 0x38, 0xd6, 0x4a, 0x48, 0x50, 0x99, 0xb3, 0x9a, 0x30, 0x53, 0xcb, 0x74, 0x00, 0xa8, 0x64, 0x55, + 0x30, 0x1a, 0x64, 0xea, 0x97, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, 0x1d, 0xe8, 0xe0, 0x76, 0xbd, 0x53, + 0xae, 0xa2, 0x44, 0x60, 0x22, 0x3e, 0xd0, 0x78, 0x7e, 0x11, 0xd3, 0x8a, 0x68, 0x0d, 0x60, 0xbd, + 0x41, 0x44, 0xc4, 0x47, 0x61, 0x14, 0x95, 0x79, 0xfb, 0x2f, 0x2b, 0x2c, 0x41, 0x87, 0xbd, 0x44, + 0xe3, 0x52, 0xac, 0xd1, 0xa1, 0x4b, 0xa9, 0xe6, 0x7a, 0x75, 0x12, 0x30, 0xe5, 0x56, 0xce, 0x52, + 0x5a, 0x60, 0x18, 0xe9, 0xa5, 0xc4, 0xcb, 0xb1, 0xa0, 0xc0, 0x68, 0x91, 0xd6, 0xe6, 0x7a, 0xd8, + 0x29, 0x2b, 0xc6, 0x02, 0x69, 0x6d, 0x26, 0x16, 0x14, 0xa7, 0xc5, 0xca, 0xb1, 0xa0, 0x40, 0xb7, + 0xcc, 0x3a, 0xdd, 0x40, 0x24, 0x98, 0x9e, 0xc8, 0xdf, 0x32, 0xcb, 0x1c, 0x25, 0xbd, 0x65, 0x04, + 0x00, 0x4b, 0x22, 0xe8, 0xd3, 0x26, 0xb7, 0x33, 
0xc9, 0x68, 0x3e, 0xd3, 0x85, 0xdb, 0x31, 0xe8, + 0x76, 0xe6, 0x77, 0x5e, 0x81, 0xc2, 0x7a, 0x8d, 0x29, 0xc5, 0x72, 0x74, 0x06, 0xcb, 0x0b, 0x06, + 0x35, 0x16, 0x65, 0x7e, 0x79, 0x01, 0x17, 0xd6, 0x6b, 0x74, 0xe9, 0x3b, 0xf7, 0xda, 0x01, 0x59, + 0x76, 0x1b, 0x44, 0x64, 0xc8, 0xc8, 0x5c, 0xfa, 0x73, 0x12, 0x29, 0xbd, 0xf4, 0x15, 0x08, 0xc7, + 0xa4, 0x28, 0xdd, 0x98, 0x07, 0x3b, 0x92, 0x4f, 0x57, 0xb1, 0x5a, 0x69, 0xba, 0x99, 0x5c, 0xd8, + 0x16, 0x8c, 0x6d, 0x87, 0xad, 0x4d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, 0x89, 0x54, 0x71, 0x53, + 0x20, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, 0xea, 0xc4, 0xb0, 0x49, + 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, 0x19, 0x11, 0xe7, 0xf8, + 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0xf1, 0x2e, 0x83, 0x9d, 0xea, 0x6f, + 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb5, 0xe9, 0x47, 0xbe, 0x97, 0xb8, 0xe4, + 0x4e, 0xe4, 0x5f, 0x34, 0x95, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, 0x33, 0xdb, 0xa2, 0x1f, + 0xd7, 0x92, 0x91, 0x01, 0x45, 0x16, 0x8f, 0xa7, 0x72, 0x02, 0x6b, 0xa6, 0xc3, 0x07, 0xf2, 0x8f, + 0x53, 0x20, 0x1c, 0x93, 0x42, 0x75, 0x18, 0x6f, 0x19, 0x11, 0x67, 0x59, 0x36, 0x92, 0x1c, 0xbe, + 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, 0xf7, 0xb8, 0xab, 0x1f, + 0x4b, 0x56, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, 0x4b, 0x22, 0x74, 0x34, + 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x9c, 0x3f, 0x79, 0x0a, 0xf6, 0x2c, 0x35, 0x91, 0x0c, 0xb3, 0x2e, + 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xca, 0x3f, 0xc9, 0x93, 0xd7, 0x1d, 0x3b, + 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0xf2, 0x95, 0xe4, 0xf4, 0x4b, 0x85, + 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34, 0xdd, 0x99, 0x0e, 0x57, + 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0, 0x4c, 0xe7, 0x7d, 0x1b, + 0xeb, 0xd0, 0x2a, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31, 0x56, 0xcf, 0x01, 0x87, + 0x2f, 0xc3, 0x94, 0x72, 0x47, 0x6c, 0xb8, 0xb5, 0x1d, 0x2d, 0x49, 0xa9, 0x0a, 0xcd, 0x53, 0x4d, + 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x07, 0x13, 0x46, 0x61, 0x79, 0x51, 0x3c, 0xff, 0xe3, 0x4c, 0x1b, + 0x26, 0x18, 0x27, 0xf1, 0xed, 0xdf, 0xb0, 0xe0, 0x44, 0x4e, 0xfe, 0xfb, 0x9e, 0xe3, 0xe9, 0xae, + 0xc3, 0x44, 0xcb, 0xac, 0xda, 0x25, 0x04, 0xb8, 0x91, 0x65, 0x5f, 0xf5, 0x35, 0x01, 0xc0, 0x49, + 0xa2, 0xf6, 0xaf, 0x15, 0xe0, 0x74, 0x47, 0xfb, 0x7a, 0x84, 0xe1, 0xf8, 0x46, 0x33, 0x74, 0x16, + 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x8b, 0xd4, 0x34, 0x2d, 0x28, 0x33, 0x54, 0xbf, + 0xbc, 0x52, 0x9d, 0x4b, 0x63, 0xe0, 0x9c, 0x9a, 0x68, 0x19, 0x50, 0x1a, 0x22, 0x66, 0x98, 0x3d, + 0x71, 0xd3, 0xf4, 0x70, 0x46, 0x0d, 0xf4, 0x32, 0x8c, 0x29, 0xbb, 0x7d, 0x6d, 0xc6, 0xd9, 0x05, + 0x81, 0x75, 0x00, 0x36, 0xf1, 0xd0, 0x45, 0x9e, 0x82, 0x49, 0x24, 0xeb, 0x12, 0x2a, 0xd3, 0x09, + 0x99, 0x5f, 0x49, 0x14, 0x63, 0x1d, 0x67, 0xfe, 0xd2, 0x5f, 0x7c, 0xeb, 0xcc, 0x87, 0xfe, 0xea, + 0x5b, 0x67, 0x3e, 0xf4, 0xb7, 0xdf, 0x3a, 0xf3, 0xa1, 0x1f, 0xda, 0x3b, 0x63, 0xfd, 0xc5, 0xde, + 0x19, 0xeb, 0xaf, 0xf6, 0xce, 0x58, 0x7f, 0xbb, 0x77, 0xc6, 0xfa, 0xdf, 0xf7, 0xce, 0x58, 0x5f, + 0xfe, 0x3f, 0xce, 0x7c, 0xe8, 0x6d, 0x14, 0x47, 0xa8, 0xbe, 0x40, 0x67, 0xe7, 0xc2, 0xf6, 0xc5, + 0xff, 0x10, 0x00, 0x00, 0xff, 0xff, 0xf5, 0xf1, 0x8c, 0x4c, 0x2d, 0x26, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -9887,6 +9921,13 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i 
var l int _ = l + if m.StopSignal != nil { + i -= len(*m.StopSignal) + copy(dAtA[i:], *m.StopSignal) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StopSignal))) + i-- + dAtA[i] = 0x7a + } if len(m.AllocatedResourcesStatus) > 0 { for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- { { @@ -12258,6 +12299,13 @@ func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StopSignal != nil { + i -= len(*m.StopSignal) + copy(dAtA[i:], *m.StopSignal) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StopSignal))) + i-- + dAtA[i] = 0x1a + } if m.PreStop != nil { { size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i]) @@ -14135,6 +14183,34 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *NodeSwapStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSwapStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSwapStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Capacity != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Capacity)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -14155,6 +14231,18 @@ func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Swap != nil { + { + size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } i -= len(m.Architecture) copy(dAtA[i:], m.Architecture) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) @@ -15723,6 +15811,9 @@ func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x38 i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -16994,6 +17085,11 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 if len(m.HostIPs) > 0 { for iNdEx := len(m.HostIPs) - 1; iNdEx >= 0; iNdEx-- { { @@ -22542,6 +22638,10 @@ func (m *ContainerStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.StopSignal != nil { + l = len(*m.StopSignal) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23382,6 +23482,10 @@ func (m *Lifecycle) Size() (n int) { l = m.PreStop.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.StopSignal != nil { + l = len(*m.StopSignal) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24067,6 +24171,18 @@ func (m *NodeStatus) Size() (n int) { return n } +func (m *NodeSwapStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Capacity != nil { + n += 1 + sovGenerated(uint64(*m.Capacity)) + } + return n +} + func (m *NodeSystemInfo) Size() (n int) { if m == nil { return 0 @@ -24093,6 +24209,10 @@ func (m *NodeSystemInfo) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Architecture) n += 1 + l + sovGenerated(uint64(l)) + if m.Swap != nil { + 
l = m.Swap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24650,6 +24770,7 @@ func (m *PodCondition) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) return n } @@ -25174,6 +25295,7 @@ func (m *PodStatus) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + n += 2 + sovGenerated(uint64(m.ObservedGeneration)) return n } @@ -27457,6 +27579,7 @@ func (this *ContainerStatus) String() string { `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, `User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`, `AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`, + `StopSignal:` + valueToStringGenerated(this.StopSignal) + `,`, `}`, }, "") return s @@ -28080,6 +28203,7 @@ func (this *Lifecycle) String() string { s := strings.Join([]string{`&Lifecycle{`, `PostStart:` + strings.Replace(this.PostStart.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`, `PreStop:` + strings.Replace(this.PreStop.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`, + `StopSignal:` + valueToStringGenerated(this.StopSignal) + `,`, `}`, }, "") return s @@ -28658,6 +28782,16 @@ func (this *NodeStatus) String() string { }, "") return s } +func (this *NodeSwapStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSwapStatus{`, + `Capacity:` + valueToStringGenerated(this.Capacity) + `,`, + `}`, + }, "") + return s +} func (this *NodeSystemInfo) String() string { if this == nil { return "nil" @@ -28673,6 +28807,7 @@ func (this *NodeSystemInfo) String() string { `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `Swap:` + strings.Replace(this.Swap.String(), "NodeSwapStatus", "NodeSwapStatus", 1) + `,`, `}`, }, "") return s @@ -29045,6 +29180,7 @@ func (this *PodCondition) String() string { `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `}`, }, "") return s @@ -29427,6 +29563,7 @@ func (this *PodStatus) String() string { `Resize:` + fmt.Sprintf("%v", this.Resize) + `,`, `ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`, `HostIPs:` + repeatedStringForHostIPs + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `}`, }, "") return s @@ -37794,88 +37931,122 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resources == nil { - m.Resources = &ResourceRequirements{} - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeMounts = append(m.VolumeMounts, VolumeMountStatus{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.User == nil { - m.User = &ContainerUser{} - } - if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, VolumeMountStatus{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &ContainerUser{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{}) + if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field AllocatedResourcesStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -37885,25 +38056,24 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{}) - if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := Signal(dAtA[iNdEx:postIndex]) + m.StopSignal = &s iNdEx = postIndex default: iNdEx = preIndex @@ -45056,6 +45226,39 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := Signal(dAtA[iNdEx:postIndex]) + m.StopSignal = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -50743,6 +50946,76 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *NodeSwapStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSwapStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSwapStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Capacity = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ 
-51092,6 +51365,42 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } m.Architecture = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swap == nil { + m.Swap = &NodeSwapStatus{} + } + if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -56087,6 +56396,25 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -60340,6 +60668,25 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/tools/vendor/k8s.io/api/core/v1/generated.proto b/tools/vendor/k8s.io/api/core/v1/generated.proto index 08706987c..9b48fb1c3 100644 --- a/tools/vendor/k8s.io/api/core/v1/generated.proto +++ b/tools/vendor/k8s.io/api/core/v1/generated.proto @@ -1103,6 +1103,11 @@ message ContainerStatus { // +listType=map // +listMapKey=name repeated ResourceStatus allocatedResourcesStatus = 14; + + // StopSignal reports the effective stop signal for this container + // +featureGate=ContainerStopSignals + // +optional + optional string stopSignal = 15; } // ContainerUser represents user identity information @@ -1194,6 +1199,7 @@ message EmptyDirVolumeSource { } // EndpointAddress is a tuple that describes single IP address. +// Deprecated: This API is deprecated in v1.33+. // +structType=atomic message EndpointAddress { // The IP of this endpoint. @@ -1215,6 +1221,7 @@ message EndpointAddress { } // EndpointPort is a tuple that describes a single port. +// Deprecated: This API is deprecated in v1.33+. // +structType=atomic message EndpointPort { // The name of this port. This must match the 'name' field in the @@ -1265,6 +1272,8 @@ message EndpointPort { // // a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], // b: [ 10.10.1.1:309, 10.10.2.2:309 ] +// +// Deprecated: This API is deprecated in v1.33+. message EndpointSubset { // IP addresses which offer the related ports that are marked as ready. 
These endpoints // should be considered safe for load balancers and clients to utilize. @@ -1298,6 +1307,11 @@ message EndpointSubset { // Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] // }, // ] +// +// Endpoints is a legacy API and does not contain information about all Service features. +// Use discoveryv1.EndpointSlice for complete information about Service endpoints. +// +// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice. message Endpoints { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -1317,6 +1331,7 @@ message Endpoints { } // EndpointsList is a list of endpoints. +// Deprecated: This API is deprecated in v1.33+. message EndpointsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds @@ -1327,9 +1342,9 @@ message EndpointsList { repeated Endpoints items = 2; } -// EnvFromSource represents the source of a set of ConfigMaps +// EnvFromSource represents the source of a set of ConfigMaps or Secrets message EnvFromSource { - // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER. // +optional optional string prefix = 1; @@ -2198,6 +2213,12 @@ message Lifecycle { // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional optional LifecycleHandler preStop = 2; + + // StopSignal defines which signal will be sent to a container when it is being stopped. + // If not specified, the default is defined by the container runtime in use. + // StopSignal can only be set for Pods with a non-empty .spec.os.name + // +optional + optional string stopSignal = 3; } // LifecycleHandler defines a specific action that should be taken in a lifecycle @@ -2862,6 +2883,13 @@ message NodeStatus { optional NodeFeatures features = 13; } +// NodeSwapStatus represents swap memory information. +message NodeSwapStatus { + // Total amount of swap memory in bytes. + // +optional + optional int64 capacity = 1; +} + // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. message NodeSystemInfo { // MachineID reported by the node. For unique machine identification @@ -2897,6 +2925,9 @@ message NodeSystemInfo { // The Architecture reported by the node optional string architecture = 10; + + // Swap Info reported by the node. + optional NodeSwapStatus swap = 11; } // ObjectFieldSelector selects an APIVersioned field of an object. @@ -3615,7 +3646,6 @@ message PodAffinityTerm { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both matchLabelKeys and labelSelector. // Also, matchLabelKeys cannot be set when labelSelector isn't set. - // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). // // +listType=atomic // +optional @@ -3629,7 +3659,6 @@ message PodAffinityTerm { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
// // +listType=atomic // +optional @@ -3702,6 +3731,12 @@ message PodCondition { // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions optional string type = 1; + // If set, this represents the .metadata.generation that the pod condition was set based upon. + // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field. + // +featureGate=PodObservedGenerationTracking + // +optional + optional int64 observedGeneration = 7; + // Status is the status of the condition. // Can be True, False, Unknown. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions @@ -4138,7 +4173,7 @@ message PodSpec { // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of - // of that value or the sum of the normal containers. Limits are applied to init containers + // that value or the sum of the normal containers. Limits are applied to init containers // in a similar fashion. // Init containers cannot currently be added or removed. // Cannot be updated. @@ -4487,6 +4522,12 @@ message PodSpec { // state of a system, especially if the node that hosts the pod cannot contact the control // plane. message PodStatus { + // If set, this represents the .metadata.generation that the pod status was set based upon. + // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field. + // +featureGate=PodObservedGenerationTracking + // +optional + optional int64 observedGeneration = 17; + // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. // The conditions array, the reason and message fields, and the individual container status // arrays contain more detail about the pod's status. @@ -4618,6 +4659,9 @@ message PodStatus { // Status of resources resize desired for pod's containers. // It is empty if no resources resize is pending. // Any changes to container resources will automatically set this to "Proposed" + // Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. + // PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. + // PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources. // +featureGate=InPlacePodVerticalScaling // +optional optional string resize = 14; @@ -5063,12 +5107,18 @@ message ReplicationControllerSpec { // Defaults to 1. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller // +optional + // +k8s:optional + // +default=1 + // +k8s:minimum=0 optional int32 replicas = 1; // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional + // +k8s:optional + // +default=0 + // +k8s:minimum=0 optional int32 minReadySeconds = 4; // Selector is a label query over pods that should match the Replicas count. 
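// --- illustrative example (editor's addition, not part of the vendored patch) ---
// A minimal sketch of how a client could populate the new Lifecycle.StopSignal
// field surfaced by this bump, assuming the vendored k8s.io/api version above.
// The pod name, container name, and image are hypothetical; per the
// generated.proto comment, stopSignal may only be set when .spec.os.name is
// non-empty.
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func podWithStopSignal() *corev1.Pod {
	// Signal is a string-typed type in core/v1 (see the Unmarshal hunks above,
	// which construct it via Signal(dAtA[iNdEx:postIndex])).
	sig := corev1.Signal("SIGTERM")
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: corev1.PodSpec{
			// stopSignal requires a non-empty .spec.os.name.
			OS: &corev1.PodOS{Name: corev1.Linux},
			Containers: []corev1.Container{{
				Name:      "app",
				Image:     "registry.example/app:latest",
				Lifecycle: &corev1.Lifecycle{StopSignal: &sig},
			}},
		},
	}
}
// --- end illustrative example ---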
@@ -6110,13 +6160,12 @@ message ServiceSpec { // +optional optional string internalTrafficPolicy = 22; - // TrafficDistribution offers a way to express preferences for how traffic is - // distributed to Service endpoints. Implementations can use this field as a - // hint, but are not required to guarantee strict adherence. If the field is - // not set, the implementation will apply its default routing strategy. If set - // to "PreferClose", implementations should prioritize endpoints that are - // topologically close (e.g., same zone). - // This is a beta field and requires enabling ServiceTrafficDistribution feature. + // TrafficDistribution offers a way to express preferences for how traffic + // is distributed to Service endpoints. Implementations can use this field + // as a hint, but are not required to guarantee strict adherence. If the + // field is not set, the implementation will apply its default routing + // strategy. If set to "PreferClose", implementations should prioritize + // endpoints that are in the same zone. // +featureGate=ServiceTrafficDistribution // +optional optional string trafficDistribution = 23; @@ -6411,7 +6460,6 @@ message TopologySpreadConstraint { // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. // // If this value is nil, the behavior is equivalent to the Honor policy. - // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. // +optional optional string nodeAffinityPolicy = 6; @@ -6422,7 +6470,6 @@ message TopologySpreadConstraint { // - Ignore: node taints are ignored. All nodes are included. // // If this value is nil, the behavior is equivalent to the Ignore policy. - // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. // +optional optional string nodeTaintsPolicy = 7; @@ -6854,7 +6901,7 @@ message VolumeSource { // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. // The volume will be mounted read-only (ro) and non-executable files (noexec). - // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. // +featureGate=ImageVolume // +optional diff --git a/tools/vendor/k8s.io/api/core/v1/lifecycle.go b/tools/vendor/k8s.io/api/core/v1/lifecycle.go index 21ca90e81..21b931b67 100644 --- a/tools/vendor/k8s.io/api/core/v1/lifecycle.go +++ b/tools/vendor/k8s.io/api/core/v1/lifecycle.go @@ -16,6 +16,10 @@ limitations under the License. package v1 +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced returns the release in which the API struct was introduced as int versions of major and minor for comparison. 
func (in *ComponentStatus) APILifecycleIntroduced() (major, minor int) { return 1, 0 @@ -35,3 +39,23 @@ func (in *ComponentStatusList) APILifecycleIntroduced() (major, minor int) { func (in *ComponentStatusList) APILifecycleDeprecated() (major, minor int) { return 1, 19 } + +// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +func (in *Endpoints) APILifecycleDeprecated() (major, minor int) { + return 1, 33 +} + +// APILifecycleReplacement returns the GVK of the replacement for the given API +func (in *Endpoints) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSlice"} +} + +// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +func (in *EndpointsList) APILifecycleDeprecated() (major, minor int) { + return 1, 33 +} + +// APILifecycleReplacement returns the GVK of the replacement for the given API +func (in *EndpointsList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSliceList"} +} diff --git a/tools/vendor/k8s.io/api/core/v1/types.go b/tools/vendor/k8s.io/api/core/v1/types.go index fb2c1c745..f7641e485 100644 --- a/tools/vendor/k8s.io/api/core/v1/types.go +++ b/tools/vendor/k8s.io/api/core/v1/types.go @@ -217,7 +217,7 @@ type VolumeSource struct { // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. // The volume will be mounted read-only (ro) and non-executable files (noexec). - // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. // +featureGate=ImageVolume // +optional @@ -2437,9 +2437,9 @@ type SecretKeySelector struct { Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } -// EnvFromSource represents the source of a set of ConfigMaps +// EnvFromSource represents the source of a set of ConfigMaps or Secrets type EnvFromSource struct { - // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER. 
 	// +optional
 	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
 	// The ConfigMap to select from
@@ -2980,6 +2980,78 @@ type LifecycleHandler struct {
 	Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
 }
 
+// Signal defines the stop signal of containers
+// +enum
+type Signal string
+
+const (
+	SIGABRT         Signal = "SIGABRT"
+	SIGALRM         Signal = "SIGALRM"
+	SIGBUS          Signal = "SIGBUS"
+	SIGCHLD         Signal = "SIGCHLD"
+	SIGCLD          Signal = "SIGCLD"
+	SIGCONT         Signal = "SIGCONT"
+	SIGFPE          Signal = "SIGFPE"
+	SIGHUP          Signal = "SIGHUP"
+	SIGILL          Signal = "SIGILL"
+	SIGINT          Signal = "SIGINT"
+	SIGIO           Signal = "SIGIO"
+	SIGIOT          Signal = "SIGIOT"
+	SIGKILL         Signal = "SIGKILL"
+	SIGPIPE         Signal = "SIGPIPE"
+	SIGPOLL         Signal = "SIGPOLL"
+	SIGPROF         Signal = "SIGPROF"
+	SIGPWR          Signal = "SIGPWR"
+	SIGQUIT         Signal = "SIGQUIT"
+	SIGSEGV         Signal = "SIGSEGV"
+	SIGSTKFLT       Signal = "SIGSTKFLT"
+	SIGSTOP         Signal = "SIGSTOP"
+	SIGSYS          Signal = "SIGSYS"
+	SIGTERM         Signal = "SIGTERM"
+	SIGTRAP         Signal = "SIGTRAP"
+	SIGTSTP         Signal = "SIGTSTP"
+	SIGTTIN         Signal = "SIGTTIN"
+	SIGTTOU         Signal = "SIGTTOU"
+	SIGURG          Signal = "SIGURG"
+	SIGUSR1         Signal = "SIGUSR1"
+	SIGUSR2         Signal = "SIGUSR2"
+	SIGVTALRM       Signal = "SIGVTALRM"
+	SIGWINCH        Signal = "SIGWINCH"
+	SIGXCPU         Signal = "SIGXCPU"
+	SIGXFSZ         Signal = "SIGXFSZ"
+	SIGRTMIN        Signal = "SIGRTMIN"
+	SIGRTMINPLUS1   Signal = "SIGRTMIN+1"
+	SIGRTMINPLUS2   Signal = "SIGRTMIN+2"
+	SIGRTMINPLUS3   Signal = "SIGRTMIN+3"
+	SIGRTMINPLUS4   Signal = "SIGRTMIN+4"
+	SIGRTMINPLUS5   Signal = "SIGRTMIN+5"
+	SIGRTMINPLUS6   Signal = "SIGRTMIN+6"
+	SIGRTMINPLUS7   Signal = "SIGRTMIN+7"
+	SIGRTMINPLUS8   Signal = "SIGRTMIN+8"
+	SIGRTMINPLUS9   Signal = "SIGRTMIN+9"
+	SIGRTMINPLUS10  Signal = "SIGRTMIN+10"
+	SIGRTMINPLUS11  Signal = "SIGRTMIN+11"
+	SIGRTMINPLUS12  Signal = "SIGRTMIN+12"
+	SIGRTMINPLUS13  Signal = "SIGRTMIN+13"
+	SIGRTMINPLUS14  Signal = "SIGRTMIN+14"
+	SIGRTMINPLUS15  Signal = "SIGRTMIN+15"
+	SIGRTMAXMINUS14 Signal = "SIGRTMAX-14"
+	SIGRTMAXMINUS13 Signal = "SIGRTMAX-13"
+	SIGRTMAXMINUS12 Signal = "SIGRTMAX-12"
+	SIGRTMAXMINUS11 Signal = "SIGRTMAX-11"
+	SIGRTMAXMINUS10 Signal = "SIGRTMAX-10"
+	SIGRTMAXMINUS9  Signal = "SIGRTMAX-9"
+	SIGRTMAXMINUS8  Signal = "SIGRTMAX-8"
+	SIGRTMAXMINUS7  Signal = "SIGRTMAX-7"
+	SIGRTMAXMINUS6  Signal = "SIGRTMAX-6"
+	SIGRTMAXMINUS5  Signal = "SIGRTMAX-5"
+	SIGRTMAXMINUS4  Signal = "SIGRTMAX-4"
+	SIGRTMAXMINUS3  Signal = "SIGRTMAX-3"
+	SIGRTMAXMINUS2  Signal = "SIGRTMAX-2"
+	SIGRTMAXMINUS1  Signal = "SIGRTMAX-1"
+	SIGRTMAX        Signal = "SIGRTMAX"
+)
+
 // Lifecycle describes actions that the management system should take in response to container lifecycle
 // events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
 // until the action is complete, unless the container process fails, in which case the handler is aborted.
@@ -3001,6 +3073,11 @@ type Lifecycle struct {
 	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
 	// +optional
 	PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
+	// StopSignal defines which signal will be sent to a container when it is being stopped.
+	// If not specified, the default is defined by the container runtime in use.
+	// StopSignal can only be set for Pods with a non-empty .spec.os.name
+	// +optional
+	StopSignal *Signal `json:"stopSignal,omitempty" protobuf:"bytes,3,opt,name=stopSignal"`
 }
 
 type ConditionStatus string
@@ -3154,6 +3231,10 @@ type ContainerStatus struct {
 	// +listType=map
 	// +listMapKey=name
 	AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
+	// StopSignal reports the effective stop signal for this container
+	// +featureGate=ContainerStopSignals
+	// +optional
+	StopSignal *Signal `json:"stopSignal,omitempty" protobuf:"bytes,15,opt,name=stopSignal"`
 }
 
 // ResourceStatus represents the status of a single resource allocated to a Pod.
@@ -3278,6 +3359,17 @@ const (
 	// PodReadyToStartContainers pod sandbox is successfully configured and
 	// the pod is ready to launch containers.
 	PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers"
+	// PodResizePending indicates that the pod has been resized, but kubelet has not
+	// yet allocated the resources. If both PodResizePending and PodResizeInProgress
+	// are set, it means that a new resize was requested in the middle of a previous
+	// pod resize that is still in progress.
+	PodResizePending PodConditionType = "PodResizePending"
+	// PodResizeInProgress indicates that a resize is in progress, and is present whenever
+	// the Kubelet has allocated resources for the resize, but has not yet actuated all of
+	// the required changes.
+	// If both PodResizePending and PodResizeInProgress are set, it means that a new resize was
+	// requested in the middle of a previous pod resize that is still in progress.
+	PodResizeInProgress PodConditionType = "PodResizeInProgress"
 )
 
 // These are reasons for a pod's transition to a condition.
@@ -3301,6 +3393,18 @@ const (
 	// PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the
 	// disruption was initiated by scheduler's preemption.
 	PodReasonPreemptionByScheduler = "PreemptionByScheduler"
+
+	// PodReasonDeferred reason in PodResizePending pod condition indicates the proposed resize is feasible in
+	// theory (it fits on this node) but is not possible right now.
+	PodReasonDeferred = "Deferred"
+
+	// PodReasonInfeasible reason in PodResizePending pod condition indicates the proposed resize is not
+	// feasible and is rejected; it may not be re-evaluated
+	PodReasonInfeasible = "Infeasible"
+
+	// PodReasonError reason in PodResizeInProgress pod condition indicates that an error occurred while
+	// actuating the resize.
+	PodReasonError = "Error"
 )
 
 // PodCondition contains details for the current condition of this pod.
@@ -3308,6 +3412,11 @@ type PodCondition struct {
 	// Type is the type of the condition.
 	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
 	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
+	// If set, this represents the .metadata.generation that the pod condition was set based upon.
+	// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+	// +featureGate=PodObservedGenerationTracking
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,7,opt,name=observedGeneration"`
 	// Status is the status of the condition.
 	// Can be True, False, Unknown.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions @@ -3326,12 +3435,10 @@ type PodCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } -// PodResizeStatus shows status of desired resize of a pod's containers. +// Deprecated: PodResizeStatus shows status of desired resize of a pod's containers. type PodResizeStatus string const ( - // Pod resources resize has been requested and will be evaluated by node. - PodResizeStatusProposed PodResizeStatus = "Proposed" // Pod resources resize has been accepted by node and is being actuated. PodResizeStatusInProgress PodResizeStatus = "InProgress" // Node cannot resize the pod at this time and will keep retrying. @@ -3627,7 +3734,6 @@ type PodAffinityTerm struct { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both matchLabelKeys and labelSelector. // Also, matchLabelKeys cannot be set when labelSelector isn't set. - // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). // // +listType=atomic // +optional @@ -3640,7 +3746,6 @@ type PodAffinityTerm struct { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). // // +listType=atomic // +optional @@ -3792,7 +3897,7 @@ type PodSpec struct { // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of - // of that value or the sum of the normal containers. Limits are applied to init containers + // that value or the sum of the normal containers. Limits are applied to init containers // in a similar fashion. // Init containers cannot currently be added or removed. // Cannot be updated. @@ -4301,7 +4406,6 @@ type TopologySpreadConstraint struct { // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. // // If this value is nil, the behavior is equivalent to the Honor policy. - // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. // +optional NodeAffinityPolicy *NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty" protobuf:"bytes,6,opt,name=nodeAffinityPolicy"` // NodeTaintsPolicy indicates how we will treat node taints when calculating @@ -4311,7 +4415,6 @@ type TopologySpreadConstraint struct { // - Ignore: node taints are ignored. All nodes are included. // // If this value is nil, the behavior is equivalent to the Ignore policy. - // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. // +optional NodeTaintsPolicy *NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty" protobuf:"bytes,7,opt,name=nodeTaintsPolicy"` // MatchLabelKeys is a set of pod label keys to select the pods over which @@ -4841,6 +4944,11 @@ type EphemeralContainer struct { // state of a system, especially if the node that hosts the pod cannot contact the control // plane. type PodStatus struct { + // If set, this represents the .metadata.generation that the pod status was set based upon. 
+	// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+	// +featureGate=PodObservedGenerationTracking
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,17,opt,name=observedGeneration"`
 	// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
 	// The conditions array, the reason and message fields, and the individual container status
 	// arrays contain more detail about the pod's status.
@@ -4968,6 +5076,9 @@ type PodStatus struct {
 	// Status of resources resize desired for pod's containers.
 	// It is empty if no resources resize is pending.
 	// Any changes to container resources will automatically set this to "Proposed"
+	// Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
+	// PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
+	// PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
 	// +featureGate=InPlacePodVerticalScaling
 	// +optional
 	Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"`
@@ -5099,12 +5210,18 @@ type ReplicationControllerSpec struct {
 	// Defaults to 1.
 	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
 	// +optional
+	// +k8s:optional
+	// +default=1
+	// +k8s:minimum=0
 	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
 
 	// Minimum number of seconds for which a newly created pod should be ready
 	// without any of its container crashing, for it to be considered available.
 	// Defaults to 0 (pod will be considered available as soon as it is ready)
 	// +optional
+	// +k8s:optional
+	// +default=0
+	// +k8s:minimum=0
 	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
 
 	// Selector is a label query over pods that should match the Replicas count.
@@ -5334,14 +5451,27 @@ const (
 
 // These are valid values for the TrafficDistribution field of a Service.
 const (
-	// Indicates a preference for routing traffic to endpoints that are
-	// topologically proximate to the client. The interpretation of "topologically
-	// proximate" may vary across implementations and could encompass endpoints
-	// within the same node, rack, zone, or even region. Setting this value gives
-	// implementations permission to make different tradeoffs, e.g. optimizing for
-	// proximity rather than equal distribution of load. Users should not set this
-	// value if such tradeoffs are not acceptable.
+	// Indicates a preference for routing traffic to endpoints that are in the same
+	// zone as the client. Users should not set this value unless they have ensured
+	// that clients and endpoints are distributed in such a way that the "same zone"
+	// preference will not result in endpoints getting overloaded.
 	ServiceTrafficDistributionPreferClose = "PreferClose"
+
+	// Indicates a preference for routing traffic to endpoints that are in the same
+	// zone as the client. Users should not set this value unless they have ensured
+	// that clients and endpoints are distributed in such a way that the "same zone"
+	// preference will not result in endpoints getting overloaded.
+	// This is an alias for "PreferClose", but it is an Alpha feature and is only
+	// recognized if the PreferSameTrafficDistribution feature gate is enabled.
+	ServiceTrafficDistributionPreferSameZone = "PreferSameZone"
+
+	// Indicates a preference for routing traffic to endpoints that are on the same
+	// node as the client. Users should not set this value unless they have ensured
+	// that clients and endpoints are distributed in such a way that the "same node"
+	// preference will not result in endpoints getting overloaded.
+	// This is an Alpha feature and is only recognized if the
+	// PreferSameTrafficDistribution feature gate is enabled.
+	ServiceTrafficDistributionPreferSameNode = "PreferSameNode"
 )
 
 // These are the valid conditions of a service.
@@ -5689,13 +5819,12 @@ type ServiceSpec struct {
 	// +optional
 	InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"`
 
-	// TrafficDistribution offers a way to express preferences for how traffic is
-	// distributed to Service endpoints. Implementations can use this field as a
-	// hint, but are not required to guarantee strict adherence. If the field is
-	// not set, the implementation will apply its default routing strategy. If set
-	// to "PreferClose", implementations should prioritize endpoints that are
-	// topologically close (e.g., same zone).
-	// This is a beta field and requires enabling ServiceTrafficDistribution feature.
+	// TrafficDistribution offers a way to express preferences for how traffic
+	// is distributed to Service endpoints. Implementations can use this field
+	// as a hint, but are not required to guarantee strict adherence. If the
+	// field is not set, the implementation will apply its default routing
+	// strategy. If set to "PreferClose", implementations should prioritize
+	// endpoints that are in the same zone.
 	// +featureGate=ServiceTrafficDistribution
 	// +optional
 	TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"`
@@ -5888,6 +6017,11 @@ type ServiceAccountList struct {
 //	     Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
 //	   },
 //	]
+//
+// Endpoints is a legacy API and does not contain information about all Service features.
+// Use discoveryv1.EndpointSlice for complete information about Service endpoints.
+//
+// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.
 type Endpoints struct {
 	metav1.TypeMeta `json:",inline"`
 	// Standard object's metadata.
@@ -5920,6 +6054,8 @@ type Endpoints struct {
 //
 //	a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
 //	b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+//
+// Deprecated: This API is deprecated in v1.33+.
 type EndpointSubset struct {
 	// IP addresses which offer the related ports that are marked as ready. These endpoints
 	// should be considered safe for load balancers and clients to utilize.
@@ -5939,6 +6075,7 @@ type EndpointSubset struct {
 }
 
 // EndpointAddress is a tuple that describes single IP address.
+// Deprecated: This API is deprecated in v1.33+.
 // +structType=atomic
 type EndpointAddress struct {
 	// The IP of this endpoint.
@@ -5957,6 +6094,7 @@ type EndpointAddress struct {
 }
 
 // EndpointPort is a tuple that describes a single port.
+// Deprecated: This API is deprecated in v1.33+.
 // +structType=atomic
 type EndpointPort struct {
 	// The name of this port.
This must match the 'name' field in the @@ -5998,6 +6136,7 @@ type EndpointPort struct { // +k8s:prerelease-lifecycle-gen:introduced=1.0 // EndpointsList is a list of endpoints. +// Deprecated: This API is deprecated in v1.33+. type EndpointsList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. @@ -6166,6 +6305,15 @@ type NodeSystemInfo struct { OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` // The Architecture reported by the node Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` + // Swap Info reported by the node. + Swap *NodeSwapStatus `json:"swap,omitempty" protobuf:"bytes,11,opt,name=swap"` +} + +// NodeSwapStatus represents swap memory information. +type NodeSwapStatus struct { + // Total amount of swap memory in bytes. + // +optional + Capacity *int64 `json:"capacity,omitempty" protobuf:"varint,1,opt,name=capacity"` } // NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. @@ -7267,6 +7415,9 @@ const ( ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" // Match all pod objects that have cross-namespace pod (anti)affinity mentioned. ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity" + + // Match all pvc objects that have volume attributes class mentioned. + ResourceQuotaScopeVolumeAttributesClass ResourceQuotaScope = "VolumeAttributesClass" ) // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. diff --git a/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 89ce3d230..9e987eefd 100644 --- a/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -474,6 +474,7 @@ var map_ContainerStatus = map[string]string{ "volumeMounts": "Status of volume mounts.", "user": "User represents user identity information initially attached to the first process of the container", "allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.", + "stopSignal": "StopSignal reports the effective stop signal for this container", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -540,7 +541,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { } var map_EndpointAddress = map[string]string{ - "": "EndpointAddress is a tuple that describes single IP address.", + "": "EndpointAddress is a tuple that describes single IP address. Deprecated: This API is deprecated in v1.33+.", "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", "hostname": "The Hostname of this endpoint", "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", @@ -552,7 +553,7 @@ func (EndpointAddress) SwaggerDoc() map[string]string { } var map_EndpointPort = map[string]string{ - "": "EndpointPort is a tuple that describes a single port.", + "": "EndpointPort is a tuple that describes a single port. Deprecated: This API is deprecated in v1.33+.", "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP.", @@ -564,7 +565,7 @@ func (EndpointPort) SwaggerDoc() map[string]string { } var map_EndpointSubset = map[string]string{ - "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]\n\nDeprecated: This API is deprecated in v1.33+.", "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", "ports": "Port numbers available on the related IP addresses.", @@ -575,7 +576,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { } var map_Endpoints = map[string]string{ - "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]", + "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]\n\nEndpoints is a legacy API and does not contain information about all Service features. Use discoveryv1.EndpointSlice for complete information about Service endpoints.\n\nDeprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. 
Sets of addresses and ports that comprise a service.", } @@ -585,7 +586,7 @@ func (Endpoints) SwaggerDoc() map[string]string { } var map_EndpointsList = map[string]string{ - "": "EndpointsList is a list of endpoints.", + "": "EndpointsList is a list of endpoints. Deprecated: This API is deprecated in v1.33+.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -595,8 +596,8 @@ func (EndpointsList) SwaggerDoc() map[string]string { } var map_EnvFromSource = map[string]string{ - "": "EnvFromSource represents the source of a set of ConfigMaps", - "prefix": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + "": "EnvFromSource represents the source of a set of ConfigMaps or Secrets", + "prefix": "Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.", "configMapRef": "The ConfigMap to select from", "secretRef": "The Secret to select from", } @@ -957,9 +958,10 @@ func (KeyToPath) SwaggerDoc() map[string]string { } var map_Lifecycle = map[string]string{ - "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", - "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", + "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. 
Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "stopSignal": "StopSignal defines which signal will be sent to a container when it is being stopped. If not specified, the default is defined by the container runtime in use. StopSignal can only be set for Pods with a non-empty .spec.os.name", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -1335,6 +1337,15 @@ func (NodeStatus) SwaggerDoc() map[string]string { return map_NodeStatus } +var map_NodeSwapStatus = map[string]string{ + "": "NodeSwapStatus represents swap memory information.", + "capacity": "Total amount of swap memory in bytes.", +} + +func (NodeSwapStatus) SwaggerDoc() map[string]string { + return map_NodeSwapStatus +} + var map_NodeSystemInfo = map[string]string{ "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", "machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", @@ -1347,6 +1358,7 @@ var map_NodeSystemInfo = map[string]string{ "kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.", "operatingSystem": "The Operating System reported by the node", "architecture": "The Architecture reported by the node", + "swap": "Swap Info reported by the node.", } func (NodeSystemInfo) SwaggerDoc() map[string]string { @@ -1583,8 +1595,8 @@ var map_PodAffinityTerm = map[string]string{ "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", - "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. 
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", - "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set.", + "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1617,6 +1629,7 @@ func (PodAttachOptions) SwaggerDoc() map[string]string { var map_PodCondition = map[string]string{ "": "PodCondition contains details for the current condition of this pod.", "type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "observedGeneration": "If set, this represents the .metadata.generation that the pod condition was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.", "status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", "lastProbeTime": "Last time we probed the condition.", "lastTransitionTime": "Last time the condition transitioned from one status to another.", @@ -1799,7 +1812,7 @@ func (PodSignature) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", - "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. 
If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", @@ -1846,6 +1859,7 @@ func (PodSpec) SwaggerDoc() map[string]string { var map_PodStatus = map[string]string{ "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.", "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. 
Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", "message": "A human readable message indicating details about why the pod is in this condition.", @@ -1860,7 +1874,7 @@ var map_PodStatus = map[string]string{ "containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", "ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", + "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.", "resourceClaimStatuses": "Status of resource claims.", } @@ -2487,7 +2501,7 @@ var map_ServiceSpec = map[string]string{ "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. 
If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.", "loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", "internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).", - "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.", + "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are in the same zone.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2619,8 +2633,8 @@ var map_TopologySpreadConstraint = map[string]string{ "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", "labelSelector": "LabelSelector is used to find matching pods. 
Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", "minDomains": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ", - "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", - "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy.", + "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.", "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. 
A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).", } @@ -2760,7 +2774,7 @@ var map_VolumeSource = map[string]string{ "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.", "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", "ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", - "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.", + "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. 
Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 3f669092e..619c52542 100644 --- a/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -1055,6 +1055,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.StopSignal != nil { + in, out := &in.StopSignal, &out.StopSignal + *out = new(Signal) + **out = **in + } return } @@ -2101,6 +2106,11 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { *out = new(LifecycleHandler) (*in).DeepCopyInto(*out) } + if in.StopSignal != nil { + in, out := &in.StopSignal, &out.StopSignal + *out = new(Signal) + **out = **in + } return } @@ -3002,7 +3012,7 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { copy(*out, *in) } out.DaemonEndpoints = in.DaemonEndpoints - out.NodeInfo = in.NodeInfo + in.NodeInfo.DeepCopyInto(&out.NodeInfo) if in.Images != nil { in, out := &in.Images, &out.Images *out = make([]ContainerImage, len(*in)) @@ -3050,9 +3060,35 @@ func (in *NodeStatus) DeepCopy() *NodeStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSwapStatus) DeepCopyInto(out *NodeSwapStatus) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSwapStatus. +func (in *NodeSwapStatus) DeepCopy() *NodeSwapStatus { + if in == nil { + return nil + } + out := new(NodeSwapStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
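Reviewer note on the deepcopy hunks above: the generated branches for the new StopSignal pointer mean a DeepCopy of Lifecycle (or ContainerStatus) no longer aliases the original's signal. A minimal sketch of that guarantee, assuming the vendored k8s.io/api/core/v1 at this revision; the "SIGTERM"/"SIGKILL" literals are illustrative values for the Signal string type, not constants this patch defines:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Signal is a string-based type in core/v1 at this revision (assumption
	// based on the `new(Signal)` branch in the generated deepcopy above).
	sig := corev1.Signal("SIGTERM")
	orig := corev1.Lifecycle{StopSignal: &sig}

	clone := orig.DeepCopy() // exercises the StopSignal branch added above
	*clone.StopSignal = corev1.Signal("SIGKILL")

	// The clone owns its own pointer, so mutating it leaves the original intact.
	fmt.Println(*orig.StopSignal, *clone.StopSignal) // SIGTERM SIGKILL
}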
func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) { *out = *in + if in.Swap != nil { + in, out := &in.Swap, &out.Swap + *out = new(NodeSwapStatus) + (*in).DeepCopyInto(*out) + } return } diff --git a/tools/vendor/k8s.io/api/discovery/v1/doc.go b/tools/vendor/k8s.io/api/discovery/v1/doc.go index 01913669f..43e30b7f4 100644 --- a/tools/vendor/k8s.io/api/discovery/v1/doc.go +++ b/tools/vendor/k8s.io/api/discovery/v1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // +groupName=discovery.k8s.io -package v1 // import "k8s.io/api/discovery/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/discovery/v1/generated.pb.go b/tools/vendor/k8s.io/api/discovery/v1/generated.pb.go index 5792481dc..443ff8f8f 100644 --- a/tools/vendor/k8s.io/api/discovery/v1/generated.pb.go +++ b/tools/vendor/k8s.io/api/discovery/v1/generated.pb.go @@ -214,10 +214,38 @@ func (m *EndpointSliceList) XXX_DiscardUnknown() { var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo +func (m *ForNode) Reset() { *m = ForNode{} } +func (*ForNode) ProtoMessage() {} +func (*ForNode) Descriptor() ([]byte, []int) { + return fileDescriptor_2237b452324cf77e, []int{6} +} +func (m *ForNode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ForNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ForNode) XXX_Merge(src proto.Message) { + xxx_messageInfo_ForNode.Merge(m, src) +} +func (m *ForNode) XXX_Size() int { + return m.Size() +} +func (m *ForNode) XXX_DiscardUnknown() { + xxx_messageInfo_ForNode.DiscardUnknown(m) +} + +var xxx_messageInfo_ForNode proto.InternalMessageInfo + func (m *ForZone) Reset() { *m = ForZone{} } func (*ForZone) ProtoMessage() {} func (*ForZone) Descriptor() ([]byte, []int) { - return fileDescriptor_2237b452324cf77e, []int{6} + return fileDescriptor_2237b452324cf77e, []int{7} } func (m *ForZone) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -250,6 +278,7 @@ func init() { proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1.EndpointPort") proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1.EndpointSlice") proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1.EndpointSliceList") + proto.RegisterType((*ForNode)(nil), "k8s.io.api.discovery.v1.ForNode") proto.RegisterType((*ForZone)(nil), "k8s.io.api.discovery.v1.ForZone") } @@ -258,62 +287,64 @@ func init() { } var fileDescriptor_2237b452324cf77e = []byte{ - // 877 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x6f, 0xdc, 0x44, - 0x18, 0x5e, 0x67, 0x63, 0x62, 0x8f, 0x13, 0xd1, 0x8e, 0x90, 0x62, 0x2d, 0xc8, 0x5e, 0x8c, 0x0a, - 0x2b, 0x45, 0x78, 0x49, 0x84, 0x50, 0x41, 0xe2, 0x10, 0xd3, 0xd0, 0xf2, 0x15, 0xa2, 0x69, 0x4e, - 0x15, 0x52, 0x71, 0xec, 0x37, 0x5e, 0x93, 0xd8, 0x63, 0x79, 0x26, 0x2b, 0x2d, 0x27, 0x2e, 0x9c, - 0xe1, 0x17, 0x71, 0x44, 0x39, 0xf6, 0x46, 0x4f, 0x16, 0x31, 0x7f, 0x81, 0x53, 0x4f, 0x68, 0xc6, - 0x9f, 0x61, 0xb3, 0xda, 0xde, 0x3c, 0xcf, 0x3c, 0xcf, 0xfb, 0xf1, 0xcc, 0xcc, 0x6b, 0xf4, 0xc1, - 0xc5, 0x43, 0xe6, 0xc6, 0x74, 0xea, 0x67, 0xf1, 0x34, 0x8c, 0x59, 0x40, 0xe7, 0x90, 0x2f, 0xa6, - 0xf3, 0xfd, 0x69, 0x04, 0x29, 0xe4, 0x3e, 0x87, 0xd0, 0xcd, 0x72, 0xca, 0x29, 0xde, 0xad, 0x88, - 0xae, 0x9f, 0xc5, 0x6e, 0x4b, 0x74, 0xe7, 0xfb, 0xa3, 0x0f, 0xa3, 0x98, 0xcf, 0xae, 0xce, 0xdc, - 0x80, 0x26, 0xd3, 0x88, 
0x46, 0x74, 0x2a, 0xf9, 0x67, 0x57, 0xe7, 0x72, 0x25, 0x17, 0xf2, 0xab, - 0x8a, 0x33, 0x72, 0x7a, 0x09, 0x03, 0x9a, 0xc3, 0x1d, 0xb9, 0x46, 0x1f, 0x77, 0x9c, 0xc4, 0x0f, - 0x66, 0x71, 0x2a, 0x6a, 0xca, 0x2e, 0x22, 0x01, 0xb0, 0x69, 0x02, 0xdc, 0xbf, 0x4b, 0x35, 0x5d, - 0xa5, 0xca, 0xaf, 0x52, 0x1e, 0x27, 0xb0, 0x24, 0xf8, 0x64, 0x9d, 0x80, 0x05, 0x33, 0x48, 0xfc, - 0xff, 0xeb, 0x9c, 0x7f, 0x37, 0x91, 0x76, 0x94, 0x86, 0x19, 0x8d, 0x53, 0x8e, 0xf7, 0x90, 0xee, - 0x87, 0x61, 0x0e, 0x8c, 0x01, 0x33, 0x95, 0xf1, 0x70, 0xa2, 0x7b, 0x3b, 0x65, 0x61, 0xeb, 0x87, - 0x0d, 0x48, 0xba, 0x7d, 0xfc, 0x1c, 0xa1, 0x80, 0xa6, 0x61, 0xcc, 0x63, 0x9a, 0x32, 0x73, 0x63, - 0xac, 0x4c, 0x8c, 0x83, 0x3d, 0x77, 0x85, 0xb3, 0x6e, 0x93, 0xe3, 0x8b, 0x56, 0xe2, 0xe1, 0xeb, - 0xc2, 0x1e, 0x94, 0x85, 0x8d, 0x3a, 0x8c, 0xf4, 0x42, 0xe2, 0x09, 0xd2, 0x66, 0x94, 0xf1, 0xd4, - 0x4f, 0xc0, 0x1c, 0x8e, 0x95, 0x89, 0xee, 0x6d, 0x97, 0x85, 0xad, 0x3d, 0xa9, 0x31, 0xd2, 0xee, - 0xe2, 0x13, 0xa4, 0x73, 0x3f, 0x8f, 0x80, 0x13, 0x38, 0x37, 0x37, 0x65, 0x25, 0xef, 0xf5, 0x2b, - 0x11, 0x67, 0x23, 0x8a, 0xf8, 0xfe, 0xec, 0x27, 0x08, 0x04, 0x09, 0x72, 0x48, 0x03, 0xa8, 0x9a, - 0x3b, 0x6d, 0x94, 0xa4, 0x0b, 0x82, 0x7f, 0x55, 0x10, 0x0e, 0x21, 0xcb, 0x21, 0x10, 0x5e, 0x9d, - 0xd2, 0x8c, 0x5e, 0xd2, 0x68, 0x61, 0xaa, 0xe3, 0xe1, 0xc4, 0x38, 0xf8, 0x74, 0x6d, 0x97, 0xee, - 0xa3, 0x25, 0xed, 0x51, 0xca, 0xf3, 0x85, 0x37, 0xaa, 0x7b, 0xc6, 0xcb, 0x04, 0x72, 0x47, 0x42, - 0xe1, 0x41, 0x4a, 0x43, 0x38, 0x16, 0x1e, 0xbc, 0xd1, 0x79, 0x70, 0x5c, 0x63, 0xa4, 0xdd, 0xc5, - 0xef, 0xa0, 0xcd, 0x9f, 0x69, 0x0a, 0xe6, 0x96, 0x64, 0x69, 0x65, 0x61, 0x6f, 0x3e, 0xa3, 0x29, - 0x10, 0x89, 0xe2, 0xc7, 0x48, 0x9d, 0xc5, 0x29, 0x67, 0xa6, 0x26, 0xdd, 0x79, 0x7f, 0x6d, 0x07, - 0x4f, 0x04, 0xdb, 0xd3, 0xcb, 0xc2, 0x56, 0xe5, 0x27, 0xa9, 0xf4, 0xa3, 0x23, 0xb4, 0xbb, 0xa2, - 0x37, 0x7c, 0x0f, 0x0d, 0x2f, 0x60, 0x61, 0x2a, 0xa2, 0x00, 0x22, 0x3e, 0xf1, 0x5b, 0x48, 0x9d, - 0xfb, 0x97, 0x57, 0x20, 0x6f, 0x87, 0x4e, 0xaa, 0xc5, 0x67, 0x1b, 0x0f, 0x15, 0xe7, 0x37, 0x05, - 0xe1, 0xe5, 0x2b, 0x81, 0x6d, 0xa4, 0xe6, 0xe0, 0x87, 0x55, 0x10, 0xad, 0x4a, 0x4f, 0x04, 0x40, - 0x2a, 0x1c, 0x3f, 0x40, 0x5b, 0x0c, 0xf2, 0x79, 0x9c, 0x46, 0x32, 0xa6, 0xe6, 0x19, 0x65, 0x61, - 0x6f, 0x3d, 0xad, 0x20, 0xd2, 0xec, 0xe1, 0x7d, 0x64, 0x70, 0xc8, 0x93, 0x38, 0xf5, 0xb9, 0xa0, - 0x0e, 0x25, 0xf5, 0xcd, 0xb2, 0xb0, 0x8d, 0xd3, 0x0e, 0x26, 0x7d, 0x8e, 0xf3, 0x1c, 0xed, 0xdc, - 0xea, 0x1d, 0x1f, 0x23, 0xed, 0x9c, 0xe6, 0xc2, 0xc3, 0xea, 0x2d, 0x18, 0x07, 0xe3, 0x95, 0xae, - 0x7d, 0x59, 0x11, 0xbd, 0x7b, 0xf5, 0xf1, 0x6a, 0x35, 0xc0, 0x48, 0x1b, 0xc3, 0xf9, 0x53, 0x41, - 0xdb, 0x4d, 0x86, 0x13, 0x9a, 0x73, 0x71, 0x62, 0xf2, 0x6e, 0x2b, 0xdd, 0x89, 0xc9, 0x33, 0x95, - 0x28, 0x7e, 0x8c, 0x34, 0xf9, 0x42, 0x03, 0x7a, 0x59, 0xd9, 0xe7, 0xed, 0x89, 0xc0, 0x27, 0x35, - 0xf6, 0xaa, 0xb0, 0xdf, 0x5e, 0x9e, 0x3e, 0x6e, 0xb3, 0x4d, 0x5a, 0xb1, 0x48, 0x93, 0xd1, 0x9c, - 0x4b, 0x13, 0xd4, 0x2a, 0x8d, 0x48, 0x4f, 0x24, 0x2a, 0x9c, 0xf2, 0xb3, 0xac, 0x91, 0xc9, 0xc7, - 0xa3, 0x57, 0x4e, 0x1d, 0x76, 0x30, 0xe9, 0x73, 0x9c, 0xbf, 0x36, 0x3a, 0xab, 0x9e, 0x5e, 0xc6, - 0x01, 0xe0, 0x1f, 0x91, 0x26, 0x06, 0x59, 0xe8, 0x73, 0x5f, 0x76, 0x63, 0x1c, 0x7c, 0xd4, 0xb3, - 0xaa, 0x9d, 0x47, 0x6e, 0x76, 0x11, 0x09, 0x80, 0xb9, 0x82, 0xdd, 0x3d, 0xc8, 0xef, 0x80, 0xfb, - 0xdd, 0x34, 0xe8, 0x30, 0xd2, 0x46, 0xc5, 0x8f, 0x90, 0x51, 0x4f, 0x9e, 0xd3, 0x45, 0x06, 0x75, - 0x99, 0x4e, 0x2d, 0x31, 0x0e, 0xbb, 0xad, 0x57, 0xb7, 0x97, 0xa4, 0x2f, 0xc3, 0x04, 0xe9, 0x50, - 0x17, 0x2e, 0x26, 0x96, 0x38, 0xd3, 0x77, 0xd7, 
0xbe, 0x04, 0xef, 0x7e, 0x9d, 0x46, 0x6f, 0x10, - 0x46, 0xba, 0x30, 0xf8, 0x6b, 0xa4, 0x0a, 0x23, 0x99, 0x39, 0x94, 0xf1, 0x1e, 0xac, 0x8d, 0x27, - 0xcc, 0xf7, 0x76, 0xea, 0x98, 0xaa, 0x58, 0x31, 0x52, 0x85, 0x70, 0xfe, 0x50, 0xd0, 0xfd, 0x5b, - 0xce, 0x7e, 0x1b, 0x33, 0x8e, 0x7f, 0x58, 0x72, 0xd7, 0x7d, 0x3d, 0x77, 0x85, 0x5a, 0x7a, 0xdb, - 0x5e, 0xcb, 0x06, 0xe9, 0x39, 0xfb, 0x0d, 0x52, 0x63, 0x0e, 0x49, 0xe3, 0xc7, 0xfa, 0xc9, 0x20, - 0x0b, 0xeb, 0x1a, 0xf8, 0x4a, 0x88, 0x49, 0x15, 0xc3, 0xd9, 0x43, 0x5b, 0xf5, 0xcd, 0xc7, 0xe3, - 0x5b, 0xb7, 0x7b, 0xbb, 0xa6, 0xf7, 0x6e, 0xb8, 0xf7, 0xf9, 0xf5, 0x8d, 0x35, 0x78, 0x71, 0x63, - 0x0d, 0x5e, 0xde, 0x58, 0x83, 0x5f, 0x4a, 0x4b, 0xb9, 0x2e, 0x2d, 0xe5, 0x45, 0x69, 0x29, 0x2f, - 0x4b, 0x4b, 0xf9, 0xbb, 0xb4, 0x94, 0xdf, 0xff, 0xb1, 0x06, 0xcf, 0x76, 0x57, 0xfc, 0xd4, 0xff, - 0x0b, 0x00, 0x00, 0xff, 0xff, 0x76, 0x4b, 0x26, 0xe3, 0xee, 0x07, 0x00, 0x00, + // 902 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0x9b, 0x9a, 0xda, 0xe3, 0x56, 0xec, 0x8e, 0x90, 0x6a, 0x05, 0x64, 0x07, 0xa3, 0x85, + 0x48, 0x15, 0x0e, 0xad, 0x10, 0x5a, 0x90, 0x38, 0xd4, 0x6c, 0xd9, 0xe5, 0x57, 0xa9, 0x66, 0x7b, + 0x5a, 0x21, 0x81, 0x6b, 0xbf, 0x3a, 0xa6, 0x8d, 0xc7, 0xf2, 0x4c, 0x22, 0x85, 0x13, 0x17, 0xce, + 0xf0, 0x9f, 0xf0, 0x1f, 0x70, 0x44, 0x3d, 0xee, 0x8d, 0x3d, 0x59, 0xd4, 0xfc, 0x0b, 0x9c, 0xf6, + 0x84, 0x66, 0xfc, 0x33, 0xa4, 0x51, 0xf6, 0xe6, 0xf9, 0xe6, 0x7b, 0xdf, 0x7b, 0xf3, 0xcd, 0x7b, + 0x23, 0xa3, 0xf7, 0xae, 0x1e, 0x32, 0x37, 0xa6, 0x63, 0x3f, 0x8d, 0xc7, 0x61, 0xcc, 0x02, 0x3a, + 0x87, 0x6c, 0x31, 0x9e, 0x1f, 0x8e, 0x23, 0x48, 0x20, 0xf3, 0x39, 0x84, 0x6e, 0x9a, 0x51, 0x4e, + 0xf1, 0x7e, 0x49, 0x74, 0xfd, 0x34, 0x76, 0x1b, 0xa2, 0x3b, 0x3f, 0x1c, 0xbc, 0x1f, 0xc5, 0x7c, + 0x32, 0xbb, 0x70, 0x03, 0x3a, 0x1d, 0x47, 0x34, 0xa2, 0x63, 0xc9, 0xbf, 0x98, 0x5d, 0xca, 0x95, + 0x5c, 0xc8, 0xaf, 0x52, 0x67, 0xe0, 0x74, 0x12, 0x06, 0x34, 0x83, 0x3b, 0x72, 0x0d, 0x3e, 0x6c, + 0x39, 0x53, 0x3f, 0x98, 0xc4, 0x89, 0xa8, 0x29, 0xbd, 0x8a, 0x04, 0xc0, 0xc6, 0x53, 0xe0, 0xfe, + 0x5d, 0x51, 0xe3, 0x75, 0x51, 0xd9, 0x2c, 0xe1, 0xf1, 0x14, 0x56, 0x02, 0x3e, 0xda, 0x14, 0xc0, + 0x82, 0x09, 0x4c, 0xfd, 0xff, 0xc7, 0x39, 0xff, 0x6e, 0x23, 0xed, 0x24, 0x09, 0x53, 0x1a, 0x27, + 0x1c, 0x1f, 0x20, 0xdd, 0x0f, 0xc3, 0x0c, 0x18, 0x03, 0x66, 0x2a, 0xc3, 0xfe, 0x48, 0xf7, 0xf6, + 0x8a, 0xdc, 0xd6, 0x8f, 0x6b, 0x90, 0xb4, 0xfb, 0xf8, 0x7b, 0x84, 0x02, 0x9a, 0x84, 0x31, 0x8f, + 0x69, 0xc2, 0xcc, 0xad, 0xa1, 0x32, 0x32, 0x8e, 0x0e, 0xdc, 0x35, 0xce, 0xba, 0x75, 0x8e, 0xcf, + 0x9a, 0x10, 0x0f, 0xdf, 0xe4, 0x76, 0xaf, 0xc8, 0x6d, 0xd4, 0x62, 0xa4, 0x23, 0x89, 0x47, 0x48, + 0x9b, 0x50, 0xc6, 0x13, 0x7f, 0x0a, 0x66, 0x7f, 0xa8, 0x8c, 0x74, 0x6f, 0xb7, 0xc8, 0x6d, 0xed, + 0x49, 0x85, 0x91, 0x66, 0x17, 0x9f, 0x21, 0x9d, 0xfb, 0x59, 0x04, 0x9c, 0xc0, 0xa5, 0xb9, 0x2d, + 0x2b, 0x79, 0xa7, 0x5b, 0x89, 0xb8, 0x1b, 0x51, 0xc4, 0xb7, 0x17, 0x3f, 0x42, 0x20, 0x48, 0x90, + 0x41, 0x12, 0x40, 0x79, 0xb8, 0xf3, 0x3a, 0x92, 0xb4, 0x22, 0xf8, 0x17, 0x05, 0xe1, 0x10, 0xd2, + 0x0c, 0x02, 0xe1, 0xd5, 0x39, 0x4d, 0xe9, 0x35, 0x8d, 0x16, 0xa6, 0x3a, 0xec, 0x8f, 0x8c, 0xa3, + 0x8f, 0x37, 0x9e, 0xd2, 0x7d, 0xb4, 0x12, 0x7b, 0x92, 0xf0, 0x6c, 0xe1, 0x0d, 0xaa, 0x33, 0xe3, + 0x55, 0x02, 0xb9, 0x23, 0xa1, 0xf0, 0x20, 0xa1, 0x21, 0x9c, 0x0a, 0x0f, 0x5e, 0x6b, 0x3d, 0x38, + 0xad, 0x30, 0xd2, 0xec, 0xe2, 0xb7, 0xd0, 0xf6, 0x4f, 0x34, 0x01, 0x73, 0x47, 0xb2, 0xb4, 0x22, + 0xb7, 0xb7, 0x9f, 0xd1, 0x04, 0x88, 0x44, 
0xf1, 0x63, 0xa4, 0x4e, 0xe2, 0x84, 0x33, 0x53, 0x93, + 0xee, 0xbc, 0xbb, 0xf1, 0x04, 0x4f, 0x04, 0xdb, 0xd3, 0x8b, 0xdc, 0x56, 0xe5, 0x27, 0x29, 0xe3, + 0x07, 0x27, 0x68, 0x7f, 0xcd, 0xd9, 0xf0, 0x3d, 0xd4, 0xbf, 0x82, 0x85, 0xa9, 0x88, 0x02, 0x88, + 0xf8, 0xc4, 0x6f, 0x20, 0x75, 0xee, 0x5f, 0xcf, 0x40, 0x76, 0x87, 0x4e, 0xca, 0xc5, 0x27, 0x5b, + 0x0f, 0x15, 0xe7, 0x57, 0x05, 0xe1, 0xd5, 0x96, 0xc0, 0x36, 0x52, 0x33, 0xf0, 0xc3, 0x52, 0x44, + 0x2b, 0xd3, 0x13, 0x01, 0x90, 0x12, 0xc7, 0x0f, 0xd0, 0x0e, 0x83, 0x6c, 0x1e, 0x27, 0x91, 0xd4, + 0xd4, 0x3c, 0xa3, 0xc8, 0xed, 0x9d, 0xa7, 0x25, 0x44, 0xea, 0x3d, 0x7c, 0x88, 0x0c, 0x0e, 0xd9, + 0x34, 0x4e, 0x7c, 0x2e, 0xa8, 0x7d, 0x49, 0x7d, 0xbd, 0xc8, 0x6d, 0xe3, 0xbc, 0x85, 0x49, 0x97, + 0xe3, 0xfc, 0xae, 0xa0, 0xbd, 0xa5, 0xc3, 0xe3, 0x53, 0xa4, 0x5d, 0xd2, 0x4c, 0x98, 0x58, 0x0e, + 0x83, 0x71, 0x34, 0x5c, 0x6b, 0xdb, 0xe7, 0x25, 0xd1, 0xbb, 0x57, 0xdd, 0xaf, 0x56, 0x01, 0x8c, + 0x34, 0x1a, 0x95, 0x9e, 0xb8, 0x3a, 0x31, 0x2e, 0x1b, 0xf5, 0x04, 0x71, 0x49, 0x4f, 0x46, 0x92, + 0x46, 0xc3, 0xf9, 0x53, 0x41, 0xbb, 0x75, 0xc5, 0x67, 0x34, 0xe3, 0xa2, 0x05, 0xe4, 0xb0, 0x28, + 0x6d, 0x0b, 0xc8, 0x26, 0x91, 0x28, 0x7e, 0x8c, 0x34, 0x39, 0xf2, 0x01, 0xbd, 0x2e, 0xef, 0xc3, + 0x3b, 0x10, 0xc2, 0x67, 0x15, 0xf6, 0x32, 0xb7, 0xdf, 0x5c, 0x7d, 0xce, 0xdc, 0x7a, 0x9b, 0x34, + 0xc1, 0x22, 0x4d, 0x4a, 0x33, 0x2e, 0x5d, 0x55, 0xcb, 0x34, 0x22, 0x3d, 0x91, 0xa8, 0xb0, 0xde, + 0x4f, 0xd3, 0x3a, 0x4c, 0x4e, 0xa3, 0x5e, 0x5a, 0x7f, 0xdc, 0xc2, 0xa4, 0xcb, 0x71, 0xfe, 0xda, + 0x6a, 0xad, 0x7f, 0x7a, 0x1d, 0x07, 0x80, 0x7f, 0x40, 0x9a, 0x78, 0x19, 0x43, 0x9f, 0xfb, 0xf2, + 0x34, 0xc6, 0xd1, 0x07, 0x1d, 0xab, 0x9a, 0x07, 0xce, 0x4d, 0xaf, 0x22, 0x01, 0x30, 0x57, 0xb0, + 0xdb, 0x09, 0xff, 0x06, 0xb8, 0xdf, 0x3e, 0x2f, 0x2d, 0x46, 0x1a, 0x55, 0xfc, 0x08, 0x19, 0xd5, + 0x53, 0x76, 0xbe, 0x48, 0xa1, 0x2a, 0xd3, 0xa9, 0x42, 0x8c, 0xe3, 0x76, 0xeb, 0xe5, 0xf2, 0x92, + 0x74, 0xc3, 0x30, 0x41, 0x3a, 0x54, 0x85, 0xd7, 0x77, 0xfa, 0xf6, 0xc6, 0xd1, 0xf2, 0xee, 0x57, + 0x69, 0xf4, 0x1a, 0x61, 0xa4, 0x95, 0xc1, 0x5f, 0x22, 0x55, 0x18, 0xc9, 0xcc, 0xbe, 0xd4, 0x7b, + 0xb0, 0x51, 0x4f, 0x98, 0xef, 0xed, 0x55, 0x9a, 0xaa, 0x58, 0x31, 0x52, 0x4a, 0x38, 0x7f, 0x28, + 0xe8, 0xfe, 0x92, 0xb3, 0x5f, 0xc7, 0x8c, 0xe3, 0xef, 0x56, 0xdc, 0x75, 0x5f, 0xcd, 0x5d, 0x11, + 0x2d, 0xbd, 0x6d, 0xda, 0xb2, 0x46, 0x3a, 0xce, 0x7e, 0x85, 0xd4, 0x98, 0xc3, 0xb4, 0xf6, 0x63, + 0xf3, 0x53, 0x23, 0x0b, 0x6b, 0x0f, 0xf0, 0x85, 0x08, 0x26, 0xa5, 0x86, 0x73, 0x80, 0x76, 0xaa, + 0xce, 0xc7, 0xc3, 0xa5, 0xee, 0xde, 0xad, 0xe8, 0x9d, 0x0e, 0xaf, 0xc8, 0x62, 0xd8, 0x36, 0x93, + 0xbd, 0x4f, 0x6f, 0x6e, 0xad, 0xde, 0xf3, 0x5b, 0xab, 0xf7, 0xe2, 0xd6, 0xea, 0xfd, 0x5c, 0x58, + 0xca, 0x4d, 0x61, 0x29, 0xcf, 0x0b, 0x4b, 0x79, 0x51, 0x58, 0xca, 0xdf, 0x85, 0xa5, 0xfc, 0xf6, + 0x8f, 0xd5, 0x7b, 0xb6, 0xbf, 0xe6, 0x97, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xfc, + 0xbe, 0xad, 0x6c, 0x08, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -500,6 +531,20 @@ func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ForNodes) > 0 { + for iNdEx := len(m.ForNodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ForNodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } if len(m.ForZones) > 0 { for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- { { @@ -679,6 +724,34 @@ func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { return len(dAtA) - i, nil } +func (m *ForNode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ForNode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ForNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ForZone) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -793,6 +866,12 @@ func (m *EndpointHints) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.ForNodes) > 0 { + for _, e := range m.ForNodes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -862,6 +941,17 @@ func (m *EndpointSliceList) Size() (n int) { return n } +func (m *ForNode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ForZone) Size() (n int) { if m == nil { return 0 @@ -927,8 +1017,14 @@ func (this *EndpointHints) String() string { repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + "," } repeatedStringForForZones += "}" + repeatedStringForForNodes := "[]ForNode{" + for _, f := range this.ForNodes { + repeatedStringForForNodes += strings.Replace(strings.Replace(f.String(), "ForNode", "ForNode", 1), `&`, ``, 1) + "," + } + repeatedStringForForNodes += "}" s := strings.Join([]string{`&EndpointHints{`, `ForZones:` + repeatedStringForForZones + `,`, + `ForNodes:` + repeatedStringForForNodes + `,`, `}`, }, "") return s @@ -985,6 +1081,16 @@ func (this *EndpointSliceList) String() string { }, "") return s } +func (this *ForNode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ForNode{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *ForZone) String() string { if this == nil { return "nil" @@ -1592,6 +1698,40 @@ func (m *EndpointHints) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForNodes = append(m.ForNodes, ForNode{}) + if err := m.ForNodes[len(m.ForNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2082,6 +2222,88 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { } return nil } +func (m *ForNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ForZone) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/tools/vendor/k8s.io/api/discovery/v1/generated.proto b/tools/vendor/k8s.io/api/discovery/v1/generated.proto index 8ddf0dc5d..569d8a916 100644 --- a/tools/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/tools/vendor/k8s.io/api/discovery/v1/generated.proto @@ -31,12 +31,12 @@ option go_package = "k8s.io/api/discovery/v1"; // Endpoint represents a single logical "backend" implementing a service. message Endpoint { - // addresses of this endpoint. The contents of this field are interpreted - // according to the corresponding EndpointSlice addressType field. Consumers - // must handle different types of addresses in the context of their own - // capabilities. This must contain at least one address but no more than - // 100. These are all assumed to be fungible and clients may choose to only - // use the first element. Refer to: https://issue.k8s.io/106267 + // addresses of this endpoint. For EndpointSlices of addressType "IPv4" or "IPv6", + // the values are IP addresses in canonical form. The syntax and semantics of + // other addressType values are not defined. This must contain at least one + // address but no more than 100. EndpointSlices generated by the EndpointSlice + // controller will always have exactly 1 address. No semantics are defined for + // additional addresses beyond the first, and kube-proxy does not look at them. // +listType=set repeated string addresses = 1; @@ -82,36 +82,42 @@ message Endpoint { // EndpointConditions represents the current condition of an endpoint. message EndpointConditions { - // ready indicates that this endpoint is prepared to receive traffic, + // ready indicates that this endpoint is ready to receive traffic, // according to whatever system is managing the endpoint. A nil value - // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. 
For compatibility reasons, ready should never be - // "true" for terminating endpoints, except when the normal readiness - // behavior is being explicitly overridden, for example when the associated - // Service has set the publishNotReadyAddresses flag. + // should be interpreted as "true". In general, an endpoint should be + // marked ready if it is serving and not terminating, though this can + // be overridden in some cases, such as when the associated Service has + // set the publishNotReadyAddresses flag. // +optional optional bool ready = 1; - // serving is identical to ready except that it is set regardless of the - // terminating state of endpoints. This condition should be set to true for - // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. + // serving indicates that this endpoint is able to receive traffic, + // according to whatever system is managing the endpoint. For endpoints + // backed by pods, the EndpointSlice controller will mark the endpoint + // as serving if the pod's Ready condition is True. A nil value should be + // interpreted as "true". // +optional optional bool serving = 2; // terminating indicates that this endpoint is terminating. A nil value - // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. + // should be interpreted as "false". // +optional optional bool terminating = 3; } // EndpointHints provides hints describing how an endpoint should be consumed. message EndpointHints { - // forZones indicates the zone(s) this endpoint should be consumed by to - // enable topology aware routing. + // forZones indicates the zone(s) this endpoint should be consumed by when + // using topology aware routing. May contain a maximum of 8 entries. // +listType=atomic repeated ForZone forZones = 1; + + // forNodes indicates the node(s) this endpoint should be consumed by when + // using topology aware routing. May contain a maximum of 8 entries. + // This is an Alpha feature and is only used when the PreferSameTrafficDistribution + // feature gate is enabled. + // +listType=atomic + repeated ForNode forNodes = 2; } // EndpointPort represents a Port used by an EndpointSlice @@ -132,8 +138,9 @@ message EndpointPort { optional string protocol = 2; // port represents the port number of the endpoint. - // If this is not specified, ports are not restricted and must be - // interpreted in the context of the specific consumer. + // If the EndpointSlice is derived from a Kubernetes service, this must be set + // to the service's target port. EndpointSlices used for other purposes may have + // a nil port. optional int32 port = 3; // The application protocol for this port. @@ -155,9 +162,12 @@ message EndpointPort { optional string appProtocol = 4; } -// EndpointSlice represents a subset of the endpoints that implement a service. -// For a given service there may be multiple EndpointSlice objects, selected by -// labels, which must be joined to produce the full set of endpoints. +// EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by +// the EndpointSlice controller to represent the Pods selected by Service objects. For a +// given service there may be multiple EndpointSlice objects which must be joined to +// produce the full set of endpoints; you can find all of the slices for a given service +// by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name` +// label contains the service's name. 
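Reviewer note on the EndpointConditions rewording above: the new text pins down the nil defaults (ready and serving read as true, terminating as false) in place of the old "unknown state" language. A small consumer-side sketch under that reading, assuming the vendored k8s.io/api/discovery/v1; the helper names are illustrative only, not part of any API:

package main

import (
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
)

// boolOr applies the nil defaults documented in the updated comments.
func boolOr(b *bool, def bool) bool {
	if b == nil {
		return def
	}
	return *b
}

// usable reports whether an endpoint should receive new traffic under the
// clarified semantics: ready (nil means true) and not terminating (nil means false).
func usable(c discoveryv1.EndpointConditions) bool {
	return boolOr(c.Ready, true) && !boolOr(c.Terminating, false)
}

func main() {
	// An empty conditions struct is usable: nil ready defaults to true,
	// nil terminating defaults to false.
	fmt.Println(usable(discoveryv1.EndpointConditions{})) // true
}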
message EndpointSlice { // Standard object's metadata. // +optional @@ -169,7 +179,10 @@ message EndpointSlice { // supported: // * IPv4: Represents an IPv4 Address. // * IPv6: Represents an IPv6 Address. - // * FQDN: Represents a Fully Qualified Domain Name. + // * FQDN: Represents a Fully Qualified Domain Name. (Deprecated) + // The EndpointSlice controller only generates, and kube-proxy only processes, + // slices of addressType "IPv4" and "IPv6". No semantics are defined for + // the "FQDN" type. optional string addressType = 4; // endpoints is a list of unique endpoints in this slice. Each slice may @@ -178,10 +191,11 @@ message EndpointSlice { repeated Endpoint endpoints = 2; // ports specifies the list of network ports exposed by each endpoint in - // this slice. Each port must have a unique name. When ports is empty, it - // indicates that there are no defined ports. When a port is defined with a - // nil port value, it indicates "all ports". Each slice may include a + // this slice. Each port must have a unique name. Each slice may include a // maximum of 100 ports. + // Services always have at least 1 port, so EndpointSlices generated by the + // EndpointSlice controller will likewise always have at least 1 port. + // EndpointSlices used for other purposes may have an empty ports list. // +optional // +listType=atomic repeated EndpointPort ports = 3; @@ -197,6 +211,12 @@ message EndpointSliceList { repeated EndpointSlice items = 2; } +// ForNode provides information about which nodes should consume this endpoint. +message ForNode { + // name represents the name of the node. + optional string name = 1; +} + // ForZone provides information about which zones should consume this endpoint. message ForZone { // name represents the name of the zone. diff --git a/tools/vendor/k8s.io/api/discovery/v1/types.go b/tools/vendor/k8s.io/api/discovery/v1/types.go index d6a9d0fce..6f2695316 100644 --- a/tools/vendor/k8s.io/api/discovery/v1/types.go +++ b/tools/vendor/k8s.io/api/discovery/v1/types.go @@ -25,9 +25,12 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.21 -// EndpointSlice represents a subset of the endpoints that implement a service. -// For a given service there may be multiple EndpointSlice objects, selected by -// labels, which must be joined to produce the full set of endpoints. +// EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by +// the EndpointSlice controller to represent the Pods selected by Service objects. For a +// given service there may be multiple EndpointSlice objects which must be joined to +// produce the full set of endpoints; you can find all of the slices for a given service +// by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name` +// label contains the service's name. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` @@ -41,7 +44,10 @@ type EndpointSlice struct { // supported: // * IPv4: Represents an IPv4 Address. // * IPv6: Represents an IPv6 Address. - // * FQDN: Represents a Fully Qualified Domain Name. + // * FQDN: Represents a Fully Qualified Domain Name. (Deprecated) + // The EndpointSlice controller only generates, and kube-proxy only processes, + // slices of addressType "IPv4" and "IPv6". No semantics are defined for + // the "FQDN" type. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` // endpoints is a list of unique endpoints in this slice. 
Each slice may @@ -50,10 +56,11 @@ type EndpointSlice struct { Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` // ports specifies the list of network ports exposed by each endpoint in - // this slice. Each port must have a unique name. When ports is empty, it - // indicates that there are no defined ports. When a port is defined with a - // nil port value, it indicates "all ports". Each slice may include a + // this slice. Each port must have a unique name. Each slice may include a // maximum of 100 ports. + // Services always have at least 1 port, so EndpointSlices generated by the + // EndpointSlice controller will likewise always have at least 1 port. + // EndpointSlices used for other purposes may have an empty ports list. // +optional // +listType=atomic Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` @@ -76,12 +83,12 @@ const ( // Endpoint represents a single logical "backend" implementing a service. type Endpoint struct { - // addresses of this endpoint. The contents of this field are interpreted - // according to the corresponding EndpointSlice addressType field. Consumers - // must handle different types of addresses in the context of their own - // capabilities. This must contain at least one address but no more than - // 100. These are all assumed to be fungible and clients may choose to only - // use the first element. Refer to: https://issue.k8s.io/106267 + // addresses of this endpoint. For EndpointSlices of addressType "IPv4" or "IPv6", + // the values are IP addresses in canonical form. The syntax and semantics of + // other addressType values are not defined. This must contain at least one + // address but no more than 100. EndpointSlices generated by the EndpointSlice + // controller will always have exactly 1 address. No semantics are defined for + // additional addresses beyond the first, and kube-proxy does not look at them. // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` @@ -127,36 +134,42 @@ type Endpoint struct { // EndpointConditions represents the current condition of an endpoint. type EndpointConditions struct { - // ready indicates that this endpoint is prepared to receive traffic, + // ready indicates that this endpoint is ready to receive traffic, // according to whatever system is managing the endpoint. A nil value - // indicates an unknown state. In most cases consumers should interpret this - // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints, except when the normal readiness - // behavior is being explicitly overridden, for example when the associated - // Service has set the publishNotReadyAddresses flag. + // should be interpreted as "true". In general, an endpoint should be + // marked ready if it is serving and not terminating, though this can + // be overridden in some cases, such as when the associated Service has + // set the publishNotReadyAddresses flag. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` - // serving is identical to ready except that it is set regardless of the - // terminating state of endpoints. This condition should be set to true for - // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. + // serving indicates that this endpoint is able to receive traffic, + // according to whatever system is managing the endpoint. 
For endpoints + // backed by pods, the EndpointSlice controller will mark the endpoint + // as serving if the pod's Ready condition is True. A nil value should be + // interpreted as "true". // +optional Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` // terminating indicates that this endpoint is terminating. A nil value - // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. + // should be interpreted as "false". // +optional Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } // EndpointHints provides hints describing how an endpoint should be consumed. type EndpointHints struct { - // forZones indicates the zone(s) this endpoint should be consumed by to - // enable topology aware routing. + // forZones indicates the zone(s) this endpoint should be consumed by when + // using topology aware routing. May contain a maximum of 8 entries. // +listType=atomic ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"` + + // forNodes indicates the node(s) this endpoint should be consumed by when + // using topology aware routing. May contain a maximum of 8 entries. + // This is an Alpha feature and is only used when the PreferSameTrafficDistribution + // feature gate is enabled. + // +listType=atomic + ForNodes []ForNode `json:"forNodes,omitempty" protobuf:"bytes,2,name=forNodes"` } // ForZone provides information about which zones should consume this endpoint. @@ -165,6 +178,12 @@ type ForZone struct { Name string `json:"name" protobuf:"bytes,1,name=name"` } +// ForNode provides information about which nodes should consume this endpoint. +type ForNode struct { + // name represents the name of the node. + Name string `json:"name" protobuf:"bytes,1,name=name"` +} + // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic type EndpointPort struct { @@ -183,8 +202,9 @@ type EndpointPort struct { Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` // port represents the port number of the endpoint. - // If this is not specified, ports are not restricted and must be - // interpreted in the context of the specific consumer. + // If the EndpointSlice is derived from a Kubernetes service, this must be set + // to the service's target port. EndpointSlices used for other purposes may have + // a nil port. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` // The application protocol for this port. diff --git a/tools/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index 41c306056..ac5b853b9 100644 --- a/tools/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", - "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. 
Refer to: https://issue.k8s.io/106267", + "addresses": "addresses of this endpoint. For EndpointSlices of addressType \"IPv4\" or \"IPv6\", the values are IP addresses in canonical form. The syntax and semantics of other addressType values are not defined. This must contain at least one address but no more than 100. EndpointSlices generated by the EndpointSlice controller will always have exactly 1 address. No semantics are defined for additional addresses beyond the first, and kube-proxy does not look at them.", "conditions": "conditions contains information about the current status of the endpoint.", "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", @@ -45,9 +45,9 @@ func (Endpoint) SwaggerDoc() map[string]string { var map_EndpointConditions = map[string]string{ "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.", - "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.", - "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", + "ready": "ready indicates that this endpoint is ready to receive traffic, according to whatever system is managing the endpoint. A nil value should be interpreted as \"true\". In general, an endpoint should be marked ready if it is serving and not terminating, though this can be overridden in some cases, such as when the associated Service has set the publishNotReadyAddresses flag.", + "serving": "serving indicates that this endpoint is able to receive traffic, according to whatever system is managing the endpoint. For endpoints backed by pods, the EndpointSlice controller will mark the endpoint as serving if the pod's Ready condition is True. A nil value should be interpreted as \"true\".", + "terminating": "terminating indicates that this endpoint is terminating. A nil value should be interpreted as \"false\".", } func (EndpointConditions) SwaggerDoc() map[string]string { @@ -56,7 +56,8 @@ func (EndpointConditions) SwaggerDoc() map[string]string { var map_EndpointHints = map[string]string{ "": "EndpointHints provides hints describing how an endpoint should be consumed.", - "forZones": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.", + "forZones": "forZones indicates the zone(s) this endpoint should be consumed by when using topology aware routing. 
May contain a maximum of 8 entries.", + "forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled.", } func (EndpointHints) SwaggerDoc() map[string]string { @@ -67,7 +68,7 @@ var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "port": "port represents the port number of the endpoint. If the EndpointSlice is derived from a Kubernetes service, this must be set to the service's target port. EndpointSlices used for other purposes may have a nil port.", "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } @@ -76,11 +77,11 @@ func (EndpointPort) SwaggerDoc() map[string]string { } var map_EndpointSlice = map[string]string{ - "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "": "EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by the EndpointSlice controller to represent the Pods selected by Service objects. For a given service there may be multiple EndpointSlice objects which must be joined to produce the full set of endpoints; you can find all of the slices for a given service by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name` label contains the service's name.", "metadata": "Standard object's metadata.", - "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. 
* FQDN: Represents a Fully Qualified Domain Name.", + "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name. (Deprecated) The EndpointSlice controller only generates, and kube-proxy only processes, slices of addressType \"IPv4\" and \"IPv6\". No semantics are defined for the \"FQDN\" type.", "endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", - "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.", + "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. Each slice may include a maximum of 100 ports. Services always have at least 1 port, so EndpointSlices generated by the EndpointSlice controller will likewise always have at least 1 port. EndpointSlices used for other purposes may have an empty ports list.", } func (EndpointSlice) SwaggerDoc() map[string]string { @@ -97,6 +98,15 @@ func (EndpointSliceList) SwaggerDoc() map[string]string { return map_EndpointSliceList } +var map_ForNode = map[string]string{ + "": "ForNode provides information about which nodes should consume this endpoint.", + "name": "name represents the name of the node.", +} + +func (ForNode) SwaggerDoc() map[string]string { + return map_ForNode +} + var map_ForZone = map[string]string{ "": "ForZone provides information about which zones should consume this endpoint.", "name": "name represents the name of the zone.", diff --git a/tools/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go index caa872af0..60eada3b9 100644 --- a/tools/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/discovery/v1/zz_generated.deepcopy.go @@ -119,6 +119,11 @@ func (in *EndpointHints) DeepCopyInto(out *EndpointHints) { *out = make([]ForZone, len(*in)) copy(*out, *in) } + if in.ForNodes != nil { + in, out := &in.ForNodes, &out.ForNodes + *out = make([]ForNode, len(*in)) + copy(*out, *in) + } return } @@ -241,6 +246,22 @@ func (in *EndpointSliceList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForNode) DeepCopyInto(out *ForNode) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForNode. +func (in *ForNode) DeepCopy() *ForNode { + if in == nil { + return nil + } + out := new(ForNode) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
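Reviewer note on the new ForNode type wired through the hunks above: forNodes sits next to forZones in EndpointHints and, per the updated docs, is only consumed when the PreferSameTrafficDistribution feature gate is enabled. A sketch of producing both hints with the vendored k8s.io/api/discovery/v1 at this revision; the zone and node names are placeholders:

package main

import (
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
)

func main() {
	hints := discoveryv1.EndpointHints{
		// Existing field: at most 8 entries per the updated doc comments.
		ForZones: []discoveryv1.ForZone{{Name: "zone-a"}},
		// New in this revision; ignored unless the Alpha
		// PreferSameTrafficDistribution feature gate is enabled.
		ForNodes: []discoveryv1.ForNode{{Name: "node-1"}},
	}
	fmt.Printf("%+v\n", hints)
}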
func (in *ForZone) DeepCopyInto(out *ForZone) { *out = *in diff --git a/tools/vendor/k8s.io/api/discovery/v1beta1/doc.go b/tools/vendor/k8s.io/api/discovery/v1beta1/doc.go index 7d7084802..f12087eff 100644 --- a/tools/vendor/k8s.io/api/discovery/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/discovery/v1beta1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // +groupName=discovery.k8s.io -package v1beta1 // import "k8s.io/api/discovery/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/tools/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go index 46935574b..de3257786 100644 --- a/tools/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go +++ b/tools/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go @@ -214,10 +214,38 @@ func (m *EndpointSliceList) XXX_DiscardUnknown() { var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo +func (m *ForNode) Reset() { *m = ForNode{} } +func (*ForNode) ProtoMessage() {} +func (*ForNode) Descriptor() ([]byte, []int) { + return fileDescriptor_6555bad15de200e0, []int{6} +} +func (m *ForNode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ForNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ForNode) XXX_Merge(src proto.Message) { + xxx_messageInfo_ForNode.Merge(m, src) +} +func (m *ForNode) XXX_Size() int { + return m.Size() +} +func (m *ForNode) XXX_DiscardUnknown() { + xxx_messageInfo_ForNode.DiscardUnknown(m) +} + +var xxx_messageInfo_ForNode proto.InternalMessageInfo + func (m *ForZone) Reset() { *m = ForZone{} } func (*ForZone) ProtoMessage() {} func (*ForZone) Descriptor() ([]byte, []int) { - return fileDescriptor_6555bad15de200e0, []int{6} + return fileDescriptor_6555bad15de200e0, []int{7} } func (m *ForZone) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -250,6 +278,7 @@ func init() { proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1beta1.EndpointPort") proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1beta1.EndpointSlice") proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1beta1.EndpointSliceList") + proto.RegisterType((*ForNode)(nil), "k8s.io.api.discovery.v1beta1.ForNode") proto.RegisterType((*ForZone)(nil), "k8s.io.api.discovery.v1beta1.ForZone") } @@ -258,61 +287,62 @@ func init() { } var fileDescriptor_6555bad15de200e0 = []byte{ - // 857 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xe4, 0x34, - 0x14, 0x9f, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d, + // 877 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4f, 0x6f, 0xe4, 0x34, + 0x1c, 0x9d, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d, 0x1a, 0x51, 0x48, 0x68, 0xb5, 0x42, 0x2b, 0x38, 0x35, 0xb0, 0xb0, 0x48, 0xcb, 0x6e, 0xe5, 0x56, 0x42, 0x5a, 0x71, 0xc0, 0x93, 0xb8, 0x19, 0xd3, 0x26, 0x8e, 0x62, 0x77, 0xa4, 0xb9, 0xf1, 0x0d, - 0xe0, 0xb3, 0xf0, 0x15, 0x90, 0x50, 0x8f, 0x7b, 0xdc, 0x53, 0xc4, 0x84, 0x6f, 0xb1, 0x27, 0x64, - 0xc7, 0xf9, 0x33, 0x0c, 0x94, 0xb9, 0xc5, 0x3f, 0xbf, 0xdf, 0xef, 0xbd, 0xf7, 0x7b, 0xb6, 0x03, - 0x3e, 0xbe, 0x7e, 0xc2, 0x7d, 0xca, 0x02, 0x9c, 0xd3, 0x20, 0xa6, 0x3c, 0x62, 0x0b, 0x52, 0x2c, - 0x83, 0xc5, 0xc9, 0x8c, 0x08, 0x7c, 
0x12, 0x24, 0x24, 0x23, 0x05, 0x16, 0x24, 0xf6, 0xf3, 0x82, - 0x09, 0x06, 0x8f, 0xea, 0x68, 0x1f, 0xe7, 0xd4, 0x6f, 0xa3, 0x7d, 0x1d, 0x7d, 0xf8, 0x49, 0x42, - 0xc5, 0xfc, 0x76, 0xe6, 0x47, 0x2c, 0x0d, 0x12, 0x96, 0xb0, 0x40, 0x91, 0x66, 0xb7, 0x57, 0x6a, - 0xa5, 0x16, 0xea, 0xab, 0x16, 0x3b, 0xf4, 0x7a, 0xa9, 0x23, 0x56, 0x90, 0x60, 0xb1, 0x91, 0xf0, - 0xf0, 0x71, 0x17, 0x93, 0xe2, 0x68, 0x4e, 0x33, 0x59, 0x5d, 0x7e, 0x9d, 0x48, 0x80, 0x07, 0x29, - 0x11, 0xf8, 0xdf, 0x58, 0xc1, 0x7f, 0xb1, 0x8a, 0xdb, 0x4c, 0xd0, 0x94, 0x6c, 0x10, 0x3e, 0xfb, - 0x3f, 0x02, 0x8f, 0xe6, 0x24, 0xc5, 0xff, 0xe4, 0x79, 0xbf, 0xed, 0x02, 0xeb, 0x69, 0x16, 0xe7, - 0x8c, 0x66, 0x02, 0x1e, 0x03, 0x1b, 0xc7, 0x71, 0x41, 0x38, 0x27, 0x7c, 0x6c, 0x4c, 0x86, 0x53, - 0x3b, 0x3c, 0xa8, 0x4a, 0xd7, 0x3e, 0x6b, 0x40, 0xd4, 0xed, 0xc3, 0x18, 0x80, 0x88, 0x65, 0x31, - 0x15, 0x94, 0x65, 0x7c, 0xbc, 0x33, 0x31, 0xa6, 0xa3, 0xd3, 0x4f, 0xfd, 0xfb, 0xec, 0xf5, 0x9b, - 0x44, 0x5f, 0xb6, 0xbc, 0x10, 0xde, 0x95, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xa7, 0x0b, - 0xa7, 0xc0, 0x9a, 0x33, 0x2e, 0x32, 0x9c, 0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xbf, 0x2a, - 0x5d, 0xeb, 0x99, 0xc6, 0x50, 0xbb, 0x0b, 0xcf, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xd5, - 0x78, 0x57, 0x95, 0xf3, 0x41, 0xbf, 0x1c, 0x39, 0x20, 0x7f, 0x71, 0xe2, 0xbf, 0x9c, 0xfd, 0x44, - 0x22, 0x19, 0x44, 0x0a, 0x92, 0x45, 0xa4, 0xee, 0xf0, 0xb2, 0x61, 0xa2, 0x4e, 0x04, 0xce, 0x80, - 0x25, 0x58, 0xce, 0x6e, 0x58, 0xb2, 0x1c, 0x9b, 0x93, 0xe1, 0x74, 0x74, 0xfa, 0x78, 0xbb, 0xfe, - 0xfc, 0x4b, 0x4d, 0x7b, 0x9a, 0x89, 0x62, 0x19, 0x3e, 0xd0, 0x3d, 0x5a, 0x0d, 0x8c, 0x5a, 0x5d, - 0xd9, 0x5f, 0xc6, 0x62, 0xf2, 0x42, 0xf6, 0xf7, 0x4e, 0xd7, 0xdf, 0x0b, 0x8d, 0xa1, 0x76, 0x17, - 0x3e, 0x07, 0xe6, 0x9c, 0x66, 0x82, 0x8f, 0xf7, 0x54, 0x6f, 0xc7, 0xdb, 0x95, 0xf2, 0x4c, 0x52, - 0x42, 0xbb, 0x2a, 0x5d, 0x53, 0x7d, 0xa2, 0x5a, 0xe4, 0xf0, 0x0b, 0x70, 0xb0, 0x56, 0x24, 0x7c, - 0x00, 0x86, 0xd7, 0x64, 0x39, 0x36, 0x64, 0x0d, 0x48, 0x7e, 0xc2, 0xf7, 0x80, 0xb9, 0xc0, 0x37, - 0xb7, 0x44, 0xcd, 0xd6, 0x46, 0xf5, 0xe2, 0xf3, 0x9d, 0x27, 0x86, 0xf7, 0x8b, 0x01, 0xe0, 0xe6, - 0x2c, 0xa1, 0x0b, 0xcc, 0x82, 0xe0, 0xb8, 0x16, 0xb1, 0xea, 0xa4, 0x48, 0x02, 0xa8, 0xc6, 0xe1, - 0x23, 0xb0, 0xc7, 0x49, 0xb1, 0xa0, 0x59, 0xa2, 0x34, 0xad, 0x70, 0x54, 0x95, 0xee, 0xde, 0x45, - 0x0d, 0xa1, 0x66, 0x0f, 0x9e, 0x80, 0x91, 0x20, 0x45, 0x4a, 0x33, 0x2c, 0x64, 0xe8, 0x50, 0x85, - 0xbe, 0x5b, 0x95, 0xee, 0xe8, 0xb2, 0x83, 0x51, 0x3f, 0xc6, 0x8b, 0xc1, 0xc1, 0x5a, 0xc7, 0xf0, - 0x02, 0x58, 0x57, 0xac, 0x78, 0xc5, 0x32, 0x7d, 0x92, 0x47, 0xa7, 0x8f, 0xee, 0x37, 0xec, 0xeb, - 0x3a, 0xba, 0x1b, 0x96, 0x06, 0x38, 0x6a, 0x85, 0xbc, 0x3f, 0x0c, 0xb0, 0xdf, 0xa4, 0x39, 0x67, - 0x85, 0x80, 0x47, 0x60, 0x57, 0x9d, 0x4c, 0xe5, 0x5a, 0x68, 0x55, 0xa5, 0xbb, 0xab, 0xa6, 0xa6, - 0x50, 0xf8, 0x0d, 0xb0, 0xd4, 0x25, 0x8b, 0xd8, 0x4d, 0xed, 0x61, 0x78, 0x2c, 0x85, 0xcf, 0x35, - 0xf6, 0xb6, 0x74, 0xdf, 0xdf, 0x7c, 0x40, 0xfc, 0x66, 0x1b, 0xb5, 0x64, 0x99, 0x26, 0x67, 0x85, - 0x50, 0x4e, 0x98, 0x75, 0x1a, 0x99, 0x1e, 0x29, 0x54, 0xda, 0x85, 0xf3, 0xbc, 0xa1, 0xa9, 0xa3, - 0x6f, 0xd7, 0x76, 0x9d, 0x75, 0x30, 0xea, 0xc7, 0x78, 0xab, 0x9d, 0xce, 0xaf, 0x8b, 0x1b, 0x1a, - 0x11, 0xf8, 0x23, 0xb0, 0xe4, 0x5b, 0x14, 0x63, 0x81, 0x55, 0x37, 0xeb, 0x77, 0xb9, 0x7d, 0x52, - 0xfc, 0xfc, 0x3a, 0x91, 0x00, 0xf7, 0x65, 0x74, 0x77, 0x9d, 0xbe, 0x23, 0x02, 0x77, 0x77, 0xb9, - 0xc3, 0x50, 0xab, 0x0a, 0xbf, 0x02, 0x23, 0xfd, 0x78, 0x5c, 0x2e, 0x73, 0xa2, 0xcb, 0xf4, 0x34, - 0x65, 0x74, 0xd6, 0x6d, 0xbd, 0x5d, 0x5f, 0xa2, 0x3e, 0x0d, 
0x7e, 0x0f, 0x6c, 0xa2, 0x0b, 0x97, - 0x8f, 0x8e, 0x1c, 0xec, 0x87, 0xdb, 0xdd, 0x84, 0xf0, 0xa1, 0xce, 0x65, 0x37, 0x08, 0x47, 0x9d, - 0x16, 0x7c, 0x09, 0x4c, 0xe9, 0x26, 0x1f, 0x0f, 0x95, 0xe8, 0x47, 0xdb, 0x89, 0xca, 0x31, 0x84, - 0x07, 0x5a, 0xd8, 0x94, 0x2b, 0x8e, 0x6a, 0x1d, 0xef, 0x77, 0x03, 0x3c, 0x5c, 0xf3, 0xf8, 0x39, - 0xe5, 0x02, 0xfe, 0xb0, 0xe1, 0xb3, 0xbf, 0x9d, 0xcf, 0x92, 0xad, 0x5c, 0x6e, 0x0f, 0x68, 0x83, - 0xf4, 0x3c, 0x3e, 0x07, 0x26, 0x15, 0x24, 0x6d, 0x9c, 0xd9, 0xf2, 0x8d, 0x50, 0xd5, 0x75, 0x5d, - 0x7c, 0x2b, 0x15, 0x50, 0x2d, 0xe4, 0x1d, 0x83, 0x3d, 0x7d, 0x11, 0xe0, 0x64, 0xed, 0xb0, 0xef, - 0xeb, 0xf0, 0xde, 0x81, 0x0f, 0xc3, 0xbb, 0x95, 0x33, 0x78, 0xbd, 0x72, 0x06, 0x6f, 0x56, 0xce, - 0xe0, 0xe7, 0xca, 0x31, 0xee, 0x2a, 0xc7, 0x78, 0x5d, 0x39, 0xc6, 0x9b, 0xca, 0x31, 0xfe, 0xac, - 0x1c, 0xe3, 0xd7, 0xbf, 0x9c, 0xc1, 0xab, 0xa3, 0xfb, 0x7e, 0xd8, 0x7f, 0x07, 0x00, 0x00, 0xff, - 0xff, 0x1c, 0xe6, 0x20, 0x06, 0xcf, 0x07, 0x00, 0x00, + 0xe0, 0xb3, 0x70, 0xe3, 0x8c, 0x84, 0x7a, 0xdc, 0xe3, 0x9e, 0x22, 0x1a, 0xbe, 0xc5, 0x9e, 0x90, + 0x1d, 0xe7, 0xcf, 0x30, 0xd0, 0xce, 0x2d, 0x7e, 0x7e, 0xef, 0xfd, 0xfe, 0xd9, 0x56, 0xc0, 0xc7, + 0x97, 0x4f, 0xb8, 0x4f, 0x59, 0x80, 0x73, 0x1a, 0xc4, 0x94, 0x47, 0x6c, 0x41, 0x8a, 0x65, 0xb0, + 0x38, 0x9a, 0x11, 0x81, 0x8f, 0x82, 0x84, 0x64, 0xa4, 0xc0, 0x82, 0xc4, 0x7e, 0x5e, 0x30, 0xc1, + 0xe0, 0x41, 0xcd, 0xf6, 0x71, 0x4e, 0xfd, 0x96, 0xed, 0x6b, 0xf6, 0xfe, 0x27, 0x09, 0x15, 0xf3, + 0xeb, 0x99, 0x1f, 0xb1, 0x34, 0x48, 0x58, 0xc2, 0x02, 0x25, 0x9a, 0x5d, 0x5f, 0xa8, 0x95, 0x5a, + 0xa8, 0xaf, 0xda, 0x6c, 0xdf, 0xeb, 0x85, 0x8e, 0x58, 0x41, 0x82, 0xc5, 0x5a, 0xc0, 0xfd, 0xc7, + 0x1d, 0x27, 0xc5, 0xd1, 0x9c, 0x66, 0x32, 0xbb, 0xfc, 0x32, 0x91, 0x00, 0x0f, 0x52, 0x22, 0xf0, + 0x7f, 0xa9, 0x82, 0xff, 0x53, 0x15, 0xd7, 0x99, 0xa0, 0x29, 0x59, 0x13, 0x7c, 0x76, 0x9f, 0x80, + 0x47, 0x73, 0x92, 0xe2, 0x7f, 0xeb, 0xbc, 0xdf, 0xb6, 0x81, 0xf5, 0x34, 0x8b, 0x73, 0x46, 0x33, + 0x01, 0x0f, 0x81, 0x8d, 0xe3, 0xb8, 0x20, 0x9c, 0x13, 0x3e, 0x36, 0x26, 0xc3, 0xa9, 0x1d, 0xee, + 0x55, 0xa5, 0x6b, 0x9f, 0x34, 0x20, 0xea, 0xf6, 0x61, 0x0c, 0x40, 0xc4, 0xb2, 0x98, 0x0a, 0xca, + 0x32, 0x3e, 0xde, 0x9a, 0x18, 0xd3, 0xd1, 0xf1, 0xa7, 0xfe, 0x5d, 0xed, 0xf5, 0x9b, 0x40, 0x5f, + 0xb6, 0xba, 0x10, 0xde, 0x94, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xe7, 0x0b, 0xa7, 0xc0, + 0x9a, 0x33, 0x2e, 0x32, 0x9c, 0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xb7, 0x2a, 0x5d, 0xeb, + 0x99, 0xc6, 0x50, 0xbb, 0x0b, 0x4f, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xc5, 0x78, 0x5b, + 0xa5, 0xf3, 0x41, 0x3f, 0x1d, 0x39, 0x20, 0x7f, 0x71, 0xe4, 0xbf, 0x9c, 0xfd, 0x44, 0x22, 0x49, + 0x22, 0x05, 0xc9, 0x22, 0x52, 0x57, 0x78, 0xde, 0x28, 0x51, 0x67, 0x02, 0x67, 0xc0, 0x12, 0x2c, + 0x67, 0x57, 0x2c, 0x59, 0x8e, 0xcd, 0xc9, 0x70, 0x3a, 0x3a, 0x7e, 0xbc, 0x59, 0x7d, 0xfe, 0xb9, + 0x96, 0x3d, 0xcd, 0x44, 0xb1, 0x0c, 0x1f, 0xe8, 0x1a, 0xad, 0x06, 0x46, 0xad, 0xaf, 0xac, 0x2f, + 0x63, 0x31, 0x79, 0x21, 0xeb, 0x7b, 0xa7, 0xab, 0xef, 0x85, 0xc6, 0x50, 0xbb, 0x0b, 0x9f, 0x03, + 0x73, 0x4e, 0x33, 0xc1, 0xc7, 0x3b, 0xaa, 0xb6, 0xc3, 0xcd, 0x52, 0x79, 0x26, 0x25, 0xa1, 0x5d, + 0x95, 0xae, 0xa9, 0x3e, 0x51, 0x6d, 0xb2, 0xff, 0x05, 0xd8, 0x5b, 0x49, 0x12, 0x3e, 0x00, 0xc3, + 0x4b, 0xb2, 0x1c, 0x1b, 0x32, 0x07, 0x24, 0x3f, 0xe1, 0x7b, 0xc0, 0x5c, 0xe0, 0xab, 0x6b, 0xa2, + 0x66, 0x6b, 0xa3, 0x7a, 0xf1, 0xf9, 0xd6, 0x13, 0xc3, 0xfb, 0xc5, 0x00, 0x70, 0x7d, 0x96, 0xd0, + 0x05, 0x66, 0x41, 0x70, 0x5c, 0x9b, 0x58, 0x75, 0x50, 0x24, 0x01, 0x54, 0xe3, 0xf0, 0x11, 0xd8, + 0xe1, 0xa4, 0x58, 0xd0, 0x2c, 
0x51, 0x9e, 0x56, 0x38, 0xaa, 0x4a, 0x77, 0xe7, 0xac, 0x86, 0x50, + 0xb3, 0x07, 0x8f, 0xc0, 0x48, 0x90, 0x22, 0xa5, 0x19, 0x16, 0x92, 0x3a, 0x54, 0xd4, 0x77, 0xab, + 0xd2, 0x1d, 0x9d, 0x77, 0x30, 0xea, 0x73, 0xbc, 0xdf, 0x0d, 0xb0, 0xb7, 0x52, 0x32, 0x3c, 0x03, + 0xd6, 0x05, 0x2b, 0x5e, 0xb1, 0x4c, 0x1f, 0xe5, 0xd1, 0xf1, 0xa3, 0xbb, 0x3b, 0xf6, 0x75, 0xcd, + 0xee, 0xa6, 0xa5, 0x01, 0x8e, 0x5a, 0x23, 0x6d, 0x2a, 0x87, 0x23, 0x4f, 0xfc, 0x66, 0xa6, 0x92, + 0xbd, 0x62, 0xaa, 0xe4, 0xa8, 0x35, 0xf2, 0xfe, 0x34, 0xc0, 0x6e, 0x93, 0xfb, 0x29, 0x2b, 0x04, + 0x3c, 0x00, 0xdb, 0xea, 0xbc, 0xab, 0x59, 0x84, 0x56, 0x55, 0xba, 0xdb, 0xea, 0x2c, 0x28, 0x14, + 0x7e, 0x03, 0x2c, 0x75, 0x75, 0x23, 0x76, 0x55, 0x4f, 0x26, 0x3c, 0x94, 0xc6, 0xa7, 0x1a, 0x7b, + 0x5b, 0xba, 0xef, 0xaf, 0x3f, 0x4b, 0x7e, 0xb3, 0x8d, 0x5a, 0xb1, 0x0c, 0x93, 0xb3, 0x42, 0xa8, + 0xfe, 0x9a, 0x75, 0x18, 0x19, 0x1e, 0x29, 0x54, 0x0e, 0x01, 0xe7, 0x79, 0x23, 0x53, 0x17, 0xca, + 0xae, 0x87, 0x70, 0xd2, 0xc1, 0xa8, 0xcf, 0xf1, 0x6e, 0xb7, 0xba, 0x21, 0x9c, 0x5d, 0xd1, 0x88, + 0xc0, 0x1f, 0x81, 0x25, 0x5f, 0xb8, 0x18, 0x0b, 0xac, 0xaa, 0x59, 0x7d, 0x21, 0xda, 0x87, 0xca, + 0xcf, 0x2f, 0x13, 0x09, 0x70, 0x5f, 0xb2, 0xbb, 0x4b, 0xfa, 0x1d, 0x11, 0xb8, 0x7b, 0x21, 0x3a, + 0x0c, 0xb5, 0xae, 0xf0, 0x2b, 0x30, 0xd2, 0x4f, 0xd2, 0xf9, 0x32, 0x27, 0x3a, 0x4d, 0x4f, 0x4b, + 0x46, 0x27, 0xdd, 0xd6, 0xdb, 0xd5, 0x25, 0xea, 0xcb, 0xe0, 0xf7, 0xc0, 0x26, 0x3a, 0xf1, 0x66, + 0xb0, 0x1f, 0x6e, 0x76, 0xbf, 0xc2, 0x87, 0x3a, 0x96, 0xdd, 0x20, 0x1c, 0x75, 0x5e, 0xf0, 0x25, + 0x30, 0x65, 0x37, 0xf9, 0x78, 0xa8, 0x4c, 0x3f, 0xda, 0xcc, 0x54, 0x8e, 0x21, 0xdc, 0xd3, 0xc6, + 0xa6, 0x5c, 0x71, 0x54, 0xfb, 0x78, 0x7f, 0x18, 0xe0, 0xe1, 0x4a, 0x8f, 0x9f, 0x53, 0x2e, 0xe0, + 0x0f, 0x6b, 0x7d, 0xf6, 0x37, 0xeb, 0xb3, 0x54, 0xab, 0x2e, 0xb7, 0x07, 0xb4, 0x41, 0x7a, 0x3d, + 0x3e, 0x05, 0x26, 0x15, 0x24, 0x6d, 0x3a, 0xb3, 0xe1, 0xcb, 0xa3, 0xb2, 0xeb, 0xaa, 0xf8, 0x56, + 0x3a, 0xa0, 0xda, 0xc8, 0x3b, 0x04, 0x3b, 0xfa, 0x22, 0xc0, 0xc9, 0xca, 0x61, 0xdf, 0xd5, 0xf4, + 0xde, 0x81, 0xd7, 0x64, 0x79, 0x01, 0xef, 0x27, 0x87, 0xe1, 0xcd, 0xad, 0x33, 0x78, 0x7d, 0xeb, + 0x0c, 0xde, 0xdc, 0x3a, 0x83, 0x9f, 0x2b, 0xc7, 0xb8, 0xa9, 0x1c, 0xe3, 0x75, 0xe5, 0x18, 0x6f, + 0x2a, 0xc7, 0xf8, 0xab, 0x72, 0x8c, 0x5f, 0xff, 0x76, 0x06, 0xaf, 0x0e, 0xee, 0xfa, 0x67, 0xf8, + 0x27, 0x00, 0x00, 0xff, 0xff, 0x76, 0x8e, 0x48, 0x7e, 0x52, 0x08, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { @@ -492,6 +522,20 @@ func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ForNodes) > 0 { + for iNdEx := len(m.ForNodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ForNodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } if len(m.ForZones) > 0 { for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- { { @@ -671,6 +715,34 @@ func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ForNode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ForNode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ForNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + 
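+	// The generated marshal fills dAtA back-to-front: i starts at
+	// len(dAtA) and is decremented before each write, so the name bytes,
+	// their varint length, and the tag byte 0xa ((1<<3)|2: field 1,
+	// wire type 2) land in wire order when the buffer is read
+	// front-to-back.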
copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ForZone) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -781,6 +853,12 @@ func (m *EndpointHints) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.ForNodes) > 0 { + for _, e := range m.ForNodes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -850,6 +928,17 @@ func (m *EndpointSliceList) Size() (n int) { return n } +func (m *ForNode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ForZone) Size() (n int) { if m == nil { return 0 @@ -914,8 +1003,14 @@ func (this *EndpointHints) String() string { repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + "," } repeatedStringForForZones += "}" + repeatedStringForForNodes := "[]ForNode{" + for _, f := range this.ForNodes { + repeatedStringForForNodes += strings.Replace(strings.Replace(f.String(), "ForNode", "ForNode", 1), `&`, ``, 1) + "," + } + repeatedStringForForNodes += "}" s := strings.Join([]string{`&EndpointHints{`, `ForZones:` + repeatedStringForForZones + `,`, + `ForNodes:` + repeatedStringForForNodes + `,`, `}`, }, "") return s @@ -972,6 +1067,16 @@ func (this *EndpointSliceList) String() string { }, "") return s } +func (this *ForNode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ForNode{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *ForZone) String() string { if this == nil { return "nil" @@ -1546,6 +1651,40 @@ func (m *EndpointHints) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForNodes = append(m.ForNodes, ForNode{}) + if err := m.ForNodes[len(m.ForNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2036,6 +2175,88 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { } return nil } +func (m *ForNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ForZone) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/tools/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/tools/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 55828dd97..907050da1 100644 --- a/tools/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/tools/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -114,6 +114,13 @@ message EndpointHints { // enable topology aware routing. May contain a maximum of 8 entries. // +listType=atomic repeated ForZone forZones = 1; + + // forNodes indicates the node(s) this endpoint should be consumed by when + // using topology aware routing. May contain a maximum of 8 entries. + // This is an Alpha feature and is only used when the PreferSameTrafficDistribution + // feature gate is enabled. + // +listType=atomic + repeated ForNode forNodes = 2; } // EndpointPort represents a Port used by an EndpointSlice @@ -189,6 +196,12 @@ message EndpointSliceList { repeated EndpointSlice items = 2; } +// ForNode provides information about which nodes should consume this endpoint. +message ForNode { + // name represents the name of the node. + optional string name = 1; +} + // ForZone provides information about which zones should consume this endpoint. message ForZone { // name represents the name of the zone. diff --git a/tools/vendor/k8s.io/api/discovery/v1beta1/types.go b/tools/vendor/k8s.io/api/discovery/v1beta1/types.go index defd8e2ce..fa9d1eae4 100644 --- a/tools/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/tools/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -161,6 +161,13 @@ type EndpointHints struct { // enable topology aware routing. May contain a maximum of 8 entries. // +listType=atomic ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"` + + // forNodes indicates the node(s) this endpoint should be consumed by when + // using topology aware routing. May contain a maximum of 8 entries. + // This is an Alpha feature and is only used when the PreferSameTrafficDistribution + // feature gate is enabled. + // +listType=atomic + ForNodes []ForNode `json:"forNodes,omitempty" protobuf:"bytes,2,name=forNodes"` } // ForZone provides information about which zones should consume this endpoint. @@ -169,6 +176,12 @@ type ForZone struct { Name string `json:"name" protobuf:"bytes,1,name=name"` } +// ForNode provides information about which nodes should consume this endpoint. +type ForNode struct { + // name represents the name of the node. 
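+	// For illustration only (not upstream documentation): a slice
+	// producer hinting this endpoint at a node "node-a" would append
+	// ForNode{Name: "node-a"} to EndpointHints.ForNodes, subject to the
+	// 8-entry cap noted on that field.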
+ Name string `json:"name" protobuf:"bytes,1,name=name"` +} + // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. diff --git a/tools/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index 847d4d58e..72aa0cb9b 100644 --- a/tools/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -56,6 +56,7 @@ func (EndpointConditions) SwaggerDoc() map[string]string { var map_EndpointHints = map[string]string{ "": "EndpointHints provides hints describing how an endpoint should be consumed.", "forZones": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries.", + "forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled.", } func (EndpointHints) SwaggerDoc() map[string]string { @@ -96,6 +97,15 @@ func (EndpointSliceList) SwaggerDoc() map[string]string { return map_EndpointSliceList } +var map_ForNode = map[string]string{ + "": "ForNode provides information about which nodes should consume this endpoint.", + "name": "name represents the name of the node.", +} + +func (ForNode) SwaggerDoc() map[string]string { + return map_ForNode +} + var map_ForZone = map[string]string{ "": "ForZone provides information about which zones should consume this endpoint.", "name": "name represents the name of the zone.", diff --git a/tools/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go index 13b9544b0..72490d6ad 100644 --- a/tools/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go @@ -114,6 +114,11 @@ func (in *EndpointHints) DeepCopyInto(out *EndpointHints) { *out = make([]ForZone, len(*in)) copy(*out, *in) } + if in.ForNodes != nil { + in, out := &in.ForNodes, &out.ForNodes + *out = make([]ForNode, len(*in)) + copy(*out, *in) + } return } @@ -236,6 +241,22 @@ func (in *EndpointSliceList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForNode) DeepCopyInto(out *ForNode) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForNode. +func (in *ForNode) DeepCopy() *ForNode { + if in == nil { + return nil + } + out := new(ForNode) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ForZone) DeepCopyInto(out *ForZone) { *out = *in diff --git a/tools/vendor/k8s.io/api/events/v1/doc.go b/tools/vendor/k8s.io/api/events/v1/doc.go index 5fe700ffc..911639044 100644 --- a/tools/vendor/k8s.io/api/events/v1/doc.go +++ b/tools/vendor/k8s.io/api/events/v1/doc.go @@ -20,4 +20,4 @@ limitations under the License. 
// +k8s:prerelease-lifecycle-gen=true // +groupName=events.k8s.io -package v1 // import "k8s.io/api/events/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/events/v1beta1/doc.go b/tools/vendor/k8s.io/api/events/v1beta1/doc.go index 46048a65b..e4864294f 100644 --- a/tools/vendor/k8s.io/api/events/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/events/v1beta1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=events.k8s.io -package v1beta1 // import "k8s.io/api/events/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/extensions/v1beta1/doc.go b/tools/vendor/k8s.io/api/extensions/v1beta1/doc.go index c9af49d55..7770fab5d 100644 --- a/tools/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -19,4 +19,4 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:prerelease-lifecycle-gen=true -package v1beta1 // import "k8s.io/api/extensions/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/tools/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 818486f39..35b9a4ff2 100644 --- a/tools/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/tools/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -1364,185 +1364,187 @@ func init() { } var fileDescriptor_90a532284de28347 = []byte{ - // 2842 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0x2f, 0xb1, 0xa3, - 0x46, 0x84, 0x4d, 0xd8, 0x9d, 0x61, 0x37, 0xc9, 0x92, 0x0f, 0x29, 0x61, 0xc7, 0xbb, 0xc9, 0x3a, - 0xb1, 0xc7, 0x93, 0x9a, 0x71, 0x82, 0x22, 0x02, 0xb4, 0x7b, 0xca, 0xe3, 0x8e, 0x7b, 0xba, 0x47, - 0xdd, 0x35, 0x66, 0x7d, 0x03, 0xc1, 0x25, 0x27, 0xb8, 0x04, 0x38, 0x22, 0x21, 0x71, 0xe5, 0xca, - 0x21, 0x44, 0x20, 0x82, 0xb4, 0x42, 0x1c, 0x22, 0x71, 0x20, 0x27, 0x8b, 0x38, 0x27, 0xc4, 0x3f, - 0x80, 0xf6, 0x84, 0xea, 0xa3, 0xab, 0xbf, 0xed, 0x1e, 0xe3, 0x58, 0x04, 0x71, 0x5a, 0x4f, 0xbd, - 0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0xeb, 0xbb, 0xcf, 0xf9, 0x35, - 0xcb, 0xad, 0x1b, 0x03, 0xab, 0x4e, 0xee, 0x53, 0xe2, 0xf8, 0x96, 0xeb, 0xf8, 0xf5, 0xbd, 0x1b, - 0x5b, 0x84, 0x1a, 0x37, 0xea, 0x3d, 0xe2, 0x10, 0xcf, 0xa0, 0xa4, 0x5b, 0x1b, 0x78, 0x2e, 0x75, - 0xd1, 0x63, 0x42, 0xbd, 0x66, 0x0c, 0xac, 0x5a, 0xa8, 0x5e, 0x93, 0xea, 0x8b, 0xd7, 0x7b, 0x16, - 0xdd, 0x19, 0x6e, 0xd5, 0x4c, 0xb7, 0x5f, 0xef, 0xb9, 0x3d, 0xb7, 0xce, 0xad, 0xb6, 0x86, 0xdb, - 0xfc, 0x17, 0xff, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0x7a, 0xa4, 0x73, 0xd3, 0xf5, 0x48, 0x7d, 0x2f, - 0xd5, 0xe3, 0xe2, 0x33, 0xa1, 0x4e, 0xdf, 0x30, 0x77, 0x2c, 0x87, 0x78, 0xfb, 0xf5, 0xc1, 0x6e, - 0x8f, 0x35, 0xf8, 0xf5, 0x3e, 0xa1, 0x46, 0x96, 0x55, 0x3d, 0xcf, 0xca, 0x1b, 0x3a, 0xd4, 0xea, - 0x93, 0x94, 0xc1, 0xad, 0xe3, 0x0c, 0x7c, 0x73, 0x87, 0xf4, 0x8d, 0x94, 0xdd, 0xd3, 0x79, 0x76, - 0x43, 0x6a, 0xd9, 0x75, 0xcb, 0xa1, 0x3e, 0xf5, 0x92, 0x46, 0xfa, 0xfb, 0x25, 0x98, 0xbc, 0x63, - 0x90, 0xbe, 0xeb, 0xb4, 0x09, 0x45, 0xdf, 0x83, 0x2a, 0x1b, 0x46, 0xd7, 0xa0, 0xc6, 0x82, 0xf6, - 0xb8, 0x76, 0x75, 0xea, 0xe6, 0xd7, 0x6b, 0xe1, 0x34, 0x2b, 0xd4, 0xda, 0x60, 0xb7, 0xc7, 0x1a, - 0xfc, 0x1a, 0xd3, 0xae, 0xed, 0xdd, 0xa8, 0x6d, 0x6c, 0xbd, 0x4b, 0x4c, 0xba, 0x4e, 0xa8, 0xd1, - 0x40, 0x0f, 0x0e, 0x96, 0xcf, 0x1d, 0x1e, 0x2c, 0x43, 0xd8, 0x86, 0x15, 0x2a, 0x6a, 0xc2, 0x98, - 0x3f, 0x20, 0xe6, 0x42, 0x89, 0xa3, 0x5f, 0xab, 0x1d, 0xb9, 0x88, 0x35, 0xe5, 0x59, 0x7b, 
0x40, - 0xcc, 0xc6, 0x79, 0x89, 0x3c, 0xc6, 0x7e, 0x61, 0x8e, 0x83, 0xde, 0x84, 0x71, 0x9f, 0x1a, 0x74, - 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, 0x1b, - 0x4b, 0x34, 0xfd, 0x1f, 0x25, 0x40, 0x4a, 0x77, 0xc5, 0x75, 0xba, 0x16, 0xb5, 0x5c, 0x07, 0xbd, - 0x00, 0x63, 0x74, 0x7f, 0x40, 0xf8, 0xe4, 0x4c, 0x36, 0x9e, 0x08, 0x1c, 0xea, 0xec, 0x0f, 0xc8, - 0xc3, 0x83, 0xe5, 0xcb, 0x69, 0x0b, 0x26, 0xc1, 0xdc, 0x06, 0xad, 0x29, 0x57, 0x4b, 0xdc, 0xfa, - 0x99, 0x78, 0xd7, 0x0f, 0x0f, 0x96, 0x33, 0x82, 0xb0, 0xa6, 0x90, 0xe2, 0x0e, 0xa2, 0x3d, 0x40, - 0xb6, 0xe1, 0xd3, 0x8e, 0x67, 0x38, 0xbe, 0xe8, 0xc9, 0xea, 0x13, 0x39, 0x09, 0x4f, 0x15, 0x5b, - 0x34, 0x66, 0xd1, 0x58, 0x94, 0x5e, 0xa0, 0xb5, 0x14, 0x1a, 0xce, 0xe8, 0x01, 0x3d, 0x01, 0xe3, - 0x1e, 0x31, 0x7c, 0xd7, 0x59, 0x18, 0xe3, 0xa3, 0x50, 0x13, 0x88, 0x79, 0x2b, 0x96, 0x52, 0xf4, - 0x24, 0x4c, 0xf4, 0x89, 0xef, 0x1b, 0x3d, 0xb2, 0x50, 0xe1, 0x8a, 0xb3, 0x52, 0x71, 0x62, 0x5d, - 0x34, 0xe3, 0x40, 0xae, 0x7f, 0xa0, 0xc1, 0xb4, 0x9a, 0xb9, 0x35, 0xcb, 0xa7, 0xe8, 0xdb, 0xa9, - 0x38, 0xac, 0x15, 0x1b, 0x12, 0xb3, 0xe6, 0x51, 0x78, 0x41, 0xf6, 0x56, 0x0d, 0x5a, 0x22, 0x31, - 0xb8, 0x0e, 0x15, 0x8b, 0x92, 0x3e, 0x5b, 0x87, 0xf2, 0xd5, 0xa9, 0x9b, 0x57, 0x8b, 0x86, 0x4c, - 0x63, 0x5a, 0x82, 0x56, 0x56, 0x99, 0x39, 0x16, 0x28, 0xfa, 0xcf, 0xc6, 0x22, 0xee, 0xb3, 0xd0, - 0x44, 0xef, 0x40, 0xd5, 0x27, 0x36, 0x31, 0xa9, 0xeb, 0x49, 0xf7, 0x9f, 0x2e, 0xe8, 0xbe, 0xb1, - 0x45, 0xec, 0xb6, 0x34, 0x6d, 0x9c, 0x67, 0xfe, 0x07, 0xbf, 0xb0, 0x82, 0x44, 0x6f, 0x40, 0x95, - 0x92, 0xfe, 0xc0, 0x36, 0x28, 0x91, 0xfb, 0xe8, 0xcb, 0xd1, 0x21, 0xb0, 0xc8, 0x61, 0x60, 0x2d, - 0xb7, 0xdb, 0x91, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0x7b, 0x30, 0x33, - 0x1c, 0x74, 0x99, 0x26, 0x65, 0xd9, 0xa1, 0xb7, 0x2f, 0x23, 0xe9, 0x56, 0xd1, 0xb9, 0xd9, 0x8c, - 0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3, 0x7d, 0xcb, - 0xc1, 0xc4, 0xe8, 0xee, 0xb7, 0x89, 0xe9, 0x3a, 0x5d, 0x9f, 0x87, 0x55, 0xa5, 0x31, 0x2f, 0x01, - 0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48, 0x6e, 0x96, - 0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x49, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06, 0x73, 0x1e, - 0xd9, 0xb3, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0xfd, 0x35, 0xab, 0x6f, 0xd1, 0x85, 0x71, - 0xee, 0xd3, 0xc2, 0xe1, 0xc1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0xe7, 0xe3, 0x30, - 0x9b, 0xc8, 0x37, 0xe8, 0x4d, 0xb8, 0x6c, 0x0e, 0x3d, 0x8f, 0x38, 0xb4, 0x39, 0xec, 0x6f, 0x11, - 0xaf, 0x6d, 0xee, 0x90, 0xee, 0xd0, 0x26, 0x5d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4, 0xf8, 0xf2, - 0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15, 0x66, 0x89, - 0x63, 0xaa, 0x59, 0x68, 0xa6, 0x34, 0x70, 0x86, 0x15, 0xf3, 0xb1, 0x4b, 0x7c, 0xcb, 0x23, 0xdd, - 0xa4, 0x8f, 0xe5, 0xb8, 0x8f, 0x77, 0x32, 0xb5, 0x70, 0x8e, 0x35, 0x7a, 0x16, 0xa6, 0x44, 0x6f, - 0x7c, 0xfd, 0xe4, 0x42, 0x5f, 0x92, 0x60, 0x53, 0xcd, 0x50, 0x84, 0xa3, 0x7a, 0x6c, 0x68, 0xee, - 0x96, 0x4f, 0xbc, 0x3d, 0xd2, 0xcd, 0x5f, 0xe0, 0x8d, 0x94, 0x06, 0xce, 0xb0, 0x62, 0x43, 0x13, - 0x11, 0x98, 0x1a, 0xda, 0x78, 0x7c, 0x68, 0x9b, 0x99, 0x5a, 0x38, 0xc7, 0x9a, 0xc5, 0xb1, 0x70, - 0xf9, 0xf6, 0x9e, 0x61, 0xd9, 0xc6, 0x96, 0x4d, 0x16, 0x26, 0xe2, 0x71, 0xdc, 0x8c, 0x8b, 0x71, - 0x52, 0x1f, 0xbd, 0x0a, 0x17, 0x45, 0xd3, 0xa6, 0x63, 0x28, 0x90, 0x2a, 0x07, 0x79, 0x54, 0x82, - 0x5c, 0x6c, 0x26, 0x15, 0x70, 0xda, 0x06, 0xbd, 0x00, 0x33, 0xa6, 0x6b, 0xdb, 0x3c, 0x1e, 0x57, - 0xdc, 0xa1, 0x43, 
0x17, 0x26, 0x39, 0x0a, 0x62, 0xfb, 0x71, 0x25, 0x26, 0xc1, 0x09, 0x4d, 0x44, - 0x00, 0xcc, 0xa0, 0xe0, 0xf8, 0x0b, 0xc0, 0xf3, 0xe3, 0x8d, 0xa2, 0x39, 0x40, 0x95, 0xaa, 0x90, - 0x03, 0xa8, 0x26, 0x1f, 0x47, 0x80, 0xf5, 0x3f, 0x6b, 0x30, 0x9f, 0x93, 0x3a, 0xd0, 0xcb, 0xb1, - 0x12, 0xfb, 0xb5, 0x44, 0x89, 0xbd, 0x92, 0x63, 0x16, 0xa9, 0xb3, 0x0e, 0x4c, 0x7b, 0x6c, 0x54, - 0x4e, 0x4f, 0xa8, 0xc8, 0x1c, 0xf9, 0xec, 0x31, 0xc3, 0xc0, 0x51, 0x9b, 0x30, 0xe7, 0x5f, 0x3c, - 0x3c, 0x58, 0x9e, 0x8e, 0xc9, 0x70, 0x1c, 0x5e, 0xff, 0x45, 0x09, 0xe0, 0x0e, 0x19, 0xd8, 0xee, - 0x7e, 0x9f, 0x38, 0x67, 0xc1, 0xa1, 0x36, 0x62, 0x1c, 0xea, 0xfa, 0x71, 0xcb, 0xa3, 0x5c, 0xcb, - 0x25, 0x51, 0x6f, 0x25, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 0x14, - 0x2a, 0x87, 0x34, 0xea, 0xc5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x9f, 0x1b, - 0x8f, 0x7a, 0x17, 0x66, 0x18, 0xcb, 0x11, 0x6b, 0xc9, 0x39, 0xd4, 0xf8, 0xc8, 0x1c, 0x4a, 0x55, - 0xbb, 0xb5, 0x18, 0x12, 0x4e, 0x20, 0xe7, 0x70, 0xb6, 0x89, 0x2f, 0x22, 0x67, 0xfb, 0x50, 0x83, - 0x99, 0x70, 0x99, 0xce, 0x80, 0xb4, 0x35, 0xe3, 0xa4, 0xed, 0xc9, 0xc2, 0x21, 0x9a, 0xc3, 0xda, - 0xfe, 0xc5, 0x08, 0xbe, 0x52, 0x62, 0x1b, 0x7c, 0xcb, 0x30, 0x77, 0xd1, 0xe3, 0x30, 0xe6, 0x18, - 0xfd, 0x20, 0x32, 0xd5, 0x66, 0x69, 0x1a, 0x7d, 0x82, 0xb9, 0x04, 0xbd, 0xaf, 0x01, 0x92, 0x55, - 0xe0, 0xb6, 0xe3, 0xb8, 0xd4, 0x10, 0xb9, 0x52, 0xb8, 0xb5, 0x5a, 0xd8, 0xad, 0xa0, 0xc7, 0xda, - 0x66, 0x0a, 0xeb, 0xae, 0x43, 0xbd, 0xfd, 0x70, 0x91, 0xd3, 0x0a, 0x38, 0xc3, 0x01, 0x64, 0x00, - 0x78, 0x12, 0xb3, 0xe3, 0xca, 0x8d, 0x7c, 0xbd, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d, - 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x85, 0xf9, 0x1c, 0x6f, 0xd1, 0x05, - 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x89, 0xe6, 0xa0, 0xb2, 0x67, 0xd8, 0x43, 0x91, - 0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x42, 0xe9, 0x39, 0x4d, 0xff, 0xa0, 0x12, 0x8d, 0x1d, 0xce, 0x98, - 0xaf, 0x42, 0xd5, 0x23, 0x03, 0xdb, 0x32, 0x0d, 0x5f, 0x12, 0x21, 0x4e, 0x7e, 0xb1, 0x6c, 0xc3, - 0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0xcf, 0x97, 0x5b, 0x97, 0x4f, 0x87, 0x5b, 0x7f, 0x17, 0xaa, 0x7e, - 0xc0, 0xaa, 0xc7, 0x38, 0xe4, 0x8d, 0x11, 0xf2, 0xab, 0x24, 0xd4, 0xaa, 0x03, 0x45, 0xa5, 0x15, - 0x68, 0x16, 0x89, 0xae, 0x8c, 0x48, 0xa2, 0x4f, 0x95, 0xf8, 0xb2, 0x7c, 0x33, 0x30, 0x86, 0x3e, - 0xe9, 0xf2, 0xdc, 0x56, 0x0d, 0xf3, 0x4d, 0x8b, 0xb7, 0x62, 0x29, 0x45, 0xef, 0xc4, 0x42, 0xb6, - 0x7a, 0x92, 0x90, 0x9d, 0xc9, 0x0f, 0x57, 0xb4, 0x09, 0xf3, 0x03, 0xcf, 0xed, 0x79, 0xc4, 0xf7, - 0xef, 0x10, 0xa3, 0x6b, 0x5b, 0x0e, 0x09, 0xe6, 0x47, 0x30, 0xa2, 0x2b, 0x87, 0x07, 0xcb, 0xf3, - 0xad, 0x6c, 0x15, 0x9c, 0x67, 0xab, 0x3f, 0x18, 0x83, 0x0b, 0xc9, 0x0a, 0x98, 0x43, 0x52, 0xb5, - 0x13, 0x91, 0xd4, 0x6b, 0x91, 0xcd, 0x20, 0x18, 0xbc, 0x5a, 0xfd, 0x8c, 0x0d, 0x71, 0x1b, 0x66, - 0x65, 0x36, 0x08, 0x84, 0x92, 0xa6, 0xab, 0xd5, 0xdf, 0x8c, 0x8b, 0x71, 0x52, 0x1f, 0xbd, 0x08, - 0xd3, 0x1e, 0xe7, 0xdd, 0x01, 0x80, 0xe0, 0xae, 0x8f, 0x48, 0x80, 0x69, 0x1c, 0x15, 0xe2, 0xb8, - 0x2e, 0xe3, 0xad, 0x21, 0x1d, 0x0d, 0x00, 0xc6, 0xe2, 0xbc, 0xf5, 0x76, 0x52, 0x01, 0xa7, 0x6d, - 0xd0, 0x3a, 0x5c, 0x1a, 0x3a, 0x69, 0x28, 0x11, 0xca, 0x57, 0x24, 0xd4, 0xa5, 0xcd, 0xb4, 0x0a, - 0xce, 0xb2, 0x43, 0xdb, 0x31, 0x2a, 0x3b, 0xce, 0xd3, 0xf3, 0xcd, 0xc2, 0x1b, 0xaf, 0x30, 0x97, - 0xcd, 0xa0, 0xdb, 0xd5, 0xa2, 0x74, 0x5b, 0xff, 0x83, 0x16, 0x2d, 0x42, 0x8a, 0x02, 0x1f, 0x77, - 0xcb, 0x94, 0xb2, 0x88, 0xb0, 0x23, 0x37, 0x9b, 0xfd, 0xde, 0x1a, 0x89, 0xfd, 0x86, 0xc5, 0xf3, - 0x78, 0xfa, 0xfb, 0x47, 0x0d, 0x66, 0xef, 
0x75, 0x3a, 0xad, 0x55, 0x87, 0xef, 0x96, 0x96, 0x41, - 0x77, 0x58, 0x15, 0x1d, 0x18, 0x74, 0x27, 0x59, 0x45, 0x99, 0x0c, 0x73, 0x09, 0x7a, 0x06, 0xaa, - 0xec, 0x5f, 0xe6, 0x38, 0x0f, 0xd7, 0x49, 0x9e, 0x64, 0xaa, 0x2d, 0xd9, 0xf6, 0x30, 0xf2, 0x37, - 0x56, 0x9a, 0xe8, 0x5b, 0x30, 0xc1, 0xf6, 0x36, 0x71, 0xba, 0x05, 0xc9, 0xaf, 0x74, 0xaa, 0x21, - 0x8c, 0x42, 0x3e, 0x23, 0x1b, 0x70, 0x00, 0xa7, 0xef, 0xc2, 0x5c, 0x64, 0x10, 0x78, 0x68, 0x93, - 0x37, 0x59, 0xbd, 0x42, 0x6d, 0xa8, 0xb0, 0xde, 0x59, 0x55, 0x2a, 0x17, 0xb8, 0x5e, 0x4c, 0x4c, - 0x44, 0xc8, 0x3d, 0xd8, 0x2f, 0x1f, 0x0b, 0x2c, 0x7d, 0x03, 0x26, 0x56, 0x5b, 0x0d, 0xdb, 0x15, - 0x7c, 0xc3, 0xb4, 0xba, 0x5e, 0x72, 0xa6, 0x56, 0x56, 0xef, 0x60, 0xcc, 0x25, 0x48, 0x87, 0x71, - 0x72, 0xdf, 0x24, 0x03, 0xca, 0x29, 0xc6, 0x64, 0x03, 0x58, 0x22, 0xbd, 0xcb, 0x5b, 0xb0, 0x94, - 0xe8, 0x3f, 0x29, 0xc1, 0x84, 0xec, 0xf6, 0x0c, 0xce, 0x1f, 0x6b, 0xb1, 0xf3, 0xc7, 0x53, 0xc5, - 0x96, 0x20, 0xf7, 0xf0, 0xd1, 0x49, 0x1c, 0x3e, 0xae, 0x15, 0xc4, 0x3b, 0xfa, 0xe4, 0xf1, 0x5e, - 0x09, 0x66, 0xe2, 0x8b, 0x8f, 0x9e, 0x85, 0x29, 0x96, 0x6a, 0x2d, 0x93, 0x34, 0x43, 0x86, 0xa7, - 0xae, 0x1f, 0xda, 0xa1, 0x08, 0x47, 0xf5, 0x50, 0x4f, 0x99, 0xb5, 0x5c, 0x8f, 0xca, 0x41, 0xe7, - 0x4f, 0xe9, 0x90, 0x5a, 0x76, 0x4d, 0x5c, 0xb6, 0xd7, 0x56, 0x1d, 0xba, 0xe1, 0xb5, 0xa9, 0x67, - 0x39, 0xbd, 0x54, 0x47, 0x0c, 0x0c, 0x47, 0x91, 0xd1, 0x5b, 0x2c, 0xed, 0xfb, 0xee, 0xd0, 0x33, - 0x49, 0x16, 0x7d, 0x0b, 0xa8, 0x07, 0xdb, 0x08, 0xdd, 0x35, 0xd7, 0x34, 0x6c, 0xb1, 0x38, 0x98, - 0x6c, 0x13, 0x8f, 0x38, 0x26, 0x09, 0x28, 0x93, 0x80, 0xc0, 0x0a, 0x4c, 0xff, 0xad, 0x06, 0x53, - 0x72, 0x2e, 0xce, 0x80, 0xa8, 0xbf, 0x1e, 0x27, 0xea, 0x4f, 0x14, 0xdc, 0xa1, 0xd9, 0x2c, 0xfd, - 0x77, 0x1a, 0x2c, 0x06, 0xae, 0xbb, 0x46, 0xb7, 0x61, 0xd8, 0x86, 0x63, 0x12, 0x2f, 0x88, 0xf5, - 0x45, 0x28, 0x59, 0x03, 0xb9, 0x92, 0x20, 0x01, 0x4a, 0xab, 0x2d, 0x5c, 0xb2, 0x06, 0xac, 0x8a, - 0xee, 0xb8, 0x3e, 0xe5, 0x6c, 0x5e, 0x1c, 0x14, 0x95, 0xd7, 0xf7, 0x64, 0x3b, 0x56, 0x1a, 0x68, - 0x13, 0x2a, 0x03, 0xd7, 0xa3, 0xac, 0x72, 0x95, 0x13, 0xeb, 0x7b, 0x84, 0xd7, 0x6c, 0xdd, 0x64, - 0x20, 0x86, 0x3b, 0x9d, 0xc1, 0x60, 0x81, 0xa6, 0xff, 0x50, 0x83, 0x47, 0x33, 0xfc, 0x97, 0xa4, - 0xa1, 0x0b, 0x13, 0x96, 0x10, 0xca, 0xf4, 0xf2, 0x7c, 0xb1, 0x6e, 0x33, 0xa6, 0x22, 0x4c, 0x6d, - 0x41, 0x0a, 0x0b, 0xa0, 0xf5, 0x5f, 0x69, 0x70, 0x31, 0xe5, 0x2f, 0x4f, 0xd1, 0x2c, 0x9e, 0x25, - 0xdb, 0x56, 0x29, 0x9a, 0x85, 0x25, 0x97, 0xa0, 0xd7, 0xa1, 0xca, 0xdf, 0x88, 0x4c, 0xd7, 0x96, - 0x13, 0x58, 0x0f, 0x26, 0xb0, 0x25, 0xdb, 0x1f, 0x1e, 0x2c, 0x5f, 0xc9, 0x38, 0x6b, 0x07, 0x62, - 0xac, 0x00, 0xd0, 0x32, 0x54, 0x88, 0xe7, 0xb9, 0x9e, 0x4c, 0xf6, 0x93, 0x6c, 0xa6, 0xee, 0xb2, - 0x06, 0x2c, 0xda, 0xf5, 0x5f, 0x87, 0x41, 0xca, 0xb2, 0x2f, 0xf3, 0x8f, 0x2d, 0x4e, 0x32, 0x31, - 0xb2, 0xa5, 0xc3, 0x5c, 0x82, 0x86, 0x70, 0xc1, 0x4a, 0xa4, 0x6b, 0xb9, 0x3b, 0xeb, 0xc5, 0xa6, - 0x51, 0x99, 0x35, 0x16, 0x24, 0xfc, 0x85, 0xa4, 0x04, 0xa7, 0xba, 0xd0, 0x09, 0xa4, 0xb4, 0xd0, - 0x1b, 0x30, 0xb6, 0x43, 0xe9, 0x20, 0xe3, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, 0xa3, - 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2f, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0x6f, 0xaa, 0xd1, - 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, 0xb4, - 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0xf7, 0xa0, 0x4c, 0xed, - 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, 0xcc, - 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 
0xaf, 0x2b, 0x6c, 0x06, 0xc3, - 0xcd, 0xc7, 0x7e, 0xf9, 0x58, 0xe0, 0xe8, 0x3f, 0xd2, 0x60, 0x3a, 0x56, 0x2d, 0x90, 0x07, 0xe7, - 0xed, 0xc8, 0xde, 0x91, 0xf3, 0xf0, 0xdc, 0xe8, 0xbb, 0x4e, 0x6e, 0xfa, 0x39, 0xd9, 0xef, 0xf9, - 0xa8, 0x0c, 0xc7, 0xfa, 0xd0, 0x0d, 0x80, 0x70, 0xd8, 0x6c, 0x1f, 0xb0, 0xe0, 0x15, 0x1b, 0x5e, - 0xee, 0x03, 0x16, 0xd3, 0x3e, 0x16, 0xed, 0xe8, 0x26, 0x80, 0x4f, 0x4c, 0x8f, 0xd0, 0x66, 0x98, - 0xb8, 0x54, 0x39, 0x6e, 0x2b, 0x09, 0x8e, 0x68, 0xe9, 0x7f, 0xd2, 0x60, 0xba, 0x49, 0xe8, 0xf7, - 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, 0xf9, - 0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0, 0x3f, 0xd4, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a, - 0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c, 0x86, 0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d, - 0x19, 0xbd, 0xa3, 0x61, 0x12, 0xe2, 0x85, 0x35, 0xa7, 0xe3, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c, - 0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x73, 0x1a, 0x01, 0x86, 0xb1, 0x6d, 0xcf, 0xed, 0x9f, 0x78, 0x0c, - 0x6a, 0x21, 0x5e, 0xf1, 0xdc, 0x3e, 0xe6, 0x58, 0xfa, 0x47, 0x1a, 0x5c, 0x8c, 0x69, 0x9e, 0x01, - 0x6f, 0x78, 0x23, 0xce, 0x1b, 0xae, 0x8d, 0x32, 0x90, 0x1c, 0xf6, 0xf0, 0x51, 0x29, 0x31, 0x0c, - 0x36, 0x60, 0xb4, 0x0d, 0x53, 0x03, 0xb7, 0xdb, 0x3e, 0x85, 0x07, 0xda, 0x59, 0xc6, 0xe7, 0x5a, - 0x21, 0x16, 0x8e, 0x02, 0xa3, 0xfb, 0x70, 0x91, 0x51, 0x0b, 0x7f, 0x60, 0x98, 0xa4, 0x7d, 0x0a, - 0x57, 0x56, 0x8f, 0xf0, 0x17, 0xa0, 0x24, 0x22, 0x4e, 0x77, 0x82, 0xd6, 0x61, 0xc2, 0x1a, 0xf0, - 0xf3, 0x85, 0x24, 0x92, 0xc7, 0x92, 0x30, 0x71, 0x1a, 0x11, 0x29, 0x5e, 0xfe, 0xc0, 0x01, 0x86, - 0xfe, 0xd7, 0x64, 0x34, 0x70, 0xba, 0xfa, 0x6a, 0x84, 0x1e, 0xc8, 0xb7, 0x9a, 0x93, 0x51, 0x83, - 0xa6, 0x64, 0x22, 0x27, 0x65, 0xd6, 0xd5, 0x04, 0x6f, 0xf9, 0x0a, 0x4c, 0x10, 0xa7, 0xcb, 0xc9, - 0xba, 0xb8, 0x08, 0xe1, 0xa3, 0xba, 0x2b, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x5c, 0x4e, 0x8c, 0x8a, - 0x97, 0xd9, 0x77, 0x4f, 0x2d, 0x38, 0x14, 0xe1, 0xcf, 0x0d, 0x90, 0xad, 0x90, 0xfe, 0x89, 0x98, - 0xff, 0xc6, 0x28, 0x31, 0x1f, 0xad, 0x7f, 0xb9, 0xe4, 0x0f, 0x7d, 0x07, 0xc6, 0x89, 0xe8, 0x42, - 0x54, 0xd5, 0x5b, 0xa3, 0x74, 0x11, 0xa6, 0xdf, 0xf0, 0x9c, 0x25, 0xdb, 0x24, 0x2a, 0x7a, 0x99, - 0xcd, 0x17, 0xd3, 0x65, 0xc7, 0x12, 0xc1, 0x9e, 0x27, 0x1b, 0x8f, 0x89, 0x61, 0xab, 0xe6, 0x87, - 0x07, 0xcb, 0x10, 0xfe, 0xc4, 0x51, 0x0b, 0xfe, 0x7a, 0x26, 0xef, 0x6c, 0xce, 0xe6, 0x0b, 0xa4, - 0xd1, 0x5e, 0xcf, 0x42, 0xd7, 0x4e, 0xed, 0xf5, 0x2c, 0x02, 0x79, 0xf4, 0x19, 0xf6, 0x9f, 0x25, - 0xb8, 0x14, 0x2a, 0x17, 0x7e, 0x3d, 0xcb, 0x30, 0xf9, 0xff, 0x57, 0x48, 0xc5, 0x5e, 0xb4, 0xc2, - 0xa9, 0xfb, 0xef, 0x7b, 0xd1, 0x0a, 0x7d, 0xcb, 0xa9, 0x76, 0xbf, 0x29, 0x45, 0x07, 0x30, 0xe2, - 0xb3, 0xca, 0x29, 0x7c, 0x88, 0xf3, 0x85, 0x7b, 0x99, 0xd1, 0xff, 0x52, 0x86, 0x0b, 0xc9, 0xdd, - 0x18, 0xbb, 0x7d, 0xd7, 0x8e, 0xbd, 0x7d, 0x6f, 0xc1, 0xdc, 0xf6, 0xd0, 0xb6, 0xf7, 0xf9, 0x18, - 0x22, 0x57, 0xf0, 0xe2, 0xde, 0xfe, 0x4b, 0xd2, 0x72, 0xee, 0x95, 0x0c, 0x1d, 0x9c, 0x69, 0x99, - 0xbe, 0x8c, 0x1f, 0xfb, 0x4f, 0x2f, 0xe3, 0x2b, 0x27, 0xb8, 0x8c, 0xcf, 0x7e, 0xcf, 0x28, 0x9f, - 0xe8, 0x3d, 0xe3, 0x24, 0x37, 0xf1, 0x19, 0x49, 0xec, 0xd8, 0xaf, 0x4a, 0x5e, 0x82, 0x99, 0xf8, - 0xeb, 0x90, 0x58, 0x4b, 0xf1, 0x40, 0x25, 0xdf, 0x62, 0x22, 0x6b, 0x29, 0xda, 0xb1, 0xd2, 0xd0, - 0x0f, 0x35, 0xb8, 0x9c, 0xfd, 0x15, 0x08, 0xb2, 0x61, 0xa6, 0x6f, 0xdc, 0x8f, 0x7e, 0x99, 0xa3, - 0x9d, 0x90, 0xad, 0xf0, 0x67, 0x81, 0xf5, 0x18, 0x16, 0x4e, 0x60, 0xa3, 0xb7, 0xa1, 0xda, 0x37, - 0xee, 0xb7, 0x87, 0x5e, 0x8f, 0x9c, 0x98, 0x15, 0xf1, 0x6d, 0xb4, 0x2e, 0x51, 0xb0, 0xc2, 
0xd3, - 0x3f, 0xd3, 0x60, 0x3e, 0xe7, 0xb2, 0xff, 0x7f, 0x68, 0x94, 0xef, 0x95, 0xa0, 0xd2, 0x36, 0x0d, - 0x9b, 0x9c, 0x01, 0xa1, 0x78, 0x2d, 0x46, 0x28, 0x8e, 0xfb, 0x9a, 0x94, 0x7b, 0x95, 0xcb, 0x25, - 0x70, 0x82, 0x4b, 0x3c, 0x55, 0x08, 0xed, 0x68, 0x1a, 0xf1, 0x3c, 0x4c, 0xaa, 0x4e, 0x47, 0xcb, - 0x6e, 0xfa, 0x2f, 0x4b, 0x30, 0x15, 0xe9, 0x62, 0xc4, 0xdc, 0xb8, 0x1d, 0x2b, 0x08, 0xe5, 0x02, - 0x37, 0x2d, 0x91, 0xbe, 0x6a, 0x41, 0x09, 0x10, 0x5f, 0x43, 0x84, 0xef, 0xdf, 0xe9, 0xca, 0xf0, - 0x12, 0xcc, 0x50, 0xc3, 0xeb, 0x11, 0xaa, 0x68, 0xbb, 0xb8, 0x64, 0x54, 0x9f, 0xe5, 0x74, 0x62, - 0x52, 0x9c, 0xd0, 0x5e, 0x7c, 0x11, 0xa6, 0x63, 0x9d, 0x8d, 0xf2, 0x31, 0x43, 0x63, 0xe5, 0xc1, - 0xa7, 0x4b, 0xe7, 0x3e, 0xfe, 0x74, 0xe9, 0xdc, 0x27, 0x9f, 0x2e, 0x9d, 0xfb, 0xc1, 0xe1, 0x92, - 0xf6, 0xe0, 0x70, 0x49, 0xfb, 0xf8, 0x70, 0x49, 0xfb, 0xe4, 0x70, 0x49, 0xfb, 0xfb, 0xe1, 0x92, - 0xf6, 0xd3, 0xcf, 0x96, 0xce, 0xbd, 0xfd, 0xd8, 0x91, 0xff, 0xb7, 0xe1, 0xdf, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x5f, 0xd8, 0x14, 0x50, 0xfb, 0x30, 0x00, 0x00, + // 2875 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47, + 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76, + 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x77, 0x86, 0xdd, 0x24, 0x4b, 0x7e, 0x48, 0x09, 0x3b, 0xde, 0x4d, + 0xd6, 0x89, 0x7f, 0x4c, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9, + 0x1e, 0x75, 0xd7, 0x98, 0xf5, 0x0d, 0x04, 0x97, 0x9c, 0x40, 0x42, 0x21, 0x1c, 0x91, 0x90, 0xb8, + 0x72, 0xe5, 0x10, 0x22, 0x10, 0x41, 0x8a, 0x38, 0x45, 0xe2, 0x40, 0x4e, 0x16, 0x71, 0x4e, 0x88, + 0x7f, 0x00, 0xed, 0x09, 0xd5, 0x8f, 0xae, 0xfe, 0x6d, 0xf7, 0x0c, 0x5e, 0x8b, 0x20, 0x4e, 0xeb, + 0xa9, 0xf7, 0xde, 0xa7, 0x5e, 0x55, 0xbd, 0x7a, 0xef, 0x53, 0x55, 0xbd, 0x70, 0x7d, 0xef, 0x39, + 0xbf, 0x66, 0xb9, 0x75, 0xa3, 0x6f, 0xd5, 0xc9, 0x7d, 0x4a, 0x1c, 0xdf, 0x72, 0x1d, 0xbf, 0xbe, + 0x7f, 0x63, 0x9b, 0x50, 0xe3, 0x46, 0xbd, 0x4b, 0x1c, 0xe2, 0x19, 0x94, 0x74, 0x6a, 0x7d, 0xcf, + 0xa5, 0x2e, 0x7a, 0x4c, 0xa8, 0xd7, 0x8c, 0xbe, 0x55, 0x0b, 0xd5, 0x6b, 0x52, 0x7d, 0xf1, 0x7a, + 0xd7, 0xa2, 0xbb, 0x83, 0xed, 0x9a, 0xe9, 0xf6, 0xea, 0x5d, 0xb7, 0xeb, 0xd6, 0xb9, 0xd5, 0xf6, + 0x60, 0x87, 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x02, 0x6d, 0x51, 0x8f, 0x74, 0x6e, 0xba, 0x1e, 0xa9, + 0xef, 0xa7, 0x7a, 0x5c, 0x7c, 0x26, 0xd4, 0xe9, 0x19, 0xe6, 0xae, 0xe5, 0x10, 0xef, 0xa0, 0xde, + 0xdf, 0xeb, 0xb2, 0x06, 0xbf, 0xde, 0x23, 0xd4, 0xc8, 0xb2, 0xaa, 0xe7, 0x59, 0x79, 0x03, 0x87, + 0x5a, 0x3d, 0x92, 0x32, 0xb8, 0x75, 0x92, 0x81, 0x6f, 0xee, 0x92, 0x9e, 0x91, 0xb2, 0x7b, 0x3a, + 0xcf, 0x6e, 0x40, 0x2d, 0xbb, 0x6e, 0x39, 0xd4, 0xa7, 0x5e, 0xd2, 0x48, 0x7f, 0xbf, 0x04, 0x93, + 0x77, 0x0c, 0xd2, 0x73, 0x9d, 0x16, 0xa1, 0xe8, 0x7b, 0x50, 0x65, 0xc3, 0xe8, 0x18, 0xd4, 0x58, + 0xd0, 0x1e, 0xd7, 0xae, 0x4e, 0xdd, 0xfc, 0x7a, 0x2d, 0x9c, 0x66, 0x85, 0x5a, 0xeb, 0xef, 0x75, + 0x59, 0x83, 0x5f, 0x63, 0xda, 0xb5, 0xfd, 0x1b, 0xb5, 0xcd, 0xed, 0x77, 0x89, 0x49, 0xd7, 0x09, + 0x35, 0x1a, 0xe8, 0x93, 0xc3, 0xe5, 0x73, 0x47, 0x87, 0xcb, 0x10, 0xb6, 0x61, 0x85, 0x8a, 0x36, + 0x60, 0xcc, 0xef, 0x13, 0x73, 0xa1, 0xc4, 0xd1, 0xaf, 0xd5, 0x8e, 0x5d, 0xc4, 0x9a, 0xf2, 0xac, + 0xd5, 0x27, 0x66, 0xe3, 0xbc, 0x44, 0x1e, 0x63, 0xbf, 0x30, 0xc7, 0x41, 0x6f, 0xc2, 0xb8, 0x4f, + 0x0d, 0x3a, 0xf0, 0x17, 0xca, 0x1c, 0xb1, 0x56, 0x18, 0x91, 0x5b, 0x35, 0x66, 0x24, 0xe6, 0xb8, + 0xf8, 0x8d, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x1d, 0x8b, 0x5a, 0xae, + 0x83, 
0x5e, 0x80, 0x31, 0x7a, 0xd0, 0x27, 0x7c, 0x72, 0x26, 0x1b, 0x4f, 0x04, 0x0e, 0xb5, 0x0f, + 0xfa, 0xe4, 0xc1, 0xe1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, + 0x6e, 0xfd, 0x4c, 0xbc, 0xeb, 0x07, 0x87, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, + 0x3e, 0x20, 0xdb, 0xf0, 0x69, 0xdb, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x88, 0x9c, 0x84, 0xa7, + 0x8a, 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x9e, + 0x80, 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, + 0x29, 0x7a, 0x12, 0x26, 0x7a, 0xc4, 0xf7, 0x8d, 0x2e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, + 0xb1, 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd4, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, + 0xed, 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, + 0x91, 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x8f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0xcd, 0xab, 0x45, + 0x43, 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xe7, 0x63, 0x11, 0xf7, + 0x59, 0x68, 0xa2, 0x77, 0xa0, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x17, 0x74, + 0xdf, 0xd8, 0x26, 0x76, 0x4b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x5f, 0x58, 0x41, 0xa2, 0x37, + 0xa0, 0x4a, 0x49, 0xaf, 0x6f, 0x1b, 0x94, 0xc8, 0x7d, 0xf4, 0xff, 0xd1, 0x21, 0xb0, 0xc8, 0x61, + 0x60, 0x4d, 0xb7, 0xd3, 0x96, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0xfb, + 0x30, 0x33, 0xe8, 0x77, 0x98, 0x26, 0x65, 0xd9, 0xa1, 0x7b, 0x20, 0x23, 0xe9, 0x56, 0xd1, 0xb9, + 0xd9, 0x8a, 0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3, + 0x3d, 0xcb, 0xc1, 0xc4, 0xe8, 0x1c, 0xb4, 0x88, 0xe9, 0x3a, 0x1d, 0x9f, 0x87, 0x55, 0xa5, 0x31, + 0x2f, 0x01, 0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48, + 0x6e, 0x96, 0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x4e, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06, + 0x73, 0x1e, 0xd9, 0xb7, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0x83, 0x35, 0xab, 0x67, 0xd1, + 0x85, 0x71, 0xee, 0xd3, 0xc2, 0xd1, 0xe1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0x83, + 0x71, 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x26, 0x5c, 0x36, 0x07, 0x9e, 0x47, 0x1c, 0xba, 0x31, 0xe8, + 0x6d, 0x13, 0xaf, 0x65, 0xee, 0x92, 0xce, 0xc0, 0x26, 0x1d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4, + 0xf8, 0xf2, 0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15, + 0x66, 0x89, 0x63, 0xaa, 0x59, 0xd8, 0x48, 0x69, 0xe0, 0x0c, 0x2b, 0xe6, 0x63, 0x87, 0xf8, 0x96, + 0x47, 0x3a, 0x49, 0x1f, 0xcb, 0x71, 0x1f, 0xef, 0x64, 0x6a, 0xe1, 0x1c, 0x6b, 0xf4, 0x2c, 0x4c, + 0x89, 0xde, 0xf8, 0xfa, 0xc9, 0x85, 0xbe, 0x24, 0xc1, 0xa6, 0x36, 0x42, 0x11, 0x8e, 0xea, 0xb1, + 0xa1, 0xb9, 0xdb, 0x3e, 0xf1, 0xf6, 0x49, 0x27, 0x7f, 0x81, 0x37, 0x53, 0x1a, 0x38, 0xc3, 0x8a, + 0x0d, 0x4d, 0x44, 0x60, 0x6a, 0x68, 0xe3, 0xf1, 0xa1, 0x6d, 0x65, 0x6a, 0xe1, 0x1c, 0x6b, 0x16, + 0xc7, 0xc2, 0xe5, 0xdb, 0xfb, 0x86, 0x65, 0x1b, 0xdb, 0x36, 0x59, 0x98, 0x88, 0xc7, 0xf1, 0x46, + 0x5c, 0x8c, 0x93, 0xfa, 0xe8, 0x55, 0xb8, 0x28, 0x9a, 0xb6, 0x1c, 0x43, 0x81, 0x54, 0x39, 0xc8, + 0xa3, 0x12, 0xe4, 0xe2, 0x46, 0x52, 0x01, 0xa7, 0x6d, 0xd0, 0x0b, 0x30, 0x63, 0xba, 0xb6, 0xcd, + 0xe3, 0x71, 0xc5, 0x1d, 0x38, 0x74, 0x61, 0x92, 0xa3, 0x20, 0xb6, 0x1f, 0x57, 0x62, 0x12, 0x9c, + 0xd0, 0x44, 0x04, 0xc0, 0x0c, 0x0a, 0x8e, 0xbf, 0x00, 0x3c, 0x3f, 0xde, 0x28, 0x9a, 0x03, 0x54, + 0xa9, 0x0a, 0x39, 0x80, 0x6a, 0xf2, 0x71, 0x04, 0x58, 0xff, 0xb3, 0x06, 0xf3, 0x39, 0xa9, 0x03, + 0xbd, 0x1c, 0x2b, 0xb1, 0x5f, 
0x4b, 0x94, 0xd8, 0x2b, 0x39, 0x66, 0x91, 0x3a, 0xeb, 0xc0, 0xb4, + 0xc7, 0x46, 0xe5, 0x74, 0x85, 0x8a, 0xcc, 0x91, 0xcf, 0x9e, 0x30, 0x0c, 0x1c, 0xb5, 0x09, 0x73, + 0xfe, 0xc5, 0xa3, 0xc3, 0xe5, 0xe9, 0x98, 0x0c, 0xc7, 0xe1, 0xf5, 0x5f, 0x94, 0x00, 0xee, 0x90, + 0xbe, 0xed, 0x1e, 0xf4, 0x88, 0x73, 0x16, 0x1c, 0x6a, 0x33, 0xc6, 0xa1, 0xae, 0x9f, 0xb4, 0x3c, + 0xca, 0xb5, 0x5c, 0x12, 0xf5, 0x56, 0x82, 0x44, 0xd5, 0x8b, 0x43, 0x1e, 0xcf, 0xa2, 0xfe, 0x5a, + 0x86, 0x4b, 0xa1, 0x72, 0x48, 0xa3, 0x5e, 0x8c, 0xad, 0xf1, 0x57, 0x13, 0x6b, 0x3c, 0x9f, 0x61, + 0xf2, 0xd0, 0x78, 0xd4, 0xbb, 0x30, 0xc3, 0x58, 0x8e, 0x58, 0x4b, 0xce, 0xa1, 0xc6, 0x87, 0xe6, + 0x50, 0xaa, 0xda, 0xad, 0xc5, 0x90, 0x70, 0x02, 0x39, 0x87, 0xb3, 0x4d, 0x7c, 0x19, 0x39, 0xdb, + 0x47, 0x1a, 0xcc, 0x84, 0xcb, 0x74, 0x06, 0xa4, 0x6d, 0x23, 0x4e, 0xda, 0x9e, 0x2c, 0x1c, 0xa2, + 0x39, 0xac, 0xed, 0x9f, 0x8c, 0xe0, 0x2b, 0x25, 0xb6, 0xc1, 0xb7, 0x0d, 0x73, 0x0f, 0x3d, 0x0e, + 0x63, 0x8e, 0xd1, 0x0b, 0x22, 0x53, 0x6d, 0x96, 0x0d, 0xa3, 0x47, 0x30, 0x97, 0xa0, 0xf7, 0x35, + 0x40, 0xb2, 0x0a, 0xdc, 0x76, 0x1c, 0x97, 0x1a, 0x22, 0x57, 0x0a, 0xb7, 0x56, 0x0b, 0xbb, 0x15, + 0xf4, 0x58, 0xdb, 0x4a, 0x61, 0xdd, 0x75, 0xa8, 0x77, 0x10, 0x2e, 0x72, 0x5a, 0x01, 0x67, 0x38, + 0x80, 0x0c, 0x00, 0x4f, 0x62, 0xb6, 0x5d, 0xb9, 0x91, 0xaf, 0x17, 0xc8, 0x79, 0xcc, 0x60, 0xc5, + 0x75, 0x76, 0xac, 0x6e, 0x98, 0x76, 0xb0, 0x02, 0xc2, 0x11, 0xd0, 0xc5, 0xbb, 0x30, 0x9f, 0xe3, + 0x2d, 0xba, 0x00, 0xe5, 0x3d, 0x72, 0x20, 0xa6, 0x0d, 0xb3, 0x3f, 0xd1, 0x1c, 0x54, 0xf6, 0x0d, + 0x7b, 0x20, 0xd2, 0xef, 0x24, 0x16, 0x3f, 0x5e, 0x28, 0x3d, 0xa7, 0xe9, 0x1f, 0x56, 0xa2, 0xb1, + 0xc3, 0x19, 0xf3, 0x55, 0xa8, 0x7a, 0xa4, 0x6f, 0x5b, 0xa6, 0xe1, 0x4b, 0x22, 0xc4, 0xc9, 0x2f, + 0x96, 0x6d, 0x58, 0x49, 0x63, 0xdc, 0xba, 0xf4, 0x70, 0xb9, 0x75, 0xf9, 0x74, 0xb8, 0xf5, 0x77, + 0xa1, 0xea, 0x07, 0xac, 0x7a, 0x8c, 0x43, 0xde, 0x18, 0x22, 0xbf, 0x4a, 0x42, 0xad, 0x3a, 0x50, + 0x54, 0x5a, 0x81, 0x66, 0x91, 0xe8, 0xca, 0x90, 0x24, 0xfa, 0x54, 0x89, 0x2f, 0xcb, 0x37, 0x7d, + 0x63, 0xe0, 0x93, 0x0e, 0xcf, 0x6d, 0xd5, 0x30, 0xdf, 0x34, 0x79, 0x2b, 0x96, 0x52, 0xf4, 0x4e, + 0x2c, 0x64, 0xab, 0xa3, 0x84, 0xec, 0x4c, 0x7e, 0xb8, 0xa2, 0x2d, 0x98, 0xef, 0x7b, 0x6e, 0xd7, + 0x23, 0xbe, 0x7f, 0x87, 0x18, 0x1d, 0xdb, 0x72, 0x48, 0x30, 0x3f, 0x82, 0x11, 0x5d, 0x39, 0x3a, + 0x5c, 0x9e, 0x6f, 0x66, 0xab, 0xe0, 0x3c, 0x5b, 0xfd, 0x67, 0x15, 0xb8, 0x90, 0xac, 0x80, 0x39, + 0x24, 0x55, 0x1b, 0x89, 0xa4, 0x5e, 0x8b, 0x6c, 0x06, 0xc1, 0xe0, 0xd5, 0xea, 0x67, 0x6c, 0x88, + 0xdb, 0x30, 0x2b, 0xb3, 0x41, 0x20, 0x94, 0x34, 0x5d, 0xad, 0xfe, 0x56, 0x5c, 0x8c, 0x93, 0xfa, + 0xe8, 0x45, 0x98, 0xf6, 0x38, 0xef, 0x0e, 0x00, 0x04, 0x77, 0x7d, 0x44, 0x02, 0x4c, 0xe3, 0xa8, + 0x10, 0xc7, 0x75, 0x19, 0x6f, 0x0d, 0xe9, 0x68, 0x00, 0x30, 0x16, 0xe7, 0xad, 0xb7, 0x93, 0x0a, + 0x38, 0x6d, 0x83, 0xd6, 0xe1, 0xd2, 0xc0, 0x49, 0x43, 0x89, 0x50, 0xbe, 0x22, 0xa1, 0x2e, 0x6d, + 0xa5, 0x55, 0x70, 0x96, 0x1d, 0x5a, 0x85, 0x4b, 0x94, 0x78, 0x3d, 0xcb, 0x31, 0xa8, 0xe5, 0x74, + 0x15, 0x9c, 0x58, 0xf9, 0x79, 0x06, 0xd5, 0x4e, 0x8b, 0x71, 0x96, 0x0d, 0xda, 0x89, 0xb1, 0xe2, + 0x71, 0x9e, 0xe9, 0x6f, 0x16, 0xde, 0xc3, 0x85, 0x69, 0x71, 0x06, 0x73, 0xaf, 0x16, 0x65, 0xee, + 0xfa, 0x1f, 0xb4, 0x68, 0x3d, 0x53, 0x6c, 0xfa, 0xa4, 0x0b, 0xab, 0x94, 0x45, 0x84, 0x68, 0xb9, + 0xd9, 0x44, 0xfa, 0xd6, 0x50, 0x44, 0x3a, 0xac, 0xc3, 0x27, 0x33, 0xe9, 0x3f, 0x6a, 0x30, 0x7b, + 0xaf, 0xdd, 0x6e, 0xae, 0x3a, 0x7c, 0xe3, 0x35, 0x0d, 0xba, 0xcb, 0x0a, 0x72, 0xdf, 0xa0, 0xbb, + 0xc9, 0x82, 0xcc, 0x64, 0x98, 0x4b, 0xd0, 0x33, 0x50, 
0x65, 0xff, 0x32, 0xc7, 0x79, 0xe4, 0x4f, + 0xf2, 0x7c, 0x55, 0x6d, 0xca, 0xb6, 0x07, 0x91, 0xbf, 0xb1, 0xd2, 0x44, 0xdf, 0x82, 0x09, 0x96, + 0x26, 0x88, 0xd3, 0x29, 0xc8, 0xa3, 0xa5, 0x53, 0x0d, 0x61, 0x14, 0x52, 0x23, 0xd9, 0x80, 0x03, + 0x38, 0x7d, 0x0f, 0xe6, 0x22, 0x83, 0xc0, 0x03, 0x9b, 0xbc, 0xc9, 0x4a, 0x1f, 0x6a, 0x41, 0x85, + 0xf5, 0xce, 0x0a, 0x5c, 0xb9, 0xc0, 0x4d, 0x65, 0x62, 0x22, 0x42, 0x1a, 0xc3, 0x7e, 0xf9, 0x58, + 0x60, 0xe9, 0x9b, 0x30, 0xb1, 0xda, 0x6c, 0xd8, 0xae, 0xa0, 0x2e, 0xa6, 0xd5, 0xf1, 0x92, 0x33, + 0xb5, 0xb2, 0x7a, 0x07, 0x63, 0x2e, 0x41, 0x3a, 0x8c, 0x93, 0xfb, 0x26, 0xe9, 0x53, 0xce, 0x56, + 0x26, 0x1b, 0xc0, 0x72, 0xf2, 0x5d, 0xde, 0x82, 0xa5, 0x44, 0xff, 0x49, 0x09, 0x26, 0x64, 0xb7, + 0x67, 0x70, 0x94, 0x59, 0x8b, 0x1d, 0x65, 0x9e, 0x2a, 0xb6, 0x04, 0xb9, 0xe7, 0x98, 0x76, 0xe2, + 0x1c, 0x73, 0xad, 0x20, 0xde, 0xf1, 0x87, 0x98, 0xf7, 0x4a, 0x30, 0x13, 0x5f, 0x7c, 0xf4, 0x2c, + 0x4c, 0xb1, 0xac, 0x6d, 0x99, 0x64, 0x23, 0x24, 0x8b, 0xea, 0x26, 0xa3, 0x15, 0x8a, 0x70, 0x54, + 0x0f, 0x75, 0x95, 0x59, 0xd3, 0xf5, 0xa8, 0x1c, 0x74, 0xfe, 0x94, 0x0e, 0xa8, 0x65, 0xd7, 0xc4, + 0xbd, 0x7d, 0x6d, 0xd5, 0xa1, 0x9b, 0x5e, 0x8b, 0x7a, 0x96, 0xd3, 0x4d, 0x75, 0xc4, 0xc0, 0x70, + 0x14, 0x19, 0xbd, 0xc5, 0x2a, 0x88, 0xef, 0x0e, 0x3c, 0x93, 0x64, 0x31, 0xc1, 0x80, 0xc5, 0xb0, + 0x8d, 0xd0, 0x59, 0x73, 0x4d, 0xc3, 0x16, 0x8b, 0x83, 0xc9, 0x0e, 0xf1, 0x88, 0x63, 0x92, 0x80, + 0x7d, 0x09, 0x08, 0xac, 0xc0, 0xf4, 0xdf, 0x6a, 0x30, 0x25, 0xe7, 0xe2, 0x0c, 0x38, 0xff, 0xeb, + 0x71, 0xce, 0xff, 0x44, 0xc1, 0x1d, 0x9a, 0x4d, 0xf8, 0x7f, 0xa7, 0xc1, 0x62, 0xe0, 0xba, 0x6b, + 0x74, 0x1a, 0x86, 0x6d, 0x38, 0x26, 0xf1, 0x82, 0x58, 0x5f, 0x84, 0x92, 0xd5, 0x97, 0x2b, 0x09, + 0x12, 0xa0, 0xb4, 0xda, 0xc4, 0x25, 0xab, 0xcf, 0x0a, 0xf2, 0xae, 0xeb, 0x53, 0x7e, 0x30, 0x10, + 0x67, 0x4e, 0xe5, 0xf5, 0x3d, 0xd9, 0x8e, 0x95, 0x06, 0xda, 0x82, 0x4a, 0xdf, 0xf5, 0x28, 0x2b, + 0x82, 0xe5, 0xc4, 0xfa, 0x1e, 0xe3, 0x35, 0x5b, 0x37, 0x19, 0x88, 0xe1, 0x4e, 0x67, 0x30, 0x58, + 0xa0, 0xe9, 0x3f, 0xd4, 0xe0, 0xd1, 0x0c, 0xff, 0x25, 0xff, 0xe8, 0xc0, 0x84, 0x25, 0x84, 0x32, + 0xbd, 0x3c, 0x5f, 0xac, 0xdb, 0x8c, 0xa9, 0x08, 0x53, 0x5b, 0x90, 0xc2, 0x02, 0x68, 0xfd, 0x57, + 0x1a, 0x5c, 0x4c, 0xf9, 0xcb, 0x53, 0x34, 0x8b, 0x67, 0x49, 0xdc, 0x55, 0x8a, 0x66, 0x61, 0xc9, + 0x25, 0xe8, 0x75, 0xa8, 0xf2, 0xe7, 0x26, 0xd3, 0xb5, 0xe5, 0x04, 0xd6, 0x83, 0x09, 0x6c, 0xca, + 0xf6, 0x07, 0x87, 0xcb, 0x57, 0x32, 0x8e, 0xed, 0x81, 0x18, 0x2b, 0x00, 0xb4, 0x0c, 0x15, 0xe2, + 0x79, 0xae, 0x27, 0x93, 0xfd, 0x24, 0x9b, 0xa9, 0xbb, 0xac, 0x01, 0x8b, 0x76, 0xfd, 0xd7, 0x61, + 0x90, 0xb2, 0xec, 0xcb, 0xfc, 0x63, 0x8b, 0x93, 0x4c, 0x8c, 0x6c, 0xe9, 0x30, 0x97, 0xa0, 0x01, + 0x5c, 0xb0, 0x12, 0xe9, 0x5a, 0xee, 0xce, 0x7a, 0xb1, 0x69, 0x54, 0x66, 0x8d, 0x05, 0x09, 0x7f, + 0x21, 0x29, 0xc1, 0xa9, 0x2e, 0x74, 0x02, 0x29, 0x2d, 0xf4, 0x06, 0x8c, 0xed, 0x52, 0xda, 0xcf, + 0x78, 0x37, 0x38, 0xa1, 0x48, 0x84, 0x2e, 0x54, 0xf9, 0xe8, 0xda, 0xed, 0x26, 0xe6, 0x50, 0xfa, + 0xef, 0x4b, 0x6a, 0x3e, 0xf8, 0x61, 0xeb, 0x9b, 0x6a, 0xb4, 0x2b, 0xb6, 0xe1, 0xfb, 0x3c, 0x85, + 0x89, 0x8b, 0x81, 0xb9, 0x88, 0xe3, 0x4a, 0x86, 0x53, 0xda, 0xa8, 0x1d, 0x16, 0x4f, 0x6d, 0x94, + 0xe2, 0x39, 0x95, 0x55, 0x38, 0xd1, 0x3d, 0x28, 0x53, 0xbb, 0xe8, 0x01, 0x5f, 0x22, 0xb6, 0xd7, + 0x5a, 0x8d, 0x29, 0x39, 0xe5, 0xe5, 0xf6, 0x5a, 0x0b, 0x33, 0x08, 0xb4, 0x09, 0x15, 0x6f, 0x60, + 0x13, 0x56, 0x07, 0xca, 0xc5, 0xeb, 0x0a, 0x9b, 0xc1, 0x70, 0xf3, 0xb1, 0x5f, 0x3e, 0x16, 0x38, + 0xfa, 0x8f, 0x34, 0x98, 0x8e, 0x55, 0x0b, 0xe4, 0xc1, 0x79, 0x3b, 0xb2, 0x77, 
0xe4, 0x3c, 0x3c, + 0x37, 0xfc, 0xae, 0x93, 0x9b, 0x7e, 0x4e, 0xf6, 0x7b, 0x3e, 0x2a, 0xc3, 0xb1, 0x3e, 0x74, 0x03, + 0x20, 0x1c, 0x36, 0xdb, 0x07, 0x2c, 0x78, 0xc5, 0x86, 0x97, 0xfb, 0x80, 0xc5, 0xb4, 0x8f, 0x45, + 0x3b, 0xba, 0x09, 0xe0, 0x13, 0xd3, 0x23, 0x74, 0x23, 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x94, 0x04, + 0x47, 0xb4, 0xf4, 0x3f, 0x69, 0x30, 0xbd, 0x41, 0xe8, 0xf7, 0x5d, 0x6f, 0xaf, 0xe9, 0xda, 0x96, + 0x79, 0x70, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x49, 0xf9, 0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0, + 0x3f, 0xd2, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a, 0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c, + 0x86, 0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d, 0x19, 0xbd, 0xc3, 0x61, 0x12, 0xe2, + 0x85, 0x35, 0xa7, 0xed, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c, 0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x21, + 0x8d, 0x00, 0xc3, 0xd8, 0x8e, 0xe7, 0xf6, 0x46, 0x1e, 0x83, 0x5a, 0x88, 0x57, 0x3c, 0xb7, 0x87, + 0x39, 0x96, 0xfe, 0xb1, 0x06, 0x17, 0x63, 0x9a, 0x67, 0xc0, 0x1b, 0xde, 0x88, 0xf3, 0x86, 0x6b, + 0xc3, 0x0c, 0x24, 0x87, 0x3d, 0x7c, 0x5c, 0x4a, 0x0c, 0x83, 0x0d, 0x18, 0xed, 0xc0, 0x54, 0xdf, + 0xed, 0xb4, 0x4e, 0xe1, 0xad, 0x77, 0x96, 0xf1, 0xb9, 0x66, 0x88, 0x85, 0xa3, 0xc0, 0xe8, 0x3e, + 0x5c, 0x64, 0xd4, 0xc2, 0xef, 0x1b, 0x26, 0x69, 0x9d, 0xc2, 0xed, 0xd7, 0x23, 0xfc, 0x31, 0x29, + 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0xfa, 0xfc, 0x7c, 0x21, 0x89, 0xe4, 0x89, 0x24, + 0x4c, 0x9c, 0x46, 0x44, 0x8a, 0x97, 0x3f, 0x70, 0x80, 0xa1, 0xff, 0x25, 0x19, 0x0d, 0x9c, 0xae, + 0xbe, 0x1a, 0xa1, 0x07, 0xf2, 0xd9, 0x67, 0x34, 0x6a, 0xb0, 0x21, 0x99, 0xc8, 0xa8, 0xcc, 0xba, + 0x9a, 0xe0, 0x2d, 0x5f, 0x81, 0x09, 0xe2, 0x74, 0x38, 0x59, 0x17, 0x77, 0x2a, 0x7c, 0x54, 0x77, + 0x45, 0x13, 0x0e, 0x64, 0xfa, 0x8f, 0xcb, 0x89, 0x51, 0xf1, 0x32, 0xfb, 0xee, 0xa9, 0x05, 0x87, + 0x22, 0xfc, 0xb9, 0x01, 0xb2, 0x1d, 0xd2, 0x3f, 0x11, 0xf3, 0xdf, 0x18, 0x26, 0xe6, 0xa3, 0xf5, + 0x2f, 0x97, 0xfc, 0xa1, 0xef, 0xc0, 0x38, 0x11, 0x5d, 0x88, 0xaa, 0x7a, 0x6b, 0x98, 0x2e, 0xc2, + 0xf4, 0x1b, 0x9e, 0xb3, 0x64, 0x9b, 0x44, 0x45, 0x2f, 0xb3, 0xf9, 0x62, 0xba, 0xec, 0x58, 0x22, + 0xd8, 0xf3, 0x64, 0xe3, 0x31, 0x31, 0x6c, 0xd5, 0xfc, 0xe0, 0x70, 0x19, 0xc2, 0x9f, 0x38, 0x6a, + 0xc1, 0x1f, 0xe2, 0xe4, 0x9d, 0xcd, 0xd9, 0x7c, 0xcc, 0x34, 0xdc, 0x43, 0x5c, 0xe8, 0xda, 0xa9, + 0x3d, 0xc4, 0x45, 0x20, 0x8f, 0x3f, 0xc3, 0xfe, 0xa3, 0x04, 0x97, 0x42, 0xe5, 0xc2, 0x0f, 0x71, + 0x19, 0x26, 0xff, 0xfb, 0xa0, 0xa9, 0xd8, 0xe3, 0x58, 0x38, 0x75, 0xff, 0x79, 0x8f, 0x63, 0xa1, + 0x6f, 0x39, 0xd5, 0xee, 0x37, 0xa5, 0xe8, 0x00, 0x86, 0x7c, 0xa1, 0x39, 0x85, 0x6f, 0x7a, 0xbe, + 0x74, 0x8f, 0x3c, 0xfa, 0x07, 0x63, 0x70, 0x21, 0xb9, 0x1b, 0x63, 0x17, 0xf9, 0xda, 0x89, 0x17, + 0xf9, 0x4d, 0x98, 0xdb, 0x19, 0xd8, 0xf6, 0x01, 0x1f, 0x43, 0xe4, 0x36, 0x5f, 0x3c, 0x01, 0xfc, + 0x9f, 0xb4, 0x9c, 0x7b, 0x25, 0x43, 0x07, 0x67, 0x5a, 0xa6, 0xef, 0xf5, 0xc7, 0xfe, 0xdd, 0x7b, + 0xfd, 0xca, 0x08, 0xf7, 0xfa, 0x39, 0x17, 0xf1, 0x13, 0x23, 0x5c, 0xc4, 0x67, 0xbf, 0xb2, 0x94, + 0x47, 0x7a, 0x65, 0x19, 0xe5, 0x52, 0x3f, 0x23, 0x1f, 0x9e, 0xf8, 0xad, 0xcb, 0x4b, 0x30, 0x13, + 0x7f, 0xb3, 0x12, 0x61, 0x21, 0x9e, 0xcd, 0xe4, 0x0b, 0x51, 0x24, 0x2c, 0x44, 0x3b, 0x56, 0x1a, + 0xfa, 0x91, 0x06, 0x97, 0xb3, 0xbf, 0x4d, 0x41, 0x36, 0xcc, 0xf4, 0x8c, 0xfb, 0xd1, 0xef, 0x85, + 0xb4, 0x11, 0x89, 0x0f, 0x7f, 0x61, 0x58, 0x8f, 0x61, 0xe1, 0x04, 0x36, 0x7a, 0x1b, 0xaa, 0x3d, + 0xe3, 0x7e, 0x6b, 0xe0, 0x75, 0xc9, 0xc8, 0x04, 0x8b, 0xef, 0xc8, 0x75, 0x89, 0x82, 0x15, 0x9e, + 0xfe, 0x85, 0x06, 0xf3, 0x39, 0xef, 0x06, 0xff, 0x45, 0xa3, 0x7c, 0xaf, 0x04, 0x95, 0x96, 0x69, + 0xd8, 
0xe4, 0x0c, 0xb8, 0xc9, 0x6b, 0x31, 0x6e, 0x72, 0xd2, 0x37, 0xae, 0xdc, 0xab, 0x5c, 0x5a, + 0x82, 0x13, 0xb4, 0xe4, 0xa9, 0x42, 0x68, 0xc7, 0x33, 0x92, 0xe7, 0x61, 0x52, 0x75, 0x3a, 0x5c, + 0xa2, 0xd4, 0x7f, 0x59, 0x82, 0xa9, 0x48, 0x17, 0x43, 0xa6, 0xd9, 0x9d, 0x58, 0x6d, 0x29, 0x17, + 0xb8, 0xb4, 0x89, 0xf4, 0x55, 0x0b, 0xaa, 0x89, 0xf8, 0x46, 0x23, 0x7c, 0x95, 0x4f, 0x17, 0x99, + 0x97, 0x60, 0x86, 0x1a, 0x5e, 0x97, 0x50, 0x75, 0x02, 0x10, 0xf7, 0x95, 0xea, 0x63, 0xa1, 0x76, + 0x4c, 0x8a, 0x13, 0xda, 0x8b, 0x2f, 0xc2, 0x74, 0xac, 0xb3, 0x61, 0x3e, 0xb1, 0x68, 0xac, 0x7c, + 0xf2, 0xf9, 0xd2, 0xb9, 0x4f, 0x3f, 0x5f, 0x3a, 0xf7, 0xd9, 0xe7, 0x4b, 0xe7, 0x7e, 0x70, 0xb4, + 0xa4, 0x7d, 0x72, 0xb4, 0xa4, 0x7d, 0x7a, 0xb4, 0xa4, 0x7d, 0x76, 0xb4, 0xa4, 0xfd, 0xed, 0x68, + 0x49, 0xfb, 0xe9, 0x17, 0x4b, 0xe7, 0xde, 0x7e, 0xec, 0xd8, 0xff, 0x71, 0xf1, 0xaf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x6a, 0x79, 0xb9, 0xab, 0x91, 0x31, 0x00, 0x00, } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { @@ -2208,6 +2210,11 @@ func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TerminatingReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas)) + i-- + dAtA[i] = 0x48 + } if m.CollisionCount != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) i-- @@ -3486,6 +3493,11 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TerminatingReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminatingReplicas)) + i-- + dAtA[i] = 0x38 + } if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -4024,6 +4036,9 @@ func (m *DeploymentStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if m.TerminatingReplicas != nil { + n += 1 + sovGenerated(uint64(*m.TerminatingReplicas)) + } return n } @@ -4502,6 +4517,9 @@ func (m *ReplicaSetStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.TerminatingReplicas != nil { + n += 1 + sovGenerated(uint64(*m.TerminatingReplicas)) + } return n } @@ -4793,6 +4811,7 @@ func (this *DeploymentStatus) String() string { `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`, `}`, }, "") return s @@ -5182,6 +5201,7 @@ func (this *ReplicaSetStatus) String() string { `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `Conditions:` + repeatedStringForConditions + `,`, + `TerminatingReplicas:` + valueToStringGenerated(this.TerminatingReplicas) + `,`, `}`, }, "") return s @@ -7567,6 +7587,26 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminatingReplicas = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11162,6 +11202,26 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return err } 
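+	// case 7 below decodes the added terminatingReplicas field: tag byte
+	// 0x38 ((7<<3)|0: field 7, wire type 0) introduces a varint that is
+	// accumulated 7 bits per byte into an int32 and stored through a
+	// pointer, so an explicit zero stays distinguishable from unset.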
iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminatingReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminatingReplicas = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/tools/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/tools/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 9bbcaa0e2..70fcec0cc 100644 --- a/tools/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/tools/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -320,19 +320,19 @@ message DeploymentStatus { // +optional optional int64 observedGeneration = 1; - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // Total number of non-terminating pods targeted by this deployment (their labels match the selector). // +optional optional int32 replicas = 2; - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // Total number of non-terminating pods targeted by this deployment that have the desired template spec. // +optional optional int32 updatedReplicas = 3; - // Total number of ready pods targeted by this deployment. + // Total number of non-terminating pods targeted by this Deployment with a Ready Condition. // +optional optional int32 readyReplicas = 7; - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment. // +optional optional int32 availableReplicas = 4; @@ -342,6 +342,13 @@ message DeploymentStatus { // +optional optional int32 unavailableReplicas = 5; + // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null + // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase. + // + // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. + // +optional + optional int32 terminatingReplicas = 9; + // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge @@ -863,16 +870,16 @@ message ReplicaSetList { optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset repeated ReplicaSet items = 2; } // ReplicaSetSpec is the specification of a ReplicaSet. message ReplicaSetSpec { - // Replicas is the number of desired replicas. + // Replicas is the number of desired pods. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset // +optional optional int32 replicas = 1; @@ -891,29 +898,36 @@ message ReplicaSetSpec { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. 
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template // +optional optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { - // Replicas is the most recently observed number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // Replicas is the most recently observed number of non-terminating pods. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset optional int32 replicas = 1; - // The number of pods that have labels matching the labels of the pod template of the replicaset. + // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset. // +optional optional int32 fullyLabeledReplicas = 2; - // The number of ready replicas for this replica set. + // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition. // +optional optional int32 readyReplicas = 4; - // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set. // +optional optional int32 availableReplicas = 5; + // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp + // and have not yet reached the Failed or Succeeded .status.phase. + // + // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. + // +optional + optional int32 terminatingReplicas = 7; + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. // +optional optional int64 observedGeneration = 3; diff --git a/tools/vendor/k8s.io/api/extensions/v1beta1/types.go b/tools/vendor/k8s.io/api/extensions/v1beta1/types.go index 09f58692f..b80a7a7e1 100644 --- a/tools/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/tools/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -245,19 +245,19 @@ type DeploymentStatus struct { // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // Total number of non-terminating pods targeted by this deployment (their labels match the selector). // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // Total number of non-terminating pods targeted by this deployment that have the desired template spec. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - // Total number of ready pods targeted by this deployment. + // Total number of non-terminating pods targeted by this Deployment with a Ready Condition. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment. 
// +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` @@ -267,6 +267,13 @@ type DeploymentStatus struct { // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null + // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase. + // + // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. + // +optional + TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"` + // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge @@ -941,16 +948,16 @@ type ReplicaSetList struct { metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ReplicaSets. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` } // ReplicaSetSpec is the specification of a ReplicaSet. type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. + // Replicas is the number of desired pods. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` @@ -969,29 +976,36 @@ type ReplicaSetSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template // +optional Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { - // Replicas is the most recently observed number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // Replicas is the most recently observed number of non-terminating pods. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // The number of pods that have labels matching the labels of the pod template of the replicaset. + // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset. // +optional FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - // The number of ready replicas for this replica set. + // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition. 
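Because TerminatingReplicas is declared as a *int32 and is alpha-gated (the field comments say to enable DeploymentReplicaSetTerminatingReplicas), consumers must nil-check before dereferencing. Below is a hedged sketch of reading it via client-go; it assumes the vendored client is new enough to expose the same field on apps/v1, and reportTerminating plus its wiring are hypothetical, not part of this patch.

package inspect

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// reportTerminating fetches a Deployment and prints the alpha
// terminatingReplicas count, nil-checking because the server omits the
// field entirely when the feature gate is off.
func reportTerminating(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	d, err := cs.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if d.Status.TerminatingReplicas == nil {
		fmt.Println("terminatingReplicas not reported (feature gate DeploymentReplicaSetTerminatingReplicas likely disabled)")
		return nil
	}
	fmt.Printf("%d pod(s) terminating\n", *d.Status.TerminatingReplicas)
	return nil
}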
// +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` - // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set. // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp + // and have not yet reached the Failed or Succeeded .status.phase. + // + // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field. + // +optional + TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"` + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` diff --git a/tools/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 408022c9d..923fab3aa 100644 --- a/tools/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -169,11 +169,12 @@ func (DeploymentSpec) SwaggerDoc() map[string]string { var map_DeploymentStatus = map[string]string{ "": "DeploymentStatus is the most recently observed status of the Deployment.", "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "replicas": "Total number of non-terminating pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminating pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.", + "availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.", "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.", "conditions": "Represents the latest available observations of a deployment's current state.", "collisionCount": "Count of hash collisions for the Deployment. 
The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } @@ -435,7 +436,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", + "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset", } func (ReplicaSetList) SwaggerDoc() map[string]string { @@ -444,10 +445,10 @@ func (ReplicaSetList) SwaggerDoc() map[string]string { var map_ReplicaSetSpec = map[string]string{ "": "ReplicaSetSpec is the specification of a ReplicaSet.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "replicas": "Replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template", } func (ReplicaSetSpec) SwaggerDoc() map[string]string { @@ -456,10 +457,11 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", - "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "readyReplicas": "The number of ready replicas for this replica set.", - "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "replicas": "Replicas is the most recently observed number of non-terminating pods. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset", + "fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.", + "availableReplicas": "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.", + "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", "conditions": "Represents the latest available observations of a replica set's current state.", } diff --git a/tools/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 6b474ae48..2c7a8524e 100644 --- a/tools/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -341,6 +341,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { *out = *in + if in.TerminatingReplicas != nil { + in, out := &in.TerminatingReplicas, &out.TerminatingReplicas + *out = new(int32) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]DeploymentCondition, len(*in)) @@ -1045,6 +1050,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { *out = *in + if in.TerminatingReplicas != nil { + in, out := &in.TerminatingReplicas, &out.TerminatingReplicas + *out = new(int32) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]ReplicaSetCondition, len(*in)) diff --git a/tools/vendor/k8s.io/api/flowcontrol/v1/doc.go b/tools/vendor/k8s.io/api/flowcontrol/v1/doc.go index c9e7db158..ad5f45791 100644 --- a/tools/vendor/k8s.io/api/flowcontrol/v1/doc.go +++ b/tools/vendor/k8s.io/api/flowcontrol/v1/doc.go @@ -22,4 +22,4 @@ limitations under the License. // +groupName=flowcontrol.apiserver.k8s.io // Package v1 holds api types of version v1 for group "flowcontrol.apiserver.k8s.io". -package v1 // import "k8s.io/api/flowcontrol/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go b/tools/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go index 50897b7eb..20268c1f2 100644 --- a/tools/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go @@ -22,4 +22,4 @@ limitations under the License. // +groupName=flowcontrol.apiserver.k8s.io // Package v1beta1 holds api types of version v1beta1 for group "flowcontrol.apiserver.k8s.io".
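The DeepCopyInto hunks above allocate a fresh int32 for the new pointer field rather than copying the struct wholesale; a plain value copy would alias the pointer between original and copy, so a later write through one would silently change the other. A minimal standalone illustration with a stub type (not the generated code itself):

package main

import "fmt"

// status stands in for the generated structs: one optional int32 pointer.
type status struct{ TerminatingReplicas *int32 }

func main() {
	n := int32(3)
	in := status{TerminatingReplicas: &n}

	shallow := in // plain assignment aliases the pointer

	deep := in
	if in.TerminatingReplicas != nil {
		v := *in.TerminatingReplicas
		deep.TerminatingReplicas = &v // fresh allocation, as DeepCopyInto does
	}

	*in.TerminatingReplicas = 7
	fmt.Println(*shallow.TerminatingReplicas, *deep.TerminatingReplicas) // 7 3
}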
-package v1beta1 // import "k8s.io/api/flowcontrol/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go b/tools/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go index 53b460d37..2dcad11ad 100644 --- a/tools/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go +++ b/tools/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go @@ -22,4 +22,4 @@ limitations under the License. // +groupName=flowcontrol.apiserver.k8s.io // Package v1beta2 holds api types of version v1beta2 for group "flowcontrol.apiserver.k8s.io". -package v1beta2 // import "k8s.io/api/flowcontrol/v1beta2" +package v1beta2 diff --git a/tools/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go b/tools/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go index cd60cfef7..95f4430d3 100644 --- a/tools/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go +++ b/tools/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go @@ -22,4 +22,4 @@ limitations under the License. // +groupName=flowcontrol.apiserver.k8s.io // Package v1beta3 holds api types of version v1beta3 for group "flowcontrol.apiserver.k8s.io". -package v1beta3 // import "k8s.io/api/flowcontrol/v1beta3" +package v1beta3 diff --git a/tools/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/tools/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go index 5db6d52d4..f5fbbdbf0 100644 --- a/tools/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go +++ b/tools/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +groupName=imagepolicy.k8s.io -package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1" +package v1alpha1 diff --git a/tools/vendor/k8s.io/api/networking/v1/doc.go b/tools/vendor/k8s.io/api/networking/v1/doc.go index 1d13e7bab..e2093b7df 100644 --- a/tools/vendor/k8s.io/api/networking/v1/doc.go +++ b/tools/vendor/k8s.io/api/networking/v1/doc.go @@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true // +groupName=networking.k8s.io -package v1 // import "k8s.io/api/networking/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/networking/v1/generated.pb.go b/tools/vendor/k8s.io/api/networking/v1/generated.pb.go index 7c023e690..062382b63 100644 --- a/tools/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/tools/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -104,10 +104,94 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{2} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{3} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{4} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{2} + return fileDescriptor_2c41434372fec1d7, []int{5} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +219,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{3} + return 
fileDescriptor_2c41434372fec1d7, []int{6} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +247,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{4} + return fileDescriptor_2c41434372fec1d7, []int{7} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +275,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressClass) Reset() { *m = IngressClass{} } func (*IngressClass) ProtoMessage() {} func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{5} + return fileDescriptor_2c41434372fec1d7, []int{8} } func (m *IngressClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +303,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo func (m *IngressClassList) Reset() { *m = IngressClassList{} } func (*IngressClassList) ProtoMessage() {} func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{6} + return fileDescriptor_2c41434372fec1d7, []int{9} } func (m *IngressClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +331,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} } func (*IngressClassParametersReference) ProtoMessage() {} func (*IngressClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{7} + return fileDescriptor_2c41434372fec1d7, []int{10} } func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +359,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } func (*IngressClassSpec) ProtoMessage() {} func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{8} + return fileDescriptor_2c41434372fec1d7, []int{11} } func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +387,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{9} + return fileDescriptor_2c41434372fec1d7, []int{12} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +415,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{10} + return fileDescriptor_2c41434372fec1d7, []int{13} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +443,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{11} + return 
fileDescriptor_2c41434372fec1d7, []int{14} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +471,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{12} + return fileDescriptor_2c41434372fec1d7, []int{15} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +499,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{13} + return fileDescriptor_2c41434372fec1d7, []int{16} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +527,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{14} + return fileDescriptor_2c41434372fec1d7, []int{17} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +555,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressServiceBackend) Reset() { *m = IngressServiceBackend{} } func (*IngressServiceBackend) ProtoMessage() {} func (*IngressServiceBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{15} + return fileDescriptor_2c41434372fec1d7, []int{18} } func (m *IngressServiceBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +583,7 @@ var xxx_messageInfo_IngressServiceBackend proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{16} + return fileDescriptor_2c41434372fec1d7, []int{19} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +611,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{17} + return fileDescriptor_2c41434372fec1d7, []int{20} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +639,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{18} + return fileDescriptor_2c41434372fec1d7, []int{21} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +667,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{19} + return fileDescriptor_2c41434372fec1d7, []int{22} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +695,7 @@ var 
xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{20} + return fileDescriptor_2c41434372fec1d7, []int{23} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +723,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{21} + return fileDescriptor_2c41434372fec1d7, []int{24} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +751,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{22} + return fileDescriptor_2c41434372fec1d7, []int{25} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +779,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{23} + return fileDescriptor_2c41434372fec1d7, []int{26} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +807,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{24} + return fileDescriptor_2c41434372fec1d7, []int{27} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +835,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{25} + return fileDescriptor_2c41434372fec1d7, []int{28} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -776,10 +860,38 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{29} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + 
xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} } func (*ServiceBackendPort) ProtoMessage() {} func (*ServiceBackendPort) Descriptor() ([]byte, []int) { - return fileDescriptor_2c41434372fec1d7, []int{26} + return fileDescriptor_2c41434372fec1d7, []int{30} } func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -804,9 +916,124 @@ func (m *ServiceBackendPort) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceBackendPort proto.InternalMessageInfo +func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } +func (*ServiceCIDR) ProtoMessage() {} +func (*ServiceCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{31} +} +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDR.Merge(m, src) +} +func (m *ServiceCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo + +func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } +func (*ServiceCIDRList) ProtoMessage() {} +func (*ServiceCIDRList) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{32} +} +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRList.Merge(m, src) +} +func (m *ServiceCIDRList) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo + +func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } +func (*ServiceCIDRSpec) ProtoMessage() {} +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{33} +} +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) +} +func (m *ServiceCIDRSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo + +func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } +func (*ServiceCIDRStatus) ProtoMessage() {} +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{34} +} +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +} +func (m *ServiceCIDRStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1.HTTPIngressRuleValue") + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1.IPAddressSpec") proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1.IngressBackend") @@ -831,7 +1058,12 @@ func init() { proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer") proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1.ParentReference") proto.RegisterType((*ServiceBackendPort)(nil), "k8s.io.api.networking.v1.ServiceBackendPort") + proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1.ServiceCIDR") + proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1.ServiceCIDRList") + proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1.ServiceCIDRSpec") + proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1.ServiceCIDRStatus") } func init() { @@ -839,111 +1071,125 @@ func init() { } var fileDescriptor_2c41434372fec1d7 = []byte{ - // 1652 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x55, - 0x14, 0xce, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x69, 0x85, 0x09, 0xc2, 0x0e, 0x23, 0xda, - 0x06, 0xda, 0xda, 0x34, 0xad, 0x10, 0x6c, 0x78, 0x4c, 0x9a, 0xa6, 0xa1, 0xa9, 0x63, 0x5d, 0x5b, - 0x45, 0x20, 0x1e, 0x9d, 0x8c, 0x6f, 0x9c, 0x69, 0xc6, 0x33, 0xa3, 0x3b, 0xd7, 0xa5, 0x95, 0x10, - 0x62, 0xc3, 0x82, 0x1d, 0x7f, 0x01, 0xf1, 0x0b, 0x10, 0x2c, 0x90, 0x10, 0x14, 0x36, 0xa8, 0xcb, - 0x4a, 0x6c, 0xba, 0xc1, 0xa2, 0xe6, 0x5f, 0x64, 0x85, 0xee, 0x63, 0x1e, 0x7e, 0xd5, 0xa6, 0xaa, - 0xb2, 0x4a, 0xee, 0x39, 0xe7, 0x7e, 0xe7, 0x71, 0xcf, 0x6b, 0x0c, 0x6b, 0x87, 0x6f, 0x06, 0x25, - 0xdb, 0x2b, 0x9b, 0xbe, 0x5d, 0x76, 0x09, 0xfb, 0xdc, 0xa3, 0x87, 0xb6, 0xdb, 0x2c, 0xdf, 0xb9, - 0x58, 0x6e, 0x12, 0x97, 0x50, 0x93, 0x91, 0x46, 0xc9, 0xa7, 0x1e, 0xf3, 0x50, 0x5e, 0x4a, 0x96, - 0x4c, 0xdf, 0x2e, 0xc5, 0x92, 0xa5, 0x3b, 0x17, 0x57, 0x2e, 0x34, 0x6d, 0x76, 0xd0, 0xde, 0x2b, - 0x59, 0x5e, 0xab, 0xdc, 0xf4, 0x9a, 0x5e, 0x59, 0x5c, 0xd8, 0x6b, 0xef, 0x8b, 0x93, 0x38, 0x88, - 0xff, 0x24, 0xd0, 0x8a, 0x9e, 0x50, 0x69, 0x79, 0x94, 0x0c, 0x51, 0xb6, 0x72, 0x39, 0x96, 0x69, - 0x99, 0xd6, 0x81, 0xed, 0x12, 0x7a, 0xaf, 0xec, 0x1f, 0x36, 0x39, 0x21, 0x28, 0xb7, 0x08, 0x33, - 0x87, 0xdd, 0x2a, 0x8f, 0xba, 0x45, 0xdb, 0x2e, 0xb3, 0x5b, 0x64, 0xe0, 0xc2, 0x1b, 0xe3, 0x2e, - 0x04, 0xd6, 0x01, 0x69, 0x99, 0x03, 0xf7, 
0x2e, 0x8d, 0xba, 0xd7, 0x66, 0xb6, 0x53, 0xb6, 0x5d, - 0x16, 0x30, 0xda, 0x7f, 0x49, 0xff, 0x4d, 0x83, 0x13, 0xd7, 0xea, 0xf5, 0xea, 0xb6, 0xdb, 0xa4, - 0x24, 0x08, 0xaa, 0x26, 0x3b, 0x40, 0xab, 0x30, 0xe3, 0x9b, 0xec, 0x20, 0xaf, 0xad, 0x6a, 0x6b, - 0x73, 0xc6, 0xfc, 0x83, 0x4e, 0x71, 0xaa, 0xdb, 0x29, 0xce, 0x70, 0x1e, 0x16, 0x1c, 0x74, 0x19, - 0xb2, 0xfc, 0x6f, 0xfd, 0x9e, 0x4f, 0xf2, 0xd3, 0x42, 0x2a, 0xdf, 0xed, 0x14, 0xb3, 0x55, 0x45, - 0x3b, 0x4a, 0xfc, 0x8f, 0x23, 0x49, 0x54, 0x83, 0xcc, 0x9e, 0x69, 0x1d, 0x12, 0xb7, 0x91, 0x4f, - 0xad, 0x6a, 0x6b, 0xb9, 0xf5, 0xb5, 0xd2, 0xa8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f, 0x9c, - 0x50, 0x46, 0x64, 0x14, 0x01, 0x87, 0x48, 0xfa, 0x3e, 0x9c, 0x4c, 0xd8, 0x8f, 0xdb, 0x0e, 0xb9, - 0x69, 0x3a, 0x6d, 0x82, 0x2a, 0x90, 0xe6, 0x8a, 0x83, 0xbc, 0xb6, 0x3a, 0xbd, 0x96, 0x5b, 0x7f, - 0x75, 0xb4, 0xaa, 0x3e, 0xf7, 0x8d, 0x05, 0xa5, 0x2b, 0xcd, 0x4f, 0x01, 0x96, 0x30, 0xfa, 0x2e, - 0x64, 0xb6, 0xab, 0x86, 0xe3, 0x59, 0x87, 0x3c, 0x3e, 0x96, 0xdd, 0xa0, 0xfd, 0xf1, 0xd9, 0xd8, - 0xbe, 0x82, 0xb1, 0xe0, 0x20, 0x1d, 0x66, 0xc9, 0x5d, 0x8b, 0xf8, 0x2c, 0x9f, 0x5a, 0x9d, 0x5e, - 0x9b, 0x33, 0xa0, 0xdb, 0x29, 0xce, 0x6e, 0x0a, 0x0a, 0x56, 0x1c, 0xfd, 0xeb, 0x14, 0x64, 0x94, - 0x5a, 0x74, 0x0b, 0xb2, 0x3c, 0x7d, 0x1a, 0x26, 0x33, 0x05, 0x6a, 0x6e, 0xfd, 0xf5, 0x84, 0xbd, - 0xd1, 0x6b, 0x96, 0xfc, 0xc3, 0x26, 0x27, 0x04, 0x25, 0x2e, 0xcd, 0x6d, 0xdf, 0xdd, 0xbb, 0x4d, - 0x2c, 0x76, 0x83, 0x30, 0xd3, 0x40, 0xca, 0x0e, 0x88, 0x69, 0x38, 0x42, 0x45, 0x5b, 0x30, 0x13, - 0xf8, 0xc4, 0x52, 0x81, 0x3f, 0x3d, 0x36, 0xf0, 0x35, 0x9f, 0x58, 0xb1, 0x6b, 0xfc, 0x84, 0x05, - 0x00, 0xda, 0x85, 0xd9, 0x80, 0x99, 0xac, 0x1d, 0x88, 0x87, 0xcf, 0xad, 0x9f, 0x1d, 0x0f, 0x25, - 0xc4, 0x8d, 0x45, 0x05, 0x36, 0x2b, 0xcf, 0x58, 0xc1, 0xe8, 0x7f, 0x68, 0xb0, 0xd8, 0xfb, 0xda, - 0xe8, 0x26, 0x64, 0x02, 0x42, 0xef, 0xd8, 0x16, 0xc9, 0xcf, 0x08, 0x25, 0xe5, 0xf1, 0x4a, 0xa4, - 0x7c, 0x98, 0x2f, 0x39, 0x9e, 0x2b, 0x8a, 0x86, 0x43, 0x30, 0xf4, 0x01, 0x64, 0x29, 0x09, 0xbc, - 0x36, 0xb5, 0x88, 0xb2, 0xfe, 0x42, 0x12, 0x98, 0xd7, 0x3d, 0x87, 0xe4, 0xc9, 0xda, 0xd8, 0xf1, - 0x2c, 0xd3, 0x91, 0xa1, 0xc4, 0x64, 0x9f, 0x50, 0xe2, 0x5a, 0xc4, 0x98, 0xe7, 0x59, 0x8e, 0x15, - 0x04, 0x8e, 0xc0, 0x78, 0x15, 0xcd, 0x2b, 0x43, 0x36, 0x1c, 0xf3, 0x58, 0x1e, 0x74, 0xa7, 0xe7, - 0x41, 0x5f, 0x1b, 0x1b, 0x20, 0x61, 0xd7, 0xa8, 0x57, 0xd5, 0x7f, 0xd5, 0x60, 0x29, 0x29, 0xb8, - 0x63, 0x07, 0x0c, 0x7d, 0x3c, 0xe0, 0x44, 0x69, 0x32, 0x27, 0xf8, 0x6d, 0xe1, 0xc2, 0x92, 0x52, - 0x95, 0x0d, 0x29, 0x09, 0x07, 0xae, 0x43, 0xda, 0x66, 0xa4, 0x15, 0x88, 0x12, 0xc9, 0xad, 0x9f, - 0x99, 0xcc, 0x83, 0xb8, 0x3a, 0xb7, 0xf9, 0x65, 0x2c, 0x31, 0xf4, 0xbf, 0x35, 0x28, 0x26, 0xc5, - 0xaa, 0x26, 0x35, 0x5b, 0x84, 0x11, 0x1a, 0x44, 0x8f, 0x87, 0xd6, 0x20, 0x6b, 0x56, 0xb7, 0xb7, - 0xa8, 0xd7, 0xf6, 0xc3, 0xd2, 0xe5, 0xa6, 0xbd, 0xa7, 0x68, 0x38, 0xe2, 0xf2, 0x02, 0x3f, 0xb4, - 0x55, 0x97, 0x4a, 0x14, 0xf8, 0x75, 0xdb, 0x6d, 0x60, 0xc1, 0xe1, 0x12, 0xae, 0xd9, 0x0a, 0x9b, - 0x5f, 0x24, 0x51, 0x31, 0x5b, 0x04, 0x0b, 0x0e, 0x2a, 0x42, 0x3a, 0xb0, 0x3c, 0x5f, 0x66, 0xf0, - 0x9c, 0x31, 0xc7, 0x4d, 0xae, 0x71, 0x02, 0x96, 0x74, 0x74, 0x0e, 0xe6, 0xb8, 0x60, 0xe0, 0x9b, - 0x16, 0xc9, 0xa7, 0x85, 0xd0, 0x42, 0xb7, 0x53, 0x9c, 0xab, 0x84, 0x44, 0x1c, 0xf3, 0xf5, 0x1f, - 0xfa, 0xde, 0x87, 0x3f, 0x1d, 0x5a, 0x07, 0xb0, 0x3c, 0x97, 0x51, 0xcf, 0x71, 0x48, 0xd8, 0x8d, - 0xa2, 0xa4, 0xd9, 0x88, 0x38, 0x38, 0x21, 0x85, 0x6c, 0x00, 0x3f, 0x8a, 0x8d, 0x4a, 0x9e, 0xb7, - 0x26, 0x0b, 0xfd, 0x90, 0x98, 0x1a, 0x8b, 0x5c, 0x55, 0x82, 0x91, 
0x00, 0xd7, 0x7f, 0xd4, 0x20, - 0xa7, 0xee, 0x1f, 0x43, 0x3a, 0x5d, 0xed, 0x4d, 0xa7, 0x97, 0xc7, 0x8f, 0x96, 0xe1, 0x99, 0xf4, - 0xb3, 0x06, 0x2b, 0xa1, 0xd5, 0x9e, 0xd9, 0x30, 0x4c, 0xc7, 0x74, 0x2d, 0x42, 0xc3, 0x4e, 0xbd, - 0x02, 0x29, 0x3b, 0x4c, 0x1f, 0x50, 0x00, 0xa9, 0xed, 0x2a, 0x4e, 0xd9, 0x3e, 0x3a, 0x0f, 0xd9, - 0x03, 0x2f, 0x60, 0x22, 0x31, 0x64, 0xea, 0x44, 0x06, 0x5f, 0x53, 0x74, 0x1c, 0x49, 0xa0, 0x2a, - 0xa4, 0x7d, 0x8f, 0xb2, 0x20, 0x3f, 0x23, 0x0c, 0x3e, 0x37, 0xd6, 0xe0, 0xaa, 0x47, 0x99, 0xea, - 0xa5, 0xf1, 0x88, 0xe2, 0x08, 0x58, 0x02, 0xe9, 0x5f, 0xc0, 0x0b, 0x43, 0x2c, 0x97, 0x57, 0xd0, - 0x67, 0x90, 0xb1, 0x25, 0x53, 0x4d, 0xc4, 0xcb, 0x63, 0x15, 0x0e, 0xf1, 0x3f, 0x1e, 0xc4, 0xe1, - 0xc0, 0x0d, 0x51, 0xf5, 0xef, 0x35, 0x58, 0x1e, 0xb0, 0x54, 0xec, 0x12, 0x1e, 0x65, 0x22, 0x62, - 0xe9, 0xc4, 0x2e, 0xe1, 0x51, 0x86, 0x05, 0x07, 0x5d, 0x87, 0xac, 0x58, 0x45, 0x2c, 0xcf, 0x51, - 0x51, 0x2b, 0x87, 0x51, 0xab, 0x2a, 0xfa, 0x51, 0xa7, 0xf8, 0xe2, 0xe0, 0x7e, 0x56, 0x0a, 0xd9, - 0x38, 0x02, 0xe0, 0x55, 0x47, 0x28, 0xf5, 0xa8, 0x2a, 0x4c, 0x51, 0x75, 0x9b, 0x9c, 0x80, 0x25, - 0x5d, 0xff, 0x2e, 0x4e, 0x4a, 0xbe, 0x2b, 0x70, 0xfb, 0xf8, 0x8b, 0xf4, 0xcf, 0x72, 0xfe, 0x5e, - 0x58, 0x70, 0x90, 0x0f, 0x4b, 0x76, 0xdf, 0x72, 0x31, 0x71, 0xd3, 0x8d, 0x6e, 0x18, 0x79, 0x85, - 0xbc, 0xd4, 0xcf, 0xc1, 0x03, 0xe8, 0xfa, 0x2d, 0x18, 0x90, 0xe2, 0xed, 0xfe, 0x80, 0x31, 0x7f, - 0x48, 0xe1, 0x8c, 0xde, 0x66, 0x62, 0xed, 0x59, 0xe1, 0x53, 0xbd, 0x5e, 0xc5, 0x02, 0x45, 0xff, - 0x46, 0x83, 0x53, 0x43, 0x07, 0x67, 0xd4, 0xd8, 0xb4, 0x91, 0x8d, 0xad, 0xa2, 0x5e, 0x54, 0xc6, - 0xe0, 0xfc, 0x68, 0x4b, 0x7a, 0x91, 0xf9, 0x8b, 0x0f, 0x7b, 0x7f, 0xfd, 0xcf, 0x54, 0xf4, 0x22, - 0xa2, 0xab, 0xbd, 0x1b, 0xc5, 0x5b, 0x74, 0x1d, 0xae, 0x59, 0xf5, 0xd0, 0x93, 0x89, 0xf8, 0x45, - 0x3c, 0x3c, 0x20, 0x8d, 0x1a, 0xb0, 0xd8, 0x20, 0xfb, 0x66, 0xdb, 0x61, 0x4a, 0xb7, 0x8a, 0xda, - 0xe4, 0xeb, 0x26, 0xea, 0x76, 0x8a, 0x8b, 0x57, 0x7a, 0x30, 0x70, 0x1f, 0x26, 0xda, 0x80, 0x69, - 0xe6, 0x84, 0xed, 0xe6, 0x95, 0xb1, 0xd0, 0xf5, 0x9d, 0x9a, 0x91, 0x53, 0xee, 0x4f, 0xd7, 0x77, - 0x6a, 0x98, 0xdf, 0x46, 0xef, 0x43, 0x9a, 0xb6, 0x1d, 0xc2, 0x97, 0xa9, 0xe9, 0x89, 0xf6, 0x32, - 0xfe, 0xa6, 0x71, 0xf9, 0xf3, 0x53, 0x80, 0x25, 0x84, 0xfe, 0x25, 0x2c, 0xf4, 0x6c, 0x5c, 0xa8, - 0x05, 0xf3, 0x4e, 0xa2, 0x84, 0x55, 0x14, 0x2e, 0xfd, 0xaf, 0xba, 0x57, 0x0d, 0xe7, 0xa4, 0xd2, - 0x38, 0x9f, 0xe4, 0xe1, 0x1e, 0x78, 0xdd, 0x04, 0x88, 0x7d, 0xe5, 0x95, 0xc8, 0xcb, 0x47, 0x76, - 0x1b, 0x55, 0x89, 0xbc, 0xaa, 0x02, 0x2c, 0xe9, 0x7c, 0x7a, 0x05, 0xc4, 0xa2, 0x84, 0x55, 0xe2, - 0x7e, 0x19, 0x4d, 0xaf, 0x5a, 0xc4, 0xc1, 0x09, 0x29, 0xfd, 0x77, 0x0d, 0x16, 0x2a, 0xd2, 0xe4, - 0xaa, 0xe7, 0xd8, 0xd6, 0xbd, 0x63, 0x58, 0xb4, 0x6e, 0xf4, 0x2c, 0x5a, 0x4f, 0x68, 0xd3, 0x3d, - 0x86, 0x8d, 0xdc, 0xb4, 0x7e, 0xd2, 0xe0, 0xf9, 0x1e, 0xc9, 0xcd, 0xb8, 0x19, 0x45, 0x23, 0x41, - 0x1b, 0x37, 0x12, 0x7a, 0x10, 0x44, 0x69, 0x0d, 0x1d, 0x09, 0x68, 0x0b, 0x52, 0xcc, 0x53, 0x39, - 0x3a, 0x31, 0x1c, 0x21, 0x34, 0x9e, 0x6d, 0x75, 0x0f, 0xa7, 0x98, 0xa7, 0xff, 0xa2, 0x41, 0xbe, - 0x47, 0x2a, 0xd9, 0x44, 0x9f, 0xbd, 0xdd, 0x37, 0x60, 0x66, 0x9f, 0x7a, 0xad, 0xa7, 0xb1, 0x3c, - 0x0a, 0xfa, 0x55, 0xea, 0xb5, 0xb0, 0x80, 0xd1, 0xef, 0x6b, 0xb0, 0xdc, 0x23, 0x79, 0x0c, 0x0b, - 0xc9, 0x4e, 0xef, 0x42, 0x72, 0x76, 0x42, 0x1f, 0x46, 0xac, 0x25, 0xf7, 0x53, 0x7d, 0x1e, 0x70, - 0x5f, 0xd1, 0x3e, 0xe4, 0x7c, 0xaf, 0x51, 0x23, 0x0e, 0xb1, 0x98, 0x37, 0xac, 0xc0, 0x9f, 0xe4, - 0x84, 0xb9, 0x47, 0x9c, 0xf0, 0xaa, 0x71, 0xa2, 0xdb, 0x29, 0xe6, 0xaa, 0x31, 0x16, 0x4e, 
0x02, - 0xa3, 0xbb, 0xb0, 0x1c, 0xed, 0xa2, 0x91, 0xb6, 0xd4, 0xd3, 0x6b, 0x3b, 0xd5, 0xed, 0x14, 0x97, - 0x2b, 0xfd, 0x88, 0x78, 0x50, 0x09, 0xba, 0x06, 0x19, 0xdb, 0x17, 0x9f, 0xdd, 0xea, 0x8b, 0xed, - 0x49, 0x8b, 0x9d, 0xfc, 0x3e, 0x97, 0x1f, 0x7f, 0xea, 0x80, 0xc3, 0xeb, 0xfa, 0x5f, 0xfd, 0x39, - 0xc0, 0x13, 0x0e, 0x6d, 0x25, 0xb6, 0x0f, 0x39, 0xf3, 0xce, 0x3d, 0xdd, 0xe6, 0xd1, 0x3b, 0x16, - 0x47, 0x37, 0xa1, 0x36, 0xb3, 0x9d, 0x92, 0xfc, 0x31, 0xa6, 0xb4, 0xed, 0xb2, 0x5d, 0x5a, 0x63, - 0xd4, 0x76, 0x9b, 0x72, 0x44, 0x27, 0xd6, 0xa2, 0xd3, 0x90, 0x51, 0x53, 0x53, 0x38, 0x9e, 0x96, - 0x5e, 0x6d, 0x4a, 0x12, 0x0e, 0x79, 0xfa, 0x51, 0x7f, 0x5e, 0x88, 0x19, 0x7a, 0xfb, 0x99, 0xe5, - 0xc5, 0x73, 0x2a, 0x1b, 0x47, 0xe7, 0xc6, 0x27, 0xf1, 0x62, 0x29, 0x33, 0x7d, 0x7d, 0xc2, 0x4c, - 0x4f, 0x4e, 0xb4, 0x91, 0x6b, 0x25, 0xfa, 0x10, 0x66, 0x89, 0x44, 0x97, 0x23, 0xf2, 0xe2, 0x84, - 0xe8, 0x71, 0x5b, 0x8d, 0x7f, 0x79, 0x50, 0x34, 0x05, 0x88, 0xde, 0xe1, 0x51, 0xe2, 0xb2, 0xfc, - 0x83, 0x5f, 0xee, 0xe1, 0x73, 0xc6, 0x4b, 0xd2, 0xd9, 0x88, 0x7c, 0xc4, 0x3f, 0x70, 0xa2, 0x23, - 0x4e, 0xde, 0xd0, 0x3f, 0x05, 0x34, 0xb8, 0xe4, 0x4c, 0xb0, 0x42, 0x9d, 0x81, 0x59, 0xb7, 0xdd, - 0xda, 0x23, 0xb2, 0x86, 0xd2, 0xb1, 0x81, 0x15, 0x41, 0xc5, 0x8a, 0x6b, 0xbc, 0xfd, 0xe0, 0x71, - 0x61, 0xea, 0xe1, 0xe3, 0xc2, 0xd4, 0xa3, 0xc7, 0x85, 0xa9, 0xaf, 0xba, 0x05, 0xed, 0x41, 0xb7, - 0xa0, 0x3d, 0xec, 0x16, 0xb4, 0x47, 0xdd, 0x82, 0xf6, 0x4f, 0xb7, 0xa0, 0x7d, 0xfb, 0x6f, 0x61, - 0xea, 0xa3, 0xfc, 0xa8, 0x5f, 0x4b, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x24, 0x03, 0xec, 0x04, - 0x48, 0x15, 0x00, 0x00, + // 1884 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcd, 0x8f, 0x1b, 0x49, + 0x15, 0x9f, 0xf6, 0x8c, 0x67, 0xec, 0xe7, 0xf9, 0xc8, 0x14, 0x59, 0x61, 0x06, 0x61, 0x87, 0x5e, + 0xb2, 0x3b, 0x4b, 0x76, 0x6d, 0x32, 0x1b, 0x21, 0xb8, 0x00, 0xdb, 0x93, 0x6c, 0xe2, 0xcd, 0xc4, + 0xb1, 0xca, 0x56, 0x10, 0x88, 0x8f, 0xed, 0x69, 0xd7, 0x78, 0x7a, 0xa7, 0xdd, 0xd5, 0xaa, 0x2e, + 0x87, 0x44, 0x42, 0x88, 0x0b, 0x07, 0x6e, 0xf0, 0x27, 0x20, 0xfe, 0x02, 0x04, 0xd2, 0xae, 0xb4, + 0x82, 0x85, 0x0b, 0xca, 0x71, 0x25, 0x2e, 0x7b, 0xc1, 0x22, 0xe6, 0xbf, 0xc8, 0x09, 0xd5, 0x47, + 0x7f, 0xd9, 0xee, 0xb1, 0x89, 0x22, 0x9f, 0xc6, 0xfd, 0xde, 0xab, 0xdf, 0x7b, 0xf5, 0xea, 0x7d, + 0x55, 0x0d, 0x1c, 0x5e, 0x7c, 0x27, 0x6c, 0xb8, 0xb4, 0x69, 0x07, 0x6e, 0xd3, 0x27, 0xfc, 0x17, + 0x94, 0x5d, 0xb8, 0xfe, 0xa0, 0xf9, 0xf8, 0x66, 0x73, 0x40, 0x7c, 0xc2, 0x6c, 0x4e, 0xfa, 0x8d, + 0x80, 0x51, 0x4e, 0x51, 0x55, 0x49, 0x36, 0xec, 0xc0, 0x6d, 0x24, 0x92, 0x8d, 0xc7, 0x37, 0x0f, + 0xde, 0x19, 0xb8, 0xfc, 0x7c, 0x74, 0xda, 0x70, 0xe8, 0xb0, 0x39, 0xa0, 0x03, 0xda, 0x94, 0x0b, + 0x4e, 0x47, 0x67, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x0a, 0xe8, 0xc0, 0x4c, 0xa9, 0x74, 0x28, 0x23, + 0x73, 0x94, 0x1d, 0xdc, 0x4a, 0x64, 0x86, 0xb6, 0x73, 0xee, 0xfa, 0x84, 0x3d, 0x6d, 0x06, 0x17, + 0x03, 0x41, 0x08, 0x9b, 0x43, 0xc2, 0xed, 0x79, 0xab, 0x9a, 0x79, 0xab, 0xd8, 0xc8, 0xe7, 0xee, + 0x90, 0xcc, 0x2c, 0xf8, 0xf6, 0xa2, 0x05, 0xa1, 0x73, 0x4e, 0x86, 0xf6, 0xcc, 0xba, 0x77, 0xf3, + 0xd6, 0x8d, 0xb8, 0xeb, 0x35, 0x5d, 0x9f, 0x87, 0x9c, 0x4d, 0x2f, 0x32, 0xff, 0x66, 0xc0, 0xde, + 0xbd, 0x5e, 0xaf, 0xd3, 0xf2, 0x07, 0x8c, 0x84, 0x61, 0xc7, 0xe6, 0xe7, 0xe8, 0x1a, 0x6c, 0x04, + 0x36, 0x3f, 0xaf, 0x1a, 0xd7, 0x8c, 0xc3, 0xb2, 0xb5, 0xfd, 0x6c, 0x5c, 0x5f, 0x9b, 0x8c, 0xeb, + 0x1b, 0x82, 0x87, 0x25, 0x07, 0xdd, 0x82, 0x92, 0xf8, 0xdb, 0x7b, 0x1a, 0x90, 0xea, 0xba, 0x94, + 0xaa, 0x4e, 0xc6, 0xf5, 0x52, 0x47, 0xd3, 
0x5e, 0xa4, 0x7e, 0xe3, 0x58, 0x12, 0x75, 0x61, 0xeb, + 0xd4, 0x76, 0x2e, 0x88, 0xdf, 0xaf, 0x16, 0xae, 0x19, 0x87, 0x95, 0xa3, 0xc3, 0x46, 0xde, 0xf1, + 0x35, 0xb4, 0x3d, 0x96, 0x92, 0xb7, 0xf6, 0xb4, 0x11, 0x5b, 0x9a, 0x80, 0x23, 0x24, 0xf3, 0x0c, + 0xae, 0xa6, 0xec, 0xc7, 0x23, 0x8f, 0x3c, 0xb2, 0xbd, 0x11, 0x41, 0x6d, 0x28, 0x0a, 0xc5, 0x61, + 0xd5, 0xb8, 0xb6, 0x7e, 0x58, 0x39, 0x7a, 0x2b, 0x5f, 0xd5, 0xd4, 0xf6, 0xad, 0x1d, 0xad, 0xab, + 0x28, 0xbe, 0x42, 0xac, 0x60, 0xcc, 0x4f, 0x0c, 0x28, 0xb7, 0x3a, 0xef, 0xf5, 0xfb, 0x42, 0x0e, + 0x7d, 0x08, 0x25, 0x71, 0xde, 0x7d, 0x9b, 0xdb, 0xd2, 0x4d, 0x95, 0xa3, 0x6f, 0xa5, 0x14, 0xc4, + 0xee, 0x6f, 0x04, 0x17, 0x03, 0x41, 0x08, 0x1b, 0x42, 0x5a, 0x28, 0x7b, 0x78, 0xfa, 0x11, 0x71, + 0xf8, 0x03, 0xc2, 0x6d, 0x0b, 0x69, 0x3d, 0x90, 0xd0, 0x70, 0x8c, 0x8a, 0x5a, 0xb0, 0x11, 0x06, + 0xc4, 0xd1, 0x9e, 0x7a, 0xf3, 0x12, 0x4f, 0x45, 0x46, 0x75, 0x03, 0xe2, 0x24, 0xa7, 0x25, 0xbe, + 0xb0, 0x84, 0x30, 0x3f, 0x36, 0x60, 0x27, 0x96, 0x3a, 0x71, 0x43, 0x8e, 0x7e, 0x32, 0x63, 0x7e, + 0x63, 0x39, 0xf3, 0xc5, 0x6a, 0x69, 0xfc, 0x15, 0xad, 0xa7, 0x14, 0x51, 0x52, 0xa6, 0xdf, 0x83, + 0xa2, 0xcb, 0xc9, 0x30, 0xac, 0x16, 0xa4, 0xeb, 0x5f, 0x5f, 0xc2, 0xf6, 0xc4, 0xe9, 0x2d, 0xb1, + 0x12, 0x2b, 0x00, 0x73, 0x90, 0x32, 0x5c, 0x6c, 0x08, 0x3d, 0x82, 0x72, 0x60, 0x33, 0xe2, 0x73, + 0x4c, 0xce, 0xb4, 0xe5, 0x97, 0x9c, 0x6c, 0x27, 0x12, 0x25, 0x8c, 0xf8, 0x0e, 0xb1, 0x76, 0x26, + 0xe3, 0x7a, 0x39, 0x26, 0xe2, 0x04, 0xca, 0x7c, 0x08, 0x5b, 0xad, 0x8e, 0xe5, 0x51, 0xe7, 0x42, + 0x44, 0xbf, 0xe3, 0xf6, 0xd9, 0x74, 0xf4, 0x1f, 0xb7, 0x6e, 0x63, 0x2c, 0x39, 0xc8, 0x84, 0x4d, + 0xf2, 0xc4, 0x21, 0x01, 0x97, 0x1b, 0x2c, 0x5b, 0x30, 0x19, 0xd7, 0x37, 0xef, 0x48, 0x0a, 0xd6, + 0x1c, 0xf3, 0x37, 0x05, 0xd8, 0xd2, 0x41, 0xb5, 0x82, 0x60, 0xb9, 0x9b, 0x09, 0x96, 0xeb, 0x0b, + 0xd3, 0x2a, 0x2f, 0x54, 0xd0, 0x43, 0xd8, 0x0c, 0xb9, 0xcd, 0x47, 0xa1, 0x4c, 0xeb, 0xcb, 0xe3, + 0x4e, 0x43, 0x49, 0x71, 0x6b, 0x57, 0x83, 0x6d, 0xaa, 0x6f, 0xac, 0x61, 0xcc, 0x7f, 0x18, 0xb0, + 0x9b, 0xcd, 0x65, 0xf4, 0x08, 0xb6, 0x42, 0xc2, 0x1e, 0xbb, 0x0e, 0xa9, 0x6e, 0x48, 0x25, 0xcd, + 0xc5, 0x4a, 0x94, 0x7c, 0x54, 0x0d, 0x2a, 0xa2, 0x12, 0x68, 0x1a, 0x8e, 0xc0, 0xd0, 0x0f, 0xa1, + 0xc4, 0x48, 0x48, 0x47, 0xcc, 0x21, 0xda, 0xfa, 0x77, 0xd2, 0xc0, 0xa2, 0xaa, 0x0b, 0x48, 0x51, + 0x8a, 0xfa, 0x27, 0xd4, 0xb1, 0x3d, 0xe5, 0xca, 0x24, 0x3c, 0xb6, 0x45, 0x3c, 0x63, 0x0d, 0x81, + 0x63, 0x30, 0x51, 0x23, 0xb7, 0xb5, 0x21, 0xc7, 0x9e, 0xbd, 0x92, 0x03, 0x3d, 0xc9, 0x1c, 0xe8, + 0x37, 0x17, 0x3a, 0x48, 0xda, 0x95, 0x5b, 0x00, 0xfe, 0x6a, 0xc0, 0x95, 0xb4, 0xe0, 0x0a, 0x6a, + 0xc0, 0xfd, 0x6c, 0x0d, 0x78, 0x63, 0xb9, 0x1d, 0xe4, 0x94, 0x81, 0x7f, 0x1b, 0x50, 0x4f, 0x8b, + 0x75, 0x6c, 0x66, 0x0f, 0x09, 0x27, 0x2c, 0x8c, 0x0f, 0x0f, 0x1d, 0x42, 0xc9, 0xee, 0xb4, 0xee, + 0x32, 0x3a, 0x0a, 0xa2, 0xd4, 0x15, 0xa6, 0xbd, 0xa7, 0x69, 0x38, 0xe6, 0x8a, 0x04, 0xbf, 0x70, + 0x75, 0x0f, 0x4a, 0x25, 0xf8, 0x7d, 0xd7, 0xef, 0x63, 0xc9, 0x11, 0x12, 0xbe, 0x3d, 0x8c, 0x5a, + 0x5b, 0x2c, 0xd1, 0xb6, 0x87, 0x04, 0x4b, 0x0e, 0xaa, 0x43, 0x31, 0x74, 0x68, 0xa0, 0x22, 0xb8, + 0x6c, 0x95, 0x85, 0xc9, 0x5d, 0x41, 0xc0, 0x8a, 0x8e, 0x6e, 0x40, 0x59, 0x08, 0x86, 0x81, 0xed, + 0x90, 0x6a, 0x51, 0x0a, 0xc9, 0xea, 0xd3, 0x8e, 0x88, 0x38, 0xe1, 0x9b, 0x7f, 0x9a, 0x3a, 0x1f, + 0x59, 0xea, 0x8e, 0x00, 0x1c, 0xea, 0x73, 0x46, 0x3d, 0x8f, 0x44, 0xd5, 0x28, 0x0e, 0x9a, 0xe3, + 0x98, 0x83, 0x53, 0x52, 0xc8, 0x05, 0x08, 0x62, 0xdf, 0xe8, 0xe0, 0xf9, 0xee, 0x72, 0xae, 0x9f, + 0xe3, 0x53, 0x6b, 0x57, 0xa8, 0x4a, 0x31, 0x52, 0xe0, 0xe6, 0x9f, 
0x0d, 0xa8, 0xe8, 0xf5, 0x2b, + 0x08, 0xa7, 0xf7, 0xb3, 0xe1, 0xf4, 0xf5, 0xc5, 0x83, 0xc3, 0xfc, 0x48, 0xfa, 0xc4, 0x80, 0x83, + 0xc8, 0x6a, 0x6a, 0xf7, 0x2d, 0xdb, 0xb3, 0x7d, 0x87, 0xb0, 0xa8, 0x52, 0x1f, 0x40, 0xc1, 0x8d, + 0xc2, 0x07, 0x34, 0x40, 0xa1, 0xd5, 0xc1, 0x05, 0x37, 0x40, 0x6f, 0x43, 0xe9, 0x9c, 0x86, 0x5c, + 0x06, 0x86, 0x0a, 0x9d, 0xd8, 0xe0, 0x7b, 0x9a, 0x8e, 0x63, 0x09, 0xd4, 0x81, 0x62, 0x40, 0x19, + 0x0f, 0xab, 0x1b, 0xd2, 0xe0, 0x1b, 0x0b, 0x0d, 0xee, 0x50, 0xc6, 0x75, 0x2d, 0x4d, 0x06, 0x10, + 0x81, 0x80, 0x15, 0x90, 0xf9, 0x4b, 0xf8, 0xca, 0x1c, 0xcb, 0xd5, 0x12, 0xf4, 0x73, 0xd8, 0x72, + 0x15, 0x53, 0xcf, 0x3b, 0xb7, 0x16, 0x2a, 0x9c, 0xb3, 0xff, 0x64, 0xcc, 0x8a, 0xc6, 0xa9, 0x08, + 0xd5, 0xfc, 0xa3, 0x01, 0xfb, 0x33, 0x96, 0xca, 0x49, 0x91, 0x32, 0x2e, 0x3d, 0x56, 0x4c, 0x4d, + 0x8a, 0x94, 0x71, 0x2c, 0x39, 0xe8, 0x3e, 0x94, 0xe4, 0xa0, 0xe9, 0x50, 0x4f, 0x7b, 0xad, 0x19, + 0x79, 0xad, 0xa3, 0xe9, 0x2f, 0xc6, 0xf5, 0xaf, 0xce, 0x4e, 0xdf, 0x8d, 0x88, 0x8d, 0x63, 0x00, + 0x91, 0x75, 0x84, 0x31, 0xca, 0x74, 0x62, 0xca, 0xac, 0xbb, 0x23, 0x08, 0x58, 0xd1, 0xcd, 0x3f, + 0x24, 0x41, 0x29, 0x26, 0x41, 0x61, 0x9f, 0x38, 0x91, 0xe9, 0x5e, 0x2e, 0xce, 0x0b, 0x4b, 0x0e, + 0x0a, 0xe0, 0x8a, 0x3b, 0x35, 0x3a, 0x2e, 0x5d, 0x74, 0xe3, 0x15, 0x56, 0x55, 0x23, 0x5f, 0x99, + 0xe6, 0xe0, 0x19, 0x74, 0xf3, 0x43, 0x98, 0x91, 0x12, 0xe5, 0xfe, 0x9c, 0xf3, 0x60, 0x4e, 0xe2, + 0xe4, 0xcf, 0xaa, 0x89, 0xf6, 0x92, 0xdc, 0x53, 0xaf, 0xd7, 0xc1, 0x12, 0xc5, 0xfc, 0xad, 0x01, + 0xaf, 0xcd, 0x6d, 0x9c, 0x71, 0x61, 0x33, 0x72, 0x0b, 0x5b, 0x5b, 0x9f, 0xa8, 0xf2, 0xc1, 0xdb, + 0xf9, 0x96, 0x64, 0x91, 0xc5, 0x89, 0xcf, 0x3b, 0x7f, 0xf3, 0x9f, 0x85, 0xf8, 0x44, 0x64, 0x55, + 0xfb, 0x41, 0xec, 0x6f, 0x59, 0x75, 0x84, 0x66, 0x5d, 0x43, 0xaf, 0xa6, 0xfc, 0x17, 0xf3, 0xf0, + 0x8c, 0x34, 0xea, 0xc3, 0x6e, 0x9f, 0x9c, 0xd9, 0x23, 0x8f, 0x6b, 0xdd, 0xda, 0x6b, 0xcb, 0x5f, + 0x26, 0xd0, 0x64, 0x5c, 0xdf, 0xbd, 0x9d, 0xc1, 0xc0, 0x53, 0x98, 0xe8, 0x18, 0xd6, 0xb9, 0x17, + 0x95, 0x9b, 0x6f, 0x2c, 0x84, 0xee, 0x9d, 0x74, 0xad, 0x8a, 0xde, 0xfe, 0x7a, 0xef, 0xa4, 0x8b, + 0xc5, 0x6a, 0xf4, 0x01, 0x14, 0xd9, 0xc8, 0x23, 0x62, 0x98, 0x5a, 0x5f, 0x6a, 0x2e, 0x13, 0x67, + 0x9a, 0xa4, 0xbf, 0xf8, 0x0a, 0xb1, 0x82, 0x30, 0x7f, 0x05, 0x3b, 0x99, 0x89, 0x0b, 0x0d, 0x61, + 0xdb, 0x4b, 0xa5, 0xb0, 0xf6, 0xc2, 0xbb, 0xff, 0x57, 0xde, 0xeb, 0x82, 0x73, 0x55, 0x6b, 0xdc, + 0x4e, 0xf3, 0x70, 0x06, 0xde, 0xb4, 0x01, 0x92, 0xbd, 0x8a, 0x4c, 0x14, 0xe9, 0xa3, 0xaa, 0x8d, + 0xce, 0x44, 0x91, 0x55, 0x21, 0x56, 0x74, 0xd1, 0xbd, 0x42, 0xe2, 0x30, 0xc2, 0xdb, 0x49, 0xbd, + 0x8c, 0xbb, 0x57, 0x37, 0xe6, 0xe0, 0x94, 0x94, 0xf9, 0x77, 0x03, 0x76, 0xda, 0xca, 0xe4, 0x0e, + 0xf5, 0x5c, 0xe7, 0xe9, 0x0a, 0x06, 0xad, 0x07, 0x99, 0x41, 0xeb, 0x92, 0x32, 0x9d, 0x31, 0x2c, + 0x77, 0xd2, 0xfa, 0x8b, 0x01, 0x5f, 0xce, 0x48, 0xde, 0x49, 0x8a, 0x51, 0xdc, 0x12, 0x8c, 0x45, + 0x2d, 0x21, 0x83, 0x20, 0x53, 0x6b, 0x6e, 0x4b, 0x40, 0x77, 0xa1, 0xc0, 0xa9, 0x8e, 0xd1, 0xa5, + 0xe1, 0x08, 0x61, 0x49, 0x6f, 0xeb, 0x51, 0x5c, 0xe0, 0xd4, 0xfc, 0xd4, 0x80, 0x6a, 0x46, 0x2a, + 0x5d, 0x44, 0x5f, 0xbd, 0xdd, 0x0f, 0x60, 0xe3, 0x8c, 0xd1, 0xe1, 0xcb, 0x58, 0x1e, 0x3b, 0xfd, + 0x7d, 0x46, 0x87, 0x58, 0xc2, 0x98, 0x9f, 0x19, 0xb0, 0x9f, 0x91, 0x5c, 0xc1, 0x40, 0x72, 0x92, + 0x1d, 0x48, 0xde, 0x5c, 0x72, 0x0f, 0x39, 0x63, 0xc9, 0x67, 0x85, 0xa9, 0x1d, 0x88, 0xbd, 0xa2, + 0x33, 0xa8, 0x04, 0xb4, 0xdf, 0x25, 0x1e, 0x71, 0x38, 0x9d, 0x97, 0xe0, 0x97, 0x6d, 0xc2, 0x3e, + 0x25, 0x5e, 0xb4, 0xd4, 0xda, 0x9b, 0x8c, 0xeb, 0x95, 0x4e, 0x82, 0x85, 0xd3, 0xc0, 0xe8, 
0x09, + 0xec, 0xc7, 0xb3, 0x68, 0xac, 0xad, 0xf0, 0xf2, 0xda, 0x5e, 0x9b, 0x8c, 0xeb, 0xfb, 0xed, 0x69, + 0x44, 0x3c, 0xab, 0x04, 0xdd, 0x83, 0x2d, 0x37, 0x90, 0xd7, 0x6e, 0x7d, 0x63, 0xbb, 0x6c, 0xb0, + 0x53, 0xf7, 0x73, 0x75, 0xf9, 0xd3, 0x1f, 0x38, 0x5a, 0x6e, 0xfe, 0x6b, 0x3a, 0x06, 0x44, 0xc0, + 0xa1, 0xbb, 0xa9, 0xe9, 0x43, 0xf5, 0xbc, 0x1b, 0x2f, 0x37, 0x79, 0x64, 0xdb, 0x62, 0x7e, 0x11, + 0x1a, 0x71, 0xd7, 0x6b, 0xa8, 0xa7, 0xb6, 0x46, 0xcb, 0xe7, 0x0f, 0x59, 0x97, 0x33, 0xd7, 0x1f, + 0xa8, 0x16, 0x9d, 0x1a, 0x8b, 0xae, 0xc3, 0x96, 0xee, 0x9a, 0x72, 0xe3, 0x45, 0xb5, 0xab, 0x3b, + 0x8a, 0x84, 0x23, 0x9e, 0xf9, 0x62, 0x3a, 0x2e, 0x64, 0x0f, 0xfd, 0xe8, 0x95, 0xc5, 0xc5, 0x97, + 0x74, 0x34, 0xe6, 0xc7, 0xc6, 0x4f, 0x93, 0xc1, 0x52, 0x45, 0xfa, 0xd1, 0x92, 0x91, 0x9e, 0xee, + 0x68, 0xb9, 0x63, 0x25, 0xfa, 0x11, 0x6c, 0x12, 0x85, 0xae, 0x5a, 0xe4, 0xcd, 0x25, 0xd1, 0x93, + 0xb2, 0x9a, 0xbc, 0x3c, 0x68, 0x9a, 0x06, 0x44, 0xdf, 0x17, 0x5e, 0x12, 0xb2, 0xe2, 0xc2, 0xaf, + 0xe6, 0xf0, 0xb2, 0xf5, 0x35, 0xb5, 0xd9, 0x98, 0xfc, 0x42, 0x5c, 0x70, 0xe2, 0x4f, 0x9c, 0x5e, + 0x61, 0x7e, 0x6c, 0xc0, 0xde, 0xd4, 0x0b, 0x12, 0x7a, 0x1d, 0x8a, 0x83, 0xd4, 0x15, 0x33, 0xce, + 0x66, 0x75, 0xc7, 0x54, 0x3c, 0x71, 0x53, 0x88, 0x1f, 0x22, 0xa6, 0x6e, 0x0a, 0xb3, 0xaf, 0x0b, + 0xa8, 0x99, 0xbe, 0x29, 0xaa, 0xc1, 0x76, 0x5f, 0x8b, 0xcf, 0xbd, 0x2d, 0xc6, 0x43, 0xdc, 0x46, + 0xde, 0x10, 0x67, 0xfe, 0x0c, 0xd0, 0xec, 0x78, 0xb6, 0xc4, 0xf0, 0xf7, 0x06, 0x6c, 0xfa, 0xa3, + 0xe1, 0x29, 0x51, 0xd9, 0x5f, 0x4c, 0x5c, 0xdb, 0x96, 0x54, 0xac, 0xb9, 0xe6, 0xef, 0x0b, 0x50, + 0xd1, 0x0a, 0x8e, 0x5b, 0xb7, 0xf1, 0x0a, 0xda, 0xf4, 0xfd, 0x4c, 0x9b, 0x7e, 0x6b, 0xe1, 0x58, + 0x2a, 0xcc, 0xca, 0x7d, 0xe4, 0xea, 0x4e, 0x3d, 0x72, 0xdd, 0x58, 0x0e, 0xee, 0xf2, 0x87, 0xae, + 0x4f, 0x0d, 0xd8, 0x4b, 0x49, 0xaf, 0xa0, 0x05, 0x7d, 0x90, 0x6d, 0x41, 0xd7, 0x97, 0xda, 0x45, + 0x4e, 0x03, 0x3a, 0xca, 0x18, 0x2f, 0xab, 0x4c, 0x1d, 0x8a, 0x8e, 0xdb, 0x67, 0x99, 0x11, 0x4f, + 0x30, 0x43, 0xac, 0xe8, 0xe6, 0x13, 0xd8, 0x9f, 0x71, 0x0f, 0x72, 0xe4, 0xab, 0x45, 0xdf, 0xe5, + 0x2e, 0xf5, 0xa3, 0x89, 0xa1, 0xb9, 0xdc, 0xa6, 0x8f, 0xa3, 0x75, 0x99, 0x67, 0x0e, 0x0d, 0x85, + 0x53, 0xb0, 0xd6, 0xf7, 0x9e, 0x3d, 0xaf, 0xad, 0x7d, 0xfe, 0xbc, 0xb6, 0xf6, 0xc5, 0xf3, 0xda, + 0xda, 0xaf, 0x27, 0x35, 0xe3, 0xd9, 0xa4, 0x66, 0x7c, 0x3e, 0xa9, 0x19, 0x5f, 0x4c, 0x6a, 0xc6, + 0x7f, 0x26, 0x35, 0xe3, 0x77, 0xff, 0xad, 0xad, 0xfd, 0xb8, 0x9a, 0xf7, 0x5f, 0xa4, 0xff, 0x05, + 0x00, 0x00, 0xff, 0xff, 0xb5, 0x6b, 0x8c, 0x52, 0x60, 0x1a, 0x00, 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -1028,7 +1274,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPBlock) Marshal() (dAtA []byte, err error) { +func (m *IPAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1038,34 +1284,40 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Except) > 0 { - for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Except[iNdEx]) - copy(dAtA[i:], m.Except[iNdEx]) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) - i-- - dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= len(m.CIDR) - copy(dAtA[i:], m.CIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Ingress) Marshal() (dAtA []byte, err error) { +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1075,38 +1327,32 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1118,7 +1364,7 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressBackend) Marshal() (dAtA []byte, err error) { +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1128,19 +1374,19 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { + if m.ParentRef != nil { { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1148,15 +1394,140 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0xa } - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size + return len(dAtA) - i, nil +} + +func (m *IPBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Ingress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- @@ -2137,6 +2508,49 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a 
+ i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ServiceBackendPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2168,72 +2582,284 @@ func (m *ServiceBackendPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HTTPIngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n -} - -func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - var l int - _ = l - l = len(m.CIDR) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Except) > 0 { - for _, s := range m.Except { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Ingress) Size() (n int) { - if m == nil { - return 0 +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l + return dAtA[:n], nil +} + +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CIDRs) > 0 { + for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CIDRs[iNdEx]) + copy(dAtA[i:], m.CIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l l = 
m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() @@ -2635,6 +3261,23 @@ func (m *NetworkPolicySpec) Size() (n int) { return n } +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ServiceBackendPort) Size() (n int) { if m == nil { return 0 @@ -2647,39 +3290,138 @@ func (m *ServiceBackendPort) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" +func (m *ServiceCIDR) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" + +func (m *ServiceCIDRList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForPaths += "}" + return n +} + +func (m *ServiceCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CIDRs) > 0 { + for _, s := range m.CIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" s := 
strings.Join([]string{`&HTTPIngressRuleValue{`, `Paths:` + repeatedStringForPaths + `,`, `}`, }, "") return s } +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} func (this *IPBlock) String() string { if this == nil { return "nil" @@ -3018,6 +3760,19 @@ func (this *NetworkPolicySpec) String() string { }, "") return s } +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *ServiceBackendPort) String() string { if this == nil { return "nil" @@ -3029,6 +3784,59 @@ func (this *ServiceBackendPort) String() string { }, "") return s } +func (this *ServiceCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDRSpec{`, + `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := 
"[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ServiceCIDRStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -3269,7 +4077,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IPBlock) Unmarshal(dAtA []byte) error { +func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3292,17 +4100,17 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3312,29 +4120,30 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.CIDR = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3344,23 +4153,24 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3383,7 +4193,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *IPAddressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3406,15 +4216,15 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressList: wiretype end group for 
non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3441,46 +4251,13 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3507,7 +4284,8 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, IPAddress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3532,7 +4310,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3555,51 +4333,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: 
+ case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3626,10 +4368,10 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Service == nil { - m.Service = &IngressServiceBackend{} + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3654,7 +4396,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClass) Unmarshal(dAtA []byte) error { +func (m *IPBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3677,17 +4419,17 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3697,30 +4439,29 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3730,24 +4471,23 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -3770,7 +4510,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassList) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) 
iNdEx := 0 for iNdEx < l { @@ -3793,15 +4533,15 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3828,13 +4568,13 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3861,8 +4601,40 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, IngressClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3887,7 +4659,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { +func (m *IngressBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3910,50 +4682,17 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.APIGroup = &s - iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3963,61 +4702,33 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4027,57 +4738,27 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Scope = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if 
m.Service == nil { + m.Service = &IngressServiceBackend{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -4100,7 +4781,7 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { +func (m *IngressClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4123,17 +4804,17 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4143,27 +4824,28 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Controller = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4190,10 +4872,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parameters == nil { - m.Parameters = &IngressClassParametersReference{} - } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4218,7 +4897,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *IngressClassList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4241,10 +4920,10 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4309,7 +4988,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if postIndex > 
l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) + m.Items = append(m.Items, IngressClass{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -4335,7 +5014,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { +func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4358,15 +5037,15 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4394,11 +5073,12 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4426,13 +5106,45 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4442,25 +5154,57 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } 
- m.Ports = append(m.Ports, IngressPortStatus{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + s := string(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -4483,7 +5227,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4506,15 +5250,47 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Controller = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4541,8 +5317,10 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Parameters == nil { + m.Parameters = &IngressClassParametersReference{} + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4567,7 +5345,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4590,36 +5368,17 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { 
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4629,29 +5388,30 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4661,24 +5421,25 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -4701,7 +5462,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4724,15 +5485,15 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4760,11 +5521,43 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4791,7 +5584,8 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4816,7 +5610,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4839,15 +5633,15 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4874,10 +5668,8 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4902,7 +5694,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA 
[]byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4925,15 +5717,34 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressServiceBackend: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressServiceBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4961,13 +5772,13 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4977,24 +5788,24 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -5017,7 +5828,7 @@ func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5040,17 +5851,17 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultBackend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -5060,65 +5871,27 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.DefaultBackend == nil { - m.DefaultBackend = &IngressBackend{} - } - if err := m.DefaultBackend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5145,44 +5918,10 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5204,7 +5943,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5227,15 +5966,15 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5262,7 +6001,10 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5287,7 +6029,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5310,15 +6052,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return fmt.Errorf("proto: IngressServiceBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressServiceBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5346,13 +6088,13 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5362,23 +6104,24 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5401,7 +6144,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5424,15 +6167,15 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type 
%d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultBackend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5459,13 +6202,16 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DefaultBackend == nil { + m.DefaultBackend = &IngressBackend{} + } + if err := m.DefaultBackend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5492,63 +6238,14 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5575,16 +6272,99 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } - var msglen int + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5609,8 +6389,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.To = append(m.To, NetworkPolicyPeer{}) - if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5635,7 +6414,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { +func (m *IngressTLS) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5658,15 +6437,129 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5693,14 +6586,13 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5727,10 +6619,1020 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := 
m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EndPort = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParentReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5753,7 +7655,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { +func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5776,17 +7678,17 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") 
+ return fmt.Errorf("proto: ServiceBackendPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceBackendPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5796,30 +7698,29 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) } - var msglen int + m.Number = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5829,26 +7730,11 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Number |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5870,7 +7756,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5893,15 +7779,15 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5928,16 +7814,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5964,16 +7847,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6000,10 +7880,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IPBlock == nil { - m.IPBlock = &IPBlock{} - } - if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6028,7 +7905,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6051,17 +7928,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6071,28 +7948,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) - m.Protocol = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6119,33 +7996,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &intstr.IntOrString{} - } - if err := 
m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ServiceCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6167,7 +8022,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6190,116 +8045,15 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) - if err := 
m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6327,7 +8081,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -6350,7 +8104,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6373,17 +8127,17 @@ func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceBackendPort: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceBackendPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6393,43 +8147,26 @@ func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) - } - m.Number = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Number |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/tools/vendor/k8s.io/api/networking/v1/generated.proto b/tools/vendor/k8s.io/api/networking/v1/generated.proto index c72fdc8f3..e3e3e9215 100644 --- a/tools/vendor/k8s.io/api/networking/v1/generated.proto +++ b/tools/vendor/k8s.io/api/networking/v1/generated.proto @@ -72,6 +72,44 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. 
+// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
+// the name of the object is the IP address in canonical format, four decimal digits separated
+// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
+// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
+// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
+message IPAddress {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec is the desired state of the IPAddress.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional IPAddressSpec spec = 2;
+}
+
+// IPAddressList contains a list of IPAddress.
+message IPAddressList {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of IPAddresses.
+  repeated IPAddress items = 2;
+}
+
+// IPAddressSpec describe the attributes in an IP Address.
+message IPAddressSpec {
+  // ParentRef references the resource that an IPAddress is attached to.
+  // An IPAddress must reference a parent object.
+  // +required
+  optional ParentReference parentRef = 1;
+}
+
 // IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
 // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
 // that should not be included within this rule.
@@ -540,6 +578,25 @@ message NetworkPolicySpec {
   repeated string policyTypes = 4;
 }
 
+// ParentReference describes a reference to a parent object.
+message ParentReference {
+  // Group is the group of the object being referenced.
+  // +optional
+  optional string group = 1;
+
+  // Resource is the resource of the object being referenced.
+  // +required
+  optional string resource = 2;
+
+  // Namespace is the namespace of the object being referenced.
+  // +optional
+  optional string namespace = 3;
+
+  // Name is the name of the object being referenced.
+  // +required
+  optional string name = 4;
+}
+
 // ServiceBackendPort is the service port being referenced.
 // +structType=atomic
 message ServiceBackendPort {
@@ -554,3 +611,55 @@ message ServiceBackendPort {
   optional int32 number = 2;
 }
 
+// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
+// This range is used to allocate ClusterIPs to Service objects.
+message ServiceCIDR {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec is the desired state of the ServiceCIDR.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional ServiceCIDRSpec spec = 2;
+
+  // status represents the current state of the ServiceCIDR.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional ServiceCIDRStatus status = 3;
+}
+
+// ServiceCIDRList contains a list of ServiceCIDR objects.
+message ServiceCIDRList {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of ServiceCIDRs.
+  repeated ServiceCIDR items = 2;
+}
+
+// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
+message ServiceCIDRSpec {
+  // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
+  // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
+  // This field is immutable.
+  // +optional
+  // +listType=atomic
+  repeated string cidrs = 1;
+}
+
+// ServiceCIDRStatus describes the current state of the ServiceCIDR.
+message ServiceCIDRStatus {
+  // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
+  // Current service state
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
+}
+
diff --git a/tools/vendor/k8s.io/api/networking/v1/register.go b/tools/vendor/k8s.io/api/networking/v1/register.go
index a200d5437..b9bdcb78c 100644
--- a/tools/vendor/k8s.io/api/networking/v1/register.go
+++ b/tools/vendor/k8s.io/api/networking/v1/register.go
@@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&IngressClassList{},
 		&NetworkPolicy{},
 		&NetworkPolicyList{},
+		&IPAddress{},
+		&IPAddressList{},
+		&ServiceCIDR{},
+		&ServiceCIDRList{},
 	)
 
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
diff --git a/tools/vendor/k8s.io/api/networking/v1/types.go b/tools/vendor/k8s.io/api/networking/v1/types.go
index d75e27558..216647cee 100644
--- a/tools/vendor/k8s.io/api/networking/v1/types.go
+++ b/tools/vendor/k8s.io/api/networking/v1/types.go
@@ -635,3 +635,133 @@ type IngressClassList struct {
 	// items is the list of IngressClasses.
 	Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.33
+
+// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
+// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
+// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
+// the name of the object is the IP address in canonical format, four decimal digits separated
+// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
+// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
+// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
+type IPAddress struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// spec is the desired state of the IPAddress.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// IPAddressSpec describe the attributes in an IP Address.
+type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.33 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.33 + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` +} + +const ( + // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the + // apiserver to allocate ClusterIPs for Services. + ServiceCIDRConditionReady = "Ready" + // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is + // being deleted. 
+ ServiceCIDRReasonTerminating = "Terminating" +) + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.33 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of ServiceCIDRs. + Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/tools/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index ff080540d..0e294848b 100644 --- a/tools/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -47,6 +47,35 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + var map_IPBlock = map[string]string{ "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.", "cidr": "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", @@ -294,6 +323,18 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { return map_NetworkPolicySpec } +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + var map_ServiceBackendPort = map[string]string{ "": "ServiceBackendPort is the service port being referenced.", "name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", @@ -304,4 +345,43 @@ func (ServiceBackendPort) SwaggerDoc() map[string]string { return map_ServiceBackendPort } +var map_ServiceCIDR = map[string]string{ + "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ServiceCIDR) SwaggerDoc() map[string]string { + return map_ServiceCIDR +} + +var map_ServiceCIDRList = map[string]string{ + "": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ServiceCIDRs.", +} + +func (ServiceCIDRList) SwaggerDoc() map[string]string { + return map_ServiceCIDRList +} + +var map_ServiceCIDRSpec = map[string]string{ + "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", +} + +func (ServiceCIDRSpec) SwaggerDoc() map[string]string { + return map_ServiceCIDRSpec +} + +var map_ServiceCIDRStatus = map[string]string{ + "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", +} + +func (ServiceCIDRStatus) SwaggerDoc() map[string]string { + return map_ServiceCIDRStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/tools/vendor/k8s.io/api/networking/v1/well_known_labels.go b/tools/vendor/k8s.io/api/networking/v1/well_known_labels.go new file mode 100644 index 000000000..28e2e8f3f --- /dev/null +++ b/tools/vendor/k8s.io/api/networking/v1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/tools/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/tools/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 540873833..9ce6435a4 100644 --- a/tools/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/tools/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -73,6 +73,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -711,6 +792,22 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) { *out = *in @@ -726,3 +823,108 @@ func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. +func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { + if in == nil { + return nil + } + out := new(ServiceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { + if in == nil { + return nil + } + out := new(ServiceCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { + *out = *in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { + if in == nil { + return nil + } + out := new(ServiceCIDRSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. +func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { + if in == nil { + return nil + } + out := new(ServiceCIDRStatus) + in.DeepCopyInto(out) + return out +} diff --git a/tools/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go b/tools/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go index 21e8c671a..6894d8c53 100644 --- a/tools/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go +++ b/tools/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go @@ -21,6 +21,18 @@ limitations under the License. package v1 +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 33 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *Ingress) APILifecycleIntroduced() (major, minor int) { @@ -56,3 +68,15 @@ func (in *NetworkPolicy) APILifecycleIntroduced() (major, minor int) { func (in *NetworkPolicyList) APILifecycleIntroduced() (major, minor int) { return 1, 19 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 33 +} diff --git a/tools/vendor/k8s.io/api/networking/v1alpha1/doc.go b/tools/vendor/k8s.io/api/networking/v1alpha1/doc.go index 3827b0418..55264ae70 100644 --- a/tools/vendor/k8s.io/api/networking/v1alpha1/doc.go +++ b/tools/vendor/k8s.io/api/networking/v1alpha1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // +groupName=networking.k8s.io -package v1alpha1 // import "k8s.io/api/networking/v1alpha1" +package v1alpha1 diff --git a/tools/vendor/k8s.io/api/networking/v1beta1/doc.go b/tools/vendor/k8s.io/api/networking/v1beta1/doc.go index fa6d01cea..c5a03e04e 100644 --- a/tools/vendor/k8s.io/api/networking/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/networking/v1beta1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // +groupName=networking.k8s.io -package v1beta1 // import "k8s.io/api/networking/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/node/v1/doc.go b/tools/vendor/k8s.io/api/node/v1/doc.go index 57ca52445..3239af703 100644 --- a/tools/vendor/k8s.io/api/node/v1/doc.go +++ b/tools/vendor/k8s.io/api/node/v1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // +groupName=node.k8s.io -package v1 // import "k8s.io/api/node/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/node/v1alpha1/doc.go b/tools/vendor/k8s.io/api/node/v1alpha1/doc.go index dfe99540b..2f3d46ac2 100644 --- a/tools/vendor/k8s.io/api/node/v1alpha1/doc.go +++ b/tools/vendor/k8s.io/api/node/v1alpha1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +groupName=node.k8s.io -package v1alpha1 // import "k8s.io/api/node/v1alpha1" +package v1alpha1 diff --git a/tools/vendor/k8s.io/api/node/v1beta1/doc.go b/tools/vendor/k8s.io/api/node/v1beta1/doc.go index c76ba89c4..7b47c8df6 100644 --- a/tools/vendor/k8s.io/api/node/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/node/v1beta1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=node.k8s.io -package v1beta1 // import "k8s.io/api/node/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/policy/v1/doc.go b/tools/vendor/k8s.io/api/policy/v1/doc.go index c51e02685..ff47e7fd4 100644 --- a/tools/vendor/k8s.io/api/policy/v1/doc.go +++ b/tools/vendor/k8s.io/api/policy/v1/doc.go @@ -22,4 +22,4 @@ limitations under the License. // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, // NetworkPolicy, etc. -package v1 // import "k8s.io/api/policy/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/policy/v1/generated.proto b/tools/vendor/k8s.io/api/policy/v1/generated.proto index 57128e811..953489072 100644 --- a/tools/vendor/k8s.io/api/policy/v1/generated.proto +++ b/tools/vendor/k8s.io/api/policy/v1/generated.proto @@ -115,9 +115,6 @@ message PodDisruptionBudgetSpec { // Additional policies may be added in the future. // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. - // - // This field is beta-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). 
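[Editorial note, not part of the vendored patch] The removal above drops only the "beta-level / feature gate" paragraph from the unhealthyPodEvictionPolicy documentation, consistent with the PDBUnhealthyPodEvictionPolicy gate no longer gating the field; the field itself and its IfHealthyBudget/AlwaysAllow values are unchanged. A minimal usage sketch (object and label names are illustrative):

package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	policy := policyv1.AlwaysAllow
	minAvailable := intstr.FromString("100%")
	pdb := policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "web-pdb", Namespace: "default"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
			// AlwaysAllow: Running-but-not-yet-Ready pods may be evicted
			// without counting against the disruption budget.
			UnhealthyPodEvictionPolicy: &policy,
		},
	}
	fmt.Println(pdb.Name, *pdb.Spec.UnhealthyPodEvictionPolicy)
}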
// +optional optional string unhealthyPodEvictionPolicy = 4; } diff --git a/tools/vendor/k8s.io/api/policy/v1/types.go b/tools/vendor/k8s.io/api/policy/v1/types.go index f05367ebe..4e7436789 100644 --- a/tools/vendor/k8s.io/api/policy/v1/types.go +++ b/tools/vendor/k8s.io/api/policy/v1/types.go @@ -70,9 +70,6 @@ type PodDisruptionBudgetSpec struct { // Additional policies may be added in the future. // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. - // - // This field is beta-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } diff --git a/tools/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go index 799b0794a..9b2f5b945 100644 --- a/tools/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go @@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{ "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", - "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. 
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/tools/vendor/k8s.io/api/policy/v1beta1/doc.go b/tools/vendor/k8s.io/api/policy/v1beta1/doc.go index 76da54b4c..777106c60 100644 --- a/tools/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -22,4 +22,4 @@ limitations under the License. // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, // NetworkPolicy, etc. -package v1beta1 // import "k8s.io/api/policy/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/policy/v1beta1/generated.proto b/tools/vendor/k8s.io/api/policy/v1beta1/generated.proto index 91e33f233..e0cbe00f1 100644 --- a/tools/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/tools/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -115,9 +115,6 @@ message PodDisruptionBudgetSpec { // Additional policies may be added in the future. // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. - // - // This field is beta-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional optional string unhealthyPodEvictionPolicy = 4; } diff --git a/tools/vendor/k8s.io/api/policy/v1beta1/types.go b/tools/vendor/k8s.io/api/policy/v1beta1/types.go index bc5f970d2..9bba454f9 100644 --- a/tools/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/tools/vendor/k8s.io/api/policy/v1beta1/types.go @@ -67,9 +67,6 @@ type PodDisruptionBudgetSpec struct { // Additional policies may be added in the future. // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. - // - // This field is beta-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). 
// +optional UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } diff --git a/tools/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/tools/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 4a79d7594..cffc9a548 100644 --- a/tools/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/tools/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{ "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.", "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", - "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). 
Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/tools/vendor/k8s.io/api/rbac/v1/doc.go b/tools/vendor/k8s.io/api/rbac/v1/doc.go index b0e4e5b5b..408546274 100644 --- a/tools/vendor/k8s.io/api/rbac/v1/doc.go +++ b/tools/vendor/k8s.io/api/rbac/v1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // +groupName=rbac.authorization.k8s.io -package v1 // import "k8s.io/api/rbac/v1" +package v1 diff --git a/tools/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/tools/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 918b8a337..70d3c0e97 100644 --- a/tools/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/tools/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +groupName=rbac.authorization.k8s.io -package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" +package v1alpha1 diff --git a/tools/vendor/k8s.io/api/rbac/v1beta1/doc.go b/tools/vendor/k8s.io/api/rbac/v1beta1/doc.go index 156f273e6..504a58d8b 100644 --- a/tools/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/tools/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=rbac.authorization.k8s.io -package v1beta1 // import "k8s.io/api/rbac/v1beta1" +package v1beta1 diff --git a/tools/vendor/k8s.io/api/resource/v1alpha3/doc.go b/tools/vendor/k8s.io/api/resource/v1alpha3/doc.go index ffc21307d..82e64f1d0 100644 --- a/tools/vendor/k8s.io/api/resource/v1alpha3/doc.go +++ b/tools/vendor/k8s.io/api/resource/v1alpha3/doc.go @@ -21,4 +21,4 @@ limitations under the License. // +groupName=resource.k8s.io // Package v1alpha3 is the v1alpha3 version of the resource API. 
-package v1alpha3 // import "k8s.io/api/resource/v1alpha3" +package v1alpha3 diff --git a/tools/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/tools/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go index 540f7b818..716492fea 100644 --- a/tools/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go +++ b/tools/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -29,6 +29,7 @@ import ( v11 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" math "math" math_bits "math/bits" @@ -161,10 +162,66 @@ func (m *CELDeviceSelector) XXX_DiscardUnknown() { var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo +func (m *Counter) Reset() { *m = Counter{} } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{4} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return m.Size() +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *CounterSet) Reset() { *m = CounterSet{} } +func (*CounterSet) ProtoMessage() {} +func (*CounterSet) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{5} +} +func (m *CounterSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CounterSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_CounterSet.Merge(m, src) +} +func (m *CounterSet) XXX_Size() int { + return m.Size() +} +func (m *CounterSet) XXX_DiscardUnknown() { + xxx_messageInfo_CounterSet.DiscardUnknown(m) +} + +var xxx_messageInfo_CounterSet proto.InternalMessageInfo + func (m *Device) Reset() { *m = Device{} } func (*Device) ProtoMessage() {} func (*Device) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{4} + return fileDescriptor_66649ee9bbcd89d2, []int{6} } func (m *Device) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +249,7 @@ var xxx_messageInfo_Device proto.InternalMessageInfo func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } func (*DeviceAllocationConfiguration) ProtoMessage() {} func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{5} + return fileDescriptor_66649ee9bbcd89d2, []int{7} } func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +277,7 @@ var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } func (*DeviceAllocationResult) ProtoMessage() {} func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{6} + return fileDescriptor_66649ee9bbcd89d2, []int{8} } func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +305,7 @@ var 
xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } func (*DeviceAttribute) ProtoMessage() {} func (*DeviceAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{7} + return fileDescriptor_66649ee9bbcd89d2, []int{9} } func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +333,7 @@ var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } func (*DeviceClaim) ProtoMessage() {} func (*DeviceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{8} + return fileDescriptor_66649ee9bbcd89d2, []int{10} } func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +361,7 @@ var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } func (*DeviceClaimConfiguration) ProtoMessage() {} func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{9} + return fileDescriptor_66649ee9bbcd89d2, []int{11} } func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +389,7 @@ var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo func (m *DeviceClass) Reset() { *m = DeviceClass{} } func (*DeviceClass) ProtoMessage() {} func (*DeviceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{10} + return fileDescriptor_66649ee9bbcd89d2, []int{12} } func (m *DeviceClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +417,7 @@ var xxx_messageInfo_DeviceClass proto.InternalMessageInfo func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } func (*DeviceClassConfiguration) ProtoMessage() {} func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{11} + return fileDescriptor_66649ee9bbcd89d2, []int{13} } func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +445,7 @@ var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } func (*DeviceClassList) ProtoMessage() {} func (*DeviceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{12} + return fileDescriptor_66649ee9bbcd89d2, []int{14} } func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +473,7 @@ var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } func (*DeviceClassSpec) ProtoMessage() {} func (*DeviceClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{13} + return fileDescriptor_66649ee9bbcd89d2, []int{15} } func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +501,7 @@ var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } func (*DeviceConfiguration) ProtoMessage() {} func (*DeviceConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{14} + return fileDescriptor_66649ee9bbcd89d2, []int{16} } func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +529,7 @@ var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo 
func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } func (*DeviceConstraint) ProtoMessage() {} func (*DeviceConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{15} + return fileDescriptor_66649ee9bbcd89d2, []int{17} } func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,10 +554,38 @@ func (m *DeviceConstraint) XXX_DiscardUnknown() { var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo +func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} } +func (*DeviceCounterConsumption) ProtoMessage() {} +func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{18} +} +func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceCounterConsumption.Merge(m, src) +} +func (m *DeviceCounterConsumption) XXX_Size() int { + return m.Size() +} +func (m *DeviceCounterConsumption) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo + func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } func (*DeviceRequest) ProtoMessage() {} func (*DeviceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{16} + return fileDescriptor_66649ee9bbcd89d2, []int{19} } func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +613,7 @@ var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } func (*DeviceRequestAllocationResult) ProtoMessage() {} func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{17} + return fileDescriptor_66649ee9bbcd89d2, []int{20} } func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +641,7 @@ var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } func (*DeviceSelector) ProtoMessage() {} func (*DeviceSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{18} + return fileDescriptor_66649ee9bbcd89d2, []int{21} } func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,10 +666,206 @@ func (m *DeviceSelector) XXX_DiscardUnknown() { var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo +func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} } +func (*DeviceSubRequest) ProtoMessage() {} +func (*DeviceSubRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{22} +} +func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSubRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSubRequest.Merge(m, src) +} +func (m *DeviceSubRequest) XXX_Size() int { + return m.Size() +} +func (m 
*DeviceSubRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo + +func (m *DeviceTaint) Reset() { *m = DeviceTaint{} } +func (*DeviceTaint) ProtoMessage() {} +func (*DeviceTaint) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{23} +} +func (m *DeviceTaint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceTaint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceTaint.Merge(m, src) +} +func (m *DeviceTaint) XXX_Size() int { + return m.Size() +} +func (m *DeviceTaint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceTaint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo + +func (m *DeviceTaintRule) Reset() { *m = DeviceTaintRule{} } +func (*DeviceTaintRule) ProtoMessage() {} +func (*DeviceTaintRule) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{24} +} +func (m *DeviceTaintRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceTaintRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceTaintRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceTaintRule.Merge(m, src) +} +func (m *DeviceTaintRule) XXX_Size() int { + return m.Size() +} +func (m *DeviceTaintRule) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceTaintRule.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceTaintRule proto.InternalMessageInfo + +func (m *DeviceTaintRuleList) Reset() { *m = DeviceTaintRuleList{} } +func (*DeviceTaintRuleList) ProtoMessage() {} +func (*DeviceTaintRuleList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{25} +} +func (m *DeviceTaintRuleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceTaintRuleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceTaintRuleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceTaintRuleList.Merge(m, src) +} +func (m *DeviceTaintRuleList) XXX_Size() int { + return m.Size() +} +func (m *DeviceTaintRuleList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceTaintRuleList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceTaintRuleList proto.InternalMessageInfo + +func (m *DeviceTaintRuleSpec) Reset() { *m = DeviceTaintRuleSpec{} } +func (*DeviceTaintRuleSpec) ProtoMessage() {} +func (*DeviceTaintRuleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{26} +} +func (m *DeviceTaintRuleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceTaintRuleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceTaintRuleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceTaintRuleSpec.Merge(m, src) +} +func (m *DeviceTaintRuleSpec) XXX_Size() int { + return m.Size() +} +func (m *DeviceTaintRuleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceTaintRuleSpec.DiscardUnknown(m) +} 
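[Editorial note, not part of the vendored patch] The []int{...} renumbering throughout this hunk is mechanical: every generated message's Descriptor returns an index path into the per-file descriptor, so inserting new messages (Counter, CounterSet, the DeviceTaint* family, and so on) shifts every later index. A compilable sketch of the pattern with a hypothetical message name; fileDescriptor_example stands in for the per-file gzipped descriptor variable:

package main

import "fmt"

// Example is a hypothetical stand-in for a generated message type.
type Example struct{}

// fileDescriptor_example stands in for the gzipped FileDescriptorProto
// blob that gogo/protobuf emits once per generated file.
var fileDescriptor_example = []byte{}

func (m *Example) Reset()      { *m = Example{} }
func (*Example) ProtoMessage() {}

// Descriptor returns the raw file descriptor plus this message's index
// path within it; adding a message earlier in the .proto shifts every
// later index, which is exactly the churn visible above.
func (*Example) Descriptor() ([]byte, []int) {
	return fileDescriptor_example, []int{7}
}

func main() {
	_, path := (*Example)(nil).Descriptor()
	fmt.Println("descriptor index path:", path)
}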
+ +var xxx_messageInfo_DeviceTaintRuleSpec proto.InternalMessageInfo + +func (m *DeviceTaintSelector) Reset() { *m = DeviceTaintSelector{} } +func (*DeviceTaintSelector) ProtoMessage() {} +func (*DeviceTaintSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{27} +} +func (m *DeviceTaintSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceTaintSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceTaintSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceTaintSelector.Merge(m, src) +} +func (m *DeviceTaintSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceTaintSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceTaintSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceTaintSelector proto.InternalMessageInfo + +func (m *DeviceToleration) Reset() { *m = DeviceToleration{} } +func (*DeviceToleration) ProtoMessage() {} +func (*DeviceToleration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{28} +} +func (m *DeviceToleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceToleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceToleration.Merge(m, src) +} +func (m *DeviceToleration) XXX_Size() int { + return m.Size() +} +func (m *DeviceToleration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceToleration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo + func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} } func (*NetworkDeviceData) ProtoMessage() {} func (*NetworkDeviceData) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{19} + return fileDescriptor_66649ee9bbcd89d2, []int{29} } func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +893,7 @@ var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } func (*OpaqueDeviceConfiguration) ProtoMessage() {} func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{20} + return fileDescriptor_66649ee9bbcd89d2, []int{30} } func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +921,7 @@ var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{21} + return fileDescriptor_66649ee9bbcd89d2, []int{31} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +949,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } func (*ResourceClaimConsumerReference) ProtoMessage() {} func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{22} + return fileDescriptor_66649ee9bbcd89d2, []int{32} } func (m *ResourceClaimConsumerReference) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +977,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } func (*ResourceClaimList) ProtoMessage() {} func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{23} + return fileDescriptor_66649ee9bbcd89d2, []int{33} } func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +1005,7 @@ var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } func (*ResourceClaimSpec) ProtoMessage() {} func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{24} + return fileDescriptor_66649ee9bbcd89d2, []int{34} } func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +1033,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } func (*ResourceClaimStatus) ProtoMessage() {} func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{25} + return fileDescriptor_66649ee9bbcd89d2, []int{35} } func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +1061,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } func (*ResourceClaimTemplate) ProtoMessage() {} func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{26} + return fileDescriptor_66649ee9bbcd89d2, []int{36} } func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +1089,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } func (*ResourceClaimTemplateList) ProtoMessage() {} func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{27} + return fileDescriptor_66649ee9bbcd89d2, []int{37} } func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +1117,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } func (*ResourceClaimTemplateSpec) ProtoMessage() {} func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{28} + return fileDescriptor_66649ee9bbcd89d2, []int{38} } func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +1145,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo func (m *ResourcePool) Reset() { *m = ResourcePool{} } func (*ResourcePool) ProtoMessage() {} func (*ResourcePool) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{29} + return fileDescriptor_66649ee9bbcd89d2, []int{39} } func (m *ResourcePool) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +1173,7 @@ var xxx_messageInfo_ResourcePool proto.InternalMessageInfo func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } func (*ResourceSlice) ProtoMessage() {} func (*ResourceSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{30} + return 
fileDescriptor_66649ee9bbcd89d2, []int{40} } func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +1201,7 @@ var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } func (*ResourceSliceList) ProtoMessage() {} func (*ResourceSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{31} + return fileDescriptor_66649ee9bbcd89d2, []int{41} } func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +1229,7 @@ var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } func (*ResourceSliceSpec) ProtoMessage() {} func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{32} + return fileDescriptor_66649ee9bbcd89d2, []int{42} } func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -980,6 +1261,9 @@ func init() { proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry") proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.CapacityEntry") proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector") + proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1alpha3.Counter") + proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1alpha3.CounterSet") + proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1alpha3.CounterSet.CountersEntry") proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1alpha3.Device") proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationConfiguration") proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationResult") @@ -992,9 +1276,18 @@ func init() { proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassSpec") proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceConfiguration") proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1alpha3.DeviceConstraint") + proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1alpha3.DeviceCounterConsumption") + proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1alpha3.DeviceCounterConsumption.CountersEntry") proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest") proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult") proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector") + proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceSubRequest") + proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaint") + proto.RegisterType((*DeviceTaintRule)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRule") + proto.RegisterType((*DeviceTaintRuleList)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleList") + proto.RegisterType((*DeviceTaintRuleSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleSpec") + proto.RegisterType((*DeviceTaintSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintSelector") + proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1alpha3.DeviceToleration") proto.RegisterType((*NetworkDeviceData)(nil), 
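// ---------------------------------------------------------------------------
// The init() hunk above registers each new v1alpha3 message (Counter,
// CounterSet, DeviceTaint, DeviceToleration, ...) under its full proto name.
// A minimal standalone sketch of resolving such a name back to a Go type via
// the gogo/protobuf registry; illustrative only, not part of the generated
// file, and it assumes the generated package has been imported somewhere so
// that its init() has actually run.
package main

import (
	"fmt"
	"reflect"

	"github.com/gogo/protobuf/proto"
)

// lookup wraps proto.MessageType, which returns the reflect.Type recorded by
// proto.RegisterType, or nil when the name was never registered.
func lookup(name string) (reflect.Type, error) {
	if t := proto.MessageType(name); t != nil {
		return t, nil
	}
	return nil, fmt.Errorf("message %q not registered", name)
}

func main() {
	t, err := lookup("k8s.io.api.resource.v1alpha3.DeviceTaint")
	if err != nil {
		fmt.Println(err) // hit when the API package was never linked in
		return
	}
	fmt.Println(t) // the pointer type registered for DeviceTaint
}
// ---------------------------------------------------------------------------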
"k8s.io.api.resource.v1alpha3.NetworkDeviceData") proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration") proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim") @@ -1016,134 +1309,172 @@ func init() { } var fileDescriptor_66649ee9bbcd89d2 = []byte{ - // 2030 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0xcd, 0x6f, 0x1c, 0x57, - 0xdd, 0xb3, 0xe3, 0xcf, 0xdf, 0xfa, 0x2b, 0x2f, 0xa4, 0x38, 0xa6, 0xec, 0x3a, 0x53, 0x04, 0x4e, - 0x9b, 0xee, 0x36, 0x4e, 0xd5, 0x16, 0xc2, 0x01, 0x8f, 0xed, 0x06, 0x47, 0x89, 0xe3, 0x3c, 0xb7, - 0x11, 0x81, 0x12, 0x78, 0x9e, 0x7d, 0xb6, 0x07, 0xcf, 0xce, 0x4c, 0xe7, 0xbd, 0x71, 0xea, 0x0b, - 0xaa, 0xe0, 0x1e, 0xf1, 0x0f, 0x20, 0x0e, 0x48, 0x48, 0x5c, 0x80, 0xff, 0x00, 0x24, 0x90, 0x88, - 0xe0, 0x12, 0x09, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xce, 0xdc, 0x73, 0x42, 0xef, 0xcd, 0x9b, 0xcf, - 0xdd, 0x71, 0xc6, 0x55, 0xb1, 0xd2, 0xdb, 0xce, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xb8, 0x72, - 0xf8, 0x0e, 0x6b, 0xd9, 0x5e, 0x9b, 0xf8, 0x76, 0x3b, 0xa0, 0xcc, 0x0b, 0x03, 0x8b, 0xb6, 0x8f, - 0xae, 0x12, 0xc7, 0x3f, 0x20, 0xd7, 0xda, 0xfb, 0xd4, 0xa5, 0x01, 0xe1, 0xb4, 0xd3, 0xf2, 0x03, - 0x8f, 0x7b, 0xe8, 0xe5, 0x88, 0xba, 0x45, 0x7c, 0xbb, 0x15, 0x53, 0xb7, 0x62, 0xea, 0xc5, 0xd7, - 0xf7, 0x6d, 0x7e, 0x10, 0xee, 0xb6, 0x2c, 0xaf, 0xdb, 0xde, 0xf7, 0xf6, 0xbd, 0xb6, 0x64, 0xda, - 0x0d, 0xf7, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x22, 0x61, 0x8b, 0x46, 0x46, 0xb5, 0xe5, 0x05, 0x42, - 0x6d, 0x51, 0xe1, 0xe2, 0x9b, 0x29, 0x4d, 0x97, 0x58, 0x07, 0xb6, 0x4b, 0x83, 0xe3, 0xb6, 0x7f, - 0xb8, 0x9f, 0xb7, 0xf7, 0x34, 0x5c, 0xac, 0xdd, 0xa5, 0x9c, 0x0c, 0xd3, 0xd5, 0x2e, 0xe3, 0x0a, - 0x42, 0x97, 0xdb, 0xdd, 0x41, 0x35, 0x6f, 0x3d, 0x8f, 0x81, 0x59, 0x07, 0xb4, 0x4b, 0x8a, 0x7c, - 0xc6, 0xaf, 0x75, 0xb8, 0xb0, 0xea, 0x38, 0x9e, 0x25, 0x60, 0xeb, 0xf4, 0xc8, 0xb6, 0xe8, 0x0e, - 0x27, 0x3c, 0x64, 0xe8, 0xeb, 0x30, 0xde, 0x09, 0xec, 0x23, 0x1a, 0x2c, 0x68, 0x4b, 0xda, 0xf2, - 0x94, 0x39, 0xfb, 0xb8, 0xd7, 0x1c, 0xe9, 0xf7, 0x9a, 0xe3, 0xeb, 0x12, 0x8a, 0x15, 0x16, 0x2d, - 0xc1, 0xa8, 0xef, 0x79, 0xce, 0x42, 0x4d, 0x52, 0x4d, 0x2b, 0xaa, 0xd1, 0x6d, 0xcf, 0x73, 0xb0, - 0xc4, 0x48, 0x49, 0x52, 0xf2, 0x82, 0x5e, 0x90, 0x24, 0xa1, 0x58, 0x61, 0x91, 0x05, 0x60, 0x79, - 0x6e, 0xc7, 0xe6, 0xb6, 0xe7, 0xb2, 0x85, 0xd1, 0x25, 0x7d, 0xb9, 0xbe, 0xd2, 0x6e, 0xa5, 0x61, - 0x4e, 0x0e, 0xd6, 0xf2, 0x0f, 0xf7, 0x05, 0x80, 0xb5, 0x84, 0xff, 0x5a, 0x47, 0x57, 0x5b, 0x6b, - 0x31, 0x9f, 0x89, 0x94, 0x70, 0x48, 0x40, 0x0c, 0x67, 0xc4, 0xa2, 0x3b, 0x30, 0xda, 0x21, 0x9c, - 0x2c, 0x8c, 0x2d, 0x69, 0xcb, 0xf5, 0x95, 0xd7, 0x4b, 0xc5, 0x2b, 0xbf, 0xb5, 0x30, 0x79, 0xb8, - 0xf1, 0x11, 0xa7, 0x2e, 0x13, 0xc2, 0x93, 0xd3, 0xad, 0x13, 0x4e, 0xb0, 0x14, 0x84, 0x76, 0xa1, - 0xee, 0x52, 0xfe, 0xd0, 0x0b, 0x0e, 0x05, 0x70, 0x61, 0x5c, 0xca, 0xcd, 0x9a, 0x3d, 0x98, 0x9d, - 0xad, 0x2d, 0xc5, 0x20, 0xcf, 0x2d, 0xd8, 0xcc, 0xb9, 0x7e, 0xaf, 0x59, 0xdf, 0x4a, 0xe5, 0xe0, - 0xac, 0x50, 0xe3, 0xef, 0x1a, 0xcc, 0xab, 0x28, 0xd9, 0x9e, 0x8b, 0x29, 0x0b, 0x1d, 0x8e, 0x7e, - 0x04, 0x13, 0x91, 0xe3, 0x98, 0x8c, 0x50, 0x7d, 0xe5, 0xcd, 0x93, 0x95, 0x46, 0xda, 0x8a, 0x62, - 0xcc, 0x39, 0x75, 0xa6, 0x89, 0x08, 0xcf, 0x70, 0x2c, 0x15, 0xdd, 0x83, 0x69, 0xd7, 0xeb, 0xd0, - 0x1d, 0xea, 0x50, 0x8b, 0x7b, 0x81, 0x8c, 0x5e, 0x7d, 0x65, 0x29, 0xab, 0x45, 0xd4, 0x8a, 0xf0, - 0xff, 0x56, 0x86, 0xce, 0x9c, 0xef, 0xf7, 0x9a, 0xd3, 0x59, 0x08, 0xce, 0xc9, 0x31, 0x3e, 0xd5, - 0xa1, 0x6e, 0x12, 0x66, 0x5b, 
0x91, 0x46, 0xf4, 0x53, 0x00, 0xc2, 0x79, 0x60, 0xef, 0x86, 0x5c, - 0x9e, 0x45, 0xc4, 0xfd, 0x9b, 0x27, 0x9f, 0x25, 0xc3, 0xde, 0x5a, 0x4d, 0x78, 0x37, 0x5c, 0x1e, - 0x1c, 0x9b, 0xaf, 0xc4, 0x19, 0x90, 0x22, 0x7e, 0xf6, 0xaf, 0xe6, 0xcc, 0xdd, 0x90, 0x38, 0xf6, - 0x9e, 0x4d, 0x3b, 0x5b, 0xa4, 0x4b, 0x71, 0x46, 0x23, 0x3a, 0x82, 0x49, 0x8b, 0xf8, 0xc4, 0xb2, - 0xf9, 0xf1, 0x42, 0x4d, 0x6a, 0x7f, 0xbb, 0xba, 0xf6, 0x35, 0xc5, 0x19, 0xe9, 0xbe, 0xa4, 0x74, - 0x4f, 0xc6, 0xe0, 0x41, 0xcd, 0x89, 0xae, 0x45, 0x07, 0xe6, 0x0a, 0xb6, 0xa3, 0x79, 0xd0, 0x0f, - 0xe9, 0x71, 0x54, 0x71, 0x58, 0xfc, 0x44, 0x6b, 0x30, 0x76, 0x44, 0x9c, 0x90, 0xca, 0xfa, 0xca, - 0x27, 0x6c, 0x79, 0x8c, 0x63, 0xa9, 0x38, 0xe2, 0xfd, 0x56, 0xed, 0x1d, 0x6d, 0xf1, 0x10, 0x66, - 0x72, 0xb6, 0x0e, 0xd1, 0xb5, 0x9e, 0xd7, 0xd5, 0x3a, 0xa9, 0xf6, 0x52, 0xe5, 0x77, 0x43, 0xe2, - 0x72, 0x9b, 0x1f, 0x67, 0x94, 0x19, 0x37, 0xe0, 0xdc, 0xda, 0xc6, 0x2d, 0xd5, 0x4f, 0x54, 0xdc, - 0xd1, 0x0a, 0x00, 0xfd, 0xc8, 0x0f, 0x28, 0x13, 0xb5, 0xa4, 0xba, 0x4a, 0x52, 0xae, 0x1b, 0x09, - 0x06, 0x67, 0xa8, 0x8c, 0x23, 0x50, 0x5d, 0x42, 0xf4, 0x19, 0x97, 0x74, 0xa9, 0xe2, 0x4b, 0x2a, - 0x51, 0xfa, 0x54, 0x62, 0xd0, 0x4d, 0x18, 0xdb, 0x15, 0x91, 0x51, 0xe6, 0x5f, 0xae, 0x1c, 0x44, - 0x73, 0xaa, 0xdf, 0x6b, 0x8e, 0x49, 0x00, 0x8e, 0x44, 0x18, 0x8f, 0x6a, 0xf0, 0xd5, 0x62, 0xc1, - 0xac, 0x79, 0xee, 0x9e, 0xbd, 0x1f, 0x06, 0xf2, 0x03, 0x7d, 0x07, 0xc6, 0x23, 0x91, 0xca, 0xa2, - 0xe5, 0xb8, 0xab, 0xed, 0x48, 0xe8, 0xb3, 0x5e, 0xf3, 0xa5, 0x22, 0x6b, 0x84, 0xc1, 0x8a, 0x0f, - 0x2d, 0xc3, 0x64, 0x40, 0x3f, 0x0c, 0x29, 0xe3, 0x4c, 0xe6, 0xdd, 0x94, 0x39, 0x2d, 0x52, 0x07, - 0x2b, 0x18, 0x4e, 0xb0, 0xe8, 0x63, 0x0d, 0xce, 0x47, 0x55, 0x99, 0xb3, 0x41, 0x55, 0xe4, 0xd5, - 0x2a, 0x39, 0x91, 0x63, 0x34, 0xbf, 0xa2, 0x8c, 0x3d, 0x3f, 0x04, 0x89, 0x87, 0xa9, 0x32, 0xfe, - 0xa3, 0xc1, 0x4b, 0xc3, 0x3b, 0x08, 0xda, 0x83, 0x89, 0x40, 0xfe, 0x8a, 0x8b, 0xf7, 0x7a, 0x15, - 0x83, 0xd4, 0x31, 0xcb, 0xfb, 0x51, 0xf4, 0xcd, 0x70, 0x2c, 0x1c, 0x59, 0x30, 0x6e, 0x49, 0x9b, - 0x54, 0x95, 0x5e, 0x3f, 0x5d, 0xbf, 0xcb, 0x7b, 0x20, 0x19, 0x42, 0x11, 0x18, 0x2b, 0xd1, 0xc6, - 0x6f, 0x35, 0x98, 0x2b, 0x54, 0x11, 0x6a, 0x80, 0x6e, 0xbb, 0x5c, 0xa6, 0x95, 0x1e, 0xc5, 0x68, - 0xd3, 0xe5, 0xf7, 0x44, 0xb2, 0x63, 0x81, 0x40, 0x97, 0x60, 0x74, 0x57, 0x8c, 0x40, 0x11, 0x8e, - 0x49, 0x73, 0xa6, 0xdf, 0x6b, 0x4e, 0x99, 0x9e, 0xe7, 0x44, 0x14, 0x12, 0x85, 0xbe, 0x01, 0xe3, - 0x8c, 0x07, 0xb6, 0xbb, 0xbf, 0x30, 0x2a, 0xb3, 0x45, 0xf6, 0xfb, 0x1d, 0x09, 0x89, 0xc8, 0x14, - 0x1a, 0xbd, 0x0a, 0x13, 0x47, 0x34, 0x90, 0x15, 0x32, 0x26, 0x29, 0x65, 0x37, 0xbd, 0x17, 0x81, - 0x22, 0xd2, 0x98, 0xc0, 0xf8, 0x7d, 0x0d, 0xea, 0x2a, 0x80, 0x0e, 0xb1, 0xbb, 0xe8, 0x7e, 0x26, - 0xa1, 0xa2, 0x48, 0xbc, 0x76, 0x8a, 0x48, 0x98, 0xf3, 0x71, 0xf3, 0x1a, 0x92, 0x81, 0x14, 0xea, - 0x96, 0xe7, 0x32, 0x1e, 0x10, 0xdb, 0x55, 0xe9, 0x9a, 0x6f, 0x10, 0x27, 0x25, 0x9e, 0x62, 0x33, - 0xcf, 0x2b, 0x05, 0xf5, 0x14, 0xc6, 0x70, 0x56, 0x2e, 0x7a, 0x90, 0x84, 0x58, 0x97, 0x1a, 0xde, - 0xaa, 0xa4, 0x41, 0x1c, 0xbe, 0x5a, 0x74, 0xff, 0xaa, 0xc1, 0x42, 0x19, 0x53, 0xae, 0x1e, 0xb5, - 0xcf, 0x54, 0x8f, 0xb5, 0xb3, 0xab, 0xc7, 0x3f, 0x69, 0x99, 0xd8, 0x33, 0x86, 0x7e, 0x0c, 0x93, - 0x62, 0x19, 0x92, 0xbb, 0x4d, 0xb4, 0x0e, 0xbc, 0x51, 0x6d, 0x75, 0xba, 0xb3, 0xfb, 0x13, 0x6a, - 0xf1, 0xdb, 0x94, 0x93, 0xb4, 0x19, 0xa7, 0x30, 0x9c, 0x48, 0x15, 0x9b, 0x13, 0xf3, 0xa9, 0x75, - 0x9a, 0x41, 0x24, 0x4d, 0xdb, 0xf1, 0xa9, 0x95, 0xf6, 0x6b, 0xf1, 0x85, 0xa5, 0x20, 0xe3, 0x97, - 0xd9, 0x60, 0x30, 0x96, 0x0f, 0x46, 0x99, 0x8b, 0xb5, 
0xb3, 0x73, 0xf1, 0x1f, 0x93, 0x56, 0x20, - 0xed, 0xbb, 0x65, 0x33, 0x8e, 0x3e, 0x18, 0x70, 0x73, 0xab, 0x9a, 0x9b, 0x05, 0xb7, 0x74, 0x72, - 0x52, 0x65, 0x31, 0x24, 0xe3, 0xe2, 0x2d, 0x18, 0xb3, 0x39, 0xed, 0xc6, 0xf5, 0x75, 0xb9, 0xb2, - 0x8f, 0xcd, 0x19, 0x25, 0x75, 0x6c, 0x53, 0xf0, 0xe3, 0x48, 0x8c, 0xf1, 0x24, 0x7f, 0x02, 0xe1, - 0x7b, 0xf4, 0x43, 0x98, 0x62, 0x6a, 0x22, 0xc7, 0x5d, 0xe2, 0x4a, 0x15, 0x3d, 0xc9, 0x7a, 0x77, - 0x4e, 0xa9, 0x9a, 0x8a, 0x21, 0x0c, 0xa7, 0x12, 0x33, 0x15, 0x5c, 0x3b, 0x55, 0x05, 0x17, 0xe2, - 0x5f, 0x5a, 0xc1, 0x01, 0x0c, 0x0b, 0x20, 0xfa, 0x01, 0x8c, 0x7b, 0x3e, 0xf9, 0x30, 0xa4, 0x2a, - 0x2a, 0xcf, 0xd9, 0xe0, 0xee, 0x48, 0xda, 0x61, 0x69, 0x02, 0x42, 0x67, 0x84, 0xc6, 0x4a, 0xa4, - 0xf1, 0x48, 0x83, 0xf9, 0x62, 0x33, 0x3b, 0x45, 0xb7, 0xd8, 0x86, 0xd9, 0x2e, 0xe1, 0xd6, 0x41, - 0x32, 0x50, 0xd4, 0x5d, 0x69, 0xb9, 0xdf, 0x6b, 0xce, 0xde, 0xce, 0x61, 0x9e, 0xf5, 0x9a, 0xe8, - 0xdd, 0xd0, 0x71, 0x8e, 0xf3, 0x3b, 0x63, 0x81, 0xdf, 0xf8, 0xb9, 0x0e, 0x33, 0xb9, 0xde, 0x5d, - 0x61, 0x3b, 0x5a, 0x85, 0xb9, 0x4e, 0xea, 0x6c, 0x81, 0x50, 0x66, 0x7c, 0x59, 0x11, 0x67, 0x33, - 0x45, 0xf2, 0x15, 0xe9, 0xf3, 0xa9, 0xa3, 0x7f, 0xee, 0xa9, 0x73, 0x0f, 0x66, 0x49, 0x32, 0xad, - 0x6f, 0x7b, 0x1d, 0xaa, 0x66, 0x65, 0x4b, 0x71, 0xcd, 0xae, 0xe6, 0xb0, 0xcf, 0x7a, 0xcd, 0x2f, - 0x15, 0x67, 0xbc, 0x80, 0xe3, 0x82, 0x14, 0xf4, 0x0a, 0x8c, 0x59, 0x5e, 0xe8, 0x72, 0x39, 0x50, - 0xf5, 0xb4, 0x54, 0xd6, 0x04, 0x10, 0x47, 0x38, 0x74, 0x15, 0xea, 0xa4, 0xd3, 0xb5, 0xdd, 0x55, - 0xcb, 0xa2, 0x8c, 0xc9, 0x6b, 0xdc, 0x64, 0x34, 0xa5, 0x57, 0x53, 0x30, 0xce, 0xd2, 0x18, 0xff, - 0xd5, 0xe2, 0x1d, 0xb1, 0x64, 0x97, 0x41, 0x97, 0xc5, 0x66, 0x24, 0x51, 0x2a, 0x30, 0x99, 0xe5, - 0x46, 0x82, 0x71, 0x8c, 0xcf, 0x5c, 0xb7, 0x6b, 0x95, 0xae, 0xdb, 0x7a, 0x85, 0xeb, 0xf6, 0xe8, - 0x89, 0xd7, 0xed, 0xc2, 0x89, 0xc7, 0x2a, 0x9c, 0xf8, 0x03, 0x98, 0x2d, 0xec, 0xf4, 0x37, 0x41, - 0xb7, 0xa8, 0xa3, 0x8a, 0xee, 0x39, 0xb7, 0xde, 0x81, 0x1b, 0x81, 0x39, 0xd1, 0xef, 0x35, 0xf5, - 0xb5, 0x8d, 0x5b, 0x58, 0x08, 0x31, 0x7e, 0xa7, 0xc1, 0xb9, 0x81, 0x9b, 0x31, 0xba, 0x0e, 0x33, - 0xb6, 0xcb, 0x69, 0xb0, 0x47, 0x2c, 0xba, 0x95, 0xa6, 0xf8, 0x05, 0x75, 0xaa, 0x99, 0xcd, 0x2c, - 0x12, 0xe7, 0x69, 0xd1, 0x45, 0xd0, 0x6d, 0x3f, 0xde, 0xae, 0xa5, 0xb6, 0xcd, 0x6d, 0x86, 0x05, - 0x4c, 0xd4, 0xc3, 0x01, 0x09, 0x3a, 0x0f, 0x49, 0x40, 0x57, 0x3b, 0x1d, 0x71, 0xdf, 0x50, 0x3e, - 0x4d, 0xea, 0xe1, 0xbb, 0x79, 0x34, 0x2e, 0xd2, 0x1b, 0xbf, 0xd1, 0xe0, 0x62, 0x69, 0x27, 0xa9, - 0xfc, 0x80, 0x42, 0x00, 0x7c, 0x12, 0x90, 0x2e, 0xe5, 0x34, 0x60, 0x43, 0xa6, 0x6b, 0x85, 0x77, - 0x89, 0x64, 0x70, 0x6f, 0x27, 0x82, 0x70, 0x46, 0xa8, 0xf1, 0xab, 0x1a, 0xcc, 0x60, 0x15, 0x8f, - 0x68, 0x55, 0xfc, 0xff, 0xaf, 0x0b, 0x77, 0x73, 0xeb, 0xc2, 0x73, 0x52, 0x23, 0x67, 0x5c, 0xd9, - 0xc2, 0x80, 0xee, 0x8b, 0x25, 0x9a, 0xf0, 0x90, 0x55, 0xbb, 0xf8, 0xe4, 0x85, 0x4a, 0xc6, 0x34, - 0x08, 0xd1, 0x37, 0x56, 0x02, 0x8d, 0xbe, 0x06, 0x8d, 0x1c, 0xbd, 0xe8, 0xf4, 0x61, 0x97, 0x06, - 0x98, 0xee, 0xd1, 0x80, 0xba, 0x16, 0x45, 0x57, 0x60, 0x92, 0xf8, 0xf6, 0x8d, 0xc0, 0x0b, 0x7d, - 0x15, 0xd1, 0x64, 0x94, 0xaf, 0x6e, 0x6f, 0x4a, 0x38, 0x4e, 0x28, 0x04, 0x75, 0x6c, 0x91, 0xca, - 0xab, 0xcc, 0x7a, 0x1d, 0xc1, 0x71, 0x42, 0x91, 0xb4, 0xef, 0xd1, 0xd2, 0xf6, 0x6d, 0x82, 0x1e, - 0xda, 0x1d, 0x75, 0x27, 0x78, 0x43, 0x11, 0xe8, 0xef, 0x6f, 0xae, 0x3f, 0xeb, 0x35, 0x2f, 0x95, - 0x3d, 0xfe, 0xf1, 0x63, 0x9f, 0xb2, 0xd6, 0xfb, 0x9b, 0xeb, 0x58, 0x30, 0x1b, 0x7f, 0xd6, 0xe0, - 0x5c, 0xee, 0x90, 0x67, 0xb0, 0xd2, 0x6c, 0xe7, 0x57, 0x9a, 0xd7, 0x4e, 0x11, 
0xb2, 0x92, 0xa5, - 0xc6, 0x2e, 0x1c, 0x42, 0x6e, 0x35, 0xef, 0x15, 0x1f, 0xc3, 0x2e, 0x57, 0xbe, 0x39, 0x94, 0xbf, - 0x80, 0x19, 0x7f, 0xab, 0xc1, 0xf9, 0x21, 0x59, 0x84, 0x1e, 0x00, 0xa4, 0x33, 0x66, 0x88, 0xd3, - 0x86, 0x28, 0x1c, 0xb8, 0xe7, 0xce, 0xca, 0x27, 0xaa, 0x14, 0x9a, 0x91, 0x88, 0x18, 0xd4, 0x03, - 0xca, 0x68, 0x70, 0x44, 0x3b, 0xef, 0x7a, 0x81, 0x72, 0xdd, 0xb7, 0x4f, 0xe1, 0xba, 0x81, 0xec, - 0x4d, 0xef, 0x5e, 0x38, 0x15, 0x8c, 0xb3, 0x5a, 0xd0, 0x83, 0xd4, 0x85, 0xd1, 0xdb, 0xeb, 0xb5, - 0x4a, 0x27, 0xca, 0x3f, 0x1b, 0x9f, 0xe0, 0xcc, 0x7f, 0x6a, 0x70, 0x21, 0x67, 0xe4, 0x7b, 0xb4, - 0xeb, 0x3b, 0x84, 0xd3, 0x33, 0x68, 0x46, 0xf7, 0x73, 0xcd, 0xe8, 0xed, 0x53, 0x78, 0x32, 0x36, - 0xb2, 0xf4, 0x16, 0xf3, 0x0f, 0x0d, 0x2e, 0x0e, 0xe5, 0x38, 0x83, 0xe2, 0xfa, 0x5e, 0xbe, 0xb8, - 0xae, 0x7d, 0x86, 0x73, 0x95, 0xdf, 0x1c, 0x2e, 0x96, 0xfa, 0xe1, 0x0b, 0x39, 0x3d, 0x8c, 0x3f, - 0x68, 0x30, 0x1d, 0x53, 0x8a, 0x75, 0xa9, 0xc2, 0xce, 0xbc, 0x02, 0xa0, 0xfe, 0x30, 0x89, 0x6f, - 0xf7, 0x7a, 0x6a, 0xf7, 0x8d, 0x04, 0x83, 0x33, 0x54, 0xe8, 0x26, 0xa0, 0xd8, 0xc2, 0x1d, 0x47, - 0x2e, 0x05, 0x62, 0xf5, 0xd4, 0x25, 0xef, 0xa2, 0xe2, 0x45, 0x78, 0x80, 0x02, 0x0f, 0xe1, 0x32, - 0xfe, 0xa2, 0xa5, 0x73, 0x5b, 0x82, 0x5f, 0x54, 0xcf, 0x4b, 0xe3, 0x4a, 0x3d, 0x9f, 0x9d, 0x3b, - 0x92, 0xf2, 0x85, 0x9d, 0x3b, 0xd2, 0xba, 0x92, 0x92, 0x78, 0xa4, 0x17, 0x4e, 0x21, 0x4b, 0xa1, - 0xea, 0x96, 0x77, 0x2b, 0xf3, 0x37, 0x59, 0x7d, 0xe5, 0xd5, 0x6a, 0xe6, 0x88, 0x34, 0x1d, 0xba, - 0xe3, 0x5f, 0x81, 0x49, 0xd7, 0xeb, 0x44, 0xfb, 0x70, 0x61, 0xbb, 0xd8, 0x52, 0x70, 0x9c, 0x50, - 0x0c, 0xfc, 0x91, 0x33, 0xfa, 0xf9, 0xfc, 0x91, 0x23, 0x37, 0x22, 0xc7, 0x11, 0x04, 0xf1, 0xf5, - 0x21, 0xdd, 0x88, 0x14, 0x1c, 0x27, 0x14, 0xe8, 0x4e, 0x3a, 0x5f, 0xc6, 0x65, 0x4c, 0xbe, 0x56, - 0x65, 0x44, 0x97, 0x0f, 0x14, 0xd3, 0x7c, 0xfc, 0xb4, 0x31, 0xf2, 0xe4, 0x69, 0x63, 0xe4, 0x93, - 0xa7, 0x8d, 0x91, 0x8f, 0xfb, 0x0d, 0xed, 0x71, 0xbf, 0xa1, 0x3d, 0xe9, 0x37, 0xb4, 0x4f, 0xfa, - 0x0d, 0xed, 0xd3, 0x7e, 0x43, 0xfb, 0xc5, 0xbf, 0x1b, 0x23, 0xdf, 0x7f, 0xf9, 0xa4, 0x7f, 0x95, - 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x60, 0x85, 0x64, 0x74, 0x1e, 0x00, 0x00, + // 2635 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x5b, 0x6f, 0x1c, 0x57, + 0x39, 0xb3, 0xbb, 0x5e, 0xaf, 0xbf, 0x8d, 0x1d, 0xfb, 0x84, 0x84, 0x8d, 0x49, 0x77, 0x93, 0x09, + 0x17, 0xa7, 0x75, 0xd6, 0x8d, 0x53, 0xb5, 0x85, 0x80, 0x84, 0xd7, 0x76, 0x52, 0xa7, 0x89, 0xe3, + 0x9c, 0x75, 0x03, 0x81, 0x12, 0x18, 0xcf, 0x1e, 0xdb, 0x83, 0x67, 0x67, 0xa6, 0x73, 0x66, 0x9d, + 0x5a, 0x42, 0xa8, 0xe2, 0x07, 0x54, 0xbc, 0xf2, 0x80, 0x2a, 0xf1, 0x50, 0x89, 0x17, 0xe0, 0x99, + 0x17, 0x90, 0x40, 0x6a, 0x04, 0x3c, 0x44, 0xa2, 0x42, 0x15, 0x12, 0x0b, 0x59, 0x84, 0xf8, 0x0b, + 0xc8, 0x4f, 0xe8, 0x5c, 0xe6, 0xba, 0x3b, 0xce, 0xac, 0x49, 0xac, 0x20, 0xf5, 0x6d, 0xf7, 0x3b, + 0xdf, 0xed, 0x7c, 0xf7, 0x73, 0xe6, 0xc0, 0xec, 0xce, 0xeb, 0xb4, 0x6e, 0xd8, 0x73, 0x9a, 0x63, + 0xcc, 0xb9, 0x84, 0xda, 0x1d, 0x57, 0x27, 0x73, 0xbb, 0x97, 0x35, 0xd3, 0xd9, 0xd6, 0xae, 0xcc, + 0x6d, 0x11, 0x8b, 0xb8, 0x9a, 0x47, 0x5a, 0x75, 0xc7, 0xb5, 0x3d, 0x1b, 0x9d, 0x15, 0xd8, 0x75, + 0xcd, 0x31, 0xea, 0x3e, 0x76, 0xdd, 0xc7, 0x9e, 0xbe, 0xb4, 0x65, 0x78, 0xdb, 0x9d, 0x8d, 0xba, + 0x6e, 0xb7, 0xe7, 0xb6, 0xec, 0x2d, 0x7b, 0x8e, 0x13, 0x6d, 0x74, 0x36, 0xf9, 0x3f, 0xfe, 0x87, + 0xff, 0x12, 0xcc, 0xa6, 0xd5, 0x88, 0x68, 0xdd, 0x76, 0x99, 0xd8, 0xa4, 0xc0, 0xe9, 0x57, 0x42, + 0x9c, 0xb6, 0xa6, 0x6f, 0x1b, 0x16, 0x71, 0xf7, 0xe6, 0x9c, 0x9d, 
0xad, 0xb8, 0xbe, 0xc3, 0x50, + 0xd1, 0xb9, 0x36, 0xf1, 0xb4, 0x41, 0xb2, 0xe6, 0xd2, 0xa8, 0xdc, 0x8e, 0xe5, 0x19, 0xed, 0x7e, + 0x31, 0xaf, 0x3e, 0x89, 0x80, 0xea, 0xdb, 0xa4, 0xad, 0x25, 0xe9, 0xd4, 0x0f, 0xf2, 0x70, 0x6a, + 0xc1, 0x34, 0x6d, 0x9d, 0xc1, 0x96, 0xc8, 0xae, 0xa1, 0x93, 0xa6, 0xa7, 0x79, 0x1d, 0x8a, 0xbe, + 0x08, 0xc5, 0x96, 0x6b, 0xec, 0x12, 0xb7, 0xa2, 0x9c, 0x53, 0x66, 0xc6, 0x1a, 0x13, 0x0f, 0xbb, + 0xb5, 0x63, 0xbd, 0x6e, 0xad, 0xb8, 0xc4, 0xa1, 0x58, 0xae, 0xa2, 0x73, 0x50, 0x70, 0x6c, 0xdb, + 0xac, 0xe4, 0x38, 0xd6, 0x71, 0x89, 0x55, 0x58, 0xb3, 0x6d, 0x13, 0xf3, 0x15, 0xce, 0x89, 0x73, + 0xae, 0xe4, 0x13, 0x9c, 0x38, 0x14, 0xcb, 0x55, 0xa4, 0x03, 0xe8, 0xb6, 0xd5, 0x32, 0x3c, 0xc3, + 0xb6, 0x68, 0xa5, 0x70, 0x2e, 0x3f, 0x53, 0x9e, 0x9f, 0xab, 0x87, 0x6e, 0x0e, 0x36, 0x56, 0x77, + 0x76, 0xb6, 0x18, 0x80, 0xd6, 0x99, 0xfd, 0xea, 0xbb, 0x97, 0xeb, 0x8b, 0x3e, 0x5d, 0x03, 0x49, + 0xe6, 0x10, 0x80, 0x28, 0x8e, 0xb0, 0x45, 0x6f, 0x42, 0xa1, 0xa5, 0x79, 0x5a, 0x65, 0xe4, 0x9c, + 0x32, 0x53, 0x9e, 0xbf, 0x94, 0xca, 0x5e, 0xda, 0xad, 0x8e, 0xb5, 0x07, 0xcb, 0xef, 0x7a, 0xc4, + 0xa2, 0x8c, 0x79, 0x89, 0xed, 0x6c, 0x49, 0xf3, 0x34, 0xcc, 0x99, 0xa0, 0x0d, 0x28, 0x5b, 0xc4, + 0x7b, 0x60, 0xbb, 0x3b, 0x0c, 0x58, 0x29, 0x72, 0x9e, 0x51, 0x95, 0xfb, 0x23, 0xb3, 0xbe, 0x2a, + 0x09, 0xf8, 0x9e, 0x19, 0x59, 0xe3, 0x44, 0xaf, 0x5b, 0x2b, 0xaf, 0x86, 0x7c, 0x70, 0x94, 0xa9, + 0xfa, 0x47, 0x05, 0x26, 0xa5, 0x87, 0x0c, 0xdb, 0xc2, 0x84, 0x76, 0x4c, 0x0f, 0x7d, 0x17, 0x46, + 0x85, 0xd1, 0x28, 0xf7, 0x4e, 0x79, 0xfe, 0x95, 0x83, 0x85, 0x0a, 0x69, 0x49, 0x36, 0x8d, 0x13, + 0xd2, 0x58, 0xa3, 0x62, 0x9d, 0x62, 0x9f, 0x2b, 0xba, 0x0b, 0xc7, 0x2d, 0xbb, 0x45, 0x9a, 0xc4, + 0x24, 0xba, 0x67, 0xbb, 0xdc, 0x73, 0xe5, 0xf9, 0x73, 0x51, 0x29, 0x2c, 0x4f, 0x98, 0xed, 0x57, + 0x23, 0x78, 0x8d, 0xc9, 0x5e, 0xb7, 0x76, 0x3c, 0x0a, 0xc1, 0x31, 0x3e, 0xea, 0xdf, 0x8a, 0x50, + 0x6e, 0x68, 0xd4, 0xd0, 0x85, 0x44, 0xf4, 0x43, 0x00, 0xcd, 0xf3, 0x5c, 0x63, 0xa3, 0xe3, 0xf1, + 0xbd, 0x30, 0x9f, 0x7f, 0xf9, 0xe0, 0xbd, 0x44, 0xc8, 0xeb, 0x0b, 0x01, 0xed, 0xb2, 0xe5, 0xb9, + 0x7b, 0x8d, 0x0b, 0xbe, 0xf7, 0xc3, 0x85, 0x1f, 0xfd, 0xbd, 0x36, 0x7e, 0xa7, 0xa3, 0x99, 0xc6, + 0xa6, 0x41, 0x5a, 0xab, 0x5a, 0x9b, 0xe0, 0x88, 0x44, 0xb4, 0x0b, 0x25, 0x5d, 0x73, 0x34, 0xdd, + 0xf0, 0xf6, 0x2a, 0x39, 0x2e, 0xfd, 0xb5, 0xec, 0xd2, 0x17, 0x25, 0xa5, 0x90, 0x7d, 0x5e, 0xca, + 0x2e, 0xf9, 0xe0, 0x7e, 0xc9, 0x81, 0x2c, 0xf4, 0x03, 0x98, 0xd4, 0x6d, 0x8b, 0x76, 0xda, 0x84, + 0x2e, 0xda, 0x1d, 0xcb, 0x23, 0x2e, 0xad, 0xe4, 0xb9, 0xfc, 0x57, 0xb3, 0x78, 0x52, 0xd2, 0x2c, + 0x72, 0x16, 0x0e, 0x0f, 0xfc, 0x8a, 0x14, 0x3f, 0xb9, 0x98, 0xe0, 0x8b, 0xfb, 0x24, 0xa1, 0x19, + 0x28, 0x31, 0xaf, 0x30, 0x9d, 0x2a, 0x05, 0x91, 0xb7, 0x4c, 0xf1, 0x55, 0x09, 0xc3, 0xc1, 0x6a, + 0x5f, 0x1c, 0x8c, 0x3c, 0x9d, 0x38, 0x60, 0x1a, 0x68, 0xa6, 0xc9, 0x10, 0x28, 0x4f, 0x9b, 0x92, + 0xd0, 0x60, 0x41, 0xc2, 0x70, 0xb0, 0x8a, 0xee, 0x40, 0xd1, 0xd3, 0x0c, 0xcb, 0xa3, 0x95, 0x51, + 0x6e, 0x9f, 0x8b, 0x59, 0xec, 0xb3, 0xce, 0x28, 0xc2, 0x42, 0xc3, 0xff, 0x52, 0x2c, 0x19, 0x4d, + 0x9b, 0x70, 0x22, 0x11, 0x38, 0x68, 0x12, 0xf2, 0x3b, 0x64, 0x4f, 0x94, 0x3a, 0xcc, 0x7e, 0xa2, + 0x45, 0x18, 0xd9, 0xd5, 0xcc, 0x0e, 0xe1, 0x85, 0x2d, 0x5e, 0x29, 0xd2, 0x13, 0xcc, 0xe7, 0x8a, + 0x05, 0xed, 0x57, 0x72, 0xaf, 0x2b, 0xd3, 0x3b, 0x30, 0x1e, 0x0b, 0x94, 0x01, 0xb2, 0x96, 0xe2, + 0xb2, 0xea, 0x07, 0x15, 0xbd, 0x50, 0xf8, 0x9d, 0x8e, 0x66, 0x79, 0x86, 0xb7, 0x17, 0x11, 0xa6, + 0x5e, 0x87, 0xa9, 0xc5, 0xe5, 0x9b, 0xb2, 0x90, 0xfb, 0xc6, 0x9e, 0x07, 0x20, 0xef, 0x3a, 
0x2e, + 0xa1, 0xac, 0x88, 0xc9, 0x72, 0x1e, 0xd4, 0xc9, 0xe5, 0x60, 0x05, 0x47, 0xb0, 0xd4, 0xfb, 0x30, + 0x2a, 0xc3, 0x05, 0x35, 0x7d, 0xed, 0x94, 0xc3, 0x68, 0xd7, 0x18, 0x97, 0x92, 0x46, 0xee, 0x32, + 0x26, 0x52, 0x59, 0xf5, 0x3f, 0x0a, 0x80, 0x14, 0xd0, 0x24, 0x1e, 0xeb, 0x22, 0x16, 0x8b, 0x46, + 0x25, 0xde, 0x45, 0x78, 0x34, 0xf2, 0x15, 0xd4, 0x82, 0x92, 0xee, 0x67, 0x4a, 0x2e, 0x4b, 0xa6, + 0x84, 0xdc, 0xfd, 0x9f, 0xb2, 0x48, 0x4c, 0x06, 0x89, 0xea, 0x67, 0x48, 0xc0, 0x79, 0x7a, 0x03, + 0xc6, 0x63, 0xc8, 0x03, 0x9c, 0x75, 0x35, 0xee, 0xac, 0x2f, 0x64, 0xd2, 0x22, 0xea, 0xa3, 0x5d, + 0x90, 0x9d, 0x2f, 0xc3, 0xae, 0x6f, 0xc0, 0xc8, 0x06, 0xab, 0x38, 0x52, 0xd8, 0xc5, 0xcc, 0xc5, + 0xa9, 0x31, 0xc6, 0x4c, 0xce, 0x01, 0x58, 0xb0, 0x50, 0xdf, 0xcf, 0xc1, 0x0b, 0xc9, 0x46, 0xb0, + 0x68, 0x5b, 0x9b, 0xc6, 0x56, 0xc7, 0xe5, 0x7f, 0xd0, 0xd7, 0xa1, 0x28, 0x58, 0x4a, 0x8d, 0x66, + 0xfc, 0x04, 0x6a, 0x72, 0xe8, 0x7e, 0xb7, 0x76, 0x3a, 0x49, 0x2a, 0x56, 0xb0, 0xa4, 0x63, 0x79, + 0xed, 0x92, 0x77, 0x3a, 0x84, 0x7a, 0xc2, 0x4b, 0xb2, 0xb2, 0x60, 0x09, 0xc3, 0xc1, 0x2a, 0x7a, + 0x4f, 0x81, 0x93, 0x2d, 0x59, 0xcc, 0x22, 0x3a, 0xc8, 0x4e, 0x73, 0x39, 0x5b, 0x15, 0x8c, 0x10, + 0x36, 0x3e, 0x27, 0x95, 0x3d, 0x39, 0x60, 0x11, 0x0f, 0x12, 0xa5, 0xfe, 0x4b, 0x81, 0xd3, 0x83, + 0x3b, 0x23, 0xda, 0x84, 0x51, 0x97, 0xff, 0xf2, 0x9b, 0xd2, 0xd5, 0x2c, 0x0a, 0xc9, 0x6d, 0xa6, + 0xf7, 0x59, 0xf1, 0x9f, 0x62, 0x9f, 0x39, 0xd2, 0xa1, 0xa8, 0x73, 0x9d, 0x64, 0x4c, 0x5f, 0x1d, + 0xae, 0x8f, 0xc7, 0x2d, 0x10, 0xd4, 0x3b, 0x01, 0xc6, 0x92, 0xb5, 0xfa, 0x73, 0x05, 0x4e, 0x24, + 0x0a, 0x14, 0xaa, 0x42, 0xde, 0xb0, 0x3c, 0x1e, 0x56, 0x79, 0xe1, 0xa3, 0x15, 0xcb, 0x13, 0x19, + 0xca, 0x16, 0xd0, 0x79, 0x28, 0x6c, 0xb0, 0xb1, 0x2e, 0xcf, 0x8b, 0xf3, 0x78, 0xaf, 0x5b, 0x1b, + 0x6b, 0xd8, 0xb6, 0x29, 0x30, 0xf8, 0x12, 0xfa, 0x12, 0x14, 0xa9, 0xe7, 0x1a, 0xd6, 0x96, 0xec, + 0x21, 0x7c, 0x8e, 0x69, 0x72, 0x88, 0x40, 0x93, 0xcb, 0xe8, 0x45, 0x18, 0xdd, 0x25, 0x2e, 0x2f, + 0x3e, 0x23, 0x1c, 0x93, 0x77, 0x87, 0xbb, 0x02, 0x24, 0x50, 0x7d, 0x04, 0xf5, 0x97, 0x39, 0x28, + 0x4b, 0x07, 0x9a, 0x9a, 0xd1, 0x46, 0xf7, 0x22, 0x01, 0x25, 0x3c, 0xf1, 0xd2, 0x10, 0x9e, 0x08, + 0x73, 0x7d, 0x40, 0x04, 0x12, 0x28, 0xb3, 0xce, 0xe8, 0xb9, 0xa2, 0xbd, 0x08, 0x07, 0xd4, 0x33, + 0x06, 0x9e, 0x24, 0x6b, 0x9c, 0x94, 0x02, 0xca, 0x21, 0x8c, 0xe2, 0x28, 0x5f, 0x74, 0x3f, 0x70, + 0xf1, 0x30, 0x0d, 0x9e, 0x6d, 0x3e, 0x9b, 0x77, 0x3f, 0x52, 0xa0, 0x92, 0x46, 0x14, 0xcb, 0x47, + 0xe5, 0x50, 0xf9, 0x98, 0x3b, 0xba, 0x7c, 0xfc, 0xad, 0x12, 0xf1, 0x3d, 0xa5, 0xe8, 0x7b, 0x50, + 0x62, 0x03, 0x3e, 0x9f, 0xd7, 0x45, 0xef, 0x79, 0x39, 0xdb, 0x71, 0xe0, 0xf6, 0xc6, 0xf7, 0x89, + 0xee, 0xdd, 0x22, 0x9e, 0x16, 0xf6, 0xb9, 0x10, 0x86, 0x03, 0xae, 0xe8, 0x36, 0x14, 0xa8, 0x43, + 0xf4, 0x61, 0x7a, 0x3c, 0x57, 0xad, 0xe9, 0x10, 0x3d, 0xac, 0xd7, 0xec, 0x1f, 0xe6, 0x8c, 0xd4, + 0x9f, 0x46, 0x9d, 0x41, 0x69, 0xdc, 0x19, 0x69, 0x26, 0x56, 0x8e, 0xce, 0xc4, 0xbf, 0x09, 0x4a, + 0x01, 0xd7, 0xef, 0xa6, 0x41, 0x3d, 0xf4, 0x76, 0x9f, 0x99, 0xeb, 0xd9, 0xcc, 0xcc, 0xa8, 0xb9, + 0x91, 0x83, 0x2c, 0xf3, 0x21, 0x11, 0x13, 0xaf, 0xc2, 0x88, 0xe1, 0x91, 0xb6, 0x9f, 0x5f, 0x17, + 0x33, 0xdb, 0x38, 0x1c, 0x1c, 0x56, 0x18, 0x3d, 0x16, 0x6c, 0xd4, 0x47, 0xf1, 0x1d, 0x30, 0xdb, + 0xa3, 0xef, 0xc0, 0x18, 0x95, 0xc3, 0x8e, 0x5f, 0x25, 0x66, 0xb3, 0xc8, 0x09, 0xc6, 0xd5, 0x29, + 0x29, 0x6a, 0xcc, 0x87, 0x50, 0x1c, 0x72, 0x8c, 0x64, 0x70, 0x6e, 0xa8, 0x0c, 0x4e, 0xf8, 0x3f, + 0x35, 0x83, 0x5d, 0x18, 0xe4, 0x40, 0xf4, 0x6d, 0x28, 0xda, 0x8e, 0xf6, 0x4e, 0x30, 0x78, 0x3d, + 0xe1, 0x64, 0x72, 
0x9b, 0xe3, 0x0e, 0x0a, 0x13, 0x60, 0x32, 0xc5, 0x32, 0x96, 0x2c, 0xd5, 0xf7, + 0x15, 0x98, 0x4c, 0x16, 0xb3, 0x21, 0xaa, 0xc5, 0x1a, 0x4c, 0xb4, 0x35, 0x4f, 0xdf, 0x0e, 0x1a, + 0x8a, 0x3c, 0xff, 0xcf, 0xf4, 0xba, 0xb5, 0x89, 0x5b, 0xb1, 0x95, 0xfd, 0x6e, 0x0d, 0x5d, 0xeb, + 0x98, 0xe6, 0x5e, 0xfc, 0x2c, 0x94, 0xa0, 0x57, 0x3f, 0xcc, 0x05, 0x99, 0xd3, 0x77, 0xb8, 0x61, + 0x13, 0xac, 0x1e, 0x8c, 0x73, 0xc9, 0x09, 0x36, 0x1c, 0xf4, 0x70, 0x04, 0x0b, 0xb9, 0x7d, 0x03, + 0xe3, 0xd2, 0xe1, 0x8e, 0x56, 0xcf, 0xd9, 0xf8, 0xf8, 0xd7, 0x02, 0x8c, 0xc7, 0x9a, 0x5c, 0x86, + 0x31, 0x72, 0x01, 0x4e, 0xb4, 0xc2, 0xa8, 0xe4, 0xe7, 0x3e, 0xe1, 0xaf, 0xcf, 0x4a, 0xe4, 0x68, + 0x4a, 0x71, 0xba, 0x24, 0x7e, 0x3c, 0xc7, 0xf2, 0x4f, 0x3d, 0xc7, 0xee, 0xc2, 0x84, 0x16, 0x8c, + 0x35, 0xb7, 0xec, 0x96, 0x7f, 0x30, 0xad, 0x4b, 0xaa, 0x89, 0x85, 0xd8, 0xea, 0x7e, 0xb7, 0xf6, + 0x99, 0xe4, 0x30, 0xc4, 0xe0, 0x38, 0xc1, 0x05, 0x5d, 0x80, 0x11, 0xee, 0x1d, 0x3e, 0x79, 0xe4, + 0xc3, 0x9a, 0xc2, 0x0d, 0x8b, 0xc5, 0x1a, 0xba, 0x0c, 0x65, 0xad, 0xd5, 0x36, 0xac, 0x05, 0x5d, + 0x27, 0xd4, 0x3f, 0x90, 0xf2, 0x71, 0x66, 0x21, 0x04, 0xe3, 0x28, 0x0e, 0xb2, 0x60, 0x62, 0xd3, + 0x70, 0xa9, 0xb7, 0xb0, 0xab, 0x19, 0xa6, 0xb6, 0x61, 0x12, 0x79, 0x3c, 0xcd, 0x34, 0x3f, 0x34, + 0x3b, 0x1b, 0xfe, 0x80, 0x72, 0xda, 0xdf, 0xdf, 0xb5, 0x18, 0x37, 0x9c, 0xe0, 0xce, 0x86, 0x15, + 0xcf, 0x36, 0x89, 0xc8, 0x68, 0x5a, 0x29, 0x65, 0x17, 0xb6, 0x1e, 0x90, 0x85, 0xc3, 0x4a, 0x08, + 0xa3, 0x38, 0xca, 0x57, 0xfd, 0x4b, 0x70, 0x46, 0x48, 0x99, 0x65, 0xd1, 0x45, 0x36, 0x19, 0xf3, + 0x25, 0x19, 0x6f, 0x91, 0xe1, 0x96, 0x83, 0xb1, 0xbf, 0x1e, 0xb9, 0x42, 0xcc, 0x65, 0xba, 0x42, + 0xcc, 0x67, 0xb8, 0x42, 0x2c, 0x1c, 0x78, 0x85, 0x98, 0x70, 0xe4, 0x48, 0x06, 0x47, 0x26, 0x0c, + 0x5b, 0x7c, 0x46, 0x86, 0x7d, 0x1b, 0x26, 0x12, 0xa7, 0xf2, 0x1b, 0x90, 0xd7, 0x89, 0x29, 0x6b, + 0xfb, 0x13, 0x2e, 0x0d, 0xfb, 0xce, 0xf4, 0x8d, 0xd1, 0x5e, 0xb7, 0x96, 0x5f, 0x5c, 0xbe, 0x89, + 0x19, 0x13, 0xf5, 0xd7, 0x79, 0xbf, 0x9a, 0x87, 0xa1, 0xf5, 0x69, 0x59, 0xf8, 0x5f, 0xcb, 0x42, + 0x22, 0x34, 0x46, 0x9f, 0x51, 0x68, 0xfc, 0x3b, 0x18, 0x7b, 0xf9, 0x3d, 0x15, 0x7a, 0x21, 0xd2, + 0x33, 0x1a, 0x65, 0x49, 0x9e, 0x7f, 0x93, 0xec, 0x89, 0x06, 0x72, 0x21, 0xda, 0x40, 0xc6, 0x06, + 0x5f, 0xaf, 0xa0, 0xab, 0x50, 0x24, 0x9b, 0x9b, 0x44, 0xf7, 0x64, 0x52, 0xf9, 0x17, 0xa3, 0xc5, + 0x65, 0x0e, 0xdd, 0xef, 0xd6, 0xa6, 0x22, 0x22, 0x05, 0x10, 0x4b, 0x12, 0xf4, 0x0d, 0x18, 0xf3, + 0x8c, 0x36, 0x59, 0x68, 0xb5, 0x48, 0x8b, 0xdb, 0xbb, 0x3c, 0xff, 0x62, 0xb6, 0x89, 0x70, 0xdd, + 0x68, 0x13, 0x71, 0x58, 0x5c, 0xf7, 0x19, 0xe0, 0x90, 0x97, 0xfa, 0x30, 0x98, 0xdd, 0xb8, 0x58, + 0xdc, 0x31, 0xc9, 0x11, 0x0c, 0xf9, 0xcd, 0xd8, 0x90, 0x7f, 0x39, 0xf3, 0xfd, 0x21, 0x53, 0x2f, + 0x75, 0xd0, 0xff, 0x48, 0xf1, 0x87, 0xb6, 0x00, 0xf7, 0x08, 0x86, 0x69, 0x1c, 0x1f, 0xa6, 0x2f, + 0x0d, 0xb5, 0x97, 0x94, 0x81, 0xfa, 0xe3, 0xfe, 0x9d, 0xf0, 0xa1, 0xba, 0x0d, 0x13, 0xad, 0x58, + 0xaa, 0x0e, 0x73, 0x4e, 0xe1, 0xac, 0x82, 0x1c, 0x47, 0x2c, 0x53, 0xe3, 0x79, 0x8f, 0x13, 0xcc, + 0xd9, 0x39, 0x81, 0x5f, 0xcf, 0x66, 0xbb, 0xe9, 0x8a, 0x5e, 0xf3, 0x06, 0xdb, 0x12, 0xfa, 0x0b, + 0x36, 0xea, 0x4f, 0x72, 0xb1, 0x6d, 0x05, 0x72, 0xbe, 0xd6, 0x5f, 0xf3, 0x44, 0xa6, 0x9d, 0xcc, + 0x54, 0xef, 0xd4, 0x44, 0x4f, 0x83, 0x01, 0xfd, 0xec, 0x6c, 0xac, 0x9f, 0x95, 0x12, 0xbd, 0x4c, + 0x4d, 0xf4, 0x32, 0x18, 0xd0, 0xc7, 0x62, 0x55, 0x75, 0xe4, 0x69, 0x57, 0x55, 0xf5, 0x67, 0x39, + 0xbf, 0x5d, 0x84, 0x45, 0xe9, 0x49, 0x65, 0xe7, 0x0d, 0x28, 0xd9, 0x0e, 0xc3, 0xb5, 0xfd, 0xad, + 0xcf, 0xfa, 0x81, 0x7a, 0x5b, 0xc2, 0xf7, 
0xbb, 0xb5, 0x4a, 0x92, 0xad, 0xbf, 0x86, 0x03, 0xea, + 0xb0, 0x80, 0xe5, 0x33, 0x15, 0xb0, 0xc2, 0xf0, 0x05, 0x6c, 0x11, 0xa6, 0xc2, 0x02, 0xdb, 0x24, + 0xba, 0x6d, 0xb5, 0xa8, 0xac, 0xf4, 0xa7, 0x7a, 0xdd, 0xda, 0xd4, 0x7a, 0x72, 0x11, 0xf7, 0xe3, + 0xab, 0xbf, 0x50, 0x60, 0xaa, 0xef, 0x63, 0x1d, 0xba, 0x0a, 0xe3, 0x06, 0x9b, 0xc8, 0x37, 0x35, + 0x9d, 0x44, 0x82, 0xe7, 0x94, 0x54, 0x6f, 0x7c, 0x25, 0xba, 0x88, 0xe3, 0xb8, 0xe8, 0x0c, 0xe4, + 0x0d, 0xc7, 0xbf, 0x18, 0xe5, 0x1d, 0x7c, 0x65, 0x8d, 0x62, 0x06, 0x63, 0xad, 0x78, 0x5b, 0x73, + 0x5b, 0x0f, 0x34, 0x97, 0xd5, 0x4a, 0x97, 0x4d, 0x2f, 0xf9, 0x78, 0x2b, 0x7e, 0x23, 0xbe, 0x8c, + 0x93, 0xf8, 0xea, 0x87, 0x0a, 0x9c, 0x49, 0x3d, 0x04, 0x66, 0xfe, 0x9e, 0xab, 0x01, 0x38, 0x9a, + 0xab, 0xb5, 0x89, 0x3c, 0x38, 0x1d, 0xe2, 0x33, 0x69, 0x50, 0x8e, 0xd7, 0x02, 0x46, 0x38, 0xc2, + 0x54, 0xfd, 0x20, 0x07, 0xe3, 0x58, 0x46, 0xb0, 0xb8, 0xe5, 0x7b, 0xf6, 0x4d, 0xe0, 0x4e, 0xac, + 0x09, 0x3c, 0x61, 0xdc, 0x8a, 0x29, 0x97, 0xd6, 0x02, 0xd0, 0x3d, 0x28, 0x52, 0xfe, 0xad, 0x3c, + 0xdb, 0x9d, 0x75, 0x9c, 0x29, 0x27, 0x0c, 0x9d, 0x20, 0xfe, 0x63, 0xc9, 0x50, 0xed, 0x29, 0x50, + 0x8d, 0xe1, 0xcb, 0x8f, 0x7a, 0x2e, 0x26, 0x9b, 0xc4, 0x25, 0x96, 0x4e, 0xd0, 0x2c, 0x94, 0x34, + 0xc7, 0xb8, 0xee, 0xda, 0x1d, 0x47, 0x7a, 0x34, 0x68, 0x1c, 0x0b, 0x6b, 0x2b, 0x1c, 0x8e, 0x03, + 0x0c, 0x86, 0xed, 0x6b, 0x24, 0xe3, 0x2a, 0x72, 0x33, 0x2a, 0xe0, 0x38, 0xc0, 0x08, 0x26, 0xc7, + 0x42, 0xea, 0xe4, 0xd8, 0x80, 0x7c, 0xc7, 0x68, 0xc9, 0xeb, 0xdc, 0x97, 0xfd, 0x62, 0xf1, 0xd6, + 0xca, 0xd2, 0x7e, 0xb7, 0x76, 0x3e, 0xed, 0x2d, 0x82, 0xb7, 0xe7, 0x10, 0x5a, 0x7f, 0x6b, 0x65, + 0x09, 0x33, 0x62, 0xf5, 0x77, 0x0a, 0x4c, 0xc5, 0x36, 0x79, 0x04, 0x0d, 0x74, 0x2d, 0xde, 0x40, + 0x5f, 0x1a, 0xc2, 0x65, 0x29, 0xed, 0xd3, 0x48, 0x6c, 0x82, 0xf7, 0xce, 0xf5, 0xe4, 0xf7, 0xf9, + 0x8b, 0x99, 0x2f, 0x7d, 0xd3, 0x3f, 0xca, 0xab, 0x7f, 0xc8, 0xc1, 0xc9, 0x01, 0x51, 0x84, 0xee, + 0x03, 0x84, 0xe3, 0xed, 0x00, 0xa3, 0x0d, 0x10, 0xd8, 0xf7, 0x89, 0x62, 0x82, 0x7f, 0x35, 0x0f, + 0xa1, 0x11, 0x8e, 0x88, 0x42, 0xd9, 0x25, 0x94, 0xb8, 0xbb, 0xa4, 0x75, 0x8d, 0x57, 0x7f, 0x66, + 0xba, 0xaf, 0x0e, 0x61, 0xba, 0xbe, 0xe8, 0x0d, 0xa7, 0x62, 0x1c, 0x32, 0xc6, 0x51, 0x29, 0xe8, + 0x7e, 0x68, 0x42, 0xf1, 0x14, 0xe4, 0x4a, 0xa6, 0x1d, 0xc5, 0x5f, 0xb1, 0x1c, 0x60, 0xcc, 0x8f, + 0x15, 0x38, 0x15, 0x53, 0x72, 0x9d, 0xb4, 0x1d, 0x53, 0xf3, 0x8e, 0x62, 0x22, 0xbd, 0x17, 0x2b, + 0x46, 0xaf, 0x0d, 0x61, 0x49, 0x5f, 0xc9, 0xd4, 0xb9, 0xf4, 0xcf, 0x0a, 0x9c, 0x19, 0x48, 0x71, + 0x04, 0xc9, 0xf5, 0xcd, 0x78, 0x72, 0x5d, 0x39, 0xc4, 0xbe, 0xd2, 0x2f, 0x7d, 0xcf, 0xa4, 0xda, + 0xe1, 0xff, 0xb2, 0x7b, 0xa8, 0xbf, 0x52, 0xe0, 0xb8, 0x8f, 0xc9, 0xa6, 0xc3, 0x0c, 0xc7, 0xf5, + 0x79, 0x00, 0xf9, 0x7e, 0xcb, 0xff, 0x30, 0x93, 0x0f, 0xf5, 0xbe, 0x1e, 0xac, 0xe0, 0x08, 0x16, + 0xba, 0x01, 0xc8, 0xd7, 0xb0, 0x69, 0xfa, 0xd7, 0x9b, 0xbc, 0x05, 0xe4, 0x1b, 0xd3, 0x92, 0x16, + 0xe1, 0x3e, 0x0c, 0x3c, 0x80, 0x4a, 0xfd, 0xbd, 0x12, 0xf6, 0x6d, 0x0e, 0x7e, 0x5e, 0x2d, 0xcf, + 0x95, 0x4b, 0xb5, 0x7c, 0xb4, 0xef, 0x70, 0xcc, 0xe7, 0xb6, 0xef, 0x70, 0xed, 0x52, 0x52, 0xe2, + 0x4f, 0x85, 0xc4, 0x2e, 0x78, 0x2a, 0x64, 0x9d, 0xf2, 0x6e, 0x46, 0x5e, 0xed, 0xc5, 0x4f, 0xf7, + 0x07, 0xa8, 0xc3, 0xc2, 0x74, 0xe0, 0xf5, 0xdc, 0x6c, 0xe4, 0x3d, 0x51, 0x62, 0xba, 0xc8, 0xf0, + 0xa6, 0xa8, 0xf0, 0x94, 0xde, 0x14, 0xcd, 0x46, 0xde, 0x14, 0x89, 0x9b, 0xbf, 0x70, 0x22, 0xea, + 0x7f, 0x57, 0x74, 0x3b, 0xec, 0x2f, 0xe2, 0xce, 0xef, 0xf3, 0x59, 0x5a, 0xf4, 0x01, 0x4f, 0xe6, + 0x30, 0x9c, 0x76, 0x88, 0x2b, 0xc0, 0xa1, 0x96, 0x2c, 0x53, 0x47, 
0xb9, 0x32, 0xd3, 0xbd, 0x6e, + 0xed, 0xf4, 0xda, 0x40, 0x0c, 0x9c, 0x42, 0x89, 0xb6, 0x61, 0x82, 0x6e, 0x6b, 0x2e, 0x69, 0x05, + 0x8f, 0xc4, 0xc4, 0xc5, 0xef, 0x4c, 0xd6, 0xa7, 0x2f, 0xe1, 0xfd, 0x72, 0x33, 0xc6, 0x07, 0x27, + 0xf8, 0x36, 0x1a, 0x0f, 0x1f, 0x57, 0x8f, 0x3d, 0x7a, 0x5c, 0x3d, 0xf6, 0xc9, 0xe3, 0xea, 0xb1, + 0xf7, 0x7a, 0x55, 0xe5, 0x61, 0xaf, 0xaa, 0x3c, 0xea, 0x55, 0x95, 0x4f, 0x7a, 0x55, 0xe5, 0x1f, + 0xbd, 0xaa, 0xf2, 0xe3, 0x7f, 0x56, 0x8f, 0x7d, 0xeb, 0xec, 0x41, 0x4f, 0x74, 0xff, 0x1b, 0x00, + 0x00, 0xff, 0xff, 0xa5, 0x57, 0x37, 0xad, 0xc1, 0x2b, 0x00, 0x00, } func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) { @@ -1178,16 +1509,18 @@ func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Data != nil { + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - i-- - dAtA[i] = 0x2a if len(m.Conditions) > 0 { for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -1285,17 +1618,10 @@ func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { - v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] - baseI := i + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1303,21 +1629,85 @@ func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - i -= len(keysForCapacity[iNdEx]) - copy(dAtA[i:], keysForCapacity[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 + dAtA[i] = 0x3a } } - if len(m.Attributes) > 0 { - keysForAttributes := make([]string, 0, len(m.Attributes)) - for k := range m.Attributes { - keysForAttributes = append(keysForAttributes, string(k)) + if m.AllNodes != nil { + i-- + if *m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x22 + } + if len(m.ConsumesCounters) > 0 { + for iNdEx := len(m.ConsumesCounters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConsumesCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = 
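// ---------------------------------------------------------------------------
// The AllocatedDeviceStatus hunk above changes m.Data from a value to a
// pointer: the field is only emitted when non-nil, which is how this
// generated code models an optional message field. A tiny sketch of those
// presence semantics using hypothetical types (not the real API):
package main

import "fmt"

type data struct{ raw string }

type status struct {
	Data *data // nil means "unset"; the marshaler skips the field entirely
}

func encode(s status) string {
	if s.Data == nil {
		return "" // no bytes written; distinct from an empty-but-set Data
	}
	return "data:" + s.Data.raw
}

func main() {
	fmt.Printf("%q\n", encode(status{}))                 // ""
	fmt.Printf("%q\n", encode(status{Data: &data{"x"}})) // "data:x"
}
// ---------------------------------------------------------------------------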
append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { @@ -1374,6 +1764,96 @@ func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Counter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CounterSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CounterSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Counters) > 0 { + keysForCounters := make([]string, 0, len(m.Counters)) + for k := range m.Counters { + keysForCounters = append(keysForCounters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Counters[string(keysForCounters[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCounters[iNdEx]) + copy(dAtA[i:], keysForCounters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Device) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1919,6 +2399,63 @@ func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *DeviceCounterConsumption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
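// ---------------------------------------------------------------------------
// The generated CounterSet marshaler above collects the Counters map keys,
// sorts them, and emits entries in that fixed order, so the same object
// always encodes to the same bytes regardless of Go's randomized map
// iteration. A standalone sketch of the same pattern, assuming nothing
// beyond the standard library:
package main

import (
	"fmt"
	"sort"
)

func marshalDeterministic(m map[string]string) []byte {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // fixed order, independent of map iteration order
	var out []byte
	for _, k := range keys {
		out = append(out, k...)
		out = append(out, '=')
		out = append(out, m[k]...)
		out = append(out, ';')
	}
	return out
}

func main() {
	fmt.Printf("%s\n", marshalDeterministic(map[string]string{"b": "2", "a": "1"}))
	// always prints a=1;b=2; never b=2;a=1;
}
// ---------------------------------------------------------------------------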
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceCounterConsumption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Counters) > 0 { + keysForCounters := make([]string, 0, len(m.Counters)) + for k := range m.Counters { + keysForCounters = append(keysForCounters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Counters[string(keysForCounters[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCounters[iNdEx]) + copy(dAtA[i:], keysForCounters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CounterSet) + copy(dAtA[i:], m.CounterSet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CounterSet))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1939,6 +2476,34 @@ func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.FirstAvailable) > 0 { + for iNdEx := len(m.FirstAvailable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FirstAvailable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } if m.AdminAccess != nil { i-- if *m.AdminAccess { @@ -2004,6 +2569,20 @@ func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.AdminAccess != nil { i-- if *m.AdminAccess { @@ -2072,7 +2651,7 @@ func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { +func (m *DeviceSubRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2082,77 +2661,66 @@ func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceSubRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceSubRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i 
-= len(m.HardwareAddress) - copy(dAtA[i:], m.HardwareAddress) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress))) - i-- - dAtA[i] = 0x1a - if len(m.IPs) > 0 { - for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.IPs[iNdEx]) - copy(dAtA[i:], m.IPs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx]))) + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x3a } } - i -= len(m.InterfaceName) - copy(dAtA[i:], m.InterfaceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName))) + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x28 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) i-- dAtA[i] = 0x12 - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { +func (m *DeviceTaint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2162,50 +2730,47 @@ func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceTaint) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.TimeAdded != nil { + { + size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = 
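// ---------------------------------------------------------------------------
// The literal bytes written above (0x0a, 0x12, 0x1a, 0x22, 0x28, 0x3a, ...)
// are protobuf tag bytes: fieldNumber<<3 | wireType. A small sketch that
// reproduces the values the DeviceSubRequest marshaler uses (illustrative
// only; the field-name mapping follows the order of writes in that hunk):
package main

import "fmt"

const (
	wireVarint = 0 // ints, bools, enums
	wireBytes  = 2 // strings, nested messages
)

func tag(field, wire int) byte { return byte(field<<3 | wire) }

func main() {
	fmt.Printf("%#x\n", tag(1, wireBytes))  // 0xa  -> Name
	fmt.Printf("%#x\n", tag(2, wireBytes))  // 0x12 -> DeviceClassName
	fmt.Printf("%#x\n", tag(3, wireBytes))  // 0x1a -> Selectors
	fmt.Printf("%#x\n", tag(4, wireBytes))  // 0x22 -> AllocationMode
	fmt.Printf("%#x\n", tag(5, wireVarint)) // 0x28 -> Count
	fmt.Printf("%#x\n", tag(7, wireBytes))  // 0x3a -> Tolerations
}
// ---------------------------------------------------------------------------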
encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) i-- dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) i-- dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { +func (m *DeviceTaintRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2215,40 +2780,40 @@ func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceTaintRule) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceTaintRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x2a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x1a - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { +func (m *DeviceTaintRuleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2258,12 +2823,12 @@ func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceTaintRuleList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceTaintRuleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2295,7 +2860,7 @@ func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { +func (m *DeviceTaintRuleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2305,18 +2870,18 @@ func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimSpec) 
MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceTaintRuleSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceTaintRuleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { - size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Taint.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2324,11 +2889,23 @@ func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 + if m.DeviceSelector != nil { + { + size, err := m.DeviceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { +func (m *DeviceTaintSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2338,20 +2915,20 @@ func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceTaintSelector) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceTaintSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Devices) > 0 { - for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2359,39 +2936,41 @@ func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a } } - if len(m.ReservedFor) > 0 { - for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + if m.Device != nil { + i -= len(*m.Device) + copy(dAtA[i:], *m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Device))) + i-- + dAtA[i] = 0x22 } - if m.Allocation != nil { - { - size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if m.Pool != nil { + i -= len(*m.Pool) + copy(dAtA[i:], *m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pool))) + i-- + dAtA[i] = 0x1a + } + if m.Driver != nil { + i -= len(*m.Driver) + copy(dAtA[i:], *m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Driver))) + i-- + dAtA[i] = 0x12 + } + if m.DeviceClassName != nil { + i -= len(*m.DeviceClassName) + copy(dAtA[i:], *m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeviceClassName))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { +func (m *DeviceToleration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA 
= make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2401,40 +2980,45 @@ func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { +func (m *DeviceToleration) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeviceToleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.TolerationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i-- + dAtA[i] = 0x28 } + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x22 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) i-- dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { +func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2444,44 +3028,39 @@ func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { +func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.HardwareAddress) + copy(dAtA[i:], m.HardwareAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress))) + i-- + dAtA[i] = 0x1a + if len(m.IPs) > 0 { + for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPs[iNdEx]) + copy(dAtA[i:], m.IPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx]))) i-- dAtA[i] = 0x12 } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.InterfaceName) + copy(dAtA[i:], m.InterfaceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2491,18 +3070,18 @@ func 
(m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2511,20 +3090,15 @@ func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro } i-- dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourcePool) Marshal() (dAtA []byte, err error) { +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2534,31 +3108,50 @@ func (m *ResourcePool) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2568,40 +3161,40 @@ func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2611,12 +3204,12 @@ func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2648,7 +3241,7 @@ func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2658,12 +3251,45 @@ func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2679,20 +3305,26 @@ func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 + dAtA[i] = 0x22 } } - i-- - if m.AllNodes { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ReservedFor) > 0 { + for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
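// --- Editorial note (illustrative, not generated code) ---------------------
// Each single-byte assignment such as dAtA[i] = 0x2a above is a protobuf
// field key: key = fieldNumber<<3 | wireType, where wire type 2 is
// length-delimited (strings, nested messages) and 0 is varint. So for
// ResourceClaimConsumerReference: 0x0a = APIGroup (field 1), 0x1a = Resource
// (field 3), 0x22 = Name (field 4), 0x2a = UID (field 5). A tiny decoder:

package main

import "fmt"

// splitKey undoes what the marshaler does when it writes a one-byte tag.
func splitKey(tag byte) (fieldNum, wireType int) {
	return int(tag >> 3), int(tag & 0x7)
}

func main() {
	for _, tag := range []byte{0x0a, 0x1a, 0x22, 0x2a} {
		f, w := splitKey(tag)
		fmt.Printf("tag %#x -> field %d, wire type %d\n", tag, f, w)
	}
}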
dAtA[i] = 0x12 + } } - i-- - dAtA[i] = 0x28 - if m.NodeSelector != nil { + if m.Allocation != nil { { - size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2700,15 +3332,33 @@ func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0xa } - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x1a + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l { - size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2717,282 +3367,432 @@ func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *AllocatedDeviceStatus) Size() (n int) { - if m == nil { - return 0 - } + +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Pool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Device) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - l = m.Data.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.NetworkData != nil { - l = m.NetworkData.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *AllocationResult) Size() (n int) { - if m == nil { - return 0 - } - var l int 
- _ = l - l = m.Devices.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.NodeSelector != nil { - l = m.NodeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *BasicDevice) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Attributes) > 0 { - for k, v := range m.Attributes { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CELDeviceSelector) Size() (n int) { - if m == nil { - return 0 +func (m *ResourcePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *Device) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Basic != nil { - l = m.Basic.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DeviceAllocationConfiguration) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Source) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Requests) > 0 { - for _, s := range m.Requests { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.DeviceConfiguration.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *DeviceAllocationResult) Size() (n int) { - if m == nil { - return 0 +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ResourceSlice) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, e := range m.Results { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Config) > 0 { - for _, e := range m.Config { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *DeviceAttribute) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IntValue != nil { - n += 1 + sovGenerated(uint64(*m.IntValue)) - } - if m.BoolValue != nil { - n += 2 - } - if m.StringValue != nil { - l = len(*m.StringValue) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VersionValue != nil { - l = len(*m.VersionValue) - n += 1 + l + sovGenerated(uint64(l)) +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *DeviceClaim) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Requests) > 0 { - for _, e := range m.Requests { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Constraints) > 0 { - for _, e := range m.Constraints { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if len(m.Config) > 0 { - for _, e := range m.Config { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *DeviceClaimConfiguration) Size() (n int) { - if m == nil { - return 0 +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - if len(m.Requests) > 0 { - for _, s := range m.Requests { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SharedCounters) > 0 { + for iNdEx := len(m.SharedCounters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SharedCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } } - l = m.DeviceConfiguration.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.PerDeviceNodeSelection != nil { + i-- + if *m.PerDeviceNodeSelection { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *DeviceClass) Size() (n int) { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllocatedDeviceStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() + l = len(m.Driver) n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NetworkData != nil { + l = m.NetworkData.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DeviceClassConfiguration) Size() (n int) { +func (m *AllocationResult) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.DeviceConfiguration.Size() + l = m.Devices.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DeviceClassList) Size() (n int) { +func (m *BasicDevice) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return n -} - -func (m *DeviceClassSpec) Size() (n int) { - if m == nil { - return 0 + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - var l 
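// --- Editorial sketch (illustrative, not generated code) -------------------
// encodeVarintGenerated above emits unsigned base-128 varints: low 7 bits
// first, high bit set on every byte except the last. For example,
// 300 = 0b100101100 encodes as 0xAC 0x02. The standard library uses the same
// wire format, which makes for a quick standalone check:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // prints: ac 02
}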
int - _ = l - if len(m.Selectors) > 0 { - for _, e := range m.Selectors { + if len(m.ConsumesCounters) > 0 { + for _, e := range m.ConsumesCounters { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.Config) > 0 { - for _, e := range m.Config { + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AllNodes != nil { + n += 2 + } + if len(m.Taints) > 0 { + for _, e := range m.Taints { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -3000,39 +3800,29 @@ func (m *DeviceClassSpec) Size() (n int) { return n } -func (m *DeviceConfiguration) Size() (n int) { +func (m *CELDeviceSelector) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Opaque != nil { - l = m.Opaque.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeviceConstraint) Size() (n int) { +func (m *Counter) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Requests) > 0 { - for _, s := range m.Requests { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.MatchAttribute != nil { - l = len(*m.MatchAttribute) - n += 1 + l + sovGenerated(uint64(l)) - } + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeviceRequest) Size() (n int) { +func (m *CounterSet) Size() (n int) { if m == nil { return 0 } @@ -3040,89 +3830,141 @@ func (m *DeviceRequest) Size() (n int) { _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeviceClassName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Selectors) > 0 { - for _, e := range m.Selectors { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Counters) > 0 { + for k, v := range m.Counters { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - l = len(m.AllocationMode) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Count)) - if m.AdminAccess != nil { - n += 2 - } return n } -func (m *DeviceRequestAllocationResult) Size() (n int) { +func (m *Device) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Request) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Pool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Device) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - if m.AdminAccess != nil { - n += 2 + if m.Basic != nil { + l = m.Basic.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *DeviceSelector) Size() (n int) { +func (m *DeviceAllocationConfiguration) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.CEL != nil { - l = m.CEL.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *NetworkDeviceData) Size() (n int) { +func (m *DeviceAllocationResult) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.InterfaceName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.IPs) > 0 { - for _, s := range m.IPs { - l = len(s) + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.HardwareAddress) - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *OpaqueDeviceConfiguration) Size() (n int) { +func (m *DeviceAttribute) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Parameters.Size() + if m.IntValue != nil { + n += 1 + sovGenerated(uint64(*m.IntValue)) + } + if m.BoolValue != nil { + n += 2 + } + if m.StringValue != nil { + l = len(*m.StringValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VersionValue != nil { + l = len(*m.VersionValue) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClaimConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ResourceClaim) Size() (n int) { +func (m *DeviceClass) Size() (n int) { if m == nil { return 0 } @@ -3132,29 +3974,21 @@ func (m *ResourceClaim) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ResourceClaimConsumerReference) Size() (n int) { +func (m *DeviceClassConfiguration) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) + l = m.DeviceConfiguration.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ResourceClaimList) Size() (n int) { +func (m *DeviceClassList) Size() (n int) { if m == nil { return 0 } @@ -3171,65 +4005,135 @@ func (m *ResourceClaimList) Size() (n int) { return n } -func (m *ResourceClaimSpec) Size() (n int) { +func (m *DeviceClassSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.Devices.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *ResourceClaimStatus) Size() (n int) { +func (m *DeviceConfiguration) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Allocation != nil { - l = m.Allocation.Size() + if m.Opaque != nil { + l = m.Opaque.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.ReservedFor) > 0 { - for _, e := range m.ReservedFor { - l = e.Size() + return n +} + +func (m *DeviceConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) n += 1 + l + sovGenerated(uint64(l)) 
} } - if len(m.Devices) > 0 { - for _, e := range m.Devices { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MatchAttribute != nil { + l = len(*m.MatchAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceCounterConsumption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CounterSet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Counters) > 0 { + for k, v := range m.Counters { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } return n } -func (m *ResourceClaimTemplate) Size() (n int) { +func (m *DeviceRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + if m.AdminAccess != nil { + n += 2 + } + if len(m.FirstAvailable) > 0 { + for _, e := range m.FirstAvailable { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *ResourceClaimTemplateList) Size() (n int) { +func (m *DeviceRequestAllocationResult) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() + l = len(m.Request) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if m.AdminAccess != nil { + n += 2 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -3237,33 +4141,67 @@ func (m *ResourceClaimTemplateList) Size() (n int) { return n } -func (m *ResourceClaimTemplateSpec) Size() (n int) { +func (m *DeviceSelector) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() + if m.CEL != nil { + l = m.CEL.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceSubRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *ResourcePool) Size() (n int) { +func (m *DeviceTaint) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = len(m.Key) n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Generation)) - n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + 
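// --- Editorial sketch (illustrative, not generated code) -------------------
// The Size() methods above precompute the exact encoded length so Marshal
// can allocate once. The recurring `n += 1 + l + sovGenerated(uint64(l))`
// counts one tag byte, the varint length prefix, and the payload; map fields
// apply the same accounting to their synthetic entry messages. Sketch:

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovGenerated: bytes needed for a uvarint.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// sizeBytesField mirrors `n += 1 + l + sovGenerated(uint64(l))` for a field
// with a one-byte tag.
func sizeBytesField(payloadLen int) int {
	return 1 + payloadLen + sov(uint64(payloadLen))
}

func main() {
	fmt.Println(sizeBytesField(3))   // 5: tag + 1-byte length + 3 payload bytes
	fmt.Println(sizeBytesField(200)) // 203: length 200 needs a 2-byte varint
}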
n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *ResourceSlice) Size() (n int) { +func (m *DeviceTaintRule) Size() (n int) { if m == nil { return 0 } @@ -3276,7 +4214,7 @@ func (m *ResourceSlice) Size() (n int) { return n } -func (m *ResourceSliceList) Size() (n int) { +func (m *DeviceTaintRuleList) Size() (n int) { if m == nil { return 0 } @@ -3293,25 +4231,45 @@ func (m *ResourceSliceList) Size() (n int) { return n } -func (m *ResourceSliceSpec) Size() (n int) { +func (m *DeviceTaintRuleSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Pool.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) + if m.DeviceSelector != nil { + l = m.DeviceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Taint.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.NodeSelector != nil { - l = m.NodeSelector.Size() + return n +} + +func (m *DeviceTaintSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeviceClassName != nil { + l = len(*m.DeviceClassName) n += 1 + l + sovGenerated(uint64(l)) } - n += 2 - if len(m.Devices) > 0 { - for _, e := range m.Devices { + if m.Driver != nil { + l = len(*m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pool != nil { + l = len(*m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Device != nil { + l = len(*m.Device) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -3319,15 +4277,273 @@ func (m *ResourceSliceSpec) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllocatedDeviceStatus) String() string { - if this == nil { - return "nil" +func (m *DeviceToleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *NetworkDeviceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InterfaceName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.IPs) > 0 { + for _, s := range m.IPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.HardwareAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OpaqueDeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Parameters.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocation != nil { + l = m.Allocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ReservedFor) > 0 { + for _, e := range m.ReservedFor { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourcePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) + return n +} + +func (m *ResourceSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceSliceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Pool.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.PerDeviceNodeSelection != nil { + n += 2 + } + if len(m.SharedCounters) > 0 { + for _, e := range m.SharedCounters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllocatedDeviceStatus) String() string { + if 
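// --- Editorial note (illustrative, not generated code) ---------------------
// sovGenerated counts 7-bit groups ((Len64(x|1)+6)/7; the |1 makes zero cost
// one byte), and sozGenerated zigzag-encodes a signed value first so small
// negatives stay short: 0,-1,1,-2,2 map to 0,1,2,3,4. Sketch of the zigzag:

package main

import "fmt"

func zigzag(x int64) uint64 { return uint64((x << 1) ^ (x >> 63)) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%d -> %d\n", v, zigzag(v))
	}
}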
this == nil { + return "nil" } repeatedStringForConditions := "[]Condition{" for _, f := range this.Conditions { @@ -3339,7 +4555,7 @@ func (this *AllocatedDeviceStatus) String() string { `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, `Device:` + fmt.Sprintf("%v", this.Device) + `,`, `Conditions:` + repeatedStringForConditions + `,`, - `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1) + `,`, `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`, `}`, }, "") @@ -3360,7 +4576,17 @@ func (this *BasicDevice) String() string { if this == nil { return "nil" } - keysForAttributes := make([]string, 0, len(this.Attributes)) + repeatedStringForConsumesCounters := "[]DeviceCounterConsumption{" + for _, f := range this.ConsumesCounters { + repeatedStringForConsumesCounters += strings.Replace(strings.Replace(f.String(), "DeviceCounterConsumption", "DeviceCounterConsumption", 1), `&`, ``, 1) + "," + } + repeatedStringForConsumesCounters += "}" + repeatedStringForTaints := "[]DeviceTaint{" + for _, f := range this.Taints { + repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "DeviceTaint", "DeviceTaint", 1), `&`, ``, 1) + "," + } + repeatedStringForTaints += "}" + keysForAttributes := make([]string, 0, len(this.Attributes)) for k := range this.Attributes { keysForAttributes = append(keysForAttributes, string(k)) } @@ -3383,6 +4609,11 @@ func (this *BasicDevice) String() string { s := strings.Join([]string{`&BasicDevice{`, `Attributes:` + mapStringForAttributes + `,`, `Capacity:` + mapStringForCapacity + `,`, + `ConsumesCounters:` + repeatedStringForConsumesCounters + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`, + `Taints:` + repeatedStringForTaints + `,`, `}`, }, "") return s @@ -3397,6 +4628,37 @@ func (this *CELDeviceSelector) String() string { }, "") return s } +func (this *Counter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Counter{`, + `Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CounterSet) String() string { + if this == nil { + return "nil" + } + keysForCounters := make([]string, 0, len(this.Counters)) + for k := range this.Counters { + keysForCounters = append(keysForCounters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + mapStringForCounters := "map[string]Counter{" + for _, k := range keysForCounters { + mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k]) + } + mapStringForCounters += "}" + s := strings.Join([]string{`&CounterSet{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Counters:` + mapStringForCounters + `,`, + `}`, + }, "") + return s +} func (this *Device) String() string { if this == nil { return "nil" @@ -3571,6 +4833,27 @@ func (this *DeviceConstraint) String() string { }, "") return s } +func (this *DeviceCounterConsumption) String() string { + if this == nil { + return "nil" + } + keysForCounters := make([]string, 0, len(this.Counters)) + for k := range this.Counters { + keysForCounters = 
append(keysForCounters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCounters) + mapStringForCounters := "map[string]Counter{" + for _, k := range keysForCounters { + mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k]) + } + mapStringForCounters += "}" + s := strings.Join([]string{`&DeviceCounterConsumption{`, + `CounterSet:` + fmt.Sprintf("%v", this.CounterSet) + `,`, + `Counters:` + mapStringForCounters + `,`, + `}`, + }, "") + return s +} func (this *DeviceRequest) String() string { if this == nil { return "nil" @@ -3580,6 +4863,16 @@ func (this *DeviceRequest) String() string { repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," } repeatedStringForSelectors += "}" + repeatedStringForFirstAvailable := "[]DeviceSubRequest{" + for _, f := range this.FirstAvailable { + repeatedStringForFirstAvailable += strings.Replace(strings.Replace(f.String(), "DeviceSubRequest", "DeviceSubRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForFirstAvailable += "}" + repeatedStringForTolerations := "[]DeviceToleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" s := strings.Join([]string{`&DeviceRequest{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, @@ -3587,6 +4880,8 @@ func (this *DeviceRequest) String() string { `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `FirstAvailable:` + repeatedStringForFirstAvailable + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, `}`, }, "") return s @@ -3595,12 +4890,18 @@ func (this *DeviceRequestAllocationResult) String() string { if this == nil { return "nil" } + repeatedStringForTolerations := "[]DeviceToleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" s := strings.Join([]string{`&DeviceRequestAllocationResult{`, `Request:` + fmt.Sprintf("%v", this.Request) + `,`, `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, `Device:` + fmt.Sprintf("%v", this.Device) + `,`, `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, `}`, }, "") return s @@ -3615,6 +4916,115 @@ func (this *DeviceSelector) String() string { }, "") return s } +func (this *DeviceSubRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForTolerations := "[]DeviceToleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + s := strings.Join([]string{`&DeviceSubRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DeviceClassName:` + fmt.Sprintf("%v", 
this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func (this *DeviceTaint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceTaint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceTaintRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceTaintRule{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceTaintRuleSpec", "DeviceTaintRuleSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceTaintRuleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeviceTaintRule{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceTaintRule", "DeviceTaintRule", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeviceTaintRuleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeviceTaintRuleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceTaintRuleSpec{`, + `DeviceSelector:` + strings.Replace(this.DeviceSelector.String(), "DeviceTaintSelector", "DeviceTaintSelector", 1) + `,`, + `Taint:` + strings.Replace(strings.Replace(this.Taint.String(), "DeviceTaint", "DeviceTaint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceTaintSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&DeviceTaintSelector{`, + `DeviceClassName:` + valueToStringGenerated(this.DeviceClassName) + `,`, + `Driver:` + valueToStringGenerated(this.Driver) + `,`, + `Pool:` + valueToStringGenerated(this.Pool) + `,`, + `Device:` + valueToStringGenerated(this.Device) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `}`, + }, "") + return s +} +func (this *DeviceToleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceToleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} func (this *NetworkDeviceData) String() string { if this == nil { return "nil" @@ -3797,6 +5207,11 @@ func (this *ResourceSliceSpec) String() string { repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", 
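// --- Editorial sketch (illustrative, not generated code) -------------------
// The String() methods above assemble a debug form by hand with strings.Join
// over `Field:value,` pairs; the strings.Replace(..., `&`, ``, 1) calls trim
// the leading `&` that %v prints when a nested struct is formatted through a
// pointer, so embedded values render inline. With hypothetical types:

package main

import (
	"fmt"
	"strings"
)

type inner struct{ A int }

func main() {
	v := inner{A: 7}
	// fmt.Sprintf("%v", &v) yields "&{7}"; strip the "&" once, as above.
	s := strings.Replace(fmt.Sprintf("%v", &v), "&", "", 1)
	fmt.Println(strings.Join([]string{"&Outer{", "Inner:" + s + ",", "}"}, ""))
	// Output: &Outer{Inner:{7},}
}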
"Device", 1), `&`, ``, 1) + "," } repeatedStringForDevices += "}" + repeatedStringForSharedCounters := "[]CounterSet{" + for _, f := range this.SharedCounters { + repeatedStringForSharedCounters += strings.Replace(strings.Replace(f.String(), "CounterSet", "CounterSet", 1), `&`, ``, 1) + "," + } + repeatedStringForSharedCounters += "}" s := strings.Join([]string{`&ResourceSliceSpec{`, `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, @@ -3804,6 +5219,8 @@ func (this *ResourceSliceSpec) String() string { `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`, `Devices:` + repeatedStringForDevices + `,`, + `PerDeviceNodeSelection:` + valueToStringGenerated(this.PerDeviceNodeSelection) + `,`, + `SharedCounters:` + repeatedStringForSharedCounters + `,`, `}`, }, "") return s @@ -3813,10 +5230,1915 @@ func valueToStringGenerated(v interface{}) string { if rv.IsNil() { return "nil" } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = &runtime.RawExtension{} + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NetworkData == nil { + m.NetworkData = &NetworkDeviceData{} + } + if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { 
+ return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BasicDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
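// --- Editorial sketch (illustrative, not generated code) -------------------
// Every Unmarshal above follows the same loop: read a uvarint key, split it
// into field number (wire>>3) and wire type (wire&0x7), dispatch on the
// field, skip unknown fields, and reject wire type 4 (end-group), which
// these messages never use. A reduced single-field version, with error
// handling collapsed to a bool:

package main

import (
	"encoding/binary"
	"fmt"
)

func decodeName(dAtA []byte) (string, bool) {
	i := 0
	for i < len(dAtA) {
		key, n := binary.Uvarint(dAtA[i:])
		if n <= 0 {
			return "", false
		}
		i += n
		fieldNum, wireType := key>>3, key&0x7
		if fieldNum == 1 && wireType == 2 { // length-delimited string
			l, n := binary.Uvarint(dAtA[i:])
			if n <= 0 || i+n+int(l) > len(dAtA) {
				return "", false
			}
			return string(dAtA[i+n : i+n+int(l)]), true
		}
		return "", false // a real decoder would skipGenerated unknown fields
	}
	return "", false
}

func main() {
	fmt.Println(decodeName([]byte{0x0a, 0x03, 'g', 'p', 'u'})) // gpu true
}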
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + 
return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsumesCounters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsumesCounters = append(m.ConsumesCounters, DeviceCounterConsumption{}) + if err := m.ConsumesCounters[len(m.ConsumesCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllNodes = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, DeviceTaint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
Counter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CounterSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CounterSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CounterSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counters == nil { + m.Counters = make(map[string]Counter) + } + var mapkey string + mapvalue := &Counter{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Counter{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Counters[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Basic == nil { + m.Basic = &BasicDevice{} + } + if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex 
< 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3839,15 +7161,15 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } var stringLen uint64 for shift := uint(0); ; 
shift += 7 { @@ -3875,13 +7197,13 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3891,29 +7213,80 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Pool = string(dAtA[iNdEx:postIndex]) + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3923,27 +7296,28 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Device = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3970,14 +7344,63 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) 
error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4004,13 +7427,63 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4037,10 +7510,41 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NetworkData == nil { - m.NetworkData = &NetworkDeviceData{} + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if err := 
m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4065,7 +7569,7 @@ func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *AllocationResult) Unmarshal(dAtA []byte) error { +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4088,15 +7592,15 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4123,13 +7627,14 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4156,10 +7661,8 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = &v11.NodeSelector{} - } - if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4184,7 +7687,7 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } return nil } -func (m *BasicDevice) Unmarshal(dAtA []byte) error { +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4207,144 +7710,15 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Attributes == nil { - m.Attributes = make(map[QualifiedName]DeviceAttribute) - } - var mapkey QualifiedName - mapvalue := &DeviceAttribute{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &DeviceAttribute{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Attributes[QualifiedName(mapkey)] = *mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4371,105 +7745,12 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Capacity == nil { - m.Capacity = make(map[QualifiedName]resource.Quantity) + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} } - var mapkey QualifiedName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - 
entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Capacity[QualifiedName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4492,7 +7773,7 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error { } return nil } -func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4515,15 +7796,15 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4551,7 +7832,40 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Expression = string(dAtA[iNdEx:postIndex]) + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s iNdEx = postIndex default: iNdEx = preIndex @@ -4574,7 +7888,7 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *Device) Unmarshal(dAtA []byte) error { +func (m *DeviceCounterConsumption) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4597,15 +7911,15 @@ func (m *Device) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Device: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceCounterConsumption: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceCounterConsumption: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CounterSet", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4633,11 +7947,11 @@ func (m *Device) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.CounterSet = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4664,12 +7978,105 @@ func (m *Device) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Basic == nil { - m.Basic = &BasicDevice{} + if m.Counters == nil { + m.Counters = make(map[string]Counter) } - if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + mapvalue := &Counter{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { 
+ var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Counter{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Counters[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4692,7 +8099,7 @@ func (m *Device) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4715,15 +8122,15 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4751,11 +8158,11 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4783,11 +8190,11 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4814,63 +8221,86 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FirstAvailable", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4897,14 +8327,14 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, DeviceRequestAllocationResult{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.FirstAvailable = append(m.FirstAvailable, DeviceSubRequest{}) + if err := m.FirstAvailable[len(m.FirstAvailable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4931,8 +8361,8 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Config = 
append(m.Config, DeviceAllocationConfiguration{}) - if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Tolerations = append(m.Tolerations, DeviceToleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4957,7 +8387,7 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4980,17 +8410,17 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } - var v int64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5000,17 +8430,29 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.IntValue = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5020,16 +8462,27 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.BoolValue = &b - case 4: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5057,12 +8510,11 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.StringValue = &s + m.Pool = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5090,8 +8542,62 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.VersionValue = &s + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, DeviceToleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5114,7 +8620,7 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClaim) Unmarshal(dAtA []byte) error { +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5137,49 +8643,15 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Requests = append(m.Requests, DeviceRequest{}) - if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5206,42 +8678,10 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Constraints = append(m.Constraints, DeviceConstraint{}) - if err := 
m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} } - m.Config = append(m.Config, DeviceClaimConfiguration{}) - if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5266,7 +8706,7 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { +func (m *DeviceSubRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5289,15 +8729,47 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceSubRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceSubRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5325,11 +8797,11 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5356,65 +8828,16 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Selectors = append(m.Selectors, 
DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeviceClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5424,28 +8847,46 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5472,7 +8913,8 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Tolerations = append(m.Tolerations, DeviceToleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5497,7 +8939,7 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { +func (m *DeviceTaint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5520,17 +8962,17 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 
{ - return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceTaint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceTaint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5540,80 +8982,61 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeviceClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5623,28 +9046,27 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - 
if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5671,8 +9093,10 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DeviceClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TimeAdded == nil { + m.TimeAdded = &v1.Time{} + } + if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5697,7 +9121,7 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { +func (m *DeviceTaintRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5720,15 +9144,15 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceTaintRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceTaintRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5755,14 +9179,13 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Selectors = append(m.Selectors, DeviceSelector{}) - if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5789,8 +9212,7 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Config = append(m.Config, DeviceClassConfiguration{}) - if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5815,7 +9237,7 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { +func (m *DeviceTaintRuleList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5838,15 +9260,15 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceTaintRuleList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceTaintRuleList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5873,10 +9295,41 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Opaque == nil { - m.Opaque = &OpaqueDeviceConfiguration{} + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } - if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DeviceTaintRule{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5901,7 +9354,7 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { +func (m *DeviceTaintRuleSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5924,17 +9377,17 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceTaintRuleSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceTaintRuleSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5944,29 +9397,33 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + if m.DeviceSelector == nil { + m.DeviceSelector = &DeviceTaintSelector{} + } + if err := 
m.DeviceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Taint", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5976,24 +9433,24 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := FullyQualifiedName(dAtA[iNdEx:postIndex]) - m.MatchAttribute = &s + if err := m.Taint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6016,7 +9473,7 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceRequest) Unmarshal(dAtA []byte) error { +func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6039,15 +9496,15 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceTaintSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceTaintSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6075,11 +9532,12 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.DeviceClassName = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6107,13 +9565,14 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Driver = &s iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6123,29 +9582,28 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + 
postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Selectors = append(m.Selectors, DeviceSelector{}) - if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Pool = &s iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6173,13 +9631,14 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Device = &s iNdEx = postIndex case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) } - m.Count = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6189,32 +9648,26 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - b := bool(v != 0) - m.AdminAccess = &b + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6236,7 +9689,7 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { +func (m *DeviceToleration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6259,15 +9712,15 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") + return fmt.Errorf("proto: DeviceToleration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeviceToleration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6295,11 +9748,11 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Request = string(dAtA[iNdEx:postIndex]) + m.Key = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6327,11 +9780,11 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) + m.Operator = DeviceTolerationOperator(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6359,11 +9812,11 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Pool = string(dAtA[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6391,84 +9844,13 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Device = string(dAtA[iNdEx:postIndex]) + m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AdminAccess = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeviceSelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) } - var msglen int + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6478,28 +9860,12 @@ func (m *DeviceSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx 
+ msglen
-		if postIndex < 0 {
-			return ErrInvalidLengthGenerated
-		}
-		if postIndex > l {
-			return io.ErrUnexpectedEOF
-		}
-		if m.CEL == nil {
-			m.CEL = &CELDeviceSelector{}
-		}
-		if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-			return err
-		}
-		iNdEx = postIndex
+			m.TolerationSeconds = &v
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -8381,6 +11747,61 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PerDeviceNodeSelection", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.PerDeviceNodeSelection = &b
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SharedCounters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SharedCounters = append(m.SharedCounters, CounterSet{})
+			if err := m.SharedCounters[len(m.SharedCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/tools/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/tools/vendor/k8s.io/api/resource/v1alpha3/generated.proto
index e802a0143..103cafc6a 100644
--- a/tools/vendor/k8s.io/api/resource/v1alpha3/generated.proto
+++ b/tools/vendor/k8s.io/api/resource/v1alpha3/generated.proto
@@ -62,6 +62,8 @@ message AllocatedDeviceStatus {
   // If the device has been configured according to the class and claim
   // config references, the `Ready` condition should be True.
   //
+  // Must not contain more than 8 entries.
+  //
   // +optional
   // +listType=map
   // +listMapKey=type
@@ -111,6 +113,64 @@ message BasicDevice {
   //
   // +optional
   map<string, DeviceCapacity> capacity = 2;
+
+  // ConsumesCounters defines a list of references to sharedCounters
+  // and the set of counters that the device will
+  // consume from those counter sets.
+  //
+  // There can only be a single entry per counterSet.
+  //
+  // The total number of device counter consumption entries
+  // must be <= 32. In addition, the total number in the
+  // entire ResourceSlice must be <= 1024 (for example,
+  // 64 devices with 16 counters each).
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRAPartitionableDevices
+  repeated DeviceCounterConsumption consumesCounters = 3;
+
+  // NodeName identifies the node where the device is available.
+  //
+  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+  // At most one of NodeName, NodeSelector and AllNodes can be set.
+  //
+  // +optional
+  // +oneOf=DeviceNodeSelection
+  // +featureGate=DRAPartitionableDevices
+  optional string nodeName = 4;
+
+  // NodeSelector defines the nodes where the device is available.
+  //
+  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+  // At most one of NodeName, NodeSelector and AllNodes can be set.
+  //
+  // +optional
+  // +oneOf=DeviceNodeSelection
+  // +featureGate=DRAPartitionableDevices
+  optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 5;
+
+  // AllNodes indicates that all nodes have access to the device.
+  //
+  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+  // At most one of NodeName, NodeSelector and AllNodes can be set.
+  //
+  // +optional
+  // +oneOf=DeviceNodeSelection
+  // +featureGate=DRAPartitionableDevices
+  optional bool allNodes = 6;
+
+  // If specified, these are the driver-defined taints.
+  //
+  // The maximum number of taints is 4.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceTaint taints = 7;
 }
 
 // CELDeviceSelector contains a CEL expression for selecting a device.
@@ -170,6 +230,42 @@ message CELDeviceSelector {
   optional string expression = 1;
 }
 
+// Counter describes a quantity associated with a device.
+message Counter {
+  // Value defines how much of a certain device counter is available.
+  //
+  // +required
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+}
+
+// CounterSet defines a named set of counters
+// that are available to be used by devices defined in the
+// ResourceSlice.
+//
+// The counters are not allocatable by themselves, but
+// can be referenced by devices. When a device is allocated,
+// the portion of counters it uses will no longer be available for use
+// by other devices.
+message CounterSet {
+  // CounterSet is the name of the set from which the
+  // counters defined will be consumed.
+  //
+  // +required
+  optional string name = 1;
+
+  // Counters defines the counters that will be consumed by the device.
+  // The name of each counter must be unique in that set and must be a DNS label.
+  //
+  // To ensure this uniqueness, capacities defined by the vendor
+  // must be listed without the driver name as domain prefix in
+  // their name. All others must be listed with their domain prefix.
+  //
+  // The maximum number of counters is 32.
+  //
+  // +required
+  map<string, Counter> counters = 2;
+}
+
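For orientation, here is a minimal sketch of how these counter messages compose, written against the Go types generated from this proto (package k8s.io/api/resource/v1alpha3); the driver-specific names and quantities are hypothetical, and unrelated ResourceSliceSpec fields (driver, pool, node selection) are omitted:

package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	"k8s.io/apimachinery/pkg/api/resource"
)

// A slice in which one shared counter set ("gpu-0-counters", hypothetical)
// offers 40Gi of memory and one partition device consumes 20Gi of it.
// A second partition device could consume the remaining 20Gi.
var sliceSpec = resourceapi.ResourceSliceSpec{
	SharedCounters: []resourceapi.CounterSet{{
		Name: "gpu-0-counters",
		Counters: map[string]resourceapi.Counter{
			"memory": {Value: resource.MustParse("40Gi")},
		},
	}},
	Devices: []resourceapi.Device{{
		Name: "gpu-0-partition-0",
		Basic: &resourceapi.BasicDevice{
			ConsumesCounters: []resourceapi.DeviceCounterConsumption{{
				CounterSet: "gpu-0-counters",
				Counters: map[string]resourceapi.Counter{
					"memory": {Value: resource.MustParse("20Gi")},
				},
			}},
		},
	}},
}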
 // Device represents one individual hardware instance that can be selected based
 // on its attributes. Besides the name, exactly one field must be set.
 message Device {
@@ -198,6 +294,10 @@ message DeviceAllocationConfiguration {
   // Requests lists the names of requests where the configuration applies.
   // If empty, it applies to all requests.
   //
+  // References to subrequests must include the name of the main request
+  // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+  // the main request is given, the configuration applies to all subrequests.
+  //
   // +optional
   // +listType=atomic
   repeated string requests = 2;
@@ -284,6 +384,10 @@ message DeviceClaimConfiguration {
   // Requests lists the names of requests where the configuration applies.
   // If empty, it applies to all requests.
   //
+  // References to subrequests must include the name of the main request
+  // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+  // the main request is given, the configuration applies to all subrequests.
+  //
   // +optional
   // +listType=atomic
   repeated string requests = 1;
@@ -368,6 +472,10 @@ message DeviceConstraint {
   // constraint. If this is not specified, this constraint applies to all
   // requests in this claim.
   //
+  // References to subrequests must include the name of the main request
+  // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+  // the main request is given, the constraint applies to all subrequests.
+  //
   // +optional
   // +listType=atomic
   repeated string requests = 1;
@@ -390,14 +498,30 @@ message DeviceConstraint {
   optional string matchAttribute = 2;
 }
 
+// DeviceCounterConsumption defines a set of counters that
+// a device will consume from a CounterSet.
+message DeviceCounterConsumption {
+  // CounterSet defines the set from which the
+  // counters defined will be consumed.
+  //
+  // +required
+  optional string counterSet = 1;
+
+  // Counters defines the counters that will be consumed by
+  // the device.
+  //
+  // The maximum number of counters in a device is 32.
+  // In addition, the maximum number of all counters
+  // in all devices is 1024 (for example, 64 devices with
+  // 16 counters each).
+  //
+  // +required
+  map<string, Counter> counters = 2;
+}
+
 // DeviceRequest is a request for devices required for a claim.
 // This is typically a request for a single resource like a device, but can
 // also ask for several identical devices.
-//
-// A DeviceClassName is currently required. Clients must check that it is
-// indeed set. It's absence indicates that something changed in a way that
-// is not supported by the client yet, in which case it must refuse to
-// handle the request.
 message DeviceRequest {
   // Name can be used to reference this request in a pod.spec.containers[].resources.claims
   // entry and in a constraint of the claim.
@@ -411,7 +535,10 @@ message DeviceRequest {
   // additional configuration and selectors to be inherited by this
   // request.
   //
-  // A class is required. Which classes are available depends on the cluster.
+  // A class is required if no subrequests are specified in the
+  // firstAvailable list, and no class can be set if subrequests
+  // are specified in the firstAvailable list.
+  // Which classes are available depends on the cluster.
   //
   // Administrators may use this to restrict which devices may get
   // requested by only installing classes with selectors for permitted
   // devices. If users are free to request anything without restrictions,
   // then administrators can create an empty DeviceClass for users
   // to reference.
   //
-  // +required
+  // +optional
+  // +oneOf=deviceRequestType
   optional string deviceClassName = 2;
 
   // Selectors define criteria which must be satisfied by a specific
@@ -427,6 +555,9 @@ message DeviceRequest {
   // request. All selectors must be satisfied for a device to be
   // considered.
   //
+  // This field can only be set when deviceClassName is set and no subrequests
+  // are specified in the firstAvailable list.
+  //
   // +optional
   // +listType=atomic
   repeated DeviceSelector selectors = 3;
@@ -439,13 +570,17 @@ message DeviceRequest {
   // count field.
   //
   // - All: This request is for all of the matching devices in a pool.
+  //   At least one device must exist on the node for the allocation to succeed.
   //   Allocation will fail if some devices are already allocated,
   //   unless adminAccess is requested.
   //
-  // If AlloctionMode is not specified, the default mode is ExactCount. If
+  // If AllocationMode is not specified, the default mode is ExactCount. If
   // the mode is ExactCount and count is not specified, the default count is
   // one. Any other requests must specify this field.
   //
+  // This field can only be set when deviceClassName is set and no subrequests
+  // are specified in the firstAvailable list.
+  //
   // More modes may get added in the future. Clients must refuse to handle
   // requests with unknown modes.
   //
@@ -455,6 +590,9 @@ message DeviceRequest {
   // Count is used only when the count mode is "ExactCount". Must be greater than zero.
   // If AllocationMode is ExactCount and this field is not specified, the default is one.
   //
+  // This field can only be set when deviceClassName is set and no subrequests
+  // are specified in the firstAvailable list.
+  //
   // +optional
   // +oneOf=AllocationMode
   optional int64 count = 5;
@@ -465,6 +603,9 @@ message DeviceRequest {
   // all ordinary claims to the device with respect to access modes and
   // any resource allocations.
   //
+  // This field can only be set when deviceClassName is set and no subrequests
+  // are specified in the firstAvailable list.
+  //
   // This is an alpha field and requires enabling the DRAAdminAccess
   // feature gate. Admin access is disabled if this field is unset or
   // set to false, otherwise it is enabled.
@@ -472,13 +613,65 @@ message DeviceRequest {
   // +optional
   // +featureGate=DRAAdminAccess
   optional bool adminAccess = 6;
+
+  // FirstAvailable contains subrequests, of which exactly one will be
+  // satisfied by the scheduler to satisfy this request. It tries to
+  // satisfy them in the order in which they are listed here. So if
+  // there are two entries in the list, the scheduler will only check
+  // the second one if it determines that the first one cannot be used.
+  //
+  // This field may only be set in the entries of DeviceClaim.Requests.
+  //
+  // DRA does not yet implement scoring, so the scheduler will
+  // select the first set of devices that satisfies all the
+  // requests in the claim. And if the requirements can
+  // be satisfied on more than one node, other scheduling features
+  // will determine which node is chosen. This means that the set of
+  // devices allocated to a claim might not be the optimal set
+  // available to the cluster. Scoring will be implemented later.
+  //
+  // +optional
+  // +oneOf=deviceRequestType
+  // +listType=atomic
+  // +featureGate=DRAPrioritizedList
+  repeated DeviceSubRequest firstAvailable = 7;
+
+  // If specified, the request's tolerations.
+  //
+  // Tolerations for NoSchedule are required to allocate a
+  // device which has a taint with that effect. The same applies
+  // to NoExecute.
+  //
+  // In addition, should any of the allocated devices get tainted
+  // with NoExecute after allocation and that effect is not tolerated,
+  // then all pods consuming the ResourceClaim get deleted to evict
+  // them. The scheduler will not let new pods reserve the claim while
+  // it has these tainted devices. Once all pods are evicted, the
+  // claim will get deallocated.
+  //
+  // The maximum number of tolerations is 16.
+  //
+  // This field can only be set when deviceClassName is set and no subrequests
+  // are specified in the firstAvailable list.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceToleration tolerations = 8;
 }
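The firstAvailable semantics are easiest to see in code. A hedged sketch using the corresponding generated Go types (k8s.io/api/resource/v1alpha3); the device class names are made up:

package example

import resourceapi "k8s.io/api/resource/v1alpha3"

// A request that prefers one large GPU but falls back to one small GPU.
// The scheduler tries the subrequests in order and allocates for the
// first one that can be satisfied; results then reference the chosen
// subrequest as "gpu/large" or "gpu/small".
var request = resourceapi.DeviceRequest{
	Name: "gpu",
	// deviceClassName, selectors, allocationMode, count, adminAccess and
	// tolerations must stay unset when firstAvailable is used.
	FirstAvailable: []resourceapi.DeviceSubRequest{
		{
			Name:            "large",
			DeviceClassName: "large-gpu.example.com", // hypothetical class
			AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
			Count:           1,
		},
		{
			Name:            "small",
			DeviceClassName: "small-gpu.example.com", // hypothetical class
			AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
			Count:           1,
		},
	},
}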
 
 // DeviceRequestAllocationResult contains the allocation result for one request.
 message DeviceRequestAllocationResult {
   // Request is the name of the request in the claim which caused this
-  // device to be allocated. Multiple devices may have been allocated
-  // per request.
+  // device to be allocated. If it references a subrequest in the
+  // firstAvailable list on a DeviceRequest, this field must
+  // include both the name of the main request and the subrequest
+  // using the format <main request>/<subrequest>.
+  //
+  // Multiple devices may have been allocated per request.
   //
   // +required
   optional string request = 1;
@@ -519,6 +712,19 @@ message DeviceRequestAllocationResult {
   // +optional
   // +featureGate=DRAAdminAccess
   optional bool adminAccess = 5;
+
+  // A copy of all tolerations specified in the request at the time
+  // when the device got allocated.
+  //
+  // The maximum number of tolerations is 16.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceToleration tolerations = 6;
 }
 
 // DeviceSelector must have exactly one field set.
@@ -530,6 +736,262 @@ message DeviceSelector {
   optional CELDeviceSelector cel = 1;
 }
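Since allocation results and subrequest references share the <main request>/<subrequest> naming convention, a consumer can recover the main request with a plain string split. A small sketch (the helper name is ours, not part of the API):

package example

import (
	"strings"

	resourceapi "k8s.io/api/resource/v1alpha3"
)

// matchesRequest reports whether an allocation result belongs to the
// given main request, either directly ("gpu") or through one of its
// subrequests ("gpu/large").
func matchesRequest(result resourceapi.DeviceRequestAllocationResult, mainRequest string) bool {
	if result.Request == mainRequest {
		return true
	}
	main, _, found := strings.Cut(result.Request, "/")
	return found && main == mainRequest
}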
+// DeviceSubRequest describes a request for a device provided in the
+// claim.spec.devices.requests[].firstAvailable array. Each
+// is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// DeviceSubRequest is similar to Request, but doesn't expose the AdminAccess
+// or FirstAvailable fields, as those can only be set on the top-level request.
+// AdminAccess is not supported for requests with a prioritized list, and
+// recursive FirstAvailable fields are not supported.
+message DeviceSubRequest {
+  // Name can be used to reference this subrequest in the list of constraints
+  // or the list of configurations for the claim. References must use the
+  // format <main request>/<subrequest>.
+  //
+  // Must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // DeviceClassName references a specific DeviceClass, which can define
+  // additional configuration and selectors to be inherited by this
+  // subrequest.
+  //
+  // A class is required. Which classes are available depends on the cluster.
+  //
+  // Administrators may use this to restrict which devices may get
+  // requested by only installing classes with selectors for permitted
+  // devices. If users are free to request anything without restrictions,
+  // then administrators can create an empty DeviceClass for users
+  // to reference.
+  //
+  // +required
+  optional string deviceClassName = 2;
+
+  // Selectors define criteria which must be satisfied by a specific
+  // device in order for that device to be considered for this
+  // request. All selectors must be satisfied for a device to be
+  // considered.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceSelector selectors = 3;
+
+  // AllocationMode and its related fields define how devices are allocated
+  // to satisfy this request. Supported values are:
+  //
+  // - ExactCount: This request is for a specific number of devices.
+  //   This is the default. The exact number is provided in the
+  //   count field.
+  //
+  // - All: This request is for all of the matching devices in a pool.
+  //   Allocation will fail if some devices are already allocated,
+  //   unless adminAccess is requested.
+  //
+  // If AllocationMode is not specified, the default mode is ExactCount. If
+  // the mode is ExactCount and count is not specified, the default count is
+  // one. Any other requests must specify this field.
+  //
+  // More modes may get added in the future. Clients must refuse to handle
+  // requests with unknown modes.
+  //
+  // +optional
+  optional string allocationMode = 4;
+
+  // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+  // If AllocationMode is ExactCount and this field is not specified, the default is one.
+  //
+  // +optional
+  // +oneOf=AllocationMode
+  optional int64 count = 5;
+
+  // If specified, the request's tolerations.
+  //
+  // Tolerations for NoSchedule are required to allocate a
+  // device which has a taint with that effect. The same applies
+  // to NoExecute.
+  //
+  // In addition, should any of the allocated devices get tainted
+  // with NoExecute after allocation and that effect is not tolerated,
+  // then all pods consuming the ResourceClaim get deleted to evict
+  // them. The scheduler will not let new pods reserve the claim while
+  // it has these tainted devices. Once all pods are evicted, the
+  // claim will get deallocated.
+  //
+  // The maximum number of tolerations is 16.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceToleration tolerations = 7;
+}
+
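As a sketch of what such a toleration looks like in the generated Go types (the taint key and the duration are hypothetical; "Exists" and "NoExecute" are the operator and effect values documented below):

package example

import resourceapi "k8s.io/api/resource/v1alpha3"

// tolerationSeconds bounds how long an allocated device may carry a
// NoExecute taint before the consuming pods are evicted.
var tolerationSeconds int64 = 3600

// Tolerate a hypothetical "example.com/maintenance" taint for one hour.
var toleration = resourceapi.DeviceToleration{
	Key:               "example.com/maintenance",
	Operator:          "Exists",
	Effect:            "NoExecute",
	TolerationSeconds: &tolerationSeconds,
}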
+// The device this taint is attached to has the "effect" on
+// any claim which does not tolerate the taint and, through the claim,
+// to pods using the claim.
+message DeviceTaint {
+  // The taint key to be applied to a device.
+  // Must be a label name.
+  //
+  // +required
+  optional string key = 1;
+
+  // The taint value corresponding to the taint key.
+  // Must be a label value.
+  //
+  // +optional
+  optional string value = 2;
+
+  // The effect of the taint on claims that do not tolerate the taint
+  // and through such claims on the pods using them.
+  // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
+  // nodes is not valid here.
+  //
+  // +required
+  optional string effect = 3;
+
+  // TimeAdded represents the time at which the taint was added.
+  // Added automatically during create or update if not set.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
+}
+
+// DeviceTaintRule adds one taint to all devices which match the selector.
+// This has the same effect as if the taint was specified directly
+// in the ResourceSlice by the DRA driver.
+message DeviceTaintRule {
+  // Standard object metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec specifies the selector and one taint.
+  //
+  // Changing the spec automatically increments the metadata.generation number.
+  optional DeviceTaintRuleSpec spec = 2;
+}
+
+// DeviceTaintRuleList is a collection of DeviceTaintRules.
+message DeviceTaintRuleList {
+  // Standard list metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of DeviceTaintRules.
+  repeated DeviceTaintRule items = 2;
+}
+
+// DeviceTaintRuleSpec specifies the selector and one taint.
+message DeviceTaintRuleSpec {
+  // DeviceSelector defines which device(s) the taint is applied to.
+  // All selector criteria must be satisfied for a device to
+  // match. The empty selector matches all devices. Without
+  // a selector, no devices are matched.
+  //
+  // +optional
+  optional DeviceTaintSelector deviceSelector = 1;
+
+  // The taint that gets applied to matching devices.
+  //
+  // +required
+  optional DeviceTaint taint = 2;
+}
+
+// DeviceTaintSelector defines which device(s) a DeviceTaintRule applies to.
+// The empty selector matches all devices. Without a selector, no devices
+// are matched.
+message DeviceTaintSelector {
+  // If DeviceClassName is set, the selectors defined there must be
+  // satisfied by a device to be selected. This field corresponds
+  // to class.metadata.name.
+  //
+  // +optional
+  optional string deviceClassName = 1;
+
+  // If driver is set, only devices from that driver are selected.
+  // This field corresponds to slice.spec.driver.
+  //
+  // +optional
+  optional string driver = 2;
+
+  // If pool is set, only devices in that pool are selected.
+  //
+  // Also setting the driver name may be useful to avoid
+  // ambiguity when different drivers use the same pool name,
+  // but this is not required because selecting pools from
+  // different drivers may also be useful, for example when
+  // drivers with node-local devices use the node name as
+  // their pool name.
+  //
+  // +optional
+  optional string pool = 3;
+
+  // If device is set, only devices with that name are selected.
+  // This field corresponds to slice.spec.devices[].name.
+  //
+  // Also setting driver and pool may be needed to avoid ambiguity,
+  // but is not required.
+  //
+  // +optional
+  optional string device = 4;
+
+  // Selectors contains the same selection criteria as a ResourceClaim.
+  // Currently, CEL expressions are supported. All of these selectors
+  // must be satisfied.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceSelector selectors = 5;
+}
+
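A hedged sketch of a complete rule, again using the generated Go types; the driver name and taint key are hypothetical:

package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// driver selects every device published by a hypothetical DRA driver.
var driver = "gpu.example.com"

// A rule that marks all of that driver's devices as under maintenance.
// With effect NoExecute, pods consuming already-allocated devices are
// evicted unless their claims tolerate the taint.
var rule = resourceapi.DeviceTaintRule{
	ObjectMeta: metav1.ObjectMeta{Name: "drain-gpu-example-com"},
	Spec: resourceapi.DeviceTaintRuleSpec{
		DeviceSelector: &resourceapi.DeviceTaintSelector{
			Driver: &driver,
		},
		Taint: resourceapi.DeviceTaint{
			Key:    "example.com/maintenance",
			Effect: "NoExecute", // TimeAdded is filled in on create if unset
		},
	},
}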
+// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message DeviceToleration {
+  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+  // Must be a label name.
+  //
+  // +optional
+  optional string key = 1;
+
+  // Operator represents a key's relationship to the value.
+  // Valid operators are Exists and Equal. Defaults to Equal.
+  // Exists is equivalent to wildcard for value, so that a ResourceClaim can
+  // tolerate all taints of a particular category.
+  //
+  // +optional
+  // +default="Equal"
+  optional string operator = 2;
+
+  // Value is the taint value the toleration matches to.
+  // If the operator is Exists, the value must be empty, otherwise just a regular string.
+  // Must be a label value.
+  //
+  // +optional
+  optional string value = 3;
+
+  // Effect indicates the taint effect to match. Empty means match all taint effects.
+  // When specified, allowed values are NoSchedule and NoExecute.
+  //
+  // +optional
+  optional string effect = 4;
+
+  // TolerationSeconds represents the period of time the toleration (which must be
+  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+  // it is not set, which means tolerate the taint forever (do not evict). Zero and
+  // negative values will be treated as 0 (evict immediately) by the system.
+  // If larger than zero, the time when the pod needs to be evicted is calculated as